content stringlengths 1 103k ⌀ | path stringlengths 8 216 | filename stringlengths 2 179 | language stringclasses 15
values | size_bytes int64 2 189k | quality_score float64 0.5 0.95 | complexity float64 0 1 | documentation_ratio float64 0 1 | repository stringclasses 5
values | stars int64 0 1k | created_date stringdate 2023-07-10 19:21:08 2025-07-09 19:11:45 | license stringclasses 4
values | is_test bool 2
classes | file_hash stringlengths 32 32 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
"""\nTests for helper functions in the cython tslibs.offsets\n"""\nfrom datetime import datetime\n\nimport pytest\n\nfrom pandas._libs.tslibs.ccalendar import (\n get_firstbday,\n get_lastbday,\n)\nimport pandas._libs.tslibs.offsets as liboffsets\nfrom pandas._libs.tslibs.offsets import roll_qtrday\n\nfrom pandas import Timestamp\n\n\n@pytest.fixture(params=["start", "end", "business_start", "business_end"])\ndef day_opt(request):\n return request.param\n\n\n@pytest.mark.parametrize(\n "dt,exp_week_day,exp_last_day",\n [\n (datetime(2017, 11, 30), 3, 30), # Business day.\n (datetime(1993, 10, 31), 6, 29), # Non-business day.\n ],\n)\ndef test_get_last_bday(dt, exp_week_day, exp_last_day):\n assert dt.weekday() == exp_week_day\n assert get_lastbday(dt.year, dt.month) == exp_last_day\n\n\n@pytest.mark.parametrize(\n "dt,exp_week_day,exp_first_day",\n [\n (datetime(2017, 4, 1), 5, 3), # Non-weekday.\n (datetime(1993, 10, 1), 4, 1), # Business day.\n ],\n)\ndef test_get_first_bday(dt, exp_week_day, exp_first_day):\n assert dt.weekday() == exp_week_day\n assert get_firstbday(dt.year, dt.month) == exp_first_day\n\n\n@pytest.mark.parametrize(\n "months,day_opt,expected",\n [\n (0, 15, datetime(2017, 11, 15)),\n (0, None, datetime(2017, 11, 30)),\n (1, "start", datetime(2017, 12, 1)),\n (-145, "end", datetime(2005, 10, 31)),\n (0, "business_end", datetime(2017, 11, 30)),\n (0, "business_start", datetime(2017, 11, 1)),\n ],\n)\ndef test_shift_month_dt(months, day_opt, expected):\n dt = datetime(2017, 11, 30)\n assert liboffsets.shift_month(dt, months, day_opt=day_opt) == expected\n\n\n@pytest.mark.parametrize(\n "months,day_opt,expected",\n [\n (1, "start", Timestamp("1929-06-01")),\n (-3, "end", Timestamp("1929-02-28")),\n (25, None, Timestamp("1931-06-5")),\n (-1, 31, Timestamp("1929-04-30")),\n ],\n)\ndef test_shift_month_ts(months, day_opt, expected):\n ts = Timestamp("1929-05-05")\n assert liboffsets.shift_month(ts, months, day_opt=day_opt) == expected\n\n\ndef 
test_shift_month_error():\n dt = datetime(2017, 11, 15)\n day_opt = "this should raise"\n\n with pytest.raises(ValueError, match=day_opt):\n liboffsets.shift_month(dt, 3, day_opt=day_opt)\n\n\n@pytest.mark.parametrize(\n "other,expected",\n [\n # Before March 1.\n (datetime(2017, 2, 10), {2: 1, -7: -7, 0: 0}),\n # After March 1.\n (Timestamp("2014-03-15", tz="US/Eastern"), {2: 2, -7: -6, 0: 1}),\n ],\n)\n@pytest.mark.parametrize("n", [2, -7, 0])\ndef test_roll_qtrday_year(other, expected, n):\n month = 3\n day_opt = "start" # `other` will be compared to March 1.\n\n assert roll_qtrday(other, n, month, day_opt, modby=12) == expected[n]\n\n\n@pytest.mark.parametrize(\n "other,expected",\n [\n # Before June 30.\n (datetime(1999, 6, 29), {5: 4, -7: -7, 0: 0}),\n # After June 30.\n (Timestamp(2072, 8, 24, 6, 17, 18), {5: 5, -7: -6, 0: 1}),\n ],\n)\n@pytest.mark.parametrize("n", [5, -7, 0])\ndef test_roll_qtrday_year2(other, expected, n):\n month = 6\n day_opt = "end" # `other` will be compared to June 30.\n\n assert roll_qtrday(other, n, month, day_opt, modby=12) == expected[n]\n\n\ndef test_get_day_of_month_error():\n # get_day_of_month is not directly exposed.\n # We test it via roll_qtrday.\n dt = datetime(2017, 11, 15)\n day_opt = "foo"\n\n with pytest.raises(ValueError, match=day_opt):\n # To hit the raising case we need month == dt.month and n > 0.\n roll_qtrday(dt, n=3, month=11, day_opt=day_opt, modby=12)\n\n\n@pytest.mark.parametrize(\n "month",\n [3, 5], # (other.month % 3) < (month % 3) # (other.month % 3) > (month % 3)\n)\n@pytest.mark.parametrize("n", [4, -3])\ndef test_roll_qtr_day_not_mod_unequal(day_opt, month, n):\n expected = {3: {-3: -2, 4: 4}, 5: {-3: -3, 4: 3}}\n\n other = Timestamp(2072, 10, 1, 6, 17, 18) # Saturday.\n assert roll_qtrday(other, n, month, day_opt, modby=3) == expected[month][n]\n\n\n@pytest.mark.parametrize(\n "other,month,exp_dict",\n [\n # Monday.\n (datetime(1999, 5, 31), 2, {-1: {"start": 0, "business_start": 0}}),\n # 
Saturday.\n (\n Timestamp(2072, 10, 1, 6, 17, 18),\n 4,\n {2: {"end": 1, "business_end": 1, "business_start": 1}},\n ),\n # First business day.\n (\n Timestamp(2072, 10, 3, 6, 17, 18),\n 4,\n {2: {"end": 1, "business_end": 1}, -1: {"start": 0}},\n ),\n ],\n)\n@pytest.mark.parametrize("n", [2, -1])\ndef test_roll_qtr_day_mod_equal(other, month, exp_dict, n, day_opt):\n # All cases have (other.month % 3) == (month % 3).\n expected = exp_dict.get(n, {}).get(day_opt, n)\n assert roll_qtrday(other, n, month, day_opt, modby=3) == expected\n\n\n@pytest.mark.parametrize(\n "n,expected", [(42, {29: 42, 1: 42, 31: 41}), (-4, {29: -4, 1: -3, 31: -4})]\n)\n@pytest.mark.parametrize("compare", [29, 1, 31])\ndef test_roll_convention(n, expected, compare):\n assert liboffsets.roll_convention(29, n, compare) == expected[compare]\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_liboffsets.py | test_liboffsets.py | Python | 5,108 | 0.95 | 0.075145 | 0.078014 | react-lib | 901 | 2024-12-16T22:40:01.265138 | GPL-3.0 | true | 194f595cf4e18b5e31bd6e2b397c2fb7 |
import numpy as np\n\nfrom pandas._libs.tslibs.dtypes import abbrev_to_npy_unit\nfrom pandas._libs.tslibs.vectorized import is_date_array_normalized\n\n# a datetime64 ndarray which *is* normalized\nday_arr = np.arange(10, dtype="i8").view("M8[D]")\n\n\nclass TestIsDateArrayNormalized:\n def test_is_date_array_normalized_day(self):\n arr = day_arr\n abbrev = "D"\n unit = abbrev_to_npy_unit(abbrev)\n result = is_date_array_normalized(arr.view("i8"), None, unit)\n assert result is True\n\n def test_is_date_array_normalized_seconds(self):\n abbrev = "s"\n arr = day_arr.astype(f"M8[{abbrev}]")\n unit = abbrev_to_npy_unit(abbrev)\n result = is_date_array_normalized(arr.view("i8"), None, unit)\n assert result is True\n\n arr[0] += np.timedelta64(1, abbrev)\n result2 = is_date_array_normalized(arr.view("i8"), None, unit)\n assert result2 is False\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_npy_units.py | test_npy_units.py | Python | 922 | 0.95 | 0.111111 | 0.047619 | python-kit | 55 | 2023-08-31T14:03:18.277168 | GPL-3.0 | true | ce08efb9cc46c6dee153e7ac81b1ae45 |
import numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.dtypes import NpyDatetimeUnit\nfrom pandas._libs.tslibs.np_datetime import (\n OutOfBoundsDatetime,\n OutOfBoundsTimedelta,\n astype_overflowsafe,\n is_unitless,\n py_get_unit_from_dtype,\n py_td64_to_tdstruct,\n)\n\nimport pandas._testing as tm\n\n\ndef test_is_unitless():\n dtype = np.dtype("M8[ns]")\n assert not is_unitless(dtype)\n\n dtype = np.dtype("datetime64")\n assert is_unitless(dtype)\n\n dtype = np.dtype("m8[ns]")\n assert not is_unitless(dtype)\n\n dtype = np.dtype("timedelta64")\n assert is_unitless(dtype)\n\n msg = "dtype must be datetime64 or timedelta64"\n with pytest.raises(ValueError, match=msg):\n is_unitless(np.dtype(np.int64))\n\n msg = "Argument 'dtype' has incorrect type"\n with pytest.raises(TypeError, match=msg):\n is_unitless("foo")\n\n\ndef test_get_unit_from_dtype():\n # datetime64\n assert py_get_unit_from_dtype(np.dtype("M8[Y]")) == NpyDatetimeUnit.NPY_FR_Y.value\n assert py_get_unit_from_dtype(np.dtype("M8[M]")) == NpyDatetimeUnit.NPY_FR_M.value\n assert py_get_unit_from_dtype(np.dtype("M8[W]")) == NpyDatetimeUnit.NPY_FR_W.value\n # B has been deprecated and removed -> no 3\n assert py_get_unit_from_dtype(np.dtype("M8[D]")) == NpyDatetimeUnit.NPY_FR_D.value\n assert py_get_unit_from_dtype(np.dtype("M8[h]")) == NpyDatetimeUnit.NPY_FR_h.value\n assert py_get_unit_from_dtype(np.dtype("M8[m]")) == NpyDatetimeUnit.NPY_FR_m.value\n assert py_get_unit_from_dtype(np.dtype("M8[s]")) == NpyDatetimeUnit.NPY_FR_s.value\n assert py_get_unit_from_dtype(np.dtype("M8[ms]")) == NpyDatetimeUnit.NPY_FR_ms.value\n assert py_get_unit_from_dtype(np.dtype("M8[us]")) == NpyDatetimeUnit.NPY_FR_us.value\n assert py_get_unit_from_dtype(np.dtype("M8[ns]")) == NpyDatetimeUnit.NPY_FR_ns.value\n assert py_get_unit_from_dtype(np.dtype("M8[ps]")) == NpyDatetimeUnit.NPY_FR_ps.value\n assert py_get_unit_from_dtype(np.dtype("M8[fs]")) == NpyDatetimeUnit.NPY_FR_fs.value\n assert 
py_get_unit_from_dtype(np.dtype("M8[as]")) == NpyDatetimeUnit.NPY_FR_as.value\n\n # timedelta64\n assert py_get_unit_from_dtype(np.dtype("m8[Y]")) == NpyDatetimeUnit.NPY_FR_Y.value\n assert py_get_unit_from_dtype(np.dtype("m8[M]")) == NpyDatetimeUnit.NPY_FR_M.value\n assert py_get_unit_from_dtype(np.dtype("m8[W]")) == NpyDatetimeUnit.NPY_FR_W.value\n # B has been deprecated and removed -> no 3\n assert py_get_unit_from_dtype(np.dtype("m8[D]")) == NpyDatetimeUnit.NPY_FR_D.value\n assert py_get_unit_from_dtype(np.dtype("m8[h]")) == NpyDatetimeUnit.NPY_FR_h.value\n assert py_get_unit_from_dtype(np.dtype("m8[m]")) == NpyDatetimeUnit.NPY_FR_m.value\n assert py_get_unit_from_dtype(np.dtype("m8[s]")) == NpyDatetimeUnit.NPY_FR_s.value\n assert py_get_unit_from_dtype(np.dtype("m8[ms]")) == NpyDatetimeUnit.NPY_FR_ms.value\n assert py_get_unit_from_dtype(np.dtype("m8[us]")) == NpyDatetimeUnit.NPY_FR_us.value\n assert py_get_unit_from_dtype(np.dtype("m8[ns]")) == NpyDatetimeUnit.NPY_FR_ns.value\n assert py_get_unit_from_dtype(np.dtype("m8[ps]")) == NpyDatetimeUnit.NPY_FR_ps.value\n assert py_get_unit_from_dtype(np.dtype("m8[fs]")) == NpyDatetimeUnit.NPY_FR_fs.value\n assert py_get_unit_from_dtype(np.dtype("m8[as]")) == NpyDatetimeUnit.NPY_FR_as.value\n\n\ndef test_td64_to_tdstruct():\n val = 12454636234 # arbitrary value\n\n res1 = py_td64_to_tdstruct(val, NpyDatetimeUnit.NPY_FR_ns.value)\n exp1 = {\n "days": 0,\n "hrs": 0,\n "min": 0,\n "sec": 12,\n "ms": 454,\n "us": 636,\n "ns": 234,\n "seconds": 12,\n "microseconds": 454636,\n "nanoseconds": 234,\n }\n assert res1 == exp1\n\n res2 = py_td64_to_tdstruct(val, NpyDatetimeUnit.NPY_FR_us.value)\n exp2 = {\n "days": 0,\n "hrs": 3,\n "min": 27,\n "sec": 34,\n "ms": 636,\n "us": 234,\n "ns": 0,\n "seconds": 12454,\n "microseconds": 636234,\n "nanoseconds": 0,\n }\n assert res2 == exp2\n\n res3 = py_td64_to_tdstruct(val, NpyDatetimeUnit.NPY_FR_ms.value)\n exp3 = {\n "days": 144,\n "hrs": 3,\n "min": 37,\n "sec": 16,\n "ms": 234,\n 
"us": 0,\n "ns": 0,\n "seconds": 13036,\n "microseconds": 234000,\n "nanoseconds": 0,\n }\n assert res3 == exp3\n\n # Note this out of bounds for nanosecond Timedelta\n res4 = py_td64_to_tdstruct(val, NpyDatetimeUnit.NPY_FR_s.value)\n exp4 = {\n "days": 144150,\n "hrs": 21,\n "min": 10,\n "sec": 34,\n "ms": 0,\n "us": 0,\n "ns": 0,\n "seconds": 76234,\n "microseconds": 0,\n "nanoseconds": 0,\n }\n assert res4 == exp4\n\n\nclass TestAstypeOverflowSafe:\n def test_pass_non_dt64_array(self):\n # check that we raise, not segfault\n arr = np.arange(5)\n dtype = np.dtype("M8[ns]")\n\n msg = (\n "astype_overflowsafe values.dtype and dtype must be either "\n "both-datetime64 or both-timedelta64"\n )\n with pytest.raises(TypeError, match=msg):\n astype_overflowsafe(arr, dtype, copy=True)\n\n with pytest.raises(TypeError, match=msg):\n astype_overflowsafe(arr, dtype, copy=False)\n\n def test_pass_non_dt64_dtype(self):\n # check that we raise, not segfault\n arr = np.arange(5, dtype="i8").view("M8[D]")\n dtype = np.dtype("m8[ns]")\n\n msg = (\n "astype_overflowsafe values.dtype and dtype must be either "\n "both-datetime64 or both-timedelta64"\n )\n with pytest.raises(TypeError, match=msg):\n astype_overflowsafe(arr, dtype, copy=True)\n\n with pytest.raises(TypeError, match=msg):\n astype_overflowsafe(arr, dtype, copy=False)\n\n def test_astype_overflowsafe_dt64(self):\n dtype = np.dtype("M8[ns]")\n\n dt = np.datetime64("2262-04-05", "D")\n arr = dt + np.arange(10, dtype="m8[D]")\n\n # arr.astype silently overflows, so this\n wrong = arr.astype(dtype)\n roundtrip = wrong.astype(arr.dtype)\n assert not (wrong == roundtrip).all()\n\n msg = "Out of bounds nanosecond timestamp"\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n astype_overflowsafe(arr, dtype)\n\n # But converting to microseconds is fine, and we match numpy's results.\n dtype2 = np.dtype("M8[us]")\n result = astype_overflowsafe(arr, dtype2)\n expected = arr.astype(dtype2)\n tm.assert_numpy_array_equal(result, 
expected)\n\n def test_astype_overflowsafe_td64(self):\n dtype = np.dtype("m8[ns]")\n\n dt = np.datetime64("2262-04-05", "D")\n arr = dt + np.arange(10, dtype="m8[D]")\n arr = arr.view("m8[D]")\n\n # arr.astype silently overflows, so this\n wrong = arr.astype(dtype)\n roundtrip = wrong.astype(arr.dtype)\n assert not (wrong == roundtrip).all()\n\n msg = r"Cannot convert 106752 days to timedelta64\[ns\] without overflow"\n with pytest.raises(OutOfBoundsTimedelta, match=msg):\n astype_overflowsafe(arr, dtype)\n\n # But converting to microseconds is fine, and we match numpy's results.\n dtype2 = np.dtype("m8[us]")\n result = astype_overflowsafe(arr, dtype2)\n expected = arr.astype(dtype2)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_astype_overflowsafe_disallow_rounding(self):\n arr = np.array([-1500, 1500], dtype="M8[ns]")\n dtype = np.dtype("M8[us]")\n\n msg = "Cannot losslessly cast '-1500 ns' to us"\n with pytest.raises(ValueError, match=msg):\n astype_overflowsafe(arr, dtype, round_ok=False)\n\n result = astype_overflowsafe(arr, dtype, round_ok=True)\n expected = arr.astype(dtype)\n tm.assert_numpy_array_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_np_datetime.py | test_np_datetime.py | Python | 7,889 | 0.95 | 0.045045 | 0.059783 | node-utils | 939 | 2023-12-21T10:29:00.065451 | MIT | true | 79d2af481e82580ba8a014aafee89554 |
from datetime import datetime\n\nimport pytest\n\nfrom pandas._libs import tslib\n\nfrom pandas import Timestamp\n\n\n@pytest.mark.parametrize(\n "date_str, exp",\n [\n ("2011-01-02", datetime(2011, 1, 2)),\n ("2011-1-2", datetime(2011, 1, 2)),\n ("2011-01", datetime(2011, 1, 1)),\n ("2011-1", datetime(2011, 1, 1)),\n ("2011 01 02", datetime(2011, 1, 2)),\n ("2011.01.02", datetime(2011, 1, 2)),\n ("2011/01/02", datetime(2011, 1, 2)),\n ("2011\\01\\02", datetime(2011, 1, 2)),\n ("2013-01-01 05:30:00", datetime(2013, 1, 1, 5, 30)),\n ("2013-1-1 5:30:00", datetime(2013, 1, 1, 5, 30)),\n ("2013-1-1 5:30:00+01:00", Timestamp(2013, 1, 1, 5, 30, tz="UTC+01:00")),\n ],\n)\ndef test_parsers_iso8601(date_str, exp):\n # see gh-12060\n #\n # Test only the ISO parser - flexibility to\n # different separators and leading zero's.\n actual = tslib._test_parse_iso8601(date_str)\n assert actual == exp\n\n\n@pytest.mark.parametrize(\n "date_str",\n [\n "2011-01/02",\n "2011=11=11",\n "201401",\n "201111",\n "200101",\n # Mixed separated and unseparated.\n "2005-0101",\n "200501-01",\n "20010101 12:3456",\n "20010101 1234:56",\n # HHMMSS must have two digits in\n # each component if unseparated.\n "20010101 1",\n "20010101 123",\n "20010101 12345",\n "20010101 12345Z",\n ],\n)\ndef test_parsers_iso8601_invalid(date_str):\n msg = f'Error parsing datetime string "{date_str}"'\n\n with pytest.raises(ValueError, match=msg):\n tslib._test_parse_iso8601(date_str)\n\n\ndef test_parsers_iso8601_invalid_offset_invalid():\n date_str = "2001-01-01 12-34-56"\n msg = f'Timezone hours offset out of range in datetime string "{date_str}"'\n\n with pytest.raises(ValueError, match=msg):\n tslib._test_parse_iso8601(date_str)\n\n\ndef test_parsers_iso8601_leading_space():\n # GH#25895 make sure isoparser doesn't overflow with long input\n date_str, expected = ("2013-1-1 5:30:00", datetime(2013, 1, 1, 5, 30))\n actual = tslib._test_parse_iso8601(" " * 200 + date_str)\n assert actual == 
expected\n\n\n@pytest.mark.parametrize(\n "date_str, timespec, exp",\n [\n ("2023-01-01 00:00:00", "auto", "2023-01-01T00:00:00"),\n ("2023-01-01 00:00:00", "seconds", "2023-01-01T00:00:00"),\n ("2023-01-01 00:00:00", "milliseconds", "2023-01-01T00:00:00.000"),\n ("2023-01-01 00:00:00", "microseconds", "2023-01-01T00:00:00.000000"),\n ("2023-01-01 00:00:00", "nanoseconds", "2023-01-01T00:00:00.000000000"),\n ("2023-01-01 00:00:00.001", "auto", "2023-01-01T00:00:00.001000"),\n ("2023-01-01 00:00:00.001", "seconds", "2023-01-01T00:00:00"),\n ("2023-01-01 00:00:00.001", "milliseconds", "2023-01-01T00:00:00.001"),\n ("2023-01-01 00:00:00.001", "microseconds", "2023-01-01T00:00:00.001000"),\n ("2023-01-01 00:00:00.001", "nanoseconds", "2023-01-01T00:00:00.001000000"),\n ("2023-01-01 00:00:00.000001", "auto", "2023-01-01T00:00:00.000001"),\n ("2023-01-01 00:00:00.000001", "seconds", "2023-01-01T00:00:00"),\n ("2023-01-01 00:00:00.000001", "milliseconds", "2023-01-01T00:00:00.000"),\n ("2023-01-01 00:00:00.000001", "microseconds", "2023-01-01T00:00:00.000001"),\n ("2023-01-01 00:00:00.000001", "nanoseconds", "2023-01-01T00:00:00.000001000"),\n ("2023-01-01 00:00:00.000000001", "auto", "2023-01-01T00:00:00.000000001"),\n ("2023-01-01 00:00:00.000000001", "seconds", "2023-01-01T00:00:00"),\n ("2023-01-01 00:00:00.000000001", "milliseconds", "2023-01-01T00:00:00.000"),\n ("2023-01-01 00:00:00.000000001", "microseconds", "2023-01-01T00:00:00.000000"),\n (\n "2023-01-01 00:00:00.000000001",\n "nanoseconds",\n "2023-01-01T00:00:00.000000001",\n ),\n ("2023-01-01 00:00:00.000001001", "auto", "2023-01-01T00:00:00.000001001"),\n ("2023-01-01 00:00:00.000001001", "seconds", "2023-01-01T00:00:00"),\n ("2023-01-01 00:00:00.000001001", "milliseconds", "2023-01-01T00:00:00.000"),\n ("2023-01-01 00:00:00.000001001", "microseconds", "2023-01-01T00:00:00.000001"),\n (\n "2023-01-01 00:00:00.000001001",\n "nanoseconds",\n "2023-01-01T00:00:00.000001001",\n ),\n ],\n)\ndef 
test_iso8601_formatter(date_str: str, timespec: str, exp: str):\n # GH#53020\n ts = Timestamp(date_str)\n assert ts.isoformat(timespec=timespec) == exp\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_parse_iso8601.py | test_parse_iso8601.py | Python | 4,535 | 0.95 | 0.05042 | 0.086538 | vue-tools | 528 | 2024-01-15T10:53:54.511286 | GPL-3.0 | true | 6a04b4d76845fbd63e595b5c08d18f6d |
"""\nTests for Timestamp parsing, aimed at pandas/_libs/tslibs/parsing.pyx\n"""\nfrom datetime import datetime\nimport re\n\nfrom dateutil.parser import parse as du_parse\nfrom dateutil.tz import tzlocal\nfrom hypothesis import given\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import (\n parsing,\n strptime,\n)\nfrom pandas._libs.tslibs.parsing import parse_datetime_string_with_reso\nfrom pandas.compat import (\n ISMUSL,\n is_platform_arm,\n is_platform_windows,\n)\nimport pandas.util._test_decorators as td\n\nimport pandas._testing as tm\nfrom pandas._testing._hypothesis import DATETIME_NO_TZ\n\n\n@pytest.mark.skipif(\n is_platform_windows() or ISMUSL or is_platform_arm(),\n reason="TZ setting incorrect on Windows and MUSL Linux",\n)\ndef test_parsing_tzlocal_deprecated():\n # GH#50791\n msg = (\n "Parsing 'EST' as tzlocal.*"\n "Pass the 'tz' keyword or call tz_localize after construction instead"\n )\n dtstr = "Jan 15 2004 03:00 EST"\n\n with tm.set_timezone("US/Eastern"):\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res, _ = parse_datetime_string_with_reso(dtstr)\n\n assert isinstance(res.tzinfo, tzlocal)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = parsing.py_parse_datetime_string(dtstr)\n assert isinstance(res.tzinfo, tzlocal)\n\n\ndef test_parse_datetime_string_with_reso():\n (parsed, reso) = parse_datetime_string_with_reso("4Q1984")\n (parsed_lower, reso_lower) = parse_datetime_string_with_reso("4q1984")\n\n assert reso == reso_lower\n assert parsed == parsed_lower\n\n\ndef test_parse_datetime_string_with_reso_nanosecond_reso():\n # GH#46811\n parsed, reso = parse_datetime_string_with_reso("2022-04-20 09:19:19.123456789")\n assert reso == "nanosecond"\n\n\ndef test_parse_datetime_string_with_reso_invalid_type():\n # Raise on invalid input, don't just return it\n msg = "Argument 'date_string' has incorrect type (expected str, got tuple)"\n with pytest.raises(TypeError, match=re.escape(msg)):\n 
parse_datetime_string_with_reso((4, 5))\n\n\n@pytest.mark.parametrize(\n "dashed,normal", [("1988-Q2", "1988Q2"), ("2Q-1988", "2Q1988")]\n)\ndef test_parse_time_quarter_with_dash(dashed, normal):\n # see gh-9688\n (parsed_dash, reso_dash) = parse_datetime_string_with_reso(dashed)\n (parsed, reso) = parse_datetime_string_with_reso(normal)\n\n assert parsed_dash == parsed\n assert reso_dash == reso\n\n\n@pytest.mark.parametrize("dashed", ["-2Q1992", "2-Q1992", "4-4Q1992"])\ndef test_parse_time_quarter_with_dash_error(dashed):\n msg = f"Unknown datetime string format, unable to parse: {dashed}"\n\n with pytest.raises(parsing.DateParseError, match=msg):\n parse_datetime_string_with_reso(dashed)\n\n\n@pytest.mark.parametrize(\n "date_string,expected",\n [\n ("123.1234", False),\n ("-50000", False),\n ("999", False),\n ("m", False),\n ("T", False),\n ("Mon Sep 16, 2013", True),\n ("2012-01-01", True),\n ("01/01/2012", True),\n ("01012012", True),\n ("0101", True),\n ("1-1", True),\n ],\n)\ndef test_does_not_convert_mixed_integer(date_string, expected):\n assert parsing._does_string_look_like_datetime(date_string) is expected\n\n\n@pytest.mark.parametrize(\n "date_str,kwargs,msg",\n [\n (\n "2013Q5",\n {},\n (\n "Incorrect quarterly string is given, "\n "quarter must be between 1 and 4: 2013Q5"\n ),\n ),\n # see gh-5418\n (\n "2013Q1",\n {"freq": "INVLD-L-DEC-SAT"},\n (\n "Unable to retrieve month information "\n "from given freq: INVLD-L-DEC-SAT"\n ),\n ),\n ],\n)\ndef test_parsers_quarterly_with_freq_error(date_str, kwargs, msg):\n with pytest.raises(parsing.DateParseError, match=msg):\n parsing.parse_datetime_string_with_reso(date_str, **kwargs)\n\n\n@pytest.mark.parametrize(\n "date_str,freq,expected",\n [\n ("2013Q2", None, datetime(2013, 4, 1)),\n ("2013Q2", "Y-APR", datetime(2012, 8, 1)),\n ("2013-Q2", "Y-DEC", datetime(2013, 4, 1)),\n ],\n)\ndef test_parsers_quarterly_with_freq(date_str, freq, expected):\n result, _ = 
parsing.parse_datetime_string_with_reso(date_str, freq=freq)\n assert result == expected\n\n\n@pytest.mark.parametrize(\n "date_str", ["2Q 2005", "2Q-200Y", "2Q-200", "22Q2005", "2Q200.", "6Q-20"]\n)\ndef test_parsers_quarter_invalid(date_str):\n if date_str == "6Q-20":\n msg = (\n "Incorrect quarterly string is given, quarter "\n f"must be between 1 and 4: {date_str}"\n )\n else:\n msg = f"Unknown datetime string format, unable to parse: {date_str}"\n\n with pytest.raises(ValueError, match=msg):\n parsing.parse_datetime_string_with_reso(date_str)\n\n\n@pytest.mark.parametrize(\n "date_str,expected",\n [("201101", datetime(2011, 1, 1, 0, 0)), ("200005", datetime(2000, 5, 1, 0, 0))],\n)\ndef test_parsers_month_freq(date_str, expected):\n result, _ = parsing.parse_datetime_string_with_reso(date_str, freq="ME")\n assert result == expected\n\n\n@td.skip_if_not_us_locale\n@pytest.mark.parametrize(\n "string,fmt",\n [\n ("20111230", "%Y%m%d"),\n ("201112300000", "%Y%m%d%H%M"),\n ("20111230000000", "%Y%m%d%H%M%S"),\n ("20111230T00", "%Y%m%dT%H"),\n ("20111230T0000", "%Y%m%dT%H%M"),\n ("20111230T000000", "%Y%m%dT%H%M%S"),\n ("2011-12-30", "%Y-%m-%d"),\n ("2011", "%Y"),\n ("2011-01", "%Y-%m"),\n ("30-12-2011", "%d-%m-%Y"),\n ("2011-12-30 00:00:00", "%Y-%m-%d %H:%M:%S"),\n ("2011-12-30T00:00:00", "%Y-%m-%dT%H:%M:%S"),\n ("2011-12-30T00:00:00UTC", "%Y-%m-%dT%H:%M:%S%Z"),\n ("2011-12-30T00:00:00Z", "%Y-%m-%dT%H:%M:%S%z"),\n ("2011-12-30T00:00:00+9", "%Y-%m-%dT%H:%M:%S%z"),\n ("2011-12-30T00:00:00+09", "%Y-%m-%dT%H:%M:%S%z"),\n ("2011-12-30T00:00:00+090", None),\n ("2011-12-30T00:00:00+0900", "%Y-%m-%dT%H:%M:%S%z"),\n ("2011-12-30T00:00:00-0900", "%Y-%m-%dT%H:%M:%S%z"),\n ("2011-12-30T00:00:00+09:00", "%Y-%m-%dT%H:%M:%S%z"),\n ("2011-12-30T00:00:00+09:000", None),\n ("2011-12-30T00:00:00+9:0", "%Y-%m-%dT%H:%M:%S%z"),\n ("2011-12-30T00:00:00+09:", None),\n ("2011-12-30T00:00:00.000000UTC", "%Y-%m-%dT%H:%M:%S.%f%Z"),\n ("2011-12-30T00:00:00.000000Z", "%Y-%m-%dT%H:%M:%S.%f%z"),\n 
("2011-12-30T00:00:00.000000+9", "%Y-%m-%dT%H:%M:%S.%f%z"),\n ("2011-12-30T00:00:00.000000+09", "%Y-%m-%dT%H:%M:%S.%f%z"),\n ("2011-12-30T00:00:00.000000+090", None),\n ("2011-12-30T00:00:00.000000+0900", "%Y-%m-%dT%H:%M:%S.%f%z"),\n ("2011-12-30T00:00:00.000000-0900", "%Y-%m-%dT%H:%M:%S.%f%z"),\n ("2011-12-30T00:00:00.000000+09:00", "%Y-%m-%dT%H:%M:%S.%f%z"),\n ("2011-12-30T00:00:00.000000+09:000", None),\n ("2011-12-30T00:00:00.000000+9:0", "%Y-%m-%dT%H:%M:%S.%f%z"),\n ("2011-12-30T00:00:00.000000+09:", None),\n ("2011-12-30 00:00:00.000000", "%Y-%m-%d %H:%M:%S.%f"),\n ("Tue 24 Aug 2021 01:30:48", "%a %d %b %Y %H:%M:%S"),\n ("Tuesday 24 Aug 2021 01:30:48", "%A %d %b %Y %H:%M:%S"),\n ("Tue 24 Aug 2021 01:30:48 AM", "%a %d %b %Y %I:%M:%S %p"),\n ("Tuesday 24 Aug 2021 01:30:48 AM", "%A %d %b %Y %I:%M:%S %p"),\n ("27.03.2003 14:55:00.000", "%d.%m.%Y %H:%M:%S.%f"), # GH50317\n ],\n)\ndef test_guess_datetime_format_with_parseable_formats(string, fmt):\n with tm.maybe_produces_warning(\n UserWarning, fmt is not None and re.search(r"%d.*%m", fmt)\n ):\n result = parsing.guess_datetime_format(string)\n assert result == fmt\n\n\n@pytest.mark.parametrize("dayfirst,expected", [(True, "%d/%m/%Y"), (False, "%m/%d/%Y")])\ndef test_guess_datetime_format_with_dayfirst(dayfirst, expected):\n ambiguous_string = "01/01/2011"\n result = parsing.guess_datetime_format(ambiguous_string, dayfirst=dayfirst)\n assert result == expected\n\n\n@td.skip_if_not_us_locale\n@pytest.mark.parametrize(\n "string,fmt",\n [\n ("30/Dec/2011", "%d/%b/%Y"),\n ("30/December/2011", "%d/%B/%Y"),\n ("30/Dec/2011 00:00:00", "%d/%b/%Y %H:%M:%S"),\n ],\n)\ndef test_guess_datetime_format_with_locale_specific_formats(string, fmt):\n result = parsing.guess_datetime_format(string)\n assert result == fmt\n\n\n@pytest.mark.parametrize(\n "invalid_dt",\n [\n "01/2013",\n "12:00:00",\n "1/1/1/1",\n "this_is_not_a_datetime",\n "51a",\n "13/2019",\n "202001", # YYYYMM isn't ISO8601\n "2020/01", # YYYY/MM isn't ISO8601 
either\n "87156549591102612381000001219H5",\n ],\n)\ndef test_guess_datetime_format_invalid_inputs(invalid_dt):\n # A datetime string must include a year, month and a day for it to be\n # guessable, in addition to being a string that looks like a datetime.\n assert parsing.guess_datetime_format(invalid_dt) is None\n\n\n@pytest.mark.parametrize("invalid_type_dt", [9, datetime(2011, 1, 1)])\ndef test_guess_datetime_format_wrong_type_inputs(invalid_type_dt):\n # A datetime string must include a year, month and a day for it to be\n # guessable, in addition to being a string that looks like a datetime.\n with pytest.raises(\n TypeError,\n match=r"^Argument 'dt_str' has incorrect type \(expected str, got .*\)$",\n ):\n parsing.guess_datetime_format(invalid_type_dt)\n\n\n@pytest.mark.parametrize(\n "string,fmt,dayfirst,warning",\n [\n ("2011-1-1", "%Y-%m-%d", False, None),\n ("2011-1-1", "%Y-%d-%m", True, None),\n ("1/1/2011", "%m/%d/%Y", False, None),\n ("1/1/2011", "%d/%m/%Y", True, None),\n ("30-1-2011", "%d-%m-%Y", False, UserWarning),\n ("30-1-2011", "%d-%m-%Y", True, None),\n ("2011-1-1 0:0:0", "%Y-%m-%d %H:%M:%S", False, None),\n ("2011-1-1 0:0:0", "%Y-%d-%m %H:%M:%S", True, None),\n ("2011-1-3T00:00:0", "%Y-%m-%dT%H:%M:%S", False, None),\n ("2011-1-3T00:00:0", "%Y-%d-%mT%H:%M:%S", True, None),\n ("2011-1-1 00:00:00", "%Y-%m-%d %H:%M:%S", False, None),\n ("2011-1-1 00:00:00", "%Y-%d-%m %H:%M:%S", True, None),\n ],\n)\ndef test_guess_datetime_format_no_padding(string, fmt, dayfirst, warning):\n # see gh-11142\n msg = (\n rf"Parsing dates in {fmt} format when dayfirst=False \(the default\) "\n "was specified. 
"\n "Pass `dayfirst=True` or specify a format to silence this warning."\n )\n with tm.assert_produces_warning(warning, match=msg):\n result = parsing.guess_datetime_format(string, dayfirst=dayfirst)\n assert result == fmt\n\n\ndef test_try_parse_dates():\n arr = np.array(["5/1/2000", "6/1/2000", "7/1/2000"], dtype=object)\n result = parsing.try_parse_dates(arr, parser=lambda x: du_parse(x, dayfirst=True))\n\n expected = np.array([du_parse(d, dayfirst=True) for d in arr])\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_parse_datetime_string_with_reso_check_instance_type_raise_exception():\n # issue 20684\n msg = "Argument 'date_string' has incorrect type (expected str, got tuple)"\n with pytest.raises(TypeError, match=re.escape(msg)):\n parse_datetime_string_with_reso((1, 2, 3))\n\n result = parse_datetime_string_with_reso("2019")\n expected = (datetime(2019, 1, 1), "year")\n assert result == expected\n\n\n@pytest.mark.parametrize(\n "fmt,expected",\n [\n ("%Y %m %d %H:%M:%S", True),\n ("%Y/%m/%d %H:%M:%S", True),\n (r"%Y\%m\%d %H:%M:%S", True),\n ("%Y-%m-%d %H:%M:%S", True),\n ("%Y.%m.%d %H:%M:%S", True),\n ("%Y%m%d %H:%M:%S", True),\n ("%Y-%m-%dT%H:%M:%S", True),\n ("%Y-%m-%dT%H:%M:%S%z", True),\n ("%Y-%m-%dT%H:%M:%S%Z", False),\n ("%Y-%m-%dT%H:%M:%S.%f", True),\n ("%Y-%m-%dT%H:%M:%S.%f%z", True),\n ("%Y-%m-%dT%H:%M:%S.%f%Z", False),\n ("%Y%m%d", True),\n ("%Y%m", False),\n ("%Y", True),\n ("%Y-%m-%d", True),\n ("%Y-%m", True),\n ],\n)\ndef test_is_iso_format(fmt, expected):\n # see gh-41047\n result = strptime._test_format_is_iso(fmt)\n assert result == expected\n\n\n@pytest.mark.parametrize(\n "input",\n [\n "2018-01-01T00:00:00.123456789",\n "2018-01-01T00:00:00.123456",\n "2018-01-01T00:00:00.123",\n ],\n)\ndef test_guess_datetime_format_f(input):\n # https://github.com/pandas-dev/pandas/issues/49043\n result = parsing.guess_datetime_format(input)\n expected = "%Y-%m-%dT%H:%M:%S.%f"\n assert result == expected\n\n\ndef 
_helper_hypothesis_delimited_date(call, date_string, **kwargs):\n msg, result = None, None\n try:\n result = call(date_string, **kwargs)\n except ValueError as err:\n msg = str(err)\n return msg, result\n\n\n@given(DATETIME_NO_TZ)\n@pytest.mark.parametrize("delimiter", list(" -./"))\n@pytest.mark.parametrize("dayfirst", [True, False])\n@pytest.mark.parametrize(\n "date_format",\n ["%d %m %Y", "%m %d %Y", "%m %Y", "%Y %m %d", "%y %m %d", "%Y%m%d", "%y%m%d"],\n)\ndef test_hypothesis_delimited_date(\n request, date_format, dayfirst, delimiter, test_datetime\n):\n if date_format == "%m %Y" and delimiter == ".":\n request.applymarker(\n pytest.mark.xfail(\n reason="parse_datetime_string cannot reliably tell whether "\n "e.g. %m.%Y is a float or a date"\n )\n )\n date_string = test_datetime.strftime(date_format.replace(" ", delimiter))\n\n except_out_dateutil, result = _helper_hypothesis_delimited_date(\n parsing.py_parse_datetime_string, date_string, dayfirst=dayfirst\n )\n except_in_dateutil, expected = _helper_hypothesis_delimited_date(\n du_parse,\n date_string,\n default=datetime(1, 1, 1),\n dayfirst=dayfirst,\n yearfirst=False,\n )\n\n assert except_out_dateutil == except_in_dateutil\n assert result == expected\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_parsing.py | test_parsing.py | Python | 13,931 | 0.95 | 0.072289 | 0.03662 | react-lib | 350 | 2024-09-11T09:03:03.206991 | Apache-2.0 | true | 0679fb760d1b9ee42779b02e63f9e2e4 |
import numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import (\n iNaT,\n to_offset,\n)\nfrom pandas._libs.tslibs.period import (\n extract_ordinals,\n get_period_field_arr,\n period_asfreq,\n period_ordinal,\n)\n\nimport pandas._testing as tm\n\n\ndef get_freq_code(freqstr: str) -> int:\n off = to_offset(freqstr, is_period=True)\n # error: "BaseOffset" has no attribute "_period_dtype_code"\n code = off._period_dtype_code # type: ignore[attr-defined]\n return code\n\n\n@pytest.mark.parametrize(\n "freq1,freq2,expected",\n [\n ("D", "h", 24),\n ("D", "min", 1440),\n ("D", "s", 86400),\n ("D", "ms", 86400000),\n ("D", "us", 86400000000),\n ("D", "ns", 86400000000000),\n ("h", "min", 60),\n ("h", "s", 3600),\n ("h", "ms", 3600000),\n ("h", "us", 3600000000),\n ("h", "ns", 3600000000000),\n ("min", "s", 60),\n ("min", "ms", 60000),\n ("min", "us", 60000000),\n ("min", "ns", 60000000000),\n ("s", "ms", 1000),\n ("s", "us", 1000000),\n ("s", "ns", 1000000000),\n ("ms", "us", 1000),\n ("ms", "ns", 1000000),\n ("us", "ns", 1000),\n ],\n)\ndef test_intra_day_conversion_factors(freq1, freq2, expected):\n assert (\n period_asfreq(1, get_freq_code(freq1), get_freq_code(freq2), False) == expected\n )\n\n\n@pytest.mark.parametrize(\n "freq,expected", [("Y", 0), ("M", 0), ("W", 1), ("D", 0), ("B", 0)]\n)\ndef test_period_ordinal_start_values(freq, expected):\n # information for Jan. 1, 1970.\n assert period_ordinal(1970, 1, 1, 0, 0, 0, 0, 0, get_freq_code(freq)) == expected\n\n\n@pytest.mark.parametrize(\n "dt,expected",\n [\n ((1970, 1, 4, 0, 0, 0, 0, 0), 1),\n ((1970, 1, 5, 0, 0, 0, 0, 0), 2),\n ((2013, 10, 6, 0, 0, 0, 0, 0), 2284),\n ((2013, 10, 7, 0, 0, 0, 0, 0), 2285),\n ],\n)\ndef test_period_ordinal_week(dt, expected):\n args = dt + (get_freq_code("W"),)\n assert period_ordinal(*args) == expected\n\n\n@pytest.mark.parametrize(\n "day,expected",\n [\n # Thursday (Oct. 3, 2013).\n (3, 11415),\n # Friday (Oct. 4, 2013).\n (4, 11416),\n # Saturday (Oct. 
5, 2013).\n (5, 11417),\n # Sunday (Oct. 6, 2013).\n (6, 11417),\n # Monday (Oct. 7, 2013).\n (7, 11417),\n # Tuesday (Oct. 8, 2013).\n (8, 11418),\n ],\n)\ndef test_period_ordinal_business_day(day, expected):\n # 5000 is PeriodDtypeCode for BusinessDay\n args = (2013, 10, day, 0, 0, 0, 0, 0, 5000)\n assert period_ordinal(*args) == expected\n\n\nclass TestExtractOrdinals:\n def test_extract_ordinals_raises(self):\n # with non-object, make sure we raise TypeError, not segfault\n arr = np.arange(5)\n freq = to_offset("D")\n with pytest.raises(TypeError, match="values must be object-dtype"):\n extract_ordinals(arr, freq)\n\n def test_extract_ordinals_2d(self):\n freq = to_offset("D")\n arr = np.empty(10, dtype=object)\n arr[:] = iNaT\n\n res = extract_ordinals(arr, freq)\n res2 = extract_ordinals(arr.reshape(5, 2), freq)\n tm.assert_numpy_array_equal(res, res2.reshape(-1))\n\n\ndef test_get_period_field_array_raises_on_out_of_range():\n msg = "Buffer dtype mismatch, expected 'const int64_t' but got 'double'"\n with pytest.raises(ValueError, match=msg):\n get_period_field_arr(-1, np.empty(1), 0)\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_period.py | test_period.py | Python | 3,424 | 0.95 | 0.089431 | 0.095238 | awesome-app | 983 | 2023-11-24T20:10:52.161875 | MIT | true | ad39e4e92a7a6558c1c26d1c00105320 |
import numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs import (\n Resolution,\n get_resolution,\n)\nfrom pandas._libs.tslibs.dtypes import NpyDatetimeUnit\n\nimport pandas._testing as tm\n\n\ndef test_get_resolution_nano():\n # don't return the fallback RESO_DAY\n arr = np.array([1], dtype=np.int64)\n res = get_resolution(arr)\n assert res == Resolution.RESO_NS\n\n\ndef test_get_resolution_non_nano_data():\n arr = np.array([1], dtype=np.int64)\n res = get_resolution(arr, None, NpyDatetimeUnit.NPY_FR_us.value)\n assert res == Resolution.RESO_US\n\n res = get_resolution(arr, pytz.UTC, NpyDatetimeUnit.NPY_FR_us.value)\n assert res == Resolution.RESO_US\n\n\n@pytest.mark.parametrize(\n "freqstr,expected",\n [\n ("Y", "year"),\n ("Q", "quarter"),\n ("M", "month"),\n ("D", "day"),\n ("h", "hour"),\n ("min", "minute"),\n ("s", "second"),\n ("ms", "millisecond"),\n ("us", "microsecond"),\n ("ns", "nanosecond"),\n ],\n)\ndef test_get_attrname_from_abbrev(freqstr, expected):\n reso = Resolution.get_reso_from_freqstr(freqstr)\n assert reso.attr_abbrev == freqstr\n assert reso.attrname == expected\n\n\n@pytest.mark.parametrize("freq", ["A", "H", "T", "S", "L", "U", "N"])\ndef test_units_A_H_T_S_L_U_N_deprecated_from_attrname_to_abbrevs(freq):\n # GH#52536\n msg = f"'{freq}' is deprecated and will be removed in a future version."\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n Resolution.get_reso_from_freqstr(freq)\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_resolution.py | test_resolution.py | Python | 1,544 | 0.95 | 0.070175 | 0.044444 | vue-tools | 456 | 2024-03-12T10:13:24.717261 | MIT | true | 28bd5dc5e14ab90d57933b49d71181be |
from datetime import (\n datetime,\n timezone,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.dtypes import NpyDatetimeUnit\nfrom pandas._libs.tslibs.strptime import array_strptime\n\nfrom pandas import (\n NaT,\n Timestamp,\n)\nimport pandas._testing as tm\n\ncreso_infer = NpyDatetimeUnit.NPY_FR_GENERIC.value\n\n\nclass TestArrayStrptimeResolutionInference:\n def test_array_strptime_resolution_all_nat(self):\n arr = np.array([NaT, np.nan], dtype=object)\n\n fmt = "%Y-%m-%d %H:%M:%S"\n res, _ = array_strptime(arr, fmt=fmt, utc=False, creso=creso_infer)\n assert res.dtype == "M8[s]"\n\n res, _ = array_strptime(arr, fmt=fmt, utc=True, creso=creso_infer)\n assert res.dtype == "M8[s]"\n\n @pytest.mark.parametrize("tz", [None, timezone.utc])\n def test_array_strptime_resolution_inference_homogeneous_strings(self, tz):\n dt = datetime(2016, 1, 2, 3, 4, 5, 678900, tzinfo=tz)\n\n fmt = "%Y-%m-%d %H:%M:%S"\n dtstr = dt.strftime(fmt)\n arr = np.array([dtstr] * 3, dtype=object)\n expected = np.array([dt.replace(tzinfo=None)] * 3, dtype="M8[s]")\n\n res, _ = array_strptime(arr, fmt=fmt, utc=False, creso=creso_infer)\n tm.assert_numpy_array_equal(res, expected)\n\n fmt = "%Y-%m-%d %H:%M:%S.%f"\n dtstr = dt.strftime(fmt)\n arr = np.array([dtstr] * 3, dtype=object)\n expected = np.array([dt.replace(tzinfo=None)] * 3, dtype="M8[us]")\n\n res, _ = array_strptime(arr, fmt=fmt, utc=False, creso=creso_infer)\n tm.assert_numpy_array_equal(res, expected)\n\n fmt = "ISO8601"\n res, _ = array_strptime(arr, fmt=fmt, utc=False, creso=creso_infer)\n tm.assert_numpy_array_equal(res, expected)\n\n @pytest.mark.parametrize("tz", [None, timezone.utc])\n def test_array_strptime_resolution_mixed(self, tz):\n dt = datetime(2016, 1, 2, 3, 4, 5, 678900, tzinfo=tz)\n\n ts = Timestamp(dt).as_unit("ns")\n\n arr = np.array([dt, ts], dtype=object)\n expected = np.array(\n [Timestamp(dt).as_unit("ns").asm8, ts.asm8],\n dtype="M8[ns]",\n )\n\n fmt = "%Y-%m-%d %H:%M:%S"\n res, _ = 
array_strptime(arr, fmt=fmt, utc=False, creso=creso_infer)\n tm.assert_numpy_array_equal(res, expected)\n\n fmt = "ISO8601"\n res, _ = array_strptime(arr, fmt=fmt, utc=False, creso=creso_infer)\n tm.assert_numpy_array_equal(res, expected)\n\n def test_array_strptime_resolution_todaynow(self):\n # specifically case where today/now is the *first* item\n vals = np.array(["today", np.datetime64("2017-01-01", "us")], dtype=object)\n\n now = Timestamp("now").asm8\n res, _ = array_strptime(vals, fmt="%Y-%m-%d", utc=False, creso=creso_infer)\n res2, _ = array_strptime(\n vals[::-1], fmt="%Y-%m-%d", utc=False, creso=creso_infer\n )\n\n # 1s is an arbitrary cutoff for call overhead; in local testing the\n # actual difference is about 250us\n tolerance = np.timedelta64(1, "s")\n\n assert res.dtype == "M8[us]"\n assert abs(res[0] - now) < tolerance\n assert res[1] == vals[1]\n\n assert res2.dtype == "M8[us]"\n assert abs(res2[1] - now) < tolerance * 2\n assert res2[0] == vals[1]\n\n def test_array_strptime_str_outside_nano_range(self):\n vals = np.array(["2401-09-15"], dtype=object)\n expected = np.array(["2401-09-15"], dtype="M8[s]")\n fmt = "ISO8601"\n res, _ = array_strptime(vals, fmt=fmt, creso=creso_infer)\n tm.assert_numpy_array_equal(res, expected)\n\n # non-iso -> different path\n vals2 = np.array(["Sep 15, 2401"], dtype=object)\n expected2 = np.array(["2401-09-15"], dtype="M8[s]")\n fmt2 = "%b %d, %Y"\n res2, _ = array_strptime(vals2, fmt=fmt2, creso=creso_infer)\n tm.assert_numpy_array_equal(res2, expected2)\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_strptime.py | test_strptime.py | Python | 3,896 | 0.95 | 0.063636 | 0.047619 | vue-tools | 611 | 2025-02-07T04:21:50.697588 | Apache-2.0 | true | e21fdf1a02add266d428d0aeb691ee63 |
import re\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.timedeltas import (\n array_to_timedelta64,\n delta_to_nanoseconds,\n ints_to_pytimedelta,\n)\n\nfrom pandas import (\n Timedelta,\n offsets,\n)\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize(\n "obj,expected",\n [\n (np.timedelta64(14, "D"), 14 * 24 * 3600 * 1e9),\n (Timedelta(minutes=-7), -7 * 60 * 1e9),\n (Timedelta(minutes=-7).to_pytimedelta(), -7 * 60 * 1e9),\n (Timedelta(seconds=1234e-9), 1234), # GH43764, GH40946\n (\n Timedelta(seconds=1e-9, milliseconds=1e-5, microseconds=1e-1),\n 111,\n ), # GH43764\n (\n Timedelta(days=1, seconds=1e-9, milliseconds=1e-5, microseconds=1e-1),\n 24 * 3600e9 + 111,\n ), # GH43764\n (offsets.Nano(125), 125),\n ],\n)\ndef test_delta_to_nanoseconds(obj, expected):\n result = delta_to_nanoseconds(obj)\n assert result == expected\n\n\ndef test_delta_to_nanoseconds_error():\n obj = np.array([123456789], dtype="m8[ns]")\n\n with pytest.raises(TypeError, match="<class 'numpy.ndarray'>"):\n delta_to_nanoseconds(obj)\n\n with pytest.raises(TypeError, match="float"):\n delta_to_nanoseconds(1.5)\n with pytest.raises(TypeError, match="int"):\n delta_to_nanoseconds(1)\n with pytest.raises(TypeError, match="int"):\n delta_to_nanoseconds(np.int64(2))\n with pytest.raises(TypeError, match="int"):\n delta_to_nanoseconds(np.int32(3))\n\n\ndef test_delta_to_nanoseconds_td64_MY_raises():\n msg = (\n "delta_to_nanoseconds does not support Y or M units, "\n "as their duration in nanoseconds is ambiguous"\n )\n\n td = np.timedelta64(1234, "Y")\n\n with pytest.raises(ValueError, match=msg):\n delta_to_nanoseconds(td)\n\n td = np.timedelta64(1234, "M")\n\n with pytest.raises(ValueError, match=msg):\n delta_to_nanoseconds(td)\n\n\n@pytest.mark.parametrize("unit", ["Y", "M"])\ndef test_unsupported_td64_unit_raises(unit):\n # GH 52806\n with pytest.raises(\n ValueError,\n match=f"Unit {unit} is not supported. 
"\n "Only unambiguous timedelta values durations are supported. "\n "Allowed units are 'W', 'D', 'h', 'm', 's', 'ms', 'us', 'ns'",\n ):\n Timedelta(np.timedelta64(1, unit))\n\n\ndef test_huge_nanoseconds_overflow():\n # GH 32402\n assert delta_to_nanoseconds(Timedelta(1e10)) == 1e10\n assert delta_to_nanoseconds(Timedelta(nanoseconds=1e10)) == 1e10\n\n\n@pytest.mark.parametrize(\n "kwargs", [{"Seconds": 1}, {"seconds": 1, "Nanoseconds": 1}, {"Foo": 2}]\n)\ndef test_kwarg_assertion(kwargs):\n err_message = (\n "cannot construct a Timedelta from the passed arguments, "\n "allowed keywords are "\n "[weeks, days, hours, minutes, seconds, "\n "milliseconds, microseconds, nanoseconds]"\n )\n\n with pytest.raises(ValueError, match=re.escape(err_message)):\n Timedelta(**kwargs)\n\n\nclass TestArrayToTimedelta64:\n def test_array_to_timedelta64_string_with_unit_2d_raises(self):\n # check the 'unit is not None and errors != "coerce"' path\n # in array_to_timedelta64 raises correctly with 2D values\n values = np.array([["1", 2], [3, "4"]], dtype=object)\n with pytest.raises(ValueError, match="unit must not be specified"):\n array_to_timedelta64(values, unit="s")\n\n def test_array_to_timedelta64_non_object_raises(self):\n # check we raise, not segfault\n values = np.arange(5)\n\n msg = "'values' must have object dtype"\n with pytest.raises(TypeError, match=msg):\n array_to_timedelta64(values)\n\n\n@pytest.mark.parametrize("unit", ["s", "ms", "us"])\ndef test_ints_to_pytimedelta(unit):\n # tests for non-nanosecond cases\n arr = np.arange(6, dtype=np.int64).view(f"m8[{unit}]")\n\n res = ints_to_pytimedelta(arr, box=False)\n # For non-nanosecond, .astype(object) gives pytimedelta objects\n # instead of integers\n expected = arr.astype(object)\n tm.assert_numpy_array_equal(res, expected)\n\n res = ints_to_pytimedelta(arr, box=True)\n expected = np.array([Timedelta(x) for x in arr], dtype=object)\n tm.assert_numpy_array_equal(res, expected)\n\n\n@pytest.mark.parametrize("unit", 
["Y", "M", "ps", "fs", "as"])\ndef test_ints_to_pytimedelta_unsupported(unit):\n arr = np.arange(6, dtype=np.int64).view(f"m8[{unit}]")\n\n with pytest.raises(NotImplementedError, match=r"\d{1,2}"):\n ints_to_pytimedelta(arr, box=False)\n msg = "Only resolutions 's', 'ms', 'us', 'ns' are supported"\n with pytest.raises(NotImplementedError, match=msg):\n ints_to_pytimedelta(arr, box=True)\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_timedeltas.py | test_timedeltas.py | Python | 4,662 | 0.95 | 0.09396 | 0.068966 | awesome-app | 203 | 2024-02-29T16:11:00.886382 | BSD-3-Clause | true | 19c6330d7e09b0e9cc1ee97bfd8ef5e8 |
from datetime import (\n datetime,\n timedelta,\n timezone,\n)\n\nimport dateutil.tz\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs import (\n conversion,\n timezones,\n)\nfrom pandas.compat import is_platform_windows\n\nfrom pandas import Timestamp\n\n\ndef test_is_utc(utc_fixture):\n tz = timezones.maybe_get_tz(utc_fixture)\n assert timezones.is_utc(tz)\n\n\n@pytest.mark.parametrize("tz_name", list(pytz.common_timezones))\ndef test_cache_keys_are_distinct_for_pytz_vs_dateutil(tz_name):\n tz_p = timezones.maybe_get_tz(tz_name)\n tz_d = timezones.maybe_get_tz("dateutil/" + tz_name)\n\n if tz_d is None:\n pytest.skip(tz_name + ": dateutil does not know about this one")\n\n if not (tz_name == "UTC" and is_platform_windows()):\n # they both end up as tzwin("UTC") on windows\n assert timezones._p_tz_cache_key(tz_p) != timezones._p_tz_cache_key(tz_d)\n\n\ndef test_tzlocal_repr():\n # see gh-13583\n ts = Timestamp("2011-01-01", tz=dateutil.tz.tzlocal())\n assert ts.tz == dateutil.tz.tzlocal()\n assert "tz='tzlocal()')" in repr(ts)\n\n\ndef test_tzlocal_maybe_get_tz():\n # see gh-13583\n tz = timezones.maybe_get_tz("tzlocal()")\n assert tz == dateutil.tz.tzlocal()\n\n\ndef test_tzlocal_offset():\n # see gh-13583\n #\n # Get offset using normal datetime for test.\n ts = Timestamp("2011-01-01", tz=dateutil.tz.tzlocal())\n\n offset = dateutil.tz.tzlocal().utcoffset(datetime(2011, 1, 1))\n offset = offset.total_seconds()\n\n assert ts._value + offset == Timestamp("2011-01-01")._value\n\n\ndef test_tzlocal_is_not_utc():\n # even if the machine running the test is localized to UTC\n tz = dateutil.tz.tzlocal()\n assert not timezones.is_utc(tz)\n\n assert not timezones.tz_compare(tz, dateutil.tz.tzutc())\n\n\ndef test_tz_compare_utc(utc_fixture, utc_fixture2):\n tz = timezones.maybe_get_tz(utc_fixture)\n tz2 = timezones.maybe_get_tz(utc_fixture2)\n assert timezones.tz_compare(tz, tz2)\n\n\n@pytest.fixture(\n params=[\n (pytz.timezone("US/Eastern"), lambda tz, x: 
tz.localize(x)),\n (dateutil.tz.gettz("US/Eastern"), lambda tz, x: x.replace(tzinfo=tz)),\n ]\n)\ndef infer_setup(request):\n eastern, localize = request.param\n\n start_naive = datetime(2001, 1, 1)\n end_naive = datetime(2009, 1, 1)\n\n start = localize(eastern, start_naive)\n end = localize(eastern, end_naive)\n\n return eastern, localize, start, end, start_naive, end_naive\n\n\ndef test_infer_tz_compat(infer_setup):\n eastern, _, start, end, start_naive, end_naive = infer_setup\n\n assert (\n timezones.infer_tzinfo(start, end)\n is conversion.localize_pydatetime(start_naive, eastern).tzinfo\n )\n assert (\n timezones.infer_tzinfo(start, None)\n is conversion.localize_pydatetime(start_naive, eastern).tzinfo\n )\n assert (\n timezones.infer_tzinfo(None, end)\n is conversion.localize_pydatetime(end_naive, eastern).tzinfo\n )\n\n\ndef test_infer_tz_utc_localize(infer_setup):\n _, _, start, end, start_naive, end_naive = infer_setup\n utc = pytz.utc\n\n start = utc.localize(start_naive)\n end = utc.localize(end_naive)\n\n assert timezones.infer_tzinfo(start, end) is utc\n\n\n@pytest.mark.parametrize("ordered", [True, False])\ndef test_infer_tz_mismatch(infer_setup, ordered):\n eastern, _, _, _, start_naive, end_naive = infer_setup\n msg = "Inputs must both have the same timezone"\n\n utc = pytz.utc\n start = utc.localize(start_naive)\n end = conversion.localize_pydatetime(end_naive, eastern)\n\n args = (start, end) if ordered else (end, start)\n\n with pytest.raises(AssertionError, match=msg):\n timezones.infer_tzinfo(*args)\n\n\ndef test_maybe_get_tz_invalid_types():\n with pytest.raises(TypeError, match="<class 'float'>"):\n timezones.maybe_get_tz(44.0)\n\n with pytest.raises(TypeError, match="<class 'module'>"):\n timezones.maybe_get_tz(pytz)\n\n msg = "<class 'pandas._libs.tslibs.timestamps.Timestamp'>"\n with pytest.raises(TypeError, match=msg):\n timezones.maybe_get_tz(Timestamp("2021-01-01", tz="UTC"))\n\n\ndef test_maybe_get_tz_offset_only():\n # see 
gh-36004\n\n # timezone.utc\n tz = timezones.maybe_get_tz(timezone.utc)\n assert tz == timezone(timedelta(hours=0, minutes=0))\n\n # without UTC+- prefix\n tz = timezones.maybe_get_tz("+01:15")\n assert tz == timezone(timedelta(hours=1, minutes=15))\n\n tz = timezones.maybe_get_tz("-01:15")\n assert tz == timezone(-timedelta(hours=1, minutes=15))\n\n # with UTC+- prefix\n tz = timezones.maybe_get_tz("UTC+02:45")\n assert tz == timezone(timedelta(hours=2, minutes=45))\n\n tz = timezones.maybe_get_tz("UTC-02:45")\n assert tz == timezone(-timedelta(hours=2, minutes=45))\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_timezones.py | test_timezones.py | Python | 4,724 | 0.95 | 0.125 | 0.09322 | vue-tools | 349 | 2023-11-13T22:11:30.713933 | Apache-2.0 | true | 26f35c04ffe44afdf87427024f9504c8 |
import re\n\nimport pytest\n\nfrom pandas._libs.tslibs import (\n Timedelta,\n offsets,\n to_offset,\n)\n\n\n@pytest.mark.parametrize(\n "freq_input,expected",\n [\n (to_offset("10us"), offsets.Micro(10)),\n (offsets.Hour(), offsets.Hour()),\n ("2h30min", offsets.Minute(150)),\n ("2h 30min", offsets.Minute(150)),\n ("2h30min15s", offsets.Second(150 * 60 + 15)),\n ("2h 60min", offsets.Hour(3)),\n ("2h 20.5min", offsets.Second(8430)),\n ("1.5min", offsets.Second(90)),\n ("0.5s", offsets.Milli(500)),\n ("15ms500us", offsets.Micro(15500)),\n ("10s75ms", offsets.Milli(10075)),\n ("1s0.25ms", offsets.Micro(1000250)),\n ("1s0.25ms", offsets.Micro(1000250)),\n ("2800ns", offsets.Nano(2800)),\n ("2SME", offsets.SemiMonthEnd(2)),\n ("2SME-16", offsets.SemiMonthEnd(2, day_of_month=16)),\n ("2SMS-14", offsets.SemiMonthBegin(2, day_of_month=14)),\n ("2SMS-15", offsets.SemiMonthBegin(2)),\n ],\n)\ndef test_to_offset(freq_input, expected):\n result = to_offset(freq_input)\n assert result == expected\n\n\n@pytest.mark.parametrize(\n "freqstr,expected", [("-1s", -1), ("-2SME", -2), ("-1SMS", -1), ("-5min10s", -310)]\n)\ndef test_to_offset_negative(freqstr, expected):\n result = to_offset(freqstr)\n assert result.n == expected\n\n\n@pytest.mark.filterwarnings("ignore:.*'m' is deprecated.*:FutureWarning")\n@pytest.mark.parametrize(\n "freqstr",\n [\n "2h20m",\n "us1",\n "-us",\n "3us1",\n "-2-3us",\n "-2D:3h",\n "1.5.0s",\n "2SMS-15-15",\n "2SMS-15D",\n "100foo",\n # Invalid leading +/- signs.\n "+-1d",\n "-+1h",\n "+1",\n "-7",\n "+d",\n "-m",\n # Invalid shortcut anchors.\n "SME-0",\n "SME-28",\n "SME-29",\n "SME-FOO",\n "BSM",\n "SME--1",\n "SMS-1",\n "SMS-28",\n "SMS-30",\n "SMS-BAR",\n "SMS-BYR",\n "BSMS",\n "SMS--2",\n ],\n)\ndef test_to_offset_invalid(freqstr):\n # see gh-13930\n\n # We escape string because some of our\n # inputs contain regex special characters.\n msg = re.escape(f"Invalid frequency: {freqstr}")\n with pytest.raises(ValueError, match=msg):\n 
to_offset(freqstr)\n\n\ndef test_to_offset_no_evaluate():\n msg = str(("", ""))\n with pytest.raises(TypeError, match=msg):\n to_offset(("", ""))\n\n\ndef test_to_offset_tuple_unsupported():\n with pytest.raises(TypeError, match="pass as a string instead"):\n to_offset((5, "T"))\n\n\n@pytest.mark.parametrize(\n "freqstr,expected",\n [\n ("2D 3h", offsets.Hour(51)),\n ("2 D3 h", offsets.Hour(51)),\n ("2 D 3 h", offsets.Hour(51)),\n (" 2 D 3 h ", offsets.Hour(51)),\n (" h ", offsets.Hour()),\n (" 3 h ", offsets.Hour(3)),\n ],\n)\ndef test_to_offset_whitespace(freqstr, expected):\n result = to_offset(freqstr)\n assert result == expected\n\n\n@pytest.mark.parametrize(\n "freqstr,expected", [("00h 00min 01s", 1), ("-00h 03min 14s", -194)]\n)\ndef test_to_offset_leading_zero(freqstr, expected):\n result = to_offset(freqstr)\n assert result.n == expected\n\n\n@pytest.mark.parametrize("freqstr,expected", [("+1d", 1), ("+2h30min", 150)])\ndef test_to_offset_leading_plus(freqstr, expected):\n result = to_offset(freqstr)\n assert result.n == expected\n\n\n@pytest.mark.parametrize(\n "kwargs,expected",\n [\n ({"days": 1, "seconds": 1}, offsets.Second(86401)),\n ({"days": -1, "seconds": 1}, offsets.Second(-86399)),\n ({"hours": 1, "minutes": 10}, offsets.Minute(70)),\n ({"hours": 1, "minutes": -10}, offsets.Minute(50)),\n ({"weeks": 1}, offsets.Day(7)),\n ({"hours": 1}, offsets.Hour(1)),\n ({"hours": 1}, to_offset("60min")),\n ({"microseconds": 1}, offsets.Micro(1)),\n ({"microseconds": 0}, offsets.Nano(0)),\n ],\n)\ndef test_to_offset_pd_timedelta(kwargs, expected):\n # see gh-9064\n td = Timedelta(**kwargs)\n result = to_offset(td)\n assert result == expected\n\n\n@pytest.mark.parametrize(\n "shortcut,expected",\n [\n ("W", offsets.Week(weekday=6)),\n ("W-SUN", offsets.Week(weekday=6)),\n ("QE", offsets.QuarterEnd(startingMonth=12)),\n ("QE-DEC", offsets.QuarterEnd(startingMonth=12)),\n ("QE-MAY", offsets.QuarterEnd(startingMonth=5)),\n ("SME", 
offsets.SemiMonthEnd(day_of_month=15)),\n ("SME-15", offsets.SemiMonthEnd(day_of_month=15)),\n ("SME-1", offsets.SemiMonthEnd(day_of_month=1)),\n ("SME-27", offsets.SemiMonthEnd(day_of_month=27)),\n ("SMS-2", offsets.SemiMonthBegin(day_of_month=2)),\n ("SMS-27", offsets.SemiMonthBegin(day_of_month=27)),\n ],\n)\ndef test_anchored_shortcuts(shortcut, expected):\n result = to_offset(shortcut)\n assert result == expected\n\n\n@pytest.mark.parametrize(\n "freq_depr",\n [\n "2ye-mar",\n "2ys",\n "2qe",\n "2qs-feb",\n "2bqs",\n "2sms",\n "2bms",\n "2cbme",\n "2me",\n "2w",\n ],\n)\ndef test_to_offset_lowercase_frequency_deprecated(freq_depr):\n # GH#54939\n depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed in a "\n f"future version, please use '{freq_depr.upper()[1:]}' instead."\n\n with pytest.raises(FutureWarning, match=depr_msg):\n to_offset(freq_depr)\n\n\n@pytest.mark.parametrize(\n "freq_depr",\n [\n "2H",\n "2BH",\n "2MIN",\n "2S",\n "2Us",\n "2NS",\n ],\n)\ndef test_to_offset_uppercase_frequency_deprecated(freq_depr):\n # GH#54939\n depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed in a "\n f"future version, please use '{freq_depr.lower()[1:]}' instead."\n\n with pytest.raises(FutureWarning, match=depr_msg):\n to_offset(freq_depr)\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_to_offset.py | test_to_offset.py | Python | 5,873 | 0.95 | 0.054795 | 0.042105 | node-utils | 268 | 2023-07-11T00:19:01.873545 | Apache-2.0 | true | f9f69ae37512ee2a589225d609c20aab |
import numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs.tzconversion import tz_localize_to_utc\n\n\nclass TestTZLocalizeToUTC:\n def test_tz_localize_to_utc_ambiguous_infer(self):\n # val is a timestamp that is ambiguous when localized to US/Eastern\n val = 1_320_541_200_000_000_000\n vals = np.array([val, val - 1, val], dtype=np.int64)\n\n with pytest.raises(pytz.AmbiguousTimeError, match="2011-11-06 01:00:00"):\n tz_localize_to_utc(vals, pytz.timezone("US/Eastern"), ambiguous="infer")\n\n with pytest.raises(pytz.AmbiguousTimeError, match="are no repeated times"):\n tz_localize_to_utc(vals[:1], pytz.timezone("US/Eastern"), ambiguous="infer")\n\n vals[1] += 1\n msg = "There are 2 dst switches when there should only be 1"\n with pytest.raises(pytz.AmbiguousTimeError, match=msg):\n tz_localize_to_utc(vals, pytz.timezone("US/Eastern"), ambiguous="infer")\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_tzconversion.py | test_tzconversion.py | Python | 953 | 0.95 | 0.086957 | 0.058824 | vue-tools | 334 | 2024-03-03T21:05:33.713886 | Apache-2.0 | true | 822578a1ea90b7f0b0691d369f9ead9b |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_api.cpython-313.pyc | test_api.cpython-313.pyc | Other | 1,508 | 0.8 | 0 | 0 | node-utils | 588 | 2024-08-15T14:26:50.625352 | BSD-3-Clause | true | e066ac5fe0e63a4f8c2e04c7d3dd9647 |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_array_to_datetime.cpython-313.pyc | test_array_to_datetime.cpython-313.pyc | Other | 18,497 | 0.8 | 0 | 0.02 | vue-tools | 739 | 2023-12-25T20:36:25.893086 | MIT | true | c3c323e5174a0e0f2205606e12879641 |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_ccalendar.cpython-313.pyc | test_ccalendar.cpython-313.pyc | Other | 3,116 | 0.8 | 0 | 0 | python-kit | 648 | 2024-04-24T02:52:06.531974 | Apache-2.0 | true | 8f53d825af53560897625abe4b15dc8c |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_conversion.cpython-313.pyc | test_conversion.cpython-313.pyc | Other | 7,955 | 0.8 | 0 | 0.026316 | vue-tools | 218 | 2023-12-22T11:13:06.875148 | MIT | true | 3d5c95a5427fb7ba55b73f8928625a87 |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_fields.cpython-313.pyc | test_fields.cpython-313.pyc | Other | 2,503 | 0.8 | 0 | 0 | awesome-app | 912 | 2023-11-05T21:04:43.531732 | GPL-3.0 | true | ad9b1f8a163fcb1226a11579b16ecfc8 |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_libfrequencies.cpython-313.pyc | test_libfrequencies.cpython-313.pyc | Other | 1,330 | 0.8 | 0 | 0 | awesome-app | 250 | 2024-02-28T21:21:21.131150 | MIT | true | 5b26ec494ac9376b43476a504f973c1c |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_liboffsets.cpython-313.pyc | test_liboffsets.cpython-313.pyc | Other | 7,472 | 0.95 | 0.013333 | 0 | python-kit | 604 | 2023-12-13T07:54:34.115961 | BSD-3-Clause | true | 5c8d8a41e3f4428f612a5d2e6c64afa8 |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_npy_units.cpython-313.pyc | test_npy_units.cpython-313.pyc | Other | 1,877 | 0.8 | 0 | 0.066667 | python-kit | 118 | 2024-11-24T13:18:35.094101 | BSD-3-Clause | true | 27c43f785d375075b92295462a25e02f |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_np_datetime.cpython-313.pyc | test_np_datetime.cpython-313.pyc | Other | 12,575 | 0.8 | 0 | 0 | awesome-app | 510 | 2023-08-11T08:08:48.695700 | MIT | true | 20edd5ad136cafa16da3959ed23c4fb8 |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_parse_iso8601.cpython-313.pyc | test_parse_iso8601.cpython-313.pyc | Other | 4,336 | 0.8 | 0 | 0 | awesome-app | 755 | 2024-01-05T08:50:18.356302 | GPL-3.0 | true | 7594adf8e312dbee9e70a0d49e4b9e25 |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_parsing.cpython-313.pyc | test_parsing.cpython-313.pyc | Other | 16,910 | 0.8 | 0.006897 | 0.048611 | python-kit | 785 | 2024-05-19T19:14:22.117249 | MIT | true | 0808a1ebee17b7d0419838e9caa5787c |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_period.cpython-313.pyc | test_period.cpython-313.pyc | Other | 4,937 | 0.8 | 0 | 0 | python-kit | 520 | 2024-10-01T22:11:26.958234 | BSD-3-Clause | true | 9b52c705a185cab066aee02fdeabfedb |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_resolution.cpython-313.pyc | test_resolution.cpython-313.pyc | Other | 2,760 | 0.8 | 0 | 0 | awesome-app | 219 | 2023-12-12T06:03:35.121143 | MIT | true | 2286ad8d57753c9cee5fc039e9bc593a |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_strptime.cpython-313.pyc | test_strptime.cpython-313.pyc | Other | 6,212 | 0.8 | 0 | 0 | react-lib | 590 | 2024-07-01T11:57:41.024319 | GPL-3.0 | true | c4d39e0c973ee6cc5cd4e9d72bdd861c |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_timedeltas.cpython-313.pyc | test_timedeltas.cpython-313.pyc | Other | 8,354 | 0.95 | 0.01 | 0 | react-lib | 695 | 2025-04-04T04:51:44.482092 | Apache-2.0 | true | 51a296adad2b09823848ff44bb13551a |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_timezones.cpython-313.pyc | test_timezones.cpython-313.pyc | Other | 8,658 | 0.95 | 0.035714 | 0.012987 | react-lib | 498 | 2024-08-03T04:28:46.684406 | Apache-2.0 | true | 80feeb7ff69944a23fa8484403b70f40 |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_to_offset.cpython-313.pyc | test_to_offset.cpython-313.pyc | Other | 8,775 | 0.8 | 0 | 0 | react-lib | 37 | 2025-03-13T05:52:46.683987 | BSD-3-Clause | true | 2f1a265d13e51cbb8cbdc4306db0c070 |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\test_tzconversion.cpython-313.pyc | test_tzconversion.cpython-313.pyc | Other | 1,973 | 0.8 | 0 | 0 | vue-tools | 158 | 2023-12-03T06:00:41.536052 | GPL-3.0 | true | 0dea4df5829f187c8196f8eaaab29b9d |
\n\n | .venv\Lib\site-packages\pandas\tests\tslibs\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 194 | 0.7 | 0 | 0 | node-utils | 685 | 2024-07-07T13:51:01.358709 | BSD-3-Clause | true | 4639dcaa6dcdf27a39302a770ec523b5 |
import pytest\n\n\n@pytest.fixture(params=[True, False])\ndef check_dtype(request):\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef check_exact(request):\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef check_index_type(request):\n return request.param\n\n\n@pytest.fixture(params=[0.5e-3, 0.5e-5])\ndef rtol(request):\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef check_categorical(request):\n return request.param\n | .venv\Lib\site-packages\pandas\tests\util\conftest.py | conftest.py | Python | 476 | 0.85 | 0.192308 | 0 | react-lib | 459 | 2024-10-11T09:47:42.781102 | GPL-3.0 | true | 9c26108e6f6e12c5124cc65122f24f05 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n NA,\n DataFrame,\n Index,\n NaT,\n Series,\n Timestamp,\n)\nimport pandas._testing as tm\n\n\ndef _assert_almost_equal_both(a, b, **kwargs):\n """\n Check that two objects are approximately equal.\n\n This check is performed commutatively.\n\n Parameters\n ----------\n a : object\n The first object to compare.\n b : object\n The second object to compare.\n **kwargs\n The arguments passed to `tm.assert_almost_equal`.\n """\n tm.assert_almost_equal(a, b, **kwargs)\n tm.assert_almost_equal(b, a, **kwargs)\n\n\ndef _assert_not_almost_equal(a, b, **kwargs):\n """\n Check that two objects are not approximately equal.\n\n Parameters\n ----------\n a : object\n The first object to compare.\n b : object\n The second object to compare.\n **kwargs\n The arguments passed to `tm.assert_almost_equal`.\n """\n try:\n tm.assert_almost_equal(a, b, **kwargs)\n msg = f"{a} and {b} were approximately equal when they shouldn't have been"\n pytest.fail(reason=msg)\n except AssertionError:\n pass\n\n\ndef _assert_not_almost_equal_both(a, b, **kwargs):\n """\n Check that two objects are not approximately equal.\n\n This check is performed commutatively.\n\n Parameters\n ----------\n a : object\n The first object to compare.\n b : object\n The second object to compare.\n **kwargs\n The arguments passed to `tm.assert_almost_equal`.\n """\n _assert_not_almost_equal(a, b, **kwargs)\n _assert_not_almost_equal(b, a, **kwargs)\n\n\n@pytest.mark.parametrize(\n "a,b",\n [\n (1.1, 1.1),\n (1.1, 1.100001),\n (np.int16(1), 1.000001),\n (np.float64(1.1), 1.1),\n (np.uint32(5), 5),\n ],\n)\ndef test_assert_almost_equal_numbers(a, b):\n _assert_almost_equal_both(a, b)\n\n\n@pytest.mark.parametrize(\n "a,b",\n [\n (1.1, 1),\n (1.1, True),\n (1, 2),\n (1.0001, np.int16(1)),\n # The following two examples are not "almost equal" due to tol.\n (0.1, 0.1001),\n (0.0011, 0.0012),\n ],\n)\ndef test_assert_not_almost_equal_numbers(a, b):\n 
_assert_not_almost_equal_both(a, b)\n\n\n@pytest.mark.parametrize(\n "a,b",\n [\n (1.1, 1.1),\n (1.1, 1.100001),\n (1.1, 1.1001),\n (0.000001, 0.000005),\n (1000.0, 1000.0005),\n # Testing this example, as per #13357\n (0.000011, 0.000012),\n ],\n)\ndef test_assert_almost_equal_numbers_atol(a, b):\n # Equivalent to the deprecated check_less_precise=True, enforced in 2.0\n _assert_almost_equal_both(a, b, rtol=0.5e-3, atol=0.5e-3)\n\n\n@pytest.mark.parametrize("a,b", [(1.1, 1.11), (0.1, 0.101), (0.000011, 0.001012)])\ndef test_assert_not_almost_equal_numbers_atol(a, b):\n _assert_not_almost_equal_both(a, b, atol=1e-3)\n\n\n@pytest.mark.parametrize(\n "a,b",\n [\n (1.1, 1.1),\n (1.1, 1.100001),\n (1.1, 1.1001),\n (1000.0, 1000.0005),\n (1.1, 1.11),\n (0.1, 0.101),\n ],\n)\ndef test_assert_almost_equal_numbers_rtol(a, b):\n _assert_almost_equal_both(a, b, rtol=0.05)\n\n\n@pytest.mark.parametrize("a,b", [(0.000011, 0.000012), (0.000001, 0.000005)])\ndef test_assert_not_almost_equal_numbers_rtol(a, b):\n _assert_not_almost_equal_both(a, b, rtol=0.05)\n\n\n@pytest.mark.parametrize(\n "a,b,rtol",\n [\n (1.00001, 1.00005, 0.001),\n (-0.908356 + 0.2j, -0.908358 + 0.2j, 1e-3),\n (0.1 + 1.009j, 0.1 + 1.006j, 0.1),\n (0.1001 + 2.0j, 0.1 + 2.001j, 0.01),\n ],\n)\ndef test_assert_almost_equal_complex_numbers(a, b, rtol):\n _assert_almost_equal_both(a, b, rtol=rtol)\n _assert_almost_equal_both(np.complex64(a), np.complex64(b), rtol=rtol)\n _assert_almost_equal_both(np.complex128(a), np.complex128(b), rtol=rtol)\n\n\n@pytest.mark.parametrize(\n "a,b,rtol",\n [\n (0.58310768, 0.58330768, 1e-7),\n (-0.908 + 0.2j, -0.978 + 0.2j, 0.001),\n (0.1 + 1j, 0.1 + 2j, 0.01),\n (-0.132 + 1.001j, -0.132 + 1.005j, 1e-5),\n (0.58310768j, 0.58330768j, 1e-9),\n ],\n)\ndef test_assert_not_almost_equal_complex_numbers(a, b, rtol):\n _assert_not_almost_equal_both(a, b, rtol=rtol)\n _assert_not_almost_equal_both(np.complex64(a), np.complex64(b), rtol=rtol)\n 
_assert_not_almost_equal_both(np.complex128(a), np.complex128(b), rtol=rtol)\n\n\n@pytest.mark.parametrize("a,b", [(0, 0), (0, 0.0), (0, np.float64(0)), (0.00000001, 0)])\ndef test_assert_almost_equal_numbers_with_zeros(a, b):\n _assert_almost_equal_both(a, b)\n\n\n@pytest.mark.parametrize("a,b", [(0.001, 0), (1, 0)])\ndef test_assert_not_almost_equal_numbers_with_zeros(a, b):\n _assert_not_almost_equal_both(a, b)\n\n\n@pytest.mark.parametrize("a,b", [(1, "abc"), (1, [1]), (1, object())])\ndef test_assert_not_almost_equal_numbers_with_mixed(a, b):\n _assert_not_almost_equal_both(a, b)\n\n\n@pytest.mark.parametrize(\n "left_dtype", ["M8[ns]", "m8[ns]", "float64", "int64", "object"]\n)\n@pytest.mark.parametrize(\n "right_dtype", ["M8[ns]", "m8[ns]", "float64", "int64", "object"]\n)\ndef test_assert_almost_equal_edge_case_ndarrays(left_dtype, right_dtype):\n # Empty compare.\n _assert_almost_equal_both(\n np.array([], dtype=left_dtype),\n np.array([], dtype=right_dtype),\n check_dtype=False,\n )\n\n\ndef test_assert_almost_equal_sets():\n # GH#51727\n _assert_almost_equal_both({1, 2, 3}, {1, 2, 3})\n\n\ndef test_assert_almost_not_equal_sets():\n # GH#51727\n msg = r"{1, 2, 3} != {1, 2, 4}"\n with pytest.raises(AssertionError, match=msg):\n _assert_almost_equal_both({1, 2, 3}, {1, 2, 4})\n\n\ndef test_assert_almost_equal_dicts():\n _assert_almost_equal_both({"a": 1, "b": 2}, {"a": 1, "b": 2})\n\n\n@pytest.mark.parametrize(\n "a,b",\n [\n ({"a": 1, "b": 2}, {"a": 1, "b": 3}),\n ({"a": 1, "b": 2}, {"a": 1, "b": 2, "c": 3}),\n ({"a": 1}, 1),\n ({"a": 1}, "abc"),\n ({"a": 1}, [1]),\n ],\n)\ndef test_assert_not_almost_equal_dicts(a, b):\n _assert_not_almost_equal_both(a, b)\n\n\n@pytest.mark.parametrize("val", [1, 2])\ndef test_assert_almost_equal_dict_like_object(val):\n dict_val = 1\n real_dict = {"a": val}\n\n class DictLikeObj:\n def keys(self):\n return ("a",)\n\n def __getitem__(self, item):\n if item == "a":\n return dict_val\n\n func = (\n _assert_almost_equal_both 
if val == dict_val else _assert_not_almost_equal_both\n )\n func(real_dict, DictLikeObj(), check_dtype=False)\n\n\ndef test_assert_almost_equal_strings():\n _assert_almost_equal_both("abc", "abc")\n\n\n@pytest.mark.parametrize(\n "a,b", [("abc", "abcd"), ("abc", "abd"), ("abc", 1), ("abc", [1])]\n)\ndef test_assert_not_almost_equal_strings(a, b):\n _assert_not_almost_equal_both(a, b)\n\n\n@pytest.mark.parametrize(\n "a,b", [([1, 2, 3], [1, 2, 3]), (np.array([1, 2, 3]), np.array([1, 2, 3]))]\n)\ndef test_assert_almost_equal_iterables(a, b):\n _assert_almost_equal_both(a, b)\n\n\n@pytest.mark.parametrize(\n "a,b",\n [\n # Class is different.\n (np.array([1, 2, 3]), [1, 2, 3]),\n # Dtype is different.\n (np.array([1, 2, 3]), np.array([1.0, 2.0, 3.0])),\n # Can't compare generators.\n (iter([1, 2, 3]), [1, 2, 3]),\n ([1, 2, 3], [1, 2, 4]),\n ([1, 2, 3], [1, 2, 3, 4]),\n ([1, 2, 3], 1),\n ],\n)\ndef test_assert_not_almost_equal_iterables(a, b):\n _assert_not_almost_equal(a, b)\n\n\ndef test_assert_almost_equal_null():\n _assert_almost_equal_both(None, None)\n\n\n@pytest.mark.parametrize("a,b", [(None, np.nan), (None, 0), (np.nan, 0)])\ndef test_assert_not_almost_equal_null(a, b):\n _assert_not_almost_equal(a, b)\n\n\n@pytest.mark.parametrize(\n "a,b",\n [\n (np.inf, np.inf),\n (np.inf, float("inf")),\n (np.array([np.inf, np.nan, -np.inf]), np.array([np.inf, np.nan, -np.inf])),\n ],\n)\ndef test_assert_almost_equal_inf(a, b):\n _assert_almost_equal_both(a, b)\n\n\nobjs = [NA, np.nan, NaT, None, np.datetime64("NaT"), np.timedelta64("NaT")]\n\n\n@pytest.mark.parametrize("left", objs)\n@pytest.mark.parametrize("right", objs)\ndef test_mismatched_na_assert_almost_equal_deprecation(left, right):\n left_arr = np.array([left], dtype=object)\n right_arr = np.array([right], dtype=object)\n\n msg = "Mismatched null-like values"\n\n if left is right:\n _assert_almost_equal_both(left, right, check_dtype=False)\n tm.assert_numpy_array_equal(left_arr, right_arr)\n 
tm.assert_index_equal(\n Index(left_arr, dtype=object), Index(right_arr, dtype=object)\n )\n tm.assert_series_equal(\n Series(left_arr, dtype=object), Series(right_arr, dtype=object)\n )\n tm.assert_frame_equal(\n DataFrame(left_arr, dtype=object), DataFrame(right_arr, dtype=object)\n )\n\n else:\n with tm.assert_produces_warning(FutureWarning, match=msg):\n _assert_almost_equal_both(left, right, check_dtype=False)\n\n # TODO: to get the same deprecation in assert_numpy_array_equal we need\n # to change/deprecate the default for strict_nan to become True\n # TODO: to get the same deprecation in assert_index_equal we need to\n # change/deprecate array_equivalent_object to be stricter, as\n # assert_index_equal uses Index.equal which uses array_equivalent.\n with tm.assert_produces_warning(FutureWarning, match=msg):\n tm.assert_series_equal(\n Series(left_arr, dtype=object), Series(right_arr, dtype=object)\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n tm.assert_frame_equal(\n DataFrame(left_arr, dtype=object), DataFrame(right_arr, dtype=object)\n )\n\n\ndef test_assert_not_almost_equal_inf():\n _assert_not_almost_equal_both(np.inf, 0)\n\n\n@pytest.mark.parametrize(\n "a,b",\n [\n (Index([1.0, 1.1]), Index([1.0, 1.100001])),\n (Series([1.0, 1.1]), Series([1.0, 1.100001])),\n (np.array([1.1, 2.000001]), np.array([1.1, 2.0])),\n (DataFrame({"a": [1.0, 1.1]}), DataFrame({"a": [1.0, 1.100001]})),\n ],\n)\ndef test_assert_almost_equal_pandas(a, b):\n _assert_almost_equal_both(a, b)\n\n\ndef test_assert_almost_equal_object():\n a = [Timestamp("2011-01-01"), Timestamp("2011-01-01")]\n b = [Timestamp("2011-01-01"), Timestamp("2011-01-01")]\n _assert_almost_equal_both(a, b)\n\n\ndef test_assert_almost_equal_value_mismatch():\n msg = "expected 2\\.00000 but got 1\\.00000, with rtol=1e-05, atol=1e-08"\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(1, 2)\n\n\n@pytest.mark.parametrize(\n "a,b,klass1,klass2",\n [(np.array([1]), 1, 
"ndarray", "int"), (1, np.array([1]), "int", "ndarray")],\n)\ndef test_assert_almost_equal_class_mismatch(a, b, klass1, klass2):\n msg = f"""numpy array are different\n\nnumpy array classes are different\n\\[left\\]: {klass1}\n\\[right\\]: {klass2}"""\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(a, b)\n\n\ndef test_assert_almost_equal_value_mismatch1():\n msg = """numpy array are different\n\nnumpy array values are different \\(66\\.66667 %\\)\n\\[left\\]: \\[nan, 2\\.0, 3\\.0\\]\n\\[right\\]: \\[1\\.0, nan, 3\\.0\\]"""\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(np.array([np.nan, 2, 3]), np.array([1, np.nan, 3]))\n\n\ndef test_assert_almost_equal_value_mismatch2():\n msg = """numpy array are different\n\nnumpy array values are different \\(50\\.0 %\\)\n\\[left\\]: \\[1, 2\\]\n\\[right\\]: \\[1, 3\\]"""\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(np.array([1, 2]), np.array([1, 3]))\n\n\ndef test_assert_almost_equal_value_mismatch3():\n msg = """numpy array are different\n\nnumpy array values are different \\(16\\.66667 %\\)\n\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\], \\[5, 6\\]\\]\n\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\], \\[5, 6\\]\\]"""\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(\n np.array([[1, 2], [3, 4], [5, 6]]), np.array([[1, 3], [3, 4], [5, 6]])\n )\n\n\ndef test_assert_almost_equal_value_mismatch4():\n msg = """numpy array are different\n\nnumpy array values are different \\(25\\.0 %\\)\n\\[left\\]: \\[\\[1, 2\\], \\[3, 4\\]\\]\n\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\]\\]"""\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(np.array([[1, 2], [3, 4]]), np.array([[1, 3], [3, 4]]))\n\n\ndef test_assert_almost_equal_shape_mismatch_override():\n msg = """Index are different\n\nIndex shapes are different\n\\[left\\]: \\(2L*,\\)\n\\[right\\]: \\(3L*,\\)"""\n with pytest.raises(AssertionError, match=msg):\n 
tm.assert_almost_equal(np.array([1, 2]), np.array([3, 4, 5]), obj="Index")\n\n\ndef test_assert_almost_equal_unicode():\n # see gh-20503\n msg = """numpy array are different\n\nnumpy array values are different \\(33\\.33333 %\\)\n\\[left\\]: \\[á, à, ä\\]\n\\[right\\]: \\[á, à, å\\]"""\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(np.array(["á", "à", "ä"]), np.array(["á", "à", "å"]))\n\n\ndef test_assert_almost_equal_timestamp():\n a = np.array([Timestamp("2011-01-01"), Timestamp("2011-01-01")])\n b = np.array([Timestamp("2011-01-01"), Timestamp("2011-01-02")])\n\n msg = """numpy array are different\n\nnumpy array values are different \\(50\\.0 %\\)\n\\[left\\]: \\[2011-01-01 00:00:00, 2011-01-01 00:00:00\\]\n\\[right\\]: \\[2011-01-01 00:00:00, 2011-01-02 00:00:00\\]"""\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal(a, b)\n\n\ndef test_assert_almost_equal_iterable_length_mismatch():\n msg = """Iterable are different\n\nIterable length are different\n\\[left\\]: 2\n\\[right\\]: 3"""\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal([1, 2], [3, 4, 5])\n\n\ndef test_assert_almost_equal_iterable_values_mismatch():\n msg = """Iterable are different\n\nIterable values are different \\(50\\.0 %\\)\n\\[left\\]: \\[1, 2\\]\n\\[right\\]: \\[1, 3\\]"""\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_almost_equal([1, 2], [1, 3])\n\n\nsubarr = np.empty(2, dtype=object)\nsubarr[:] = [np.array([None, "b"], dtype=object), np.array(["c", "d"], dtype=object)]\n\nNESTED_CASES = [\n # nested array\n (\n np.array([np.array([50, 70, 90]), np.array([20, 30])], dtype=object),\n np.array([np.array([50, 70, 90]), np.array([20, 30])], dtype=object),\n ),\n # >1 level of nesting\n (\n np.array(\n [\n np.array([np.array([50, 70]), np.array([90])], dtype=object),\n np.array([np.array([20, 30])], dtype=object),\n ],\n dtype=object,\n ),\n np.array(\n [\n np.array([np.array([50, 70]), 
np.array([90])], dtype=object),\n np.array([np.array([20, 30])], dtype=object),\n ],\n dtype=object,\n ),\n ),\n # lists\n (\n np.array([[50, 70, 90], [20, 30]], dtype=object),\n np.array([[50, 70, 90], [20, 30]], dtype=object),\n ),\n # mixed array/list\n (\n np.array([np.array([1, 2, 3]), np.array([4, 5])], dtype=object),\n np.array([[1, 2, 3], [4, 5]], dtype=object),\n ),\n (\n np.array(\n [\n np.array([np.array([1, 2, 3]), np.array([4, 5])], dtype=object),\n np.array(\n [np.array([6]), np.array([7, 8]), np.array([9])], dtype=object\n ),\n ],\n dtype=object,\n ),\n np.array([[[1, 2, 3], [4, 5]], [[6], [7, 8], [9]]], dtype=object),\n ),\n # same-length lists\n (\n np.array([subarr, None], dtype=object),\n np.array([[[None, "b"], ["c", "d"]], None], dtype=object),\n ),\n # dicts\n (\n np.array([{"f1": 1, "f2": np.array(["a", "b"], dtype=object)}], dtype=object),\n np.array([{"f1": 1, "f2": np.array(["a", "b"], dtype=object)}], dtype=object),\n ),\n (\n np.array([{"f1": 1, "f2": np.array(["a", "b"], dtype=object)}], dtype=object),\n np.array([{"f1": 1, "f2": ["a", "b"]}], dtype=object),\n ),\n # array/list of dicts\n (\n np.array(\n [\n np.array(\n [{"f1": 1, "f2": np.array(["a", "b"], dtype=object)}], dtype=object\n ),\n np.array([], dtype=object),\n ],\n dtype=object,\n ),\n np.array([[{"f1": 1, "f2": ["a", "b"]}], []], dtype=object),\n ),\n]\n\n\n@pytest.mark.filterwarnings("ignore:elementwise comparison failed:DeprecationWarning")\n@pytest.mark.parametrize("a,b", NESTED_CASES)\ndef test_assert_almost_equal_array_nested(a, b):\n _assert_almost_equal_both(a, b)\n | .venv\Lib\site-packages\pandas\tests\util\test_assert_almost_equal.py | test_assert_almost_equal.py | Python | 16,803 | 0.95 | 0.087031 | 0.05423 | python-kit | 687 | 2024-09-11T02:37:43.917121 | GPL-3.0 | true | 1434b2947b0e48809bab8e12f35198f8 |
from types import SimpleNamespace\n\nimport pytest\n\nfrom pandas.core.dtypes.common import is_float\n\nimport pandas._testing as tm\n\n\ndef test_assert_attr_equal(nulls_fixture):\n obj = SimpleNamespace()\n obj.na_value = nulls_fixture\n tm.assert_attr_equal("na_value", obj, obj)\n\n\ndef test_assert_attr_equal_different_nulls(nulls_fixture, nulls_fixture2):\n obj = SimpleNamespace()\n obj.na_value = nulls_fixture\n\n obj2 = SimpleNamespace()\n obj2.na_value = nulls_fixture2\n\n if nulls_fixture is nulls_fixture2:\n tm.assert_attr_equal("na_value", obj, obj2)\n elif is_float(nulls_fixture) and is_float(nulls_fixture2):\n # we consider float("nan") and np.float64("nan") to be equivalent\n tm.assert_attr_equal("na_value", obj, obj2)\n elif type(nulls_fixture) is type(nulls_fixture2):\n # e.g. Decimal("NaN")\n tm.assert_attr_equal("na_value", obj, obj2)\n else:\n with pytest.raises(AssertionError, match='"na_value" are different'):\n tm.assert_attr_equal("na_value", obj, obj2)\n | .venv\Lib\site-packages\pandas\tests\util\test_assert_attr_equal.py | test_assert_attr_equal.py | Python | 1,045 | 0.95 | 0.090909 | 0.083333 | awesome-app | 185 | 2024-01-10T11:51:25.302608 | MIT | true | eed16be8b5cc053e63a9e1050845579e |
import pytest\n\nfrom pandas import Categorical\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize(\n "c",\n [Categorical([1, 2, 3, 4]), Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4, 5])],\n)\ndef test_categorical_equal(c):\n tm.assert_categorical_equal(c, c)\n\n\n@pytest.mark.parametrize("check_category_order", [True, False])\ndef test_categorical_equal_order_mismatch(check_category_order):\n c1 = Categorical([1, 2, 3, 4], categories=[1, 2, 3, 4])\n c2 = Categorical([1, 2, 3, 4], categories=[4, 3, 2, 1])\n kwargs = {"check_category_order": check_category_order}\n\n if check_category_order:\n msg = """Categorical\\.categories are different\n\nCategorical\\.categories values are different \\(100\\.0 %\\)\n\\[left\\]: Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)\n\\[right\\]: Index\\(\\[4, 3, 2, 1\\], dtype='int64'\\)"""\n with pytest.raises(AssertionError, match=msg):\n tm.assert_categorical_equal(c1, c2, **kwargs)\n else:\n tm.assert_categorical_equal(c1, c2, **kwargs)\n\n\ndef test_categorical_equal_categories_mismatch():\n msg = """Categorical\\.categories are different\n\nCategorical\\.categories values are different \\(25\\.0 %\\)\n\\[left\\]: Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)\n\\[right\\]: Index\\(\\[1, 2, 3, 5\\], dtype='int64'\\)"""\n\n c1 = Categorical([1, 2, 3, 4])\n c2 = Categorical([1, 2, 3, 5])\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_categorical_equal(c1, c2)\n\n\ndef test_categorical_equal_codes_mismatch():\n categories = [1, 2, 3, 4]\n msg = """Categorical\\.codes are different\n\nCategorical\\.codes values are different \\(50\\.0 %\\)\n\\[left\\]: \\[0, 1, 3, 2\\]\n\\[right\\]: \\[0, 1, 2, 3\\]"""\n\n c1 = Categorical([1, 2, 4, 3], categories=categories)\n c2 = Categorical([1, 2, 3, 4], categories=categories)\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_categorical_equal(c1, c2)\n\n\ndef test_categorical_equal_ordered_mismatch():\n data = [1, 2, 3, 4]\n msg = """Categorical are 
different\n\nAttribute "ordered" are different\n\\[left\\]: False\n\\[right\\]: True"""\n\n c1 = Categorical(data, ordered=False)\n c2 = Categorical(data, ordered=True)\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_categorical_equal(c1, c2)\n\n\n@pytest.mark.parametrize("obj", ["index", "foo", "pandas"])\ndef test_categorical_equal_object_override(obj):\n data = [1, 2, 3, 4]\n msg = f"""{obj} are different\n\nAttribute "ordered" are different\n\\[left\\]: False\n\\[right\\]: True"""\n\n c1 = Categorical(data, ordered=False)\n c2 = Categorical(data, ordered=True)\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_categorical_equal(c1, c2, obj=obj)\n | .venv\Lib\site-packages\pandas\tests\util\test_assert_categorical_equal.py | test_assert_categorical_equal.py | Python | 2,728 | 0.85 | 0.077778 | 0 | react-lib | 6 | 2025-02-02T17:46:08.692973 | BSD-3-Clause | true | 30f82e779e940f0af8c59e290041cd19 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n Timestamp,\n array,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays.sparse import SparseArray\n\n\n@pytest.mark.parametrize(\n "kwargs",\n [\n {}, # Default is check_exact=False\n {"check_exact": False},\n {"check_exact": True},\n ],\n)\ndef test_assert_extension_array_equal_not_exact(kwargs):\n # see gh-23709\n arr1 = SparseArray([-0.17387645482451206, 0.3414148016424936])\n arr2 = SparseArray([-0.17387645482451206, 0.3414148016424937])\n\n if kwargs.get("check_exact", False):\n msg = """\\nExtensionArray are different\n\nExtensionArray values are different \\(50\\.0 %\\)\n\\[left\\]: \\[-0\\.17387645482.*, 0\\.341414801642.*\\]\n\\[right\\]: \\[-0\\.17387645482.*, 0\\.341414801642.*\\]"""\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_extension_array_equal(arr1, arr2, **kwargs)\n else:\n tm.assert_extension_array_equal(arr1, arr2, **kwargs)\n\n\n@pytest.mark.parametrize("decimals", range(10))\ndef test_assert_extension_array_equal_less_precise(decimals):\n rtol = 0.5 * 10**-decimals\n arr1 = SparseArray([0.5, 0.123456])\n arr2 = SparseArray([0.5, 0.123457])\n\n if decimals >= 5:\n msg = """\\nExtensionArray are different\n\nExtensionArray values are different \\(50\\.0 %\\)\n\\[left\\]: \\[0\\.5, 0\\.123456\\]\n\\[right\\]: \\[0\\.5, 0\\.123457\\]"""\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_extension_array_equal(arr1, arr2, rtol=rtol)\n else:\n tm.assert_extension_array_equal(arr1, arr2, rtol=rtol)\n\n\ndef test_assert_extension_array_equal_dtype_mismatch(check_dtype):\n end = 5\n kwargs = {"check_dtype": check_dtype}\n\n arr1 = SparseArray(np.arange(end, dtype="int64"))\n arr2 = SparseArray(np.arange(end, dtype="int32"))\n\n if check_dtype:\n msg = """\\nExtensionArray are different\n\nAttribute "dtype" are different\n\\[left\\]: Sparse\\[int64, 0\\]\n\\[right\\]: Sparse\\[int32, 0\\]"""\n\n with pytest.raises(AssertionError, match=msg):\n 
tm.assert_extension_array_equal(arr1, arr2, **kwargs)\n else:\n tm.assert_extension_array_equal(arr1, arr2, **kwargs)\n\n\ndef test_assert_extension_array_equal_missing_values():\n arr1 = SparseArray([np.nan, 1, 2, np.nan])\n arr2 = SparseArray([np.nan, 1, 2, 3])\n\n msg = """\\nExtensionArray NA mask are different\n\nExtensionArray NA mask values are different \\(25\\.0 %\\)\n\\[left\\]: \\[True, False, False, True\\]\n\\[right\\]: \\[True, False, False, False\\]"""\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_extension_array_equal(arr1, arr2)\n\n\n@pytest.mark.parametrize("side", ["left", "right"])\ndef test_assert_extension_array_equal_non_extension_array(side):\n numpy_array = np.arange(5)\n extension_array = SparseArray(numpy_array)\n\n msg = f"{side} is not an ExtensionArray"\n args = (\n (numpy_array, extension_array)\n if side == "left"\n else (extension_array, numpy_array)\n )\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_extension_array_equal(*args)\n\n\n@pytest.mark.parametrize("right_dtype", ["Int32", "int64"])\ndef test_assert_extension_array_equal_ignore_dtype_mismatch(right_dtype):\n # https://github.com/pandas-dev/pandas/issues/35715\n left = array([1, 2, 3], dtype="Int64")\n right = array([1, 2, 3], dtype=right_dtype)\n tm.assert_extension_array_equal(left, right, check_dtype=False)\n\n\ndef test_assert_extension_array_equal_time_units():\n # https://github.com/pandas-dev/pandas/issues/55730\n timestamp = Timestamp("2023-11-04T12")\n naive = array([timestamp], dtype="datetime64[ns]")\n utc = array([timestamp], dtype="datetime64[ns, UTC]")\n\n tm.assert_extension_array_equal(naive, utc, check_dtype=False)\n tm.assert_extension_array_equal(utc, naive, check_dtype=False)\n | .venv\Lib\site-packages\pandas\tests\util\test_assert_extension_array_equal.py | test_assert_extension_array_equal.py | Python | 3,887 | 0.95 | 0.087302 | 0.031579 | node-utils | 368 | 2023-08-31T20:51:19.202727 | BSD-3-Clause | true | 
95bcb0d1f5e70f7ff372b179ee0d900b |
import pytest\n\nimport pandas as pd\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\n\n@pytest.fixture(params=[True, False])\ndef by_blocks_fixture(request):\n return request.param\n\n\n@pytest.fixture(params=["DataFrame", "Series"])\ndef obj_fixture(request):\n return request.param\n\n\ndef _assert_frame_equal_both(a, b, **kwargs):\n """\n Check that two DataFrame equal.\n\n This check is performed commutatively.\n\n Parameters\n ----------\n a : DataFrame\n The first DataFrame to compare.\n b : DataFrame\n The second DataFrame to compare.\n kwargs : dict\n The arguments passed to `tm.assert_frame_equal`.\n """\n tm.assert_frame_equal(a, b, **kwargs)\n tm.assert_frame_equal(b, a, **kwargs)\n\n\n@pytest.mark.parametrize("check_like", [True, False])\ndef test_frame_equal_row_order_mismatch(check_like, obj_fixture):\n df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "c"])\n df2 = DataFrame({"A": [3, 2, 1], "B": [6, 5, 4]}, index=["c", "b", "a"])\n\n if not check_like: # Do not ignore row-column orderings.\n msg = f"{obj_fixture}.index are different"\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(df1, df2, check_like=check_like, obj=obj_fixture)\n else:\n _assert_frame_equal_both(df1, df2, check_like=check_like, obj=obj_fixture)\n\n\n@pytest.mark.parametrize(\n "df1,df2",\n [\n (DataFrame({"A": [1, 2, 3]}), DataFrame({"A": [1, 2, 3, 4]})),\n (DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}), DataFrame({"A": [1, 2, 3]})),\n ],\n)\ndef test_frame_equal_shape_mismatch(df1, df2, obj_fixture):\n msg = f"{obj_fixture} are different"\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(df1, df2, obj=obj_fixture)\n\n\n@pytest.mark.parametrize(\n "df1,df2,msg",\n [\n # Index\n (\n DataFrame.from_records({"a": [1, 2], "c": ["l1", "l2"]}, index=["a"]),\n DataFrame.from_records({"a": [1.0, 2.0], "c": ["l1", "l2"]}, index=["a"]),\n "DataFrame\\.index are different",\n ),\n # MultiIndex\n (\n 
DataFrame.from_records(\n {"a": [1, 2], "b": [2.1, 1.5], "c": ["l1", "l2"]}, index=["a", "b"]\n ),\n DataFrame.from_records(\n {"a": [1.0, 2.0], "b": [2.1, 1.5], "c": ["l1", "l2"]}, index=["a", "b"]\n ),\n "MultiIndex level \\[0\\] are different",\n ),\n ],\n)\ndef test_frame_equal_index_dtype_mismatch(df1, df2, msg, check_index_type):\n kwargs = {"check_index_type": check_index_type}\n\n if check_index_type:\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(df1, df2, **kwargs)\n else:\n tm.assert_frame_equal(df1, df2, **kwargs)\n\n\ndef test_empty_dtypes(check_dtype):\n columns = ["col1", "col2"]\n df1 = DataFrame(columns=columns)\n df2 = DataFrame(columns=columns)\n\n kwargs = {"check_dtype": check_dtype}\n df1["col1"] = df1["col1"].astype("int64")\n\n if check_dtype:\n msg = r"Attributes of DataFrame\..* are different"\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(df1, df2, **kwargs)\n else:\n tm.assert_frame_equal(df1, df2, **kwargs)\n\n\n@pytest.mark.parametrize("check_like", [True, False])\ndef test_frame_equal_index_mismatch(check_like, obj_fixture, using_infer_string):\n if using_infer_string:\n dtype = "str"\n else:\n dtype = "object"\n msg = f"""{obj_fixture}\\.index are different\n\n{obj_fixture}\\.index values are different \\(33\\.33333 %\\)\n\\[left\\]: Index\\(\\['a', 'b', 'c'\\], dtype='{dtype}'\\)\n\\[right\\]: Index\\(\\['a', 'b', 'd'\\], dtype='{dtype}'\\)\nAt positional index 2, first diff: c != d"""\n\n df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "c"])\n df2 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "d"])\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(df1, df2, check_like=check_like, obj=obj_fixture)\n\n\n@pytest.mark.parametrize("check_like", [True, False])\ndef test_frame_equal_columns_mismatch(check_like, obj_fixture, using_infer_string):\n if using_infer_string:\n dtype = "str"\n else:\n dtype = "object"\n msg = 
f"""{obj_fixture}\\.columns are different\n\n{obj_fixture}\\.columns values are different \\(50\\.0 %\\)\n\\[left\\]: Index\\(\\['A', 'B'\\], dtype='{dtype}'\\)\n\\[right\\]: Index\\(\\['A', 'b'\\], dtype='{dtype}'\\)"""\n\n df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, index=["a", "b", "c"])\n df2 = DataFrame({"A": [1, 2, 3], "b": [4, 5, 6]}, index=["a", "b", "c"])\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(df1, df2, check_like=check_like, obj=obj_fixture)\n\n\ndef test_frame_equal_block_mismatch(by_blocks_fixture, obj_fixture):\n obj = obj_fixture\n msg = f"""{obj}\\.iloc\\[:, 1\\] \\(column name="B"\\) are different\n\n{obj}\\.iloc\\[:, 1\\] \\(column name="B"\\) values are different \\(33\\.33333 %\\)\n\\[index\\]: \\[0, 1, 2\\]\n\\[left\\]: \\[4, 5, 6\\]\n\\[right\\]: \\[4, 5, 7\\]"""\n\n df1 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})\n df2 = DataFrame({"A": [1, 2, 3], "B": [4, 5, 7]})\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(df1, df2, by_blocks=by_blocks_fixture, obj=obj_fixture)\n\n\n@pytest.mark.parametrize(\n "df1,df2,msg",\n [\n (\n DataFrame({"A": ["á", "à", "ä"], "E": ["é", "è", "ë"]}),\n DataFrame({"A": ["á", "à", "ä"], "E": ["é", "è", "e̊"]}),\n """{obj}\\.iloc\\[:, 1\\] \\(column name="E"\\) are different\n\n{obj}\\.iloc\\[:, 1\\] \\(column name="E"\\) values are different \\(33\\.33333 %\\)\n\\[index\\]: \\[0, 1, 2\\]\n\\[left\\]: \\[é, è, ë\\]\n\\[right\\]: \\[é, è, e̊\\]""",\n ),\n (\n DataFrame({"A": ["á", "à", "ä"], "E": ["é", "è", "ë"]}),\n DataFrame({"A": ["a", "a", "a"], "E": ["e", "e", "e"]}),\n """{obj}\\.iloc\\[:, 0\\] \\(column name="A"\\) are different\n\n{obj}\\.iloc\\[:, 0\\] \\(column name="A"\\) values are different \\(100\\.0 %\\)\n\\[index\\]: \\[0, 1, 2\\]\n\\[left\\]: \\[á, à, ä\\]\n\\[right\\]: \\[a, a, a\\]""",\n ),\n ],\n)\ndef test_frame_equal_unicode(df1, df2, msg, by_blocks_fixture, obj_fixture):\n # see gh-20503\n #\n # Test ensures that 
`tm.assert_frame_equals` raises the right exception\n # when comparing DataFrames containing differing unicode objects.\n msg = msg.format(obj=obj_fixture)\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(df1, df2, by_blocks=by_blocks_fixture, obj=obj_fixture)\n\n\ndef test_assert_frame_equal_extension_dtype_mismatch():\n # https://github.com/pandas-dev/pandas/issues/32747\n left = DataFrame({"a": [1, 2, 3]}, dtype="Int64")\n right = left.astype(int)\n\n msg = (\n "Attributes of DataFrame\\.iloc\\[:, 0\\] "\n '\\(column name="a"\\) are different\n\n'\n 'Attribute "dtype" are different\n'\n "\\[left\\]: Int64\n"\n "\\[right\\]: int[32|64]"\n )\n\n tm.assert_frame_equal(left, right, check_dtype=False)\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(left, right, check_dtype=True)\n\n\ndef test_assert_frame_equal_interval_dtype_mismatch():\n # https://github.com/pandas-dev/pandas/issues/32747\n left = DataFrame({"a": [pd.Interval(0, 1)]}, dtype="interval")\n right = left.astype(object)\n\n msg = (\n "Attributes of DataFrame\\.iloc\\[:, 0\\] "\n '\\(column name="a"\\) are different\n\n'\n 'Attribute "dtype" are different\n'\n "\\[left\\]: interval\\[int64, right\\]\n"\n "\\[right\\]: object"\n )\n\n tm.assert_frame_equal(left, right, check_dtype=False)\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(left, right, check_dtype=True)\n\n\ndef test_assert_frame_equal_ignore_extension_dtype_mismatch():\n # https://github.com/pandas-dev/pandas/issues/35715\n left = DataFrame({"a": [1, 2, 3]}, dtype="Int64")\n right = DataFrame({"a": [1, 2, 3]}, dtype="Int32")\n tm.assert_frame_equal(left, right, check_dtype=False)\n\n\ndef test_assert_frame_equal_ignore_extension_dtype_mismatch_cross_class():\n # https://github.com/pandas-dev/pandas/issues/35715\n left = DataFrame({"a": [1, 2, 3]}, dtype="Int64")\n right = DataFrame({"a": [1, 2, 3]}, dtype="int64")\n tm.assert_frame_equal(left, right, 
check_dtype=False)\n\n\n@pytest.mark.parametrize(\n "dtype",\n [\n ("timedelta64[ns]"),\n ("datetime64[ns, UTC]"),\n ("Period[D]"),\n ],\n)\ndef test_assert_frame_equal_datetime_like_dtype_mismatch(dtype):\n df1 = DataFrame({"a": []}, dtype=dtype)\n df2 = DataFrame({"a": []})\n tm.assert_frame_equal(df1, df2, check_dtype=False)\n\n\ndef test_allows_duplicate_labels():\n left = DataFrame()\n right = DataFrame().set_flags(allows_duplicate_labels=False)\n tm.assert_frame_equal(left, left)\n tm.assert_frame_equal(right, right)\n tm.assert_frame_equal(left, right, check_flags=False)\n tm.assert_frame_equal(right, left, check_flags=False)\n\n with pytest.raises(AssertionError, match="<Flags"):\n tm.assert_frame_equal(left, right)\n\n with pytest.raises(AssertionError, match="<Flags"):\n tm.assert_frame_equal(left, right)\n\n\ndef test_assert_frame_equal_columns_mixed_dtype():\n # GH#39168\n df = DataFrame([[0, 1, 2]], columns=["foo", "bar", 42], index=[1, "test", 2])\n tm.assert_frame_equal(df, df, check_like=True)\n\n\ndef test_frame_equal_extension_dtype(frame_or_series, any_numeric_ea_dtype):\n # GH#39410\n obj = frame_or_series([1, 2], dtype=any_numeric_ea_dtype)\n tm.assert_equal(obj, obj, check_exact=True)\n\n\n@pytest.mark.parametrize("indexer", [(0, 1), (1, 0)])\ndef test_frame_equal_mixed_dtypes(frame_or_series, any_numeric_ea_dtype, indexer):\n dtypes = (any_numeric_ea_dtype, "int64")\n obj1 = frame_or_series([1, 2], dtype=dtypes[indexer[0]])\n obj2 = frame_or_series([1, 2], dtype=dtypes[indexer[1]])\n tm.assert_equal(obj1, obj2, check_exact=True, check_dtype=False)\n\n\ndef test_assert_frame_equal_check_like_different_indexes():\n # GH#39739\n df1 = DataFrame(index=pd.Index([], dtype="object"))\n df2 = DataFrame(index=pd.RangeIndex(start=0, stop=0, step=1))\n with pytest.raises(AssertionError, match="DataFrame.index are different"):\n tm.assert_frame_equal(df1, df2, check_like=True)\n\n\ndef test_assert_frame_equal_checking_allow_dups_flag():\n # GH#45554\n 
left = DataFrame([[1, 2], [3, 4]])\n left.flags.allows_duplicate_labels = False\n\n right = DataFrame([[1, 2], [3, 4]])\n right.flags.allows_duplicate_labels = True\n tm.assert_frame_equal(left, right, check_flags=False)\n\n with pytest.raises(AssertionError, match="allows_duplicate_labels"):\n tm.assert_frame_equal(left, right, check_flags=True)\n\n\ndef test_assert_frame_equal_check_like_categorical_midx():\n # GH#48975\n left = DataFrame(\n [[1], [2], [3]],\n index=pd.MultiIndex.from_arrays(\n [\n pd.Categorical(["a", "b", "c"]),\n pd.Categorical(["a", "b", "c"]),\n ]\n ),\n )\n right = DataFrame(\n [[3], [2], [1]],\n index=pd.MultiIndex.from_arrays(\n [\n pd.Categorical(["c", "b", "a"]),\n pd.Categorical(["c", "b", "a"]),\n ]\n ),\n )\n tm.assert_frame_equal(left, right, check_like=True)\n\n\ndef test_assert_frame_equal_ea_column_definition_in_exception_mask():\n # GH#50323\n df1 = DataFrame({"a": pd.Series([pd.NA, 1], dtype="Int64")})\n df2 = DataFrame({"a": pd.Series([1, 1], dtype="Int64")})\n\n msg = r'DataFrame.iloc\[:, 0\] \(column name="a"\) NA mask values are different'\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(df1, df2)\n\n\ndef test_assert_frame_equal_ea_column_definition_in_exception():\n # GH#50323\n df1 = DataFrame({"a": pd.Series([pd.NA, 1], dtype="Int64")})\n df2 = DataFrame({"a": pd.Series([pd.NA, 2], dtype="Int64")})\n\n msg = r'DataFrame.iloc\[:, 0\] \(column name="a"\) values are different'\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(df1, df2)\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(df1, df2, check_exact=True)\n\n\ndef test_assert_frame_equal_ts_column():\n # GH#50323\n df1 = DataFrame({"a": [pd.Timestamp("2019-12-31"), pd.Timestamp("2020-12-31")]})\n df2 = DataFrame({"a": [pd.Timestamp("2020-12-31"), pd.Timestamp("2020-12-31")]})\n\n msg = r'DataFrame.iloc\[:, 0\] \(column name="a"\) values are different'\n with pytest.raises(AssertionError, 
match=msg):\n tm.assert_frame_equal(df1, df2)\n\n\ndef test_assert_frame_equal_set():\n # GH#51727\n df1 = DataFrame({"set_column": [{1, 2, 3}, {4, 5, 6}]})\n df2 = DataFrame({"set_column": [{1, 2, 3}, {4, 5, 6}]})\n tm.assert_frame_equal(df1, df2)\n\n\ndef test_assert_frame_equal_set_mismatch():\n # GH#51727\n df1 = DataFrame({"set_column": [{1, 2, 3}, {4, 5, 6}]})\n df2 = DataFrame({"set_column": [{1, 2, 3}, {4, 5, 7}]})\n\n msg = r'DataFrame.iloc\[:, 0\] \(column name="set_column"\) values are different'\n with pytest.raises(AssertionError, match=msg):\n tm.assert_frame_equal(df1, df2)\n | .venv\Lib\site-packages\pandas\tests\util\test_assert_frame_equal.py | test_assert_frame_equal.py | Python | 13,376 | 0.95 | 0.083969 | 0.066007 | vue-tools | 200 | 2023-09-04T02:22:47.264287 | MIT | true | f47cc44036cb2579c7d6a7c32b0a1cde |
# Tests for ``pandas._testing.assert_index_equal``: message formatting and the
# exact / check_exact / check_order / check_categorical options.
# (check_exact, rtol, check_order, check_categorical, nulls_fixture, etc. are
# conftest fixtures supplied by the pandas test suite.)
import numpy as np
import pytest

from pandas import (
    NA,
    Categorical,
    CategoricalIndex,
    Index,
    MultiIndex,
    NaT,
    RangeIndex,
)
import pandas._testing as tm


def test_index_equal_levels_mismatch():
    # Flat Index vs MultiIndex: the level-count difference is reported.
    msg = """Index are different

Index levels are different
\\[left\\]:  1, Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: 2, MultiIndex\\(\\[\\('A', 1\\),
            \\('A', 2\\),
            \\('B', 3\\),
            \\('B', 4\\)\\],
           \\)"""

    idx1 = Index([1, 2, 3])
    idx2 = MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3), ("B", 4)])

    with pytest.raises(AssertionError, match=msg):
        tm.assert_index_equal(idx1, idx2, exact=False)


def test_index_equal_values_mismatch(check_exact):
    msg = """MultiIndex level \\[1\\] are different

MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
\\[left\\]:  Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""

    idx1 = MultiIndex.from_tuples([("A", 2), ("A", 2), ("B", 3), ("B", 4)])
    idx2 = MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3), ("B", 4)])

    with pytest.raises(AssertionError, match=msg):
        tm.assert_index_equal(idx1, idx2, check_exact=check_exact)


def test_index_equal_length_mismatch(check_exact):
    msg = """Index are different

Index length are different
\\[left\\]:  3, Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: 4, Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""

    idx1 = Index([1, 2, 3])
    idx2 = Index([1, 2, 3, 4])

    with pytest.raises(AssertionError, match=msg):
        tm.assert_index_equal(idx1, idx2, check_exact=check_exact)


@pytest.mark.parametrize("exact", [False, "equiv"])
def test_index_equal_class(exact):
    # exact=False / "equiv" treats RangeIndex and an equivalent int Index as equal.
    idx1 = Index([0, 1, 2])
    idx2 = RangeIndex(3)

    tm.assert_index_equal(idx1, idx2, exact=exact)


def test_int_float_index_equal_class_mismatch(check_exact):
    msg = """Index are different

Attribute "inferred_type" are different
\\[left\\]:  integer
\\[right\\]: floating"""

    idx1 = Index([1, 2, 3])
    idx2 = Index([1, 2, 3], dtype=np.float64)

    with pytest.raises(AssertionError, match=msg):
        tm.assert_index_equal(idx1, idx2, exact=True, check_exact=check_exact)


def test_range_index_equal_class_mismatch(check_exact):
    msg = """Index are different

Index classes are different
\\[left\\]:  Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: """

    idx1 = Index([1, 2, 3])
    idx2 = RangeIndex(range(3))

    with pytest.raises(AssertionError, match=msg):
        tm.assert_index_equal(idx1, idx2, exact=True, check_exact=check_exact)


def test_index_equal_values_close(check_exact):
    # Values within default rtol pass unless check_exact forces bit-equality.
    idx1 = Index([1, 2, 3.0])
    idx2 = Index([1, 2, 3.0000000001])

    if check_exact:
        msg = """Index are different

Index values are different \\(33\\.33333 %\\)
\\[left\\]:  Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
\\[right\\]: Index\\(\\[1.0, 2.0, 3.0000000001\\], dtype='float64'\\)"""

        with pytest.raises(AssertionError, match=msg):
            tm.assert_index_equal(idx1, idx2, check_exact=check_exact)
    else:
        tm.assert_index_equal(idx1, idx2, check_exact=check_exact)


def test_index_equal_values_less_close(check_exact, rtol):
    # A 1e-4 difference passes only for a sufficiently loose rtol.
    idx1 = Index([1, 2, 3.0])
    idx2 = Index([1, 2, 3.0001])
    kwargs = {"check_exact": check_exact, "rtol": rtol}

    if check_exact or rtol < 0.5e-3:
        msg = """Index are different

Index values are different \\(33\\.33333 %\\)
\\[left\\]:  Index\\(\\[1.0, 2.0, 3.0], dtype='float64'\\)
\\[right\\]: Index\\(\\[1.0, 2.0, 3.0001\\], dtype='float64'\\)"""

        with pytest.raises(AssertionError, match=msg):
            tm.assert_index_equal(idx1, idx2, **kwargs)
    else:
        tm.assert_index_equal(idx1, idx2, **kwargs)


def test_index_equal_values_too_far(check_exact, rtol):
    # Integer mismatch fails regardless of tolerance settings.
    idx1 = Index([1, 2, 3])
    idx2 = Index([1, 2, 4])
    kwargs = {"check_exact": check_exact, "rtol": rtol}

    msg = """Index are different

Index values are different \\(33\\.33333 %\\)
\\[left\\]:  Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Index\\(\\[1, 2, 4\\], dtype='int64'\\)"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_index_equal(idx1, idx2, **kwargs)


@pytest.mark.parametrize("check_order", [True, False])
def test_index_equal_value_order_mismatch(check_exact, rtol, check_order):
    # check_order=False sorts both sides before comparing.
    idx1 = Index([1, 2, 3])
    idx2 = Index([3, 2, 1])

    msg = """Index are different

Index values are different \\(66\\.66667 %\\)
\\[left\\]:  Index\\(\\[1, 2, 3\\], dtype='int64'\\)
\\[right\\]: Index\\(\\[3, 2, 1\\], dtype='int64'\\)"""

    if check_order:
        with pytest.raises(AssertionError, match=msg):
            tm.assert_index_equal(
                idx1, idx2, check_exact=check_exact, rtol=rtol, check_order=True
            )
    else:
        tm.assert_index_equal(
            idx1, idx2, check_exact=check_exact, rtol=rtol, check_order=False
        )


def test_index_equal_level_values_mismatch(check_exact, rtol):
    idx1 = MultiIndex.from_tuples([("A", 2), ("A", 2), ("B", 3), ("B", 4)])
    idx2 = MultiIndex.from_tuples([("A", 1), ("A", 2), ("B", 3), ("B", 4)])
    kwargs = {"check_exact": check_exact, "rtol": rtol}

    msg = """MultiIndex level \\[1\\] are different

MultiIndex level \\[1\\] values are different \\(25\\.0 %\\)
\\[left\\]:  Index\\(\\[2, 2, 3, 4\\], dtype='int64'\\)
\\[right\\]: Index\\(\\[1, 2, 3, 4\\], dtype='int64'\\)"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_index_equal(idx1, idx2, **kwargs)


@pytest.mark.parametrize(
    "name1,name2",
    [(None, "x"), ("x", "x"), (np.nan, np.nan), (NaT, NaT), (np.nan, NaT)],
)
def test_index_equal_names(name1, name2):
    idx1 = Index([1, 2, 3], name=name1)
    idx2 = Index([1, 2, 3], name=name2)

    # `==` handles ordinary names; `is` handles NaN/NaT (not equal to themselves).
    if name1 == name2 or name1 is name2:
        tm.assert_index_equal(idx1, idx2)
    else:
        name1 = "'x'" if name1 == "x" else name1
        name2 = "'x'" if name2 == "x" else name2
        msg = f"""Index are different

Attribute "names" are different
\\[left\\]:  \\[{name1}\\]
\\[right\\]: \\[{name2}\\]"""

        with pytest.raises(AssertionError, match=msg):
            tm.assert_index_equal(idx1, idx2)


def test_index_equal_category_mismatch(check_categorical, using_infer_string):
    # The reported categories_dtype depends on the infer-string option.
    if using_infer_string:
        dtype = "str"
    else:
        dtype = "object"
    msg = f"""Index are different

Attribute "dtype" are different
\\[left\\]:  CategoricalDtype\\(categories=\\['a', 'b'\\], ordered=False, \\ncategories_dtype={dtype}\\)
\\[right\\]: CategoricalDtype\\(categories=\\['a', 'b', 'c'\\], \\nordered=False, categories_dtype={dtype}\\)"""

    idx1 = Index(Categorical(["a", "b"]))
    idx2 = Index(Categorical(["a", "b"], categories=["a", "b", "c"]))

    if check_categorical:
        with pytest.raises(AssertionError, match=msg):
            tm.assert_index_equal(idx1, idx2, check_categorical=check_categorical)
    else:
        tm.assert_index_equal(idx1, idx2, check_categorical=check_categorical)


@pytest.mark.parametrize("exact", [False, True])
def test_index_equal_range_categories(check_categorical, exact):
    # GH41263
    msg = """\\nIndex are different

Index classes are different
\\[left\\]:  RangeIndex\\(start=0, stop=10, step=1\\)
\\[right\\]: Index\\(\\[0, 1, 2, 3, 4, 5, 6, 7, 8, 9\\], dtype='int64'\\)"""

    rcat = CategoricalIndex(RangeIndex(10))
    icat = CategoricalIndex(list(range(10)))

    if check_categorical and exact:
        with pytest.raises(AssertionError, match=msg):
            tm.assert_index_equal(rcat, icat, check_categorical=True, exact=True)
    else:
        tm.assert_index_equal(
            rcat, icat, check_categorical=check_categorical, exact=exact
        )


def test_assert_index_equal_different_inferred_types():
    # GH#31884
    msg = """\\nIndex are different

Attribute "inferred_type" are different
\\[left\\]:  mixed
\\[right\\]: datetime"""

    idx1 = Index([NA, np.datetime64("nat")])
    idx2 = Index([NA, NaT])
    with pytest.raises(AssertionError, match=msg):
        tm.assert_index_equal(idx1, idx2)


def test_assert_index_equal_different_names_check_order_false():
    # GH#47328 — check_order=False still checks names.
    idx1 = Index([1, 3], name="a")
    idx2 = Index([3, 1], name="b")
    with pytest.raises(AssertionError, match='"names" are different'):
        tm.assert_index_equal(idx1, idx2, check_order=False, check_names=True)


def test_assert_index_equal_mixed_dtype():
    # GH#39168
    idx = Index(["foo", "bar", 42])
    tm.assert_index_equal(idx, idx, check_order=False)


def test_assert_index_equal_ea_dtype_order_false(any_numeric_ea_dtype):
    # GH#47207
    idx1 = Index([1, 3], dtype=any_numeric_ea_dtype)
    idx2 = Index([3, 1], dtype=any_numeric_ea_dtype)
    tm.assert_index_equal(idx1, idx2, check_order=False)


def test_assert_index_equal_object_ints_order_false():
    # GH#47207
    idx1 = Index([1, 3], dtype="object")
    idx2 = Index([3, 1], dtype="object")
    tm.assert_index_equal(idx1, idx2, check_order=False)


@pytest.mark.parametrize("check_categorical", [True, False])
@pytest.mark.parametrize("check_names", [True, False])
def test_assert_ea_index_equal_non_matching_na(check_names, check_categorical):
    # GH#48608
    idx1 = Index([1, 2], dtype="Int64")
    idx2 = Index([1, NA], dtype="Int64")
    with pytest.raises(AssertionError, match="50.0 %"):
        tm.assert_index_equal(
            idx1, idx2, check_names=check_names, check_categorical=check_categorical
        )


@pytest.mark.parametrize("check_categorical", [True, False])
def test_assert_multi_index_dtype_check_categorical(check_categorical):
    # GH#52126 — categorical levels with differing code dtypes.
    idx1 = MultiIndex.from_arrays([Categorical(np.array([1, 2], dtype=np.uint64))])
    idx2 = MultiIndex.from_arrays([Categorical(np.array([1, 2], dtype=np.int64))])
    if check_categorical:
        with pytest.raises(
            AssertionError, match=r"^MultiIndex level \[0\] are different"
        ):
            tm.assert_index_equal(idx1, idx2, check_categorical=check_categorical)
    else:
        tm.assert_index_equal(idx1, idx2, check_categorical=check_categorical)

# dataset metadata row (preserved verbatim):
# .venv\Lib\site-packages\pandas\tests\util\test_assert_index_equal.py | test_assert_index_equal.py | Python | 10,154 | 0.95 | 0.097179 | 0.034188 | vue-tools | 709 | 2023-08-14T19:47:40.514002 | MIT | true | 23d7a0b28d96b288f0ce198e074abcc0
# Tests for ``pandas._testing.assert_interval_array_equal``: equality and the
# error messages for closed-side, length, and endpoint mismatches.
import pytest

from pandas import interval_range
import pandas._testing as tm


@pytest.mark.parametrize(
    "kwargs",
    [
        {"start": 0, "periods": 4},
        {"start": 1, "periods": 5},
        {"start": 5, "end": 10, "closed": "left"},
    ],
)
def test_interval_array_equal(kwargs):
    # An IntervalArray always compares equal to itself.
    arr = interval_range(**kwargs).values
    tm.assert_interval_array_equal(arr, arr)


def test_interval_array_equal_closed_mismatch():
    kwargs = {"start": 0, "periods": 5}
    arr1 = interval_range(closed="left", **kwargs).values
    arr2 = interval_range(closed="right", **kwargs).values

    msg = """\\nIntervalArray are different

Attribute "closed" are different
\\[left\\]:  left
\\[right\\]: right"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_interval_array_equal(arr1, arr2)


def test_interval_array_equal_periods_mismatch():
    kwargs = {"start": 0}
    arr1 = interval_range(periods=5, **kwargs).values
    arr2 = interval_range(periods=6, **kwargs).values

    msg = """\\nIntervalArray.left are different

IntervalArray.left shapes are different
\\[left\\]:  \\(5,\\)
\\[right\\]: \\(6,\\)"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_interval_array_equal(arr1, arr2)


def test_interval_array_equal_end_mismatch():
    # Same start/periods but different end => different step => value mismatch.
    kwargs = {"start": 0, "periods": 5}
    arr1 = interval_range(end=10, **kwargs).values
    arr2 = interval_range(end=20, **kwargs).values

    msg = """\\nIntervalArray.left are different

IntervalArray.left values are different \\(80.0 %\\)
\\[left\\]:  \\[0, 2, 4, 6, 8\\]
\\[right\\]: \\[0, 4, 8, 12, 16\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_interval_array_equal(arr1, arr2)


def test_interval_array_equal_start_mismatch():
    kwargs = {"periods": 4}
    arr1 = interval_range(start=0, **kwargs).values
    arr2 = interval_range(start=1, **kwargs).values

    msg = """\\nIntervalArray.left are different

IntervalArray.left values are different \\(100.0 %\\)
\\[left\\]:  \\[0, 1, 2, 3\\]
\\[right\\]: \\[1, 2, 3, 4\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_interval_array_equal(arr1, arr2)

# dataset metadata row (preserved verbatim):
# .venv\Lib\site-packages\pandas\tests\util\test_assert_interval_array_equal.py | test_assert_interval_array_equal.py | Python | 2,158 | 0.85 | 0.061728 | 0 | node-utils | 976 | 2025-05-25T12:59:53.981640 | Apache-2.0 | true | d25620f1530b40453e20fd77ccd3eb76
# Tests for ``pandas._testing.assert_numpy_array_equal``: shape/class/value
# mismatch reporting, the check_same identity option, and NA handling.
import copy

import numpy as np
import pytest

import pandas as pd
from pandas import Timestamp
import pandas._testing as tm


def test_assert_numpy_array_equal_shape_mismatch():
    # `L*` in the pattern tolerates the historical long-suffix in shape reprs.
    msg = """numpy array are different

numpy array shapes are different
\\[left\\]:  \\(2L*,\\)
\\[right\\]: \\(3L*,\\)"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_numpy_array_equal(np.array([1, 2]), np.array([3, 4, 5]))


def test_assert_numpy_array_equal_bad_type():
    # Non-ndarray inputs are rejected outright.
    expected = "Expected type"

    with pytest.raises(AssertionError, match=expected):
        tm.assert_numpy_array_equal(1, 2)


@pytest.mark.parametrize(
    "a,b,klass1,klass2",
    [(np.array([1]), 1, "ndarray", "int"), (1, np.array([1]), "int", "ndarray")],
)
def test_assert_numpy_array_equal_class_mismatch(a, b, klass1, klass2):
    msg = f"""numpy array are different

numpy array classes are different
\\[left\\]:  {klass1}
\\[right\\]: {klass2}"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_numpy_array_equal(a, b)


def test_assert_numpy_array_equal_value_mismatch1():
    msg = """numpy array are different

numpy array values are different \\(66\\.66667 %\\)
\\[left\\]:  \\[nan, 2\\.0, 3\\.0\\]
\\[right\\]: \\[1\\.0, nan, 3\\.0\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_numpy_array_equal(np.array([np.nan, 2, 3]), np.array([1, np.nan, 3]))


def test_assert_numpy_array_equal_value_mismatch2():
    msg = """numpy array are different

numpy array values are different \\(50\\.0 %\\)
\\[left\\]:  \\[1, 2\\]
\\[right\\]: \\[1, 3\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_numpy_array_equal(np.array([1, 2]), np.array([1, 3]))


def test_assert_numpy_array_equal_value_mismatch3():
    msg = """numpy array are different

numpy array values are different \\(16\\.66667 %\\)
\\[left\\]:  \\[\\[1, 2\\], \\[3, 4\\], \\[5, 6\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\], \\[5, 6\\]\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_numpy_array_equal(
            np.array([[1, 2], [3, 4], [5, 6]]), np.array([[1, 3], [3, 4], [5, 6]])
        )


def test_assert_numpy_array_equal_value_mismatch4():
    msg = """numpy array are different

numpy array values are different \\(50\\.0 %\\)
\\[left\\]:  \\[1\\.1, 2\\.000001\\]
\\[right\\]: \\[1\\.1, 2.0\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_numpy_array_equal(np.array([1.1, 2.000001]), np.array([1.1, 2.0]))


def test_assert_numpy_array_equal_value_mismatch5():
    msg = """numpy array are different

numpy array values are different \\(16\\.66667 %\\)
\\[left\\]:  \\[\\[1, 2\\], \\[3, 4\\], \\[5, 6\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\], \\[5, 6\\]\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_numpy_array_equal(
            np.array([[1, 2], [3, 4], [5, 6]]), np.array([[1, 3], [3, 4], [5, 6]])
        )


def test_assert_numpy_array_equal_value_mismatch6():
    msg = """numpy array are different

numpy array values are different \\(25\\.0 %\\)
\\[left\\]:  \\[\\[1, 2\\], \\[3, 4\\]\\]
\\[right\\]: \\[\\[1, 3\\], \\[3, 4\\]\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_numpy_array_equal(
            np.array([[1, 2], [3, 4]]), np.array([[1, 3], [3, 4]])
        )


def test_assert_numpy_array_equal_shape_mismatch_override():
    # The obj= argument replaces "numpy array" in the message header.
    msg = """Index are different

Index shapes are different
\\[left\\]:  \\(2L*,\\)
\\[right\\]: \\(3L*,\\)"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_numpy_array_equal(np.array([1, 2]), np.array([3, 4, 5]), obj="Index")


def test_numpy_array_equal_unicode():
    # see gh-20503
    #
    # Test ensures that `tm.assert_numpy_array_equals` raises the right
    # exception when comparing np.arrays containing differing unicode objects.
    msg = """numpy array are different

numpy array values are different \\(33\\.33333 %\\)
\\[left\\]:  \\[á, à, ä\\]
\\[right\\]: \\[á, à, å\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_numpy_array_equal(
            np.array(["á", "à", "ä"]), np.array(["á", "à", "å"])
        )


def test_numpy_array_equal_object():
    a = np.array([Timestamp("2011-01-01"), Timestamp("2011-01-01")])
    b = np.array([Timestamp("2011-01-01"), Timestamp("2011-01-02")])

    msg = """numpy array are different

numpy array values are different \\(50\\.0 %\\)
\\[left\\]:  \\[2011-01-01 00:00:00, 2011-01-01 00:00:00\\]
\\[right\\]: \\[2011-01-01 00:00:00, 2011-01-02 00:00:00\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_numpy_array_equal(a, b)


@pytest.mark.parametrize("other_type", ["same", "copy"])
@pytest.mark.parametrize("check_same", ["same", "copy"])
def test_numpy_array_equal_copy_flag(other_type, check_same):
    # check_same asserts on object identity: a view is "same", a copy is not.
    a = np.array([1, 2, 3])
    msg = None

    if other_type == "same":
        other = a.view()
    else:
        other = a.copy()

    if check_same != other_type:
        msg = (
            r"array\(\[1, 2, 3\]\) is not array\(\[1, 2, 3\]\)"
            if check_same == "same"
            else r"array\(\[1, 2, 3\]\) is array\(\[1, 2, 3\]\)"
        )

    if msg is not None:
        with pytest.raises(AssertionError, match=msg):
            tm.assert_numpy_array_equal(a, other, check_same=check_same)
    else:
        tm.assert_numpy_array_equal(a, other, check_same=check_same)


def test_numpy_array_equal_contains_na():
    # https://github.com/pandas-dev/pandas/issues/31881
    a = np.array([True, False])
    b = np.array([True, pd.NA], dtype=object)

    msg = """numpy array are different

numpy array values are different \\(50.0 %\\)
\\[left\\]:  \\[True, False\\]
\\[right\\]: \\[True, <NA>\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_numpy_array_equal(a, b)


def test_numpy_array_equal_identical_na(nulls_fixture):
    a = np.array([nulls_fixture], dtype=object)

    tm.assert_numpy_array_equal(a, a)

    # matching but not the identical object
    if hasattr(nulls_fixture, "copy"):
        other = nulls_fixture.copy()
    else:
        other = copy.copy(nulls_fixture)
    b = np.array([other], dtype=object)
    tm.assert_numpy_array_equal(a, b)


def test_numpy_array_equal_different_na():
    # np.nan and pd.NA are distinct missing-value sentinels.
    a = np.array([np.nan], dtype=object)
    b = np.array([pd.NA], dtype=object)

    msg = """numpy array are different

numpy array values are different \\(100.0 %\\)
\\[left\\]:  \\[nan\\]
\\[right\\]: \\[<NA>\\]"""

    with pytest.raises(AssertionError, match=msg):
        tm.assert_numpy_array_equal(a, b)

# dataset metadata row (preserved verbatim):
# .venv\Lib\site-packages\pandas\tests\util\test_assert_numpy_array_equal.py | test_assert_numpy_array_equal.py | Python | 6,624 | 0.95 | 0.09417 | 0.038961 | python-kit | 772 | 2023-11-19T23:27:05.742231 | BSD-3-Clause | true | 7678c490d37adf9e6c74c57c63728ea5
""""\nTest module for testing ``pandas._testing.assert_produces_warning``.\n"""\nimport warnings\n\nimport pytest\n\nfrom pandas.errors import (\n DtypeWarning,\n PerformanceWarning,\n)\n\nimport pandas._testing as tm\n\n\n@pytest.fixture(\n params=[\n RuntimeWarning,\n ResourceWarning,\n UserWarning,\n FutureWarning,\n DeprecationWarning,\n PerformanceWarning,\n DtypeWarning,\n ],\n)\ndef category(request):\n """\n Return unique warning.\n\n Useful for testing behavior of tm.assert_produces_warning with various categories.\n """\n return request.param\n\n\n@pytest.fixture(\n params=[\n (RuntimeWarning, UserWarning),\n (UserWarning, FutureWarning),\n (FutureWarning, RuntimeWarning),\n (DeprecationWarning, PerformanceWarning),\n (PerformanceWarning, FutureWarning),\n (DtypeWarning, DeprecationWarning),\n (ResourceWarning, DeprecationWarning),\n (FutureWarning, DeprecationWarning),\n ],\n ids=lambda x: type(x).__name__,\n)\ndef pair_different_warnings(request):\n """\n Return pair or different warnings.\n\n Useful for testing how several different warnings are handled\n in tm.assert_produces_warning.\n """\n return request.param\n\n\ndef f():\n warnings.warn("f1", FutureWarning)\n warnings.warn("f2", RuntimeWarning)\n\n\n@pytest.mark.filterwarnings("ignore:f1:FutureWarning")\ndef test_assert_produces_warning_honors_filter():\n # Raise by default.\n msg = r"Caused unexpected warning\(s\)"\n with pytest.raises(AssertionError, match=msg):\n with tm.assert_produces_warning(RuntimeWarning):\n f()\n\n with tm.assert_produces_warning(RuntimeWarning, raise_on_extra_warnings=False):\n f()\n\n\n@pytest.mark.parametrize(\n "message, match",\n [\n ("", None),\n ("", ""),\n ("Warning message", r".*"),\n ("Warning message", "War"),\n ("Warning message", r"[Ww]arning"),\n ("Warning message", "age"),\n ("Warning message", r"age$"),\n ("Message 12-234 with numbers", r"\d{2}-\d{3}"),\n ("Message 12-234 with numbers", r"^Mes.*\d{2}-\d{3}"),\n ("Message 12-234 with numbers", 
r"\d{2}-\d{3}\s\S+"),\n ("Message, which we do not match", None),\n ],\n)\ndef test_catch_warning_category_and_match(category, message, match):\n with tm.assert_produces_warning(category, match=match):\n warnings.warn(message, category)\n\n\ndef test_fail_to_match_runtime_warning():\n category = RuntimeWarning\n match = "Did not see this warning"\n unmatched = (\n r"Did not see warning 'RuntimeWarning' matching 'Did not see this warning'. "\n r"The emitted warning messages are "\n r"\[RuntimeWarning\('This is not a match.'\), "\n r"RuntimeWarning\('Another unmatched warning.'\)\]"\n )\n with pytest.raises(AssertionError, match=unmatched):\n with tm.assert_produces_warning(category, match=match):\n warnings.warn("This is not a match.", category)\n warnings.warn("Another unmatched warning.", category)\n\n\ndef test_fail_to_match_future_warning():\n category = FutureWarning\n match = "Warning"\n unmatched = (\n r"Did not see warning 'FutureWarning' matching 'Warning'. "\n r"The emitted warning messages are "\n r"\[FutureWarning\('This is not a match.'\), "\n r"FutureWarning\('Another unmatched warning.'\)\]"\n )\n with pytest.raises(AssertionError, match=unmatched):\n with tm.assert_produces_warning(category, match=match):\n warnings.warn("This is not a match.", category)\n warnings.warn("Another unmatched warning.", category)\n\n\ndef test_fail_to_match_resource_warning():\n category = ResourceWarning\n match = r"\d+"\n unmatched = (\n r"Did not see warning 'ResourceWarning' matching '\\d\+'. 
"\n r"The emitted warning messages are "\n r"\[ResourceWarning\('This is not a match.'\), "\n r"ResourceWarning\('Another unmatched warning.'\)\]"\n )\n with pytest.raises(AssertionError, match=unmatched):\n with tm.assert_produces_warning(category, match=match):\n warnings.warn("This is not a match.", category)\n warnings.warn("Another unmatched warning.", category)\n\n\ndef test_fail_to_catch_actual_warning(pair_different_warnings):\n expected_category, actual_category = pair_different_warnings\n match = "Did not see expected warning of class"\n with pytest.raises(AssertionError, match=match):\n with tm.assert_produces_warning(expected_category):\n warnings.warn("warning message", actual_category)\n\n\ndef test_ignore_extra_warning(pair_different_warnings):\n expected_category, extra_category = pair_different_warnings\n with tm.assert_produces_warning(expected_category, raise_on_extra_warnings=False):\n warnings.warn("Expected warning", expected_category)\n warnings.warn("Unexpected warning OK", extra_category)\n\n\ndef test_raise_on_extra_warning(pair_different_warnings):\n expected_category, extra_category = pair_different_warnings\n match = r"Caused unexpected warning\(s\)"\n with pytest.raises(AssertionError, match=match):\n with tm.assert_produces_warning(expected_category):\n warnings.warn("Expected warning", expected_category)\n warnings.warn("Unexpected warning NOT OK", extra_category)\n\n\ndef test_same_category_different_messages_first_match():\n category = UserWarning\n with tm.assert_produces_warning(category, match=r"^Match this"):\n warnings.warn("Match this", category)\n warnings.warn("Do not match that", category)\n warnings.warn("Do not match that either", category)\n\n\ndef test_same_category_different_messages_last_match():\n category = DeprecationWarning\n with tm.assert_produces_warning(category, match=r"^Match this"):\n warnings.warn("Do not match that", category)\n warnings.warn("Do not match that either", category)\n warnings.warn("Match 
this", category)\n\n\ndef test_match_multiple_warnings():\n # https://github.com/pandas-dev/pandas/issues/47829\n category = (FutureWarning, UserWarning)\n with tm.assert_produces_warning(category, match=r"^Match this"):\n warnings.warn("Match this", FutureWarning)\n warnings.warn("Match this too", UserWarning)\n\n\ndef test_right_category_wrong_match_raises(pair_different_warnings):\n target_category, other_category = pair_different_warnings\n with pytest.raises(AssertionError, match="Did not see warning.*matching"):\n with tm.assert_produces_warning(target_category, match=r"^Match this"):\n warnings.warn("Do not match it", target_category)\n warnings.warn("Match this", other_category)\n\n\n@pytest.mark.parametrize("false_or_none", [False, None])\nclass TestFalseOrNoneExpectedWarning:\n def test_raise_on_warning(self, false_or_none):\n msg = r"Caused unexpected warning\(s\)"\n with pytest.raises(AssertionError, match=msg):\n with tm.assert_produces_warning(false_or_none):\n f()\n\n def test_no_raise_without_warning(self, false_or_none):\n with tm.assert_produces_warning(false_or_none):\n pass\n\n def test_no_raise_with_false_raise_on_extra(self, false_or_none):\n with tm.assert_produces_warning(false_or_none, raise_on_extra_warnings=False):\n f()\n\n\ndef test_raises_during_exception():\n msg = "Did not see expected warning of class 'UserWarning'"\n with pytest.raises(AssertionError, match=msg):\n with tm.assert_produces_warning(UserWarning):\n raise ValueError\n\n with pytest.raises(AssertionError, match=msg):\n with tm.assert_produces_warning(UserWarning):\n warnings.warn("FutureWarning", FutureWarning)\n raise IndexError\n\n msg = "Caused unexpected warning"\n with pytest.raises(AssertionError, match=msg):\n with tm.assert_produces_warning(None):\n warnings.warn("FutureWarning", FutureWarning)\n raise SystemError\n\n\ndef test_passes_during_exception():\n with pytest.raises(SyntaxError, match="Error"):\n with tm.assert_produces_warning(None):\n raise 
SyntaxError("Error")\n\n with pytest.raises(ValueError, match="Error"):\n with tm.assert_produces_warning(FutureWarning, match="FutureWarning"):\n warnings.warn("FutureWarning", FutureWarning)\n raise ValueError("Error")\n | .venv\Lib\site-packages\pandas\tests\util\test_assert_produces_warning.py | test_assert_produces_warning.py | Python | 8,412 | 0.95 | 0.107884 | 0.010309 | react-lib | 410 | 2024-02-20T08:42:46.356347 | BSD-3-Clause | true | c6ec908d541b5bb48ea4f29e4ea8140d |
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n Series,\n)\nimport pandas._testing as tm\n\n\ndef _assert_series_equal_both(a, b, **kwargs):\n """\n Check that two Series equal.\n\n This check is performed commutatively.\n\n Parameters\n ----------\n a : Series\n The first Series to compare.\n b : Series\n The second Series to compare.\n kwargs : dict\n The arguments passed to `tm.assert_series_equal`.\n """\n tm.assert_series_equal(a, b, **kwargs)\n tm.assert_series_equal(b, a, **kwargs)\n\n\ndef _assert_not_series_equal(a, b, **kwargs):\n """\n Check that two Series are not equal.\n\n Parameters\n ----------\n a : Series\n The first Series to compare.\n b : Series\n The second Series to compare.\n kwargs : dict\n The arguments passed to `tm.assert_series_equal`.\n """\n try:\n tm.assert_series_equal(a, b, **kwargs)\n msg = "The two Series were equal when they shouldn't have been"\n\n pytest.fail(msg=msg)\n except AssertionError:\n pass\n\n\ndef _assert_not_series_equal_both(a, b, **kwargs):\n """\n Check that two Series are not equal.\n\n This check is performed commutatively.\n\n Parameters\n ----------\n a : Series\n The first Series to compare.\n b : Series\n The second Series to compare.\n kwargs : dict\n The arguments passed to `tm.assert_series_equal`.\n """\n _assert_not_series_equal(a, b, **kwargs)\n _assert_not_series_equal(b, a, **kwargs)\n\n\n@pytest.mark.parametrize("data", [range(3), list("abc"), list("áàä")])\ndef test_series_equal(data):\n _assert_series_equal_both(Series(data), Series(data))\n\n\n@pytest.mark.parametrize(\n "data1,data2",\n [\n (range(3), range(1, 4)),\n (list("abc"), list("xyz")),\n (list("áàä"), list("éèë")),\n (list("áàä"), list(b"aaa")),\n (range(3), range(4)),\n ],\n)\ndef test_series_not_equal_value_mismatch(data1, data2):\n _assert_not_series_equal_both(Series(data1), Series(data2))\n\n\n@pytest.mark.parametrize(\n "kwargs",\n [\n {"dtype": "float64"}, # dtype 
mismatch\n {"index": [1, 2, 4]}, # index mismatch\n {"name": "foo"}, # name mismatch\n ],\n)\ndef test_series_not_equal_metadata_mismatch(kwargs):\n data = range(3)\n s1 = Series(data)\n\n s2 = Series(data, **kwargs)\n _assert_not_series_equal_both(s1, s2)\n\n\n@pytest.mark.parametrize("data1,data2", [(0.12345, 0.12346), (0.1235, 0.1236)])\n@pytest.mark.parametrize("dtype", ["float32", "float64", "Float32"])\n@pytest.mark.parametrize("decimals", [0, 1, 2, 3, 5, 10])\ndef test_less_precise(data1, data2, dtype, decimals):\n rtol = 10**-decimals\n s1 = Series([data1], dtype=dtype)\n s2 = Series([data2], dtype=dtype)\n\n if decimals in (5, 10) or (decimals >= 3 and abs(data1 - data2) >= 0.0005):\n msg = "Series values are different"\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(s1, s2, rtol=rtol)\n else:\n _assert_series_equal_both(s1, s2, rtol=rtol)\n\n\n@pytest.mark.parametrize(\n "s1,s2,msg",\n [\n # Index\n (\n Series(["l1", "l2"], index=[1, 2]),\n Series(["l1", "l2"], index=[1.0, 2.0]),\n "Series\\.index are different",\n ),\n # MultiIndex\n (\n DataFrame.from_records(\n {"a": [1, 2], "b": [2.1, 1.5], "c": ["l1", "l2"]}, index=["a", "b"]\n ).c,\n DataFrame.from_records(\n {"a": [1.0, 2.0], "b": [2.1, 1.5], "c": ["l1", "l2"]}, index=["a", "b"]\n ).c,\n "MultiIndex level \\[0\\] are different",\n ),\n ],\n)\ndef test_series_equal_index_dtype(s1, s2, msg, check_index_type):\n kwargs = {"check_index_type": check_index_type}\n\n if check_index_type:\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(s1, s2, **kwargs)\n else:\n tm.assert_series_equal(s1, s2, **kwargs)\n\n\n@pytest.mark.parametrize("check_like", [True, False])\ndef test_series_equal_order_mismatch(check_like):\n s1 = Series([1, 2, 3], index=["a", "b", "c"])\n s2 = Series([3, 2, 1], index=["c", "b", "a"])\n\n if not check_like: # Do not ignore index ordering.\n with pytest.raises(AssertionError, match="Series.index are different"):\n 
tm.assert_series_equal(s1, s2, check_like=check_like)\n else:\n _assert_series_equal_both(s1, s2, check_like=check_like)\n\n\n@pytest.mark.parametrize("check_index", [True, False])\ndef test_series_equal_index_mismatch(check_index):\n s1 = Series([1, 2, 3], index=["a", "b", "c"])\n s2 = Series([1, 2, 3], index=["c", "b", "a"])\n\n if check_index: # Do not ignore index.\n with pytest.raises(AssertionError, match="Series.index are different"):\n tm.assert_series_equal(s1, s2, check_index=check_index)\n else:\n _assert_series_equal_both(s1, s2, check_index=check_index)\n\n\ndef test_series_invalid_param_combination():\n left = Series(dtype=object)\n right = Series(dtype=object)\n with pytest.raises(\n ValueError, match="check_like must be False if check_index is False"\n ):\n tm.assert_series_equal(left, right, check_index=False, check_like=True)\n\n\ndef test_series_equal_length_mismatch(rtol):\n msg = """Series are different\n\nSeries length are different\n\\[left\\]: 3, RangeIndex\\(start=0, stop=3, step=1\\)\n\\[right\\]: 4, RangeIndex\\(start=0, stop=4, step=1\\)"""\n\n s1 = Series([1, 2, 3])\n s2 = Series([1, 2, 3, 4])\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(s1, s2, rtol=rtol)\n\n\ndef test_series_equal_numeric_values_mismatch(rtol):\n msg = """Series are different\n\nSeries values are different \\(33\\.33333 %\\)\n\\[index\\]: \\[0, 1, 2\\]\n\\[left\\]: \\[1, 2, 3\\]\n\\[right\\]: \\[1, 2, 4\\]"""\n\n s1 = Series([1, 2, 3])\n s2 = Series([1, 2, 4])\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(s1, s2, rtol=rtol)\n\n\ndef test_series_equal_categorical_values_mismatch(rtol, using_infer_string):\n if using_infer_string:\n msg = """Series are different\n\nSeries values are different \\(66\\.66667 %\\)\n\\[index\\]: \\[0, 1, 2\\]\n\\[left\\]: \\['a', 'b', 'c'\\]\nCategories \\(3, str\\): \\[a, b, c\\]\n\\[right\\]: \\['a', 'c', 'b'\\]\nCategories \\(3, str\\): \\[a, b, c\\]"""\n else:\n msg = 
"""Series are different\n\nSeries values are different \\(66\\.66667 %\\)\n\\[index\\]: \\[0, 1, 2\\]\n\\[left\\]: \\['a', 'b', 'c'\\]\nCategories \\(3, object\\): \\['a', 'b', 'c'\\]\n\\[right\\]: \\['a', 'c', 'b'\\]\nCategories \\(3, object\\): \\['a', 'b', 'c'\\]"""\n\n s1 = Series(Categorical(["a", "b", "c"]))\n s2 = Series(Categorical(["a", "c", "b"]))\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(s1, s2, rtol=rtol)\n\n\ndef test_series_equal_datetime_values_mismatch(rtol):\n msg = """Series are different\n\nSeries values are different \\(100.0 %\\)\n\\[index\\]: \\[0, 1, 2\\]\n\\[left\\]: \\[1514764800000000000, 1514851200000000000, 1514937600000000000\\]\n\\[right\\]: \\[1549065600000000000, 1549152000000000000, 1549238400000000000\\]"""\n\n s1 = Series(pd.date_range("2018-01-01", periods=3, freq="D"))\n s2 = Series(pd.date_range("2019-02-02", periods=3, freq="D"))\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(s1, s2, rtol=rtol)\n\n\ndef test_series_equal_categorical_mismatch(check_categorical, using_infer_string):\n if using_infer_string:\n dtype = "str"\n else:\n dtype = "object"\n msg = f"""Attributes of Series are different\n\nAttribute "dtype" are different\n\\[left\\]: CategoricalDtype\\(categories=\\['a', 'b'\\], ordered=False, \\ncategories_dtype={dtype}\\)\n\\[right\\]: CategoricalDtype\\(categories=\\['a', 'b', 'c'\\], \\nordered=False, categories_dtype={dtype}\\)"""\n\n s1 = Series(Categorical(["a", "b"]))\n s2 = Series(Categorical(["a", "b"], categories=list("abc")))\n\n if check_categorical:\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(s1, s2, check_categorical=check_categorical)\n else:\n _assert_series_equal_both(s1, s2, check_categorical=check_categorical)\n\n\ndef test_assert_series_equal_extension_dtype_mismatch():\n # https://github.com/pandas-dev/pandas/issues/32747\n left = Series(pd.array([1, 2, 3], dtype="Int64"))\n right = left.astype(int)\n\n msg 
= """Attributes of Series are different\n\nAttribute "dtype" are different\n\\[left\\]: Int64\n\\[right\\]: int[32|64]"""\n\n tm.assert_series_equal(left, right, check_dtype=False)\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(left, right, check_dtype=True)\n\n\ndef test_assert_series_equal_interval_dtype_mismatch():\n # https://github.com/pandas-dev/pandas/issues/32747\n left = Series([pd.Interval(0, 1)], dtype="interval")\n right = left.astype(object)\n\n msg = """Attributes of Series are different\n\nAttribute "dtype" are different\n\\[left\\]: interval\\[int64, right\\]\n\\[right\\]: object"""\n\n tm.assert_series_equal(left, right, check_dtype=False)\n\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(left, right, check_dtype=True)\n\n\ndef test_series_equal_series_type():\n class MySeries(Series):\n pass\n\n s1 = Series([1, 2])\n s2 = Series([1, 2])\n s3 = MySeries([1, 2])\n\n tm.assert_series_equal(s1, s2, check_series_type=False)\n tm.assert_series_equal(s1, s2, check_series_type=True)\n\n tm.assert_series_equal(s1, s3, check_series_type=False)\n tm.assert_series_equal(s3, s1, check_series_type=False)\n\n with pytest.raises(AssertionError, match="Series classes are different"):\n tm.assert_series_equal(s1, s3, check_series_type=True)\n\n with pytest.raises(AssertionError, match="Series classes are different"):\n tm.assert_series_equal(s3, s1, check_series_type=True)\n\n\ndef test_series_equal_exact_for_nonnumeric():\n # https://github.com/pandas-dev/pandas/issues/35446\n s1 = Series(["a", "b"])\n s2 = Series(["a", "b"])\n s3 = Series(["b", "a"])\n\n tm.assert_series_equal(s1, s2, check_exact=True)\n tm.assert_series_equal(s2, s1, check_exact=True)\n\n msg = """Series are different\n\nSeries values are different \\(100\\.0 %\\)\n\\[index\\]: \\[0, 1\\]\n\\[left\\]: \\[a, b\\]\n\\[right\\]: \\[b, a\\]"""\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(s1, s3, check_exact=True)\n\n 
msg = """Series are different\n\nSeries values are different \\(100\\.0 %\\)\n\\[index\\]: \\[0, 1\\]\n\\[left\\]: \\[b, a\\]\n\\[right\\]: \\[a, b\\]"""\n with pytest.raises(AssertionError, match=msg):\n tm.assert_series_equal(s3, s1, check_exact=True)\n\n\ndef test_assert_series_equal_ignore_extension_dtype_mismatch():\n # https://github.com/pandas-dev/pandas/issues/35715\n left = Series([1, 2, 3], dtype="Int64")\n right = Series([1, 2, 3], dtype="Int32")\n tm.assert_series_equal(left, right, check_dtype=False)\n\n\ndef test_assert_series_equal_ignore_extension_dtype_mismatch_cross_class():\n # https://github.com/pandas-dev/pandas/issues/35715\n left = Series([1, 2, 3], dtype="Int64")\n right = Series([1, 2, 3], dtype="int64")\n tm.assert_series_equal(left, right, check_dtype=False)\n\n\ndef test_allows_duplicate_labels():\n left = Series([1])\n right = Series([1]).set_flags(allows_duplicate_labels=False)\n tm.assert_series_equal(left, left)\n tm.assert_series_equal(right, right)\n tm.assert_series_equal(left, right, check_flags=False)\n tm.assert_series_equal(right, left, check_flags=False)\n\n with pytest.raises(AssertionError, match="<Flags"):\n tm.assert_series_equal(left, right)\n\n with pytest.raises(AssertionError, match="<Flags"):\n tm.assert_series_equal(left, right)\n\n\ndef test_assert_series_equal_identical_na(nulls_fixture):\n ser = Series([nulls_fixture])\n\n tm.assert_series_equal(ser, ser.copy())\n\n # while we're here do Index too\n idx = pd.Index(ser)\n tm.assert_index_equal(idx, idx.copy(deep=True))\n\n\ndef test_identical_nested_series_is_equal():\n # GH#22400\n x = Series(\n [\n 0,\n 0.0131142231938,\n 1.77774652865e-05,\n np.array([0.4722720840328748, 0.4216929783681722]),\n ]\n )\n y = Series(\n [\n 0,\n 0.0131142231938,\n 1.77774652865e-05,\n np.array([0.4722720840328748, 0.4216929783681722]),\n ]\n )\n # These two arrays should be equal, nesting could cause issue\n\n tm.assert_series_equal(x, x)\n tm.assert_series_equal(x, x, 
check_exact=True)\n tm.assert_series_equal(x, y)\n tm.assert_series_equal(x, y, check_exact=True)\n\n\n@pytest.mark.parametrize("dtype", ["datetime64", "timedelta64"])\ndef test_check_dtype_false_different_reso(dtype):\n # GH 52449\n ser_s = Series([1000213, 2131232, 21312331]).astype(f"{dtype}[s]")\n ser_ms = ser_s.astype(f"{dtype}[ms]")\n with pytest.raises(AssertionError, match="Attributes of Series are different"):\n tm.assert_series_equal(ser_s, ser_ms)\n tm.assert_series_equal(ser_ms, ser_s, check_dtype=False)\n\n ser_ms -= Series([1, 1, 1]).astype(f"{dtype}[ms]")\n\n with pytest.raises(AssertionError, match="Series are different"):\n tm.assert_series_equal(ser_s, ser_ms)\n\n with pytest.raises(AssertionError, match="Series are different"):\n tm.assert_series_equal(ser_s, ser_ms, check_dtype=False)\n\n\n@pytest.mark.parametrize("dtype", ["Int64", "int64"])\ndef test_large_unequal_ints(dtype):\n # https://github.com/pandas-dev/pandas/issues/55882\n left = Series([1577840521123000], dtype=dtype)\n right = Series([1577840521123543], dtype=dtype)\n with pytest.raises(AssertionError, match="Series are different"):\n tm.assert_series_equal(left, right)\n\n\n@pytest.mark.parametrize("dtype", [None, object])\n@pytest.mark.parametrize("check_exact", [True, False])\n@pytest.mark.parametrize("val", [3, 3.5])\ndef test_ea_and_numpy_no_dtype_check(val, check_exact, dtype):\n # GH#56651\n left = Series([1, 2, val], dtype=dtype)\n right = Series(pd.array([1, 2, val]))\n tm.assert_series_equal(left, right, check_dtype=False, check_exact=check_exact)\n\n\ndef test_assert_series_equal_int_tol():\n # GH#56646\n left = Series([81, 18, 121, 38, 74, 72, 81, 81, 146, 81, 81, 170, 74, 74])\n right = Series([72, 9, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72, 72])\n tm.assert_series_equal(left, right, rtol=1.5)\n\n tm.assert_frame_equal(left.to_frame(), right.to_frame(), rtol=1.5)\n tm.assert_extension_array_equal(\n left.astype("Int64").values, right.astype("Int64").values, 
rtol=1.5\n )\n\n\ndef test_assert_series_equal_index_exact_default():\n # GH#57067\n ser1 = Series(np.zeros(6, dtype=int), [0, 0.2, 0.4, 0.6, 0.8, 1])\n ser2 = Series(np.zeros(6, dtype=int), np.linspace(0, 1, 6))\n tm.assert_series_equal(ser1, ser2)\n tm.assert_frame_equal(ser1.to_frame(), ser2.to_frame())\n | .venv\Lib\site-packages\pandas\tests\util\test_assert_series_equal.py | test_assert_series_equal.py | Python | 15,072 | 0.95 | 0.084711 | 0.04065 | node-utils | 801 | 2024-04-08T06:18:20.533786 | BSD-3-Clause | true | 5fc2bddfaf3d3afd85fe948caf829420 |
from textwrap import dedent\n\nimport pytest\n\nfrom pandas.util._decorators import deprecate\n\nimport pandas._testing as tm\n\n\ndef new_func():\n """\n This is the summary. The deprecate directive goes next.\n\n This is the extended summary. The deprecate directive goes before this.\n """\n return "new_func called"\n\n\ndef new_func_no_docstring():\n return "new_func_no_docstring called"\n\n\ndef new_func_wrong_docstring():\n """Summary should be in the next line."""\n return "new_func_wrong_docstring called"\n\n\ndef new_func_with_deprecation():\n """\n This is the summary. The deprecate directive goes next.\n\n .. deprecated:: 1.0\n Use new_func instead.\n\n This is the extended summary. The deprecate directive goes before this.\n """\n\n\ndef test_deprecate_ok():\n depr_func = deprecate("depr_func", new_func, "1.0", msg="Use new_func instead.")\n\n with tm.assert_produces_warning(FutureWarning):\n result = depr_func()\n\n assert result == "new_func called"\n assert depr_func.__doc__ == dedent(new_func_with_deprecation.__doc__)\n\n\ndef test_deprecate_no_docstring():\n depr_func = deprecate(\n "depr_func", new_func_no_docstring, "1.0", msg="Use new_func instead."\n )\n with tm.assert_produces_warning(FutureWarning):\n result = depr_func()\n assert result == "new_func_no_docstring called"\n\n\ndef test_deprecate_wrong_docstring():\n msg = "deprecate needs a correctly formatted docstring"\n with pytest.raises(AssertionError, match=msg):\n deprecate(\n "depr_func", new_func_wrong_docstring, "1.0", msg="Use new_func instead."\n )\n | .venv\Lib\site-packages\pandas\tests\util\test_deprecate.py | test_deprecate.py | Python | 1,617 | 0.85 | 0.111111 | 0 | node-utils | 481 | 2024-06-12T06:33:32.786539 | GPL-3.0 | true | fe3257f0d5b245b219c67428d76f473f |
import pytest\n\nfrom pandas.util._decorators import deprecate_kwarg\n\nimport pandas._testing as tm\n\n\n@deprecate_kwarg("old", "new")\ndef _f1(new=False):\n return new\n\n\n_f2_mappings = {"yes": True, "no": False}\n\n\n@deprecate_kwarg("old", "new", _f2_mappings)\ndef _f2(new=False):\n return new\n\n\ndef _f3_mapping(x):\n return x + 1\n\n\n@deprecate_kwarg("old", "new", _f3_mapping)\ndef _f3(new=0):\n return new\n\n\n@pytest.mark.parametrize("key,klass", [("old", FutureWarning), ("new", None)])\ndef test_deprecate_kwarg(key, klass):\n x = 78\n\n with tm.assert_produces_warning(klass):\n assert _f1(**{key: x}) == x\n\n\n@pytest.mark.parametrize("key", list(_f2_mappings.keys()))\ndef test_dict_deprecate_kwarg(key):\n with tm.assert_produces_warning(FutureWarning):\n assert _f2(old=key) == _f2_mappings[key]\n\n\n@pytest.mark.parametrize("key", ["bogus", 12345, -1.23])\ndef test_missing_deprecate_kwarg(key):\n with tm.assert_produces_warning(FutureWarning):\n assert _f2(old=key) == key\n\n\n@pytest.mark.parametrize("x", [1, -1.4, 0])\ndef test_callable_deprecate_kwarg(x):\n with tm.assert_produces_warning(FutureWarning):\n assert _f3(old=x) == _f3_mapping(x)\n\n\ndef test_callable_deprecate_kwarg_fail():\n msg = "((can only|cannot) concatenate)|(must be str)|(Can't convert)"\n\n with pytest.raises(TypeError, match=msg):\n _f3(old="hello")\n\n\ndef test_bad_deprecate_kwarg():\n msg = "mapping from old to new argument values must be dict or callable!"\n\n with pytest.raises(TypeError, match=msg):\n\n @deprecate_kwarg("old", "new", 0)\n def f4(new=None):\n return new\n\n\n@deprecate_kwarg("old", None)\ndef _f4(old=True, unchanged=True):\n return old, unchanged\n\n\n@pytest.mark.parametrize("key", ["old", "unchanged"])\ndef test_deprecate_keyword(key):\n x = 9\n\n if key == "old":\n klass = FutureWarning\n expected = (x, True)\n else:\n klass = None\n expected = (True, x)\n\n with tm.assert_produces_warning(klass):\n assert _f4(**{key: x}) == expected\n | 
.venv\Lib\site-packages\pandas\tests\util\test_deprecate_kwarg.py | test_deprecate_kwarg.py | Python | 2,043 | 0.85 | 0.155556 | 0 | awesome-app | 860 | 2024-06-07T18:56:23.289099 | MIT | true | fb95a281aec64b366e6e86c2a9e4c895 |
"""\nTests for the `deprecate_nonkeyword_arguments` decorator\n"""\n\nimport inspect\n\nfrom pandas.util._decorators import deprecate_nonkeyword_arguments\n\nimport pandas._testing as tm\n\n\n@deprecate_nonkeyword_arguments(\n version="1.1", allowed_args=["a", "b"], name="f_add_inputs"\n)\ndef f(a, b=0, c=0, d=0):\n return a + b + c + d\n\n\ndef test_f_signature():\n assert str(inspect.signature(f)) == "(a, b=0, *, c=0, d=0)"\n\n\ndef test_one_argument():\n with tm.assert_produces_warning(None):\n assert f(19) == 19\n\n\ndef test_one_and_one_arguments():\n with tm.assert_produces_warning(None):\n assert f(19, d=6) == 25\n\n\ndef test_two_arguments():\n with tm.assert_produces_warning(None):\n assert f(1, 5) == 6\n\n\ndef test_two_and_two_arguments():\n with tm.assert_produces_warning(None):\n assert f(1, 3, c=3, d=5) == 12\n\n\ndef test_three_arguments():\n with tm.assert_produces_warning(FutureWarning):\n assert f(6, 3, 3) == 12\n\n\ndef test_four_arguments():\n with tm.assert_produces_warning(FutureWarning):\n assert f(1, 2, 3, 4) == 10\n\n\ndef test_three_arguments_with_name_in_warning():\n msg = (\n "Starting with pandas version 1.1 all arguments of f_add_inputs "\n "except for the arguments 'a' and 'b' will be keyword-only."\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert f(6, 3, 3) == 12\n\n\n@deprecate_nonkeyword_arguments(version="1.1")\ndef g(a, b=0, c=0, d=0):\n with tm.assert_produces_warning(None):\n return a + b + c + d\n\n\ndef test_g_signature():\n assert str(inspect.signature(g)) == "(a, *, b=0, c=0, d=0)"\n\n\ndef test_one_and_three_arguments_default_allowed_args():\n with tm.assert_produces_warning(None):\n assert g(1, b=3, c=3, d=5) == 12\n\n\ndef test_three_arguments_default_allowed_args():\n with tm.assert_produces_warning(FutureWarning):\n assert g(6, 3, 3) == 12\n\n\ndef test_three_positional_argument_with_warning_message_analysis():\n msg = (\n "Starting with pandas version 1.1 all arguments of g "\n "except for the 
argument 'a' will be keyword-only."\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert g(6, 3, 3) == 12\n\n\n@deprecate_nonkeyword_arguments(version="1.1")\ndef h(a=0, b=0, c=0, d=0):\n return a + b + c + d\n\n\ndef test_h_signature():\n assert str(inspect.signature(h)) == "(*, a=0, b=0, c=0, d=0)"\n\n\ndef test_all_keyword_arguments():\n with tm.assert_produces_warning(None):\n assert h(a=1, b=2) == 3\n\n\ndef test_one_positional_argument():\n with tm.assert_produces_warning(FutureWarning):\n assert h(23) == 23\n\n\ndef test_one_positional_argument_with_warning_message_analysis():\n msg = "Starting with pandas version 1.1 all arguments of h will be keyword-only."\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert h(19) == 19\n\n\n@deprecate_nonkeyword_arguments(version="1.1")\ndef i(a=0, /, b=0, *, c=0, d=0):\n return a + b + c + d\n\n\ndef test_i_signature():\n assert str(inspect.signature(i)) == "(*, a=0, b=0, c=0, d=0)"\n\n\nclass Foo:\n @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "bar"])\n def baz(self, bar=None, foobar=None): # pylint: disable=disallowed-name\n ...\n\n\ndef test_foo_signature():\n assert str(inspect.signature(Foo.baz)) == "(self, bar=None, *, foobar=None)"\n\n\ndef test_class():\n msg = (\n r"In a future version of pandas all arguments of Foo\.baz "\n r"except for the argument \'bar\' will be keyword-only"\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n Foo().baz("qux", "quox")\n | .venv\Lib\site-packages\pandas\tests\util\test_deprecate_nonkeyword_arguments.py | test_deprecate_nonkeyword_arguments.py | Python | 3,623 | 0.95 | 0.205674 | 0 | react-lib | 579 | 2023-07-28T05:07:40.082245 | BSD-3-Clause | true | fa836416566d60b3d6d98c4304a6aaf7 |
from textwrap import dedent\n\nfrom pandas.util._decorators import doc\n\n\n@doc(method="cumsum", operation="sum")\ndef cumsum(whatever):\n """\n This is the {method} method.\n\n It computes the cumulative {operation}.\n """\n\n\n@doc(\n cumsum,\n dedent(\n """\n Examples\n --------\n\n >>> cumavg([1, 2, 3])\n 2\n """\n ),\n method="cumavg",\n operation="average",\n)\ndef cumavg(whatever):\n pass\n\n\n@doc(cumsum, method="cummax", operation="maximum")\ndef cummax(whatever):\n pass\n\n\n@doc(cummax, method="cummin", operation="minimum")\ndef cummin(whatever):\n pass\n\n\ndef test_docstring_formatting():\n docstr = dedent(\n """\n This is the cumsum method.\n\n It computes the cumulative sum.\n """\n )\n assert cumsum.__doc__ == docstr\n\n\ndef test_docstring_appending():\n docstr = dedent(\n """\n This is the cumavg method.\n\n It computes the cumulative average.\n\n Examples\n --------\n\n >>> cumavg([1, 2, 3])\n 2\n """\n )\n assert cumavg.__doc__ == docstr\n\n\ndef test_doc_template_from_func():\n docstr = dedent(\n """\n This is the cummax method.\n\n It computes the cumulative maximum.\n """\n )\n assert cummax.__doc__ == docstr\n\n\ndef test_inherit_doc_template():\n docstr = dedent(\n """\n This is the cummin method.\n\n It computes the cumulative minimum.\n """\n )\n assert cummin.__doc__ == docstr\n | .venv\Lib\site-packages\pandas\tests\util\test_doc.py | test_doc.py | Python | 1,492 | 0.85 | 0.088889 | 0 | python-kit | 337 | 2025-06-29T11:59:07.875782 | MIT | true | fd5c30d16e8f659e670746c2f79262ee |
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.util.hashing import hash_tuples\nfrom pandas.util import (\n hash_array,\n hash_pandas_object,\n)\n\n\n@pytest.fixture(\n params=[\n Series([1, 2, 3] * 3, dtype="int32"),\n Series([None, 2.5, 3.5] * 3, dtype="float32"),\n Series(["a", "b", "c"] * 3, dtype="category"),\n Series(["d", "e", "f"] * 3),\n Series([True, False, True] * 3),\n Series(pd.date_range("20130101", periods=9)),\n Series(pd.date_range("20130101", periods=9, tz="US/Eastern")),\n Series(timedelta_range("2000", periods=9)),\n ]\n)\ndef series(request):\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef index(request):\n return request.param\n\n\ndef test_consistency():\n # Check that our hash doesn't change because of a mistake\n # in the actual code; this is the ground truth.\n result = hash_pandas_object(Index(["foo", "bar", "baz"]))\n expected = Series(\n np.array(\n [3600424527151052760, 1374399572096150070, 477881037637427054],\n dtype="uint64",\n ),\n index=["foo", "bar", "baz"],\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_hash_array(series):\n arr = series.values\n tm.assert_numpy_array_equal(hash_array(arr), hash_array(arr))\n\n\n@pytest.mark.parametrize("dtype", ["U", object])\ndef test_hash_array_mixed(dtype):\n result1 = hash_array(np.array(["3", "4", "All"]))\n result2 = hash_array(np.array([3, 4, "All"], dtype=dtype))\n\n tm.assert_numpy_array_equal(result1, result2)\n\n\n@pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")])\ndef test_hash_array_errors(val):\n msg = "must pass a ndarray-like"\n with pytest.raises(TypeError, match=msg):\n hash_array(val)\n\n\ndef test_hash_array_index_exception():\n # GH42003 TypeError instead of AttributeError\n obj = pd.DatetimeIndex(["2018-10-28 01:20:00"], tz="Europe/Berlin")\n\n msg = "Use 
hash_pandas_object instead"\n with pytest.raises(TypeError, match=msg):\n hash_array(obj)\n\n\ndef test_hash_tuples():\n tuples = [(1, "one"), (1, "two"), (2, "one")]\n result = hash_tuples(tuples)\n\n expected = hash_pandas_object(MultiIndex.from_tuples(tuples)).values\n tm.assert_numpy_array_equal(result, expected)\n\n # We only need to support MultiIndex and list-of-tuples\n msg = "|".join(["object is not iterable", "zip argument #1 must support iteration"])\n with pytest.raises(TypeError, match=msg):\n hash_tuples(tuples[0])\n\n\n@pytest.mark.parametrize("val", [5, "foo", pd.Timestamp("20130101")])\ndef test_hash_tuples_err(val):\n msg = "must be convertible to a list-of-tuples"\n with pytest.raises(TypeError, match=msg):\n hash_tuples(val)\n\n\ndef test_multiindex_unique():\n mi = MultiIndex.from_tuples([(118, 472), (236, 118), (51, 204), (102, 51)])\n assert mi.is_unique is True\n\n result = hash_pandas_object(mi)\n assert result.is_unique is True\n\n\ndef test_multiindex_objects():\n mi = MultiIndex(\n levels=[["b", "d", "a"], [1, 2, 3]],\n codes=[[0, 1, 0, 2], [2, 0, 0, 1]],\n names=["col1", "col2"],\n )\n recons = mi._sort_levels_monotonic()\n\n # These are equal.\n assert mi.equals(recons)\n assert Index(mi.values).equals(Index(recons.values))\n\n\n@pytest.mark.parametrize(\n "obj",\n [\n Series([1, 2, 3]),\n Series([1.0, 1.5, 3.2]),\n Series([1.0, 1.5, np.nan]),\n Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),\n Series(["a", "b", "c"]),\n Series(["a", np.nan, "c"]),\n Series(["a", None, "c"]),\n Series([True, False, True]),\n Series(dtype=object),\n DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}),\n DataFrame(),\n DataFrame(np.full((10, 4), np.nan)),\n DataFrame(\n {\n "A": [0.0, 1.0, 2.0, 3.0, 4.0],\n "B": [0.0, 1.0, 0.0, 1.0, 0.0],\n "C": Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object),\n "D": pd.date_range("20130101", periods=5),\n }\n ),\n DataFrame(range(5), index=pd.date_range("2020-01-01", periods=5)),\n Series(range(5), 
index=pd.date_range("2020-01-01", periods=5)),\n Series(period_range("2020-01-01", periods=10, freq="D")),\n Series(pd.date_range("20130101", periods=3, tz="US/Eastern")),\n ],\n)\ndef test_hash_pandas_object(obj, index):\n a = hash_pandas_object(obj, index=index)\n b = hash_pandas_object(obj, index=index)\n tm.assert_series_equal(a, b)\n\n\n@pytest.mark.parametrize(\n "obj",\n [\n Series([1, 2, 3]),\n Series([1.0, 1.5, 3.2]),\n Series([1.0, 1.5, np.nan]),\n Series([1.0, 1.5, 3.2], index=[1.5, 1.1, 3.3]),\n Series(["a", "b", "c"]),\n Series(["a", np.nan, "c"]),\n Series(["a", None, "c"]),\n Series([True, False, True]),\n DataFrame({"x": ["a", "b", "c"], "y": [1, 2, 3]}),\n DataFrame(np.full((10, 4), np.nan)),\n DataFrame(\n {\n "A": [0.0, 1.0, 2.0, 3.0, 4.0],\n "B": [0.0, 1.0, 0.0, 1.0, 0.0],\n "C": Index(["foo1", "foo2", "foo3", "foo4", "foo5"], dtype=object),\n "D": pd.date_range("20130101", periods=5),\n }\n ),\n DataFrame(range(5), index=pd.date_range("2020-01-01", periods=5)),\n Series(range(5), index=pd.date_range("2020-01-01", periods=5)),\n Series(period_range("2020-01-01", periods=10, freq="D")),\n Series(pd.date_range("20130101", periods=3, tz="US/Eastern")),\n ],\n)\ndef test_hash_pandas_object_diff_index_non_empty(obj):\n a = hash_pandas_object(obj, index=True)\n b = hash_pandas_object(obj, index=False)\n assert not (a == b).all()\n\n\n@pytest.mark.parametrize(\n "obj",\n [\n Index([1, 2, 3]),\n Index([True, False, True]),\n timedelta_range("1 day", periods=2),\n period_range("2020-01-01", freq="D", periods=2),\n MultiIndex.from_product(\n [range(5), ["foo", "bar", "baz"], pd.date_range("20130101", periods=2)]\n ),\n MultiIndex.from_product([pd.CategoricalIndex(list("aabc")), range(3)]),\n ],\n)\ndef test_hash_pandas_index(obj, index):\n a = hash_pandas_object(obj, index=index)\n b = hash_pandas_object(obj, index=index)\n tm.assert_series_equal(a, b)\n\n\ndef test_hash_pandas_series(series, index):\n a = hash_pandas_object(series, index=index)\n b = 
hash_pandas_object(series, index=index)\n tm.assert_series_equal(a, b)\n\n\ndef test_hash_pandas_series_diff_index(series):\n a = hash_pandas_object(series, index=True)\n b = hash_pandas_object(series, index=False)\n assert not (a == b).all()\n\n\n@pytest.mark.parametrize(\n "obj", [Series([], dtype="float64"), Series([], dtype="object"), Index([])]\n)\ndef test_hash_pandas_empty_object(obj, index):\n # These are by-definition the same with\n # or without the index as the data is empty.\n a = hash_pandas_object(obj, index=index)\n b = hash_pandas_object(obj, index=index)\n tm.assert_series_equal(a, b)\n\n\n@pytest.mark.parametrize(\n "s1",\n [\n Series(["a", "b", "c", "d"]),\n Series([1000, 2000, 3000, 4000]),\n Series(pd.date_range(0, periods=4)),\n ],\n)\n@pytest.mark.parametrize("categorize", [True, False])\ndef test_categorical_consistency(s1, categorize):\n # see gh-15143\n #\n # Check that categoricals hash consistent with their values,\n # not codes. This should work for categoricals of any dtype.\n s2 = s1.astype("category").cat.set_categories(s1)\n s3 = s2.cat.set_categories(list(reversed(s1)))\n\n # These should all hash identically.\n h1 = hash_pandas_object(s1, categorize=categorize)\n h2 = hash_pandas_object(s2, categorize=categorize)\n h3 = hash_pandas_object(s3, categorize=categorize)\n\n tm.assert_series_equal(h1, h2)\n tm.assert_series_equal(h1, h3)\n\n\ndef test_categorical_with_nan_consistency():\n c = pd.Categorical.from_codes(\n [-1, 0, 1, 2, 3, 4], categories=pd.date_range("2012-01-01", periods=5, name="B")\n )\n expected = hash_array(c, categorize=False)\n\n c = pd.Categorical.from_codes([-1, 0], categories=[pd.Timestamp("2012-01-01")])\n result = hash_array(c, categorize=False)\n\n assert result[0] in expected\n assert result[1] in expected\n\n\ndef test_pandas_errors():\n msg = "Unexpected type for hashing"\n with pytest.raises(TypeError, match=msg):\n hash_pandas_object(pd.Timestamp("20130101"))\n\n\ndef test_hash_keys():\n # Using 
different hash keys, should have\n # different hashes for the same data.\n #\n # This only matters for object dtypes.\n obj = Series(list("abc"))\n\n a = hash_pandas_object(obj, hash_key="9876543210123456")\n b = hash_pandas_object(obj, hash_key="9876543210123465")\n\n assert (a != b).all()\n\n\ndef test_df_hash_keys():\n # DataFrame version of the test_hash_keys.\n # https://github.com/pandas-dev/pandas/issues/41404\n obj = DataFrame({"x": np.arange(3), "y": list("abc")})\n\n a = hash_pandas_object(obj, hash_key="9876543210123456")\n b = hash_pandas_object(obj, hash_key="9876543210123465")\n\n assert (a != b).all()\n\n\ndef test_df_encoding():\n # Check that DataFrame recognizes optional encoding.\n # https://github.com/pandas-dev/pandas/issues/41404\n # https://github.com/pandas-dev/pandas/pull/42049\n obj = DataFrame({"x": np.arange(3), "y": list("a+c")})\n\n a = hash_pandas_object(obj, encoding="utf8")\n b = hash_pandas_object(obj, encoding="utf7")\n\n # Note that the "+" is encoded as "+-" in utf-7.\n assert a[0] == b[0]\n assert a[1] != b[1]\n assert a[2] == b[2]\n\n\ndef test_invalid_key():\n # This only matters for object dtypes.\n msg = "key should be a 16-byte string encoded"\n\n with pytest.raises(ValueError, match=msg):\n hash_pandas_object(Series(list("abc")), hash_key="foo")\n\n\ndef test_already_encoded(index):\n # If already encoded, then ok.\n obj = Series(list("abc")).str.encode("utf8")\n a = hash_pandas_object(obj, index=index)\n b = hash_pandas_object(obj, index=index)\n tm.assert_series_equal(a, b)\n\n\ndef test_alternate_encoding(index):\n obj = Series(list("abc"))\n a = hash_pandas_object(obj, index=index)\n b = hash_pandas_object(obj, index=index)\n tm.assert_series_equal(a, b)\n\n\n@pytest.mark.parametrize("l_exp", range(8))\n@pytest.mark.parametrize("l_add", [0, 1])\ndef test_same_len_hash_collisions(l_exp, l_add):\n length = 2 ** (l_exp + 8) + l_add\n idx = np.array([str(i) for i in range(length)], dtype=object)\n\n result = 
hash_array(idx, "utf8")\n assert not result[0] == result[1]\n\n\ndef test_hash_collisions():\n # Hash collisions are bad.\n #\n # https://github.com/pandas-dev/pandas/issues/14711#issuecomment-264885726\n hashes = [\n "Ingrid-9Z9fKIZmkO7i7Cn51Li34pJm44fgX6DYGBNj3VPlOH50m7HnBlPxfIwFMrcNJNMP6PSgLmwWnInciMWrCSAlLEvt7JkJl4IxiMrVbXSa8ZQoVaq5xoQPjltuJEfwdNlO6jo8qRRHvD8sBEBMQASrRa6TsdaPTPCBo3nwIBpE7YzzmyH0vMBhjQZLx1aCT7faSEx7PgFxQhHdKFWROcysamgy9iVj8DO2Fmwg1NNl93rIAqC3mdqfrCxrzfvIY8aJdzin2cHVzy3QUJxZgHvtUtOLxoqnUHsYbNTeq0xcLXpTZEZCxD4PGubIuCNf32c33M7HFsnjWSEjE2yVdWKhmSVodyF8hFYVmhYnMCztQnJrt3O8ZvVRXd5IKwlLexiSp4h888w7SzAIcKgc3g5XQJf6MlSMftDXm9lIsE1mJNiJEv6uY6pgvC3fUPhatlR5JPpVAHNSbSEE73MBzJrhCAbOLXQumyOXigZuPoME7QgJcBalliQol7YZ9",\n "Tim-b9MddTxOWW2AT1Py6vtVbZwGAmYCjbp89p8mxsiFoVX4FyDOF3wFiAkyQTUgwg9sVqVYOZo09Dh1AzhFHbgij52ylF0SEwgzjzHH8TGY8Lypart4p4onnDoDvVMBa0kdthVGKl6K0BDVGzyOXPXKpmnMF1H6rJzqHJ0HywfwS4XYpVwlAkoeNsiicHkJUFdUAhG229INzvIAiJuAHeJDUoyO4DCBqtoZ5TDend6TK7Y914yHlfH3g1WZu5LksKv68VQHJriWFYusW5e6ZZ6dKaMjTwEGuRgdT66iU5nqWTHRH8WSzpXoCFwGcTOwyuqPSe0fTe21DVtJn1FKj9F9nEnR9xOvJUO7E0piCIF4Ad9yAIDY4DBimpsTfKXCu1vdHpKYerzbndfuFe5AhfMduLYZJi5iAw8qKSwR5h86ttXV0Mc0QmXz8dsRvDgxjXSmupPxBggdlqUlC828hXiTPD7am0yETBV0F3bEtvPiNJfremszcV8NcqAoARMe",\n ]\n\n # These should be different.\n result1 = hash_array(np.asarray(hashes[0:1], dtype=object), "utf8")\n expected1 = np.array([14963968704024874985], dtype=np.uint64)\n tm.assert_numpy_array_equal(result1, expected1)\n\n result2 = hash_array(np.asarray(hashes[1:2], dtype=object), "utf8")\n expected2 = np.array([16428432627716348016], dtype=np.uint64)\n tm.assert_numpy_array_equal(result2, expected2)\n\n result = hash_array(np.asarray(hashes, dtype=object), "utf8")\n tm.assert_numpy_array_equal(result, np.concatenate([expected1, expected2], axis=0))\n\n\n@pytest.mark.parametrize(\n "data, result_data",\n [\n [[tuple("1"), tuple("2")], [10345501319357378243, 8331063931016360761]],\n [[(1,), (2,)], [9408946347443669104, 
3278256261030523334]],\n ],\n)\ndef test_hash_with_tuple(data, result_data):\n # GH#28969 array containing a tuple raises on call to arr.astype(str)\n # apparently a numpy bug github.com/numpy/numpy/issues/9441\n\n df = DataFrame({"data": data})\n result = hash_pandas_object(df)\n expected = Series(result_data, dtype=np.uint64)\n tm.assert_series_equal(result, expected)\n\n\ndef test_hashable_tuple_args():\n # require that the elements of such tuples are themselves hashable\n\n df3 = DataFrame(\n {\n "data": [\n (\n 1,\n [],\n ),\n (\n 2,\n {},\n ),\n ]\n }\n )\n with pytest.raises(TypeError, match="unhashable type: 'list'"):\n hash_pandas_object(df3)\n\n\ndef test_hash_object_none_key():\n # https://github.com/pandas-dev/pandas/issues/30887\n result = pd.util.hash_pandas_object(Series(["a", "b"]), hash_key=None)\n expected = Series([4578374827886788867, 17338122309987883691], dtype="uint64")\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\util\test_hashing.py | test_hashing.py | Python | 13,857 | 0.95 | 0.088729 | 0.096677 | react-lib | 825 | 2024-06-03T19:12:36.021153 | MIT | true | 7090a22a8dc1f8ba9c89806827f5336f |
import pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas import option_context\n\n\n@td.skip_if_installed("numba")\ndef test_numba_not_installed_option_context():\n with pytest.raises(ImportError, match="Missing optional"):\n with option_context("compute.use_numba", True):\n pass\n | .venv\Lib\site-packages\pandas\tests\util\test_numba.py | test_numba.py | Python | 308 | 0.85 | 0.083333 | 0 | node-utils | 686 | 2023-11-22T21:04:27.829127 | GPL-3.0 | true | 5df23acbaae1fdc0d214f57aa8db52dd |
import warnings\n\nimport pytest\n\nfrom pandas.util._exceptions import rewrite_warning\n\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize(\n "target_category, target_message, hit",\n [\n (FutureWarning, "Target message", True),\n (FutureWarning, "Target", True),\n (FutureWarning, "get mess", True),\n (FutureWarning, "Missed message", False),\n (DeprecationWarning, "Target message", False),\n ],\n)\n@pytest.mark.parametrize(\n "new_category",\n [\n None,\n DeprecationWarning,\n ],\n)\ndef test_rewrite_warning(target_category, target_message, hit, new_category):\n new_message = "Rewritten message"\n if hit:\n expected_category = new_category if new_category else target_category\n expected_message = new_message\n else:\n expected_category = FutureWarning\n expected_message = "Target message"\n with tm.assert_produces_warning(expected_category, match=expected_message):\n with rewrite_warning(\n target_message, target_category, new_message, new_category\n ):\n warnings.warn(message="Target message", category=FutureWarning)\n | .venv\Lib\site-packages\pandas\tests\util\test_rewrite_warning.py | test_rewrite_warning.py | Python | 1,151 | 0.85 | 0.076923 | 0 | vue-tools | 585 | 2023-12-14T07:38:43.446671 | GPL-3.0 | true | d7925cb1dc24ebd53091da237cecc8bf |
import numpy as np\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\ndef test_shares_memory_interval():\n obj = pd.interval_range(1, 5)\n\n assert tm.shares_memory(obj, obj)\n assert tm.shares_memory(obj, obj._data)\n assert tm.shares_memory(obj, obj[::-1])\n assert tm.shares_memory(obj, obj[:2])\n\n assert not tm.shares_memory(obj, obj._data.copy())\n\n\n@td.skip_if_no("pyarrow")\ndef test_shares_memory_string():\n # GH#55823\n import pyarrow as pa\n\n obj = pd.array(["a", "b"], dtype=pd.StringDtype("pyarrow", na_value=pd.NA))\n assert tm.shares_memory(obj, obj)\n\n obj = pd.array(["a", "b"], dtype=pd.StringDtype("pyarrow", na_value=np.nan))\n assert tm.shares_memory(obj, obj)\n\n obj = pd.array(["a", "b"], dtype=pd.ArrowDtype(pa.string()))\n assert tm.shares_memory(obj, obj)\n | .venv\Lib\site-packages\pandas\tests\util\test_shares_memory.py | test_shares_memory.py | Python | 852 | 0.95 | 0.0625 | 0.047619 | node-utils | 645 | 2025-03-08T21:33:50.201287 | BSD-3-Clause | true | 414a544b0640a0446b9c0dd9262df46e |
import json\nimport os\nimport re\n\nfrom pandas.util._print_versions import (\n _get_dependency_info,\n _get_sys_info,\n)\n\nimport pandas as pd\n\n\ndef test_show_versions(tmpdir):\n # GH39701\n as_json = os.path.join(tmpdir, "test_output.json")\n\n pd.show_versions(as_json=as_json)\n\n with open(as_json, encoding="utf-8") as fd:\n # check if file output is valid JSON, will raise an exception if not\n result = json.load(fd)\n\n # Basic check that each version element is found in output\n expected = {\n "system": _get_sys_info(),\n "dependencies": _get_dependency_info(),\n }\n\n assert result == expected\n\n\ndef test_show_versions_console_json(capsys):\n # GH39701\n pd.show_versions(as_json=True)\n stdout = capsys.readouterr().out\n\n # check valid json is printed to the console if as_json is True\n result = json.loads(stdout)\n\n # Basic check that each version element is found in output\n expected = {\n "system": _get_sys_info(),\n "dependencies": _get_dependency_info(),\n }\n\n assert result == expected\n\n\ndef test_show_versions_console(capsys):\n # gh-32041\n # gh-32041\n pd.show_versions(as_json=False)\n result = capsys.readouterr().out\n\n # check header\n assert "INSTALLED VERSIONS" in result\n\n # check full commit hash\n assert re.search(r"commit\s*:\s[0-9a-f]{40}\n", result)\n\n # check required dependency\n # 2020-12-09 npdev has "dirty" in the tag\n # 2022-05-25 npdev released with RC wo/ "dirty".\n # Just ensure we match [0-9]+\..* since npdev version is variable\n assert re.search(r"numpy\s*:\s[0-9]+\..*\n", result)\n\n # check optional dependency\n assert re.search(r"pyarrow\s*:\s([0-9]+.*|None)\n", result)\n\n\ndef test_json_output_match(capsys, tmpdir):\n # GH39701\n pd.show_versions(as_json=True)\n result_console = capsys.readouterr().out\n\n out_path = os.path.join(tmpdir, "test_json.json")\n pd.show_versions(as_json=out_path)\n with open(out_path, encoding="utf-8") as out_fd:\n result_file = out_fd.read()\n\n assert result_console == 
result_file\n | .venv\Lib\site-packages\pandas\tests\util\test_show_versions.py | test_show_versions.py | Python | 2,096 | 0.95 | 0.08642 | 0.275862 | vue-tools | 212 | 2023-07-17T05:44:11.522515 | MIT | true | 896cc36dfef7825638cfce78e13cc539 |
import os\n\nimport pytest\n\nfrom pandas import (\n array,\n compat,\n)\nimport pandas._testing as tm\n\n\ndef test_numpy_err_state_is_default():\n expected = {"over": "warn", "divide": "warn", "invalid": "warn", "under": "ignore"}\n import numpy as np\n\n # The error state should be unchanged after that import.\n assert np.geterr() == expected\n\n\ndef test_convert_rows_list_to_csv_str():\n rows_list = ["aaa", "bbb", "ccc"]\n ret = tm.convert_rows_list_to_csv_str(rows_list)\n\n if compat.is_platform_windows():\n expected = "aaa\r\nbbb\r\nccc\r\n"\n else:\n expected = "aaa\nbbb\nccc\n"\n\n assert ret == expected\n\n\n@pytest.mark.parametrize("strict_data_files", [True, False])\ndef test_datapath_missing(datapath):\n with pytest.raises(ValueError, match="Could not find file"):\n datapath("not_a_file")\n\n\ndef test_datapath(datapath):\n args = ("io", "data", "csv", "iris.csv")\n\n result = datapath(*args)\n expected = os.path.join(os.path.dirname(os.path.dirname(__file__)), *args)\n\n assert result == expected\n\n\ndef test_external_error_raised():\n with tm.external_error_raised(TypeError):\n raise TypeError("Should not check this error message, so it will pass")\n\n\ndef test_is_sorted():\n arr = array([1, 2, 3], dtype="Int64")\n tm.assert_is_sorted(arr)\n\n arr = array([4, 2, 3], dtype="Int64")\n with pytest.raises(AssertionError, match="ExtensionArray are different"):\n tm.assert_is_sorted(arr)\n | .venv\Lib\site-packages\pandas\tests\util\test_util.py | test_util.py | Python | 1,463 | 0.95 | 0.12069 | 0.026316 | awesome-app | 972 | 2024-07-10T17:36:07.817251 | Apache-2.0 | true | 032ae6eed3f743f0d6942a15612cbb88 |
import pytest\n\nfrom pandas.util._validators import validate_args\n\n\n@pytest.fixture\ndef _fname():\n return "func"\n\n\ndef test_bad_min_fname_arg_count(_fname):\n msg = "'max_fname_arg_count' must be non-negative"\n\n with pytest.raises(ValueError, match=msg):\n validate_args(_fname, (None,), -1, "foo")\n\n\ndef test_bad_arg_length_max_value_single(_fname):\n args = (None, None)\n compat_args = ("foo",)\n\n min_fname_arg_count = 0\n max_length = len(compat_args) + min_fname_arg_count\n actual_length = len(args) + min_fname_arg_count\n msg = (\n rf"{_fname}\(\) takes at most {max_length} "\n rf"argument \({actual_length} given\)"\n )\n\n with pytest.raises(TypeError, match=msg):\n validate_args(_fname, args, min_fname_arg_count, compat_args)\n\n\ndef test_bad_arg_length_max_value_multiple(_fname):\n args = (None, None)\n compat_args = {"foo": None}\n\n min_fname_arg_count = 2\n max_length = len(compat_args) + min_fname_arg_count\n actual_length = len(args) + min_fname_arg_count\n msg = (\n rf"{_fname}\(\) takes at most {max_length} "\n rf"arguments \({actual_length} given\)"\n )\n\n with pytest.raises(TypeError, match=msg):\n validate_args(_fname, args, min_fname_arg_count, compat_args)\n\n\n@pytest.mark.parametrize("i", range(1, 3))\ndef test_not_all_defaults(i, _fname):\n bad_arg = "foo"\n msg = (\n f"the '{bad_arg}' parameter is not supported "\n rf"in the pandas implementation of {_fname}\(\)"\n )\n\n compat_args = {"foo": 2, "bar": -1, "baz": 3}\n arg_vals = (1, -1, 3)\n\n with pytest.raises(ValueError, match=msg):\n validate_args(_fname, arg_vals[:i], 2, compat_args)\n\n\ndef test_validation(_fname):\n # No exceptions should be raised.\n validate_args(_fname, (None,), 2, {"out": None})\n\n compat_args = {"axis": 1, "out": None}\n validate_args(_fname, (1, None), 2, compat_args)\n | .venv\Lib\site-packages\pandas\tests\util\test_validate_args.py | test_validate_args.py | Python | 1,907 | 0.95 | 0.085714 | 0.020408 | vue-tools | 762 | 
2024-01-01T13:41:46.235658 | Apache-2.0 | true | 0e3be4db6c15d2b8cf33e01049b17fbd |
import pytest\n\nfrom pandas.util._validators import validate_args_and_kwargs\n\n\n@pytest.fixture\ndef _fname():\n return "func"\n\n\ndef test_invalid_total_length_max_length_one(_fname):\n compat_args = ("foo",)\n kwargs = {"foo": "FOO"}\n args = ("FoO", "BaZ")\n\n min_fname_arg_count = 0\n max_length = len(compat_args) + min_fname_arg_count\n actual_length = len(kwargs) + len(args) + min_fname_arg_count\n\n msg = (\n rf"{_fname}\(\) takes at most {max_length} "\n rf"argument \({actual_length} given\)"\n )\n\n with pytest.raises(TypeError, match=msg):\n validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)\n\n\ndef test_invalid_total_length_max_length_multiple(_fname):\n compat_args = ("foo", "bar", "baz")\n kwargs = {"foo": "FOO", "bar": "BAR"}\n args = ("FoO", "BaZ")\n\n min_fname_arg_count = 2\n max_length = len(compat_args) + min_fname_arg_count\n actual_length = len(kwargs) + len(args) + min_fname_arg_count\n\n msg = (\n rf"{_fname}\(\) takes at most {max_length} "\n rf"arguments \({actual_length} given\)"\n )\n\n with pytest.raises(TypeError, match=msg):\n validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)\n\n\n@pytest.mark.parametrize("args,kwargs", [((), {"foo": -5, "bar": 2}), ((-5, 2), {})])\ndef test_missing_args_or_kwargs(args, kwargs, _fname):\n bad_arg = "bar"\n min_fname_arg_count = 2\n\n compat_args = {"foo": -5, bad_arg: 1}\n\n msg = (\n rf"the '{bad_arg}' parameter is not supported "\n rf"in the pandas implementation of {_fname}\(\)"\n )\n\n with pytest.raises(ValueError, match=msg):\n validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)\n\n\ndef test_duplicate_argument(_fname):\n min_fname_arg_count = 2\n\n compat_args = {"foo": None, "bar": None, "baz": None}\n kwargs = {"foo": None, "bar": None}\n args = (None,) # duplicate value for "foo"\n\n msg = rf"{_fname}\(\) got multiple values for keyword argument 'foo'"\n\n with pytest.raises(TypeError, match=msg):\n 
validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)\n\n\ndef test_validation(_fname):\n # No exceptions should be raised.\n compat_args = {"foo": 1, "bar": None, "baz": -2}\n kwargs = {"baz": -2}\n\n args = (1, None)\n min_fname_arg_count = 2\n\n validate_args_and_kwargs(_fname, args, kwargs, min_fname_arg_count, compat_args)\n | .venv\Lib\site-packages\pandas\tests\util\test_validate_args_and_kwargs.py | test_validate_args_and_kwargs.py | Python | 2,456 | 0.95 | 0.095238 | 0.017544 | vue-tools | 994 | 2024-11-18T14:50:51.981849 | MIT | true | 27f3877494494677ebaf43be1bb00595 |
import numpy as np\nimport pytest\n\nfrom pandas.util._validators import validate_inclusive\n\nimport pandas as pd\n\n\n@pytest.mark.parametrize(\n "invalid_inclusive",\n (\n "ccc",\n 2,\n object(),\n None,\n np.nan,\n pd.NA,\n pd.DataFrame(),\n ),\n)\ndef test_invalid_inclusive(invalid_inclusive):\n with pytest.raises(\n ValueError,\n match="Inclusive has to be either 'both', 'neither', 'left' or 'right'",\n ):\n validate_inclusive(invalid_inclusive)\n\n\n@pytest.mark.parametrize(\n "valid_inclusive, expected_tuple",\n (\n ("left", (True, False)),\n ("right", (False, True)),\n ("both", (True, True)),\n ("neither", (False, False)),\n ),\n)\ndef test_valid_inclusive(valid_inclusive, expected_tuple):\n resultant_tuple = validate_inclusive(valid_inclusive)\n assert expected_tuple == resultant_tuple\n | .venv\Lib\site-packages\pandas\tests\util\test_validate_inclusive.py | test_validate_inclusive.py | Python | 896 | 0.85 | 0.05 | 0 | node-utils | 203 | 2025-06-14T00:51:06.069734 | MIT | true | 5a8c956f7b81b0fa15315bdc3b138402 |
import pytest\n\nfrom pandas.util._validators import (\n validate_bool_kwarg,\n validate_kwargs,\n)\n\n\n@pytest.fixture\ndef _fname():\n return "func"\n\n\ndef test_bad_kwarg(_fname):\n good_arg = "f"\n bad_arg = good_arg + "o"\n\n compat_args = {good_arg: "foo", bad_arg + "o": "bar"}\n kwargs = {good_arg: "foo", bad_arg: "bar"}\n\n msg = rf"{_fname}\(\) got an unexpected keyword argument '{bad_arg}'"\n\n with pytest.raises(TypeError, match=msg):\n validate_kwargs(_fname, kwargs, compat_args)\n\n\n@pytest.mark.parametrize("i", range(1, 3))\ndef test_not_all_none(i, _fname):\n bad_arg = "foo"\n msg = (\n rf"the '{bad_arg}' parameter is not supported "\n rf"in the pandas implementation of {_fname}\(\)"\n )\n\n compat_args = {"foo": 1, "bar": "s", "baz": None}\n\n kwarg_keys = ("foo", "bar", "baz")\n kwarg_vals = (2, "s", None)\n\n kwargs = dict(zip(kwarg_keys[:i], kwarg_vals[:i]))\n\n with pytest.raises(ValueError, match=msg):\n validate_kwargs(_fname, kwargs, compat_args)\n\n\ndef test_validation(_fname):\n # No exceptions should be raised.\n compat_args = {"f": None, "b": 1, "ba": "s"}\n\n kwargs = {"f": None, "b": 1}\n validate_kwargs(_fname, kwargs, compat_args)\n\n\n@pytest.mark.parametrize("name", ["inplace", "copy"])\n@pytest.mark.parametrize("value", [1, "True", [1, 2, 3], 5.0])\ndef test_validate_bool_kwarg_fail(name, value):\n msg = (\n f'For argument "{name}" expected type bool, '\n f"received type {type(value).__name__}"\n )\n\n with pytest.raises(ValueError, match=msg):\n validate_bool_kwarg(value, name)\n\n\n@pytest.mark.parametrize("name", ["inplace", "copy"])\n@pytest.mark.parametrize("value", [True, False, None])\ndef test_validate_bool_kwarg(name, value):\n assert validate_bool_kwarg(value, name) == value\n | .venv\Lib\site-packages\pandas\tests\util\test_validate_kwargs.py | test_validate_kwargs.py | Python | 1,808 | 0.95 | 0.086957 | 0.021277 | vue-tools | 760 | 2025-01-02T01:57:33.791988 | Apache-2.0 | true | 0b1c2ee8ec1ad7ce2454991f94562024 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\conftest.cpython-313.pyc | conftest.cpython-313.pyc | Other | 1,257 | 0.7 | 0 | 0 | awesome-app | 46 | 2024-01-22T16:49:09.138020 | MIT | true | 3271864757a1a148f8b6b7431fa91223 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_assert_almost_equal.cpython-313.pyc | test_assert_almost_equal.cpython-313.pyc | Other | 26,135 | 0.8 | 0 | 0.021097 | node-utils | 708 | 2024-04-27T02:18:27.051825 | MIT | true | 42ae9ae4aac54ee4a016de31a371ff82 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_assert_attr_equal.cpython-313.pyc | test_assert_attr_equal.cpython-313.pyc | Other | 1,568 | 0.8 | 0 | 0 | node-utils | 171 | 2024-11-19T19:27:00.091312 | Apache-2.0 | true | eea8cad8febfb6551b468c52f45fed7f |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_assert_categorical_equal.cpython-313.pyc | test_assert_categorical_equal.cpython-313.pyc | Other | 4,274 | 0.8 | 0 | 0 | node-utils | 5 | 2024-02-16T03:57:59.691456 | MIT | true | 5a23dd8cb385be8bab61a1f44efb4a76 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_assert_extension_array_equal.cpython-313.pyc | test_assert_extension_array_equal.cpython-313.pyc | Other | 5,787 | 0.8 | 0 | 0 | vue-tools | 250 | 2024-12-30T17:09:00.024031 | MIT | true | e75fd3a863f9f12678011b7dfd09b23a |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_assert_frame_equal.cpython-313.pyc | test_assert_frame_equal.cpython-313.pyc | Other | 18,941 | 0.8 | 0 | 0 | node-utils | 992 | 2024-05-26T00:35:57.380082 | MIT | true | ef7db74c7272be8b2028fe26da02c75c |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_assert_index_equal.cpython-313.pyc | test_assert_index_equal.cpython-313.pyc | Other | 14,759 | 0.8 | 0 | 0.016667 | vue-tools | 526 | 2025-01-17T01:12:16.535930 | Apache-2.0 | true | 54371c87fba03630cd80b1401d576e4a |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_assert_interval_array_equal.cpython-313.pyc | test_assert_interval_array_equal.cpython-313.pyc | Other | 3,580 | 0.7 | 0 | 0 | node-utils | 122 | 2025-02-07T22:16:38.423907 | BSD-3-Clause | true | 3c2395d202b2460373d091a76e655fa1 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_assert_numpy_array_equal.cpython-313.pyc | test_assert_numpy_array_equal.cpython-313.pyc | Other | 11,136 | 0.8 | 0 | 0 | node-utils | 595 | 2025-03-14T15:21:34.475208 | MIT | true | afd1739f475c73190c51642efb60ba06 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_assert_produces_warning.cpython-313.pyc | test_assert_produces_warning.cpython-313.pyc | Other | 13,689 | 0.95 | 0.01581 | 0.008439 | python-kit | 3 | 2024-07-31T00:52:57.961309 | GPL-3.0 | true | 06ff0ebe4b5878344116bbbbb069845a |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_assert_series_equal.cpython-313.pyc | test_assert_series_equal.cpython-313.pyc | Other | 22,643 | 0.8 | 0.003067 | 0 | node-utils | 651 | 2024-10-11T15:42:21.449064 | BSD-3-Clause | true | 6a6cdc84a1b1d341ac82837730110357 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_deprecate.cpython-313.pyc | test_deprecate.cpython-313.pyc | Other | 2,702 | 0.8 | 0 | 0.027027 | vue-tools | 378 | 2024-07-28T04:55:11.518704 | GPL-3.0 | true | 7b07ee8cc6ea5517813c2e54c57d73ac |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_deprecate_kwarg.cpython-313.pyc | test_deprecate_kwarg.cpython-313.pyc | Other | 4,702 | 0.8 | 0 | 0.018868 | python-kit | 834 | 2024-02-18T09:47:28.334046 | Apache-2.0 | true | 2ef893ce73b2a51bc509b872dca4faa1 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_deprecate_nonkeyword_arguments.cpython-313.pyc | test_deprecate_nonkeyword_arguments.cpython-313.pyc | Other | 8,367 | 0.8 | 0.070175 | 0.178571 | awesome-app | 502 | 2024-01-01T06:30:41.212486 | BSD-3-Clause | true | b3a88558bc38ac0fd0226f89e399c16b |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_doc.cpython-313.pyc | test_doc.cpython-313.pyc | Other | 2,441 | 0.8 | 0 | 0 | awesome-app | 482 | 2024-02-10T11:58:15.030114 | BSD-3-Clause | true | 25445e96e46e8dcff7a7f1f3750ed316 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_hashing.cpython-313.pyc | test_hashing.cpython-313.pyc | Other | 19,637 | 0.8 | 0.006173 | 0.019108 | node-utils | 953 | 2024-03-19T09:53:09.953139 | Apache-2.0 | true | d0bc6b054ec1300847e4e435754fd8fe |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_numba.cpython-313.pyc | test_numba.cpython-313.pyc | Other | 912 | 0.7 | 0 | 0 | vue-tools | 749 | 2023-10-11T17:02:40.936202 | BSD-3-Clause | true | 25f4f2b1411ca15b490ff9db06ff2b0e |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_rewrite_warning.cpython-313.pyc | test_rewrite_warning.cpython-313.pyc | Other | 1,558 | 0.8 | 0 | 0 | awesome-app | 645 | 2024-05-03T23:02:40.571707 | Apache-2.0 | true | 8c6b7324eef6618d8ab8bc4979b4e31e |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_shares_memory.cpython-313.pyc | test_shares_memory.cpython-313.pyc | Other | 2,176 | 0.7 | 0 | 0 | react-lib | 452 | 2024-05-02T21:30:25.261849 | Apache-2.0 | true | c1d3d6c9f9cbe5b35f889a359317bb11 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_show_versions.cpython-313.pyc | test_show_versions.cpython-313.pyc | Other | 2,923 | 0.8 | 0 | 0.058824 | python-kit | 147 | 2023-12-08T22:26:36.298633 | BSD-3-Clause | true | ce3785148fce40cc537c2cc55e3d61c8 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_util.cpython-313.pyc | test_util.cpython-313.pyc | Other | 2,981 | 0.8 | 0 | 0 | node-utils | 336 | 2025-06-28T17:07:42.292353 | GPL-3.0 | true | e747651e7c3f7696844e84e8ceebc377 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_validate_args.cpython-313.pyc | test_validate_args.cpython-313.pyc | Other | 3,043 | 0.7 | 0 | 0 | awesome-app | 64 | 2025-07-08T12:46:35.447313 | Apache-2.0 | true | 699582732d26994be164a170ee15502a |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_validate_args_and_kwargs.cpython-313.pyc | test_validate_args_and_kwargs.cpython-313.pyc | Other | 3,383 | 0.8 | 0.025 | 0 | node-utils | 91 | 2025-03-02T04:02:41.619452 | BSD-3-Clause | true | 9fcdc6b49427e49552421f79941c6642 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_validate_inclusive.cpython-313.pyc | test_validate_inclusive.cpython-313.pyc | Other | 1,453 | 0.7 | 0 | 0 | react-lib | 869 | 2024-04-16T22:54:27.065856 | MIT | true | 54f3e96ce87d6e29ec9b8df3ac6ee4da |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\test_validate_kwargs.cpython-313.pyc | test_validate_kwargs.cpython-313.pyc | Other | 3,261 | 0.8 | 0 | 0 | awesome-app | 185 | 2025-04-30T00:56:50.441557 | MIT | true | 5fa8cb98b3140e910d20924e3a0d8f94 |
\n\n | .venv\Lib\site-packages\pandas\tests\util\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 192 | 0.7 | 0 | 0 | vue-tools | 829 | 2024-11-18T14:45:07.609778 | GPL-3.0 | true | 68660058e410365f0b63072a443b2349 |
from datetime import (\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n DataFrame,\n Series,\n bdate_range,\n)\n\n\n@pytest.fixture(params=[True, False])\ndef raw(request):\n """raw keyword argument for rolling.apply"""\n return request.param\n\n\n@pytest.fixture(\n params=[\n "sum",\n "mean",\n "median",\n "max",\n "min",\n "var",\n "std",\n "kurt",\n "skew",\n "count",\n "sem",\n ]\n)\ndef arithmetic_win_operators(request):\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef center(request):\n return request.param\n\n\n@pytest.fixture(params=[None, 1])\ndef min_periods(request):\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef parallel(request):\n """parallel keyword argument for numba.jit"""\n return request.param\n\n\n# Can parameterize nogil & nopython over True | False, but limiting per\n# https://github.com/pandas-dev/pandas/pull/41971#issuecomment-860607472\n\n\n@pytest.fixture(params=[False])\ndef nogil(request):\n """nogil keyword argument for numba.jit"""\n return request.param\n\n\n@pytest.fixture(params=[True])\ndef nopython(request):\n """nopython keyword argument for numba.jit"""\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef adjust(request):\n """adjust keyword argument for ewm"""\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef ignore_na(request):\n """ignore_na keyword argument for ewm"""\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef numeric_only(request):\n """numeric_only keyword argument"""\n return request.param\n\n\n@pytest.fixture(\n params=[\n pytest.param("numba", marks=[td.skip_if_no("numba"), pytest.mark.single_cpu]),\n "cython",\n ]\n)\ndef engine(request):\n """engine keyword argument for rolling.apply"""\n return request.param\n\n\n@pytest.fixture(\n params=[\n pytest.param(\n ("numba", True), marks=[td.skip_if_no("numba"), 
pytest.mark.single_cpu]\n ),\n ("cython", True),\n ("cython", False),\n ]\n)\ndef engine_and_raw(request):\n """engine and raw keyword arguments for rolling.apply"""\n return request.param\n\n\n@pytest.fixture(params=["1 day", timedelta(days=1), np.timedelta64(1, "D")])\ndef halflife_with_times(request):\n """Halflife argument for EWM when times is specified."""\n return request.param\n\n\n@pytest.fixture\ndef series():\n """Make mocked series as fixture."""\n arr = np.random.default_rng(2).standard_normal(100)\n locs = np.arange(20, 40)\n arr[locs] = np.nan\n series = Series(arr, index=bdate_range(datetime(2009, 1, 1), periods=100))\n return series\n\n\n@pytest.fixture\ndef frame():\n """Make mocked frame as fixture."""\n return DataFrame(\n np.random.default_rng(2).standard_normal((100, 10)),\n index=bdate_range(datetime(2009, 1, 1), periods=100),\n )\n\n\n@pytest.fixture(params=[None, 1, 2, 5, 10])\ndef step(request):\n """step keyword argument for rolling window operations."""\n return request.param\n | .venv\Lib\site-packages\pandas\tests\window\conftest.py | conftest.py | Python | 3,137 | 0.95 | 0.178082 | 0.018349 | vue-tools | 690 | 2025-05-12T16:33:05.554649 | MIT | true | 9f024e8e7457d088fb7cf9fd19a99492 |
import numpy as np
import pytest

from pandas.errors import (
    DataError,
    SpecificationError,
)

from pandas import (
    DataFrame,
    Index,
    MultiIndex,
    Period,
    Series,
    Timestamp,
    concat,
    date_range,
    timedelta_range,
)
import pandas._testing as tm


def test_getitem(step):
    # __getitem__ on a Rolling object selects columns; the selection must
    # reflect the step-sliced view of the original frame.
    frame = DataFrame(np.random.default_rng(2).standard_normal((5, 5)))
    r = frame.rolling(window=5, step=step)
    tm.assert_index_equal(r._selected_obj.columns, frame[::step].columns)

    r = frame.rolling(window=5, step=step)[1]
    assert r._selected_obj.name == frame[::step].columns[1]

    # technically this is allowed
    r = frame.rolling(window=5, step=step)[1, 3]
    tm.assert_index_equal(r._selected_obj.columns, frame[::step].columns[[1, 3]])

    r = frame.rolling(window=5, step=step)[[1, 3]]
    tm.assert_index_equal(r._selected_obj.columns, frame[::step].columns[[1, 3]])


def test_select_bad_cols():
    # Selecting a missing column raises KeyError naming only the bad columns.
    df = DataFrame([[1, 2]], columns=["A", "B"])
    g = df.rolling(window=5)
    with pytest.raises(KeyError, match="Columns not found: 'C'"):
        g[["C"]]
    with pytest.raises(KeyError, match="^[^A]+$"):
        # A should not be referenced as a bad column...
        # will have to rethink regex if you change message!
        g[["A", "C"]]


def test_attribute_access():
    # r.A is equivalent to r["A"]; unknown attributes raise AttributeError.
    df = DataFrame([[1, 2]], columns=["A", "B"])
    r = df.rolling(window=5)
    tm.assert_series_equal(r.A.sum(), r["A"].sum())
    msg = "'Rolling' object has no attribute 'F'"
    with pytest.raises(AttributeError, match=msg):
        r.F


def tests_skip_nuisance(step):
    # Explicitly selecting numeric columns excludes the object column "C".
    df = DataFrame({"A": range(5), "B": range(5, 10), "C": "foo"})
    r = df.rolling(window=3, step=step)
    result = r[["A", "B"]].sum()
    expected = DataFrame(
        {"A": [np.nan, np.nan, 3, 6, 9], "B": [np.nan, np.nan, 18, 21, 24]},
        columns=list("AB"),
    )[::step]
    tm.assert_frame_equal(result, expected)


def test_sum_object_str_raises(step):
    # Aggregating over an object-dtype column raises instead of silently
    # dropping it.
    df = DataFrame({"A": range(5), "B": range(5, 10), "C": "foo"})
    r = df.rolling(window=3, step=step)
    with pytest.raises(
        DataError, match="Cannot aggregate non-numeric type: object|str"
    ):
        # GH#42738, enforced in 2.0
        r.sum()


def test_agg(step):
    # Exercise the main aggregate() signatures: list of funcs, dict of
    # column->func, dict of column->list, and the disallowed nested-dict
    # renaming forms.
    df = DataFrame({"A": range(5), "B": range(0, 10, 2)})

    r = df.rolling(window=3, step=step)
    a_mean = r["A"].mean()
    a_std = r["A"].std()
    a_sum = r["A"].sum()
    b_mean = r["B"].mean()
    b_std = r["B"].std()

    with tm.assert_produces_warning(FutureWarning, match="using Rolling.[mean|std]"):
        result = r.aggregate([np.mean, np.std])
    expected = concat([a_mean, a_std, b_mean, b_std], axis=1)
    expected.columns = MultiIndex.from_product([["A", "B"], ["mean", "std"]])
    tm.assert_frame_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning, match="using Rolling.[mean|std]"):
        result = r.aggregate({"A": np.mean, "B": np.std})

    expected = concat([a_mean, b_std], axis=1)
    tm.assert_frame_equal(result, expected, check_like=True)

    result = r.aggregate({"A": ["mean", "std"]})
    expected = concat([a_mean, a_std], axis=1)
    expected.columns = MultiIndex.from_tuples([("A", "mean"), ("A", "std")])
    tm.assert_frame_equal(result, expected)

    result = r["A"].aggregate(["mean", "sum"])
    expected = concat([a_mean, a_sum], axis=1)
    expected.columns = ["mean", "sum"]
    tm.assert_frame_equal(result, expected)

    msg = "nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        # using a dict with renaming
        r.aggregate({"A": {"mean": "mean", "sum": "sum"}})

    with pytest.raises(SpecificationError, match=msg):
        r.aggregate(
            {"A": {"mean": "mean", "sum": "sum"}, "B": {"mean2": "mean", "sum2": "sum"}}
        )

    result = r.aggregate({"A": ["mean", "std"], "B": ["mean", "std"]})
    expected = concat([a_mean, a_std, b_mean, b_std], axis=1)

    exp_cols = [("A", "mean"), ("A", "std"), ("B", "mean"), ("B", "std")]
    expected.columns = MultiIndex.from_tuples(exp_cols)
    tm.assert_frame_equal(result, expected, check_like=True)


@pytest.mark.parametrize(
    "func", [["min"], ["mean", "max"], {"b": "sum"}, {"b": "prod", "c": "median"}]
)
def test_multi_axis_1_raises(func):
    # GH#46904
    df = DataFrame({"a": [1, 1, 2], "b": [3, 4, 5], "c": [6, 7, 8]})
    msg = "Support for axis=1 in DataFrame.rolling is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        r = df.rolling(window=3, axis=1)
    with pytest.raises(NotImplementedError, match="axis other than 0 is not supported"):
        r.agg(func)


def test_agg_apply(raw):
    # passed lambda
    df = DataFrame({"A": range(5), "B": range(0, 10, 2)})

    r = df.rolling(window=3)
    a_sum = r["A"].sum()

    with tm.assert_produces_warning(FutureWarning, match="using Rolling.[sum|std]"):
        result = r.agg({"A": np.sum, "B": lambda x: np.std(x, ddof=1)})
    rcustom = r["B"].apply(lambda x: np.std(x, ddof=1), raw=raw)
    expected = concat([a_sum, rcustom], axis=1)
    tm.assert_frame_equal(result, expected, check_like=True)


def test_agg_consistency(step):
    # Resulting column structure must be consistent across the agg()
    # input forms (list, single-column list, dict-of-list).
    df = DataFrame({"A": range(5), "B": range(0, 10, 2)})
    r = df.rolling(window=3, step=step)

    with tm.assert_produces_warning(FutureWarning, match="using Rolling.[sum|mean]"):
        result = r.agg([np.sum, np.mean]).columns
    expected = MultiIndex.from_product([list("AB"), ["sum", "mean"]])
    tm.assert_index_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning, match="using Rolling.[sum|mean]"):
        result = r["A"].agg([np.sum, np.mean]).columns
    expected = Index(["sum", "mean"])
    tm.assert_index_equal(result, expected)

    with tm.assert_produces_warning(FutureWarning, match="using Rolling.[sum|mean]"):
        result = r.agg({"A": [np.sum, np.mean]}).columns
    expected = MultiIndex.from_tuples([("A", "sum"), ("A", "mean")])
    tm.assert_index_equal(result, expected)


def test_agg_nested_dicts():
    # API change for disallowing these types of nested dicts
    df = DataFrame({"A": range(5), "B": range(0, 10, 2)})
    r = df.rolling(window=3)

    msg = "nested renamer is not supported"
    with pytest.raises(SpecificationError, match=msg):
        r.aggregate({"r1": {"A": ["mean", "sum"]}, "r2": {"B": ["mean", "sum"]}})

    expected = concat(
        [r["A"].mean(), r["A"].std(), r["B"].mean(), r["B"].std()], axis=1
    )
    expected.columns = MultiIndex.from_tuples(
        [("ra", "mean"), ("ra", "std"), ("rb", "mean"), ("rb", "std")]
    )
    with pytest.raises(SpecificationError, match=msg):
        r[["A", "B"]].agg({"A": {"ra": ["mean", "std"]}, "B": {"rb": ["mean", "std"]}})

    with pytest.raises(SpecificationError, match=msg):
        r.agg({"A": {"ra": ["mean", "std"]}, "B": {"rb": ["mean", "std"]}})


def test_count_nonnumeric_types(step):
    # GH12541
    # rolling().count() must work on every dtype, counting non-missing
    # entries per window (NaN/NaT/inf handling varies by column).
    cols = [
        "int",
        "float",
        "string",
        "datetime",
        "timedelta",
        "periods",
        "fl_inf",
        "fl_nan",
        "str_nan",
        "dt_nat",
        "periods_nat",
    ]
    dt_nat_col = [Timestamp("20170101"), Timestamp("20170203"), Timestamp(None)]

    df = DataFrame(
        {
            "int": [1, 2, 3],
            "float": [4.0, 5.0, 6.0],
            "string": list("abc"),
            "datetime": date_range("20170101", periods=3),
            "timedelta": timedelta_range("1 s", periods=3, freq="s"),
            "periods": [
                Period("2012-01"),
                Period("2012-02"),
                Period("2012-03"),
            ],
            "fl_inf": [1.0, 2.0, np.inf],
            "fl_nan": [1.0, 2.0, np.nan],
            "str_nan": ["aa", "bb", np.nan],
            "dt_nat": dt_nat_col,
            "periods_nat": [
                Period("2012-01"),
                Period("2012-02"),
                Period(None),
            ],
        },
        columns=cols,
    )

    expected = DataFrame(
        {
            "int": [1.0, 2.0, 2.0],
            "float": [1.0, 2.0, 2.0],
            "string": [1.0, 2.0, 2.0],
            "datetime": [1.0, 2.0, 2.0],
            "timedelta": [1.0, 2.0, 2.0],
            "periods": [1.0, 2.0, 2.0],
            "fl_inf": [1.0, 2.0, 2.0],
            "fl_nan": [1.0, 2.0, 1.0],
            "str_nan": [1.0, 2.0, 1.0],
            "dt_nat": [1.0, 2.0, 1.0],
            "periods_nat": [1.0, 2.0, 1.0],
        },
        columns=cols,
    )[::step]

    result = df.rolling(window=2, min_periods=0, step=step).count()
    tm.assert_frame_equal(result, expected)

    result = df.rolling(1, min_periods=0, step=step).count()
    expected = df.notna().astype(float)[::step]
    tm.assert_frame_equal(result, expected)


def test_preserve_metadata():
    # GH 10565
    s = Series(np.arange(100), name="foo")

    s2 = s.rolling(30).sum()
    s3 = s.rolling(20).sum()
    assert s2.name == "foo"
    assert s3.name == "foo"


@pytest.mark.parametrize(
    "func,window_size,expected_vals",
    [
        (
            "rolling",
            2,
            [
                [np.nan, np.nan, np.nan, np.nan],
                [15.0, 20.0, 25.0, 20.0],
                [25.0, 30.0, 35.0, 30.0],
                [np.nan, np.nan, np.nan, np.nan],
                [20.0, 30.0, 35.0, 30.0],
                [35.0, 40.0, 60.0, 40.0],
                [60.0, 80.0, 85.0, 80],
            ],
        ),
        (
            "expanding",
            None,
            [
                [10.0, 10.0, 20.0, 20.0],
                [15.0, 20.0, 25.0, 20.0],
                [20.0, 30.0, 30.0, 20.0],
                [10.0, 10.0, 30.0, 30.0],
                [20.0, 30.0, 35.0, 30.0],
                [26.666667, 40.0, 50.0, 30.0],
                [40.0, 80.0, 60.0, 30.0],
            ],
        ),
    ],
)
def test_multiple_agg_funcs(func, window_size, expected_vals):
    # GH 15072
    df = DataFrame(
        [
            ["A", 10, 20],
            ["A", 20, 30],
            ["A", 30, 40],
            ["B", 10, 30],
            ["B", 30, 40],
            ["B", 40, 80],
            ["B", 80, 90],
        ],
        columns=["stock", "low", "high"],
    )

    f = getattr(df.groupby("stock"), func)
    if window_size:
        window = f(window_size)
    else:
        window = f()

    index = MultiIndex.from_tuples(
        [("A", 0), ("A", 1), ("A", 2), ("B", 3), ("B", 4), ("B", 5), ("B", 6)],
        names=["stock", None],
    )
    columns = MultiIndex.from_tuples(
        [("low", "mean"), ("low", "max"), ("high", "mean"), ("high", "min")]
    )
    expected = DataFrame(expected_vals, index=index, columns=columns)

    result = window.agg({"low": ["mean", "max"], "high": ["mean", "min"]})

    tm.assert_frame_equal(result, expected)


def test_dont_modify_attributes_after_methods(
    arithmetic_win_operators, closed, center, min_periods, step
):
    # GH 39554
    roll_obj = Series(range(1)).rolling(
        1, center=center, closed=closed, min_periods=min_periods, step=step
    )
    expected = {attr: getattr(roll_obj, attr) for attr in roll_obj._attributes}
    getattr(roll_obj, arithmetic_win_operators)()
    result = {attr: getattr(roll_obj, attr) for attr in roll_obj._attributes}
    assert result == expected


def test_centered_axis_validation(step):
    # ok
    msg = "The 'axis' keyword in Series.rolling is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        Series(np.ones(10)).rolling(window=3, center=True, axis=0, step=step).mean()

    # bad axis
    msg = "No axis named 1 for object type Series"
    with pytest.raises(ValueError, match=msg):
        Series(np.ones(10)).rolling(window=3, center=True, axis=1, step=step).mean()

    # ok ok
    df = DataFrame(np.ones((10, 10)))
    msg = "The 'axis' keyword in DataFrame.rolling is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df.rolling(window=3, center=True, axis=0, step=step).mean()
    msg = "Support for axis=1 in DataFrame.rolling is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        df.rolling(window=3, center=True, axis=1, step=step).mean()

    # bad axis
    msg = "No axis named 2 for object type DataFrame"
    with pytest.raises(ValueError, match=msg):
        (df.rolling(window=3, center=True, axis=2, step=step).mean())


def test_rolling_min_min_periods(step):
    # min_periods=1 lets a too-large window still produce values;
    # min_periods > window is rejected.
    a = Series([1, 2, 3, 4, 5])
    result = a.rolling(window=100, min_periods=1, step=step).min()
    expected = Series(np.ones(len(a)))[::step]
    tm.assert_series_equal(result, expected)
    msg = "min_periods 5 must be <= window 3"
    with pytest.raises(ValueError, match=msg):
        Series([1, 2, 3]).rolling(window=3, min_periods=5, step=step).min()


def test_rolling_max_min_periods(step):
    # Same contract as test_rolling_min_min_periods, for max().
    a = Series([1, 2, 3, 4, 5], dtype=np.float64)
    result = a.rolling(window=100, min_periods=1, step=step).max()
    expected = a[::step]
    tm.assert_almost_equal(result, expected)
    msg = "min_periods 5 must be <= window 3"
    with pytest.raises(ValueError, match=msg):
        Series([1, 2, 3]).rolling(window=3, min_periods=5, step=step).max()
.venv\Lib\site-packages\pandas\tests\window\test_api.py | test_api.py | Python | 13,189 | 0.95 | 0.065327 | 0.048632 | node-utils | 280 | 2024-04-22T22:14:46.553092 | MIT | true | d4e31f2bf831556be3e9eaad13af3702 |
import numpy as np
import pytest

from pandas import (
    DataFrame,
    Index,
    MultiIndex,
    Series,
    Timestamp,
    concat,
    date_range,
    isna,
    notna,
)
import pandas._testing as tm

from pandas.tseries import offsets

# suppress warnings about empty slices, as we are deliberately testing
# with a 0-length Series
pytestmark = pytest.mark.filterwarnings(
    "ignore:.*(empty slice|0 for slice).*:RuntimeWarning"
)


def f(x):
    # Shared applied function: mean over finite (non-NaN/inf) values only.
    return x[np.isfinite(x)].mean()


@pytest.mark.parametrize("bad_raw", [None, 1, 0])
def test_rolling_apply_invalid_raw(bad_raw):
    # raw must be a bool, not merely truthy/falsy.
    with pytest.raises(ValueError, match="raw parameter must be `True` or `False`"):
        Series(range(3)).rolling(1).apply(len, raw=bad_raw)


def test_rolling_apply_out_of_bounds(engine_and_raw):
    # gh-1850
    engine, raw = engine_and_raw

    vals = Series([1, 2, 3, 4])

    result = vals.rolling(10).apply(np.sum, engine=engine, raw=raw)
    assert result.isna().all()

    result = vals.rolling(10, min_periods=1).apply(np.sum, engine=engine, raw=raw)
    expected = Series([1, 3, 6, 10], dtype=float)
    tm.assert_almost_equal(result, expected)


@pytest.mark.parametrize("window", [2, "2s"])
def test_rolling_apply_with_pandas_objects(window):
    # 5071
    df = DataFrame(
        {
            "A": np.random.default_rng(2).standard_normal(5),
            "B": np.random.default_rng(2).integers(0, 10, size=5),
        },
        index=date_range("20130101", periods=5, freq="s"),
    )

    # we have an equal spaced timeseries index
    # so simulate removing the first period
    def f(x):
        if x.index[0] == df.index[0]:
            return np.nan
        return x.iloc[-1]

    result = df.rolling(window).apply(f, raw=False)
    expected = df.iloc[2:].reindex_like(df)
    tm.assert_frame_equal(result, expected)

    # raw=True passes ndarrays, which have no .index attribute
    with tm.external_error_raised(AttributeError):
        df.rolling(window).apply(f, raw=True)


def test_rolling_apply(engine_and_raw, step):
    engine, raw = engine_and_raw

    # empty input stays empty
    expected = Series([], dtype="float64")
    result = expected.rolling(10, step=step).apply(
        lambda x: x.mean(), engine=engine, raw=raw
    )
    tm.assert_series_equal(result, expected)

    # gh-8080
    s = Series([None, None, None])
    result = s.rolling(2, min_periods=0, step=step).apply(
        lambda x: len(x), engine=engine, raw=raw
    )
    expected = Series([1.0, 2.0, 2.0])[::step]
    tm.assert_series_equal(result, expected)

    result = s.rolling(2, min_periods=0, step=step).apply(len, engine=engine, raw=raw)
    tm.assert_series_equal(result, expected)


def test_all_apply(engine_and_raw):
    # Fixed window of 1 and time-based window "1s" on a 1s-spaced index
    # must agree.
    engine, raw = engine_and_raw

    df = (
        DataFrame(
            {"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
        ).set_index("A")
        * 2
    )
    er = df.rolling(window=1)
    r = df.rolling(window="1s")

    result = r.apply(lambda x: 1, engine=engine, raw=raw)
    expected = er.apply(lambda x: 1, engine=engine, raw=raw)
    tm.assert_frame_equal(result, expected)


def test_ragged_apply(engine_and_raw):
    # Time-based windows over an irregularly spaced index.
    engine, raw = engine_and_raw

    df = DataFrame({"B": range(5)})
    df.index = [
        Timestamp("20130101 09:00:00"),
        Timestamp("20130101 09:00:02"),
        Timestamp("20130101 09:00:03"),
        Timestamp("20130101 09:00:05"),
        Timestamp("20130101 09:00:06"),
    ]

    f = lambda x: 1
    result = df.rolling(window="1s", min_periods=1).apply(f, engine=engine, raw=raw)
    expected = df.copy()
    expected["B"] = 1.0
    tm.assert_frame_equal(result, expected)

    result = df.rolling(window="2s", min_periods=1).apply(f, engine=engine, raw=raw)
    expected = df.copy()
    expected["B"] = 1.0
    tm.assert_frame_equal(result, expected)

    result = df.rolling(window="5s", min_periods=1).apply(f, engine=engine, raw=raw)
    expected = df.copy()
    expected["B"] = 1.0
    tm.assert_frame_equal(result, expected)


def test_invalid_engine():
    with pytest.raises(ValueError, match="engine must be either 'numba' or 'cython'"):
        Series(range(1)).rolling(1).apply(lambda x: x, engine="foo")


def test_invalid_engine_kwargs_cython():
    with pytest.raises(ValueError, match="cython engine does not accept engine_kwargs"):
        Series(range(1)).rolling(1).apply(
            lambda x: x, engine="cython", engine_kwargs={"nopython": False}
        )


def test_invalid_raw_numba():
    with pytest.raises(
        ValueError, match="raw must be `True` when using the numba engine"
    ):
        Series(range(1)).rolling(1).apply(lambda x: x, raw=False, engine="numba")


@pytest.mark.parametrize("args_kwargs", [[None, {"par": 10}], [(10,), None]])
def test_rolling_apply_args_kwargs(args_kwargs):
    # GH 33433
    def numpysum(x, par):
        return np.sum(x + par)

    df = DataFrame({"gr": [1, 1], "a": [1, 2]})

    idx = Index(["gr", "a"])
    expected = DataFrame([[11.0, 11.0], [11.0, 12.0]], columns=idx)

    result = df.rolling(1).apply(numpysum, args=args_kwargs[0], kwargs=args_kwargs[1])
    tm.assert_frame_equal(result, expected)

    midx = MultiIndex.from_tuples([(1, 0), (1, 1)], names=["gr", None])
    expected = Series([11.0, 12.0], index=midx, name="a")

    gb_rolling = df.groupby("gr")["a"].rolling(1)

    result = gb_rolling.apply(numpysum, args=args_kwargs[0], kwargs=args_kwargs[1])
    tm.assert_series_equal(result, expected)


def test_nans(raw):
    # NaN blocks at both ends exercise min_periods thresholding of apply.
    obj = Series(np.random.default_rng(2).standard_normal(50))
    obj[:10] = np.nan
    obj[-10:] = np.nan

    result = obj.rolling(50, min_periods=30).apply(f, raw=raw)
    tm.assert_almost_equal(result.iloc[-1], np.mean(obj[10:-10]))

    # min_periods is working correctly
    result = obj.rolling(20, min_periods=15).apply(f, raw=raw)
    assert isna(result.iloc[23])
    assert not isna(result.iloc[24])

    assert not isna(result.iloc[-6])
    assert isna(result.iloc[-5])

    obj2 = Series(np.random.default_rng(2).standard_normal(20))
    result = obj2.rolling(10, min_periods=5).apply(f, raw=raw)
    assert isna(result.iloc[3])
    assert notna(result.iloc[4])

    result0 = obj.rolling(20, min_periods=0).apply(f, raw=raw)
    result1 = obj.rolling(20, min_periods=1).apply(f, raw=raw)
    tm.assert_almost_equal(result0, result1)


def test_center(raw):
    # center=True is equivalent to padding the tail with NaNs, running a
    # trailing window, then shifting the result back.
    obj = Series(np.random.default_rng(2).standard_normal(50))
    obj[:10] = np.nan
    obj[-10:] = np.nan

    result = obj.rolling(20, min_periods=15, center=True).apply(f, raw=raw)
    expected = (
        concat([obj, Series([np.nan] * 9)])
        .rolling(20, min_periods=15)
        .apply(f, raw=raw)
        .iloc[9:]
        .reset_index(drop=True)
    )
    tm.assert_series_equal(result, expected)


def test_series(raw, series):
    result = series.rolling(50).apply(f, raw=raw)
    assert isinstance(result, Series)
    tm.assert_almost_equal(result.iloc[-1], np.mean(series[-50:]))


def test_frame(raw, frame):
    result = frame.rolling(50).apply(f, raw=raw)
    assert isinstance(result, DataFrame)
    tm.assert_series_equal(
        result.iloc[-1, :],
        frame.iloc[-50:, :].apply(np.mean, axis=0, raw=raw),
        check_names=False,
    )


def test_time_rule_series(raw, series):
    # Resample to business-day frequency then verify the last rolling value
    # against a direct mean over the truncated source window.
    win = 25
    minp = 10
    ser = series[::2].resample("B").mean()
    series_result = ser.rolling(window=win, min_periods=minp).apply(f, raw=raw)
    last_date = series_result.index[-1]
    prev_date = last_date - 24 * offsets.BDay()

    trunc_series = series[::2].truncate(prev_date, last_date)
    tm.assert_almost_equal(series_result.iloc[-1], np.mean(trunc_series))


def test_time_rule_frame(raw, frame):
    # Frame analogue of test_time_rule_series.
    win = 25
    minp = 10
    frm = frame[::2].resample("B").mean()
    frame_result = frm.rolling(window=win, min_periods=minp).apply(f, raw=raw)
    last_date = frame_result.index[-1]
    prev_date = last_date - 24 * offsets.BDay()

    trunc_frame = frame[::2].truncate(prev_date, last_date)
    tm.assert_series_equal(
        frame_result.xs(last_date),
        trunc_frame.apply(np.mean, raw=raw),
        check_names=False,
    )


@pytest.mark.parametrize("minp", [0, 99, 100])
def test_min_periods(raw, series, minp, step):
    # Window larger than the series behaves like window == len(series).
    result = series.rolling(len(series) + 1, min_periods=minp, step=step).apply(
        f, raw=raw
    )
    expected = series.rolling(len(series), min_periods=minp, step=step).apply(
        f, raw=raw
    )
    nan_mask = isna(result)
    tm.assert_series_equal(nan_mask, isna(expected))

    nan_mask = ~nan_mask
    tm.assert_almost_equal(result[nan_mask], expected[nan_mask])


def test_center_reindex_series(raw, series):
    # shifter index
    s = [f"x{x:d}" for x in range(12)]
    minp = 10

    series_xp = (
        series.reindex(list(series.index) + s)
        .rolling(window=25, min_periods=minp)
        .apply(f, raw=raw)
        .shift(-12)
        .reindex(series.index)
    )
    series_rs = series.rolling(window=25, min_periods=minp, center=True).apply(
        f, raw=raw
    )
    tm.assert_series_equal(series_xp, series_rs)


def test_center_reindex_frame(raw):
    # shifter index
    frame = DataFrame(range(100), index=date_range("2020-01-01", freq="D", periods=100))
    s = [f"x{x:d}" for x in range(12)]
    minp = 10

    frame_xp = (
        frame.reindex(list(frame.index) + s)
        .rolling(window=25, min_periods=minp)
        .apply(f, raw=raw)
        .shift(-12)
        .reindex(frame.index)
    )
    frame_rs = frame.rolling(window=25, min_periods=minp, center=True).apply(f, raw=raw)
    tm.assert_frame_equal(frame_xp, frame_rs)


def test_axis1(raw):
    # GH 45912
    df = DataFrame([1, 2])
    msg = "Support for axis=1 in DataFrame.rolling is deprecated"
    with tm.assert_produces_warning(FutureWarning, match=msg):
        result = df.rolling(window=1, axis=1).apply(np.sum, raw=raw)
    expected = DataFrame([1.0, 2.0])
    tm.assert_frame_equal(result, expected)
import numpy as np
import pytest

from pandas import (
    DataFrame,
    MultiIndex,
    Series,
    concat,
    date_range,
)
import pandas._testing as tm
from pandas.api.indexers import (
    BaseIndexer,
    FixedForwardWindowIndexer,
)
from pandas.core.indexers.objects import (
    ExpandingIndexer,
    FixedWindowIndexer,
    VariableOffsetWindowIndexer,
)

from pandas.tseries.offsets import BusinessDay


def test_bad_get_window_bounds_signature():
    # An indexer whose get_window_bounds lacks the required parameters is
    # rejected up front.
    class BadIndexer(BaseIndexer):
        def get_window_bounds(self):
            return None

    indexer = BadIndexer()
    with pytest.raises(ValueError, match="BadIndexer does not implement"):
        Series(range(5)).rolling(indexer)


def test_expanding_indexer():
    # rolling with ExpandingIndexer matches expanding().
    s = Series(range(10))
    indexer = ExpandingIndexer()
    result = s.rolling(indexer).mean()
    expected = s.expanding().mean()
    tm.assert_series_equal(result, expected)


def test_indexer_constructor_arg():
    # Example found in computation.rst
    use_expanding = [True, False, True, False, True]
    df = DataFrame({"values": range(5)})

    class CustomIndexer(BaseIndexer):
        def get_window_bounds(self, num_values, min_periods, center, closed, step):
            start = np.empty(num_values, dtype=np.int64)
            end = np.empty(num_values, dtype=np.int64)
            for i in range(num_values):
                if self.use_expanding[i]:
                    start[i] = 0
                    end[i] = i + 1
                else:
                    start[i] = i
                    end[i] = i + self.window_size
            return start, end

    indexer = CustomIndexer(window_size=1, use_expanding=use_expanding)
    result = df.rolling(indexer).sum()
    expected = DataFrame({"values": [0.0, 1.0, 3.0, 3.0, 10.0]})
    tm.assert_frame_equal(result, expected)


def test_indexer_accepts_rolling_args():
    # The rolling() keyword arguments must be forwarded to
    # get_window_bounds; the indexer changes behavior only when they all
    # arrive with the expected values.
    df = DataFrame({"values": range(5)})

    class CustomIndexer(BaseIndexer):
        def get_window_bounds(self, num_values, min_periods, center, closed, step):
            start = np.empty(num_values, dtype=np.int64)
            end = np.empty(num_values, dtype=np.int64)
            for i in range(num_values):
                if (
                    center
                    and min_periods == 1
                    and closed == "both"
                    and step == 1
                    and i == 2
                ):
                    start[i] = 0
                    end[i] = num_values
                else:
                    start[i] = i
                    end[i] = i + self.window_size
            return start, end

    indexer = CustomIndexer(window_size=1)
    result = df.rolling(
        indexer, center=True, min_periods=1, closed="both", step=1
    ).sum()
    expected = DataFrame({"values": [0.0, 1.0, 10.0, 3.0, 4.0]})
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "func,np_func,expected,np_kwargs",
    [
        ("count", len, [3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 3.0, 2.0, np.nan], {}),
        ("min", np.min, [0.0, 1.0, 2.0, 3.0, 4.0, 6.0, 6.0, 7.0, 8.0, np.nan], {}),
        (
            "max",
            np.max,
            [2.0, 3.0, 4.0, 100.0, 100.0, 100.0, 8.0, 9.0, 9.0, np.nan],
            {},
        ),
        (
            "std",
            np.std,
            [
                1.0,
                1.0,
                1.0,
                55.71654452,
                54.85739087,
                53.9845657,
                1.0,
                1.0,
                0.70710678,
                np.nan,
            ],
            {"ddof": 1},
        ),
        (
            "var",
            np.var,
            [
                1.0,
                1.0,
                1.0,
                3104.333333,
                3009.333333,
                2914.333333,
                1.0,
                1.0,
                0.500000,
                np.nan,
            ],
            {"ddof": 1},
        ),
        (
            "median",
            np.median,
            [1.0, 2.0, 3.0, 4.0, 6.0, 7.0, 7.0, 8.0, 8.5, np.nan],
            {},
        ),
    ],
)
def test_rolling_forward_window(
    frame_or_series, func, np_func, expected, np_kwargs, step
):
    # GH 32865
    values = np.arange(10.0)
    values[5] = 100.0

    indexer = FixedForwardWindowIndexer(window_size=3)

    match = "Forward-looking windows can't have center=True"
    with pytest.raises(ValueError, match=match):
        rolling = frame_or_series(values).rolling(window=indexer, center=True)
        getattr(rolling, func)()

    match = "Forward-looking windows don't support setting the closed argument"
    with pytest.raises(ValueError, match=match):
        rolling = frame_or_series(values).rolling(window=indexer, closed="right")
        getattr(rolling, func)()

    rolling = frame_or_series(values).rolling(window=indexer, min_periods=2, step=step)
    result = getattr(rolling, func)()

    # Check that the function output matches the explicitly provided array
    expected = frame_or_series(expected)[::step]
    tm.assert_equal(result, expected)

    # Check that the rolling function output matches applying an alternative
    # function to the rolling window object
    expected2 = frame_or_series(rolling.apply(lambda x: np_func(x, **np_kwargs)))
    tm.assert_equal(result, expected2)

    # Check that the function output matches applying an alternative function
    # if min_periods isn't specified
    # GH 39604: After count-min_periods deprecation, apply(lambda x: len(x))
    # is equivalent to count after setting min_periods=0
    min_periods = 0 if func == "count" else None
    rolling3 = frame_or_series(values).rolling(window=indexer, min_periods=min_periods)
    result3 = getattr(rolling3, func)()
    expected3 = frame_or_series(rolling3.apply(lambda x: np_func(x, **np_kwargs)))
    tm.assert_equal(result3, expected3)


def test_rolling_forward_skewness(frame_or_series, step):
    # Forward-window skew against precomputed reference values.
    values = np.arange(10.0)
    values[5] = 100.0

    indexer = FixedForwardWindowIndexer(window_size=5)
    rolling = frame_or_series(values).rolling(window=indexer, min_periods=3, step=step)
    result = rolling.skew()

    expected = frame_or_series(
        [
            0.0,
            2.232396,
            2.229508,
            2.228340,
            2.229091,
            2.231989,
            0.0,
            0.0,
            np.nan,
            np.nan,
        ]
    )[::step]
    tm.assert_equal(result, expected)


@pytest.mark.parametrize(
    "func,expected",
    [
        ("cov", [2.0, 2.0, 2.0, 97.0, 2.0, -93.0, 2.0, 2.0, np.nan, np.nan]),
        (
            "corr",
            [
                1.0,
                1.0,
                1.0,
                0.8704775290207161,
                0.018229084250926637,
                -0.861357304646493,
                1.0,
                1.0,
                np.nan,
                np.nan,
            ],
        ),
    ],
)
def test_rolling_forward_cov_corr(func, expected):
    values1 = np.arange(10).reshape(-1, 1)
    values2 = values1 * 2
    values1[5, 0] = 100
    values = np.concatenate([values1, values2], axis=1)

    indexer = FixedForwardWindowIndexer(window_size=3)
    rolling = DataFrame(values).rolling(window=indexer, min_periods=3)
    # We are interested in checking only pairwise covariance / correlation
    result = getattr(rolling, func)().loc[(slice(None), 1), 0]
    result = result.reset_index(drop=True)
    expected = Series(expected).reset_index(drop=True)
    expected.name = result.name
    tm.assert_equal(result, expected)


@pytest.mark.parametrize(
    "closed,expected_data",
    [
        ["right", [0.0, 1.0, 2.0, 3.0, 7.0, 12.0, 6.0, 7.0, 8.0, 9.0]],
        ["left", [0.0, 0.0, 1.0, 2.0, 5.0, 9.0, 5.0, 6.0, 7.0, 8.0]],
    ],
)
def test_non_fixed_variable_window_indexer(closed, expected_data):
    # Variable windows keyed off a BusinessDay offset.
    index = date_range("2020", periods=10)
    df = DataFrame(range(10), index=index)
    offset = BusinessDay(1)
    indexer = VariableOffsetWindowIndexer(index=index, offset=offset)
    result = df.rolling(indexer, closed=closed).sum()
    expected = DataFrame(expected_data, index=index)
    tm.assert_frame_equal(result, expected)


def test_variableoffsetwindowindexer_not_dti():
    # GH 54379
    with pytest.raises(ValueError, match="index must be a DatetimeIndex."):
        VariableOffsetWindowIndexer(index="foo", offset=BusinessDay(1))


def test_variableoffsetwindowindexer_not_offset():
    # GH 54379
    idx = date_range("2020", periods=10)
    with pytest.raises(ValueError, match="offset must be a DateOffset-like object."):
        VariableOffsetWindowIndexer(index=idx, offset="foo")


def test_fixed_forward_indexer_count(step):
    # GH: 35579
    df = DataFrame({"b": [None, None, None, 7]})
    indexer = FixedForwardWindowIndexer(window_size=2)
    result = df.rolling(window=indexer, min_periods=0, step=step).count()
    expected = DataFrame({"b": [0.0, 0.0, 1.0, 1.0]})[::step]
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    ("end_value", "values"), [(1, [0.0, 1, 1, 3, 2]), (-1, [0.0, 1, 0, 3, 1])]
)
@pytest.mark.parametrize(("func", "args"), [("median", []), ("quantile", [0.5])])
def test_indexer_quantile_sum(end_value, values, func, args):
    # GH 37153
    class CustomIndexer(BaseIndexer):
        def get_window_bounds(self, num_values, min_periods, center, closed, step):
            start = np.empty(num_values, dtype=np.int64)
            end = np.empty(num_values, dtype=np.int64)
            for i in range(num_values):
                if self.use_expanding[i]:
                    start[i] = 0
                    end[i] = max(i + end_value, 1)
                else:
                    start[i] = i
                    end[i] = i + self.window_size
            return start, end

    use_expanding = [True, False, True, False, True]
    df = DataFrame({"values": range(5)})

    indexer = CustomIndexer(window_size=1, use_expanding=use_expanding)
    result = getattr(df.rolling(indexer), func)(*args)
    expected = DataFrame({"values": values})
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "indexer_class", [FixedWindowIndexer, FixedForwardWindowIndexer, ExpandingIndexer]
)
@pytest.mark.parametrize("window_size", [1, 2, 12])
@pytest.mark.parametrize(
    "df_data",
    [
        {"a": [1, 1], "b": [0, 1]},
        {"a": [1, 2], "b": [0, 1]},
        {"a": [1] * 16, "b": [np.nan, 1, 2, np.nan] + list(range(4, 16))},
    ],
)
def test_indexers_are_reusable_after_groupby_rolling(
    indexer_class, window_size, df_data
):
    # GH 43267
    df = DataFrame(df_data)
    num_trials = 3
    indexer = indexer_class(window_size=window_size)
    original_window_size = indexer.window_size
    for i in range(num_trials):
        df.groupby("a")["b"].rolling(window=indexer, min_periods=1).mean()
        assert indexer.window_size == original_window_size


@pytest.mark.parametrize(
    "window_size, num_values, expected_start, expected_end",
    [
        (1, 1, [0], [1]),
        (1, 2, [0, 1], [1, 2]),
        (2, 1, [0], [1]),
        (2, 2, [0, 1], [2, 2]),
        (5, 12, range(12), list(range(5, 12)) + [12] * 5),
        (12, 5, range(5), [5] * 5),
        (0, 0, np.array([]), np.array([])),
        (1, 0, np.array([]), np.array([])),
        (0, 1, [0], [0]),
    ],
)
def test_fixed_forward_indexer_bounds(
    window_size, num_values, expected_start, expected_end, step
):
    # GH 43267
    indexer = FixedForwardWindowIndexer(window_size=window_size)
    start, end = indexer.get_window_bounds(num_values=num_values, step=step)

    tm.assert_numpy_array_equal(
        start, np.array(expected_start[::step]), check_dtype=False
    )
    tm.assert_numpy_array_equal(end, np.array(expected_end[::step]), check_dtype=False)
    assert len(start) == len(end)


@pytest.mark.parametrize(
    "df, window_size, expected",
    [
        (
            DataFrame({"b": [0, 1, 2], "a": [1, 2, 2]}),
            2,
            Series(
                [0, 1.5, 2.0],
                index=MultiIndex.from_arrays([[1, 2, 2], range(3)], names=["a", None]),
                name="b",
                dtype=np.float64,
            ),
        ),
        (
            DataFrame(
                {
                    "b": [np.nan, 1, 2, np.nan] + list(range(4, 18)),
                    "a": [1] * 7 + [2] * 11,
                    "c": range(18),
                }
            ),
            12,
            Series(
                [
                    3.6,
                    3.6,
                    4.25,
                    5.0,
                    5.0,
                    5.5,
                    6.0,
                    12.0,
                    12.5,
                    13.0,
                    13.5,
                    14.0,
                    14.5,
                    15.0,
                    15.5,
                    16.0,
                    16.5,
                    17.0,
                ],
                index=MultiIndex.from_arrays(
                    [[1] * 7 + [2] * 11, range(18)], names=["a", None]
                ),
                name="b",
                dtype=np.float64,
            ),
        ),
    ],
)
def test_rolling_groupby_with_fixed_forward_specific(df, window_size, expected):
    # GH 43267
    indexer = FixedForwardWindowIndexer(window_size=window_size)
    result = df.groupby("a")["b"].rolling(window=indexer, min_periods=1).mean()
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize(
    "group_keys",
    [
        (1,),
        (1, 2),
        (2, 1),
        (1, 1, 2),
        (1, 2, 1),
        (1, 1, 2, 2),
        (1, 2, 3, 2, 3),
        (1, 1, 2) * 4,
        (1, 2, 3) * 5,
    ],
)
@pytest.mark.parametrize("window_size", [1, 2, 3, 4, 5, 8, 20])
def test_rolling_groupby_with_fixed_forward_many(group_keys, window_size):
    # GH 43267
    # Cross-check the groupby forward-window sum against a manual per-group
    # slice-and-sum computation.
    df = DataFrame(
        {
            "a": np.array(list(group_keys)),
            "b": np.arange(len(group_keys), dtype=np.float64) + 17,
            "c": np.arange(len(group_keys), dtype=np.int64),
        }
    )

    indexer = FixedForwardWindowIndexer(window_size=window_size)
    result = df.groupby("a")["b"].rolling(window=indexer, min_periods=1).sum()
    result.index.names = ["a", "c"]

    groups = df.groupby("a")[["a", "b", "c"]]
    manual = concat(
        [
            g.assign(
                b=[
                    g["b"].iloc[i : i + window_size].sum(min_count=1)
                    for i in range(len(g))
                ]
            )
            for _, g in groups
        ]
    )
    manual = manual.set_index(["a", "c"])["b"]

    tm.assert_series_equal(result, manual)


def test_unequal_start_end_bounds():
    # start and end arrays of different length must raise, for every
    # consumer of the bounds.
    class CustomIndexer(BaseIndexer):
        def get_window_bounds(self, num_values, min_periods, center, closed, step):
            return np.array([1]), np.array([1, 2])

    indexer = CustomIndexer()
    roll = Series(1).rolling(indexer)
    match = "start"
    with pytest.raises(ValueError, match=match):
        roll.mean()

    with pytest.raises(ValueError, match=match):
        next(iter(roll))

    with pytest.raises(ValueError, match=match):
        roll.corr(pairwise=True)

    with pytest.raises(ValueError, match=match):
        roll.cov(pairwise=True)


def test_unequal_bounds_to_object():
    # GH 44470
    # Bounds shorter than the object must raise as well.
    class CustomIndexer(BaseIndexer):
        def get_window_bounds(self, num_values, min_periods, center, closed, step):
            return np.array([1]), np.array([2])

    indexer = CustomIndexer()
    roll = Series([1, 1]).rolling(indexer)
    match = "start and end"
    with pytest.raises(ValueError, match=match):
        roll.mean()

    with pytest.raises(ValueError, match=match):
        next(iter(roll))

    with pytest.raises(ValueError, match=match):
        roll.corr(pairwise=True)

    with pytest.raises(ValueError, match=match):
        roll.cov(pairwise=True)
from functools import partial\nimport sys\n\nimport numpy as np\nimport pytest\n\nimport pandas._libs.window.aggregations as window_aggregations\n\nfrom pandas import Series\nimport pandas._testing as tm\n\n\ndef _get_rolling_aggregations():\n # list pairs of name and function\n # each function has this signature:\n # (const float64_t[:] values, ndarray[int64_t] start,\n # ndarray[int64_t] end, int64_t minp) -> np.ndarray\n named_roll_aggs = (\n [\n ("roll_sum", window_aggregations.roll_sum),\n ("roll_mean", window_aggregations.roll_mean),\n ]\n + [\n (f"roll_var({ddof})", partial(window_aggregations.roll_var, ddof=ddof))\n for ddof in [0, 1]\n ]\n + [\n ("roll_skew", window_aggregations.roll_skew),\n ("roll_kurt", window_aggregations.roll_kurt),\n ("roll_median_c", window_aggregations.roll_median_c),\n ("roll_max", window_aggregations.roll_max),\n ("roll_min", window_aggregations.roll_min),\n ]\n + [\n (\n f"roll_quantile({quantile},{interpolation})",\n partial(\n window_aggregations.roll_quantile,\n quantile=quantile,\n interpolation=interpolation,\n ),\n )\n for quantile in [0.0001, 0.5, 0.9999]\n for interpolation in window_aggregations.interpolation_types\n ]\n + [\n (\n f"roll_rank({percentile},{method},{ascending})",\n partial(\n window_aggregations.roll_rank,\n percentile=percentile,\n method=method,\n ascending=ascending,\n ),\n )\n for percentile in [True, False]\n for method in window_aggregations.rolling_rank_tiebreakers.keys()\n for ascending in [True, False]\n ]\n )\n # unzip to a list of 2 tuples, names and functions\n unzipped = list(zip(*named_roll_aggs))\n return {"ids": unzipped[0], "params": unzipped[1]}\n\n\n_rolling_aggregations = _get_rolling_aggregations()\n\n\n@pytest.fixture(\n params=_rolling_aggregations["params"], ids=_rolling_aggregations["ids"]\n)\ndef rolling_aggregation(request):\n """Make a rolling aggregation function as fixture."""\n return request.param\n\n\ndef 
test_rolling_aggregation_boundary_consistency(rolling_aggregation):\n # GH-45647\n minp, step, width, size, selection = 0, 1, 3, 11, [2, 7]\n values = np.arange(1, 1 + size, dtype=np.float64)\n end = np.arange(width, size, step, dtype=np.int64)\n start = end - width\n selarr = np.array(selection, dtype=np.int32)\n result = Series(rolling_aggregation(values, start[selarr], end[selarr], minp))\n expected = Series(rolling_aggregation(values, start, end, minp)[selarr])\n tm.assert_equal(expected, result)\n\n\ndef test_rolling_aggregation_with_unused_elements(rolling_aggregation):\n # GH-45647\n minp, width = 0, 5 # width at least 4 for kurt\n size = 2 * width + 5\n values = np.arange(1, size + 1, dtype=np.float64)\n values[width : width + 2] = sys.float_info.min\n values[width + 2] = np.nan\n values[width + 3 : width + 5] = sys.float_info.max\n start = np.array([0, size - width], dtype=np.int64)\n end = np.array([width, size], dtype=np.int64)\n loc = np.array(\n [j for i in range(len(start)) for j in range(start[i], end[i])],\n dtype=np.int32,\n )\n result = Series(rolling_aggregation(values, start, end, minp))\n compact_values = np.array(values[loc], dtype=np.float64)\n compact_start = np.arange(0, len(start) * width, width, dtype=np.int64)\n compact_end = compact_start + width\n expected = Series(\n rolling_aggregation(compact_values, compact_start, compact_end, minp)\n )\n assert np.isfinite(expected.values).all(), "Not all expected values are finite"\n tm.assert_equal(expected, result)\n | .venv\Lib\site-packages\pandas\tests\window\test_cython_aggregations.py | test_cython_aggregations.py | Python | 3,967 | 0.95 | 0.144144 | 0.071429 | react-lib | 523 | 2025-04-29T14:57:27.926496 | GPL-3.0 | true | 48bfe01d9e33c022704f769eadd43ff7 |
import numpy as np\nimport pytest\n\nfrom pandas.errors import DataError\n\nfrom pandas.core.dtypes.common import pandas_dtype\n\nfrom pandas import (\n NA,\n DataFrame,\n Series,\n)\nimport pandas._testing as tm\n\n# gh-12373 : rolling functions error on float32 data\n# make sure rolling functions works for different dtypes\n#\n# further note that we are only checking rolling for fully dtype\n# compliance (though both expanding and ewm inherit)\n\n\ndef get_dtype(dtype, coerce_int=None):\n if coerce_int is False and "int" in dtype:\n return None\n return pandas_dtype(dtype)\n\n\n@pytest.fixture(\n params=[\n "object",\n "category",\n "int8",\n "int16",\n "int32",\n "int64",\n "uint8",\n "uint16",\n "uint32",\n "uint64",\n "float16",\n "float32",\n "float64",\n "m8[ns]",\n "M8[ns]",\n "datetime64[ns, UTC]",\n ]\n)\ndef dtypes(request):\n """Dtypes for window tests"""\n return request.param\n\n\n@pytest.mark.parametrize(\n "method, data, expected_data, coerce_int, min_periods",\n [\n ("count", np.arange(5), [1, 2, 2, 2, 2], True, 0),\n ("count", np.arange(10, 0, -2), [1, 2, 2, 2, 2], True, 0),\n ("count", [0, 1, 2, np.nan, 4], [1, 2, 2, 1, 1], False, 0),\n ("max", np.arange(5), [np.nan, 1, 2, 3, 4], True, None),\n ("max", np.arange(10, 0, -2), [np.nan, 10, 8, 6, 4], True, None),\n ("max", [0, 1, 2, np.nan, 4], [np.nan, 1, 2, np.nan, np.nan], False, None),\n ("min", np.arange(5), [np.nan, 0, 1, 2, 3], True, None),\n ("min", np.arange(10, 0, -2), [np.nan, 8, 6, 4, 2], True, None),\n ("min", [0, 1, 2, np.nan, 4], [np.nan, 0, 1, np.nan, np.nan], False, None),\n ("sum", np.arange(5), [np.nan, 1, 3, 5, 7], True, None),\n ("sum", np.arange(10, 0, -2), [np.nan, 18, 14, 10, 6], True, None),\n ("sum", [0, 1, 2, np.nan, 4], [np.nan, 1, 3, np.nan, np.nan], False, None),\n ("mean", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True, None),\n ("mean", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True, None),\n ("mean", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 1.5, np.nan, np.nan], False, 
None),\n ("std", np.arange(5), [np.nan] + [np.sqrt(0.5)] * 4, True, None),\n ("std", np.arange(10, 0, -2), [np.nan] + [np.sqrt(2)] * 4, True, None),\n (\n "std",\n [0, 1, 2, np.nan, 4],\n [np.nan] + [np.sqrt(0.5)] * 2 + [np.nan] * 2,\n False,\n None,\n ),\n ("var", np.arange(5), [np.nan, 0.5, 0.5, 0.5, 0.5], True, None),\n ("var", np.arange(10, 0, -2), [np.nan, 2, 2, 2, 2], True, None),\n ("var", [0, 1, 2, np.nan, 4], [np.nan, 0.5, 0.5, np.nan, np.nan], False, None),\n ("median", np.arange(5), [np.nan, 0.5, 1.5, 2.5, 3.5], True, None),\n ("median", np.arange(10, 0, -2), [np.nan, 9, 7, 5, 3], True, None),\n (\n "median",\n [0, 1, 2, np.nan, 4],\n [np.nan, 0.5, 1.5, np.nan, np.nan],\n False,\n None,\n ),\n ],\n)\ndef test_series_dtypes(\n method, data, expected_data, coerce_int, dtypes, min_periods, step\n):\n ser = Series(data, dtype=get_dtype(dtypes, coerce_int=coerce_int))\n rolled = ser.rolling(2, min_periods=min_periods, step=step)\n\n if dtypes in ("m8[ns]", "M8[ns]", "datetime64[ns, UTC]") and method != "count":\n msg = "No numeric types to aggregate"\n with pytest.raises(DataError, match=msg):\n getattr(rolled, method)()\n else:\n result = getattr(rolled, method)()\n expected = Series(expected_data, dtype="float64")[::step]\n tm.assert_almost_equal(result, expected)\n\n\ndef test_series_nullable_int(any_signed_int_ea_dtype, step):\n # GH 43016\n ser = Series([0, 1, NA], dtype=any_signed_int_ea_dtype)\n result = ser.rolling(2, step=step).mean()\n expected = Series([np.nan, 0.5, np.nan])[::step]\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "method, expected_data, min_periods",\n [\n ("count", {0: Series([1, 2, 2, 2, 2]), 1: Series([1, 2, 2, 2, 2])}, 0),\n (\n "max",\n {0: Series([np.nan, 2, 4, 6, 8]), 1: Series([np.nan, 3, 5, 7, 9])},\n None,\n ),\n (\n "min",\n {0: Series([np.nan, 0, 2, 4, 6]), 1: Series([np.nan, 1, 3, 5, 7])},\n None,\n ),\n (\n "sum",\n {0: Series([np.nan, 2, 6, 10, 14]), 1: Series([np.nan, 4, 8, 12, 16])},\n 
None,\n ),\n (\n "mean",\n {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])},\n None,\n ),\n (\n "std",\n {\n 0: Series([np.nan] + [np.sqrt(2)] * 4),\n 1: Series([np.nan] + [np.sqrt(2)] * 4),\n },\n None,\n ),\n (\n "var",\n {0: Series([np.nan, 2, 2, 2, 2]), 1: Series([np.nan, 2, 2, 2, 2])},\n None,\n ),\n (\n "median",\n {0: Series([np.nan, 1, 3, 5, 7]), 1: Series([np.nan, 2, 4, 6, 8])},\n None,\n ),\n ],\n)\ndef test_dataframe_dtypes(method, expected_data, dtypes, min_periods, step):\n df = DataFrame(np.arange(10).reshape((5, 2)), dtype=get_dtype(dtypes))\n rolled = df.rolling(2, min_periods=min_periods, step=step)\n\n if dtypes in ("m8[ns]", "M8[ns]", "datetime64[ns, UTC]") and method != "count":\n msg = "Cannot aggregate non-numeric type"\n with pytest.raises(DataError, match=msg):\n getattr(rolled, method)()\n else:\n result = getattr(rolled, method)()\n expected = DataFrame(expected_data, dtype="float64")[::step]\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\window\test_dtypes.py | test_dtypes.py | Python | 5,785 | 0.95 | 0.063584 | 0.038217 | python-kit | 749 | 2023-12-07T18:08:11.983896 | Apache-2.0 | true | f3d7eabd238ace09d4f921cb53aaca4a |
import numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Series,\n date_range,\n)\nimport pandas._testing as tm\n\n\ndef test_doc_string():\n df = DataFrame({"B": [0, 1, 2, np.nan, 4]})\n df\n df.ewm(com=0.5).mean()\n\n\ndef test_constructor(frame_or_series):\n c = frame_or_series(range(5)).ewm\n\n # valid\n c(com=0.5)\n c(span=1.5)\n c(alpha=0.5)\n c(halflife=0.75)\n c(com=0.5, span=None)\n c(alpha=0.5, com=None)\n c(halflife=0.75, alpha=None)\n\n # not valid: mutually exclusive\n msg = "comass, span, halflife, and alpha are mutually exclusive"\n with pytest.raises(ValueError, match=msg):\n c(com=0.5, alpha=0.5)\n with pytest.raises(ValueError, match=msg):\n c(span=1.5, halflife=0.75)\n with pytest.raises(ValueError, match=msg):\n c(alpha=0.5, span=1.5)\n\n # not valid: com < 0\n msg = "comass must satisfy: comass >= 0"\n with pytest.raises(ValueError, match=msg):\n c(com=-0.5)\n\n # not valid: span < 1\n msg = "span must satisfy: span >= 1"\n with pytest.raises(ValueError, match=msg):\n c(span=0.5)\n\n # not valid: halflife <= 0\n msg = "halflife must satisfy: halflife > 0"\n with pytest.raises(ValueError, match=msg):\n c(halflife=0)\n\n # not valid: alpha <= 0 or alpha > 1\n msg = "alpha must satisfy: 0 < alpha <= 1"\n for alpha in (-0.5, 1.5):\n with pytest.raises(ValueError, match=msg):\n c(alpha=alpha)\n\n\ndef test_ewma_times_not_datetime_type():\n msg = r"times must be datetime64 dtype."\n with pytest.raises(ValueError, match=msg):\n Series(range(5)).ewm(times=np.arange(5))\n\n\ndef test_ewma_times_not_same_length():\n msg = "times must be the same length as the object."\n with pytest.raises(ValueError, match=msg):\n Series(range(5)).ewm(times=np.arange(4).astype("datetime64[ns]"))\n\n\ndef test_ewma_halflife_not_correct_type():\n msg = "halflife must be a timedelta convertible object"\n with pytest.raises(ValueError, match=msg):\n Series(range(5)).ewm(halflife=1, times=np.arange(5).astype("datetime64[ns]"))\n\n\ndef 
test_ewma_halflife_without_times(halflife_with_times):\n msg = "halflife can only be a timedelta convertible argument if times is not None."\n with pytest.raises(ValueError, match=msg):\n Series(range(5)).ewm(halflife=halflife_with_times)\n\n\n@pytest.mark.parametrize(\n "times",\n [\n np.arange(10).astype("datetime64[D]").astype("datetime64[ns]"),\n date_range("2000", freq="D", periods=10),\n date_range("2000", freq="D", periods=10).tz_localize("UTC"),\n ],\n)\n@pytest.mark.parametrize("min_periods", [0, 2])\ndef test_ewma_with_times_equal_spacing(halflife_with_times, times, min_periods):\n halflife = halflife_with_times\n data = np.arange(10.0)\n data[::2] = np.nan\n df = DataFrame({"A": data})\n result = df.ewm(halflife=halflife, min_periods=min_periods, times=times).mean()\n expected = df.ewm(halflife=1.0, min_periods=min_periods).mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_ewma_with_times_variable_spacing(tz_aware_fixture, unit):\n tz = tz_aware_fixture\n halflife = "23 days"\n times = (\n DatetimeIndex(["2020-01-01", "2020-01-10T00:04:05", "2020-02-23T05:00:23"])\n .tz_localize(tz)\n .as_unit(unit)\n )\n data = np.arange(3)\n df = DataFrame(data)\n result = df.ewm(halflife=halflife, times=times).mean()\n expected = DataFrame([0.0, 0.5674161888241773, 1.545239952073459])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_ewm_with_nat_raises(halflife_with_times):\n # GH#38535\n ser = Series(range(1))\n times = DatetimeIndex(["NaT"])\n with pytest.raises(ValueError, match="Cannot convert NaT values to integer"):\n ser.ewm(com=0.1, halflife=halflife_with_times, times=times)\n\n\ndef test_ewm_with_times_getitem(halflife_with_times):\n # GH 40164\n halflife = halflife_with_times\n data = np.arange(10.0)\n data[::2] = np.nan\n times = date_range("2000", freq="D", periods=10)\n df = DataFrame({"A": data, "B": data})\n result = df.ewm(halflife=halflife, times=times)["A"].mean()\n expected = df.ewm(halflife=1.0)["A"].mean()\n 
tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("arg", ["com", "halflife", "span", "alpha"])\ndef test_ewm_getitem_attributes_retained(arg, adjust, ignore_na):\n # GH 40164\n kwargs = {arg: 1, "adjust": adjust, "ignore_na": ignore_na}\n ewm = DataFrame({"A": range(1), "B": range(1)}).ewm(**kwargs)\n expected = {attr: getattr(ewm, attr) for attr in ewm._attributes}\n ewm_slice = ewm["A"]\n result = {attr: getattr(ewm, attr) for attr in ewm_slice._attributes}\n assert result == expected\n\n\ndef test_ewma_times_adjust_false_raises():\n # GH 40098\n with pytest.raises(\n NotImplementedError, match="times is not supported with adjust=False."\n ):\n Series(range(1)).ewm(\n 0.1, adjust=False, times=date_range("2000", freq="D", periods=1)\n )\n\n\n@pytest.mark.parametrize(\n "func, expected",\n [\n [\n "mean",\n DataFrame(\n {\n 0: range(5),\n 1: range(4, 9),\n 2: [7.428571, 9, 10.571429, 12.142857, 13.714286],\n },\n dtype=float,\n ),\n ],\n [\n "std",\n DataFrame(\n {\n 0: [np.nan] * 5,\n 1: [4.242641] * 5,\n 2: [4.6291, 5.196152, 5.781745, 6.380775, 6.989788],\n }\n ),\n ],\n [\n "var",\n DataFrame(\n {\n 0: [np.nan] * 5,\n 1: [18.0] * 5,\n 2: [21.428571, 27, 33.428571, 40.714286, 48.857143],\n }\n ),\n ],\n ],\n)\ndef test_float_dtype_ewma(func, expected, float_numpy_dtype):\n # GH#42452\n\n df = DataFrame(\n {0: range(5), 1: range(6, 11), 2: range(10, 20, 2)}, dtype=float_numpy_dtype\n )\n msg = "Support for axis=1 in DataFrame.ewm is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n e = df.ewm(alpha=0.5, axis=1)\n result = getattr(e, func)()\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_times_string_col_raises():\n # GH 43265\n df = DataFrame(\n {"A": np.arange(10.0), "time_col": date_range("2000", freq="D", periods=10)}\n )\n with pytest.raises(ValueError, match="times must be datetime64"):\n df.ewm(halflife="1 day", min_periods=0, times="time_col")\n\n\ndef test_ewm_sum_adjust_false_notimplemented():\n 
data = Series(range(1)).ewm(com=1, adjust=False)\n with pytest.raises(NotImplementedError, match="sum is not"):\n data.sum()\n\n\n@pytest.mark.parametrize(\n "expected_data, ignore",\n [[[10.0, 5.0, 2.5, 11.25], False], [[10.0, 5.0, 5.0, 12.5], True]],\n)\ndef test_ewm_sum(expected_data, ignore):\n # xref from Numbagg tests\n # https://github.com/numbagg/numbagg/blob/v0.2.1/numbagg/test/test_moving.py#L50\n data = Series([10, 0, np.nan, 10])\n result = data.ewm(alpha=0.5, ignore_na=ignore).sum()\n expected = Series(expected_data)\n tm.assert_series_equal(result, expected)\n\n\ndef test_ewma_adjust():\n vals = Series(np.zeros(1000))\n vals[5] = 1\n result = vals.ewm(span=100, adjust=False).mean().sum()\n assert np.abs(result - 1) < 1e-2\n\n\ndef test_ewma_cases(adjust, ignore_na):\n # try adjust/ignore_na args matrix\n\n s = Series([1.0, 2.0, 4.0, 8.0])\n\n if adjust:\n expected = Series([1.0, 1.6, 2.736842, 4.923077])\n else:\n expected = Series([1.0, 1.333333, 2.222222, 4.148148])\n\n result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()\n tm.assert_series_equal(result, expected)\n\n\ndef test_ewma_nan_handling():\n s = Series([1.0] + [np.nan] * 5 + [1.0])\n result = s.ewm(com=5).mean()\n tm.assert_series_equal(result, Series([1.0] * len(s)))\n\n s = Series([np.nan] * 2 + [1.0] + [np.nan] * 2 + [1.0])\n result = s.ewm(com=5).mean()\n tm.assert_series_equal(result, Series([np.nan] * 2 + [1.0] * 4))\n\n\n@pytest.mark.parametrize(\n "s, adjust, ignore_na, w",\n [\n (\n Series([np.nan, 1.0, 101.0]),\n True,\n False,\n [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],\n ),\n (\n Series([np.nan, 1.0, 101.0]),\n True,\n True,\n [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), 1.0],\n ),\n (\n Series([np.nan, 1.0, 101.0]),\n False,\n False,\n [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],\n ),\n (\n Series([np.nan, 1.0, 101.0]),\n False,\n True,\n [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), (1.0 / (1.0 + 2.0))],\n ),\n (\n Series([1.0, np.nan, 101.0]),\n True,\n 
False,\n [(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, 1.0],\n ),\n (\n Series([1.0, np.nan, 101.0]),\n True,\n True,\n [(1.0 - (1.0 / (1.0 + 2.0))), np.nan, 1.0],\n ),\n (\n Series([1.0, np.nan, 101.0]),\n False,\n False,\n [(1.0 - (1.0 / (1.0 + 2.0))) ** 2, np.nan, (1.0 / (1.0 + 2.0))],\n ),\n (\n Series([1.0, np.nan, 101.0]),\n False,\n True,\n [(1.0 - (1.0 / (1.0 + 2.0))), np.nan, (1.0 / (1.0 + 2.0))],\n ),\n (\n Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),\n True,\n False,\n [np.nan, (1.0 - (1.0 / (1.0 + 2.0))) ** 3, np.nan, np.nan, 1.0, np.nan],\n ),\n (\n Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),\n True,\n True,\n [np.nan, (1.0 - (1.0 / (1.0 + 2.0))), np.nan, np.nan, 1.0, np.nan],\n ),\n (\n Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),\n False,\n False,\n [\n np.nan,\n (1.0 - (1.0 / (1.0 + 2.0))) ** 3,\n np.nan,\n np.nan,\n (1.0 / (1.0 + 2.0)),\n np.nan,\n ],\n ),\n (\n Series([np.nan, 1.0, np.nan, np.nan, 101.0, np.nan]),\n False,\n True,\n [\n np.nan,\n (1.0 - (1.0 / (1.0 + 2.0))),\n np.nan,\n np.nan,\n (1.0 / (1.0 + 2.0)),\n np.nan,\n ],\n ),\n (\n Series([1.0, np.nan, 101.0, 50.0]),\n True,\n False,\n [\n (1.0 - (1.0 / (1.0 + 2.0))) ** 3,\n np.nan,\n (1.0 - (1.0 / (1.0 + 2.0))),\n 1.0,\n ],\n ),\n (\n Series([1.0, np.nan, 101.0, 50.0]),\n True,\n True,\n [\n (1.0 - (1.0 / (1.0 + 2.0))) ** 2,\n np.nan,\n (1.0 - (1.0 / (1.0 + 2.0))),\n 1.0,\n ],\n ),\n (\n Series([1.0, np.nan, 101.0, 50.0]),\n False,\n False,\n [\n (1.0 - (1.0 / (1.0 + 2.0))) ** 3,\n np.nan,\n (1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),\n (1.0 / (1.0 + 2.0))\n * ((1.0 - (1.0 / (1.0 + 2.0))) ** 2 + (1.0 / (1.0 + 2.0))),\n ],\n ),\n (\n Series([1.0, np.nan, 101.0, 50.0]),\n False,\n True,\n [\n (1.0 - (1.0 / (1.0 + 2.0))) ** 2,\n np.nan,\n (1.0 - (1.0 / (1.0 + 2.0))) * (1.0 / (1.0 + 2.0)),\n (1.0 / (1.0 + 2.0)),\n ],\n ),\n ],\n)\ndef test_ewma_nan_handling_cases(s, adjust, ignore_na, w):\n # GH 7603\n expected = (s.multiply(w).cumsum() / 
Series(w).cumsum()).ffill()\n result = s.ewm(com=2.0, adjust=adjust, ignore_na=ignore_na).mean()\n\n tm.assert_series_equal(result, expected)\n if ignore_na is False:\n # check that ignore_na defaults to False\n result = s.ewm(com=2.0, adjust=adjust).mean()\n tm.assert_series_equal(result, expected)\n\n\ndef test_ewm_alpha():\n # GH 10789\n arr = np.random.default_rng(2).standard_normal(100)\n locs = np.arange(20, 40)\n arr[locs] = np.nan\n\n s = Series(arr)\n a = s.ewm(alpha=0.61722699889169674).mean()\n b = s.ewm(com=0.62014947789973052).mean()\n c = s.ewm(span=2.240298955799461).mean()\n d = s.ewm(halflife=0.721792864318).mean()\n tm.assert_series_equal(a, b)\n tm.assert_series_equal(a, c)\n tm.assert_series_equal(a, d)\n\n\ndef test_ewm_domain_checks():\n # GH 12492\n arr = np.random.default_rng(2).standard_normal(100)\n locs = np.arange(20, 40)\n arr[locs] = np.nan\n\n s = Series(arr)\n msg = "comass must satisfy: comass >= 0"\n with pytest.raises(ValueError, match=msg):\n s.ewm(com=-0.1)\n s.ewm(com=0.0)\n s.ewm(com=0.1)\n\n msg = "span must satisfy: span >= 1"\n with pytest.raises(ValueError, match=msg):\n s.ewm(span=-0.1)\n with pytest.raises(ValueError, match=msg):\n s.ewm(span=0.0)\n with pytest.raises(ValueError, match=msg):\n s.ewm(span=0.9)\n s.ewm(span=1.0)\n s.ewm(span=1.1)\n\n msg = "halflife must satisfy: halflife > 0"\n with pytest.raises(ValueError, match=msg):\n s.ewm(halflife=-0.1)\n with pytest.raises(ValueError, match=msg):\n s.ewm(halflife=0.0)\n s.ewm(halflife=0.1)\n\n msg = "alpha must satisfy: 0 < alpha <= 1"\n with pytest.raises(ValueError, match=msg):\n s.ewm(alpha=-0.1)\n with pytest.raises(ValueError, match=msg):\n s.ewm(alpha=0.0)\n s.ewm(alpha=0.1)\n s.ewm(alpha=1.0)\n with pytest.raises(ValueError, match=msg):\n s.ewm(alpha=1.1)\n\n\n@pytest.mark.parametrize("method", ["mean", "std", "var"])\ndef test_ew_empty_series(method):\n vals = Series([], dtype=np.float64)\n\n ewm = vals.ewm(3)\n result = getattr(ewm, method)()\n 
tm.assert_almost_equal(result, vals)\n\n\n@pytest.mark.parametrize("min_periods", [0, 1])\n@pytest.mark.parametrize("name", ["mean", "var", "std"])\ndef test_ew_min_periods(min_periods, name):\n # excluding NaNs correctly\n arr = np.random.default_rng(2).standard_normal(50)\n arr[:10] = np.nan\n arr[-10:] = np.nan\n s = Series(arr)\n\n # check min_periods\n # GH 7898\n result = getattr(s.ewm(com=50, min_periods=2), name)()\n assert result[:11].isna().all()\n assert not result[11:].isna().any()\n\n result = getattr(s.ewm(com=50, min_periods=min_periods), name)()\n if name == "mean":\n assert result[:10].isna().all()\n assert not result[10:].isna().any()\n else:\n # ewm.std, ewm.var (with bias=False) require at least\n # two values\n assert result[:11].isna().all()\n assert not result[11:].isna().any()\n\n # check series of length 0\n result = getattr(Series(dtype=object).ewm(com=50, min_periods=min_periods), name)()\n tm.assert_series_equal(result, Series(dtype="float64"))\n\n # check series of length 1\n result = getattr(Series([1.0]).ewm(50, min_periods=min_periods), name)()\n if name == "mean":\n tm.assert_series_equal(result, Series([1.0]))\n else:\n # ewm.std, ewm.var with bias=False require at least\n # two values\n tm.assert_series_equal(result, Series([np.nan]))\n\n # pass in ints\n result2 = getattr(Series(np.arange(50)).ewm(span=10), name)()\n assert result2.dtype == np.float64\n\n\n@pytest.mark.parametrize("name", ["cov", "corr"])\ndef test_ewm_corr_cov(name):\n A = Series(np.random.default_rng(2).standard_normal(50), index=range(50))\n B = A[2:] + np.random.default_rng(2).standard_normal(48)\n\n A[:10] = np.nan\n B.iloc[-10:] = np.nan\n\n result = getattr(A.ewm(com=20, min_periods=5), name)(B)\n assert np.isnan(result.values[:14]).all()\n assert not np.isnan(result.values[14:]).any()\n\n\n@pytest.mark.parametrize("min_periods", [0, 1, 2])\n@pytest.mark.parametrize("name", ["cov", "corr"])\ndef test_ewm_corr_cov_min_periods(name, min_periods):\n # GH 
7898\n A = Series(np.random.default_rng(2).standard_normal(50), index=range(50))\n B = A[2:] + np.random.default_rng(2).standard_normal(48)\n\n A[:10] = np.nan\n B.iloc[-10:] = np.nan\n\n result = getattr(A.ewm(com=20, min_periods=min_periods), name)(B)\n # binary functions (ewmcov, ewmcorr) with bias=False require at\n # least two values\n assert np.isnan(result.values[:11]).all()\n assert not np.isnan(result.values[11:]).any()\n\n # check series of length 0\n empty = Series([], dtype=np.float64)\n result = getattr(empty.ewm(com=50, min_periods=min_periods), name)(empty)\n tm.assert_series_equal(result, empty)\n\n # check series of length 1\n result = getattr(Series([1.0]).ewm(com=50, min_periods=min_periods), name)(\n Series([1.0])\n )\n tm.assert_series_equal(result, Series([np.nan]))\n\n\n@pytest.mark.parametrize("name", ["cov", "corr"])\ndef test_different_input_array_raise_exception(name):\n A = Series(np.random.default_rng(2).standard_normal(50), index=range(50))\n A[:10] = np.nan\n\n msg = "other must be a DataFrame or Series"\n # exception raised is Exception\n with pytest.raises(ValueError, match=msg):\n getattr(A.ewm(com=20, min_periods=5), name)(\n np.random.default_rng(2).standard_normal(50)\n )\n\n\n@pytest.mark.parametrize("name", ["var", "std", "mean"])\ndef test_ewma_series(series, name):\n series_result = getattr(series.ewm(com=10), name)()\n assert isinstance(series_result, Series)\n\n\n@pytest.mark.parametrize("name", ["var", "std", "mean"])\ndef test_ewma_frame(frame, name):\n frame_result = getattr(frame.ewm(com=10), name)()\n assert isinstance(frame_result, DataFrame)\n\n\ndef test_ewma_span_com_args(series):\n A = series.ewm(com=9.5).mean()\n B = series.ewm(span=20).mean()\n tm.assert_almost_equal(A, B)\n msg = "comass, span, halflife, and alpha are mutually exclusive"\n with pytest.raises(ValueError, match=msg):\n series.ewm(com=9.5, span=20)\n\n msg = "Must pass one of comass, span, halflife, or alpha"\n with pytest.raises(ValueError, 
match=msg):\n series.ewm().mean()\n\n\ndef test_ewma_halflife_arg(series):\n A = series.ewm(com=13.932726172912965).mean()\n B = series.ewm(halflife=10.0).mean()\n tm.assert_almost_equal(A, B)\n msg = "comass, span, halflife, and alpha are mutually exclusive"\n with pytest.raises(ValueError, match=msg):\n series.ewm(span=20, halflife=50)\n with pytest.raises(ValueError, match=msg):\n series.ewm(com=9.5, halflife=50)\n with pytest.raises(ValueError, match=msg):\n series.ewm(com=9.5, span=20, halflife=50)\n msg = "Must pass one of comass, span, halflife, or alpha"\n with pytest.raises(ValueError, match=msg):\n series.ewm()\n\n\ndef test_ewm_alpha_arg(series):\n # GH 10789\n s = series\n msg = "Must pass one of comass, span, halflife, or alpha"\n with pytest.raises(ValueError, match=msg):\n s.ewm()\n\n msg = "comass, span, halflife, and alpha are mutually exclusive"\n with pytest.raises(ValueError, match=msg):\n s.ewm(com=10.0, alpha=0.5)\n with pytest.raises(ValueError, match=msg):\n s.ewm(span=10.0, alpha=0.5)\n with pytest.raises(ValueError, match=msg):\n s.ewm(halflife=10.0, alpha=0.5)\n\n\n@pytest.mark.parametrize("func", ["cov", "corr"])\ndef test_ewm_pairwise_cov_corr(func, frame):\n result = getattr(frame.ewm(span=10, min_periods=5), func)()\n result = result.loc[(slice(None), 1), 5]\n result.index = result.index.droplevel(1)\n expected = getattr(frame[1].ewm(span=10, min_periods=5), func)(frame[5])\n tm.assert_series_equal(result, expected, check_names=False)\n\n\ndef test_numeric_only_frame(arithmetic_win_operators, numeric_only):\n # GH#46560\n kernel = arithmetic_win_operators\n df = DataFrame({"a": [1], "b": 2, "c": 3})\n df["c"] = df["c"].astype(object)\n ewm = df.ewm(span=2, min_periods=1)\n op = getattr(ewm, kernel, None)\n if op is not None:\n result = op(numeric_only=numeric_only)\n\n columns = ["a", "b"] if numeric_only else ["a", "b", "c"]\n expected = df[columns].agg([kernel]).reset_index(drop=True).astype(float)\n assert list(expected.columns) == 
columns\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("kernel", ["corr", "cov"])\n@pytest.mark.parametrize("use_arg", [True, False])\ndef test_numeric_only_corr_cov_frame(kernel, numeric_only, use_arg):\n # GH#46560\n df = DataFrame({"a": [1, 2, 3], "b": 2, "c": 3})\n df["c"] = df["c"].astype(object)\n arg = (df,) if use_arg else ()\n ewm = df.ewm(span=2, min_periods=1)\n op = getattr(ewm, kernel)\n result = op(*arg, numeric_only=numeric_only)\n\n # Compare result to op using float dtypes, dropping c when numeric_only is True\n columns = ["a", "b"] if numeric_only else ["a", "b", "c"]\n df2 = df[columns].astype(float)\n arg2 = (df2,) if use_arg else ()\n ewm2 = df2.ewm(span=2, min_periods=1)\n op2 = getattr(ewm2, kernel)\n expected = op2(*arg2, numeric_only=numeric_only)\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("dtype", [int, object])\ndef test_numeric_only_series(arithmetic_win_operators, numeric_only, dtype):\n # GH#46560\n kernel = arithmetic_win_operators\n ser = Series([1], dtype=dtype)\n ewm = ser.ewm(span=2, min_periods=1)\n op = getattr(ewm, kernel, None)\n if op is None:\n # Nothing to test\n pytest.skip("No op to test")\n if numeric_only and dtype is object:\n msg = f"ExponentialMovingWindow.{kernel} does not implement numeric_only"\n with pytest.raises(NotImplementedError, match=msg):\n op(numeric_only=numeric_only)\n else:\n result = op(numeric_only=numeric_only)\n expected = ser.agg([kernel]).reset_index(drop=True).astype(float)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("kernel", ["corr", "cov"])\n@pytest.mark.parametrize("use_arg", [True, False])\n@pytest.mark.parametrize("dtype", [int, object])\ndef test_numeric_only_corr_cov_series(kernel, use_arg, numeric_only, dtype):\n # GH#46560\n ser = Series([1, 2, 3], dtype=dtype)\n arg = (ser,) if use_arg else ()\n ewm = ser.ewm(span=2, min_periods=1)\n op = getattr(ewm, kernel)\n if numeric_only and dtype is 
object:\n msg = f"ExponentialMovingWindow.{kernel} does not implement numeric_only"\n with pytest.raises(NotImplementedError, match=msg):\n op(*arg, numeric_only=numeric_only)\n else:\n result = op(*arg, numeric_only=numeric_only)\n\n ser2 = ser.astype(float)\n arg2 = (ser2,) if use_arg else ()\n ewm2 = ser2.ewm(span=2, min_periods=1)\n op2 = getattr(ewm2, kernel)\n expected = op2(*arg2, numeric_only=numeric_only)\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\window\test_ewm.py | test_ewm.py | Python | 23,020 | 0.95 | 0.078404 | 0.070033 | vue-tools | 978 | 2025-03-06T20:30:57.353215 | BSD-3-Clause | true | 0064b1e58b30d402ee2cabd03ddddeba |
import numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n MultiIndex,\n Series,\n isna,\n notna,\n)\nimport pandas._testing as tm\n\n\ndef test_doc_string():\n df = DataFrame({"B": [0, 1, 2, np.nan, 4]})\n df\n df.expanding(2).sum()\n\n\ndef test_constructor(frame_or_series):\n # GH 12669\n\n c = frame_or_series(range(5)).expanding\n\n # valid\n c(min_periods=1)\n\n\n@pytest.mark.parametrize("w", [2.0, "foo", np.array([2])])\ndef test_constructor_invalid(frame_or_series, w):\n # not valid\n\n c = frame_or_series(range(5)).expanding\n msg = "min_periods must be an integer"\n with pytest.raises(ValueError, match=msg):\n c(min_periods=w)\n\n\n@pytest.mark.parametrize(\n "expander",\n [\n 1,\n pytest.param(\n "ls",\n marks=pytest.mark.xfail(\n reason="GH#16425 expanding with offset not supported"\n ),\n ),\n ],\n)\ndef test_empty_df_expanding(expander):\n # GH 15819 Verifies that datetime and integer expanding windows can be\n # applied to empty DataFrames\n\n expected = DataFrame()\n result = DataFrame().expanding(expander).sum()\n tm.assert_frame_equal(result, expected)\n\n # Verifies that datetime and integer expanding windows can be applied\n # to empty DataFrames with datetime index\n expected = DataFrame(index=DatetimeIndex([]))\n result = DataFrame(index=DatetimeIndex([])).expanding(expander).sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_missing_minp_zero():\n # https://github.com/pandas-dev/pandas/pull/18921\n # minp=0\n x = Series([np.nan])\n result = x.expanding(min_periods=0).sum()\n expected = Series([0.0])\n tm.assert_series_equal(result, expected)\n\n # minp=1\n result = x.expanding(min_periods=1).sum()\n expected = Series([np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_expanding_axis(axis_frame):\n # see gh-23372.\n df = DataFrame(np.ones((10, 20)))\n axis = df._get_axis_number(axis_frame)\n\n if axis == 0:\n msg = "The 'axis' keyword in DataFrame.expanding is deprecated"\n 
expected = DataFrame(\n {i: [np.nan] * 2 + [float(j) for j in range(3, 11)] for i in range(20)}\n )\n else:\n # axis == 1\n msg = "Support for axis=1 in DataFrame.expanding is deprecated"\n expected = DataFrame([[np.nan] * 2 + [float(i) for i in range(3, 21)]] * 10)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.expanding(3, axis=axis_frame).sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_expanding_count_with_min_periods(frame_or_series):\n # GH 26996\n result = frame_or_series(range(5)).expanding(min_periods=3).count()\n expected = frame_or_series([np.nan, np.nan, 3.0, 4.0, 5.0])\n tm.assert_equal(result, expected)\n\n\ndef test_expanding_count_default_min_periods_with_null_values(frame_or_series):\n # GH 26996\n values = [1, 2, 3, np.nan, 4, 5, 6]\n expected_counts = [1.0, 2.0, 3.0, 3.0, 4.0, 5.0, 6.0]\n\n result = frame_or_series(values).expanding().count()\n expected = frame_or_series(expected_counts)\n tm.assert_equal(result, expected)\n\n\ndef test_expanding_count_with_min_periods_exceeding_series_length(frame_or_series):\n # GH 25857\n result = frame_or_series(range(5)).expanding(min_periods=6).count()\n expected = frame_or_series([np.nan, np.nan, np.nan, np.nan, np.nan])\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "df,expected,min_periods",\n [\n (\n DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),\n [\n ({"A": [1], "B": [4]}, [0]),\n ({"A": [1, 2], "B": [4, 5]}, [0, 1]),\n ({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),\n ],\n 3,\n ),\n (\n DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),\n [\n ({"A": [1], "B": [4]}, [0]),\n ({"A": [1, 2], "B": [4, 5]}, [0, 1]),\n ({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),\n ],\n 2,\n ),\n (\n DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),\n [\n ({"A": [1], "B": [4]}, [0]),\n ({"A": [1, 2], "B": [4, 5]}, [0, 1]),\n ({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),\n ],\n 1,\n ),\n (DataFrame({"A": [1], "B": [4]}), [], 2),\n (DataFrame(), [({}, [])], 1),\n (\n 
DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),\n [\n ({"A": [1.0], "B": [np.nan]}, [0]),\n ({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),\n ({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),\n ],\n 3,\n ),\n (\n DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),\n [\n ({"A": [1.0], "B": [np.nan]}, [0]),\n ({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),\n ({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),\n ],\n 2,\n ),\n (\n DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),\n [\n ({"A": [1.0], "B": [np.nan]}, [0]),\n ({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),\n ({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),\n ],\n 1,\n ),\n ],\n)\ndef test_iter_expanding_dataframe(df, expected, min_periods):\n # GH 11704\n expected = [DataFrame(values, index=index) for (values, index) in expected]\n\n for expected, actual in zip(expected, df.expanding(min_periods)):\n tm.assert_frame_equal(actual, expected)\n\n\n@pytest.mark.parametrize(\n "ser,expected,min_periods",\n [\n (Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 3),\n (Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 2),\n (Series([1, 2, 3]), [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])], 1),\n (Series([1, 2]), [([1], [0]), ([1, 2], [0, 1])], 2),\n (Series([np.nan, 2]), [([np.nan], [0]), ([np.nan, 2], [0, 1])], 2),\n (Series([], dtype="int64"), [], 2),\n ],\n)\ndef test_iter_expanding_series(ser, expected, min_periods):\n # GH 11704\n expected = [Series(values, index=index) for (values, index) in expected]\n\n for expected, actual in zip(expected, ser.expanding(min_periods)):\n tm.assert_series_equal(actual, expected)\n\n\ndef test_center_invalid():\n # GH 20647\n df = DataFrame()\n with pytest.raises(TypeError, match=".* got an unexpected keyword"):\n df.expanding(center=True)\n\n\ndef test_expanding_sem(frame_or_series):\n # GH: 26476\n obj = frame_or_series([0, 1, 2])\n result = obj.expanding().sem()\n if 
isinstance(result, DataFrame):\n result = Series(result[0].values)\n expected = Series([np.nan] + [0.707107] * 2)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("method", ["skew", "kurt"])\ndef test_expanding_skew_kurt_numerical_stability(method):\n # GH: 6929\n s = Series(np.random.default_rng(2).random(10))\n expected = getattr(s.expanding(3), method)()\n s = s + 5000\n result = getattr(s.expanding(3), method)()\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("window", [1, 3, 10, 20])\n@pytest.mark.parametrize("method", ["min", "max", "average"])\n@pytest.mark.parametrize("pct", [True, False])\n@pytest.mark.parametrize("ascending", [True, False])\n@pytest.mark.parametrize("test_data", ["default", "duplicates", "nans"])\ndef test_rank(window, method, pct, ascending, test_data):\n length = 20\n if test_data == "default":\n ser = Series(data=np.random.default_rng(2).random(length))\n elif test_data == "duplicates":\n ser = Series(data=np.random.default_rng(2).choice(3, length))\n elif test_data == "nans":\n ser = Series(\n data=np.random.default_rng(2).choice(\n [1.0, 0.25, 0.75, np.nan, np.inf, -np.inf], length\n )\n )\n\n expected = ser.expanding(window).apply(\n lambda x: x.rank(method=method, pct=pct, ascending=ascending).iloc[-1]\n )\n result = ser.expanding(window).rank(method=method, pct=pct, ascending=ascending)\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_expanding_corr(series):\n A = series.dropna()\n B = (A + np.random.default_rng(2).standard_normal(len(A)))[:-5]\n\n result = A.expanding().corr(B)\n\n rolling_result = A.rolling(window=len(A), min_periods=1).corr(B)\n\n tm.assert_almost_equal(rolling_result, result)\n\n\ndef test_expanding_count(series):\n result = series.expanding(min_periods=0).count()\n tm.assert_almost_equal(\n result, series.rolling(window=len(series), min_periods=0).count()\n )\n\n\ndef test_expanding_quantile(series):\n result = series.expanding().quantile(0.5)\n\n 
rolling_result = series.rolling(window=len(series), min_periods=1).quantile(0.5)\n\n tm.assert_almost_equal(result, rolling_result)\n\n\ndef test_expanding_cov(series):\n A = series\n B = (A + np.random.default_rng(2).standard_normal(len(A)))[:-5]\n\n result = A.expanding().cov(B)\n\n rolling_result = A.rolling(window=len(A), min_periods=1).cov(B)\n\n tm.assert_almost_equal(rolling_result, result)\n\n\ndef test_expanding_cov_pairwise(frame):\n result = frame.expanding().cov()\n\n rolling_result = frame.rolling(window=len(frame), min_periods=1).cov()\n\n tm.assert_frame_equal(result, rolling_result)\n\n\ndef test_expanding_corr_pairwise(frame):\n result = frame.expanding().corr()\n\n rolling_result = frame.rolling(window=len(frame), min_periods=1).corr()\n tm.assert_frame_equal(result, rolling_result)\n\n\n@pytest.mark.parametrize(\n "func,static_comp",\n [\n ("sum", np.sum),\n ("mean", lambda x: np.mean(x, axis=0)),\n ("max", lambda x: np.max(x, axis=0)),\n ("min", lambda x: np.min(x, axis=0)),\n ],\n ids=["sum", "mean", "max", "min"],\n)\ndef test_expanding_func(func, static_comp, frame_or_series):\n data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))\n\n msg = "The 'axis' keyword in (Series|DataFrame).expanding is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n obj = data.expanding(min_periods=1, axis=0)\n result = getattr(obj, func)()\n assert isinstance(result, frame_or_series)\n\n msg = "The behavior of DataFrame.sum with axis=None is deprecated"\n warn = None\n if frame_or_series is DataFrame and static_comp is np.sum:\n warn = FutureWarning\n with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):\n expected = static_comp(data[:11])\n if frame_or_series is Series:\n tm.assert_almost_equal(result[10], expected)\n else:\n tm.assert_series_equal(result.iloc[10], expected, check_names=False)\n\n\n@pytest.mark.parametrize(\n "func,static_comp",\n [("sum", np.sum), ("mean", np.mean), ("max", np.max), 
("min", np.min)],\n ids=["sum", "mean", "max", "min"],\n)\ndef test_expanding_min_periods(func, static_comp):\n ser = Series(np.random.default_rng(2).standard_normal(50))\n\n msg = "The 'axis' keyword in Series.expanding is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = getattr(ser.expanding(min_periods=30, axis=0), func)()\n assert result[:29].isna().all()\n tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))\n\n # min_periods is working correctly\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = getattr(ser.expanding(min_periods=15, axis=0), func)()\n assert isna(result.iloc[13])\n assert notna(result.iloc[14])\n\n ser2 = Series(np.random.default_rng(2).standard_normal(20))\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = getattr(ser2.expanding(min_periods=5, axis=0), func)()\n assert isna(result[3])\n assert notna(result[4])\n\n # min_periods=0\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result0 = getattr(ser.expanding(min_periods=0, axis=0), func)()\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result1 = getattr(ser.expanding(min_periods=1, axis=0), func)()\n tm.assert_almost_equal(result0, result1)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = getattr(ser.expanding(min_periods=1, axis=0), func)()\n tm.assert_almost_equal(result.iloc[-1], static_comp(ser[:50]))\n\n\ndef test_expanding_apply(engine_and_raw, frame_or_series):\n engine, raw = engine_and_raw\n data = frame_or_series(np.array(list(range(10)) + [np.nan] * 10))\n result = data.expanding(min_periods=1).apply(\n lambda x: x.mean(), raw=raw, engine=engine\n )\n assert isinstance(result, frame_or_series)\n\n if frame_or_series is Series:\n tm.assert_almost_equal(result[9], np.mean(data[:11], axis=0))\n else:\n tm.assert_series_equal(\n result.iloc[9], np.mean(data[:11], axis=0), check_names=False\n )\n\n\ndef 
test_expanding_min_periods_apply(engine_and_raw):\n engine, raw = engine_and_raw\n ser = Series(np.random.default_rng(2).standard_normal(50))\n\n result = ser.expanding(min_periods=30).apply(\n lambda x: x.mean(), raw=raw, engine=engine\n )\n assert result[:29].isna().all()\n tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))\n\n # min_periods is working correctly\n result = ser.expanding(min_periods=15).apply(\n lambda x: x.mean(), raw=raw, engine=engine\n )\n assert isna(result.iloc[13])\n assert notna(result.iloc[14])\n\n ser2 = Series(np.random.default_rng(2).standard_normal(20))\n result = ser2.expanding(min_periods=5).apply(\n lambda x: x.mean(), raw=raw, engine=engine\n )\n assert isna(result[3])\n assert notna(result[4])\n\n # min_periods=0\n result0 = ser.expanding(min_periods=0).apply(\n lambda x: x.mean(), raw=raw, engine=engine\n )\n result1 = ser.expanding(min_periods=1).apply(\n lambda x: x.mean(), raw=raw, engine=engine\n )\n tm.assert_almost_equal(result0, result1)\n\n result = ser.expanding(min_periods=1).apply(\n lambda x: x.mean(), raw=raw, engine=engine\n )\n tm.assert_almost_equal(result.iloc[-1], np.mean(ser[:50]))\n\n\n@pytest.mark.parametrize(\n "f",\n [\n lambda x: (x.expanding(min_periods=5).cov(x, pairwise=True)),\n lambda x: (x.expanding(min_periods=5).corr(x, pairwise=True)),\n ],\n)\ndef test_moment_functions_zero_length_pairwise(f):\n df1 = DataFrame()\n df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))\n df2["a"] = df2["a"].astype("float64")\n\n df1_expected = DataFrame(index=MultiIndex.from_product([df1.index, df1.columns]))\n df2_expected = DataFrame(\n index=MultiIndex.from_product([df2.index, df2.columns], names=["bar", "foo"]),\n columns=Index(["a"], name="foo"),\n dtype="float64",\n )\n\n df1_result = f(df1)\n tm.assert_frame_equal(df1_result, df1_expected)\n\n df2_result = f(df2)\n tm.assert_frame_equal(df2_result, df2_expected)\n\n\n@pytest.mark.parametrize(\n "f",\n [\n lambda x: 
x.expanding().count(),\n lambda x: x.expanding(min_periods=5).cov(x, pairwise=False),\n lambda x: x.expanding(min_periods=5).corr(x, pairwise=False),\n lambda x: x.expanding(min_periods=5).max(),\n lambda x: x.expanding(min_periods=5).min(),\n lambda x: x.expanding(min_periods=5).sum(),\n lambda x: x.expanding(min_periods=5).mean(),\n lambda x: x.expanding(min_periods=5).std(),\n lambda x: x.expanding(min_periods=5).var(),\n lambda x: x.expanding(min_periods=5).skew(),\n lambda x: x.expanding(min_periods=5).kurt(),\n lambda x: x.expanding(min_periods=5).quantile(0.5),\n lambda x: x.expanding(min_periods=5).median(),\n lambda x: x.expanding(min_periods=5).apply(sum, raw=False),\n lambda x: x.expanding(min_periods=5).apply(sum, raw=True),\n ],\n)\ndef test_moment_functions_zero_length(f):\n # GH 8056\n s = Series(dtype=np.float64)\n s_expected = s\n df1 = DataFrame()\n df1_expected = df1\n df2 = DataFrame(columns=["a"])\n df2["a"] = df2["a"].astype("float64")\n df2_expected = df2\n\n s_result = f(s)\n tm.assert_series_equal(s_result, s_expected)\n\n df1_result = f(df1)\n tm.assert_frame_equal(df1_result, df1_expected)\n\n df2_result = f(df2)\n tm.assert_frame_equal(df2_result, df2_expected)\n\n\ndef test_expanding_apply_empty_series(engine_and_raw):\n engine, raw = engine_and_raw\n ser = Series([], dtype=np.float64)\n tm.assert_series_equal(\n ser, ser.expanding().apply(lambda x: x.mean(), raw=raw, engine=engine)\n )\n\n\ndef test_expanding_apply_min_periods_0(engine_and_raw):\n # GH 8080\n engine, raw = engine_and_raw\n s = Series([None, None, None])\n result = s.expanding(min_periods=0).apply(lambda x: len(x), raw=raw, engine=engine)\n expected = Series([1.0, 2.0, 3.0])\n tm.assert_series_equal(result, expected)\n\n\ndef test_expanding_cov_diff_index():\n # GH 7512\n s1 = Series([1, 2, 3], index=[0, 1, 2])\n s2 = Series([1, 3], index=[0, 2])\n result = s1.expanding().cov(s2)\n expected = Series([None, None, 2.0])\n tm.assert_series_equal(result, expected)\n\n s2a = 
Series([1, None, 3], index=[0, 1, 2])\n result = s1.expanding().cov(s2a)\n tm.assert_series_equal(result, expected)\n\n s1 = Series([7, 8, 10], index=[0, 1, 3])\n s2 = Series([7, 9, 10], index=[0, 2, 3])\n result = s1.expanding().cov(s2)\n expected = Series([None, None, None, 4.5])\n tm.assert_series_equal(result, expected)\n\n\ndef test_expanding_corr_diff_index():\n # GH 7512\n s1 = Series([1, 2, 3], index=[0, 1, 2])\n s2 = Series([1, 3], index=[0, 2])\n result = s1.expanding().corr(s2)\n expected = Series([None, None, 1.0])\n tm.assert_series_equal(result, expected)\n\n s2a = Series([1, None, 3], index=[0, 1, 2])\n result = s1.expanding().corr(s2a)\n tm.assert_series_equal(result, expected)\n\n s1 = Series([7, 8, 10], index=[0, 1, 3])\n s2 = Series([7, 9, 10], index=[0, 2, 3])\n result = s1.expanding().corr(s2)\n expected = Series([None, None, None, 1.0])\n tm.assert_series_equal(result, expected)\n\n\ndef test_expanding_cov_pairwise_diff_length():\n # GH 7512\n df1 = DataFrame([[1, 5], [3, 2], [3, 9]], columns=Index(["A", "B"], name="foo"))\n df1a = DataFrame(\n [[1, 5], [3, 9]], index=[0, 2], columns=Index(["A", "B"], name="foo")\n )\n df2 = DataFrame(\n [[5, 6], [None, None], [2, 1]], columns=Index(["X", "Y"], name="foo")\n )\n df2a = DataFrame(\n [[5, 6], [2, 1]], index=[0, 2], columns=Index(["X", "Y"], name="foo")\n )\n # TODO: xref gh-15826\n # .loc is not preserving the names\n result1 = df1.expanding().cov(df2, pairwise=True).loc[2]\n result2 = df1.expanding().cov(df2a, pairwise=True).loc[2]\n result3 = df1a.expanding().cov(df2, pairwise=True).loc[2]\n result4 = df1a.expanding().cov(df2a, pairwise=True).loc[2]\n expected = DataFrame(\n [[-3.0, -6.0], [-5.0, -10.0]],\n columns=Index(["A", "B"], name="foo"),\n index=Index(["X", "Y"], name="foo"),\n )\n tm.assert_frame_equal(result1, expected)\n tm.assert_frame_equal(result2, expected)\n tm.assert_frame_equal(result3, expected)\n tm.assert_frame_equal(result4, expected)\n\n\ndef 
test_expanding_corr_pairwise_diff_length():\n # GH 7512\n df1 = DataFrame(\n [[1, 2], [3, 2], [3, 4]], columns=["A", "B"], index=Index(range(3), name="bar")\n )\n df1a = DataFrame(\n [[1, 2], [3, 4]], index=Index([0, 2], name="bar"), columns=["A", "B"]\n )\n df2 = DataFrame(\n [[5, 6], [None, None], [2, 1]],\n columns=["X", "Y"],\n index=Index(range(3), name="bar"),\n )\n df2a = DataFrame(\n [[5, 6], [2, 1]], index=Index([0, 2], name="bar"), columns=["X", "Y"]\n )\n result1 = df1.expanding().corr(df2, pairwise=True).loc[2]\n result2 = df1.expanding().corr(df2a, pairwise=True).loc[2]\n result3 = df1a.expanding().corr(df2, pairwise=True).loc[2]\n result4 = df1a.expanding().corr(df2a, pairwise=True).loc[2]\n expected = DataFrame(\n [[-1.0, -1.0], [-1.0, -1.0]], columns=["A", "B"], index=Index(["X", "Y"])\n )\n tm.assert_frame_equal(result1, expected)\n tm.assert_frame_equal(result2, expected)\n tm.assert_frame_equal(result3, expected)\n tm.assert_frame_equal(result4, expected)\n\n\ndef test_expanding_apply_args_kwargs(engine_and_raw):\n def mean_w_arg(x, const):\n return np.mean(x) + const\n\n engine, raw = engine_and_raw\n\n df = DataFrame(np.random.default_rng(2).random((20, 3)))\n\n expected = df.expanding().apply(np.mean, engine=engine, raw=raw) + 20.0\n\n result = df.expanding().apply(mean_w_arg, engine=engine, raw=raw, args=(20,))\n tm.assert_frame_equal(result, expected)\n\n result = df.expanding().apply(mean_w_arg, raw=raw, kwargs={"const": 20})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_numeric_only_frame(arithmetic_win_operators, numeric_only):\n # GH#46560\n kernel = arithmetic_win_operators\n df = DataFrame({"a": [1], "b": 2, "c": 3})\n df["c"] = df["c"].astype(object)\n expanding = df.expanding()\n op = getattr(expanding, kernel, None)\n if op is not None:\n result = op(numeric_only=numeric_only)\n\n columns = ["a", "b"] if numeric_only else ["a", "b", "c"]\n expected = df[columns].agg([kernel]).reset_index(drop=True).astype(float)\n assert 
list(expected.columns) == columns\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("kernel", ["corr", "cov"])\n@pytest.mark.parametrize("use_arg", [True, False])\ndef test_numeric_only_corr_cov_frame(kernel, numeric_only, use_arg):\n # GH#46560\n df = DataFrame({"a": [1, 2, 3], "b": 2, "c": 3})\n df["c"] = df["c"].astype(object)\n arg = (df,) if use_arg else ()\n expanding = df.expanding()\n op = getattr(expanding, kernel)\n result = op(*arg, numeric_only=numeric_only)\n\n # Compare result to op using float dtypes, dropping c when numeric_only is True\n columns = ["a", "b"] if numeric_only else ["a", "b", "c"]\n df2 = df[columns].astype(float)\n arg2 = (df2,) if use_arg else ()\n expanding2 = df2.expanding()\n op2 = getattr(expanding2, kernel)\n expected = op2(*arg2, numeric_only=numeric_only)\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("dtype", [int, object])\ndef test_numeric_only_series(arithmetic_win_operators, numeric_only, dtype):\n # GH#46560\n kernel = arithmetic_win_operators\n ser = Series([1], dtype=dtype)\n expanding = ser.expanding()\n op = getattr(expanding, kernel)\n if numeric_only and dtype is object:\n msg = f"Expanding.{kernel} does not implement numeric_only"\n with pytest.raises(NotImplementedError, match=msg):\n op(numeric_only=numeric_only)\n else:\n result = op(numeric_only=numeric_only)\n expected = ser.agg([kernel]).reset_index(drop=True).astype(float)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("kernel", ["corr", "cov"])\n@pytest.mark.parametrize("use_arg", [True, False])\n@pytest.mark.parametrize("dtype", [int, object])\ndef test_numeric_only_corr_cov_series(kernel, use_arg, numeric_only, dtype):\n # GH#46560\n ser = Series([1, 2, 3], dtype=dtype)\n arg = (ser,) if use_arg else ()\n expanding = ser.expanding()\n op = getattr(expanding, kernel)\n if numeric_only and dtype is object:\n msg = f"Expanding.{kernel} does not implement numeric_only"\n with 
pytest.raises(NotImplementedError, match=msg):\n op(*arg, numeric_only=numeric_only)\n else:\n result = op(*arg, numeric_only=numeric_only)\n\n ser2 = ser.astype(float)\n arg2 = (ser2,) if use_arg else ()\n expanding2 = ser2.expanding()\n op2 = getattr(expanding2, kernel)\n expected = op2(*arg2, numeric_only=numeric_only)\n tm.assert_series_equal(result, expected)\n\n\ndef test_keyword_quantile_deprecated():\n # GH #52550\n ser = Series([1, 2, 3, 4])\n with tm.assert_produces_warning(FutureWarning):\n ser.expanding().quantile(quantile=0.5)\n | .venv\Lib\site-packages\pandas\tests\window\test_expanding.py | test_expanding.py | Python | 24,239 | 0.95 | 0.087137 | 0.064736 | vue-tools | 840 | 2024-12-03T23:46:39.984673 | Apache-2.0 | true | 8ba5b2a99a6de30931d46b143be8ed91 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n date_range,\n to_datetime,\n)\nimport pandas._testing as tm\nfrom pandas.api.indexers import BaseIndexer\nfrom pandas.core.groupby.groupby import get_groupby\n\n\n@pytest.fixture\ndef times_frame():\n """Frame for testing times argument in EWM groupby."""\n return DataFrame(\n {\n "A": ["a", "b", "c", "a", "b", "c", "a", "b", "c", "a"],\n "B": [0, 0, 0, 1, 1, 1, 2, 2, 2, 3],\n "C": to_datetime(\n [\n "2020-01-01",\n "2020-01-01",\n "2020-01-01",\n "2020-01-02",\n "2020-01-10",\n "2020-01-22",\n "2020-01-03",\n "2020-01-23",\n "2020-01-23",\n "2020-01-04",\n ]\n ),\n }\n )\n\n\n@pytest.fixture\ndef roll_frame():\n return DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)})\n\n\nclass TestRolling:\n def test_groupby_unsupported_argument(self, roll_frame):\n msg = r"groupby\(\) got an unexpected keyword argument 'foo'"\n with pytest.raises(TypeError, match=msg):\n roll_frame.groupby("A", foo=1)\n\n def test_getitem(self, roll_frame):\n g = roll_frame.groupby("A")\n g_mutated = get_groupby(roll_frame, by="A")\n\n expected = g_mutated.B.apply(lambda x: x.rolling(2).mean())\n\n result = g.rolling(2).mean().B\n tm.assert_series_equal(result, expected)\n\n result = g.rolling(2).B.mean()\n tm.assert_series_equal(result, expected)\n\n result = g.B.rolling(2).mean()\n tm.assert_series_equal(result, expected)\n\n result = roll_frame.B.groupby(roll_frame.A).rolling(2).mean()\n tm.assert_series_equal(result, expected)\n\n def test_getitem_multiple(self, roll_frame):\n # GH 13174\n g = roll_frame.groupby("A")\n r = g.rolling(2, min_periods=0)\n g_mutated = get_groupby(roll_frame, by="A")\n expected = g_mutated.B.apply(lambda x: x.rolling(2, min_periods=0).count())\n\n result = r.B.count()\n tm.assert_series_equal(result, expected)\n\n result = r.B.count()\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n 
"f",\n [\n "sum",\n "mean",\n "min",\n "max",\n "count",\n "kurt",\n "skew",\n ],\n )\n def test_rolling(self, f, roll_frame):\n g = roll_frame.groupby("A", group_keys=False)\n r = g.rolling(window=4)\n\n result = getattr(r, f)()\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = g.apply(lambda x: getattr(x.rolling(4), f)())\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop("A", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([roll_frame["A"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("f", ["std", "var"])\n def test_rolling_ddof(self, f, roll_frame):\n g = roll_frame.groupby("A", group_keys=False)\n r = g.rolling(window=4)\n\n result = getattr(r, f)(ddof=1)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = g.apply(lambda x: getattr(x.rolling(4), f)(ddof=1))\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop("A", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([roll_frame["A"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "interpolation", ["linear", "lower", "higher", "midpoint", "nearest"]\n )\n def test_rolling_quantile(self, interpolation, roll_frame):\n g = roll_frame.groupby("A", group_keys=False)\n r = g.rolling(window=4)\n\n result = r.quantile(0.4, interpolation=interpolation)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = g.apply(\n lambda x: x.rolling(4).quantile(0.4, interpolation=interpolation)\n )\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop("A", axis=1)\n # GH 39732\n expected_index = 
MultiIndex.from_arrays([roll_frame["A"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("f, expected_val", [["corr", 1], ["cov", 0.5]])\n def test_rolling_corr_cov_other_same_size_as_groups(self, f, expected_val):\n # GH 42915\n df = DataFrame(\n {"value": range(10), "idx1": [1] * 5 + [2] * 5, "idx2": [1, 2, 3, 4, 5] * 2}\n ).set_index(["idx1", "idx2"])\n other = DataFrame({"value": range(5), "idx2": [1, 2, 3, 4, 5]}).set_index(\n "idx2"\n )\n result = getattr(df.groupby(level=0).rolling(2), f)(other)\n expected_data = ([np.nan] + [expected_val] * 4) * 2\n expected = DataFrame(\n expected_data,\n columns=["value"],\n index=MultiIndex.from_arrays(\n [\n [1] * 5 + [2] * 5,\n [1] * 5 + [2] * 5,\n list(range(1, 6)) * 2,\n ],\n names=["idx1", "idx1", "idx2"],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("f", ["corr", "cov"])\n def test_rolling_corr_cov_other_diff_size_as_groups(self, f, roll_frame):\n g = roll_frame.groupby("A")\n r = g.rolling(window=4)\n\n result = getattr(r, f)(roll_frame)\n\n def func(x):\n return getattr(x.rolling(4), f)(roll_frame)\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = g.apply(func)\n # GH 39591: The grouped column should be all np.nan\n # (groupby.apply inserts 0s for cov)\n expected["A"] = np.nan\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("f", ["corr", "cov"])\n def test_rolling_corr_cov_pairwise(self, f, roll_frame):\n g = roll_frame.groupby("A")\n r = g.rolling(window=4)\n\n result = getattr(r.B, f)(pairwise=True)\n\n def func(x):\n return getattr(x.B.rolling(4), f)(pairwise=True)\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = g.apply(func)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n 
"func, expected_values",\n [("cov", [[1.0, 1.0], [1.0, 4.0]]), ("corr", [[1.0, 0.5], [0.5, 1.0]])],\n )\n def test_rolling_corr_cov_unordered(self, func, expected_values):\n # GH 43386\n df = DataFrame(\n {\n "a": ["g1", "g2", "g1", "g1"],\n "b": [0, 0, 1, 2],\n "c": [2, 0, 6, 4],\n }\n )\n rol = df.groupby("a").rolling(3)\n result = getattr(rol, func)()\n expected = DataFrame(\n {\n "b": 4 * [np.nan] + expected_values[0] + 2 * [np.nan],\n "c": 4 * [np.nan] + expected_values[1] + 2 * [np.nan],\n },\n index=MultiIndex.from_tuples(\n [\n ("g1", 0, "b"),\n ("g1", 0, "c"),\n ("g1", 2, "b"),\n ("g1", 2, "c"),\n ("g1", 3, "b"),\n ("g1", 3, "c"),\n ("g2", 1, "b"),\n ("g2", 1, "c"),\n ],\n names=["a", None, None],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_rolling_apply(self, raw, roll_frame):\n g = roll_frame.groupby("A", group_keys=False)\n r = g.rolling(window=4)\n\n # reduction\n result = r.apply(lambda x: x.sum(), raw=raw)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = g.apply(lambda x: x.rolling(4).apply(lambda y: y.sum(), raw=raw))\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop("A", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([roll_frame["A"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n def test_rolling_apply_mutability(self):\n # GH 14013\n df = DataFrame({"A": ["foo"] * 3 + ["bar"] * 3, "B": [1] * 6})\n g = df.groupby("A")\n\n mi = MultiIndex.from_tuples(\n [("bar", 3), ("bar", 4), ("bar", 5), ("foo", 0), ("foo", 1), ("foo", 2)]\n )\n\n mi.names = ["A", None]\n # Grouped column should not be a part of the output\n expected = DataFrame([np.nan, 2.0, 2.0] * 2, columns=["B"], index=mi)\n\n result = g.rolling(window=2).sum()\n tm.assert_frame_equal(result, expected)\n\n # Call an arbitrary function on the groupby\n g.sum()\n\n # Make sure nothing has been 
mutated\n result = g.rolling(window=2).sum()\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("expected_value,raw_value", [[1.0, True], [0.0, False]])\n def test_groupby_rolling(self, expected_value, raw_value):\n # GH 31754\n\n def isnumpyarray(x):\n return int(isinstance(x, np.ndarray))\n\n df = DataFrame({"id": [1, 1, 1], "value": [1, 2, 3]})\n result = df.groupby("id").value.rolling(1).apply(isnumpyarray, raw=raw_value)\n expected = Series(\n [expected_value] * 3,\n index=MultiIndex.from_tuples(((1, 0), (1, 1), (1, 2)), names=["id", None]),\n name="value",\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_rolling_center_center(self):\n # GH 35552\n series = Series(range(1, 6))\n result = series.groupby(series).rolling(center=True, window=3).mean()\n expected = Series(\n [np.nan] * 5,\n index=MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3), (5, 4))),\n )\n tm.assert_series_equal(result, expected)\n\n series = Series(range(1, 5))\n result = series.groupby(series).rolling(center=True, window=3).mean()\n expected = Series(\n [np.nan] * 4,\n index=MultiIndex.from_tuples(((1, 0), (2, 1), (3, 2), (4, 3))),\n )\n tm.assert_series_equal(result, expected)\n\n df = DataFrame({"a": ["a"] * 5 + ["b"] * 6, "b": range(11)})\n result = df.groupby("a").rolling(center=True, window=3).mean()\n expected = DataFrame(\n [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, 9, np.nan],\n index=MultiIndex.from_tuples(\n (\n ("a", 0),\n ("a", 1),\n ("a", 2),\n ("a", 3),\n ("a", 4),\n ("b", 5),\n ("b", 6),\n ("b", 7),\n ("b", 8),\n ("b", 9),\n ("b", 10),\n ),\n names=["a", None],\n ),\n columns=["b"],\n )\n tm.assert_frame_equal(result, expected)\n\n df = DataFrame({"a": ["a"] * 5 + ["b"] * 5, "b": range(10)})\n result = df.groupby("a").rolling(center=True, window=3).mean()\n expected = DataFrame(\n [np.nan, 1, 2, 3, np.nan, np.nan, 6, 7, 8, np.nan],\n index=MultiIndex.from_tuples(\n (\n ("a", 0),\n ("a", 1),\n ("a", 2),\n ("a", 3),\n ("a", 4),\n 
("b", 5),\n ("b", 6),\n ("b", 7),\n ("b", 8),\n ("b", 9),\n ),\n names=["a", None],\n ),\n columns=["b"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_center_on(self):\n # GH 37141\n df = DataFrame(\n data={\n "Date": date_range("2020-01-01", "2020-01-10"),\n "gb": ["group_1"] * 6 + ["group_2"] * 4,\n "value": range(10),\n }\n )\n result = (\n df.groupby("gb")\n .rolling(6, on="Date", center=True, min_periods=1)\n .value.mean()\n )\n mi = MultiIndex.from_arrays([df["gb"], df["Date"]], names=["gb", "Date"])\n expected = Series(\n [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 7.0, 7.5, 7.5, 7.5],\n name="value",\n index=mi,\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("min_periods", [5, 4, 3])\n def test_groupby_rolling_center_min_periods(self, min_periods):\n # GH 36040\n df = DataFrame({"group": ["A"] * 10 + ["B"] * 10, "data": range(20)})\n\n window_size = 5\n result = (\n df.groupby("group")\n .rolling(window_size, center=True, min_periods=min_periods)\n .mean()\n )\n result = result.reset_index()[["group", "data"]]\n\n grp_A_mean = [1.0, 1.5, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 7.5, 8.0]\n grp_B_mean = [x + 10.0 for x in grp_A_mean]\n\n num_nans = max(0, min_periods - 3) # For window_size of 5\n nans = [np.nan] * num_nans\n grp_A_expected = nans + grp_A_mean[num_nans : 10 - num_nans] + nans\n grp_B_expected = nans + grp_B_mean[num_nans : 10 - num_nans] + nans\n\n expected = DataFrame(\n {"group": ["A"] * 10 + ["B"] * 10, "data": grp_A_expected + grp_B_expected}\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_subselect_rolling(self):\n # GH 35486\n df = DataFrame(\n {"a": [1, 2, 3, 2], "b": [4.0, 2.0, 3.0, 1.0], "c": [10, 20, 30, 20]}\n )\n result = df.groupby("a")[["b"]].rolling(2).max()\n expected = DataFrame(\n [np.nan, np.nan, 2.0, np.nan],\n columns=["b"],\n index=MultiIndex.from_tuples(\n ((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n 
result = df.groupby("a")["b"].rolling(2).max()\n expected = Series(\n [np.nan, np.nan, 2.0, np.nan],\n index=MultiIndex.from_tuples(\n ((1, 0), (2, 1), (2, 3), (3, 2)), names=["a", None]\n ),\n name="b",\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_rolling_custom_indexer(self):\n # GH 35557\n class SimpleIndexer(BaseIndexer):\n def get_window_bounds(\n self,\n num_values=0,\n min_periods=None,\n center=None,\n closed=None,\n step=None,\n ):\n min_periods = self.window_size if min_periods is None else 0\n end = np.arange(num_values, dtype=np.int64) + 1\n start = end.copy() - self.window_size\n start[start < 0] = min_periods\n return start, end\n\n df = DataFrame(\n {"a": [1.0, 2.0, 3.0, 4.0, 5.0] * 3}, index=[0] * 5 + [1] * 5 + [2] * 5\n )\n result = (\n df.groupby(df.index)\n .rolling(SimpleIndexer(window_size=3), min_periods=1)\n .sum()\n )\n expected = df.groupby(df.index).rolling(window=3, min_periods=1).sum()\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_subset_with_closed(self):\n # GH 35549\n df = DataFrame(\n {\n "column1": range(8),\n "column2": range(8),\n "group": ["A"] * 4 + ["B"] * 4,\n "date": [\n Timestamp(date)\n for date in ["2019-01-01", "2019-01-01", "2019-01-02", "2019-01-02"]\n ]\n * 2,\n }\n )\n result = (\n df.groupby("group").rolling("1D", on="date", closed="left")["column1"].sum()\n )\n expected = Series(\n [np.nan, np.nan, 1.0, 1.0, np.nan, np.nan, 9.0, 9.0],\n index=MultiIndex.from_frame(\n df[["group", "date"]],\n names=["group", "date"],\n ),\n name="column1",\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_subset_rolling_subset_with_closed(self):\n # GH 35549\n df = DataFrame(\n {\n "column1": range(8),\n "column2": range(8),\n "group": ["A"] * 4 + ["B"] * 4,\n "date": [\n Timestamp(date)\n for date in ["2019-01-01", "2019-01-01", "2019-01-02", "2019-01-02"]\n ]\n * 2,\n }\n )\n\n result = (\n df.groupby("group")[["column1", "date"]]\n .rolling("1D", on="date", 
closed="left")["column1"]\n .sum()\n )\n expected = Series(\n [np.nan, np.nan, 1.0, 1.0, np.nan, np.nan, 9.0, 9.0],\n index=MultiIndex.from_frame(\n df[["group", "date"]],\n names=["group", "date"],\n ),\n name="column1",\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("func", ["max", "min"])\n def test_groupby_rolling_index_changed(self, func):\n # GH: #36018 nlevels of MultiIndex changed\n ds = Series(\n [1, 2, 2],\n index=MultiIndex.from_tuples(\n [("a", "x"), ("a", "y"), ("c", "z")], names=["1", "2"]\n ),\n name="a",\n )\n\n result = getattr(ds.groupby(ds).rolling(2), func)()\n expected = Series(\n [np.nan, np.nan, 2.0],\n index=MultiIndex.from_tuples(\n [(1, "a", "x"), (2, "a", "y"), (2, "c", "z")], names=["a", "1", "2"]\n ),\n name="a",\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_rolling_empty_frame(self):\n # GH 36197\n expected = DataFrame({"s1": []})\n result = expected.groupby("s1").rolling(window=1).sum()\n # GH 32262\n expected = expected.drop(columns="s1")\n # GH-38057 from_tuples gives empty object dtype, we now get float/int levels\n # expected.index = MultiIndex.from_tuples([], names=["s1", None])\n expected.index = MultiIndex.from_product(\n [Index([], dtype="float64"), Index([], dtype="int64")], names=["s1", None]\n )\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame({"s1": [], "s2": []})\n result = expected.groupby(["s1", "s2"]).rolling(window=1).sum()\n # GH 32262\n expected = expected.drop(columns=["s1", "s2"])\n expected.index = MultiIndex.from_product(\n [\n Index([], dtype="float64"),\n Index([], dtype="float64"),\n Index([], dtype="int64"),\n ],\n names=["s1", "s2", None],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_string_index(self):\n # GH: 36727\n df = DataFrame(\n [\n ["A", "group_1", Timestamp(2019, 1, 1, 9)],\n ["B", "group_1", Timestamp(2019, 1, 2, 9)],\n ["Z", "group_2", Timestamp(2019, 1, 3, 9)],\n ["H", "group_1", Timestamp(2019, 
1, 6, 9)],\n ["E", "group_2", Timestamp(2019, 1, 20, 9)],\n ],\n columns=["index", "group", "eventTime"],\n ).set_index("index")\n\n groups = df.groupby("group")\n df["count_to_date"] = groups.cumcount()\n rolling_groups = groups.rolling("10d", on="eventTime")\n result = rolling_groups.apply(lambda df: df.shape[0])\n expected = DataFrame(\n [\n ["A", "group_1", Timestamp(2019, 1, 1, 9), 1.0],\n ["B", "group_1", Timestamp(2019, 1, 2, 9), 2.0],\n ["H", "group_1", Timestamp(2019, 1, 6, 9), 3.0],\n ["Z", "group_2", Timestamp(2019, 1, 3, 9), 1.0],\n ["E", "group_2", Timestamp(2019, 1, 20, 9), 1.0],\n ],\n columns=["index", "group", "eventTime", "count_to_date"],\n ).set_index(["group", "index"])\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_no_sort(self):\n # GH 36889\n result = (\n DataFrame({"foo": [2, 1], "bar": [2, 1]})\n .groupby("foo", sort=False)\n .rolling(1)\n .min()\n )\n expected = DataFrame(\n np.array([[2.0, 2.0], [1.0, 1.0]]),\n columns=["foo", "bar"],\n index=MultiIndex.from_tuples([(2, 0), (1, 1)], names=["foo", None]),\n )\n # GH 32262\n expected = expected.drop(columns="foo")\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_count_closed_on(self, unit):\n # GH 35869\n df = DataFrame(\n {\n "column1": range(6),\n "column2": range(6),\n "group": 3 * ["A", "B"],\n "date": date_range(end="20190101", periods=6, unit=unit),\n }\n )\n result = (\n df.groupby("group")\n .rolling("3d", on="date", closed="left")["column1"]\n .count()\n )\n dti = DatetimeIndex(\n [\n "2018-12-27",\n "2018-12-29",\n "2018-12-31",\n "2018-12-28",\n "2018-12-30",\n "2019-01-01",\n ],\n dtype=f"M8[{unit}]",\n )\n mi = MultiIndex.from_arrays(\n [\n ["A", "A", "A", "B", "B", "B"],\n dti,\n ],\n names=["group", "date"],\n )\n expected = Series(\n [np.nan, 1.0, 1.0, np.nan, 1.0, 1.0],\n name="column1",\n index=mi,\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n ("func", "kwargs"),\n [("rolling", {"window": 2, 
"min_periods": 1}), ("expanding", {})],\n )\n def test_groupby_rolling_sem(self, func, kwargs):\n # GH: 26476\n df = DataFrame(\n [["a", 1], ["a", 2], ["b", 1], ["b", 2], ["b", 3]], columns=["a", "b"]\n )\n result = getattr(df.groupby("a"), func)(**kwargs).sem()\n expected = DataFrame(\n {"a": [np.nan] * 5, "b": [np.nan, 0.70711, np.nan, 0.70711, 0.70711]},\n index=MultiIndex.from_tuples(\n [("a", 0), ("a", 1), ("b", 2), ("b", 3), ("b", 4)], names=["a", None]\n ),\n )\n # GH 32262\n expected = expected.drop(columns="a")\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n ("rollings", "key"), [({"on": "a"}, "a"), ({"on": None}, "index")]\n )\n def test_groupby_rolling_nans_in_index(self, rollings, key):\n # GH: 34617\n df = DataFrame(\n {\n "a": to_datetime(["2020-06-01 12:00", "2020-06-01 14:00", np.nan]),\n "b": [1, 2, 3],\n "c": [1, 1, 1],\n }\n )\n if key == "index":\n df = df.set_index("a")\n with pytest.raises(ValueError, match=f"{key} values must not have NaT"):\n df.groupby("c").rolling("60min", **rollings)\n\n @pytest.mark.parametrize("group_keys", [True, False])\n def test_groupby_rolling_group_keys(self, group_keys):\n # GH 37641\n # GH 38523: GH 37641 actually was not a bug.\n # group_keys only applies to groupby.apply directly\n arrays = [["val1", "val1", "val2"], ["val1", "val1", "val2"]]\n index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2"))\n\n s = Series([1, 2, 3], index=index)\n result = s.groupby(["idx1", "idx2"], group_keys=group_keys).rolling(1).mean()\n expected = Series(\n [1.0, 2.0, 3.0],\n index=MultiIndex.from_tuples(\n [\n ("val1", "val1", "val1", "val1"),\n ("val1", "val1", "val1", "val1"),\n ("val2", "val2", "val2", "val2"),\n ],\n names=["idx1", "idx2", "idx1", "idx2"],\n ),\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_rolling_index_level_and_column_label(self):\n # The groupby keys should not appear as a resulting column\n arrays = [["val1", "val1", "val2"], ["val1", "val1", 
"val2"]]\n index = MultiIndex.from_arrays(arrays, names=("idx1", "idx2"))\n\n df = DataFrame({"A": [1, 1, 2], "B": range(3)}, index=index)\n result = df.groupby(["idx1", "A"]).rolling(1).mean()\n expected = DataFrame(\n {"B": [0.0, 1.0, 2.0]},\n index=MultiIndex.from_tuples(\n [\n ("val1", 1, "val1", "val1"),\n ("val1", 1, "val1", "val1"),\n ("val2", 2, "val2", "val2"),\n ],\n names=["idx1", "A", "idx1", "idx2"],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_groupby_rolling_resulting_multiindex(self):\n # a few different cases checking the created MultiIndex of the result\n # https://github.com/pandas-dev/pandas/pull/38057\n\n # grouping by 1 columns -> 2-level MI as result\n df = DataFrame({"a": np.arange(8.0), "b": [1, 2] * 4})\n result = df.groupby("b").rolling(3).mean()\n expected_index = MultiIndex.from_tuples(\n [(1, 0), (1, 2), (1, 4), (1, 6), (2, 1), (2, 3), (2, 5), (2, 7)],\n names=["b", None],\n )\n tm.assert_index_equal(result.index, expected_index)\n\n def test_groupby_rolling_resulting_multiindex2(self):\n # grouping by 2 columns -> 3-level MI as result\n df = DataFrame({"a": np.arange(12.0), "b": [1, 2] * 6, "c": [1, 2, 3, 4] * 3})\n result = df.groupby(["b", "c"]).rolling(2).sum()\n expected_index = MultiIndex.from_tuples(\n [\n (1, 1, 0),\n (1, 1, 4),\n (1, 1, 8),\n (1, 3, 2),\n (1, 3, 6),\n (1, 3, 10),\n (2, 2, 1),\n (2, 2, 5),\n (2, 2, 9),\n (2, 4, 3),\n (2, 4, 7),\n (2, 4, 11),\n ],\n names=["b", "c", None],\n )\n tm.assert_index_equal(result.index, expected_index)\n\n def test_groupby_rolling_resulting_multiindex3(self):\n # grouping with 1 level on dataframe with 2-level MI -> 3-level MI as result\n df = DataFrame({"a": np.arange(8.0), "b": [1, 2] * 4, "c": [1, 2, 3, 4] * 2})\n df = df.set_index("c", append=True)\n result = df.groupby("b").rolling(3).mean()\n expected_index = MultiIndex.from_tuples(\n [\n (1, 0, 1),\n (1, 2, 3),\n (1, 4, 1),\n (1, 6, 3),\n (2, 1, 2),\n (2, 3, 4),\n (2, 5, 2),\n (2, 7, 4),\n ],\n names=["b", 
None, "c"],\n )\n tm.assert_index_equal(result.index, expected_index, exact="equiv")\n\n def test_groupby_rolling_object_doesnt_affect_groupby_apply(self, roll_frame):\n # GH 39732\n g = roll_frame.groupby("A", group_keys=False)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = g.apply(lambda x: x.rolling(4).sum()).index\n _ = g.rolling(window=4)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = g.apply(lambda x: x.rolling(4).sum()).index\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n ("window", "min_periods", "closed", "expected"),\n [\n (2, 0, "left", [None, 0.0, 1.0, 1.0, None, 0.0, 1.0, 1.0]),\n (2, 2, "left", [None, None, 1.0, 1.0, None, None, 1.0, 1.0]),\n (4, 4, "left", [None, None, None, None, None, None, None, None]),\n (4, 4, "right", [None, None, None, 5.0, None, None, None, 5.0]),\n ],\n )\n def test_groupby_rolling_var(self, window, min_periods, closed, expected):\n df = DataFrame([1, 2, 3, 4, 5, 6, 7, 8])\n result = (\n df.groupby([1, 2, 1, 2, 1, 2, 1, 2])\n .rolling(window=window, min_periods=min_periods, closed=closed)\n .var(0)\n )\n expected_result = DataFrame(\n np.array(expected, dtype="float64"),\n index=MultiIndex(\n levels=[np.array([1, 2]), [0, 1, 2, 3, 4, 5, 6, 7]],\n codes=[[0, 0, 0, 0, 1, 1, 1, 1], [0, 2, 4, 6, 1, 3, 5, 7]],\n ),\n )\n tm.assert_frame_equal(result, expected_result)\n\n @pytest.mark.parametrize(\n "columns", [MultiIndex.from_tuples([("A", ""), ("B", "C")]), ["A", "B"]]\n )\n def test_by_column_not_in_values(self, columns):\n # GH 32262\n df = DataFrame([[1, 0]] * 20 + [[2, 0]] * 12 + [[3, 0]] * 8, columns=columns)\n g = df.groupby("A")\n original_obj = g.obj.copy(deep=True)\n r = g.rolling(4)\n result = r.sum()\n assert "A" not in result.columns\n tm.assert_frame_equal(g.obj, original_obj)\n\n def 
test_groupby_level(self):\n # GH 38523, 38787\n arrays = [\n ["Falcon", "Falcon", "Parrot", "Parrot"],\n ["Captive", "Wild", "Captive", "Wild"],\n ]\n index = MultiIndex.from_arrays(arrays, names=("Animal", "Type"))\n df = DataFrame({"Max Speed": [390.0, 350.0, 30.0, 20.0]}, index=index)\n result = df.groupby(level=0)["Max Speed"].rolling(2).sum()\n expected = Series(\n [np.nan, 740.0, np.nan, 50.0],\n index=MultiIndex.from_tuples(\n [\n ("Falcon", "Falcon", "Captive"),\n ("Falcon", "Falcon", "Wild"),\n ("Parrot", "Parrot", "Captive"),\n ("Parrot", "Parrot", "Wild"),\n ],\n names=["Animal", "Animal", "Type"],\n ),\n name="Max Speed",\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "by, expected_data",\n [\n [["id"], {"num": [100.0, 150.0, 150.0, 200.0]}],\n [\n ["id", "index"],\n {\n "date": [\n Timestamp("2018-01-01"),\n Timestamp("2018-01-02"),\n Timestamp("2018-01-01"),\n Timestamp("2018-01-02"),\n ],\n "num": [100.0, 200.0, 150.0, 250.0],\n },\n ],\n ],\n )\n def test_as_index_false(self, by, expected_data, unit):\n # GH 39433\n data = [\n ["A", "2018-01-01", 100.0],\n ["A", "2018-01-02", 200.0],\n ["B", "2018-01-01", 150.0],\n ["B", "2018-01-02", 250.0],\n ]\n df = DataFrame(data, columns=["id", "date", "num"])\n df["date"] = df["date"].astype(f"M8[{unit}]")\n df = df.set_index(["date"])\n\n gp_by = [getattr(df, attr) for attr in by]\n result = (\n df.groupby(gp_by, as_index=False).rolling(window=2, min_periods=1).mean()\n )\n\n expected = {"id": ["A", "A", "B", "B"]}\n expected.update(expected_data)\n expected = DataFrame(\n expected,\n index=df.index,\n )\n if "date" in expected_data:\n expected["date"] = expected["date"].astype(f"M8[{unit}]")\n tm.assert_frame_equal(result, expected)\n\n def test_nan_and_zero_endpoints(self, any_int_numpy_dtype):\n # https://github.com/twosigma/pandas/issues/53\n typ = np.dtype(any_int_numpy_dtype).type\n size = 1000\n idx = np.repeat(typ(0), size)\n idx[-1] = 1\n\n val = 5e25\n arr = 
np.repeat(val, size)\n arr[0] = np.nan\n arr[-1] = 0\n\n df = DataFrame(\n {\n "index": idx,\n "adl2": arr,\n }\n ).set_index("index")\n result = df.groupby("index")["adl2"].rolling(window=10, min_periods=1).mean()\n expected = Series(\n arr,\n name="adl2",\n index=MultiIndex.from_arrays(\n [\n Index([0] * 999 + [1], dtype=typ, name="index"),\n Index([0] * 999 + [1], dtype=typ, name="index"),\n ],\n ),\n )\n tm.assert_series_equal(result, expected)\n\n def test_groupby_rolling_non_monotonic(self):\n # GH 43909\n\n shuffled = [3, 0, 1, 2]\n sec = 1_000\n df = DataFrame(\n [{"t": Timestamp(2 * x * sec), "x": x + 1, "c": 42} for x in shuffled]\n )\n with pytest.raises(ValueError, match=r".* must be monotonic"):\n df.groupby("c").rolling(on="t", window="3s")\n\n def test_groupby_monotonic(self):\n # GH 15130\n # we don't need to validate monotonicity when grouping\n\n # GH 43909 we should raise an error here to match\n # behaviour of non-groupby rolling.\n\n data = [\n ["David", "1/1/2015", 100],\n ["David", "1/5/2015", 500],\n ["David", "5/30/2015", 50],\n ["David", "7/25/2015", 50],\n ["Ryan", "1/4/2014", 100],\n ["Ryan", "1/19/2015", 500],\n ["Ryan", "3/31/2016", 50],\n ["Joe", "7/1/2015", 100],\n ["Joe", "9/9/2015", 500],\n ["Joe", "10/15/2015", 50],\n ]\n\n df = DataFrame(data=data, columns=["name", "date", "amount"])\n df["date"] = to_datetime(df["date"])\n df = df.sort_values("date")\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = (\n df.set_index("date")\n .groupby("name")\n .apply(lambda x: x.rolling("180D")["amount"].sum())\n )\n result = df.groupby("name").rolling("180D", on="date")["amount"].sum()\n tm.assert_series_equal(result, expected)\n\n def test_datelike_on_monotonic_within_each_group(self):\n # GH 13966 (similar to #15130, closed by #15175)\n\n # superseded by 43909\n # GH 46061: OK if the on is monotonic relative to each each group\n\n dates = 
date_range(start="2016-01-01 09:30:00", periods=20, freq="s")\n df = DataFrame(\n {\n "A": [1] * 20 + [2] * 12 + [3] * 8,\n "B": np.concatenate((dates, dates)),\n "C": np.arange(40),\n }\n )\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = (\n df.set_index("B")\n .groupby("A")\n .apply(lambda x: x.rolling("4s")["C"].mean())\n )\n result = df.groupby("A").rolling("4s", on="B").C.mean()\n tm.assert_series_equal(result, expected)\n\n def test_datelike_on_not_monotonic_within_each_group(self):\n # GH 46061\n df = DataFrame(\n {\n "A": [1] * 3 + [2] * 3,\n "B": [Timestamp(year, 1, 1) for year in [2020, 2021, 2019]] * 2,\n "C": range(6),\n }\n )\n with pytest.raises(ValueError, match="Each group within B must be monotonic."):\n df.groupby("A").rolling("365D", on="B")\n\n\nclass TestExpanding:\n @pytest.fixture\n def frame(self):\n return DataFrame({"A": [1] * 20 + [2] * 12 + [3] * 8, "B": np.arange(40)})\n\n @pytest.mark.parametrize(\n "f", ["sum", "mean", "min", "max", "count", "kurt", "skew"]\n )\n def test_expanding(self, f, frame):\n g = frame.groupby("A", group_keys=False)\n r = g.expanding()\n\n result = getattr(r, f)()\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = g.apply(lambda x: getattr(x.expanding(), f)())\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop("A", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([frame["A"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("f", ["std", "var"])\n def test_expanding_ddof(self, f, frame):\n g = frame.groupby("A", group_keys=False)\n r = g.expanding()\n\n result = getattr(r, f)(ddof=0)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = 
g.apply(lambda x: getattr(x.expanding(), f)(ddof=0))\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop("A", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([frame["A"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "interpolation", ["linear", "lower", "higher", "midpoint", "nearest"]\n )\n def test_expanding_quantile(self, interpolation, frame):\n g = frame.groupby("A", group_keys=False)\n r = g.expanding()\n\n result = r.quantile(0.4, interpolation=interpolation)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = g.apply(\n lambda x: x.expanding().quantile(0.4, interpolation=interpolation)\n )\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop("A", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([frame["A"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("f", ["corr", "cov"])\n def test_expanding_corr_cov(self, f, frame):\n g = frame.groupby("A")\n r = g.expanding()\n\n result = getattr(r, f)(frame)\n\n def func_0(x):\n return getattr(x.expanding(), f)(frame)\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = g.apply(func_0)\n # GH 39591: groupby.apply returns 1 instead of nan for windows\n # with all nan values\n null_idx = list(range(20, 61)) + list(range(72, 113))\n expected.iloc[null_idx, 1] = np.nan\n # GH 39591: The grouped column should be all np.nan\n # (groupby.apply inserts 0s for cov)\n expected["A"] = np.nan\n tm.assert_frame_equal(result, expected)\n\n result = getattr(r.B, f)(pairwise=True)\n\n def func_1(x):\n return getattr(x.B.expanding(), f)(pairwise=True)\n\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with 
tm.assert_produces_warning(FutureWarning, match=msg):\n expected = g.apply(func_1)\n tm.assert_series_equal(result, expected)\n\n def test_expanding_apply(self, raw, frame):\n g = frame.groupby("A", group_keys=False)\n r = g.expanding()\n\n # reduction\n result = r.apply(lambda x: x.sum(), raw=raw)\n msg = "DataFrameGroupBy.apply operated on the grouping columns"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = g.apply(\n lambda x: x.expanding().apply(lambda y: y.sum(), raw=raw)\n )\n # groupby.apply doesn't drop the grouped-by column\n expected = expected.drop("A", axis=1)\n # GH 39732\n expected_index = MultiIndex.from_arrays([frame["A"], range(40)])\n expected.index = expected_index\n tm.assert_frame_equal(result, expected)\n\n\nclass TestEWM:\n @pytest.mark.parametrize(\n "method, expected_data",\n [\n ["mean", [0.0, 0.6666666666666666, 1.4285714285714286, 2.2666666666666666]],\n ["std", [np.nan, 0.707107, 0.963624, 1.177164]],\n ["var", [np.nan, 0.5, 0.9285714285714286, 1.3857142857142857]],\n ],\n )\n def test_methods(self, method, expected_data):\n # GH 16037\n df = DataFrame({"A": ["a"] * 4, "B": range(4)})\n result = getattr(df.groupby("A").ewm(com=1.0), method)()\n expected = DataFrame(\n {"B": expected_data},\n index=MultiIndex.from_tuples(\n [\n ("a", 0),\n ("a", 1),\n ("a", 2),\n ("a", 3),\n ],\n names=["A", None],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "method, expected_data",\n [["corr", [np.nan, 1.0, 1.0, 1]], ["cov", [np.nan, 0.5, 0.928571, 1.385714]]],\n )\n def test_pairwise_methods(self, method, expected_data):\n # GH 16037\n df = DataFrame({"A": ["a"] * 4, "B": range(4)})\n result = getattr(df.groupby("A").ewm(com=1.0), method)()\n expected = DataFrame(\n {"B": expected_data},\n index=MultiIndex.from_tuples(\n [\n ("a", 0, "B"),\n ("a", 1, "B"),\n ("a", 2, "B"),\n ("a", 3, "B"),\n ],\n names=["A", None, None],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n expected = 
df.groupby("A")[["B"]].apply(\n lambda x: getattr(x.ewm(com=1.0), method)()\n )\n tm.assert_frame_equal(result, expected)\n\n def test_times(self, times_frame):\n # GH 40951\n halflife = "23 days"\n # GH#42738\n times = times_frame.pop("C")\n result = times_frame.groupby("A").ewm(halflife=halflife, times=times).mean()\n expected = DataFrame(\n {\n "B": [\n 0.0,\n 0.507534,\n 1.020088,\n 1.537661,\n 0.0,\n 0.567395,\n 1.221209,\n 0.0,\n 0.653141,\n 1.195003,\n ]\n },\n index=MultiIndex.from_tuples(\n [\n ("a", 0),\n ("a", 3),\n ("a", 6),\n ("a", 9),\n ("b", 1),\n ("b", 4),\n ("b", 7),\n ("c", 2),\n ("c", 5),\n ("c", 8),\n ],\n names=["A", None],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n def test_times_array(self, times_frame):\n # GH 40951\n halflife = "23 days"\n times = times_frame.pop("C")\n gb = times_frame.groupby("A")\n result = gb.ewm(halflife=halflife, times=times).mean()\n expected = gb.ewm(halflife=halflife, times=times.values).mean()\n tm.assert_frame_equal(result, expected)\n\n def test_dont_mutate_obj_after_slicing(self):\n # GH 43355\n df = DataFrame(\n {\n "id": ["a", "a", "b", "b", "b"],\n "timestamp": date_range("2021-9-1", periods=5, freq="h"),\n "y": range(5),\n }\n )\n grp = df.groupby("id").rolling("1h", on="timestamp")\n result = grp.count()\n expected_df = DataFrame(\n {\n "timestamp": date_range("2021-9-1", periods=5, freq="h"),\n "y": [1.0] * 5,\n },\n index=MultiIndex.from_arrays(\n [["a", "a", "b", "b", "b"], list(range(5))], names=["id", None]\n ),\n )\n tm.assert_frame_equal(result, expected_df)\n\n result = grp["y"].count()\n expected_series = Series(\n [1.0] * 5,\n index=MultiIndex.from_arrays(\n [\n ["a", "a", "b", "b", "b"],\n date_range("2021-9-1", periods=5, freq="h"),\n ],\n names=["id", "timestamp"],\n ),\n name="y",\n )\n tm.assert_series_equal(result, expected_series)\n # This is the key test\n result = grp.count()\n tm.assert_frame_equal(result, expected_df)\n\n\ndef 
test_rolling_corr_with_single_integer_in_index():\n # GH 44078\n df = DataFrame({"a": [(1,), (1,), (1,)], "b": [4, 5, 6]})\n gb = df.groupby(["a"])\n result = gb.rolling(2).corr(other=df)\n index = MultiIndex.from_tuples([((1,), 0), ((1,), 1), ((1,), 2)], names=["a", None])\n expected = DataFrame(\n {"a": [np.nan, np.nan, np.nan], "b": [np.nan, 1.0, 1.0]}, index=index\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_corr_with_tuples_in_index():\n # GH 44078\n df = DataFrame(\n {\n "a": [\n (\n 1,\n 2,\n ),\n (\n 1,\n 2,\n ),\n (\n 1,\n 2,\n ),\n ],\n "b": [4, 5, 6],\n }\n )\n gb = df.groupby(["a"])\n result = gb.rolling(2).corr(other=df)\n index = MultiIndex.from_tuples(\n [((1, 2), 0), ((1, 2), 1), ((1, 2), 2)], names=["a", None]\n )\n expected = DataFrame(\n {"a": [np.nan, np.nan, np.nan], "b": [np.nan, 1.0, 1.0]}, index=index\n )\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\window\test_groupby.py | test_groupby.py | Python | 46,639 | 0.95 | 0.062215 | 0.072391 | python-kit | 257 | 2023-12-16T03:37:12.576965 | Apache-2.0 | true | 43c15c52f3bfd49695165e0d82a8e698 |
import numpy as np\nimport pytest\n\nfrom pandas.compat import is_platform_arm\nfrom pandas.errors import NumbaUtilError\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n DataFrame,\n Series,\n option_context,\n to_datetime,\n)\nimport pandas._testing as tm\nfrom pandas.util.version import Version\n\npytestmark = [pytest.mark.single_cpu]\n\nnumba = pytest.importorskip("numba")\npytestmark.append(\n pytest.mark.skipif(\n Version(numba.__version__) == Version("0.61") and is_platform_arm(),\n reason=f"Segfaults on ARM platforms with numba {numba.__version__}",\n )\n)\n\n\n@pytest.fixture(params=["single", "table"])\ndef method(request):\n """method keyword in rolling/expanding/ewm constructor"""\n return request.param\n\n\n@pytest.fixture(\n params=[\n ["sum", {}],\n ["mean", {}],\n ["median", {}],\n ["max", {}],\n ["min", {}],\n ["var", {}],\n ["var", {"ddof": 0}],\n ["std", {}],\n ["std", {"ddof": 0}],\n ]\n)\ndef arithmetic_numba_supported_operators(request):\n return request.param\n\n\n@td.skip_if_no("numba")\n@pytest.mark.filterwarnings("ignore")\n# Filter warnings when parallel=True and the function can't be parallelized by Numba\nclass TestEngine:\n @pytest.mark.parametrize("jit", [True, False])\n def test_numba_vs_cython_apply(self, jit, nogil, parallel, nopython, center, step):\n def f(x, *args):\n arg_sum = 0\n for arg in args:\n arg_sum += arg\n return np.mean(x) + arg_sum\n\n if jit:\n import numba\n\n f = numba.jit(f)\n\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n args = (2,)\n\n s = Series(range(10))\n result = s.rolling(2, center=center, step=step).apply(\n f, args=args, engine="numba", engine_kwargs=engine_kwargs, raw=True\n )\n expected = s.rolling(2, center=center, step=step).apply(\n f, engine="cython", args=args, raw=True\n )\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "data",\n [\n DataFrame(np.eye(5)),\n DataFrame(\n [\n [5, 7, 7, 7, np.nan, np.inf, 4, 3, 3, 
3],\n [5, 7, 7, 7, np.nan, np.inf, 7, 3, 3, 3],\n [np.nan, np.nan, 5, 6, 7, 5, 5, 5, 5, 5],\n ]\n ).T,\n Series(range(5), name="foo"),\n Series([20, 10, 10, np.inf, 1, 1, 2, 3]),\n Series([20, 10, 10, np.nan, 10, 1, 2, 3]),\n ],\n )\n def test_numba_vs_cython_rolling_methods(\n self,\n data,\n nogil,\n parallel,\n nopython,\n arithmetic_numba_supported_operators,\n step,\n ):\n method, kwargs = arithmetic_numba_supported_operators\n\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n\n roll = data.rolling(3, step=step)\n result = getattr(roll, method)(\n engine="numba", engine_kwargs=engine_kwargs, **kwargs\n )\n expected = getattr(roll, method)(engine="cython", **kwargs)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize(\n "data", [DataFrame(np.eye(5)), Series(range(5), name="foo")]\n )\n def test_numba_vs_cython_expanding_methods(\n self, data, nogil, parallel, nopython, arithmetic_numba_supported_operators\n ):\n method, kwargs = arithmetic_numba_supported_operators\n\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n\n data = DataFrame(np.eye(5))\n expand = data.expanding()\n result = getattr(expand, method)(\n engine="numba", engine_kwargs=engine_kwargs, **kwargs\n )\n expected = getattr(expand, method)(engine="cython", **kwargs)\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize("jit", [True, False])\n def test_cache_apply(self, jit, nogil, parallel, nopython, step):\n # Test that the functions are cached correctly if we switch functions\n def func_1(x):\n return np.mean(x) + 4\n\n def func_2(x):\n return np.std(x) * 5\n\n if jit:\n import numba\n\n func_1 = numba.jit(func_1)\n func_2 = numba.jit(func_2)\n\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n\n roll = Series(range(10)).rolling(2, step=step)\n result = roll.apply(\n func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True\n )\n expected = roll.apply(func_1, engine="cython", 
raw=True)\n tm.assert_series_equal(result, expected)\n\n result = roll.apply(\n func_2, engine="numba", engine_kwargs=engine_kwargs, raw=True\n )\n expected = roll.apply(func_2, engine="cython", raw=True)\n tm.assert_series_equal(result, expected)\n # This run should use the cached func_1\n result = roll.apply(\n func_1, engine="numba", engine_kwargs=engine_kwargs, raw=True\n )\n expected = roll.apply(func_1, engine="cython", raw=True)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "window,window_kwargs",\n [\n ["rolling", {"window": 3, "min_periods": 0}],\n ["expanding", {}],\n ],\n )\n def test_dont_cache_args(\n self, window, window_kwargs, nogil, parallel, nopython, method\n ):\n # GH 42287\n\n def add(values, x):\n return np.sum(values) + x\n\n engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}\n df = DataFrame({"value": [0, 0, 0]})\n result = getattr(df, window)(method=method, **window_kwargs).apply(\n add, raw=True, engine="numba", engine_kwargs=engine_kwargs, args=(1,)\n )\n expected = DataFrame({"value": [1.0, 1.0, 1.0]})\n tm.assert_frame_equal(result, expected)\n\n result = getattr(df, window)(method=method, **window_kwargs).apply(\n add, raw=True, engine="numba", engine_kwargs=engine_kwargs, args=(2,)\n )\n expected = DataFrame({"value": [2.0, 2.0, 2.0]})\n tm.assert_frame_equal(result, expected)\n\n def test_dont_cache_engine_kwargs(self):\n # If the user passes a different set of engine_kwargs don't return the same\n # jitted function\n nogil = False\n parallel = True\n nopython = True\n\n def func(x):\n return nogil + parallel + nopython\n\n engine_kwargs = {"nopython": nopython, "nogil": nogil, "parallel": parallel}\n df = DataFrame({"value": [0, 0, 0]})\n result = df.rolling(1).apply(\n func, raw=True, engine="numba", engine_kwargs=engine_kwargs\n )\n expected = DataFrame({"value": [2.0, 2.0, 2.0]})\n tm.assert_frame_equal(result, expected)\n\n parallel = False\n engine_kwargs = {"nopython": 
nopython, "nogil": nogil, "parallel": parallel}\n result = df.rolling(1).apply(\n func, raw=True, engine="numba", engine_kwargs=engine_kwargs\n )\n expected = DataFrame({"value": [1.0, 1.0, 1.0]})\n tm.assert_frame_equal(result, expected)\n\n\n@td.skip_if_no("numba")\nclass TestEWM:\n @pytest.mark.parametrize(\n "grouper", [lambda x: x, lambda x: x.groupby("A")], ids=["None", "groupby"]\n )\n @pytest.mark.parametrize("method", ["mean", "sum"])\n def test_invalid_engine(self, grouper, method):\n df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})\n with pytest.raises(ValueError, match="engine must be either"):\n getattr(grouper(df).ewm(com=1.0), method)(engine="foo")\n\n @pytest.mark.parametrize(\n "grouper", [lambda x: x, lambda x: x.groupby("A")], ids=["None", "groupby"]\n )\n @pytest.mark.parametrize("method", ["mean", "sum"])\n def test_invalid_engine_kwargs(self, grouper, method):\n df = DataFrame({"A": ["a", "b", "a", "b"], "B": range(4)})\n with pytest.raises(ValueError, match="cython engine does not"):\n getattr(grouper(df).ewm(com=1.0), method)(\n engine="cython", engine_kwargs={"nopython": True}\n )\n\n @pytest.mark.parametrize("grouper", ["None", "groupby"])\n @pytest.mark.parametrize("method", ["mean", "sum"])\n def test_cython_vs_numba(\n self, grouper, method, nogil, parallel, nopython, ignore_na, adjust\n ):\n df = DataFrame({"B": range(4)})\n if grouper == "None":\n grouper = lambda x: x\n else:\n df["A"] = ["a", "b", "a", "b"]\n grouper = lambda x: x.groupby("A")\n if method == "sum":\n adjust = True\n ewm = grouper(df).ewm(com=1.0, adjust=adjust, ignore_na=ignore_na)\n\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n result = getattr(ewm, method)(engine="numba", engine_kwargs=engine_kwargs)\n expected = getattr(ewm, method)(engine="cython")\n\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("grouper", ["None", "groupby"])\n def test_cython_vs_numba_times(self, grouper, nogil, parallel, 
nopython, ignore_na):\n # GH 40951\n\n df = DataFrame({"B": [0, 0, 1, 1, 2, 2]})\n if grouper == "None":\n grouper = lambda x: x\n else:\n grouper = lambda x: x.groupby("A")\n df["A"] = ["a", "b", "a", "b", "b", "a"]\n\n halflife = "23 days"\n times = to_datetime(\n [\n "2020-01-01",\n "2020-01-01",\n "2020-01-02",\n "2020-01-10",\n "2020-02-23",\n "2020-01-03",\n ]\n )\n ewm = grouper(df).ewm(\n halflife=halflife, adjust=True, ignore_na=ignore_na, times=times\n )\n\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n\n result = ewm.mean(engine="numba", engine_kwargs=engine_kwargs)\n expected = ewm.mean(engine="cython")\n\n tm.assert_frame_equal(result, expected)\n\n\n@td.skip_if_no("numba")\ndef test_use_global_config():\n def f(x):\n return np.mean(x) + 2\n\n s = Series(range(10))\n with option_context("compute.use_numba", True):\n result = s.rolling(2).apply(f, engine=None, raw=True)\n expected = s.rolling(2).apply(f, engine="numba", raw=True)\n tm.assert_series_equal(expected, result)\n\n\n@td.skip_if_no("numba")\ndef test_invalid_kwargs_nopython():\n with pytest.raises(NumbaUtilError, match="numba does not support kwargs with"):\n Series(range(1)).rolling(1).apply(\n lambda x: x, kwargs={"a": 1}, engine="numba", raw=True\n )\n\n\n@td.skip_if_no("numba")\n@pytest.mark.slow\n@pytest.mark.filterwarnings("ignore")\n# Filter warnings when parallel=True and the function can't be parallelized by Numba\nclass TestTableMethod:\n def test_table_series_valueerror(self):\n def f(x):\n return np.sum(x, axis=0) + 1\n\n with pytest.raises(\n ValueError, match="method='table' not applicable for Series objects."\n ):\n Series(range(1)).rolling(1, method="table").apply(\n f, engine="numba", raw=True\n )\n\n def test_table_method_rolling_methods(\n self,\n axis,\n nogil,\n parallel,\n nopython,\n arithmetic_numba_supported_operators,\n step,\n ):\n method, kwargs = arithmetic_numba_supported_operators\n\n engine_kwargs = {"nogil": nogil, "parallel": 
parallel, "nopython": nopython}\n\n df = DataFrame(np.eye(3))\n roll_table = df.rolling(2, method="table", axis=axis, min_periods=0, step=step)\n if method in ("var", "std"):\n with pytest.raises(NotImplementedError, match=f"{method} not supported"):\n getattr(roll_table, method)(\n engine_kwargs=engine_kwargs, engine="numba", **kwargs\n )\n else:\n roll_single = df.rolling(\n 2, method="single", axis=axis, min_periods=0, step=step\n )\n result = getattr(roll_table, method)(\n engine_kwargs=engine_kwargs, engine="numba", **kwargs\n )\n expected = getattr(roll_single, method)(\n engine_kwargs=engine_kwargs, engine="numba", **kwargs\n )\n tm.assert_frame_equal(result, expected)\n\n def test_table_method_rolling_apply(self, axis, nogil, parallel, nopython, step):\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n\n def f(x):\n return np.sum(x, axis=0) + 1\n\n df = DataFrame(np.eye(3))\n result = df.rolling(\n 2, method="table", axis=axis, min_periods=0, step=step\n ).apply(f, raw=True, engine_kwargs=engine_kwargs, engine="numba")\n expected = df.rolling(\n 2, method="single", axis=axis, min_periods=0, step=step\n ).apply(f, raw=True, engine_kwargs=engine_kwargs, engine="numba")\n tm.assert_frame_equal(result, expected)\n\n def test_table_method_rolling_weighted_mean(self, step):\n def weighted_mean(x):\n arr = np.ones((1, x.shape[1]))\n arr[:, :2] = (x[:, :2] * x[:, 2]).sum(axis=0) / x[:, 2].sum()\n return arr\n\n df = DataFrame([[1, 2, 0.6], [2, 3, 0.4], [3, 4, 0.2], [4, 5, 0.7]])\n result = df.rolling(2, method="table", min_periods=0, step=step).apply(\n weighted_mean, raw=True, engine="numba"\n )\n expected = DataFrame(\n [\n [1.0, 2.0, 1.0],\n [1.8, 2.0, 1.0],\n [3.333333, 2.333333, 1.0],\n [1.555556, 7, 1.0],\n ]\n )[::step]\n tm.assert_frame_equal(result, expected)\n\n def test_table_method_expanding_apply(self, axis, nogil, parallel, nopython):\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n\n def 
f(x):\n return np.sum(x, axis=0) + 1\n\n df = DataFrame(np.eye(3))\n result = df.expanding(method="table", axis=axis).apply(\n f, raw=True, engine_kwargs=engine_kwargs, engine="numba"\n )\n expected = df.expanding(method="single", axis=axis).apply(\n f, raw=True, engine_kwargs=engine_kwargs, engine="numba"\n )\n tm.assert_frame_equal(result, expected)\n\n def test_table_method_expanding_methods(\n self, axis, nogil, parallel, nopython, arithmetic_numba_supported_operators\n ):\n method, kwargs = arithmetic_numba_supported_operators\n\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n\n df = DataFrame(np.eye(3))\n expand_table = df.expanding(method="table", axis=axis)\n if method in ("var", "std"):\n with pytest.raises(NotImplementedError, match=f"{method} not supported"):\n getattr(expand_table, method)(\n engine_kwargs=engine_kwargs, engine="numba", **kwargs\n )\n else:\n expand_single = df.expanding(method="single", axis=axis)\n result = getattr(expand_table, method)(\n engine_kwargs=engine_kwargs, engine="numba", **kwargs\n )\n expected = getattr(expand_single, method)(\n engine_kwargs=engine_kwargs, engine="numba", **kwargs\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("data", [np.eye(3), np.ones((2, 3)), np.ones((3, 2))])\n @pytest.mark.parametrize("method", ["mean", "sum"])\n def test_table_method_ewm(self, data, method, axis, nogil, parallel, nopython):\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n\n df = DataFrame(data)\n\n result = getattr(df.ewm(com=1, method="table", axis=axis), method)(\n engine_kwargs=engine_kwargs, engine="numba"\n )\n expected = getattr(df.ewm(com=1, method="single", axis=axis), method)(\n engine_kwargs=engine_kwargs, engine="numba"\n )\n tm.assert_frame_equal(result, expected)\n\n\n@td.skip_if_no("numba")\ndef test_npfunc_no_warnings():\n df = DataFrame({"col1": [1, 2, 3, 4, 5]})\n with tm.assert_produces_warning(False):\n 
df.col1.rolling(2).apply(np.prod, raw=True, engine="numba")\n | .venv\Lib\site-packages\pandas\tests\window\test_numba.py | test_numba.py | Python | 16,373 | 0.95 | 0.105376 | 0.02046 | node-utils | 768 | 2024-06-17T13:38:26.924290 | GPL-3.0 | true | 7996d5923d44674483a841241ed12db3 |
import numpy as np\nimport pytest\n\nfrom pandas.compat import is_platform_arm\n\nfrom pandas import (\n DataFrame,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.util.version import Version\n\npytestmark = [pytest.mark.single_cpu]\n\nnumba = pytest.importorskip("numba")\npytestmark.append(\n pytest.mark.skipif(\n Version(numba.__version__) == Version("0.61") and is_platform_arm(),\n reason=f"Segfaults on ARM platforms with numba {numba.__version__}",\n )\n)\n\n\n@pytest.mark.filterwarnings("ignore")\n# Filter warnings when parallel=True and the function can't be parallelized by Numba\nclass TestEWM:\n def test_invalid_update(self):\n df = DataFrame({"a": range(5), "b": range(5)})\n online_ewm = df.head(2).ewm(0.5).online()\n with pytest.raises(\n ValueError,\n match="Must call mean with update=None first before passing update",\n ):\n online_ewm.mean(update=df.head(1))\n\n @pytest.mark.slow\n @pytest.mark.parametrize(\n "obj", [DataFrame({"a": range(5), "b": range(5)}), Series(range(5), name="foo")]\n )\n def test_online_vs_non_online_mean(\n self, obj, nogil, parallel, nopython, adjust, ignore_na\n ):\n expected = obj.ewm(0.5, adjust=adjust, ignore_na=ignore_na).mean()\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n\n online_ewm = (\n obj.head(2)\n .ewm(0.5, adjust=adjust, ignore_na=ignore_na)\n .online(engine_kwargs=engine_kwargs)\n )\n # Test resetting once\n for _ in range(2):\n result = online_ewm.mean()\n tm.assert_equal(result, expected.head(2))\n\n result = online_ewm.mean(update=obj.tail(3))\n tm.assert_equal(result, expected.tail(3))\n\n online_ewm.reset()\n\n @pytest.mark.xfail(raises=NotImplementedError)\n @pytest.mark.parametrize(\n "obj", [DataFrame({"a": range(5), "b": range(5)}), Series(range(5), name="foo")]\n )\n def test_update_times_mean(\n self, obj, nogil, parallel, nopython, adjust, ignore_na, halflife_with_times\n ):\n times = Series(\n np.array(\n ["2020-01-01", "2020-01-05", "2020-01-07", 
"2020-01-17", "2020-01-21"],\n dtype="datetime64[ns]",\n )\n )\n expected = obj.ewm(\n 0.5,\n adjust=adjust,\n ignore_na=ignore_na,\n times=times,\n halflife=halflife_with_times,\n ).mean()\n\n engine_kwargs = {"nogil": nogil, "parallel": parallel, "nopython": nopython}\n online_ewm = (\n obj.head(2)\n .ewm(\n 0.5,\n adjust=adjust,\n ignore_na=ignore_na,\n times=times.head(2),\n halflife=halflife_with_times,\n )\n .online(engine_kwargs=engine_kwargs)\n )\n # Test resetting once\n for _ in range(2):\n result = online_ewm.mean()\n tm.assert_equal(result, expected.head(2))\n\n result = online_ewm.mean(update=obj.tail(3), update_times=times.tail(3))\n tm.assert_equal(result, expected.tail(3))\n\n online_ewm.reset()\n\n @pytest.mark.parametrize("method", ["aggregate", "std", "corr", "cov", "var"])\n def test_ewm_notimplementederror_raises(self, method):\n ser = Series(range(10))\n kwargs = {}\n if method == "aggregate":\n kwargs["func"] = lambda x: x\n\n with pytest.raises(NotImplementedError, match=".* is not implemented."):\n getattr(ser.ewm(1).online(), method)(**kwargs)\n | .venv\Lib\site-packages\pandas\tests\window\test_online.py | test_online.py | Python | 3,644 | 0.95 | 0.080357 | 0.03125 | react-lib | 909 | 2024-07-30T20:05:36.126737 | GPL-3.0 | true | 515a0cd800da5fb0217828bc50b68301 |
import numpy as np\nimport pytest\n\nfrom pandas.compat import IS64\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.algorithms import safe_sort\n\n\n@pytest.fixture(\n params=[\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 0]),\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1, 1]),\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=["C", "C"]),\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[1.0, 0]),\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0.0, 1]),\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=["C", 1]),\n DataFrame([[2.0, 4.0], [1.0, 2.0], [5.0, 2.0], [8.0, 1.0]], columns=[1, 0.0]),\n DataFrame([[2, 4.0], [1, 2.0], [5, 2.0], [8, 1.0]], columns=[0, 1.0]),\n DataFrame([[2, 4], [1, 2], [5, 2], [8, 1.0]], columns=[1.0, "X"]),\n ]\n)\ndef pairwise_frames(request):\n """Pairwise frames test_pairwise"""\n return request.param\n\n\n@pytest.fixture\ndef pairwise_target_frame():\n """Pairwise target frame for test_pairwise"""\n return DataFrame([[2, 4], [1, 2], [5, 2], [8, 1]], columns=[0, 1])\n\n\n@pytest.fixture\ndef pairwise_other_frame():\n """Pairwise other frame for test_pairwise"""\n return DataFrame(\n [[None, 1, 1], [None, 1, 2], [None, 3, 2], [None, 8, 1]],\n columns=["Y", "Z", "X"],\n )\n\n\ndef test_rolling_cov(series):\n A = series\n B = A + np.random.default_rng(2).standard_normal(len(A))\n\n result = A.rolling(window=50, min_periods=25).cov(B)\n tm.assert_almost_equal(result.iloc[-1], np.cov(A[-50:], B[-50:])[0, 1])\n\n\ndef test_rolling_corr(series):\n A = series\n B = A + np.random.default_rng(2).standard_normal(len(A))\n\n result = A.rolling(window=50, min_periods=25).corr(B)\n tm.assert_almost_equal(result.iloc[-1], np.corrcoef(A[-50:], B[-50:])[0, 1])\n\n\ndef test_rolling_corr_bias_correction():\n # test for correct bias correction\n a = Series(\n np.arange(20, dtype=np.float64), index=date_range("2020-01-01", 
periods=20)\n )\n b = a.copy()\n a[:5] = np.nan\n b[:10] = np.nan\n\n result = a.rolling(window=len(a), min_periods=1).corr(b)\n tm.assert_almost_equal(result.iloc[-1], a.corr(b))\n\n\n@pytest.mark.parametrize("func", ["cov", "corr"])\ndef test_rolling_pairwise_cov_corr(func, frame):\n result = getattr(frame.rolling(window=10, min_periods=5), func)()\n result = result.loc[(slice(None), 1), 5]\n result.index = result.index.droplevel(1)\n expected = getattr(frame[1].rolling(window=10, min_periods=5), func)(frame[5])\n tm.assert_series_equal(result, expected, check_names=False)\n\n\n@pytest.mark.parametrize("method", ["corr", "cov"])\ndef test_flex_binary_frame(method, frame):\n series = frame[1]\n\n res = getattr(series.rolling(window=10), method)(frame)\n res2 = getattr(frame.rolling(window=10), method)(series)\n exp = frame.apply(lambda x: getattr(series.rolling(window=10), method)(x))\n\n tm.assert_frame_equal(res, exp)\n tm.assert_frame_equal(res2, exp)\n\n frame2 = frame.copy()\n frame2 = DataFrame(\n np.random.default_rng(2).standard_normal(frame2.shape),\n index=frame2.index,\n columns=frame2.columns,\n )\n\n res3 = getattr(frame.rolling(window=10), method)(frame2)\n exp = DataFrame(\n {k: getattr(frame[k].rolling(window=10), method)(frame2[k]) for k in frame}\n )\n tm.assert_frame_equal(res3, exp)\n\n\n@pytest.mark.parametrize("window", range(7))\ndef test_rolling_corr_with_zero_variance(window):\n # GH 18430\n s = Series(np.zeros(20))\n other = Series(np.arange(20))\n\n assert s.rolling(window=window).corr(other=other).isna().all()\n\n\ndef test_corr_sanity():\n # GH 3155\n df = DataFrame(\n np.array(\n [\n [0.87024726, 0.18505595],\n [0.64355431, 0.3091617],\n [0.92372966, 0.50552513],\n [0.00203756, 0.04520709],\n [0.84780328, 0.33394331],\n [0.78369152, 0.63919667],\n ]\n )\n )\n\n res = df[0].rolling(5, center=True).corr(df[1])\n assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)\n\n df = DataFrame(np.random.default_rng(2).random((30, 2)))\n res = 
df[0].rolling(5, center=True).corr(df[1])\n assert all(np.abs(np.nan_to_num(x)) <= 1 for x in res)\n\n\ndef test_rolling_cov_diff_length():\n # GH 7512\n s1 = Series([1, 2, 3], index=[0, 1, 2])\n s2 = Series([1, 3], index=[0, 2])\n result = s1.rolling(window=3, min_periods=2).cov(s2)\n expected = Series([None, None, 2.0])\n tm.assert_series_equal(result, expected)\n\n s2a = Series([1, None, 3], index=[0, 1, 2])\n result = s1.rolling(window=3, min_periods=2).cov(s2a)\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_corr_diff_length():\n # GH 7512\n s1 = Series([1, 2, 3], index=[0, 1, 2])\n s2 = Series([1, 3], index=[0, 2])\n result = s1.rolling(window=3, min_periods=2).corr(s2)\n expected = Series([None, None, 1.0])\n tm.assert_series_equal(result, expected)\n\n s2a = Series([1, None, 3], index=[0, 1, 2])\n result = s1.rolling(window=3, min_periods=2).corr(s2a)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "f",\n [\n lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),\n lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),\n ],\n)\ndef test_rolling_functions_window_non_shrinkage_binary(f):\n # corr/cov return a MI DataFrame\n df = DataFrame(\n [[1, 5], [3, 2], [3, 9], [-1, 0]],\n columns=Index(["A", "B"], name="foo"),\n index=Index(range(4), name="bar"),\n )\n df_expected = DataFrame(\n columns=Index(["A", "B"], name="foo"),\n index=MultiIndex.from_product([df.index, df.columns], names=["bar", "foo"]),\n dtype="float64",\n )\n df_result = f(df)\n tm.assert_frame_equal(df_result, df_expected)\n\n\n@pytest.mark.parametrize(\n "f",\n [\n lambda x: (x.rolling(window=10, min_periods=5).cov(x, pairwise=True)),\n lambda x: (x.rolling(window=10, min_periods=5).corr(x, pairwise=True)),\n ],\n)\ndef test_moment_functions_zero_length_pairwise(f):\n df1 = DataFrame()\n df2 = DataFrame(columns=Index(["a"], name="foo"), index=Index([], name="bar"))\n df2["a"] = df2["a"].astype("float64")\n\n 
df1_expected = DataFrame(index=MultiIndex.from_product([df1.index, df1.columns]))\n df2_expected = DataFrame(\n index=MultiIndex.from_product([df2.index, df2.columns], names=["bar", "foo"]),\n columns=Index(["a"], name="foo"),\n dtype="float64",\n )\n\n df1_result = f(df1)\n tm.assert_frame_equal(df1_result, df1_expected)\n\n df2_result = f(df2)\n tm.assert_frame_equal(df2_result, df2_expected)\n\n\nclass TestPairwise:\n # GH 7738\n @pytest.mark.parametrize("f", [lambda x: x.cov(), lambda x: x.corr()])\n def test_no_flex(self, pairwise_frames, pairwise_target_frame, f):\n # DataFrame methods (which do not call flex_binary_moment())\n\n result = f(pairwise_frames)\n tm.assert_index_equal(result.index, pairwise_frames.columns)\n tm.assert_index_equal(result.columns, pairwise_frames.columns)\n expected = f(pairwise_target_frame)\n # since we have sorted the results\n # we can only compare non-nans\n result = result.dropna().values\n expected = expected.dropna().values\n\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n @pytest.mark.parametrize(\n "f",\n [\n lambda x: x.expanding().cov(pairwise=True),\n lambda x: x.expanding().corr(pairwise=True),\n lambda x: x.rolling(window=3).cov(pairwise=True),\n lambda x: x.rolling(window=3).corr(pairwise=True),\n lambda x: x.ewm(com=3).cov(pairwise=True),\n lambda x: x.ewm(com=3).corr(pairwise=True),\n ],\n )\n def test_pairwise_with_self(self, pairwise_frames, pairwise_target_frame, f):\n # DataFrame with itself, pairwise=True\n # note that we may construct the 1st level of the MI\n # in a non-monotonic way, so compare accordingly\n result = f(pairwise_frames)\n tm.assert_index_equal(\n result.index.levels[0], pairwise_frames.index, check_names=False\n )\n tm.assert_index_equal(\n safe_sort(result.index.levels[1]),\n safe_sort(pairwise_frames.columns.unique()),\n )\n tm.assert_index_equal(result.columns, pairwise_frames.columns)\n expected = f(pairwise_target_frame)\n # since we have sorted the results\n # 
we can only compare non-nans\n result = result.dropna().values\n expected = expected.dropna().values\n\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n @pytest.mark.parametrize(\n "f",\n [\n lambda x: x.expanding().cov(pairwise=False),\n lambda x: x.expanding().corr(pairwise=False),\n lambda x: x.rolling(window=3).cov(pairwise=False),\n lambda x: x.rolling(window=3).corr(pairwise=False),\n lambda x: x.ewm(com=3).cov(pairwise=False),\n lambda x: x.ewm(com=3).corr(pairwise=False),\n ],\n )\n def test_no_pairwise_with_self(self, pairwise_frames, pairwise_target_frame, f):\n # DataFrame with itself, pairwise=False\n result = f(pairwise_frames)\n tm.assert_index_equal(result.index, pairwise_frames.index)\n tm.assert_index_equal(result.columns, pairwise_frames.columns)\n expected = f(pairwise_target_frame)\n # since we have sorted the results\n # we can only compare non-nans\n result = result.dropna().values\n expected = expected.dropna().values\n\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n @pytest.mark.parametrize(\n "f",\n [\n lambda x, y: x.expanding().cov(y, pairwise=True),\n lambda x, y: x.expanding().corr(y, pairwise=True),\n lambda x, y: x.rolling(window=3).cov(y, pairwise=True),\n # TODO: We're missing a flag somewhere in meson\n pytest.param(\n lambda x, y: x.rolling(window=3).corr(y, pairwise=True),\n marks=pytest.mark.xfail(\n not IS64, reason="Precision issues on 32 bit", strict=False\n ),\n ),\n lambda x, y: x.ewm(com=3).cov(y, pairwise=True),\n lambda x, y: x.ewm(com=3).corr(y, pairwise=True),\n ],\n )\n def test_pairwise_with_other(\n self, pairwise_frames, pairwise_target_frame, pairwise_other_frame, f\n ):\n # DataFrame with another DataFrame, pairwise=True\n result = f(pairwise_frames, pairwise_other_frame)\n tm.assert_index_equal(\n result.index.levels[0], pairwise_frames.index, check_names=False\n )\n tm.assert_index_equal(\n safe_sort(result.index.levels[1]),\n 
safe_sort(pairwise_other_frame.columns.unique()),\n )\n expected = f(pairwise_target_frame, pairwise_other_frame)\n # since we have sorted the results\n # we can only compare non-nans\n result = result.dropna().values\n expected = expected.dropna().values\n\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n @pytest.mark.filterwarnings("ignore:RuntimeWarning")\n @pytest.mark.parametrize(\n "f",\n [\n lambda x, y: x.expanding().cov(y, pairwise=False),\n lambda x, y: x.expanding().corr(y, pairwise=False),\n lambda x, y: x.rolling(window=3).cov(y, pairwise=False),\n lambda x, y: x.rolling(window=3).corr(y, pairwise=False),\n lambda x, y: x.ewm(com=3).cov(y, pairwise=False),\n lambda x, y: x.ewm(com=3).corr(y, pairwise=False),\n ],\n )\n def test_no_pairwise_with_other(self, pairwise_frames, pairwise_other_frame, f):\n # DataFrame with another DataFrame, pairwise=False\n result = (\n f(pairwise_frames, pairwise_other_frame)\n if pairwise_frames.columns.is_unique\n else None\n )\n if result is not None:\n # we can have int and str columns\n expected_index = pairwise_frames.index.union(pairwise_other_frame.index)\n expected_columns = pairwise_frames.columns.union(\n pairwise_other_frame.columns\n )\n tm.assert_index_equal(result.index, expected_index)\n tm.assert_index_equal(result.columns, expected_columns)\n else:\n with pytest.raises(ValueError, match="'arg1' columns are not unique"):\n f(pairwise_frames, pairwise_other_frame)\n with pytest.raises(ValueError, match="'arg2' columns are not unique"):\n f(pairwise_other_frame, pairwise_frames)\n\n @pytest.mark.parametrize(\n "f",\n [\n lambda x, y: x.expanding().cov(y),\n lambda x, y: x.expanding().corr(y),\n lambda x, y: x.rolling(window=3).cov(y),\n lambda x, y: x.rolling(window=3).corr(y),\n lambda x, y: x.ewm(com=3).cov(y),\n lambda x, y: x.ewm(com=3).corr(y),\n ],\n )\n def test_pairwise_with_series(self, pairwise_frames, pairwise_target_frame, f):\n # DataFrame with a Series\n result = 
f(pairwise_frames, Series([1, 1, 3, 8]))\n tm.assert_index_equal(result.index, pairwise_frames.index)\n tm.assert_index_equal(result.columns, pairwise_frames.columns)\n expected = f(pairwise_target_frame, Series([1, 1, 3, 8]))\n # since we have sorted the results\n # we can only compare non-nans\n result = result.dropna().values\n expected = expected.dropna().values\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n result = f(Series([1, 1, 3, 8]), pairwise_frames)\n tm.assert_index_equal(result.index, pairwise_frames.index)\n tm.assert_index_equal(result.columns, pairwise_frames.columns)\n expected = f(Series([1, 1, 3, 8]), pairwise_target_frame)\n # since we have sorted the results\n # we can only compare non-nans\n result = result.dropna().values\n expected = expected.dropna().values\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n def test_corr_freq_memory_error(self):\n # GH 31789\n s = Series(range(5), index=date_range("2020", periods=5))\n result = s.rolling("12h").corr(s)\n expected = Series([np.nan] * 5, index=date_range("2020", periods=5))\n tm.assert_series_equal(result, expected)\n\n def test_cov_mulittindex(self):\n # GH 34440\n\n columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")])\n index = range(3)\n df = DataFrame(np.arange(24).reshape(3, 8), index=index, columns=columns)\n\n result = df.ewm(alpha=0.1).cov()\n\n index = MultiIndex.from_product([range(3), list("ab"), list("xy"), list("AB")])\n columns = MultiIndex.from_product([list("ab"), list("xy"), list("AB")])\n expected = DataFrame(\n np.vstack(\n (\n np.full((8, 8), np.nan),\n np.full((8, 8), 32.000000),\n np.full((8, 8), 63.881919),\n )\n ),\n index=index,\n columns=columns,\n )\n\n tm.assert_frame_equal(result, expected)\n\n def test_multindex_columns_pairwise_func(self):\n # GH 21157\n columns = MultiIndex.from_arrays([["M", "N"], ["P", "Q"]], names=["a", "b"])\n df = DataFrame(np.ones((5, 2)), columns=columns)\n result = 
df.rolling(3).corr()\n expected = DataFrame(\n np.nan,\n index=MultiIndex.from_arrays(\n [\n np.repeat(np.arange(5, dtype=np.int64), 2),\n ["M", "N"] * 5,\n ["P", "Q"] * 5,\n ],\n names=[None, "a", "b"],\n ),\n columns=columns,\n )\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\window\test_pairwise.py | test_pairwise.py | Python | 16,141 | 0.95 | 0.07191 | 0.084211 | node-utils | 313 | 2024-07-12T15:48:04.797521 | MIT | true | ee249a5ee8ec2dee11d7c5b75715c40e |
from datetime import (\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import (\n IS64,\n is_platform_arm,\n is_platform_power,\n)\n\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n MultiIndex,\n Series,\n Timedelta,\n Timestamp,\n date_range,\n period_range,\n to_datetime,\n to_timedelta,\n)\nimport pandas._testing as tm\nfrom pandas.api.indexers import BaseIndexer\nfrom pandas.core.indexers.objects import VariableOffsetWindowIndexer\n\nfrom pandas.tseries.offsets import BusinessDay\n\n\ndef test_doc_string():\n df = DataFrame({"B": [0, 1, 2, np.nan, 4]})\n df\n df.rolling(2).sum()\n df.rolling(2, min_periods=1).sum()\n\n\ndef test_constructor(frame_or_series):\n # GH 12669\n\n c = frame_or_series(range(5)).rolling\n\n # valid\n c(0)\n c(window=2)\n c(window=2, min_periods=1)\n c(window=2, min_periods=1, center=True)\n c(window=2, min_periods=1, center=False)\n\n # GH 13383\n\n msg = "window must be an integer 0 or greater"\n\n with pytest.raises(ValueError, match=msg):\n c(-1)\n\n\n@pytest.mark.parametrize("w", [2.0, "foo", np.array([2])])\ndef test_invalid_constructor(frame_or_series, w):\n # not valid\n\n c = frame_or_series(range(5)).rolling\n\n msg = "|".join(\n [\n "window must be an integer",\n "passed window foo is not compatible with a datetimelike index",\n ]\n )\n with pytest.raises(ValueError, match=msg):\n c(window=w)\n\n msg = "min_periods must be an integer"\n with pytest.raises(ValueError, match=msg):\n c(window=2, min_periods=w)\n\n msg = "center must be a boolean"\n with pytest.raises(ValueError, match=msg):\n c(window=2, min_periods=1, center=w)\n\n\n@pytest.mark.parametrize(\n "window",\n [\n timedelta(days=3),\n Timedelta(days=3),\n "3D",\n VariableOffsetWindowIndexer(\n index=date_range("2015-12-25", periods=5), offset=BusinessDay(1)\n ),\n ],\n)\ndef test_freq_window_not_implemented(window):\n # GH 15354\n df = DataFrame(\n np.arange(10),\n index=date_range("2015-12-24", periods=10, freq="D"),\n 
)\n with pytest.raises(\n NotImplementedError, match="^step (not implemented|is not supported)"\n ):\n df.rolling(window, step=3).sum()\n\n\n@pytest.mark.parametrize("agg", ["cov", "corr"])\ndef test_step_not_implemented_for_cov_corr(agg):\n # GH 15354\n roll = DataFrame(range(2)).rolling(1, step=2)\n with pytest.raises(NotImplementedError, match="step not implemented"):\n getattr(roll, agg)()\n\n\n@pytest.mark.parametrize("window", [timedelta(days=3), Timedelta(days=3)])\ndef test_constructor_with_timedelta_window(window):\n # GH 15440\n n = 10\n df = DataFrame(\n {"value": np.arange(n)},\n index=date_range("2015-12-24", periods=n, freq="D"),\n )\n expected_data = np.append([0.0, 1.0], np.arange(3.0, 27.0, 3))\n\n result = df.rolling(window=window).sum()\n expected = DataFrame(\n {"value": expected_data},\n index=date_range("2015-12-24", periods=n, freq="D"),\n )\n tm.assert_frame_equal(result, expected)\n expected = df.rolling("3D").sum()\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("window", [timedelta(days=3), Timedelta(days=3), "3D"])\ndef test_constructor_timedelta_window_and_minperiods(window, raw):\n # GH 15305\n n = 10\n df = DataFrame(\n {"value": np.arange(n)},\n index=date_range("2017-08-08", periods=n, freq="D"),\n )\n expected = DataFrame(\n {"value": np.append([np.nan, 1.0], np.arange(3.0, 27.0, 3))},\n index=date_range("2017-08-08", periods=n, freq="D"),\n )\n result_roll_sum = df.rolling(window=window, min_periods=2).sum()\n result_roll_generic = df.rolling(window=window, min_periods=2).apply(sum, raw=raw)\n tm.assert_frame_equal(result_roll_sum, expected)\n tm.assert_frame_equal(result_roll_generic, expected)\n\n\ndef test_closed_fixed(closed, arithmetic_win_operators):\n # GH 34315\n func_name = arithmetic_win_operators\n df_fixed = DataFrame({"A": [0, 1, 2, 3, 4]})\n df_time = DataFrame({"A": [0, 1, 2, 3, 4]}, index=date_range("2020", periods=5))\n\n result = getattr(\n df_fixed.rolling(2, closed=closed, 
min_periods=1),\n func_name,\n )()\n expected = getattr(\n df_time.rolling("2D", closed=closed, min_periods=1),\n func_name,\n )().reset_index(drop=True)\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "closed, window_selections",\n [\n (\n "both",\n [\n [True, True, False, False, False],\n [True, True, True, False, False],\n [False, True, True, True, False],\n [False, False, True, True, True],\n [False, False, False, True, True],\n ],\n ),\n (\n "left",\n [\n [True, False, False, False, False],\n [True, True, False, False, False],\n [False, True, True, False, False],\n [False, False, True, True, False],\n [False, False, False, True, True],\n ],\n ),\n (\n "right",\n [\n [True, True, False, False, False],\n [False, True, True, False, False],\n [False, False, True, True, False],\n [False, False, False, True, True],\n [False, False, False, False, True],\n ],\n ),\n (\n "neither",\n [\n [True, False, False, False, False],\n [False, True, False, False, False],\n [False, False, True, False, False],\n [False, False, False, True, False],\n [False, False, False, False, True],\n ],\n ),\n ],\n)\ndef test_datetimelike_centered_selections(\n closed, window_selections, arithmetic_win_operators\n):\n # GH 34315\n func_name = arithmetic_win_operators\n df_time = DataFrame(\n {"A": [0.0, 1.0, 2.0, 3.0, 4.0]}, index=date_range("2020", periods=5)\n )\n\n expected = DataFrame(\n {"A": [getattr(df_time["A"].iloc[s], func_name)() for s in window_selections]},\n index=date_range("2020", periods=5),\n )\n\n if func_name == "sem":\n kwargs = {"ddof": 0}\n else:\n kwargs = {}\n\n result = getattr(\n df_time.rolling("2D", closed=closed, min_periods=1, center=True),\n func_name,\n )(**kwargs)\n\n tm.assert_frame_equal(result, expected, check_dtype=False)\n\n\n@pytest.mark.parametrize(\n "window,closed,expected",\n [\n ("3s", "right", [3.0, 3.0, 3.0]),\n ("3s", "both", [3.0, 3.0, 3.0]),\n ("3s", "left", [3.0, 3.0, 3.0]),\n ("3s", "neither", [3.0, 3.0, 3.0]),\n 
("2s", "right", [3.0, 2.0, 2.0]),\n ("2s", "both", [3.0, 3.0, 3.0]),\n ("2s", "left", [1.0, 3.0, 3.0]),\n ("2s", "neither", [1.0, 2.0, 2.0]),\n ],\n)\ndef test_datetimelike_centered_offset_covers_all(\n window, closed, expected, frame_or_series\n):\n # GH 42753\n\n index = [\n Timestamp("20130101 09:00:01"),\n Timestamp("20130101 09:00:02"),\n Timestamp("20130101 09:00:02"),\n ]\n df = frame_or_series([1, 1, 1], index=index)\n\n result = df.rolling(window, closed=closed, center=True).sum()\n expected = frame_or_series(expected, index=index)\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "window,closed,expected",\n [\n ("2D", "right", [4, 4, 4, 4, 4, 4, 2, 2]),\n ("2D", "left", [2, 2, 4, 4, 4, 4, 4, 4]),\n ("2D", "both", [4, 4, 6, 6, 6, 6, 4, 4]),\n ("2D", "neither", [2, 2, 2, 2, 2, 2, 2, 2]),\n ],\n)\ndef test_datetimelike_nonunique_index_centering(\n window, closed, expected, frame_or_series\n):\n index = DatetimeIndex(\n [\n "2020-01-01",\n "2020-01-01",\n "2020-01-02",\n "2020-01-02",\n "2020-01-03",\n "2020-01-03",\n "2020-01-04",\n "2020-01-04",\n ]\n )\n\n df = frame_or_series([1] * 8, index=index, dtype=float)\n expected = frame_or_series(expected, index=index, dtype=float)\n\n result = df.rolling(window, center=True, closed=closed).sum()\n\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "closed,expected",\n [\n ("left", [np.nan, np.nan, 1, 1, 1, 10, 14, 14, 18, 21]),\n ("neither", [np.nan, np.nan, 1, 1, 1, 9, 5, 5, 13, 8]),\n ("right", [0, 1, 3, 6, 10, 14, 11, 18, 21, 17]),\n ("both", [0, 1, 3, 6, 10, 15, 20, 27, 26, 30]),\n ],\n)\ndef test_variable_window_nonunique(closed, expected, frame_or_series):\n # GH 20712\n index = DatetimeIndex(\n [\n "2011-01-01",\n "2011-01-01",\n "2011-01-02",\n "2011-01-02",\n "2011-01-02",\n "2011-01-03",\n "2011-01-04",\n "2011-01-04",\n "2011-01-05",\n "2011-01-06",\n ]\n )\n\n df = frame_or_series(range(10), index=index, dtype=float)\n expected = frame_or_series(expected, 
index=index, dtype=float)\n\n result = df.rolling("2D", closed=closed).sum()\n\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "closed,expected",\n [\n ("left", [np.nan, np.nan, 1, 1, 1, 10, 15, 15, 18, 21]),\n ("neither", [np.nan, np.nan, 1, 1, 1, 10, 15, 15, 13, 8]),\n ("right", [0, 1, 3, 6, 10, 15, 21, 28, 21, 17]),\n ("both", [0, 1, 3, 6, 10, 15, 21, 28, 26, 30]),\n ],\n)\ndef test_variable_offset_window_nonunique(closed, expected, frame_or_series):\n # GH 20712\n index = DatetimeIndex(\n [\n "2011-01-01",\n "2011-01-01",\n "2011-01-02",\n "2011-01-02",\n "2011-01-02",\n "2011-01-03",\n "2011-01-04",\n "2011-01-04",\n "2011-01-05",\n "2011-01-06",\n ]\n )\n\n df = frame_or_series(range(10), index=index, dtype=float)\n expected = frame_or_series(expected, index=index, dtype=float)\n\n offset = BusinessDay(2)\n indexer = VariableOffsetWindowIndexer(index=index, offset=offset)\n result = df.rolling(indexer, closed=closed, min_periods=1).sum()\n\n tm.assert_equal(result, expected)\n\n\ndef test_even_number_window_alignment():\n # see discussion in GH 38780\n s = Series(range(3), index=date_range(start="2020-01-01", freq="D", periods=3))\n\n # behavior of index- and datetime-based windows differs here!\n # s.rolling(window=2, min_periods=1, center=True).mean()\n\n result = s.rolling(window="2D", min_periods=1, center=True).mean()\n\n expected = Series([0.5, 1.5, 2], index=s.index)\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_closed_fixed_binary_col(center, step):\n # GH 34315\n data = [0, 1, 1, 0, 0, 1, 0, 1]\n df = DataFrame(\n {"binary_col": data},\n index=date_range(start="2020-01-01", freq="min", periods=len(data)),\n )\n\n if center:\n expected_data = [2 / 3, 0.5, 0.4, 0.5, 0.428571, 0.5, 0.571429, 0.5]\n else:\n expected_data = [np.nan, 0, 0.5, 2 / 3, 0.5, 0.4, 0.5, 0.428571]\n\n expected = DataFrame(\n expected_data,\n columns=["binary_col"],\n index=date_range(start="2020-01-01", freq="min", periods=len(expected_data)),\n 
)[::step]\n\n rolling = df.rolling(\n window=len(df), closed="left", min_periods=1, center=center, step=step\n )\n result = rolling.mean()\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("closed", ["neither", "left"])\ndef test_closed_empty(closed, arithmetic_win_operators):\n # GH 26005\n func_name = arithmetic_win_operators\n ser = Series(data=np.arange(5), index=date_range("2000", periods=5, freq="2D"))\n roll = ser.rolling("1D", closed=closed)\n\n result = getattr(roll, func_name)()\n expected = Series([np.nan] * 5, index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("func", ["min", "max"])\ndef test_closed_one_entry(func):\n # GH24718\n ser = Series(data=[2], index=date_range("2000", periods=1))\n result = getattr(ser.rolling("10D", closed="left"), func)()\n tm.assert_series_equal(result, Series([np.nan], index=ser.index))\n\n\n@pytest.mark.parametrize("func", ["min", "max"])\ndef test_closed_one_entry_groupby(func):\n # GH24718\n ser = DataFrame(\n data={"A": [1, 1, 2], "B": [3, 2, 1]},\n index=date_range("2000", periods=3),\n )\n result = getattr(\n ser.groupby("A", sort=False)["B"].rolling("10D", closed="left"), func\n )()\n exp_idx = MultiIndex.from_arrays(arrays=[[1, 1, 2], ser.index], names=("A", None))\n expected = Series(data=[np.nan, 3, np.nan], index=exp_idx, name="B")\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("input_dtype", ["int", "float"])\n@pytest.mark.parametrize(\n "func,closed,expected",\n [\n ("min", "right", [0.0, 0, 0, 1, 2, 3, 4, 5, 6, 7]),\n ("min", "both", [0.0, 0, 0, 0, 1, 2, 3, 4, 5, 6]),\n ("min", "neither", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6, 7]),\n ("min", "left", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, 6]),\n ("max", "right", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n ("max", "both", [0.0, 1, 2, 3, 4, 5, 6, 7, 8, 9]),\n ("max", "neither", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),\n ("max", "left", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7, 8]),\n ],\n)\ndef 
test_closed_min_max_datetime(input_dtype, func, closed, expected):\n # see gh-21704\n ser = Series(\n data=np.arange(10).astype(input_dtype),\n index=date_range("2000", periods=10),\n )\n\n result = getattr(ser.rolling("3D", closed=closed), func)()\n expected = Series(expected, index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_closed_uneven():\n # see gh-21704\n ser = Series(data=np.arange(10), index=date_range("2000", periods=10))\n\n # uneven\n ser = ser.drop(index=ser.index[[1, 5]])\n result = ser.rolling("3D", closed="left").min()\n expected = Series([np.nan, 0, 0, 2, 3, 4, 6, 6], index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "func,closed,expected",\n [\n ("min", "right", [np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),\n ("min", "both", [np.nan, 0, 0, 0, 1, 2, 3, 4, 5, np.nan]),\n ("min", "neither", [np.nan, np.nan, 0, 1, 2, 3, 4, 5, np.nan, np.nan]),\n ("min", "left", [np.nan, np.nan, 0, 0, 1, 2, 3, 4, 5, np.nan]),\n ("max", "right", [np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan, np.nan]),\n ("max", "both", [np.nan, 1, 2, 3, 4, 5, 6, 6, 6, np.nan]),\n ("max", "neither", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, np.nan, np.nan]),\n ("max", "left", [np.nan, np.nan, 1, 2, 3, 4, 5, 6, 6, np.nan]),\n ],\n)\ndef test_closed_min_max_minp(func, closed, expected):\n # see gh-21704\n ser = Series(data=np.arange(10), index=date_range("2000", periods=10))\n # Explicit cast to float to avoid implicit cast when setting nan\n ser = ser.astype("float")\n ser[ser.index[-3:]] = np.nan\n result = getattr(ser.rolling("3D", min_periods=2, closed=closed), func)()\n expected = Series(expected, index=ser.index)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "closed,expected",\n [\n ("right", [0, 0.5, 1, 2, 3, 4, 5, 6, 7, 8]),\n ("both", [0, 0.5, 1, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),\n ("neither", [np.nan, 0, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5]),\n ("left", [np.nan, 0, 0.5, 1, 2, 3, 4, 5, 6, 
7]),\n ],\n)\ndef test_closed_median_quantile(closed, expected):\n # GH 26005\n ser = Series(data=np.arange(10), index=date_range("2000", periods=10))\n roll = ser.rolling("3D", closed=closed)\n expected = Series(expected, index=ser.index)\n\n result = roll.median()\n tm.assert_series_equal(result, expected)\n\n result = roll.quantile(0.5)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("roller", ["1s", 1])\ndef tests_empty_df_rolling(roller):\n # GH 15819 Verifies that datetime and integer rolling windows can be\n # applied to empty DataFrames\n expected = DataFrame()\n result = DataFrame().rolling(roller).sum()\n tm.assert_frame_equal(result, expected)\n\n # Verifies that datetime and integer rolling windows can be applied to\n # empty DataFrames with datetime index\n expected = DataFrame(index=DatetimeIndex([]))\n result = DataFrame(index=DatetimeIndex([])).rolling(roller).sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_empty_window_median_quantile():\n # GH 26005\n expected = Series([np.nan, np.nan, np.nan])\n roll = Series(np.arange(3)).rolling(0)\n\n result = roll.median()\n tm.assert_series_equal(result, expected)\n\n result = roll.quantile(0.1)\n tm.assert_series_equal(result, expected)\n\n\ndef test_missing_minp_zero():\n # https://github.com/pandas-dev/pandas/pull/18921\n # minp=0\n x = Series([np.nan])\n result = x.rolling(1, min_periods=0).sum()\n expected = Series([0.0])\n tm.assert_series_equal(result, expected)\n\n # minp=1\n result = x.rolling(1, min_periods=1).sum()\n expected = Series([np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_missing_minp_zero_variable():\n # https://github.com/pandas-dev/pandas/pull/18921\n x = Series(\n [np.nan] * 4,\n index=DatetimeIndex(["2017-01-01", "2017-01-04", "2017-01-06", "2017-01-07"]),\n )\n result = x.rolling(Timedelta("2d"), min_periods=0).sum()\n expected = Series(0.0, index=x.index)\n tm.assert_series_equal(result, expected)\n\n\ndef 
test_multi_index_names():\n # GH 16789, 16825\n cols = MultiIndex.from_product([["A", "B"], ["C", "D", "E"]], names=["1", "2"])\n df = DataFrame(np.ones((10, 6)), columns=cols)\n result = df.rolling(3).cov()\n\n tm.assert_index_equal(result.columns, df.columns)\n assert result.index.names == [None, "1", "2"]\n\n\ndef test_rolling_axis_sum(axis_frame):\n # see gh-23372.\n df = DataFrame(np.ones((10, 20)))\n axis = df._get_axis_number(axis_frame)\n\n if axis == 0:\n msg = "The 'axis' keyword in DataFrame.rolling"\n expected = DataFrame({i: [np.nan] * 2 + [3.0] * 8 for i in range(20)})\n else:\n # axis == 1\n msg = "Support for axis=1 in DataFrame.rolling is deprecated"\n expected = DataFrame([[np.nan] * 2 + [3.0] * 18] * 10)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.rolling(3, axis=axis_frame).sum()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_axis_count(axis_frame):\n # see gh-26055\n df = DataFrame({"x": range(3), "y": range(3)})\n\n axis = df._get_axis_number(axis_frame)\n\n if axis in [0, "index"]:\n msg = "The 'axis' keyword in DataFrame.rolling"\n expected = DataFrame({"x": [1.0, 2.0, 2.0], "y": [1.0, 2.0, 2.0]})\n else:\n msg = "Support for axis=1 in DataFrame.rolling is deprecated"\n expected = DataFrame({"x": [1.0, 1.0, 1.0], "y": [2.0, 2.0, 2.0]})\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.rolling(2, axis=axis_frame, min_periods=0).count()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_readonly_array():\n # GH-27766\n arr = np.array([1, 3, np.nan, 3, 5])\n arr.setflags(write=False)\n result = Series(arr).rolling(2).mean()\n expected = Series([np.nan, 2, np.nan, np.nan, 4])\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_datetime(axis_frame, tz_naive_fixture):\n # GH-28192\n tz = tz_naive_fixture\n df = DataFrame(\n {i: [1] * 2 for i in date_range("2019-8-01", "2019-08-03", freq="D", tz=tz)}\n )\n\n if axis_frame in [0, "index"]:\n msg = "The 
'axis' keyword in DataFrame.rolling"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.T.rolling("2D", axis=axis_frame).sum().T\n else:\n msg = "Support for axis=1 in DataFrame.rolling"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.rolling("2D", axis=axis_frame).sum()\n expected = DataFrame(\n {\n **{\n i: [1.0] * 2\n for i in date_range("2019-8-01", periods=1, freq="D", tz=tz)\n },\n **{\n i: [2.0] * 2\n for i in date_range("2019-8-02", "2019-8-03", freq="D", tz=tz)\n },\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("center", [True, False])\ndef test_rolling_window_as_string(center):\n # see gh-22590\n date_today = datetime.now()\n days = date_range(date_today, date_today + timedelta(365), freq="D")\n\n data = np.ones(len(days))\n df = DataFrame({"DateCol": days, "metric": data})\n\n df.set_index("DateCol", inplace=True)\n result = df.rolling(window="21D", min_periods=2, closed="left", center=center)[\n "metric"\n ].agg("max")\n\n index = days.rename("DateCol")\n index = index._with_freq(None)\n expected_data = np.ones(len(days), dtype=np.float64)\n if not center:\n expected_data[:2] = np.nan\n expected = Series(expected_data, index=index, name="metric")\n tm.assert_series_equal(result, expected)\n\n\ndef test_min_periods1():\n # GH#6795\n df = DataFrame([0, 1, 2, 1, 0], columns=["a"])\n result = df["a"].rolling(3, center=True, min_periods=1).max()\n expected = Series([1.0, 2.0, 2.0, 2.0, 1.0], name="a")\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_count_with_min_periods(frame_or_series):\n # GH 26996\n result = frame_or_series(range(5)).rolling(3, min_periods=3).count()\n expected = frame_or_series([np.nan, np.nan, 3.0, 3.0, 3.0])\n tm.assert_equal(result, expected)\n\n\ndef test_rolling_count_default_min_periods_with_null_values(frame_or_series):\n # GH 26996\n values = [1, 2, 3, np.nan, 4, 5, 6]\n expected_counts = [1.0, 2.0, 3.0, 2.0, 2.0, 2.0, 3.0]\n\n 
# GH 31302\n result = frame_or_series(values).rolling(3, min_periods=0).count()\n expected = frame_or_series(expected_counts)\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "df,expected,window,min_periods",\n [\n (\n DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),\n [\n ({"A": [1], "B": [4]}, [0]),\n ({"A": [1, 2], "B": [4, 5]}, [0, 1]),\n ({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),\n ],\n 3,\n None,\n ),\n (\n DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),\n [\n ({"A": [1], "B": [4]}, [0]),\n ({"A": [1, 2], "B": [4, 5]}, [0, 1]),\n ({"A": [2, 3], "B": [5, 6]}, [1, 2]),\n ],\n 2,\n 1,\n ),\n (\n DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),\n [\n ({"A": [1], "B": [4]}, [0]),\n ({"A": [1, 2], "B": [4, 5]}, [0, 1]),\n ({"A": [2, 3], "B": [5, 6]}, [1, 2]),\n ],\n 2,\n 2,\n ),\n (\n DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),\n [\n ({"A": [1], "B": [4]}, [0]),\n ({"A": [2], "B": [5]}, [1]),\n ({"A": [3], "B": [6]}, [2]),\n ],\n 1,\n 1,\n ),\n (\n DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}),\n [\n ({"A": [1], "B": [4]}, [0]),\n ({"A": [2], "B": [5]}, [1]),\n ({"A": [3], "B": [6]}, [2]),\n ],\n 1,\n 0,\n ),\n (DataFrame({"A": [1], "B": [4]}), [], 2, None),\n (DataFrame({"A": [1], "B": [4]}), [], 2, 1),\n (DataFrame(), [({}, [])], 2, None),\n (\n DataFrame({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}),\n [\n ({"A": [1.0], "B": [np.nan]}, [0]),\n ({"A": [1, np.nan], "B": [np.nan, 5]}, [0, 1]),\n ({"A": [1, np.nan, 3], "B": [np.nan, 5, 6]}, [0, 1, 2]),\n ],\n 3,\n 2,\n ),\n ],\n)\ndef test_iter_rolling_dataframe(df, expected, window, min_periods):\n # GH 11704\n expected = [DataFrame(values, index=index) for (values, index) in expected]\n\n for expected, actual in zip(expected, df.rolling(window, min_periods=min_periods)):\n tm.assert_frame_equal(actual, expected)\n\n\n@pytest.mark.parametrize(\n "expected,window",\n [\n (\n [\n ({"A": [1], "B": [4]}, [0]),\n ({"A": [1, 2], "B": [4, 5]}, [0, 1]),\n ({"A": [2, 3], "B": [5, 6]}, [1, 2]),\n ],\n "2D",\n ),\n 
(\n [\n ({"A": [1], "B": [4]}, [0]),\n ({"A": [1, 2], "B": [4, 5]}, [0, 1]),\n ({"A": [1, 2, 3], "B": [4, 5, 6]}, [0, 1, 2]),\n ],\n "3D",\n ),\n (\n [\n ({"A": [1], "B": [4]}, [0]),\n ({"A": [2], "B": [5]}, [1]),\n ({"A": [3], "B": [6]}, [2]),\n ],\n "1D",\n ),\n ],\n)\ndef test_iter_rolling_on_dataframe(expected, window):\n # GH 11704, 40373\n df = DataFrame(\n {\n "A": [1, 2, 3, 4, 5],\n "B": [4, 5, 6, 7, 8],\n "C": date_range(start="2016-01-01", periods=5, freq="D"),\n }\n )\n\n expected = [\n DataFrame(values, index=df.loc[index, "C"]) for (values, index) in expected\n ]\n for expected, actual in zip(expected, df.rolling(window, on="C")):\n tm.assert_frame_equal(actual, expected)\n\n\ndef test_iter_rolling_on_dataframe_unordered():\n # GH 43386\n df = DataFrame({"a": ["x", "y", "x"], "b": [0, 1, 2]})\n results = list(df.groupby("a").rolling(2))\n expecteds = [df.iloc[idx, [1]] for idx in [[0], [0, 2], [1]]]\n for result, expected in zip(results, expecteds):\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "ser,expected,window, min_periods",\n [\n (\n Series([1, 2, 3]),\n [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])],\n 3,\n None,\n ),\n (\n Series([1, 2, 3]),\n [([1], [0]), ([1, 2], [0, 1]), ([1, 2, 3], [0, 1, 2])],\n 3,\n 1,\n ),\n (\n Series([1, 2, 3]),\n [([1], [0]), ([1, 2], [0, 1]), ([2, 3], [1, 2])],\n 2,\n 1,\n ),\n (\n Series([1, 2, 3]),\n [([1], [0]), ([1, 2], [0, 1]), ([2, 3], [1, 2])],\n 2,\n 2,\n ),\n (Series([1, 2, 3]), [([1], [0]), ([2], [1]), ([3], [2])], 1, 0),\n (Series([1, 2, 3]), [([1], [0]), ([2], [1]), ([3], [2])], 1, 1),\n (Series([1, 2]), [([1], [0]), ([1, 2], [0, 1])], 2, 0),\n (Series([], dtype="int64"), [], 2, 1),\n ],\n)\ndef test_iter_rolling_series(ser, expected, window, min_periods):\n # GH 11704\n expected = [Series(values, index=index) for (values, index) in expected]\n\n for expected, actual in zip(expected, ser.rolling(window, min_periods=min_periods)):\n tm.assert_series_equal(actual, 
expected)\n\n\n@pytest.mark.parametrize(\n "expected,expected_index,window",\n [\n (\n [[0], [1], [2], [3], [4]],\n [\n date_range("2020-01-01", periods=1, freq="D"),\n date_range("2020-01-02", periods=1, freq="D"),\n date_range("2020-01-03", periods=1, freq="D"),\n date_range("2020-01-04", periods=1, freq="D"),\n date_range("2020-01-05", periods=1, freq="D"),\n ],\n "1D",\n ),\n (\n [[0], [0, 1], [1, 2], [2, 3], [3, 4]],\n [\n date_range("2020-01-01", periods=1, freq="D"),\n date_range("2020-01-01", periods=2, freq="D"),\n date_range("2020-01-02", periods=2, freq="D"),\n date_range("2020-01-03", periods=2, freq="D"),\n date_range("2020-01-04", periods=2, freq="D"),\n ],\n "2D",\n ),\n (\n [[0], [0, 1], [0, 1, 2], [1, 2, 3], [2, 3, 4]],\n [\n date_range("2020-01-01", periods=1, freq="D"),\n date_range("2020-01-01", periods=2, freq="D"),\n date_range("2020-01-01", periods=3, freq="D"),\n date_range("2020-01-02", periods=3, freq="D"),\n date_range("2020-01-03", periods=3, freq="D"),\n ],\n "3D",\n ),\n ],\n)\ndef test_iter_rolling_datetime(expected, expected_index, window):\n # GH 11704\n ser = Series(range(5), index=date_range(start="2020-01-01", periods=5, freq="D"))\n\n expected = [\n Series(values, index=idx) for (values, idx) in zip(expected, expected_index)\n ]\n\n for expected, actual in zip(expected, ser.rolling(window)):\n tm.assert_series_equal(actual, expected)\n\n\n@pytest.mark.parametrize(\n "grouping,_index",\n [\n (\n {"level": 0},\n MultiIndex.from_tuples(\n [(0, 0), (0, 0), (1, 1), (1, 1), (1, 1)], names=[None, None]\n ),\n ),\n (\n {"by": "X"},\n MultiIndex.from_tuples(\n [(0, 0), (1, 0), (2, 1), (3, 1), (4, 1)], names=["X", None]\n ),\n ),\n ],\n)\ndef test_rolling_positional_argument(grouping, _index, raw):\n # GH 34605\n\n def scaled_sum(*args):\n if len(args) < 2:\n raise ValueError("The function needs two arguments")\n array, scale = args\n return array.sum() / scale\n\n df = DataFrame(data={"X": range(5)}, index=[0, 0, 1, 1, 1])\n\n expected = 
DataFrame(data={"X": [0.0, 0.5, 1.0, 1.5, 2.0]}, index=_index)\n # GH 40341\n if "by" in grouping:\n expected = expected.drop(columns="X", errors="ignore")\n result = df.groupby(**grouping).rolling(1).apply(scaled_sum, raw=raw, args=(2,))\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("add", [0.0, 2.0])\ndef test_rolling_numerical_accuracy_kahan_mean(add, unit):\n # GH: 36031 implementing kahan summation\n dti = DatetimeIndex(\n [\n Timestamp("19700101 09:00:00"),\n Timestamp("19700101 09:00:03"),\n Timestamp("19700101 09:00:06"),\n ]\n ).as_unit(unit)\n df = DataFrame(\n {"A": [3002399751580331.0 + add, -0.0, -0.0]},\n index=dti,\n )\n result = (\n df.resample("1s").ffill().rolling("3s", closed="left", min_periods=3).mean()\n )\n dates = date_range("19700101 09:00:00", periods=7, freq="s", unit=unit)\n expected = DataFrame(\n {\n "A": [\n np.nan,\n np.nan,\n np.nan,\n 3002399751580330.5,\n 2001599834386887.25,\n 1000799917193443.625,\n 0.0,\n ]\n },\n index=dates,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_numerical_accuracy_kahan_sum():\n # GH: 13254\n df = DataFrame([2.186, -1.647, 0.0, 0.0, 0.0, 0.0], columns=["x"])\n result = df["x"].rolling(3).sum()\n expected = Series([np.nan, np.nan, 0.539, -1.647, 0.0, 0.0], name="x")\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_numerical_accuracy_jump():\n # GH: 32761\n index = date_range(start="2020-01-01", end="2020-01-02", freq="60s").append(\n DatetimeIndex(["2020-01-03"])\n )\n data = np.random.default_rng(2).random(len(index))\n\n df = DataFrame({"data": data}, index=index)\n result = df.rolling("60s").mean()\n tm.assert_frame_equal(result, df[["data"]])\n\n\ndef test_rolling_numerical_accuracy_small_values():\n # GH: 10319\n s = Series(\n data=[0.00012456, 0.0003, -0.0, -0.0],\n index=date_range("1999-02-03", "1999-02-06"),\n )\n result = s.rolling(1).mean()\n tm.assert_series_equal(result, s)\n\n\ndef 
test_rolling_numerical_too_large_numbers():\n # GH: 11645\n dates = date_range("2015-01-01", periods=10, freq="D")\n ds = Series(data=range(10), index=dates, dtype=np.float64)\n ds.iloc[2] = -9e33\n result = ds.rolling(5).mean()\n expected = Series(\n [\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n -1.8e33,\n -1.8e33,\n -1.8e33,\n 5.0,\n 6.0,\n 7.0,\n ],\n index=dates,\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n ("func", "value"),\n [("sum", 2.0), ("max", 1.0), ("min", 1.0), ("mean", 1.0), ("median", 1.0)],\n)\ndef test_rolling_mixed_dtypes_axis_1(func, value):\n # GH: 20649\n df = DataFrame(1, index=[1, 2], columns=["a", "b", "c"])\n df["c"] = 1.0\n msg = "Support for axis=1 in DataFrame.rolling is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n roll = df.rolling(window=2, min_periods=1, axis=1)\n result = getattr(roll, func)()\n expected = DataFrame(\n {"a": [1.0, 1.0], "b": [value, value], "c": [value, value]},\n index=[1, 2],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_axis_one_with_nan():\n # GH: 35596\n df = DataFrame(\n [\n [0, 1, 2, 4, np.nan, np.nan, np.nan],\n [0, 1, 2, np.nan, np.nan, np.nan, np.nan],\n [0, 2, 2, np.nan, 2, np.nan, 1],\n ]\n )\n msg = "Support for axis=1 in DataFrame.rolling is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.rolling(window=7, min_periods=1, axis="columns").sum()\n expected = DataFrame(\n [\n [0.0, 1.0, 3.0, 7.0, 7.0, 7.0, 7.0],\n [0.0, 1.0, 3.0, 3.0, 3.0, 3.0, 3.0],\n [0.0, 2.0, 4.0, 4.0, 6.0, 6.0, 7.0],\n ]\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "value",\n ["test", to_datetime("2019-12-31"), to_timedelta("1 days 06:05:01.00003")],\n)\ndef test_rolling_axis_1_non_numeric_dtypes(value):\n # GH: 20649\n df = DataFrame({"a": [1, 2]})\n df["b"] = value\n msg = "Support for axis=1 in DataFrame.rolling is deprecated"\n with tm.assert_produces_warning(FutureWarning, 
match=msg):\n result = df.rolling(window=2, min_periods=1, axis=1).sum()\n expected = DataFrame({"a": [1.0, 2.0]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_on_df_transposed():\n # GH: 32724\n df = DataFrame({"A": [1, None], "B": [4, 5], "C": [7, 8]})\n expected = DataFrame({"A": [1.0, np.nan], "B": [5.0, 5.0], "C": [11.0, 13.0]})\n msg = "Support for axis=1 in DataFrame.rolling is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.rolling(min_periods=1, window=2, axis=1).sum()\n tm.assert_frame_equal(result, expected)\n\n result = df.T.rolling(min_periods=1, window=2).sum().T\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n ("index", "window"),\n [\n (\n period_range(start="2020-01-01 08:00", end="2020-01-01 08:08", freq="min"),\n "2min",\n ),\n (\n period_range(\n start="2020-01-01 08:00", end="2020-01-01 12:00", freq="30min"\n ),\n "1h",\n ),\n ],\n)\n@pytest.mark.parametrize(\n ("func", "values"),\n [\n ("min", [np.nan, 0, 0, 1, 2, 3, 4, 5, 6]),\n ("max", [np.nan, 0, 1, 2, 3, 4, 5, 6, 7]),\n ("sum", [np.nan, 0, 1, 3, 5, 7, 9, 11, 13]),\n ],\n)\ndef test_rolling_period_index(index, window, func, values):\n # GH: 34225\n ds = Series([0, 1, 2, 3, 4, 5, 6, 7, 8], index=index)\n result = getattr(ds.rolling(window, closed="left"), func)()\n expected = Series(values, index=index)\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_sem(frame_or_series):\n # GH: 26476\n obj = frame_or_series([0, 1, 2])\n result = obj.rolling(2, min_periods=1).sem()\n if isinstance(result, DataFrame):\n result = Series(result[0].values)\n expected = Series([np.nan] + [0.7071067811865476] * 2)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.xfail(\n is_platform_arm() or is_platform_power(),\n reason="GH 38921",\n)\n@pytest.mark.parametrize(\n ("func", "third_value", "values"),\n [\n ("var", 1, [5e33, 0, 0.5, 0.5, 2, 0]),\n ("std", 1, [7.071068e16, 0, 0.7071068, 0.7071068, 
1.414214, 0]),\n ("var", 2, [5e33, 0.5, 0, 0.5, 2, 0]),\n ("std", 2, [7.071068e16, 0.7071068, 0, 0.7071068, 1.414214, 0]),\n ],\n)\ndef test_rolling_var_numerical_issues(func, third_value, values):\n # GH: 37051\n ds = Series([99999999999999999, 1, third_value, 2, 3, 1, 1])\n result = getattr(ds.rolling(2), func)()\n expected = Series([np.nan] + values)\n tm.assert_series_equal(result, expected)\n # GH 42064\n # new `roll_var` will output 0.0 correctly\n tm.assert_series_equal(result == 0, expected == 0)\n\n\ndef test_timeoffset_as_window_parameter_for_corr(unit):\n # GH: 28266\n dti = DatetimeIndex(\n [\n Timestamp("20130101 09:00:00"),\n Timestamp("20130102 09:00:02"),\n Timestamp("20130103 09:00:03"),\n Timestamp("20130105 09:00:05"),\n Timestamp("20130106 09:00:06"),\n ]\n ).as_unit(unit)\n mi = MultiIndex.from_product([dti, ["B", "A"]])\n\n exp = DataFrame(\n {\n "B": [\n np.nan,\n np.nan,\n 0.9999999999999998,\n -1.0,\n 1.0,\n -0.3273268353539892,\n 0.9999999999999998,\n 1.0,\n 0.9999999999999998,\n 1.0,\n ],\n "A": [\n np.nan,\n np.nan,\n -1.0,\n 1.0000000000000002,\n -0.3273268353539892,\n 0.9999999999999966,\n 1.0,\n 1.0000000000000002,\n 1.0,\n 1.0000000000000002,\n ],\n },\n index=mi,\n )\n\n df = DataFrame(\n {"B": [0, 1, 2, 4, 3], "A": [7, 4, 6, 9, 3]},\n index=dti,\n )\n\n res = df.rolling(window="3d").corr()\n\n tm.assert_frame_equal(exp, res)\n\n\n@pytest.mark.parametrize("method", ["var", "sum", "mean", "skew", "kurt", "min", "max"])\ndef test_rolling_decreasing_indices(method):\n """\n Make sure that decreasing indices give the same results as increasing indices.\n\n GH 36933\n """\n df = DataFrame({"values": np.arange(-15, 10) ** 2})\n df_reverse = DataFrame({"values": df["values"][::-1]}, index=df.index[::-1])\n\n increasing = getattr(df.rolling(window=5), method)()\n decreasing = getattr(df_reverse.rolling(window=5), method)()\n\n assert np.abs(decreasing.values[::-1][:-4] - increasing.values[4:]).max() < 1e-12\n\n\n@pytest.mark.parametrize(\n 
"window,closed,expected",\n [\n ("2s", "right", [1.0, 3.0, 5.0, 3.0]),\n ("2s", "left", [0.0, 1.0, 3.0, 5.0]),\n ("2s", "both", [1.0, 3.0, 6.0, 5.0]),\n ("2s", "neither", [0.0, 1.0, 2.0, 3.0]),\n ("3s", "right", [1.0, 3.0, 6.0, 5.0]),\n ("3s", "left", [1.0, 3.0, 6.0, 5.0]),\n ("3s", "both", [1.0, 3.0, 6.0, 5.0]),\n ("3s", "neither", [1.0, 3.0, 6.0, 5.0]),\n ],\n)\ndef test_rolling_decreasing_indices_centered(window, closed, expected, frame_or_series):\n """\n Ensure that a symmetrical inverted index return same result as non-inverted.\n """\n # GH 43927\n\n index = date_range("2020", periods=4, freq="1s")\n df_inc = frame_or_series(range(4), index=index)\n df_dec = frame_or_series(range(4), index=index[::-1])\n\n expected_inc = frame_or_series(expected, index=index)\n expected_dec = frame_or_series(expected, index=index[::-1])\n\n result_inc = df_inc.rolling(window, closed=closed, center=True).sum()\n result_dec = df_dec.rolling(window, closed=closed, center=True).sum()\n\n tm.assert_equal(result_inc, expected_inc)\n tm.assert_equal(result_dec, expected_dec)\n\n\n@pytest.mark.parametrize(\n "window,expected",\n [\n ("1ns", [1.0, 1.0, 1.0, 1.0]),\n ("3ns", [2.0, 3.0, 3.0, 2.0]),\n ],\n)\ndef test_rolling_center_nanosecond_resolution(\n window, closed, expected, frame_or_series\n):\n index = date_range("2020", periods=4, freq="1ns")\n df = frame_or_series([1, 1, 1, 1], index=index, dtype=float)\n expected = frame_or_series(expected, index=index, dtype=float)\n result = df.rolling(window, closed=closed, center=True).sum()\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "method,expected",\n [\n (\n "var",\n [\n float("nan"),\n 43.0,\n float("nan"),\n 136.333333,\n 43.5,\n 94.966667,\n 182.0,\n 318.0,\n ],\n ),\n (\n "mean",\n [float("nan"), 7.5, float("nan"), 21.5, 6.0, 9.166667, 13.0, 17.5],\n ),\n (\n "sum",\n [float("nan"), 30.0, float("nan"), 86.0, 30.0, 55.0, 91.0, 140.0],\n ),\n (\n "skew",\n [\n float("nan"),\n 0.709296,\n float("nan"),\n 
0.407073,\n 0.984656,\n 0.919184,\n 0.874674,\n 0.842418,\n ],\n ),\n (\n "kurt",\n [\n float("nan"),\n -0.5916711736073559,\n float("nan"),\n -1.0028993131317954,\n -0.06103844629409494,\n -0.254143227116194,\n -0.37362637362637585,\n -0.45439658241367054,\n ],\n ),\n ],\n)\ndef test_rolling_non_monotonic(method, expected):\n """\n Make sure the (rare) branch of non-monotonic indices is covered by a test.\n\n output from 1.1.3 is assumed to be the expected output. Output of sum/mean has\n manually been verified.\n\n GH 36933.\n """\n # Based on an example found in computation.rst\n use_expanding = [True, False, True, False, True, True, True, True]\n df = DataFrame({"values": np.arange(len(use_expanding)) ** 2})\n\n class CustomIndexer(BaseIndexer):\n def get_window_bounds(self, num_values, min_periods, center, closed, step):\n start = np.empty(num_values, dtype=np.int64)\n end = np.empty(num_values, dtype=np.int64)\n for i in range(num_values):\n if self.use_expanding[i]:\n start[i] = 0\n end[i] = i + 1\n else:\n start[i] = i\n end[i] = i + self.window_size\n return start, end\n\n indexer = CustomIndexer(window_size=4, use_expanding=use_expanding)\n\n result = getattr(df.rolling(indexer), method)()\n expected = DataFrame({"values": expected})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n ("index", "window"),\n [\n ([0, 1, 2, 3, 4], 2),\n (date_range("2001-01-01", freq="D", periods=5), "2D"),\n ],\n)\ndef test_rolling_corr_timedelta_index(index, window):\n # GH: 31286\n x = Series([1, 2, 3, 4, 5], index=index)\n y = x.copy()\n x.iloc[0:2] = 0.0\n result = x.rolling(window).corr(y)\n expected = Series([np.nan, np.nan, 1, 1, 1], index=index)\n tm.assert_almost_equal(result, expected)\n\n\ndef test_groupby_rolling_nan_included():\n # GH 35542\n data = {"group": ["g1", np.nan, "g1", "g2", np.nan], "B": [0, 1, 2, 3, 4]}\n df = DataFrame(data)\n result = df.groupby("group", dropna=False).rolling(1, min_periods=1).mean()\n expected = 
DataFrame(\n {"B": [0.0, 2.0, 3.0, 1.0, 4.0]},\n # GH-38057 from_tuples puts the NaNs in the codes, result expects them\n # to be in the levels, at the moment\n # index=MultiIndex.from_tuples(\n # [("g1", 0), ("g1", 2), ("g2", 3), (np.nan, 1), (np.nan, 4)],\n # names=["group", None],\n # ),\n index=MultiIndex(\n [["g1", "g2", np.nan], [0, 1, 2, 3, 4]],\n [[0, 0, 1, 2, 2], [0, 2, 3, 1, 4]],\n names=["group", None],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("method", ["skew", "kurt"])\ndef test_rolling_skew_kurt_numerical_stability(method):\n # GH#6929\n ser = Series(np.random.default_rng(2).random(10))\n ser_copy = ser.copy()\n expected = getattr(ser.rolling(3), method)()\n tm.assert_series_equal(ser, ser_copy)\n ser = ser + 50000\n result = getattr(ser.rolling(3), method)()\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n ("method", "values"),\n [\n ("skew", [2.0, 0.854563, 0.0, 1.999984]),\n ("kurt", [4.0, -1.289256, -1.2, 3.999946]),\n ],\n)\ndef test_rolling_skew_kurt_large_value_range(method, values):\n # GH: 37557\n s = Series([3000000, 1, 1, 2, 3, 4, 999])\n result = getattr(s.rolling(4), method)()\n expected = Series([np.nan] * 3 + values)\n tm.assert_series_equal(result, expected)\n\n\ndef test_invalid_method():\n with pytest.raises(ValueError, match="method must be 'table' or 'single"):\n Series(range(1)).rolling(1, method="foo")\n\n\n@pytest.mark.parametrize("window", [1, "1d"])\ndef test_rolling_descending_date_order_with_offset(window, frame_or_series):\n # GH#40002\n idx = date_range(start="2020-01-01", end="2020-01-03", freq="1d")\n obj = frame_or_series(range(1, 4), index=idx)\n result = obj.rolling("1d", closed="left").sum()\n expected = frame_or_series([np.nan, 1, 2], index=idx)\n tm.assert_equal(result, expected)\n\n result = obj.iloc[::-1].rolling("1d", closed="left").sum()\n idx = date_range(start="2020-01-03", end="2020-01-01", freq="-1d")\n expected = frame_or_series([np.nan, 3, 
2], index=idx)\n tm.assert_equal(result, expected)\n\n\ndef test_rolling_var_floating_artifact_precision():\n # GH 37051\n s = Series([7, 5, 5, 5])\n result = s.rolling(3).var()\n expected = Series([np.nan, np.nan, 4 / 3, 0])\n tm.assert_series_equal(result, expected, atol=1.0e-15, rtol=1.0e-15)\n # GH 42064\n # new `roll_var` will output 0.0 correctly\n tm.assert_series_equal(result == 0, expected == 0)\n\n\ndef test_rolling_std_small_values():\n # GH 37051\n s = Series(\n [\n 0.00000054,\n 0.00000053,\n 0.00000054,\n ]\n )\n result = s.rolling(2).std()\n expected = Series([np.nan, 7.071068e-9, 7.071068e-9])\n tm.assert_series_equal(result, expected, atol=1.0e-15, rtol=1.0e-15)\n\n\n@pytest.mark.parametrize(\n "start, exp_values",\n [\n (1, [0.03, 0.0155, 0.0155, 0.011, 0.01025]),\n (2, [0.001, 0.001, 0.0015, 0.00366666]),\n ],\n)\ndef test_rolling_mean_all_nan_window_floating_artifacts(start, exp_values):\n # GH#41053\n df = DataFrame(\n [\n 0.03,\n 0.03,\n 0.001,\n np.nan,\n 0.002,\n 0.008,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n 0.005,\n 0.2,\n ]\n )\n\n values = exp_values + [\n 0.00366666,\n 0.005,\n 0.005,\n 0.008,\n np.nan,\n np.nan,\n 0.005,\n 0.102500,\n ]\n expected = DataFrame(\n values,\n index=list(range(start, len(values) + start)),\n )\n result = df.iloc[start:].rolling(5, min_periods=0).mean()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_sum_all_nan_window_floating_artifacts():\n # GH#41053\n df = DataFrame([0.002, 0.008, 0.005, np.nan, np.nan, np.nan])\n result = df.rolling(3, min_periods=0).sum()\n expected = DataFrame([0.002, 0.010, 0.015, 0.013, 0.005, 0.0])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_rolling_zero_window():\n # GH 22719\n s = Series(range(1))\n result = s.rolling(0).min()\n expected = Series([np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_float_dtype(float_numpy_dtype):\n # GH#42452\n df = DataFrame({"A": range(5), "B": range(10, 15)}, 
dtype=float_numpy_dtype)\n expected = DataFrame(\n {"A": [np.nan] * 5, "B": range(10, 20, 2)},\n dtype=float_numpy_dtype,\n )\n msg = "Support for axis=1 in DataFrame.rolling is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.rolling(2, axis=1).sum()\n tm.assert_frame_equal(result, expected, check_dtype=False)\n\n\ndef test_rolling_numeric_dtypes():\n # GH#41779\n df = DataFrame(np.arange(40).reshape(4, 10), columns=list("abcdefghij")).astype(\n {\n "a": "float16",\n "b": "float32",\n "c": "float64",\n "d": "int8",\n "e": "int16",\n "f": "int32",\n "g": "uint8",\n "h": "uint16",\n "i": "uint32",\n "j": "uint64",\n }\n )\n msg = "Support for axis=1 in DataFrame.rolling is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.rolling(window=2, min_periods=1, axis=1).min()\n expected = DataFrame(\n {\n "a": range(0, 40, 10),\n "b": range(0, 40, 10),\n "c": range(1, 40, 10),\n "d": range(2, 40, 10),\n "e": range(3, 40, 10),\n "f": range(4, 40, 10),\n "g": range(5, 40, 10),\n "h": range(6, 40, 10),\n "i": range(7, 40, 10),\n "j": range(8, 40, 10),\n },\n dtype="float64",\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("window", [1, 3, 10, 20])\n@pytest.mark.parametrize("method", ["min", "max", "average"])\n@pytest.mark.parametrize("pct", [True, False])\n@pytest.mark.parametrize("ascending", [True, False])\n@pytest.mark.parametrize("test_data", ["default", "duplicates", "nans"])\ndef test_rank(window, method, pct, ascending, test_data):\n length = 20\n if test_data == "default":\n ser = Series(data=np.random.default_rng(2).random(length))\n elif test_data == "duplicates":\n ser = Series(data=np.random.default_rng(2).choice(3, length))\n elif test_data == "nans":\n ser = Series(\n data=np.random.default_rng(2).choice(\n [1.0, 0.25, 0.75, np.nan, np.inf, -np.inf], length\n )\n )\n\n expected = ser.rolling(window).apply(\n lambda x: x.rank(method=method, pct=pct, 
ascending=ascending).iloc[-1]\n )\n result = ser.rolling(window).rank(method=method, pct=pct, ascending=ascending)\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_quantile_np_percentile():\n # #9413: Tests that rolling window's quantile default behavior\n # is analogous to Numpy's percentile\n row = 10\n col = 5\n idx = date_range("20100101", periods=row, freq="B")\n df = DataFrame(\n np.random.default_rng(2).random(row * col).reshape((row, -1)), index=idx\n )\n\n df_quantile = df.quantile([0.25, 0.5, 0.75], axis=0)\n np_percentile = np.percentile(df, [25, 50, 75], axis=0)\n\n tm.assert_almost_equal(df_quantile.values, np.array(np_percentile))\n\n\n@pytest.mark.parametrize("quantile", [0.0, 0.1, 0.45, 0.5, 1])\n@pytest.mark.parametrize(\n "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]\n)\n@pytest.mark.parametrize(\n "data",\n [\n [1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0],\n [8.0, 1.0, 3.0, 4.0, 5.0, 2.0, 6.0, 7.0],\n [0.0, np.nan, 0.2, np.nan, 0.4],\n [np.nan, np.nan, np.nan, np.nan],\n [np.nan, 0.1, np.nan, 0.3, 0.4, 0.5],\n [0.5],\n [np.nan, 0.7, 0.6],\n ],\n)\ndef test_rolling_quantile_interpolation_options(quantile, interpolation, data):\n # Tests that rolling window's quantile behavior is analogous to\n # Series' quantile for each interpolation option\n s = Series(data)\n\n q1 = s.quantile(quantile, interpolation)\n q2 = s.expanding(min_periods=1).quantile(quantile, interpolation).iloc[-1]\n\n if np.isnan(q1):\n assert np.isnan(q2)\n else:\n if not IS64:\n # Less precision on 32-bit\n assert np.allclose([q1], [q2], rtol=1e-07, atol=0)\n else:\n assert q1 == q2\n\n\ndef test_invalid_quantile_value():\n data = np.arange(5)\n s = Series(data)\n\n msg = "Interpolation 'invalid' is not supported"\n with pytest.raises(ValueError, match=msg):\n s.rolling(len(data), min_periods=1).quantile(0.5, interpolation="invalid")\n\n\ndef test_rolling_quantile_param():\n ser = Series([0.0, 0.1, 0.5, 0.9, 1.0])\n msg = "quantile value -0.1 not 
in \\[0, 1\\]"\n with pytest.raises(ValueError, match=msg):\n ser.rolling(3).quantile(-0.1)\n\n msg = "quantile value 10.0 not in \\[0, 1\\]"\n with pytest.raises(ValueError, match=msg):\n ser.rolling(3).quantile(10.0)\n\n msg = "must be real number, not str"\n with pytest.raises(TypeError, match=msg):\n ser.rolling(3).quantile("foo")\n\n\ndef test_rolling_std_1obs():\n vals = Series([1.0, 2.0, 3.0, 4.0, 5.0])\n\n result = vals.rolling(1, min_periods=1).std()\n expected = Series([np.nan] * 5)\n tm.assert_series_equal(result, expected)\n\n result = vals.rolling(1, min_periods=1).std(ddof=0)\n expected = Series([0.0] * 5)\n tm.assert_series_equal(result, expected)\n\n result = Series([np.nan, np.nan, 3, 4, 5]).rolling(3, min_periods=2).std()\n assert np.isnan(result[2])\n\n\ndef test_rolling_std_neg_sqrt():\n # unit test from Bottleneck\n\n # Test move_nanstd for neg sqrt.\n\n a = Series(\n [\n 0.0011448196318903589,\n 0.00028718669878572767,\n 0.00028718669878572767,\n 0.00028718669878572767,\n 0.00028718669878572767,\n ]\n )\n b = a.rolling(window=3).std()\n assert np.isfinite(b[2:]).all()\n\n b = a.ewm(span=3).std()\n assert np.isfinite(b[2:]).all()\n\n\ndef test_step_not_integer_raises():\n with pytest.raises(ValueError, match="step must be an integer"):\n DataFrame(range(2)).rolling(1, step="foo")\n\n\ndef test_step_not_positive_raises():\n with pytest.raises(ValueError, match="step must be >= 0"):\n DataFrame(range(2)).rolling(1, step=-1)\n\n\n@pytest.mark.parametrize(\n ["values", "window", "min_periods", "expected"],\n [\n [\n [20, 10, 10, np.inf, 1, 1, 2, 3],\n 3,\n 1,\n [np.nan, 50, 100 / 3, 0, 40.5, 0, 1 / 3, 1],\n ],\n [\n [20, 10, 10, np.nan, 10, 1, 2, 3],\n 3,\n 1,\n [np.nan, 50, 100 / 3, 0, 0, 40.5, 73 / 3, 1],\n ],\n [\n [np.nan, 5, 6, 7, 5, 5, 5],\n 3,\n 3,\n [np.nan] * 3 + [1, 1, 4 / 3, 0],\n ],\n [\n [5, 7, 7, 7, np.nan, np.inf, 4, 3, 3, 3],\n 3,\n 3,\n [np.nan] * 2 + [4 / 3, 0] + [np.nan] * 4 + [1 / 3, 0],\n ],\n [\n [5, 7, 7, 7, np.nan, np.inf, 
7, 3, 3, 3],\n 3,\n 3,\n [np.nan] * 2 + [4 / 3, 0] + [np.nan] * 4 + [16 / 3, 0],\n ],\n [\n [5, 7] * 4,\n 3,\n 3,\n [np.nan] * 2 + [4 / 3] * 6,\n ],\n [\n [5, 7, 5, np.nan, 7, 5, 7],\n 3,\n 2,\n [np.nan, 2, 4 / 3] + [2] * 3 + [4 / 3],\n ],\n ],\n)\ndef test_rolling_var_same_value_count_logic(values, window, min_periods, expected):\n # GH 42064.\n\n expected = Series(expected)\n sr = Series(values)\n\n # With new algo implemented, result will be set to .0 in rolling var\n # if sufficient amount of consecutively same values are found.\n result_var = sr.rolling(window, min_periods=min_periods).var()\n\n # use `assert_series_equal` twice to check for equality,\n # because `check_exact=True` will fail in 32-bit tests due to\n # precision loss.\n\n # 1. result should be close to correct value\n # non-zero values can still differ slightly from "truth"\n # as the result of online algorithm\n tm.assert_series_equal(result_var, expected)\n # 2. zeros should be exactly the same since the new algo takes effect here\n tm.assert_series_equal(expected == 0, result_var == 0)\n\n # std should also pass as it's just a sqrt of var\n result_std = sr.rolling(window, min_periods=min_periods).std()\n tm.assert_series_equal(result_std, np.sqrt(expected))\n tm.assert_series_equal(expected == 0, result_std == 0)\n\n\ndef test_rolling_mean_sum_floating_artifacts():\n # GH 42064.\n\n sr = Series([1 / 3, 4, 0, 0, 0, 0, 0])\n r = sr.rolling(3)\n result = r.mean()\n assert (result[-3:] == 0).all()\n result = r.sum()\n assert (result[-3:] == 0).all()\n\n\ndef test_rolling_skew_kurt_floating_artifacts():\n # GH 42064 46431\n\n sr = Series([1 / 3, 4, 0, 0, 0, 0, 0])\n r = sr.rolling(4)\n result = r.skew()\n assert (result[-2:] == 0).all()\n result = r.kurt()\n assert (result[-2:] == -3).all()\n\n\ndef test_numeric_only_frame(arithmetic_win_operators, numeric_only):\n # GH#46560\n kernel = arithmetic_win_operators\n df = DataFrame({"a": [1], "b": 2, "c": 3})\n df["c"] = df["c"].astype(object)\n 
rolling = df.rolling(2, min_periods=1)\n op = getattr(rolling, kernel)\n result = op(numeric_only=numeric_only)\n\n columns = ["a", "b"] if numeric_only else ["a", "b", "c"]\n expected = df[columns].agg([kernel]).reset_index(drop=True).astype(float)\n assert list(expected.columns) == columns\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("kernel", ["corr", "cov"])\n@pytest.mark.parametrize("use_arg", [True, False])\ndef test_numeric_only_corr_cov_frame(kernel, numeric_only, use_arg):\n # GH#46560\n df = DataFrame({"a": [1, 2, 3], "b": 2, "c": 3})\n df["c"] = df["c"].astype(object)\n arg = (df,) if use_arg else ()\n rolling = df.rolling(2, min_periods=1)\n op = getattr(rolling, kernel)\n result = op(*arg, numeric_only=numeric_only)\n\n # Compare result to op using float dtypes, dropping c when numeric_only is True\n columns = ["a", "b"] if numeric_only else ["a", "b", "c"]\n df2 = df[columns].astype(float)\n arg2 = (df2,) if use_arg else ()\n rolling2 = df2.rolling(2, min_periods=1)\n op2 = getattr(rolling2, kernel)\n expected = op2(*arg2, numeric_only=numeric_only)\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("dtype", [int, object])\ndef test_numeric_only_series(arithmetic_win_operators, numeric_only, dtype):\n # GH#46560\n kernel = arithmetic_win_operators\n ser = Series([1], dtype=dtype)\n rolling = ser.rolling(2, min_periods=1)\n op = getattr(rolling, kernel)\n if numeric_only and dtype is object:\n msg = f"Rolling.{kernel} does not implement numeric_only"\n with pytest.raises(NotImplementedError, match=msg):\n op(numeric_only=numeric_only)\n else:\n result = op(numeric_only=numeric_only)\n expected = ser.agg([kernel]).reset_index(drop=True).astype(float)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("kernel", ["corr", "cov"])\n@pytest.mark.parametrize("use_arg", [True, False])\n@pytest.mark.parametrize("dtype", [int, object])\ndef test_numeric_only_corr_cov_series(kernel, 
use_arg, numeric_only, dtype):\n # GH#46560\n ser = Series([1, 2, 3], dtype=dtype)\n arg = (ser,) if use_arg else ()\n rolling = ser.rolling(2, min_periods=1)\n op = getattr(rolling, kernel)\n if numeric_only and dtype is object:\n msg = f"Rolling.{kernel} does not implement numeric_only"\n with pytest.raises(NotImplementedError, match=msg):\n op(*arg, numeric_only=numeric_only)\n else:\n result = op(*arg, numeric_only=numeric_only)\n\n ser2 = ser.astype(float)\n arg2 = (ser2,) if use_arg else ()\n rolling2 = ser2.rolling(2, min_periods=1)\n op2 = getattr(rolling2, kernel)\n expected = op2(*arg2, numeric_only=numeric_only)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])\n@pytest.mark.parametrize("tz", [None, "UTC", "Europe/Prague"])\ndef test_rolling_timedelta_window_non_nanoseconds(unit, tz):\n # Test Sum, GH#55106\n df_time = DataFrame(\n {"A": range(5)}, index=date_range("2013-01-01", freq="1s", periods=5, tz=tz)\n )\n sum_in_nanosecs = df_time.rolling("1s").sum()\n # microseconds / milliseconds should not break the correct rolling\n df_time.index = df_time.index.as_unit(unit)\n sum_in_microsecs = df_time.rolling("1s").sum()\n sum_in_microsecs.index = sum_in_microsecs.index.as_unit("ns")\n tm.assert_frame_equal(sum_in_nanosecs, sum_in_microsecs)\n\n # Test max, GH#55026\n ref_dates = date_range("2023-01-01", "2023-01-10", unit="ns", tz=tz)\n ref_series = Series(0, index=ref_dates)\n ref_series.iloc[0] = 1\n ref_max_series = ref_series.rolling(Timedelta(days=4)).max()\n\n dates = date_range("2023-01-01", "2023-01-10", unit=unit, tz=tz)\n series = Series(0, index=dates)\n series.iloc[0] = 1\n max_series = series.rolling(Timedelta(days=4)).max()\n\n ref_df = DataFrame(ref_max_series)\n df = DataFrame(max_series)\n df.index = df.index.as_unit("ns")\n\n tm.assert_frame_equal(ref_df, df)\n | .venv\Lib\site-packages\pandas\tests\window\test_rolling.py | test_rolling.py | Python | 61,158 | 0.75 | 0.071753 | 
0.071174 | vue-tools | 877 | 2024-04-18T02:16:01.405423 | BSD-3-Clause | true | 23ce8380d3b1c43f37f487357fa03676 |
from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Series,\n concat,\n isna,\n notna,\n)\nimport pandas._testing as tm\n\nfrom pandas.tseries import offsets\n\n\n@pytest.mark.parametrize(\n "compare_func, roll_func, kwargs",\n [\n [np.mean, "mean", {}],\n [np.nansum, "sum", {}],\n [\n lambda x: np.isfinite(x).astype(float).sum(),\n "count",\n {},\n ],\n [np.median, "median", {}],\n [np.min, "min", {}],\n [np.max, "max", {}],\n [lambda x: np.std(x, ddof=1), "std", {}],\n [lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],\n [lambda x: np.var(x, ddof=1), "var", {}],\n [lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],\n ],\n)\ndef test_series(series, compare_func, roll_func, kwargs, step):\n result = getattr(series.rolling(50, step=step), roll_func)(**kwargs)\n assert isinstance(result, Series)\n end = range(0, len(series), step or 1)[-1] + 1\n tm.assert_almost_equal(result.iloc[-1], compare_func(series[end - 50 : end]))\n\n\n@pytest.mark.parametrize(\n "compare_func, roll_func, kwargs",\n [\n [np.mean, "mean", {}],\n [np.nansum, "sum", {}],\n [\n lambda x: np.isfinite(x).astype(float).sum(),\n "count",\n {},\n ],\n [np.median, "median", {}],\n [np.min, "min", {}],\n [np.max, "max", {}],\n [lambda x: np.std(x, ddof=1), "std", {}],\n [lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],\n [lambda x: np.var(x, ddof=1), "var", {}],\n [lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],\n ],\n)\ndef test_frame(raw, frame, compare_func, roll_func, kwargs, step):\n result = getattr(frame.rolling(50, step=step), roll_func)(**kwargs)\n assert isinstance(result, DataFrame)\n end = range(0, len(frame), step or 1)[-1] + 1\n tm.assert_series_equal(\n result.iloc[-1, :],\n frame.iloc[end - 50 : end, :].apply(compare_func, axis=0, raw=raw),\n check_names=False,\n )\n\n\n@pytest.mark.parametrize(\n "compare_func, roll_func, kwargs, minp",\n [\n [np.mean, "mean", {}, 10],\n 
[np.nansum, "sum", {}, 10],\n [lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],\n [np.median, "median", {}, 10],\n [np.min, "min", {}, 10],\n [np.max, "max", {}, 10],\n [lambda x: np.std(x, ddof=1), "std", {}, 10],\n [lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],\n [lambda x: np.var(x, ddof=1), "var", {}, 10],\n [lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],\n ],\n)\ndef test_time_rule_series(series, compare_func, roll_func, kwargs, minp):\n win = 25\n ser = series[::2].resample("B").mean()\n series_result = getattr(ser.rolling(window=win, min_periods=minp), roll_func)(\n **kwargs\n )\n last_date = series_result.index[-1]\n prev_date = last_date - 24 * offsets.BDay()\n\n trunc_series = series[::2].truncate(prev_date, last_date)\n tm.assert_almost_equal(series_result.iloc[-1], compare_func(trunc_series))\n\n\n@pytest.mark.parametrize(\n "compare_func, roll_func, kwargs, minp",\n [\n [np.mean, "mean", {}, 10],\n [np.nansum, "sum", {}, 10],\n [lambda x: np.isfinite(x).astype(float).sum(), "count", {}, 0],\n [np.median, "median", {}, 10],\n [np.min, "min", {}, 10],\n [np.max, "max", {}, 10],\n [lambda x: np.std(x, ddof=1), "std", {}, 10],\n [lambda x: np.std(x, ddof=0), "std", {"ddof": 0}, 10],\n [lambda x: np.var(x, ddof=1), "var", {}, 10],\n [lambda x: np.var(x, ddof=0), "var", {"ddof": 0}, 10],\n ],\n)\ndef test_time_rule_frame(raw, frame, compare_func, roll_func, kwargs, minp):\n win = 25\n frm = frame[::2].resample("B").mean()\n frame_result = getattr(frm.rolling(window=win, min_periods=minp), roll_func)(\n **kwargs\n )\n last_date = frame_result.index[-1]\n prev_date = last_date - 24 * offsets.BDay()\n\n trunc_frame = frame[::2].truncate(prev_date, last_date)\n tm.assert_series_equal(\n frame_result.xs(last_date),\n trunc_frame.apply(compare_func, raw=raw),\n check_names=False,\n )\n\n\n@pytest.mark.parametrize(\n "compare_func, roll_func, kwargs",\n [\n [np.mean, "mean", {}],\n [np.nansum, "sum", {}],\n [np.median, "median", {}],\n 
[np.min, "min", {}],\n [np.max, "max", {}],\n [lambda x: np.std(x, ddof=1), "std", {}],\n [lambda x: np.std(x, ddof=0), "std", {"ddof": 0}],\n [lambda x: np.var(x, ddof=1), "var", {}],\n [lambda x: np.var(x, ddof=0), "var", {"ddof": 0}],\n ],\n)\ndef test_nans(compare_func, roll_func, kwargs):\n obj = Series(np.random.default_rng(2).standard_normal(50))\n obj[:10] = np.nan\n obj[-10:] = np.nan\n\n result = getattr(obj.rolling(50, min_periods=30), roll_func)(**kwargs)\n tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))\n\n # min_periods is working correctly\n result = getattr(obj.rolling(20, min_periods=15), roll_func)(**kwargs)\n assert isna(result.iloc[23])\n assert not isna(result.iloc[24])\n\n assert not isna(result.iloc[-6])\n assert isna(result.iloc[-5])\n\n obj2 = Series(np.random.default_rng(2).standard_normal(20))\n result = getattr(obj2.rolling(10, min_periods=5), roll_func)(**kwargs)\n assert isna(result.iloc[3])\n assert notna(result.iloc[4])\n\n if roll_func != "sum":\n result0 = getattr(obj.rolling(20, min_periods=0), roll_func)(**kwargs)\n result1 = getattr(obj.rolling(20, min_periods=1), roll_func)(**kwargs)\n tm.assert_almost_equal(result0, result1)\n\n\ndef test_nans_count():\n obj = Series(np.random.default_rng(2).standard_normal(50))\n obj[:10] = np.nan\n obj[-10:] = np.nan\n result = obj.rolling(50, min_periods=30).count()\n tm.assert_almost_equal(\n result.iloc[-1], np.isfinite(obj[10:-10]).astype(float).sum()\n )\n\n\n@pytest.mark.parametrize(\n "roll_func, kwargs",\n [\n ["mean", {}],\n ["sum", {}],\n ["median", {}],\n ["min", {}],\n ["max", {}],\n ["std", {}],\n ["std", {"ddof": 0}],\n ["var", {}],\n ["var", {"ddof": 0}],\n ],\n)\n@pytest.mark.parametrize("minp", [0, 99, 100])\ndef test_min_periods(series, minp, roll_func, kwargs, step):\n result = getattr(\n series.rolling(len(series) + 1, min_periods=minp, step=step), roll_func\n )(**kwargs)\n expected = getattr(\n series.rolling(len(series), min_periods=minp, step=step), 
roll_func\n )(**kwargs)\n nan_mask = isna(result)\n tm.assert_series_equal(nan_mask, isna(expected))\n\n nan_mask = ~nan_mask\n tm.assert_almost_equal(result[nan_mask], expected[nan_mask])\n\n\ndef test_min_periods_count(series, step):\n result = series.rolling(len(series) + 1, min_periods=0, step=step).count()\n expected = series.rolling(len(series), min_periods=0, step=step).count()\n nan_mask = isna(result)\n tm.assert_series_equal(nan_mask, isna(expected))\n\n nan_mask = ~nan_mask\n tm.assert_almost_equal(result[nan_mask], expected[nan_mask])\n\n\n@pytest.mark.parametrize(\n "roll_func, kwargs, minp",\n [\n ["mean", {}, 15],\n ["sum", {}, 15],\n ["count", {}, 0],\n ["median", {}, 15],\n ["min", {}, 15],\n ["max", {}, 15],\n ["std", {}, 15],\n ["std", {"ddof": 0}, 15],\n ["var", {}, 15],\n ["var", {"ddof": 0}, 15],\n ],\n)\ndef test_center(roll_func, kwargs, minp):\n obj = Series(np.random.default_rng(2).standard_normal(50))\n obj[:10] = np.nan\n obj[-10:] = np.nan\n\n result = getattr(obj.rolling(20, min_periods=minp, center=True), roll_func)(\n **kwargs\n )\n expected = (\n getattr(\n concat([obj, Series([np.nan] * 9)]).rolling(20, min_periods=minp), roll_func\n )(**kwargs)\n .iloc[9:]\n .reset_index(drop=True)\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "roll_func, kwargs, minp, fill_value",\n [\n ["mean", {}, 10, None],\n ["sum", {}, 10, None],\n ["count", {}, 0, 0],\n ["median", {}, 10, None],\n ["min", {}, 10, None],\n ["max", {}, 10, None],\n ["std", {}, 10, None],\n ["std", {"ddof": 0}, 10, None],\n ["var", {}, 10, None],\n ["var", {"ddof": 0}, 10, None],\n ],\n)\ndef test_center_reindex_series(series, roll_func, kwargs, minp, fill_value):\n # shifter index\n s = [f"x{x:d}" for x in range(12)]\n\n series_xp = (\n getattr(\n series.reindex(list(series.index) + s).rolling(window=25, min_periods=minp),\n roll_func,\n )(**kwargs)\n .shift(-12)\n .reindex(series.index)\n )\n series_rs = getattr(\n series.rolling(window=25, 
            min_periods=minp, center=True), roll_func
    )(**kwargs)
    if fill_value is not None:
        series_xp = series_xp.fillna(fill_value)
    tm.assert_series_equal(series_xp, series_rs)


# center=True over window 25 should equal the shift(-12) of the uncentered
# result computed on an index padded with 12 dummy labels.
@pytest.mark.parametrize(
    "roll_func, kwargs, minp, fill_value",
    [
        ["mean", {}, 10, None],
        ["sum", {}, 10, None],
        ["count", {}, 0, 0],
        ["median", {}, 10, None],
        ["min", {}, 10, None],
        ["max", {}, 10, None],
        ["std", {}, 10, None],
        ["std", {"ddof": 0}, 10, None],
        ["var", {}, 10, None],
        ["var", {"ddof": 0}, 10, None],
    ],
)
def test_center_reindex_frame(frame, roll_func, kwargs, minp, fill_value):
    # shifter index
    s = [f"x{x:d}" for x in range(12)]

    frame_xp = (
        getattr(
            frame.reindex(list(frame.index) + s).rolling(window=25, min_periods=minp),
            roll_func,
        )(**kwargs)
        .shift(-12)
        .reindex(frame.index)
    )
    frame_rs = getattr(
        frame.rolling(window=25, min_periods=minp, center=True), roll_func
    )(**kwargs)
    if fill_value is not None:
        frame_xp = frame_xp.fillna(fill_value)
    tm.assert_frame_equal(frame_xp, frame_rs)


@pytest.mark.parametrize(
    "f",
    [
        lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
        lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
        lambda x: x.rolling(window=10, min_periods=5).max(),
        lambda x: x.rolling(window=10, min_periods=5).min(),
        lambda x: x.rolling(window=10, min_periods=5).sum(),
        lambda x: x.rolling(window=10, min_periods=5).mean(),
        lambda x: x.rolling(window=10, min_periods=5).std(),
        lambda x: x.rolling(window=10, min_periods=5).var(),
        lambda x: x.rolling(window=10, min_periods=5).skew(),
        lambda x: x.rolling(window=10, min_periods=5).kurt(),
        lambda x: x.rolling(window=10, min_periods=5).quantile(q=0.5),
        lambda x: x.rolling(window=10, min_periods=5).median(),
        lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
        lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
        pytest.param(
            lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
            marks=td.skip_if_no("scipy"),
        ),
    ],
)
def test_rolling_functions_window_non_shrinkage(f):
    # GH 7764
    # window (10) longer than the data (4) with min_periods=5: the result
    # keeps the original length/index and is all-NaN, never shrunk.
    s = Series(range(4))
    s_expected = Series(np.nan, index=s.index)
    df = DataFrame([[1, 5], [3, 2], [3, 9], [-1, 0]], columns=["A", "B"])
    df_expected = DataFrame(np.nan, index=df.index, columns=df.columns)

    s_result = f(s)
    tm.assert_series_equal(s_result, s_expected)

    df_result = f(df)
    tm.assert_frame_equal(df_result, df_expected)


def test_rolling_max_gh6297(step):
    """Replicate result expected in GH #6297"""
    indices = [datetime(1975, 1, i) for i in range(1, 6)]
    # So that we can have 2 datapoints on one of the days
    indices.append(datetime(1975, 1, 3, 6, 0))
    series = Series(range(1, 7), index=indices)
    # Use floats instead of ints as values
    series = series.map(lambda x: float(x))
    # Sort chronologically
    series = series.sort_index()

    expected = Series(
        [1.0, 2.0, 6.0, 4.0, 5.0],
        index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
    )[::step]
    x = series.resample("D").max().rolling(window=1, step=step).max()
    tm.assert_series_equal(expected, x)


# Daily resample followed by a window-1 rolling max: the rolling step just
# passes the daily aggregate through, so the last day reflects the chosen
# aggregation (max/median/mean) of its 3 datapoints.
def test_rolling_max_resample(step):
    indices = [datetime(1975, 1, i) for i in range(1, 6)]
    # So that we can have 3 datapoints on last day (4, 10, and 20)
    indices.append(datetime(1975, 1, 5, 1))
    indices.append(datetime(1975, 1, 5, 2))
    series = Series(list(range(5)) + [10, 20], index=indices)
    # Use floats instead of ints as values
    series = series.map(lambda x: float(x))
    # Sort chronologically
    series = series.sort_index()

    # Default how should be max
    expected = Series(
        [0.0, 1.0, 2.0, 3.0, 20.0],
        index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
    )[::step]
    x = series.resample("D").max().rolling(window=1, step=step).max()
    tm.assert_series_equal(expected, x)

    # Now specify median (10.0)
    expected = Series(
        [0.0, 1.0, 2.0, 3.0, 10.0],
        index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
    )[::step]
    x = series.resample("D").median().rolling(window=1, step=step).max()
    tm.assert_series_equal(expected, x)

    # Now specify mean (4+10+20)/3
    v = (4.0 + 10.0 + 20.0) / 3.0
    expected = Series(
        [0.0, 1.0, 2.0, 3.0, v],
        index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
    )[::step]
    x = series.resample("D").mean().rolling(window=1, step=step).max()
    tm.assert_series_equal(expected, x)


def test_rolling_min_resample(step):
    indices = [datetime(1975, 1, i) for i in range(1, 6)]
    # So that we can have 3 datapoints on last day (4, 10, and 20)
    indices.append(datetime(1975, 1, 5, 1))
    indices.append(datetime(1975, 1, 5, 2))
    series = Series(list(range(5)) + [10, 20], index=indices)
    # Use floats instead of ints as values
    series = series.map(lambda x: float(x))
    # Sort chronologically
    series = series.sort_index()

    # Default how should be min
    expected = Series(
        [0.0, 1.0, 2.0, 3.0, 4.0],
        index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
    )[::step]
    r = series.resample("D").min().rolling(window=1, step=step)
    tm.assert_series_equal(expected, r.min())


def test_rolling_median_resample():
    indices = [datetime(1975, 1, i) for i in range(1, 6)]
    # So that we can have 3 datapoints on last day (4, 10, and 20)
    indices.append(datetime(1975, 1, 5, 1))
    indices.append(datetime(1975, 1, 5, 2))
    series = Series(list(range(5)) + [10, 20], index=indices)
    # Use floats instead of ints as values
    series = series.map(lambda x: float(x))
    # Sort chronologically
    series = series.sort_index()

    # Default how should be median
    expected = Series(
        [0.0, 1.0, 2.0, 3.0, 10],
        index=DatetimeIndex([datetime(1975, 1, i, 0) for i in range(1, 6)], freq="D"),
    )
    x = series.resample("D").median().rolling(window=1).median()
    tm.assert_series_equal(expected, x)


def test_rolling_median_memory_error():
    # GH11722
    # Smoke test: rolling median over 20k rows must not raise MemoryError.
    n = 20000
    Series(np.random.default_rng(2).standard_normal(n)).rolling(
        window=2, center=False
    ).median()
    Series(np.random.default_rng(2).standard_normal(n)).rolling(
        window=2, center=False
    ).median()


@pytest.mark.parametrize(
    "data_type",
    [np.dtype(f"f{width}") for width in [4, 8]]
    + [np.dtype(f"{sign}{width}") for width in [1, 2, 4, 8] for sign in "ui"],
)
def test_rolling_min_max_numeric_types(data_type):
    # GH12373

    # Just testing that these don't throw exceptions and that
    # the return type is float64. Other tests will cover quantitative
    # correctness
    result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).max()
    assert result.dtypes[0] == np.dtype("f8")
    result = DataFrame(np.arange(20, dtype=data_type)).rolling(window=5).min()
    assert result.dtypes[0] == np.dtype("f8")


@pytest.mark.parametrize(
    "f",
    [
        lambda x: x.rolling(window=10, min_periods=0).count(),
        lambda x: x.rolling(window=10, min_periods=5).cov(x, pairwise=False),
        lambda x: x.rolling(window=10, min_periods=5).corr(x, pairwise=False),
        lambda x: x.rolling(window=10, min_periods=5).max(),
        lambda x: x.rolling(window=10, min_periods=5).min(),
        lambda x: x.rolling(window=10, min_periods=5).sum(),
        lambda x: x.rolling(window=10, min_periods=5).mean(),
        lambda x: x.rolling(window=10, min_periods=5).std(),
        lambda x: x.rolling(window=10, min_periods=5).var(),
        lambda x: x.rolling(window=10, min_periods=5).skew(),
        lambda x: x.rolling(window=10, min_periods=5).kurt(),
        lambda x: x.rolling(window=10, min_periods=5).quantile(0.5),
        lambda x: x.rolling(window=10, min_periods=5).median(),
        lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=False),
        lambda x: x.rolling(window=10, min_periods=5).apply(sum, raw=True),
        pytest.param(
            lambda x: x.rolling(win_type="boxcar", window=10, min_periods=5).mean(),
            marks=td.skip_if_no("scipy"),
        ),
    ],
)
def test_moment_functions_zero_length(f):
    # GH 8056
    # Zero-length inputs round-trip unchanged (same index/columns/dtype).
    s = Series(dtype=np.float64)
    s_expected = s
    df1 = DataFrame()
    df1_expected = df1
    df2 = DataFrame(columns=["a"])
    df2["a"] = df2["a"].astype("float64")
    df2_expected = df2

    s_result = f(s)
    tm.assert_series_equal(s_result, s_expected)

    df1_result = f(df1)
    tm.assert_frame_equal(df1_result, df1_expected)

    df2_result = f(df2)
    tm.assert_frame_equal(df2_result, df2_expected)
from functools import partial

import numpy as np
import pytest

from pandas import (
    DataFrame,
    Series,
    concat,
    isna,
    notna,
)
import pandas._testing as tm

from pandas.tseries import offsets


def scoreatpercentile(a, per):
    """Reference quantile of ``a`` at fraction ``per`` (0.0-1.0).

    Sorts the data and linearly interpolates between the two neighboring
    order statistics, mirroring the 'linear' interpolation rule that
    ``rolling(...).quantile`` is compared against below.
    """
    values = np.sort(a, axis=0)

    # NOTE(review): ``per / 1.0`` is a no-op (per is already a fraction);
    # presumably kept for symmetry with a percent-based variant — confirm.
    idx = int(per / 1.0 * (values.shape[0] - 1))

    if idx == values.shape[0] - 1:
        retval = values[-1]

    else:
        # Interpolate between the order statistics bracketing ``per``.
        qlow = idx / (values.shape[0] - 1)
        qhig = (idx + 1) / (values.shape[0] - 1)
        vlow = values[idx]
        vhig = values[idx + 1]
        retval = vlow + (vhig - vlow) * (per - qlow) / (qhig - qlow)

    return retval


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_series(series, q, step):
    compare_func = partial(scoreatpercentile, per=q)
    result = series.rolling(50, step=step).quantile(q)
    assert isinstance(result, Series)
    # Last emitted position given the step; compare its 50-obs window.
    end = range(0, len(series), step or 1)[-1] + 1
    tm.assert_almost_equal(result.iloc[-1], compare_func(series[end - 50 : end]))


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_frame(raw, frame, q, step):
    compare_func = partial(scoreatpercentile, per=q)
    result = frame.rolling(50, step=step).quantile(q)
    assert isinstance(result, DataFrame)
    end = range(0, len(frame), step or 1)[-1] + 1
    tm.assert_series_equal(
        result.iloc[-1, :],
        frame.iloc[end - 50 : end, :].apply(compare_func, axis=0, raw=raw),
        check_names=False,
    )


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_time_rule_series(series, q):
    compare_func = partial(scoreatpercentile, per=q)
    win = 25
    ser = series[::2].resample("B").mean()
    series_result = ser.rolling(window=win, min_periods=10).quantile(q)
    last_date = series_result.index[-1]
    prev_date = last_date - 24 * offsets.BDay()

    trunc_series = series[::2].truncate(prev_date, last_date)
    tm.assert_almost_equal(series_result.iloc[-1], compare_func(trunc_series))


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_time_rule_frame(raw, frame, q):
    compare_func = partial(scoreatpercentile, per=q)
    win = 25
    frm = frame[::2].resample("B").mean()
    frame_result = frm.rolling(window=win, min_periods=10).quantile(q)
    last_date = frame_result.index[-1]
    prev_date = last_date - 24 * offsets.BDay()

    trunc_frame = frame[::2].truncate(prev_date, last_date)
    tm.assert_series_equal(
        frame_result.xs(last_date),
        trunc_frame.apply(compare_func, raw=raw),
        check_names=False,
    )


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_nans(q):
    compare_func = partial(scoreatpercentile, per=q)
    obj = Series(np.random.default_rng(2).standard_normal(50))
    obj[:10] = np.nan
    obj[-10:] = np.nan

    result = obj.rolling(50, min_periods=30).quantile(q)
    tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))

    # min_periods is working correctly
    result = obj.rolling(20, min_periods=15).quantile(q)
    assert isna(result.iloc[23])
    assert not isna(result.iloc[24])

    assert not isna(result.iloc[-6])
    assert isna(result.iloc[-5])

    obj2 = Series(np.random.default_rng(2).standard_normal(20))
    result = obj2.rolling(10, min_periods=5).quantile(q)
    assert isna(result.iloc[3])
    assert notna(result.iloc[4])

    # min_periods=0 behaves the same as min_periods=1
    result0 = obj.rolling(20, min_periods=0).quantile(q)
    result1 = obj.rolling(20, min_periods=1).quantile(q)
    tm.assert_almost_equal(result0, result1)


@pytest.mark.parametrize("minp", [0, 99, 100])
@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_min_periods(series, minp, q, step):
    result = series.rolling(len(series) + 1, min_periods=minp, step=step).quantile(q)
    expected = series.rolling(len(series), min_periods=minp, step=step).quantile(q)
    nan_mask = isna(result)
    tm.assert_series_equal(nan_mask, isna(expected))

    nan_mask = ~nan_mask
    tm.assert_almost_equal(result[nan_mask], expected[nan_mask])


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_center(q):
    obj = Series(np.random.default_rng(2).standard_normal(50))
    obj[:10] = np.nan
    obj[-10:] = np.nan

    # center=True equals the trailing result on data padded with 9 NaNs.
    result = obj.rolling(20, center=True).quantile(q)
    expected = (
        concat([obj, Series([np.nan] * 9)])
        .rolling(20)
        .quantile(q)
        .iloc[9:]
        .reset_index(drop=True)
    )
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_center_reindex_series(series, q):
    # shifter index
    s = [f"x{x:d}" for x in range(12)]

    series_xp = (
        series.reindex(list(series.index) + s)
        .rolling(window=25)
        .quantile(q)
        .shift(-12)
        .reindex(series.index)
    )

    series_rs = series.rolling(window=25, center=True).quantile(q)
    tm.assert_series_equal(series_xp, series_rs)


@pytest.mark.parametrize("q", [0.0, 0.1, 0.5, 0.9, 1.0])
def test_center_reindex_frame(frame, q):
    # shifter index
    s = [f"x{x:d}" for x in range(12)]

    frame_xp = (
        frame.reindex(list(frame.index) + s)
        .rolling(window=25)
        .quantile(q)
        .shift(-12)
        .reindex(frame.index)
    )
    frame_rs = frame.rolling(window=25, center=True).quantile(q)
    tm.assert_frame_equal(frame_xp, frame_rs)


def test_keyword_quantile_deprecated():
    # GH #52550
    # Passing the value by the keyword name ``quantile=`` warns.
    s = Series([1, 2, 3, 4])
    with tm.assert_produces_warning(FutureWarning):
        s.rolling(2).quantile(quantile=0.4)
from functools import partial

import numpy as np
import pytest

from pandas import (
    DataFrame,
    Series,
    concat,
    isna,
    notna,
)
import pandas._testing as tm

from pandas.tseries import offsets


# rolling skew/kurt are validated against scipy.stats with bias=False
# (pandas computes the bias-corrected sample statistics).
@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_series(series, sp_func, roll_func):
    sp_stats = pytest.importorskip("scipy.stats")

    compare_func = partial(getattr(sp_stats, sp_func), bias=False)
    result = getattr(series.rolling(50), roll_func)()
    assert isinstance(result, Series)
    tm.assert_almost_equal(result.iloc[-1], compare_func(series[-50:]))


@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_frame(raw, frame, sp_func, roll_func):
    sp_stats = pytest.importorskip("scipy.stats")

    compare_func = partial(getattr(sp_stats, sp_func), bias=False)
    result = getattr(frame.rolling(50), roll_func)()
    assert isinstance(result, DataFrame)
    tm.assert_series_equal(
        result.iloc[-1, :],
        frame.iloc[-50:, :].apply(compare_func, axis=0, raw=raw),
        check_names=False,
    )


@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_time_rule_series(series, sp_func, roll_func):
    sp_stats = pytest.importorskip("scipy.stats")

    compare_func = partial(getattr(sp_stats, sp_func), bias=False)
    win = 25
    ser = series[::2].resample("B").mean()
    series_result = getattr(ser.rolling(window=win, min_periods=10), roll_func)()
    last_date = series_result.index[-1]
    prev_date = last_date - 24 * offsets.BDay()

    trunc_series = series[::2].truncate(prev_date, last_date)
    tm.assert_almost_equal(series_result.iloc[-1], compare_func(trunc_series))


@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_time_rule_frame(raw, frame, sp_func, roll_func):
    sp_stats = pytest.importorskip("scipy.stats")

    compare_func = partial(getattr(sp_stats, sp_func), bias=False)
    win = 25
    frm = frame[::2].resample("B").mean()
    frame_result = getattr(frm.rolling(window=win, min_periods=10), roll_func)()
    last_date = frame_result.index[-1]
    prev_date = last_date - 24 * offsets.BDay()

    trunc_frame = frame[::2].truncate(prev_date, last_date)
    tm.assert_series_equal(
        frame_result.xs(last_date),
        trunc_frame.apply(compare_func, raw=raw),
        check_names=False,
    )


@pytest.mark.parametrize("sp_func, roll_func", [["kurtosis", "kurt"], ["skew", "skew"]])
def test_nans(sp_func, roll_func):
    sp_stats = pytest.importorskip("scipy.stats")

    compare_func = partial(getattr(sp_stats, sp_func), bias=False)
    obj = Series(np.random.default_rng(2).standard_normal(50))
    obj[:10] = np.nan
    obj[-10:] = np.nan

    result = getattr(obj.rolling(50, min_periods=30), roll_func)()
    tm.assert_almost_equal(result.iloc[-1], compare_func(obj[10:-10]))

    # min_periods is working correctly
    result = getattr(obj.rolling(20, min_periods=15), roll_func)()
    assert isna(result.iloc[23])
    assert not isna(result.iloc[24])

    assert not isna(result.iloc[-6])
    assert isna(result.iloc[-5])

    obj2 = Series(np.random.default_rng(2).standard_normal(20))
    result = getattr(obj2.rolling(10, min_periods=5), roll_func)()
    assert isna(result.iloc[3])
    assert notna(result.iloc[4])

    # min_periods=0 behaves the same as min_periods=1
    result0 = getattr(obj.rolling(20, min_periods=0), roll_func)()
    result1 = getattr(obj.rolling(20, min_periods=1), roll_func)()
    tm.assert_almost_equal(result0, result1)


@pytest.mark.parametrize("minp", [0, 99, 100])
@pytest.mark.parametrize("roll_func", ["kurt", "skew"])
def test_min_periods(series, minp, roll_func, step):
    result = getattr(
        series.rolling(len(series) + 1, min_periods=minp, step=step), roll_func
    )()
    expected = getattr(
        series.rolling(len(series), min_periods=minp, step=step), roll_func
    )()
    nan_mask = isna(result)
    tm.assert_series_equal(nan_mask, isna(expected))

    nan_mask = ~nan_mask
    tm.assert_almost_equal(result[nan_mask], expected[nan_mask])


@pytest.mark.parametrize("roll_func", ["kurt", "skew"])
def test_center(roll_func):
    obj = Series(np.random.default_rng(2).standard_normal(50))
    obj[:10] = np.nan
    obj[-10:] = np.nan

    # center=True equals the trailing result on data padded with 9 NaNs.
    result = getattr(obj.rolling(20, center=True), roll_func)()
    expected = (
        getattr(concat([obj, Series([np.nan] * 9)]).rolling(20), roll_func)()
        .iloc[9:]
        .reset_index(drop=True)
    )
    tm.assert_series_equal(result, expected)


@pytest.mark.parametrize("roll_func", ["kurt", "skew"])
def test_center_reindex_series(series, roll_func):
    # shifter index
    s = [f"x{x:d}" for x in range(12)]

    series_xp = (
        getattr(
            series.reindex(list(series.index) + s).rolling(window=25),
            roll_func,
        )()
        .shift(-12)
        .reindex(series.index)
    )
    series_rs = getattr(series.rolling(window=25, center=True), roll_func)()
    tm.assert_series_equal(series_xp, series_rs)


@pytest.mark.slow
@pytest.mark.parametrize("roll_func", ["kurt", "skew"])
def test_center_reindex_frame(frame, roll_func):
    # shifter index
    s = [f"x{x:d}" for x in range(12)]

    frame_xp = (
        getattr(
            frame.reindex(list(frame.index) + s).rolling(window=25),
            roll_func,
        )()
        .shift(-12)
        .reindex(frame.index)
    )
    frame_rs = getattr(frame.rolling(window=25, center=True), roll_func)()
    tm.assert_frame_equal(frame_xp, frame_rs)


def test_rolling_skew_edge_cases(step):
    expected = Series([np.nan] * 4 + [0.0])[::step]
    # yields all NaN (0 variance)
    d = Series([1] * 5)
    x = d.rolling(window=5, step=step).skew()
    # index 4 should be 0 as it contains 5 same obs
    tm.assert_series_equal(expected, x)

    expected = Series([np.nan] * 5)[::step]
    # yields all NaN (window too small)
    d = Series(np.random.default_rng(2).standard_normal(5))
    x = d.rolling(window=2, step=step).skew()
    tm.assert_series_equal(expected, x)

    # yields [NaN, NaN, NaN, 0.177994, 1.548824]
    d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
    expected = Series([np.nan, np.nan, np.nan, 0.177994, 1.548824])[::step]
    x = d.rolling(window=4, step=step).skew()
    tm.assert_series_equal(expected, x)


def test_rolling_kurt_edge_cases(step):
    expected = Series([np.nan] * 4 + [-3.0])[::step]

    # yields all NaN (0 variance)
    d = Series([1] * 5)
    x = d.rolling(window=5, step=step).kurt()
    tm.assert_series_equal(expected, x)

    # yields all NaN (window too small)
    expected = Series([np.nan] * 5)[::step]
    d = Series(np.random.default_rng(2).standard_normal(5))
    x = d.rolling(window=3, step=step).kurt()
    tm.assert_series_equal(expected, x)

    # yields [NaN, NaN, NaN, 1.224307, 2.671499]
    d = Series([-1.50837035, -0.1297039, 0.19501095, 1.73508164, 0.41941401])
    expected = Series([np.nan, np.nan, np.nan, 1.224307, 2.671499])[::step]
    x = d.rolling(window=4, step=step).kurt()
    tm.assert_series_equal(expected, x)


def test_rolling_skew_eq_value_fperr(step):
    # #18804 all rolling skew for all equal values should return Nan
    # #46717 update: all equal values should return 0 instead of NaN
    a = Series([1.1] * 15).rolling(window=10, step=step).skew()
    assert (a[a.index >= 9] == 0).all()
    assert a[a.index < 9].isna().all()


def test_rolling_kurt_eq_value_fperr(step):
    # #18804 all rolling kurt for all equal values should return Nan
    # #46717 update: all equal values should return -3 instead of NaN
    a = Series([1.1] * 15).rolling(window=10, step=step).kurt()
    assert (a[a.index >= 9] == -3).all()
    assert a[a.index < 9].isna().all()
import numpy as np
import pytest

import pandas.util._test_decorators as td

from pandas import (
    DataFrame,
    DatetimeIndex,
    Index,
    MultiIndex,
    NaT,
    Series,
    Timestamp,
    date_range,
)
import pandas._testing as tm

from pandas.tseries import offsets


# Evenly spaced 1-second DatetimeIndex frame.
@pytest.fixture
def regular():
    return DataFrame(
        {"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
    ).set_index("A")


# Unevenly spaced ("ragged") DatetimeIndex frame.
@pytest.fixture
def ragged():
    df = DataFrame({"B": range(5)})
    df.index = [
        Timestamp("20130101 09:00:00"),
        Timestamp("20130101 09:00:02"),
        Timestamp("20130101 09:00:03"),
        Timestamp("20130101 09:00:05"),
        Timestamp("20130101 09:00:06"),
    ]
    return df


class TestRollingTS:
    # rolling time-series friendly
    # xref GH13327

    def test_doc_string(self):
        # Smoke test replicating the docstring example for offset windows.
        df = DataFrame(
            {"B": [0, 1, 2, np.nan, 4]},
            index=[
                Timestamp("20130101 09:00:00"),
                Timestamp("20130101 09:00:02"),
                Timestamp("20130101 09:00:03"),
                Timestamp("20130101 09:00:05"),
                Timestamp("20130101 09:00:06"),
            ],
        )
        df
        df.rolling("2s").sum()

    def test_invalid_window_non_int(self, regular):
        # not a valid freq
        msg = "passed window foobar is not compatible with a datetimelike index"
        with pytest.raises(ValueError, match=msg):
            regular.rolling(window="foobar")
        # not a datetimelike index
        msg = "window must be an integer"
        with pytest.raises(ValueError, match=msg):
            regular.reset_index().rolling(window="foobar")

    @pytest.mark.parametrize("freq", ["2MS", offsets.MonthBegin(2)])
    def test_invalid_window_nonfixed(self, freq, regular):
        # non-fixed freqs
        msg = "\\<2 \\* MonthBegins\\> is a non-fixed frequency"
        with pytest.raises(ValueError, match=msg):
            regular.rolling(window=freq)

    @pytest.mark.parametrize("freq", ["1D", offsets.Day(2), "2ms"])
    def test_valid_window(self, freq, regular):
        regular.rolling(window=freq)

    @pytest.mark.parametrize("minp", [1.0, "foo", np.array([1, 2, 3])])
    def test_invalid_minp(self, minp, regular):
        # non-integer min_periods
        msg = (
            r"local variable 'minp' referenced before assignment|"
            "min_periods must be an integer"
        )
        with pytest.raises(ValueError, match=msg):
            regular.rolling(window="1D", min_periods=minp)

    def test_on(self, regular):
        df = regular

        # not a valid column
        msg = (
            r"invalid on specified as foobar, must be a column "
            "\\(of DataFrame\\), an Index or None"
        )
        with pytest.raises(ValueError, match=msg):
            df.rolling(window="2s", on="foobar")

        # column is valid
        df = df.copy()
        df["C"] = date_range("20130101", periods=len(df))
        df.rolling(window="2d", on="C").sum()

        # invalid columns
        msg = "window must be an integer"
        with pytest.raises(ValueError, match=msg):
            df.rolling(window="2d", on="B")

        # ok even though on non-selected
        df.rolling(window="2d", on="C").B.sum()

    def test_monotonic_on(self):
        # on/index must be monotonic
        df = DataFrame(
            {"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
        )

        assert df.A.is_monotonic_increasing
        df.rolling("2s", on="A").sum()

        df = df.set_index("A")
        assert df.index.is_monotonic_increasing
        df.rolling("2s").sum()

    def test_non_monotonic_on(self):
        # GH 19248
        df = DataFrame(
            {"A": date_range("20130101", periods=5, freq="s"), "B": range(5)}
        )
        df = df.set_index("A")
        non_monotonic_index = df.index.to_list()
        non_monotonic_index[0] = non_monotonic_index[3]
        df.index = non_monotonic_index

        assert not df.index.is_monotonic_increasing

        msg = "index values must be monotonic"
        with pytest.raises(ValueError, match=msg):
            df.rolling("2s").sum()

        df = df.reset_index()

        msg = (
            r"invalid on specified as A, must be a column "
            "\\(of DataFrame\\), an Index or None"
        )
        with pytest.raises(ValueError, match=msg):
            df.rolling("2s", on="A").sum()

    def test_frame_on(self):
        df = DataFrame(
            {"B": range(5), "C": date_range("20130101 09:00:00", periods=5, freq="3s")}
        )

        df["A"] = [
            Timestamp("20130101 09:00:00"),
            Timestamp("20130101 09:00:02"),
            Timestamp("20130101 09:00:03"),
            Timestamp("20130101 09:00:05"),
            Timestamp("20130101 09:00:06"),
        ]

        # we are doing simulating using 'on'
        expected = df.set_index("A").rolling("2s").B.sum().reset_index(drop=True)

        result = df.rolling("2s", on="A").B.sum()
        tm.assert_series_equal(result, expected)

        # test as a frame
        # we should be ignoring the 'on' as an aggregation column
        # note that the expected is setting, computing, and resetting
        # so the columns need to be switched compared
        # to the actual result where they are ordered as in the
        # original
        expected = (
            df.set_index("A").rolling("2s")[["B"]].sum().reset_index()[["B", "A"]]
        )

        result = df.rolling("2s", on="A")[["B"]].sum()
        tm.assert_frame_equal(result, expected)

    def test_frame_on2(self, unit):
        # using multiple aggregation columns
        dti = DatetimeIndex(
            [
                Timestamp("20130101 09:00:00"),
                Timestamp("20130101 09:00:02"),
                Timestamp("20130101 09:00:03"),
                Timestamp("20130101 09:00:05"),
                Timestamp("20130101 09:00:06"),
            ]
        ).as_unit(unit)
        df = DataFrame(
            {
                "A": [0, 1, 2, 3, 4],
                "B": [0, 1, 2, np.nan, 4],
                "C": dti,
            },
            columns=["A", "C", "B"],
        )

        expected1 = DataFrame(
            {"A": [0.0, 1, 3, 3, 7], "B": [0, 1, 3, np.nan, 4], "C": df["C"]},
            columns=["A", "C", "B"],
        )

        result = df.rolling("2s", on="C").sum()
        expected = expected1
        tm.assert_frame_equal(result, expected)

        expected = Series([0, 1, 3, np.nan, 4], name="B")
        result = df.rolling("2s", on="C").B.sum()
        tm.assert_series_equal(result, expected)

        expected = expected1[["A", "B", "C"]]
        result = df.rolling("2s", on="C")[["A", "B", "C"]].sum()
        tm.assert_frame_equal(result, expected)

    def test_basic_regular(self, regular):
        df = regular.copy()

        df.index = date_range("20130101", periods=5, freq="D")
        expected = df.rolling(window=1, min_periods=1).sum()
        result = df.rolling(window="1D").sum()
        tm.assert_frame_equal(result, expected)

        df.index = date_range("20130101", periods=5, freq="2D")
        expected = df.rolling(window=1, min_periods=1).sum()
        result = df.rolling(window="2D", min_periods=1).sum()
        tm.assert_frame_equal(result, expected)

        expected = df.rolling(window=1, min_periods=1).sum()
        result = df.rolling(window="2D", min_periods=1).sum()
        tm.assert_frame_equal(result, expected)

        expected = df.rolling(window=1).sum()
        result = df.rolling(window="2D").sum()
        tm.assert_frame_equal(result, expected)

    def test_min_periods(self, regular):
        # compare for min_periods
        df = regular

        # these slightly different
        expected = df.rolling(2, min_periods=1).sum()
        result = df.rolling("2s").sum()
        tm.assert_frame_equal(result, expected)

        expected = df.rolling(2, min_periods=1).sum()
        result = df.rolling("2s", min_periods=1).sum()
        tm.assert_frame_equal(result, expected)

    def test_closed(self, regular, unit):
        # xref GH13965

        dti = DatetimeIndex(
            [
                Timestamp("20130101 09:00:01"),
                Timestamp("20130101 09:00:02"),
                Timestamp("20130101 09:00:03"),
                Timestamp("20130101 09:00:04"),
                Timestamp("20130101 09:00:06"),
            ]
        ).as_unit(unit)

        df = DataFrame(
            {"A": [1] * 5},
            index=dti,
        )

        # closed must be 'right', 'left', 'both', 'neither'
        msg = "closed must be 'right', 'left', 'both' or 'neither'"
        with pytest.raises(ValueError, match=msg):
            regular.rolling(window="2s", closed="blabla")

        expected = df.copy()
        expected["A"] = [1.0, 2, 2, 2, 1]
        result = df.rolling("2s", closed="right").sum()
        tm.assert_frame_equal(result, expected)

        # default should be 'right'
        result = df.rolling("2s").sum()
        tm.assert_frame_equal(result, expected)

        expected = df.copy()
        expected["A"] = [1.0, 2, 3, 3, 2]
        result = df.rolling("2s", closed="both").sum()
        tm.assert_frame_equal(result, expected)

        expected = df.copy()
        expected["A"] = [np.nan, 1.0, 2, 2, 1]
        result = df.rolling("2s", closed="left").sum()
        tm.assert_frame_equal(result, expected)

        expected = df.copy()
        expected["A"] = [np.nan, 1.0, 1, 1, np.nan]
        result = df.rolling("2s", closed="neither").sum()
        tm.assert_frame_equal(result, expected)

    def test_ragged_sum(self, ragged):
        df = ragged
        result = df.rolling(window="1s", min_periods=1).sum()
        expected = df.copy()
        expected["B"] = [0.0, 1, 2, 3, 4]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="2s", min_periods=1).sum()
        expected = df.copy()
        expected["B"] = [0.0, 1, 3, 3, 7]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="2s", min_periods=2).sum()
        expected = df.copy()
        expected["B"] = [np.nan, np.nan, 3, np.nan, 7]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="3s", min_periods=1).sum()
        expected = df.copy()
        expected["B"] = [0.0, 1, 3, 5, 7]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="3s").sum()
        expected = df.copy()
        expected["B"] = [0.0, 1, 3, 5, 7]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="4s", min_periods=1).sum()
        expected = df.copy()
        expected["B"] = [0.0, 1, 3, 6, 9]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="4s", min_periods=3).sum()
        expected = df.copy()
        expected["B"] = [np.nan, np.nan, 3, 6, 9]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="5s", min_periods=1).sum()
        expected = df.copy()
        expected["B"] = [0.0, 1, 3, 6, 10]
        tm.assert_frame_equal(result, expected)

    def test_ragged_mean(self, ragged):
        df = ragged
        result = df.rolling(window="1s", min_periods=1).mean()
        expected = df.copy()
        expected["B"] = [0.0, 1, 2, 3, 4]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="2s", min_periods=1).mean()
        expected = df.copy()
        expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
        tm.assert_frame_equal(result, expected)

    def test_ragged_median(self, ragged):
        df = ragged
        result = df.rolling(window="1s", min_periods=1).median()
        expected = df.copy()
        expected["B"] = [0.0, 1, 2, 3, 4]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="2s", min_periods=1).median()
        expected = df.copy()
        expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
        tm.assert_frame_equal(result, expected)

    def test_ragged_quantile(self, ragged):
        df = ragged
        result = df.rolling(window="1s", min_periods=1).quantile(0.5)
        expected = df.copy()
        expected["B"] = [0.0, 1, 2, 3, 4]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="2s", min_periods=1).quantile(0.5)
        expected = df.copy()
        expected["B"] = [0.0, 1, 1.5, 3.0, 3.5]
        tm.assert_frame_equal(result, expected)

    def test_ragged_std(self, ragged):
        df = ragged
        result = df.rolling(window="1s", min_periods=1).std(ddof=0)
        expected = df.copy()
        expected["B"] = [0.0] * 5
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="1s", min_periods=1).std(ddof=1)
        expected = df.copy()
        expected["B"] = [np.nan] * 5
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="3s", min_periods=1).std(ddof=0)
        expected = df.copy()
        expected["B"] = [0.0] + [0.5] * 4
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="5s", min_periods=1).std(ddof=1)
        expected = df.copy()
        expected["B"] = [np.nan, 0.707107, 1.0, 1.0, 1.290994]
        tm.assert_frame_equal(result, expected)

    def test_ragged_var(self, ragged):
        df = ragged
        result = df.rolling(window="1s", min_periods=1).var(ddof=0)
        expected = df.copy()
        expected["B"] = [0.0] * 5
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="1s", min_periods=1).var(ddof=1)
        expected = df.copy()
        expected["B"] = [np.nan] * 5
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="3s", min_periods=1).var(ddof=0)
        expected = df.copy()
        expected["B"] = [0.0] + [0.25] * 4
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="5s", min_periods=1).var(ddof=1)
        expected = df.copy()
        expected["B"] = [np.nan, 0.5, 1.0, 1.0, 1 + 2 / 3.0]
        tm.assert_frame_equal(result, expected)

    def test_ragged_skew(self, ragged):
        df = ragged
        result = df.rolling(window="3s", min_periods=1).skew()
        expected = df.copy()
        expected["B"] = [np.nan] * 5
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="5s", min_periods=1).skew()
        expected = df.copy()
        expected["B"] = [np.nan] * 2 + [0.0, 0.0, 0.0]
        tm.assert_frame_equal(result, expected)

    def test_ragged_kurt(self, ragged):
        df = ragged
        result = df.rolling(window="3s", min_periods=1).kurt()
        expected = df.copy()
        expected["B"] = [np.nan] * 5
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="5s", min_periods=1).kurt()
        expected = df.copy()
        expected["B"] = [np.nan] * 4 + [-1.2]
        tm.assert_frame_equal(result, expected)

    def test_ragged_count(self, ragged):
        df = ragged
        result = df.rolling(window="1s", min_periods=1).count()
        expected = df.copy()
        expected["B"] = [1.0, 1, 1, 1, 1]
        tm.assert_frame_equal(result, expected)

        df = ragged
        result = df.rolling(window="1s").count()
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="2s", min_periods=1).count()
        expected = df.copy()
        expected["B"] = [1.0, 1, 2, 1, 2]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="2s", min_periods=2).count()
        expected = df.copy()
        expected["B"] = [np.nan, np.nan, 2, np.nan, 2]
        tm.assert_frame_equal(result, expected)

    def test_regular_min(self):
        df = DataFrame(
            {"A": date_range("20130101", periods=5, freq="s"), "B": [0.0, 1, 2, 3, 4]}
        ).set_index("A")
        result = df.rolling("1s").min()
        expected = df.copy()
        expected["B"] = [0.0, 1, 2, 3, 4]
        tm.assert_frame_equal(result, expected)

        df = DataFrame(
            {"A": date_range("20130101", periods=5, freq="s"), "B": [5, 4, 3, 4, 5]}
        ).set_index("A")

        # NOTE(review): this re-asserts the previous result/expected pair
        # (both unchanged since the last check) — looks like a stray
        # duplicate assertion; confirm before removing.
        tm.assert_frame_equal(result, expected)
        result = df.rolling("2s").min()
        expected = df.copy()
        expected["B"] = [5.0, 4, 3, 3, 4]
        tm.assert_frame_equal(result, expected)

        result = df.rolling("5s").min()
        expected = df.copy()
        expected["B"] = [5.0, 4, 3, 3, 3]
        tm.assert_frame_equal(result, expected)

    def test_ragged_min(self, ragged):
        df = ragged

        result = df.rolling(window="1s", min_periods=1).min()
        expected = df.copy()
        expected["B"] = [0.0, 1, 2, 3, 4]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="2s", min_periods=1).min()
        expected = df.copy()
        expected["B"] = [0.0, 1, 1, 3, 3]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="5s", min_periods=1).min()
        expected = df.copy()
        expected["B"] = [0.0, 0, 0, 1, 1]
        tm.assert_frame_equal(result, expected)

    def test_perf_min(self):
        # Offset-window min should closely match the integer-window min
        # on a regular 1-second index (0.01 tolerance on random data).
        N = 10000

        dfp = DataFrame(
            {"B": np.random.default_rng(2).standard_normal(N)},
            index=date_range("20130101", periods=N, freq="s"),
        )
        expected = dfp.rolling(2, min_periods=1).min()
        result = dfp.rolling("2s").min()
        assert ((result - expected) < 0.01).all().all()

        expected = dfp.rolling(200, min_periods=1).min()
        result = dfp.rolling("200s").min()
        assert ((result - expected) < 0.01).all().all()

    def test_ragged_max(self, ragged):
        df = ragged

        result = df.rolling(window="1s", min_periods=1).max()
        expected = df.copy()
        expected["B"] = [0.0, 1, 2, 3, 4]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="2s", min_periods=1).max()
        expected = df.copy()
        expected["B"] = [0.0, 1, 2, 3, 4]
        tm.assert_frame_equal(result, expected)

        result = df.rolling(window="5s", min_periods=1).max()
        expected = df.copy()
        expected["B"] = [0.0, 1, 2, 3, 4]
        tm.assert_frame_equal(result, expected)

    @pytest.mark.parametrize(
        "freq, op, result_data",
        [
            ("ms", "min", [0.0] * 10),
            ("ms", "mean", [0.0] * 9 + [2.0 / 9]),
            ("ms", "max", [0.0] * 9 + [2.0]),
            ("s", "min", [0.0] * 10),
            ("s", "mean", [0.0] * 9 + [2.0 / 9]),
            ("s", "max", [0.0] * 9 + [2.0]),
            ("min", "min", [0.0] * 10),
            ("min", "mean", [0.0] * 9 + [2.0 / 9]),
            ("min", "max", [0.0] * 9 + [2.0]),
            ("h", "min", [0.0] * 10),
            ("h", "mean", [0.0] * 9 + [2.0 / 9]),
            ("h", "max", [0.0] * 9 + [2.0]),
            ("D", "min", [0.0] * 10),
            ("D", "mean", [0.0] * 9 + [2.0 / 9]),
            ("D", "max", [0.0] * 9 + [2.0]),
        ],
    )
    def test_freqs_ops(self, freq, op, result_data):
        # GH 21096
        index = date_range(start="2018-1-1 01:00:00", freq=f"1{freq}", periods=10)
        # Explicit cast to float to avoid implicit cast when setting nan
        s = Series(data=0, index=index, dtype="float")
        s.iloc[1] = np.nan
        s.iloc[-1] = 2
        result = getattr(s.rolling(window=f"10{freq}"), op)()
        expected = Series(data=result_data, index=index)

        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "f",
        [
            "sum",
            "mean",
            "count",
            "median",
            "std",
            "var",
            "kurt",
            "skew",
            "min",
            "max",
        ],
    )
    def test_all(self, f, regular):
        # simple comparison of integer vs time-based windowing
        df = regular * 2
        er = df.rolling(window=1)
        r = df.rolling(window="1s")

        result = getattr(r, f)()
        expected = getattr(er, f)()
        tm.assert_frame_equal(result, expected)

        result = r.quantile(0.5)
        expected = er.quantile(0.5)
        tm.assert_frame_equal(result, expected)

    def test_all2(self, arithmetic_win_operators):
        f = arithmetic_win_operators
        # more sophisticated comparison of integer vs.
        # time-based windowing
        df = DataFrame(
            {"B": np.arange(50)}, index=date_range("20130101", periods=50, freq="h")
        )
        # in-range data
        dft = df.between_time("09:00", "16:00")

        r = dft.rolling(window="5h")

        result = getattr(r, f)()

        # we need to roll the days separately
        # to compare with a time-based roll
        # finally groupby-apply will return a multi-index
        # so we need to drop the day
        def agg_by_day(x):
            x = x.between_time("09:00", "16:00")
            return getattr(x.rolling(5, min_periods=1), f)()

        expected = (
            df.groupby(df.index.day).apply(agg_by_day).reset_index(level=0, drop=True)
        )

        tm.assert_frame_equal(result, expected)

    def test_rolling_cov_offset(self):
        # GH16058

        idx = date_range("2017-01-01", periods=24, freq="1h")
        ss = Series(np.arange(len(idx)), index=idx)

        result = ss.rolling("2h").cov()
        expected = Series([np.nan] + [0.5] * (len(idx) - 1), index=idx)
        tm.assert_series_equal(result, expected)

        expected2 = ss.rolling(2, min_periods=1).cov()
        tm.assert_series_equal(result, expected2)

        result = ss.rolling("3h").cov()
        expected = Series([np.nan, 0.5] + [1.0] * (len(idx) - 2), index=idx)
        tm.assert_series_equal(result, expected)

        expected2 = ss.rolling(3, min_periods=1).cov()
        tm.assert_series_equal(result, expected2)

    def test_rolling_on_decreasing_index(self, unit):
        # GH-19248, GH-32385
        index = DatetimeIndex(
            [
                Timestamp("20190101 09:00:30"),
                Timestamp("20190101 09:00:27"),
                Timestamp("20190101 09:00:20"),
                Timestamp("20190101 09:00:18"),
                Timestamp("20190101 09:00:10"),
            ]
        ).as_unit(unit)

        df = DataFrame({"column": [3, 4, 4, 5, 6]}, index=index)
        result = df.rolling("5s").min()
        expected = DataFrame({"column": [3.0, 3.0, 4.0, 4.0, 6.0]}, index=index)
        tm.assert_frame_equal(result, expected)

    def test_rolling_on_empty(self):
        # GH-32385
        df = DataFrame({"column": []}, index=[])
        result = df.rolling("5s").min()
        expected = DataFrame({"column": []}, index=[])
        tm.assert_frame_equal(result, expected)

    def test_rolling_on_multi_index_level(self):
        # GH-15584
        df = DataFrame(
            {"column": range(6)},
            index=MultiIndex.from_product(
                [date_range("20190101", periods=3), range(2)], names=["date", "seq"]
            ),
        )
        result = df.rolling("10d", on=df.index.get_level_values("date")).sum()
        expected = DataFrame(
            {"column": [0.0, 1.0, 3.0, 6.0, 10.0, 15.0]}, index=df.index
        )
        tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize("msg, axis", [["column", 1], ["index", 0]])
def
test_nat_axis_error(msg, axis):\n idx = [Timestamp("2020"), NaT]\n kwargs = {"columns" if axis == 1 else "index": idx}\n df = DataFrame(np.eye(2), **kwargs)\n warn_msg = "The 'axis' keyword in DataFrame.rolling is deprecated"\n if axis == 1:\n warn_msg = "Support for axis=1 in DataFrame.rolling is deprecated"\n with pytest.raises(ValueError, match=f"{msg} values must not have NaT"):\n with tm.assert_produces_warning(FutureWarning, match=warn_msg):\n df.rolling("D", axis=axis).mean()\n\n\n@td.skip_if_no("pyarrow")\ndef test_arrow_datetime_axis():\n # GH 55849\n expected = Series(\n np.arange(5, dtype=np.float64),\n index=Index(\n date_range("2020-01-01", periods=5), dtype="timestamp[ns][pyarrow]"\n ),\n )\n result = expected.rolling("1D").sum()\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\window\test_timeseries_window.py | test_timeseries_window.py | Python | 24,250 | 0.95 | 0.06014 | 0.067797 | vue-tools | 461 | 2025-06-12T16:09:58.583318 | GPL-3.0 | true | bfc8c51642867fb351a7576af6323998 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Series,\n Timedelta,\n concat,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.api.indexers import BaseIndexer\n\n\n@pytest.fixture(\n params=[\n "triang",\n "blackman",\n "hamming",\n "bartlett",\n "bohman",\n "blackmanharris",\n "nuttall",\n "barthann",\n ]\n)\ndef win_types(request):\n return request.param\n\n\n@pytest.fixture(params=["kaiser", "gaussian", "general_gaussian", "exponential"])\ndef win_types_special(request):\n return request.param\n\n\ndef test_constructor(frame_or_series):\n # GH 12669\n pytest.importorskip("scipy")\n c = frame_or_series(range(5)).rolling\n\n # valid\n c(win_type="boxcar", window=2, min_periods=1)\n c(win_type="boxcar", window=2, min_periods=1, center=True)\n c(win_type="boxcar", window=2, min_periods=1, center=False)\n\n\n@pytest.mark.parametrize("w", [2.0, "foo", np.array([2])])\ndef test_invalid_constructor(frame_or_series, w):\n # not valid\n pytest.importorskip("scipy")\n c = frame_or_series(range(5)).rolling\n with pytest.raises(ValueError, match="min_periods must be an integer"):\n c(win_type="boxcar", window=2, min_periods=w)\n with pytest.raises(ValueError, match="center must be a boolean"):\n c(win_type="boxcar", window=2, min_periods=1, center=w)\n\n\n@pytest.mark.parametrize("wt", ["foobar", 1])\ndef test_invalid_constructor_wintype(frame_or_series, wt):\n pytest.importorskip("scipy")\n c = frame_or_series(range(5)).rolling\n with pytest.raises(ValueError, match="Invalid win_type"):\n c(win_type=wt, window=2)\n\n\ndef test_constructor_with_win_type(frame_or_series, win_types):\n # GH 12669\n pytest.importorskip("scipy")\n c = frame_or_series(range(5)).rolling\n c(win_type=win_types, window=2)\n\n\n@pytest.mark.parametrize("arg", ["median", "kurt", "skew"])\ndef test_agg_function_support(arg):\n pytest.importorskip("scipy")\n df = DataFrame({"A": np.arange(5)})\n roll = df.rolling(2, win_type="triang")\n\n msg = f"'{arg}' is not a 
valid function for 'Window' object"\n with pytest.raises(AttributeError, match=msg):\n roll.agg(arg)\n\n with pytest.raises(AttributeError, match=msg):\n roll.agg([arg])\n\n with pytest.raises(AttributeError, match=msg):\n roll.agg({"A": arg})\n\n\ndef test_invalid_scipy_arg():\n # This error is raised by scipy\n pytest.importorskip("scipy")\n msg = r"boxcar\(\) got an unexpected"\n with pytest.raises(TypeError, match=msg):\n Series(range(3)).rolling(1, win_type="boxcar").mean(foo="bar")\n\n\ndef test_constructor_with_win_type_invalid(frame_or_series):\n # GH 13383\n pytest.importorskip("scipy")\n c = frame_or_series(range(5)).rolling\n\n msg = "window must be an integer 0 or greater"\n\n with pytest.raises(ValueError, match=msg):\n c(-1, win_type="boxcar")\n\n\ndef test_window_with_args(step):\n # make sure that we are aggregating window functions correctly with arg\n pytest.importorskip("scipy")\n r = Series(np.random.default_rng(2).standard_normal(100)).rolling(\n window=10, min_periods=1, win_type="gaussian", step=step\n )\n expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1)\n expected.columns = ["<lambda>", "<lambda>"]\n result = r.aggregate([lambda x: x.mean(std=10), lambda x: x.mean(std=0.01)])\n tm.assert_frame_equal(result, expected)\n\n def a(x):\n return x.mean(std=10)\n\n def b(x):\n return x.mean(std=0.01)\n\n expected = concat([r.mean(std=10), r.mean(std=0.01)], axis=1)\n expected.columns = ["a", "b"]\n result = r.aggregate([a, b])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_win_type_with_method_invalid():\n pytest.importorskip("scipy")\n with pytest.raises(\n NotImplementedError, match="'single' is the only supported method type."\n ):\n Series(range(1)).rolling(1, win_type="triang", method="table")\n\n\n@pytest.mark.parametrize("arg", [2000000000, "2s", Timedelta("2s")])\ndef test_consistent_win_type_freq(arg):\n # GH 15969\n pytest.importorskip("scipy")\n s = Series(range(1))\n with pytest.raises(ValueError, match="Invalid 
win_type freq"):\n s.rolling(arg, win_type="freq")\n\n\ndef test_win_type_freq_return_none():\n # GH 48838\n freq_roll = Series(range(2), index=date_range("2020", periods=2)).rolling("2s")\n assert freq_roll.win_type is None\n\n\ndef test_win_type_not_implemented():\n pytest.importorskip("scipy")\n\n class CustomIndexer(BaseIndexer):\n def get_window_bounds(self, num_values, min_periods, center, closed, step):\n return np.array([0, 1]), np.array([1, 2])\n\n df = DataFrame({"values": range(2)})\n indexer = CustomIndexer()\n with pytest.raises(NotImplementedError, match="BaseIndexer subclasses not"):\n df.rolling(indexer, win_type="boxcar")\n\n\ndef test_cmov_mean(step):\n # GH 8238\n pytest.importorskip("scipy")\n vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])\n result = Series(vals).rolling(5, center=True, step=step).mean()\n expected_values = [\n np.nan,\n np.nan,\n 9.962,\n 11.27,\n 11.564,\n 12.516,\n 12.818,\n 12.952,\n np.nan,\n np.nan,\n ]\n expected = Series(expected_values)[::step]\n tm.assert_series_equal(expected, result)\n\n\ndef test_cmov_window(step):\n # GH 8238\n pytest.importorskip("scipy")\n vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])\n result = Series(vals).rolling(5, win_type="boxcar", center=True, step=step).mean()\n expected_values = [\n np.nan,\n np.nan,\n 9.962,\n 11.27,\n 11.564,\n 12.516,\n 12.818,\n 12.952,\n np.nan,\n np.nan,\n ]\n expected = Series(expected_values)[::step]\n tm.assert_series_equal(expected, result)\n\n\ndef test_cmov_window_corner(step):\n # GH 8238\n # all nan\n pytest.importorskip("scipy")\n vals = Series([np.nan] * 10)\n result = vals.rolling(5, center=True, win_type="boxcar", step=step).mean()\n assert np.isnan(result).all()\n\n # empty\n vals = Series([], dtype=object)\n result = vals.rolling(5, center=True, win_type="boxcar", step=step).mean()\n assert len(result) == 0\n\n # shorter than window\n vals = 
Series(np.random.default_rng(2).standard_normal(5))\n result = vals.rolling(10, win_type="boxcar", step=step).mean()\n assert np.isnan(result).all()\n assert len(result) == len(range(0, 5, step or 1))\n\n\n@pytest.mark.parametrize(\n "f,xp",\n [\n (\n "mean",\n [\n [np.nan, np.nan],\n [np.nan, np.nan],\n [9.252, 9.392],\n [8.644, 9.906],\n [8.87, 10.208],\n [6.81, 8.588],\n [7.792, 8.644],\n [9.05, 7.824],\n [np.nan, np.nan],\n [np.nan, np.nan],\n ],\n ),\n (\n "std",\n [\n [np.nan, np.nan],\n [np.nan, np.nan],\n [3.789706, 4.068313],\n [3.429232, 3.237411],\n [3.589269, 3.220810],\n [3.405195, 2.380655],\n [3.281839, 2.369869],\n [3.676846, 1.801799],\n [np.nan, np.nan],\n [np.nan, np.nan],\n ],\n ),\n (\n "var",\n [\n [np.nan, np.nan],\n [np.nan, np.nan],\n [14.36187, 16.55117],\n [11.75963, 10.48083],\n [12.88285, 10.37362],\n [11.59535, 5.66752],\n [10.77047, 5.61628],\n [13.51920, 3.24648],\n [np.nan, np.nan],\n [np.nan, np.nan],\n ],\n ),\n (\n "sum",\n [\n [np.nan, np.nan],\n [np.nan, np.nan],\n [46.26, 46.96],\n [43.22, 49.53],\n [44.35, 51.04],\n [34.05, 42.94],\n [38.96, 43.22],\n [45.25, 39.12],\n [np.nan, np.nan],\n [np.nan, np.nan],\n ],\n ),\n ],\n)\ndef test_cmov_window_frame(f, xp, step):\n # Gh 8238\n pytest.importorskip("scipy")\n df = DataFrame(\n np.array(\n [\n [12.18, 3.64],\n [10.18, 9.16],\n [13.24, 14.61],\n [4.51, 8.11],\n [6.15, 11.44],\n [9.14, 6.21],\n [11.31, 10.67],\n [2.94, 6.51],\n [9.42, 8.39],\n [12.44, 7.34],\n ]\n )\n )\n xp = DataFrame(np.array(xp))[::step]\n\n roll = df.rolling(5, win_type="boxcar", center=True, step=step)\n rs = getattr(roll, f)()\n\n tm.assert_frame_equal(xp, rs)\n\n\n@pytest.mark.parametrize("min_periods", [0, 1, 2, 3, 4, 5])\ndef test_cmov_window_na_min_periods(step, min_periods):\n pytest.importorskip("scipy")\n vals = Series(np.random.default_rng(2).standard_normal(10))\n vals[4] = np.nan\n vals[8] = np.nan\n\n xp = vals.rolling(5, min_periods=min_periods, center=True, step=step).mean()\n rs = 
vals.rolling(\n 5, win_type="boxcar", min_periods=min_periods, center=True, step=step\n ).mean()\n tm.assert_series_equal(xp, rs)\n\n\ndef test_cmov_window_regular(win_types, step):\n # GH 8238\n pytest.importorskip("scipy")\n vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])\n xps = {\n "hamming": [\n np.nan,\n np.nan,\n 8.71384,\n 9.56348,\n 12.38009,\n 14.03687,\n 13.8567,\n 11.81473,\n np.nan,\n np.nan,\n ],\n "triang": [\n np.nan,\n np.nan,\n 9.28667,\n 10.34667,\n 12.00556,\n 13.33889,\n 13.38,\n 12.33667,\n np.nan,\n np.nan,\n ],\n "barthann": [\n np.nan,\n np.nan,\n 8.4425,\n 9.1925,\n 12.5575,\n 14.3675,\n 14.0825,\n 11.5675,\n np.nan,\n np.nan,\n ],\n "bohman": [\n np.nan,\n np.nan,\n 7.61599,\n 9.1764,\n 12.83559,\n 14.17267,\n 14.65923,\n 11.10401,\n np.nan,\n np.nan,\n ],\n "blackmanharris": [\n np.nan,\n np.nan,\n 6.97691,\n 9.16438,\n 13.05052,\n 14.02156,\n 15.10512,\n 10.74574,\n np.nan,\n np.nan,\n ],\n "nuttall": [\n np.nan,\n np.nan,\n 7.04618,\n 9.16786,\n 13.02671,\n 14.03559,\n 15.05657,\n 10.78514,\n np.nan,\n np.nan,\n ],\n "blackman": [\n np.nan,\n np.nan,\n 7.73345,\n 9.17869,\n 12.79607,\n 14.20036,\n 14.57726,\n 11.16988,\n np.nan,\n np.nan,\n ],\n "bartlett": [\n np.nan,\n np.nan,\n 8.4425,\n 9.1925,\n 12.5575,\n 14.3675,\n 14.0825,\n 11.5675,\n np.nan,\n np.nan,\n ],\n }\n\n xp = Series(xps[win_types])[::step]\n rs = Series(vals).rolling(5, win_type=win_types, center=True, step=step).mean()\n tm.assert_series_equal(xp, rs)\n\n\ndef test_cmov_window_regular_linear_range(win_types, step):\n # GH 8238\n pytest.importorskip("scipy")\n vals = np.array(range(10), dtype=float)\n xp = vals.copy()\n xp[:2] = np.nan\n xp[-2:] = np.nan\n xp = Series(xp)[::step]\n\n rs = Series(vals).rolling(5, win_type=win_types, center=True, step=step).mean()\n tm.assert_series_equal(xp, rs)\n\n\ndef test_cmov_window_regular_missing_data(win_types, step):\n # GH 8238\n pytest.importorskip("scipy")\n vals = np.array(\n [6.95, 
15.21, 4.72, 9.12, 13.81, 13.49, 16.68, np.nan, 10.63, 14.48]\n )\n xps = {\n "bartlett": [\n np.nan,\n np.nan,\n 9.70333,\n 10.5225,\n 8.4425,\n 9.1925,\n 12.5575,\n 14.3675,\n 15.61667,\n 13.655,\n ],\n "blackman": [\n np.nan,\n np.nan,\n 9.04582,\n 11.41536,\n 7.73345,\n 9.17869,\n 12.79607,\n 14.20036,\n 15.8706,\n 13.655,\n ],\n "barthann": [\n np.nan,\n np.nan,\n 9.70333,\n 10.5225,\n 8.4425,\n 9.1925,\n 12.5575,\n 14.3675,\n 15.61667,\n 13.655,\n ],\n "bohman": [\n np.nan,\n np.nan,\n 8.9444,\n 11.56327,\n 7.61599,\n 9.1764,\n 12.83559,\n 14.17267,\n 15.90976,\n 13.655,\n ],\n "hamming": [\n np.nan,\n np.nan,\n 9.59321,\n 10.29694,\n 8.71384,\n 9.56348,\n 12.38009,\n 14.20565,\n 15.24694,\n 13.69758,\n ],\n "nuttall": [\n np.nan,\n np.nan,\n 8.47693,\n 12.2821,\n 7.04618,\n 9.16786,\n 13.02671,\n 14.03673,\n 16.08759,\n 13.65553,\n ],\n "triang": [\n np.nan,\n np.nan,\n 9.33167,\n 9.76125,\n 9.28667,\n 10.34667,\n 12.00556,\n 13.82125,\n 14.49429,\n 13.765,\n ],\n "blackmanharris": [\n np.nan,\n np.nan,\n 8.42526,\n 12.36824,\n 6.97691,\n 9.16438,\n 13.05052,\n 14.02175,\n 16.1098,\n 13.65509,\n ],\n }\n\n xp = Series(xps[win_types])[::step]\n rs = Series(vals).rolling(5, win_type=win_types, min_periods=3, step=step).mean()\n tm.assert_series_equal(xp, rs)\n\n\ndef test_cmov_window_special(win_types_special, step):\n # GH 8238\n pytest.importorskip("scipy")\n kwds = {\n "kaiser": {"beta": 1.0},\n "gaussian": {"std": 1.0},\n "general_gaussian": {"p": 2.0, "sig": 2.0},\n "exponential": {"tau": 10},\n }\n\n vals = np.array([6.95, 15.21, 4.72, 9.12, 13.81, 13.49, 16.68, 9.48, 10.63, 14.48])\n\n xps = {\n "gaussian": [\n np.nan,\n np.nan,\n 8.97297,\n 9.76077,\n 12.24763,\n 13.89053,\n 13.65671,\n 12.01002,\n np.nan,\n np.nan,\n ],\n "general_gaussian": [\n np.nan,\n np.nan,\n 9.85011,\n 10.71589,\n 11.73161,\n 13.08516,\n 12.95111,\n 12.74577,\n np.nan,\n np.nan,\n ],\n "kaiser": [\n np.nan,\n np.nan,\n 9.86851,\n 11.02969,\n 11.65161,\n 12.75129,\n 12.90702,\n 
12.83757,\n np.nan,\n np.nan,\n ],\n "exponential": [\n np.nan,\n np.nan,\n 9.83364,\n 11.10472,\n 11.64551,\n 12.66138,\n 12.92379,\n 12.83770,\n np.nan,\n np.nan,\n ],\n }\n\n xp = Series(xps[win_types_special])[::step]\n rs = (\n Series(vals)\n .rolling(5, win_type=win_types_special, center=True, step=step)\n .mean(**kwds[win_types_special])\n )\n tm.assert_series_equal(xp, rs)\n\n\ndef test_cmov_window_special_linear_range(win_types_special, step):\n # GH 8238\n pytest.importorskip("scipy")\n kwds = {\n "kaiser": {"beta": 1.0},\n "gaussian": {"std": 1.0},\n "general_gaussian": {"p": 2.0, "sig": 2.0},\n "slepian": {"width": 0.5},\n "exponential": {"tau": 10},\n }\n\n vals = np.array(range(10), dtype=float)\n xp = vals.copy()\n xp[:2] = np.nan\n xp[-2:] = np.nan\n xp = Series(xp)[::step]\n\n rs = (\n Series(vals)\n .rolling(5, win_type=win_types_special, center=True, step=step)\n .mean(**kwds[win_types_special])\n )\n tm.assert_series_equal(xp, rs)\n\n\ndef test_weighted_var_big_window_no_segfault(win_types, center):\n # GitHub Issue #46772\n pytest.importorskip("scipy")\n x = Series(0)\n result = x.rolling(window=16, center=center, win_type=win_types).var()\n expected = Series(np.nan)\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_rolling_center_axis_1():\n pytest.importorskip("scipy")\n df = DataFrame(\n {"a": [1, 1, 0, 0, 0, 1], "b": [1, 0, 0, 1, 0, 0], "c": [1, 0, 0, 1, 0, 1]}\n )\n\n msg = "Support for axis=1 in DataFrame.rolling is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.rolling(window=3, axis=1, win_type="boxcar", center=True).sum()\n\n expected = DataFrame(\n {"a": [np.nan] * 6, "b": [3.0, 1.0, 0.0, 2.0, 0.0, 2.0], "c": [np.nan] * 6}\n )\n\n tm.assert_frame_equal(result, expected, check_dtype=True)\n | .venv\Lib\site-packages\pandas\tests\window\test_win_type.py | test_win_type.py | Python | 17,522 | 0.95 | 0.047965 | 0.036244 | python-kit | 750 | 2025-06-13T22:03:12.356027 | Apache-2.0 | true | 
3351300f1e6fc50791e22e25db7f6a3e |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.