content stringlengths 1 103k ⌀ | path stringlengths 8 216 | filename stringlengths 2 179 | language stringclasses 15
values | size_bytes int64 2 189k | quality_score float64 0.5 0.95 | complexity float64 0 1 | documentation_ratio float64 0 1 | repository stringclasses 5
values | stars int64 0 1k | created_date stringdate 2023-07-10 19:21:08 2025-07-09 19:11:45 | license stringclasses 4
values | is_test bool 2
classes | file_hash stringlengths 32 32 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
\n\n | .venv\Lib\site-packages\pandas\tests\series\__pycache__\test_subclass.cpython-313.pyc | test_subclass.cpython-313.pyc | Other | 5,517 | 0.8 | 0 | 0 | vue-tools | 406 | 2023-09-10T07:32:25.112247 | BSD-3-Clause | true | 6963fc1abe8715a5f6840dbbc6c7d0c7 |
\n\n | .venv\Lib\site-packages\pandas\tests\series\__pycache__\test_ufunc.cpython-313.pyc | test_ufunc.cpython-313.pyc | Other | 23,034 | 0.8 | 0 | 0.008299 | python-kit | 576 | 2025-04-15T19:36:54.867976 | Apache-2.0 | true | ec22bad0fc65ecd505d11936b1a945df |
\n\n | .venv\Lib\site-packages\pandas\tests\series\__pycache__\test_unary.cpython-313.pyc | test_unary.cpython-313.pyc | Other | 2,829 | 0.8 | 0 | 0 | node-utils | 471 | 2025-03-05T12:02:22.019301 | MIT | true | c26445742bb2eef8fb6b7c3d04054917 |
\n\n | .venv\Lib\site-packages\pandas\tests\series\__pycache__\test_validate.cpython-313.pyc | test_validate.cpython-313.pyc | Other | 1,182 | 0.7 | 0.083333 | 0 | python-kit | 178 | 2024-04-02T05:52:46.176979 | MIT | true | 144decb70689f97b02a03ac9693a79e7 |
\n\n | .venv\Lib\site-packages\pandas\tests\series\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 194 | 0.7 | 0 | 0 | vue-tools | 983 | 2024-04-10T12:25:33.159126 | BSD-3-Clause | true | c0d549a157a38f1f37bb1289d9074a9c |
import pytest\n\nfrom pandas import Series\nfrom pandas.core.strings.accessor import StringMethods\n\n_any_string_method = [\n ("cat", (), {"sep": ","}),\n ("cat", (Series(list("zyx")),), {"sep": ",", "join": "left"}),\n ("center", (10,), {}),\n ("contains", ("a",), {}),\n ("count", ("a",), {}),\n ("decode", ("UTF-8",), {}),\n ("encode", ("UTF-8",), {}),\n ("endswith", ("a",), {}),\n ("endswith", ((),), {}),\n ("endswith", (("a",),), {}),\n ("endswith", (("a", "b"),), {}),\n ("endswith", (("a", "MISSING"),), {}),\n ("endswith", ("a",), {"na": True}),\n ("endswith", ("a",), {"na": False}),\n ("extract", ("([a-z]*)",), {"expand": False}),\n ("extract", ("([a-z]*)",), {"expand": True}),\n ("extractall", ("([a-z]*)",), {}),\n ("find", ("a",), {}),\n ("findall", ("a",), {}),\n ("get", (0,), {}),\n # because "index" (and "rindex") fail intentionally\n # if the string is not found, search only for empty string\n ("index", ("",), {}),\n ("join", (",",), {}),\n ("ljust", (10,), {}),\n ("match", ("a",), {}),\n ("fullmatch", ("a",), {}),\n ("normalize", ("NFC",), {}),\n ("pad", (10,), {}),\n ("partition", (" ",), {"expand": False}),\n ("partition", (" ",), {"expand": True}),\n ("repeat", (3,), {}),\n ("replace", ("a", "z"), {}),\n ("rfind", ("a",), {}),\n ("rindex", ("",), {}),\n ("rjust", (10,), {}),\n ("rpartition", (" ",), {"expand": False}),\n ("rpartition", (" ",), {"expand": True}),\n ("slice", (0, 1), {}),\n ("slice_replace", (0, 1, "z"), {}),\n ("split", (" ",), {"expand": False}),\n ("split", (" ",), {"expand": True}),\n ("startswith", ("a",), {}),\n ("startswith", (("a",),), {}),\n ("startswith", (("a", "b"),), {}),\n ("startswith", (("a", "MISSING"),), {}),\n ("startswith", ((),), {}),\n ("startswith", ("a",), {"na": True}),\n ("startswith", ("a",), {"na": False}),\n ("removeprefix", ("a",), {}),\n ("removesuffix", ("a",), {}),\n # translating unicode points of "a" to "d"\n ("translate", ({97: 100},), {}),\n ("wrap", (2,), {}),\n ("zfill", (10,), {}),\n] + list(\n 
zip(\n [\n # methods without positional arguments: zip with empty tuple and empty dict\n "capitalize",\n "cat",\n "get_dummies",\n "isalnum",\n "isalpha",\n "isdecimal",\n "isdigit",\n "islower",\n "isnumeric",\n "isspace",\n "istitle",\n "isupper",\n "len",\n "lower",\n "lstrip",\n "partition",\n "rpartition",\n "rsplit",\n "rstrip",\n "slice",\n "slice_replace",\n "split",\n "strip",\n "swapcase",\n "title",\n "upper",\n "casefold",\n ],\n [()] * 100,\n [{}] * 100,\n )\n)\nids, _, _ = zip(*_any_string_method) # use method name as fixture-id\nmissing_methods = {f for f in dir(StringMethods) if not f.startswith("_")} - set(ids)\n\n# test that the above list captures all methods of StringMethods\nassert not missing_methods\n\n\n@pytest.fixture(params=_any_string_method, ids=ids)\ndef any_string_method(request):\n """\n Fixture for all public methods of `StringMethods`\n\n This fixture returns a tuple of the method name and sample arguments\n necessary to call the method.\n\n Returns\n -------\n method_name : str\n The name of the method in `StringMethods`\n args : tuple\n Sample values for the positional arguments\n kwargs : dict\n Sample values for the keyword arguments\n\n Examples\n --------\n >>> def test_something(any_string_method):\n ... s = Series(['a', 'b', np.nan, 'd'])\n ...\n ... method_name, args, kwargs = any_string_method\n ... method = getattr(s.str, method_name)\n ... # will not raise\n ... method(*args, **kwargs)\n """\n return request.param\n | .venv\Lib\site-packages\pandas\tests\strings\conftest.py | conftest.py | Python | 3,960 | 0.95 | 0.068182 | 0.040323 | python-kit | 115 | 2024-01-05T06:16:46.873464 | Apache-2.0 | true | f9d540d7242491d78ce1ae16a13624b1 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n CategoricalDtype,\n DataFrame,\n Index,\n MultiIndex,\n Series,\n _testing as tm,\n option_context,\n)\nfrom pandas.core.strings.accessor import StringMethods\n\n# subset of the full set from pandas/conftest.py\n_any_allowed_skipna_inferred_dtype = [\n ("string", ["a", np.nan, "c"]),\n ("bytes", [b"a", np.nan, b"c"]),\n ("empty", [np.nan, np.nan, np.nan]),\n ("empty", []),\n ("mixed-integer", ["a", np.nan, 2]),\n]\nids, _ = zip(*_any_allowed_skipna_inferred_dtype) # use inferred type as id\n\n\n@pytest.fixture(params=_any_allowed_skipna_inferred_dtype, ids=ids)\ndef any_allowed_skipna_inferred_dtype(request):\n """\n Fixture for all (inferred) dtypes allowed in StringMethods.__init__\n\n The covered (inferred) types are:\n * 'string'\n * 'empty'\n * 'bytes'\n * 'mixed'\n * 'mixed-integer'\n\n Returns\n -------\n inferred_dtype : str\n The string for the inferred dtype from _libs.lib.infer_dtype\n values : np.ndarray\n An array of object dtype that will be inferred to have\n `inferred_dtype`\n\n Examples\n --------\n >>> from pandas._libs import lib\n >>>\n >>> def test_something(any_allowed_skipna_inferred_dtype):\n ... inferred_dtype, values = any_allowed_skipna_inferred_dtype\n ... # will pass\n ... assert lib.infer_dtype(values, skipna=True) == inferred_dtype\n ...\n ... # constructor for .str-accessor will also pass\n ... 
Series(values).str\n """\n inferred_dtype, values = request.param\n values = np.array(values, dtype=object) # object dtype to avoid casting\n\n # correctness of inference tested in tests/dtypes/test_inference.py\n return inferred_dtype, values\n\n\ndef test_api(any_string_dtype):\n # GH 6106, GH 9322\n assert Series.str is StringMethods\n assert isinstance(Series([""], dtype=any_string_dtype).str, StringMethods)\n\n\ndef test_api_mi_raises():\n # GH 23679\n mi = MultiIndex.from_arrays([["a", "b", "c"]])\n msg = "Can only use .str accessor with Index, not MultiIndex"\n with pytest.raises(AttributeError, match=msg):\n mi.str\n assert not hasattr(mi, "str")\n\n\n@pytest.mark.parametrize("dtype", [object, "category"])\ndef test_api_per_dtype(index_or_series, dtype, any_skipna_inferred_dtype):\n # one instance of parametrized fixture\n box = index_or_series\n inferred_dtype, values = any_skipna_inferred_dtype\n\n t = box(values, dtype=dtype) # explicit dtype to avoid casting\n\n types_passing_constructor = [\n "string",\n "unicode",\n "empty",\n "bytes",\n "mixed",\n "mixed-integer",\n ]\n if inferred_dtype in types_passing_constructor:\n # GH 6106\n assert isinstance(t.str, StringMethods)\n else:\n # GH 9184, GH 23011, GH 23163\n msg = "Can only use .str accessor with string values.*"\n with pytest.raises(AttributeError, match=msg):\n t.str\n assert not hasattr(t, "str")\n\n\n@pytest.mark.parametrize("dtype", [object, "category"])\ndef test_api_per_method(\n index_or_series,\n dtype,\n any_allowed_skipna_inferred_dtype,\n any_string_method,\n request,\n using_infer_string,\n):\n # this test does not check correctness of the different methods,\n # just that the methods work on the specified (inferred) dtypes,\n # and raise on all others\n box = index_or_series\n\n # one instance of each parametrized fixture\n inferred_dtype, values = any_allowed_skipna_inferred_dtype\n method_name, args, kwargs = any_string_method\n\n reason = None\n if box is Index and values.size == 
0:\n if method_name in ["partition", "rpartition"] and kwargs.get("expand", True):\n raises = TypeError\n reason = "Method cannot deal with empty Index"\n elif method_name == "split" and kwargs.get("expand", None):\n raises = TypeError\n reason = "Split fails on empty Series when expand=True"\n elif method_name == "get_dummies":\n raises = ValueError\n reason = "Need to fortify get_dummies corner cases"\n\n elif (\n box is Index\n and inferred_dtype == "empty"\n and dtype == object\n and method_name == "get_dummies"\n ):\n raises = ValueError\n reason = "Need to fortify get_dummies corner cases"\n\n if reason is not None:\n mark = pytest.mark.xfail(raises=raises, reason=reason)\n request.applymarker(mark)\n\n t = box(values, dtype=dtype) # explicit dtype to avoid casting\n method = getattr(t.str, method_name)\n\n if using_infer_string and dtype == "category":\n string_allowed = method_name not in ["decode"]\n else:\n string_allowed = True\n bytes_allowed = method_name in ["decode", "get", "len", "slice"]\n # as of v0.23.4, all methods except 'cat' are very lenient with the\n # allowed data types, just returning NaN for entries that error.\n # This could be changed with an 'errors'-kwarg to the `str`-accessor,\n # see discussion in GH 13877\n mixed_allowed = method_name not in ["cat"]\n\n allowed_types = (\n ["empty"]\n + ["string", "unicode"] * string_allowed\n + ["bytes"] * bytes_allowed\n + ["mixed", "mixed-integer"] * mixed_allowed\n )\n\n if inferred_dtype in allowed_types:\n # xref GH 23555, GH 23556\n with option_context("future.no_silent_downcasting", True):\n method(*args, **kwargs) # works!\n else:\n # GH 23011, GH 23163\n msg = (\n f"Cannot use .str.{method_name} with values of "\n f"inferred dtype {repr(inferred_dtype)}."\n "|a bytes-like object is required, not 'str'"\n )\n with pytest.raises(TypeError, match=msg):\n method(*args, **kwargs)\n\n\ndef test_api_for_categorical(any_string_method, any_string_dtype):\n # 
https://github.com/pandas-dev/pandas/issues/10661\n s = Series(list("aabb"), dtype=any_string_dtype)\n s = s + " " + s\n c = s.astype("category")\n c = c.astype(CategoricalDtype(c.dtype.categories.astype("object")))\n assert isinstance(c.str, StringMethods)\n\n method_name, args, kwargs = any_string_method\n\n result = getattr(c.str, method_name)(*args, **kwargs)\n expected = getattr(s.astype("object").str, method_name)(*args, **kwargs)\n\n if isinstance(result, DataFrame):\n tm.assert_frame_equal(result, expected)\n elif isinstance(result, Series):\n tm.assert_series_equal(result, expected)\n else:\n # str.cat(others=None) returns string, for example\n assert result == expected\n | .venv\Lib\site-packages\pandas\tests\strings\test_api.py | test_api.py | Python | 6,609 | 0.95 | 0.092683 | 0.137931 | vue-tools | 606 | 2023-11-08T19:24:16.156768 | Apache-2.0 | true | 6f8d7a6a79eb46cb614a4bcbc139e22d |
from datetime import datetime\nimport operator\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n Series,\n _testing as tm,\n)\n\n\ndef test_title(any_string_dtype):\n s = Series(["FOO", "BAR", np.nan, "Blah", "blurg"], dtype=any_string_dtype)\n result = s.str.title()\n expected = Series(["Foo", "Bar", np.nan, "Blah", "Blurg"], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_title_mixed_object():\n s = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0])\n result = s.str.title()\n expected = Series(\n ["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", None, np.nan, np.nan],\n dtype=object,\n )\n tm.assert_almost_equal(result, expected)\n\n\ndef test_lower_upper(any_string_dtype):\n s = Series(["om", np.nan, "nom", "nom"], dtype=any_string_dtype)\n\n result = s.str.upper()\n expected = Series(["OM", np.nan, "NOM", "NOM"], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n result = result.str.lower()\n tm.assert_series_equal(result, s)\n\n\ndef test_lower_upper_mixed_object():\n s = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0])\n\n result = s.str.upper()\n expected = Series(\n ["A", np.nan, "B", np.nan, np.nan, "FOO", None, np.nan, np.nan], dtype=object\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.lower()\n expected = Series(\n ["a", np.nan, "b", np.nan, np.nan, "foo", None, np.nan, np.nan], dtype=object\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data, expected",\n [\n (\n ["FOO", "BAR", np.nan, "Blah", "blurg"],\n ["Foo", "Bar", np.nan, "Blah", "Blurg"],\n ),\n (["a", "b", "c"], ["A", "B", "C"]),\n (["a b", "a bc. de"], ["A b", "A bc. 
de"]),\n ],\n)\ndef test_capitalize(data, expected, any_string_dtype):\n s = Series(data, dtype=any_string_dtype)\n result = s.str.capitalize()\n expected = Series(expected, dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_capitalize_mixed_object():\n s = Series(["FOO", np.nan, "bar", True, datetime.today(), "blah", None, 1, 2.0])\n result = s.str.capitalize()\n expected = Series(\n ["Foo", np.nan, "Bar", np.nan, np.nan, "Blah", None, np.nan, np.nan],\n dtype=object,\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_swapcase(any_string_dtype):\n s = Series(["FOO", "BAR", np.nan, "Blah", "blurg"], dtype=any_string_dtype)\n result = s.str.swapcase()\n expected = Series(["foo", "bar", np.nan, "bLAH", "BLURG"], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_swapcase_mixed_object():\n s = Series(["FOO", np.nan, "bar", True, datetime.today(), "Blah", None, 1, 2.0])\n result = s.str.swapcase()\n expected = Series(\n ["foo", np.nan, "BAR", np.nan, np.nan, "bLAH", None, np.nan, np.nan],\n dtype=object,\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_casefold():\n # GH25405\n expected = Series(["ss", np.nan, "case", "ssd"])\n s = Series(["ß", np.nan, "case", "ßd"])\n result = s.str.casefold()\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_casemethods(any_string_dtype):\n values = ["aaa", "bbb", "CCC", "Dddd", "eEEE"]\n s = Series(values, dtype=any_string_dtype)\n assert s.str.lower().tolist() == [v.lower() for v in values]\n assert s.str.upper().tolist() == [v.upper() for v in values]\n assert s.str.title().tolist() == [v.title() for v in values]\n assert s.str.capitalize().tolist() == [v.capitalize() for v in values]\n assert s.str.swapcase().tolist() == [v.swapcase() for v in values]\n\n\ndef test_pad(any_string_dtype):\n s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype)\n\n result = s.str.pad(5, side="left")\n expected = Series(\n [" a", " b", 
np.nan, " c", np.nan, "eeeeee"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.pad(5, side="right")\n expected = Series(\n ["a ", "b ", np.nan, "c ", np.nan, "eeeeee"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.pad(5, side="both")\n expected = Series(\n [" a ", " b ", np.nan, " c ", np.nan, "eeeeee"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_pad_mixed_object():\n s = Series(["a", np.nan, "b", True, datetime.today(), "ee", None, 1, 2.0])\n\n result = s.str.pad(5, side="left")\n expected = Series(\n [" a", np.nan, " b", np.nan, np.nan, " ee", None, np.nan, np.nan],\n dtype=object,\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.pad(5, side="right")\n expected = Series(\n ["a ", np.nan, "b ", np.nan, np.nan, "ee ", None, np.nan, np.nan],\n dtype=object,\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.pad(5, side="both")\n expected = Series(\n [" a ", np.nan, " b ", np.nan, np.nan, " ee ", None, np.nan, np.nan],\n dtype=object,\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_pad_fillchar(any_string_dtype):\n s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype)\n\n result = s.str.pad(5, side="left", fillchar="X")\n expected = Series(\n ["XXXXa", "XXXXb", np.nan, "XXXXc", np.nan, "eeeeee"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.pad(5, side="right", fillchar="X")\n expected = Series(\n ["aXXXX", "bXXXX", np.nan, "cXXXX", np.nan, "eeeeee"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.pad(5, side="both", fillchar="X")\n expected = Series(\n ["XXaXX", "XXbXX", np.nan, "XXcXX", np.nan, "eeeeee"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_pad_fillchar_bad_arg_raises(any_string_dtype):\n s = Series(["a", "b", np.nan, "c", np.nan, 
"eeeeee"], dtype=any_string_dtype)\n\n msg = "fillchar must be a character, not str"\n with pytest.raises(TypeError, match=msg):\n s.str.pad(5, fillchar="XY")\n\n msg = "fillchar must be a character, not int"\n with pytest.raises(TypeError, match=msg):\n s.str.pad(5, fillchar=5)\n\n\n@pytest.mark.parametrize("method_name", ["center", "ljust", "rjust", "zfill", "pad"])\ndef test_pad_width_bad_arg_raises(method_name, any_string_dtype):\n # see gh-13598\n s = Series(["1", "22", "a", "bb"], dtype=any_string_dtype)\n op = operator.methodcaller(method_name, "f")\n\n msg = "width must be of integer type, not str"\n with pytest.raises(TypeError, match=msg):\n op(s.str)\n\n\ndef test_center_ljust_rjust(any_string_dtype):\n s = Series(["a", "b", np.nan, "c", np.nan, "eeeeee"], dtype=any_string_dtype)\n\n result = s.str.center(5)\n expected = Series(\n [" a ", " b ", np.nan, " c ", np.nan, "eeeeee"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.ljust(5)\n expected = Series(\n ["a ", "b ", np.nan, "c ", np.nan, "eeeeee"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.rjust(5)\n expected = Series(\n [" a", " b", np.nan, " c", np.nan, "eeeeee"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_center_ljust_rjust_mixed_object():\n s = Series(["a", np.nan, "b", True, datetime.today(), "c", "eee", None, 1, 2.0])\n\n result = s.str.center(5)\n expected = Series(\n [\n " a ",\n np.nan,\n " b ",\n np.nan,\n np.nan,\n " c ",\n " eee ",\n None,\n np.nan,\n np.nan,\n ],\n dtype=object,\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.ljust(5)\n expected = Series(\n [\n "a ",\n np.nan,\n "b ",\n np.nan,\n np.nan,\n "c ",\n "eee ",\n None,\n np.nan,\n np.nan,\n ],\n dtype=object,\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.rjust(5)\n expected = Series(\n [\n " a",\n np.nan,\n " b",\n np.nan,\n np.nan,\n " c",\n " eee",\n None,\n 
np.nan,\n np.nan,\n ],\n dtype=object,\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_center_ljust_rjust_fillchar(any_string_dtype):\n # GH#54533, GH#54792\n s = Series(["a", "bb", "cccc", "ddddd", "eeeeee"], dtype=any_string_dtype)\n\n result = s.str.center(5, fillchar="X")\n expected = Series(\n ["XXaXX", "XXbbX", "Xcccc", "ddddd", "eeeeee"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n expected = np.array([v.center(5, "X") for v in np.array(s)], dtype=np.object_)\n tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected)\n\n result = s.str.ljust(5, fillchar="X")\n expected = Series(\n ["aXXXX", "bbXXX", "ccccX", "ddddd", "eeeeee"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n expected = np.array([v.ljust(5, "X") for v in np.array(s)], dtype=np.object_)\n tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected)\n\n result = s.str.rjust(5, fillchar="X")\n expected = Series(\n ["XXXXa", "XXXbb", "Xcccc", "ddddd", "eeeeee"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n expected = np.array([v.rjust(5, "X") for v in np.array(s)], dtype=np.object_)\n tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected)\n\n\ndef test_center_ljust_rjust_fillchar_bad_arg_raises(any_string_dtype):\n s = Series(["a", "bb", "cccc", "ddddd", "eeeeee"], dtype=any_string_dtype)\n\n # If fillchar is not a character, normal str raises TypeError\n # 'aaa'.ljust(5, 'XY')\n # TypeError: must be char, not str\n template = "fillchar must be a character, not {dtype}"\n\n with pytest.raises(TypeError, match=template.format(dtype="str")):\n s.str.center(5, fillchar="XY")\n\n with pytest.raises(TypeError, match=template.format(dtype="str")):\n s.str.ljust(5, fillchar="XY")\n\n with pytest.raises(TypeError, match=template.format(dtype="str")):\n s.str.rjust(5, fillchar="XY")\n\n with pytest.raises(TypeError, match=template.format(dtype="int")):\n 
s.str.center(5, fillchar=1)\n\n with pytest.raises(TypeError, match=template.format(dtype="int")):\n s.str.ljust(5, fillchar=1)\n\n with pytest.raises(TypeError, match=template.format(dtype="int")):\n s.str.rjust(5, fillchar=1)\n\n\ndef test_zfill(any_string_dtype):\n s = Series(["1", "22", "aaa", "333", "45678"], dtype=any_string_dtype)\n\n result = s.str.zfill(5)\n expected = Series(\n ["00001", "00022", "00aaa", "00333", "45678"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n expected = np.array([v.zfill(5) for v in np.array(s)], dtype=np.object_)\n tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected)\n\n result = s.str.zfill(3)\n expected = Series(["001", "022", "aaa", "333", "45678"], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n expected = np.array([v.zfill(3) for v in np.array(s)], dtype=np.object_)\n tm.assert_numpy_array_equal(np.array(result, dtype=np.object_), expected)\n\n s = Series(["1", np.nan, "aaa", np.nan, "45678"], dtype=any_string_dtype)\n result = s.str.zfill(5)\n expected = Series(\n ["00001", np.nan, "00aaa", np.nan, "45678"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_wrap(any_string_dtype):\n # test values are: two words less than width, two words equal to width,\n # two words greater than width, one word less than width, one word\n # equal to width, one word greater than width, multiple tokens with\n # trailing whitespace equal to width\n s = Series(\n [\n "hello world",\n "hello world!",\n "hello world!!",\n "abcdefabcde",\n "abcdefabcdef",\n "abcdefabcdefa",\n "ab ab ab ab ",\n "ab ab ab ab a",\n "\t",\n ],\n dtype=any_string_dtype,\n )\n\n # expected values\n expected = Series(\n [\n "hello world",\n "hello world!",\n "hello\nworld!!",\n "abcdefabcde",\n "abcdefabcdef",\n "abcdefabcdef\na",\n "ab ab ab ab",\n "ab ab ab ab\na",\n "",\n ],\n dtype=any_string_dtype,\n )\n\n result = s.str.wrap(12, break_long_words=True)\n 
tm.assert_series_equal(result, expected)\n\n\ndef test_wrap_unicode(any_string_dtype):\n # test with pre and post whitespace (non-unicode), NaN, and non-ascii Unicode\n s = Series(\n [" pre ", np.nan, "\xac\u20ac\U00008000 abadcafe"], dtype=any_string_dtype\n )\n expected = Series(\n [" pre", np.nan, "\xac\u20ac\U00008000 ab\nadcafe"], dtype=any_string_dtype\n )\n result = s.str.wrap(6)\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\strings\test_case_justify.py | test_case_justify.py | Python | 13,361 | 0.95 | 0.07565 | 0.035398 | vue-tools | 371 | 2023-09-15T18:49:50.874443 | BSD-3-Clause | true | da73e6f4b67d5fabf5d4799dbfc1556c |
import re\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n _testing as tm,\n concat,\n option_context,\n)\n\n\n@pytest.mark.parametrize("other", [None, Series, Index])\ndef test_str_cat_name(index_or_series, other):\n # GH 21053\n box = index_or_series\n values = ["a", "b"]\n if other:\n other = other(values)\n else:\n other = values\n result = box(values, name="name").str.cat(other, sep=",")\n assert result.name == "name"\n\n\n@pytest.mark.parametrize(\n "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]\n)\ndef test_str_cat(index_or_series, infer_string):\n with option_context("future.infer_string", infer_string):\n box = index_or_series\n # test_cat above tests "str_cat" from ndarray;\n # here testing "str.cat" from Series/Index to ndarray/list\n s = box(["a", "a", "b", "b", "c", np.nan])\n\n # single array\n result = s.str.cat()\n expected = "aabbc"\n assert result == expected\n\n result = s.str.cat(na_rep="-")\n expected = "aabbc-"\n assert result == expected\n\n result = s.str.cat(sep="_", na_rep="NA")\n expected = "a_a_b_b_c_NA"\n assert result == expected\n\n t = np.array(["a", np.nan, "b", "d", "foo", np.nan], dtype=object)\n expected = box(["aa", "a-", "bb", "bd", "cfoo", "--"])\n\n # Series/Index with array\n result = s.str.cat(t, na_rep="-")\n tm.assert_equal(result, expected)\n\n # Series/Index with list\n result = s.str.cat(list(t), na_rep="-")\n tm.assert_equal(result, expected)\n\n # errors for incorrect lengths\n rgx = r"If `others` contains arrays or lists \(or other list-likes.*"\n z = Series(["1", "2", "3"])\n\n with pytest.raises(ValueError, match=rgx):\n s.str.cat(z.values)\n\n with pytest.raises(ValueError, match=rgx):\n s.str.cat(list(z))\n\n\ndef test_str_cat_raises_intuitive_error(index_or_series):\n # GH 11334\n box = index_or_series\n s = box(["a", "b", "c", "d"])\n message = "Did you mean to supply a `sep` 
keyword?"\n with pytest.raises(ValueError, match=message):\n s.str.cat("|")\n with pytest.raises(ValueError, match=message):\n s.str.cat(" ")\n\n\n@pytest.mark.parametrize(\n "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]\n)\n@pytest.mark.parametrize("sep", ["", None])\n@pytest.mark.parametrize("dtype_target", ["object", "category"])\n@pytest.mark.parametrize("dtype_caller", ["object", "category"])\ndef test_str_cat_categorical(\n index_or_series, dtype_caller, dtype_target, sep, infer_string\n):\n box = index_or_series\n\n with option_context("future.infer_string", infer_string):\n s = Index(["a", "a", "b", "a"], dtype=dtype_caller)\n s = s if box == Index else Series(s, index=s, dtype=s.dtype)\n t = Index(["b", "a", "b", "c"], dtype=dtype_target)\n\n expected = Index(\n ["ab", "aa", "bb", "ac"], dtype=object if dtype_caller == "object" else None\n )\n expected = (\n expected\n if box == Index\n else Series(\n expected, index=Index(s, dtype=dtype_caller), dtype=expected.dtype\n )\n )\n\n # Series/Index with unaligned Index -> t.values\n result = s.str.cat(t.values, sep=sep)\n tm.assert_equal(result, expected)\n\n # Series/Index with Series having matching Index\n t = Series(t.values, index=Index(s, dtype=dtype_caller))\n result = s.str.cat(t, sep=sep)\n tm.assert_equal(result, expected)\n\n # Series/Index with Series.values\n result = s.str.cat(t.values, sep=sep)\n tm.assert_equal(result, expected)\n\n # Series/Index with Series having different Index\n t = Series(t.values, index=t.values)\n expected = Index(\n ["aa", "aa", "bb", "bb", "aa"],\n dtype=object if dtype_caller == "object" else None,\n )\n dtype = object if dtype_caller == "object" else s.dtype.categories.dtype\n expected = (\n expected\n if box == Index\n else Series(\n expected,\n index=Index(expected.str[:1], dtype=dtype),\n dtype=expected.dtype,\n )\n )\n\n result = s.str.cat(t, sep=sep)\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data",\n [[1, 
2, 3], [0.1, 0.2, 0.3], [1, 2, "b"]],\n ids=["integers", "floats", "mixed"],\n)\n# without dtype=object, np.array would cast [1, 2, 'b'] to ['1', '2', 'b']\n@pytest.mark.parametrize(\n "box",\n [Series, Index, list, lambda x: np.array(x, dtype=object)],\n ids=["Series", "Index", "list", "np.array"],\n)\ndef test_str_cat_wrong_dtype_raises(box, data):\n # GH 22722\n s = Series(["a", "b", "c"])\n t = box(data)\n\n msg = "Concatenation requires list-likes containing only strings.*"\n with pytest.raises(TypeError, match=msg):\n # need to use outer and na_rep, as otherwise Index would not raise\n s.str.cat(t, join="outer", na_rep="-")\n\n\ndef test_str_cat_mixed_inputs(index_or_series):\n box = index_or_series\n s = Index(["a", "b", "c", "d"])\n s = s if box == Index else Series(s, index=s)\n\n t = Series(["A", "B", "C", "D"], index=s.values)\n d = concat([t, Series(s, index=s)], axis=1)\n\n expected = Index(["aAa", "bBb", "cCc", "dDd"])\n expected = expected if box == Index else Series(expected.values, index=s.values)\n\n # Series/Index with DataFrame\n result = s.str.cat(d)\n tm.assert_equal(result, expected)\n\n # Series/Index with two-dimensional ndarray\n result = s.str.cat(d.values)\n tm.assert_equal(result, expected)\n\n # Series/Index with list of Series\n result = s.str.cat([t, s])\n tm.assert_equal(result, expected)\n\n # Series/Index with mixed list of Series/array\n result = s.str.cat([t, s.values])\n tm.assert_equal(result, expected)\n\n # Series/Index with list of Series; different indexes\n t.index = ["b", "c", "d", "a"]\n expected = box(["aDa", "bAb", "cBc", "dCd"])\n expected = expected if box == Index else Series(expected.values, index=s.values)\n result = s.str.cat([t, s])\n tm.assert_equal(result, expected)\n\n # Series/Index with mixed list; different index\n result = s.str.cat([t, s.values])\n tm.assert_equal(result, expected)\n\n # Series/Index with DataFrame; different indexes\n d.index = ["b", "c", "d", "a"]\n expected = box(["aDd", "bAa", 
"cBb", "dCc"])\n expected = expected if box == Index else Series(expected.values, index=s.values)\n result = s.str.cat(d)\n tm.assert_equal(result, expected)\n\n # errors for incorrect lengths\n rgx = r"If `others` contains arrays or lists \(or other list-likes.*"\n z = Series(["1", "2", "3"])\n e = concat([z, z], axis=1)\n\n # two-dimensional ndarray\n with pytest.raises(ValueError, match=rgx):\n s.str.cat(e.values)\n\n # list of list-likes\n with pytest.raises(ValueError, match=rgx):\n s.str.cat([z.values, s.values])\n\n # mixed list of Series/list-like\n with pytest.raises(ValueError, match=rgx):\n s.str.cat([z.values, s])\n\n # errors for incorrect arguments in list-like\n rgx = "others must be Series, Index, DataFrame,.*"\n # make sure None/NaN do not crash checks in _get_series_list\n u = Series(["a", np.nan, "c", None])\n\n # mix of string and Series\n with pytest.raises(TypeError, match=rgx):\n s.str.cat([u, "u"])\n\n # DataFrame in list\n with pytest.raises(TypeError, match=rgx):\n s.str.cat([u, d])\n\n # 2-dim ndarray in list\n with pytest.raises(TypeError, match=rgx):\n s.str.cat([u, d.values])\n\n # nested lists\n with pytest.raises(TypeError, match=rgx):\n s.str.cat([u, [u, d]])\n\n # forbidden input type: set\n # GH 23009\n with pytest.raises(TypeError, match=rgx):\n s.str.cat(set(u))\n\n # forbidden input type: set in list\n # GH 23009\n with pytest.raises(TypeError, match=rgx):\n s.str.cat([u, set(u)])\n\n # other forbidden input type, e.g. 
int\n with pytest.raises(TypeError, match=rgx):\n s.str.cat(1)\n\n # nested list-likes\n with pytest.raises(TypeError, match=rgx):\n s.str.cat(iter([t.values, list(s)]))\n\n\n@pytest.mark.parametrize("join", ["left", "outer", "inner", "right"])\ndef test_str_cat_align_indexed(index_or_series, join):\n # https://github.com/pandas-dev/pandas/issues/18657\n box = index_or_series\n\n s = Series(["a", "b", "c", "d"], index=["a", "b", "c", "d"])\n t = Series(["D", "A", "E", "B"], index=["d", "a", "e", "b"])\n sa, ta = s.align(t, join=join)\n # result after manual alignment of inputs\n expected = sa.str.cat(ta, na_rep="-")\n\n if box == Index:\n s = Index(s)\n sa = Index(sa)\n expected = Index(expected)\n\n result = s.str.cat(t, join=join, na_rep="-")\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize("join", ["left", "outer", "inner", "right"])\ndef test_str_cat_align_mixed_inputs(join):\n s = Series(["a", "b", "c", "d"])\n t = Series(["d", "a", "e", "b"], index=[3, 0, 4, 1])\n d = concat([t, t], axis=1)\n\n expected_outer = Series(["aaa", "bbb", "c--", "ddd", "-ee"])\n expected = expected_outer.loc[s.index.join(t.index, how=join)]\n\n # list of Series\n result = s.str.cat([t, t], join=join, na_rep="-")\n tm.assert_series_equal(result, expected)\n\n # DataFrame\n result = s.str.cat(d, join=join, na_rep="-")\n tm.assert_series_equal(result, expected)\n\n # mixed list of indexed/unindexed\n u = np.array(["A", "B", "C", "D"])\n expected_outer = Series(["aaA", "bbB", "c-C", "ddD", "-e-"])\n # joint index of rhs [t, u]; u will be forced have index of s\n rhs_idx = (\n t.index.intersection(s.index)\n if join == "inner"\n else t.index.union(s.index)\n if join == "outer"\n else t.index.append(s.index.difference(t.index))\n )\n\n expected = expected_outer.loc[s.index.join(rhs_idx, how=join)]\n result = s.str.cat([t, u], join=join, na_rep="-")\n tm.assert_series_equal(result, expected)\n\n with pytest.raises(TypeError, match="others must be Series,.*"):\n # nested 
lists are forbidden\n s.str.cat([t, list(u)], join=join)\n\n # errors for incorrect lengths\n rgx = r"If `others` contains arrays or lists \(or other list-likes.*"\n z = Series(["1", "2", "3"]).values\n\n # unindexed object of wrong length\n with pytest.raises(ValueError, match=rgx):\n s.str.cat(z, join=join)\n\n # unindexed object of wrong length in list\n with pytest.raises(ValueError, match=rgx):\n s.str.cat([t, z], join=join)\n\n\ndef test_str_cat_all_na(index_or_series, index_or_series2):\n # GH 24044\n box = index_or_series\n other = index_or_series2\n\n # check that all NaNs in caller / target work\n s = Index(["a", "b", "c", "d"])\n s = s if box == Index else Series(s, index=s)\n t = other([np.nan] * 4, dtype=object)\n # add index of s for alignment\n t = t if other == Index else Series(t, index=s)\n\n # all-NA target\n if box == Series:\n expected = Series([np.nan] * 4, index=s.index, dtype=s.dtype)\n else: # box == Index\n # TODO: Strimg option, this should return string dtype\n expected = Index([np.nan] * 4, dtype=object)\n result = s.str.cat(t, join="left")\n tm.assert_equal(result, expected)\n\n # all-NA caller (only for Series)\n if other == Series:\n expected = Series([np.nan] * 4, dtype=object, index=t.index)\n result = t.str.cat(s, join="left")\n tm.assert_series_equal(result, expected)\n\n\ndef test_str_cat_special_cases():\n s = Series(["a", "b", "c", "d"])\n t = Series(["d", "a", "e", "b"], index=[3, 0, 4, 1])\n\n # iterator of elements with different types\n expected = Series(["aaa", "bbb", "c-c", "ddd", "-e-"])\n result = s.str.cat(iter([t, s.values]), join="outer", na_rep="-")\n tm.assert_series_equal(result, expected)\n\n # right-align with different indexes in others\n expected = Series(["aa-", "d-d"], index=[0, 3])\n result = s.str.cat([t.loc[[0]], t.loc[[3]]], join="right", na_rep="-")\n tm.assert_series_equal(result, expected)\n\n\ndef test_cat_on_filtered_index():\n df = DataFrame(\n index=MultiIndex.from_product(\n [[2011, 2012], [1, 
2, 3]], names=["year", "month"]\n )\n )\n\n df = df.reset_index()\n df = df[df.month > 1]\n\n str_year = df.year.astype("str")\n str_month = df.month.astype("str")\n str_both = str_year.str.cat(str_month, sep=" ")\n\n assert str_both.loc[1] == "2011 2"\n\n str_multiple = str_year.str.cat([str_month, str_month], sep=" ")\n\n assert str_multiple.loc[1] == "2011 2 2"\n\n\n@pytest.mark.parametrize("klass", [tuple, list, np.array, Series, Index])\ndef test_cat_different_classes(klass):\n # https://github.com/pandas-dev/pandas/issues/33425\n s = Series(["a", "b", "c"])\n result = s.str.cat(klass(["x", "y", "z"]))\n expected = Series(["ax", "by", "cz"])\n tm.assert_series_equal(result, expected)\n\n\ndef test_cat_on_series_dot_str():\n # GH 28277\n ps = Series(["AbC", "de", "FGHI", "j", "kLLLm"])\n\n message = re.escape(\n "others must be Series, Index, DataFrame, np.ndarray "\n "or list-like (either containing only strings or "\n "containing only objects of type Series/Index/"\n "np.ndarray[1-dim])"\n )\n with pytest.raises(TypeError, match=message):\n ps.str.cat(others=ps.str)\n | .venv\Lib\site-packages\pandas\tests\strings\test_cat.py | test_cat.py | Python | 13,575 | 0.95 | 0.086651 | 0.172619 | react-lib | 347 | 2025-03-30T06:50:35.326201 | GPL-3.0 | true | d716daeeafba0eeed561c4e771c98b12 |
from datetime import datetime\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.dtypes import ArrowDtype\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n _testing as tm,\n)\n\n\ndef test_extract_expand_kwarg_wrong_type_raises(any_string_dtype):\n # TODO: should this raise TypeError\n values = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype)\n with pytest.raises(ValueError, match="expand must be True or False"):\n values.str.extract(".*(BAD[_]+).*(BAD)", expand=None)\n\n\ndef test_extract_expand_kwarg(any_string_dtype):\n s = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype)\n expected = DataFrame(["BAD__", np.nan, np.nan], dtype=any_string_dtype)\n\n result = s.str.extract(".*(BAD[_]+).*")\n tm.assert_frame_equal(result, expected)\n\n result = s.str.extract(".*(BAD[_]+).*", expand=True)\n tm.assert_frame_equal(result, expected)\n\n expected = DataFrame(\n [["BAD__", "BAD"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype\n )\n result = s.str.extract(".*(BAD[_]+).*(BAD)", expand=False)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_extract_expand_False_mixed_object():\n ser = Series(\n ["aBAD_BAD", np.nan, "BAD_b_BAD", True, datetime.today(), "foo", None, 1, 2.0]\n )\n\n # two groups\n result = ser.str.extract(".*(BAD[_]+).*(BAD)", expand=False)\n er = [np.nan, np.nan] # empty row\n expected = DataFrame(\n [["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er], dtype=object\n )\n tm.assert_frame_equal(result, expected)\n\n # single group\n result = ser.str.extract(".*(BAD[_]+).*BAD", expand=False)\n expected = Series(\n ["BAD_", np.nan, "BAD_", np.nan, np.nan, np.nan, None, np.nan, np.nan],\n dtype=object,\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_extract_expand_index_raises():\n # GH9980\n # Index only works with one regex group since\n # multi-group would expand to a frame\n idx = Index(["A1", "A2", "A3", "A4", "B5"])\n msg = "only one 
regex group is supported with Index"\n with pytest.raises(ValueError, match=msg):\n idx.str.extract("([AB])([123])", expand=False)\n\n\ndef test_extract_expand_no_capture_groups_raises(index_or_series, any_string_dtype):\n s_or_idx = index_or_series(["A1", "B2", "C3"], dtype=any_string_dtype)\n msg = "pattern contains no capture groups"\n\n # no groups\n with pytest.raises(ValueError, match=msg):\n s_or_idx.str.extract("[ABC][123]", expand=False)\n\n # only non-capturing groups\n with pytest.raises(ValueError, match=msg):\n s_or_idx.str.extract("(?:[AB]).*", expand=False)\n\n\ndef test_extract_expand_single_capture_group(index_or_series, any_string_dtype):\n # single group renames series/index properly\n s_or_idx = index_or_series(["A1", "A2"], dtype=any_string_dtype)\n result = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=False)\n\n expected = index_or_series(["A", "A"], name="uno", dtype=any_string_dtype)\n if index_or_series == Series:\n tm.assert_series_equal(result, expected)\n else:\n tm.assert_index_equal(result, expected)\n\n\ndef test_extract_expand_capture_groups(any_string_dtype):\n s = Series(["A1", "B2", "C3"], dtype=any_string_dtype)\n # one group, no matches\n result = s.str.extract("(_)", expand=False)\n expected = Series([np.nan, np.nan, np.nan], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n # two groups, no matches\n result = s.str.extract("(_)(_)", expand=False)\n expected = DataFrame(\n [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, expected)\n\n # one group, some matches\n result = s.str.extract("([AB])[123]", expand=False)\n expected = Series(["A", "B", np.nan], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n # two groups, some matches\n result = s.str.extract("([AB])([123])", expand=False)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, 
expected)\n\n # one named group\n result = s.str.extract("(?P<letter>[AB])", expand=False)\n expected = Series(["A", "B", np.nan], name="letter", dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n # two named groups\n result = s.str.extract("(?P<letter>[AB])(?P<number>[123])", expand=False)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], [np.nan, np.nan]],\n columns=["letter", "number"],\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n # mix named and unnamed groups\n result = s.str.extract("([AB])(?P<number>[123])", expand=False)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], [np.nan, np.nan]],\n columns=[0, "number"],\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n # one normal group, one non-capturing group\n result = s.str.extract("([AB])(?:[123])", expand=False)\n expected = Series(["A", "B", np.nan], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n # two normal groups, one non-capturing group\n s = Series(["A11", "B22", "C33"], dtype=any_string_dtype)\n result = s.str.extract("([AB])([123])(?:[123])", expand=False)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, expected)\n\n # one optional group followed by one normal group\n s = Series(["A1", "B2", "3"], dtype=any_string_dtype)\n result = s.str.extract("(?P<letter>[AB])?(?P<number>[123])", expand=False)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], [np.nan, "3"]],\n columns=["letter", "number"],\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n # one normal group followed by one optional group\n s = Series(["A1", "B2", "C"], dtype=any_string_dtype)\n result = s.str.extract("(?P<letter>[ABC])(?P<number>[123])?", expand=False)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], ["C", np.nan]],\n columns=["letter", "number"],\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, 
expected)\n\n\ndef test_extract_expand_capture_groups_index(index, any_string_dtype):\n # https://github.com/pandas-dev/pandas/issues/6348\n # not passing index to the extractor\n data = ["A1", "B2", "C"]\n\n if len(index) == 0:\n pytest.skip("Test requires len(index) > 0")\n while len(index) < len(data):\n index = index.repeat(2)\n\n index = index[: len(data)]\n ser = Series(data, index=index, dtype=any_string_dtype)\n\n result = ser.str.extract(r"(\d)", expand=False)\n expected = Series(["1", "2", np.nan], index=index, dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n result = ser.str.extract(r"(?P<letter>\D)(?P<number>\d)?", expand=False)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], ["C", np.nan]],\n columns=["letter", "number"],\n index=index,\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_extract_single_series_name_is_preserved(any_string_dtype):\n s = Series(["a3", "b3", "c2"], name="bob", dtype=any_string_dtype)\n result = s.str.extract(r"(?P<sue>[a-z])", expand=False)\n expected = Series(["a", "b", "c"], name="sue", dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_extract_expand_True(any_string_dtype):\n # Contains tests like those in test_match and some others.\n s = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype)\n\n result = s.str.extract(".*(BAD[_]+).*(BAD)", expand=True)\n expected = DataFrame(\n [["BAD__", "BAD"], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_extract_expand_True_mixed_object():\n er = [np.nan, np.nan] # empty row\n mixed = Series(\n [\n "aBAD_BAD",\n np.nan,\n "BAD_b_BAD",\n True,\n datetime.today(),\n "foo",\n None,\n 1,\n 2.0,\n ]\n )\n\n result = mixed.str.extract(".*(BAD[_]+).*(BAD)", expand=True)\n expected = DataFrame(\n [["BAD_", "BAD"], er, ["BAD_", "BAD"], er, er, er, er, er, er], dtype=object\n )\n tm.assert_frame_equal(result, 
expected)\n\n\ndef test_extract_expand_True_single_capture_group_raises(\n index_or_series, any_string_dtype\n):\n # these should work for both Series and Index\n # no groups\n s_or_idx = index_or_series(["A1", "B2", "C3"], dtype=any_string_dtype)\n msg = "pattern contains no capture groups"\n with pytest.raises(ValueError, match=msg):\n s_or_idx.str.extract("[ABC][123]", expand=True)\n\n # only non-capturing groups\n with pytest.raises(ValueError, match=msg):\n s_or_idx.str.extract("(?:[AB]).*", expand=True)\n\n\ndef test_extract_expand_True_single_capture_group(index_or_series, any_string_dtype):\n # single group renames series/index properly\n s_or_idx = index_or_series(["A1", "A2"], dtype=any_string_dtype)\n result = s_or_idx.str.extract(r"(?P<uno>A)\d", expand=True)\n expected = DataFrame({"uno": ["A", "A"]}, dtype=any_string_dtype)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("name", [None, "series_name"])\ndef test_extract_series(name, any_string_dtype):\n # extract should give the same result whether or not the series has a name.\n s = Series(["A1", "B2", "C3"], name=name, dtype=any_string_dtype)\n\n # one group, no matches\n result = s.str.extract("(_)", expand=True)\n expected = DataFrame([np.nan, np.nan, np.nan], dtype=any_string_dtype)\n tm.assert_frame_equal(result, expected)\n\n # two groups, no matches\n result = s.str.extract("(_)(_)", expand=True)\n expected = DataFrame(\n [[np.nan, np.nan], [np.nan, np.nan], [np.nan, np.nan]], dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, expected)\n\n # one group, some matches\n result = s.str.extract("([AB])[123]", expand=True)\n expected = DataFrame(["A", "B", np.nan], dtype=any_string_dtype)\n tm.assert_frame_equal(result, expected)\n\n # two groups, some matches\n result = s.str.extract("([AB])([123])", expand=True)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, expected)\n\n # one named 
group\n result = s.str.extract("(?P<letter>[AB])", expand=True)\n expected = DataFrame({"letter": ["A", "B", np.nan]}, dtype=any_string_dtype)\n tm.assert_frame_equal(result, expected)\n\n # two named groups\n result = s.str.extract("(?P<letter>[AB])(?P<number>[123])", expand=True)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], [np.nan, np.nan]],\n columns=["letter", "number"],\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n # mix named and unnamed groups\n result = s.str.extract("([AB])(?P<number>[123])", expand=True)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], [np.nan, np.nan]],\n columns=[0, "number"],\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n # one normal group, one non-capturing group\n result = s.str.extract("([AB])(?:[123])", expand=True)\n expected = DataFrame(["A", "B", np.nan], dtype=any_string_dtype)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_extract_optional_groups(any_string_dtype):\n # two normal groups, one non-capturing group\n s = Series(["A11", "B22", "C33"], dtype=any_string_dtype)\n result = s.str.extract("([AB])([123])(?:[123])", expand=True)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], [np.nan, np.nan]], dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, expected)\n\n # one optional group followed by one normal group\n s = Series(["A1", "B2", "3"], dtype=any_string_dtype)\n result = s.str.extract("(?P<letter>[AB])?(?P<number>[123])", expand=True)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], [np.nan, "3"]],\n columns=["letter", "number"],\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n # one normal group followed by one optional group\n s = Series(["A1", "B2", "C"], dtype=any_string_dtype)\n result = s.str.extract("(?P<letter>[ABC])(?P<number>[123])?", expand=True)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], ["C", np.nan]],\n columns=["letter", "number"],\n dtype=any_string_dtype,\n )\n 
tm.assert_frame_equal(result, expected)\n\n\ndef test_extract_dataframe_capture_groups_index(index, any_string_dtype):\n # GH6348\n # not passing index to the extractor\n\n data = ["A1", "B2", "C"]\n\n if len(index) < len(data):\n pytest.skip(f"Index needs more than {len(data)} values")\n\n index = index[: len(data)]\n s = Series(data, index=index, dtype=any_string_dtype)\n\n result = s.str.extract(r"(\d)", expand=True)\n expected = DataFrame(["1", "2", np.nan], index=index, dtype=any_string_dtype)\n tm.assert_frame_equal(result, expected)\n\n result = s.str.extract(r"(?P<letter>\D)(?P<number>\d)?", expand=True)\n expected = DataFrame(\n [["A", "1"], ["B", "2"], ["C", np.nan]],\n columns=["letter", "number"],\n index=index,\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_extract_single_group_returns_frame(any_string_dtype):\n # GH11386 extract should always return DataFrame, even when\n # there is only one group. Prior to v0.18.0, extract returned\n # Series when there was only one group in the regex.\n s = Series(["a3", "b3", "c2"], name="series_name", dtype=any_string_dtype)\n result = s.str.extract(r"(?P<letter>[a-z])", expand=True)\n expected = DataFrame({"letter": ["a", "b", "c"]}, dtype=any_string_dtype)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_extractall(any_string_dtype):\n data = [\n "dave@google.com",\n "tdhock5@gmail.com",\n "maudelaperriere@gmail.com",\n "rob@gmail.com some text steve@gmail.com",\n "a@b.com some text c@d.com and e@f.com",\n np.nan,\n "",\n ]\n expected_tuples = [\n ("dave", "google", "com"),\n ("tdhock5", "gmail", "com"),\n ("maudelaperriere", "gmail", "com"),\n ("rob", "gmail", "com"),\n ("steve", "gmail", "com"),\n ("a", "b", "com"),\n ("c", "d", "com"),\n ("e", "f", "com"),\n ]\n pat = r"""\n (?P<user>[a-z0-9]+)\n @\n (?P<domain>[a-z]+)\n \.\n (?P<tld>[a-z]{2,4})\n """\n expected_columns = ["user", "domain", "tld"]\n s = Series(data, dtype=any_string_dtype)\n # extractall should 
return a DataFrame with one row for each match, indexed by the\n # subject from which the match came.\n expected_index = MultiIndex.from_tuples(\n [(0, 0), (1, 0), (2, 0), (3, 0), (3, 1), (4, 0), (4, 1), (4, 2)],\n names=(None, "match"),\n )\n expected = DataFrame(\n expected_tuples, expected_index, expected_columns, dtype=any_string_dtype\n )\n result = s.str.extractall(pat, flags=re.VERBOSE)\n tm.assert_frame_equal(result, expected)\n\n # The index of the input Series should be used to construct the index of the output\n # DataFrame:\n mi = MultiIndex.from_tuples(\n [\n ("single", "Dave"),\n ("single", "Toby"),\n ("single", "Maude"),\n ("multiple", "robAndSteve"),\n ("multiple", "abcdef"),\n ("none", "missing"),\n ("none", "empty"),\n ]\n )\n s = Series(data, index=mi, dtype=any_string_dtype)\n expected_index = MultiIndex.from_tuples(\n [\n ("single", "Dave", 0),\n ("single", "Toby", 0),\n ("single", "Maude", 0),\n ("multiple", "robAndSteve", 0),\n ("multiple", "robAndSteve", 1),\n ("multiple", "abcdef", 0),\n ("multiple", "abcdef", 1),\n ("multiple", "abcdef", 2),\n ],\n names=(None, None, "match"),\n )\n expected = DataFrame(\n expected_tuples, expected_index, expected_columns, dtype=any_string_dtype\n )\n result = s.str.extractall(pat, flags=re.VERBOSE)\n tm.assert_frame_equal(result, expected)\n\n # MultiIndexed subject with names.\n s = Series(data, index=mi, dtype=any_string_dtype)\n s.index.names = ("matches", "description")\n expected_index.names = ("matches", "description", "match")\n expected = DataFrame(\n expected_tuples, expected_index, expected_columns, dtype=any_string_dtype\n )\n result = s.str.extractall(pat, flags=re.VERBOSE)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "pat,expected_names",\n [\n # optional groups.\n ("(?P<letter>[AB])?(?P<number>[123])", ["letter", "number"]),\n # only one of two groups has a name.\n ("([AB])?(?P<number>[123])", [0, "number"]),\n ],\n)\ndef test_extractall_column_names(pat, 
expected_names, any_string_dtype):\n s = Series(["", "A1", "32"], dtype=any_string_dtype)\n\n result = s.str.extractall(pat)\n expected = DataFrame(\n [("A", "1"), (np.nan, "3"), (np.nan, "2")],\n index=MultiIndex.from_tuples([(1, 0), (2, 0), (2, 1)], names=(None, "match")),\n columns=expected_names,\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_extractall_single_group(any_string_dtype):\n s = Series(["a3", "b3", "d4c2"], name="series_name", dtype=any_string_dtype)\n expected_index = MultiIndex.from_tuples(\n [(0, 0), (1, 0), (2, 0), (2, 1)], names=(None, "match")\n )\n\n # extractall(one named group) returns DataFrame with one named column.\n result = s.str.extractall(r"(?P<letter>[a-z])")\n expected = DataFrame(\n {"letter": ["a", "b", "d", "c"]}, index=expected_index, dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, expected)\n\n # extractall(one un-named group) returns DataFrame with one un-named column.\n result = s.str.extractall(r"([a-z])")\n expected = DataFrame(\n ["a", "b", "d", "c"], index=expected_index, dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_extractall_single_group_with_quantifier(any_string_dtype):\n # GH#13382\n # extractall(one un-named group with quantifier) returns DataFrame with one un-named\n # column.\n s = Series(["ab3", "abc3", "d4cd2"], name="series_name", dtype=any_string_dtype)\n result = s.str.extractall(r"([a-z]+)")\n expected = DataFrame(\n ["ab", "abc", "d", "cd"],\n index=MultiIndex.from_tuples(\n [(0, 0), (1, 0), (2, 0), (2, 1)], names=(None, "match")\n ),\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data, names",\n [\n ([], (None,)),\n ([], ("i1",)),\n ([], (None, "i2")),\n ([], ("i1", "i2")),\n (["a3", "b3", "d4c2"], (None,)),\n (["a3", "b3", "d4c2"], ("i1", "i2")),\n (["a3", "b3", "d4c2"], (None, "i2")),\n (["a3", "b3", "d4c2"], ("i1", "i2")),\n ],\n)\ndef 
test_extractall_no_matches(data, names, any_string_dtype):\n # GH19075 extractall with no matches should return a valid MultiIndex\n n = len(data)\n if len(names) == 1:\n index = Index(range(n), name=names[0])\n else:\n tuples = (tuple([i] * (n - 1)) for i in range(n))\n index = MultiIndex.from_tuples(tuples, names=names)\n s = Series(data, name="series_name", index=index, dtype=any_string_dtype)\n expected_index = MultiIndex.from_tuples([], names=(names + ("match",)))\n\n # one un-named group.\n result = s.str.extractall("(z)")\n expected = DataFrame(columns=[0], index=expected_index, dtype=any_string_dtype)\n tm.assert_frame_equal(result, expected)\n\n # two un-named groups.\n result = s.str.extractall("(z)(z)")\n expected = DataFrame(columns=[0, 1], index=expected_index, dtype=any_string_dtype)\n tm.assert_frame_equal(result, expected)\n\n # one named group.\n result = s.str.extractall("(?P<first>z)")\n expected = DataFrame(\n columns=["first"], index=expected_index, dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, expected)\n\n # two named groups.\n result = s.str.extractall("(?P<first>z)(?P<second>z)")\n expected = DataFrame(\n columns=["first", "second"], index=expected_index, dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, expected)\n\n # one named, one un-named.\n result = s.str.extractall("(z)(?P<second>z)")\n expected = DataFrame(\n columns=[0, "second"], index=expected_index, dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_extractall_stringindex(any_string_dtype):\n s = Series(["a1a2", "b1", "c1"], name="xxx", dtype=any_string_dtype)\n result = s.str.extractall(r"[ab](?P<digit>\d)")\n expected = DataFrame(\n {"digit": ["1", "2", "1"]},\n index=MultiIndex.from_tuples([(0, 0), (0, 1), (1, 0)], names=[None, "match"]),\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n # index should return the same result as the default index without name thus\n # index.name doesn't affect to 
the result\n if any_string_dtype == "object":\n for idx in [\n Index(["a1a2", "b1", "c1"], dtype=object),\n Index(["a1a2", "b1", "c1"], name="xxx", dtype=object),\n ]:\n result = idx.str.extractall(r"[ab](?P<digit>\d)")\n tm.assert_frame_equal(result, expected)\n\n s = Series(\n ["a1a2", "b1", "c1"],\n name="s_name",\n index=Index(["XX", "yy", "zz"], name="idx_name"),\n dtype=any_string_dtype,\n )\n result = s.str.extractall(r"[ab](?P<digit>\d)")\n expected = DataFrame(\n {"digit": ["1", "2", "1"]},\n index=MultiIndex.from_tuples(\n [("XX", 0), ("XX", 1), ("yy", 0)], names=["idx_name", "match"]\n ),\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_extractall_no_capture_groups_raises(any_string_dtype):\n # Does not make sense to use extractall with a regex that has no capture groups.\n # (it returns DataFrame with one column for each capture group)\n s = Series(["a3", "b3", "d4c2"], name="series_name", dtype=any_string_dtype)\n with pytest.raises(ValueError, match="no capture groups"):\n s.str.extractall(r"[a-z]")\n\n\ndef test_extract_index_one_two_groups():\n s = Series(["a3", "b3", "d4c2"], index=["A3", "B3", "D4"], name="series_name")\n r = s.index.str.extract(r"([A-Z])", expand=True)\n e = DataFrame(["A", "B", "D"])\n tm.assert_frame_equal(r, e)\n\n # Prior to v0.18.0, index.str.extract(regex with one group)\n # returned Index. With more than one group, extract raised an\n # error (GH9980). 
Now extract always returns DataFrame.\n r = s.index.str.extract(r"(?P<letter>[A-Z])(?P<digit>[0-9])", expand=True)\n e_list = [("A", "3"), ("B", "3"), ("D", "4")]\n e = DataFrame(e_list, columns=["letter", "digit"])\n tm.assert_frame_equal(r, e)\n\n\ndef test_extractall_same_as_extract(any_string_dtype):\n s = Series(["a3", "b3", "c2"], name="series_name", dtype=any_string_dtype)\n\n pattern_two_noname = r"([a-z])([0-9])"\n extract_two_noname = s.str.extract(pattern_two_noname, expand=True)\n has_multi_index = s.str.extractall(pattern_two_noname)\n no_multi_index = has_multi_index.xs(0, level="match")\n tm.assert_frame_equal(extract_two_noname, no_multi_index)\n\n pattern_two_named = r"(?P<letter>[a-z])(?P<digit>[0-9])"\n extract_two_named = s.str.extract(pattern_two_named, expand=True)\n has_multi_index = s.str.extractall(pattern_two_named)\n no_multi_index = has_multi_index.xs(0, level="match")\n tm.assert_frame_equal(extract_two_named, no_multi_index)\n\n pattern_one_named = r"(?P<group_name>[a-z])"\n extract_one_named = s.str.extract(pattern_one_named, expand=True)\n has_multi_index = s.str.extractall(pattern_one_named)\n no_multi_index = has_multi_index.xs(0, level="match")\n tm.assert_frame_equal(extract_one_named, no_multi_index)\n\n pattern_one_noname = r"([a-z])"\n extract_one_noname = s.str.extract(pattern_one_noname, expand=True)\n has_multi_index = s.str.extractall(pattern_one_noname)\n no_multi_index = has_multi_index.xs(0, level="match")\n tm.assert_frame_equal(extract_one_noname, no_multi_index)\n\n\ndef test_extractall_same_as_extract_subject_index(any_string_dtype):\n # same as above tests, but s has an MultiIndex.\n mi = MultiIndex.from_tuples(\n [("A", "first"), ("B", "second"), ("C", "third")],\n names=("capital", "ordinal"),\n )\n s = Series(["a3", "b3", "c2"], index=mi, name="series_name", dtype=any_string_dtype)\n\n pattern_two_noname = r"([a-z])([0-9])"\n extract_two_noname = s.str.extract(pattern_two_noname, expand=True)\n has_match_index = 
s.str.extractall(pattern_two_noname)\n no_match_index = has_match_index.xs(0, level="match")\n tm.assert_frame_equal(extract_two_noname, no_match_index)\n\n pattern_two_named = r"(?P<letter>[a-z])(?P<digit>[0-9])"\n extract_two_named = s.str.extract(pattern_two_named, expand=True)\n has_match_index = s.str.extractall(pattern_two_named)\n no_match_index = has_match_index.xs(0, level="match")\n tm.assert_frame_equal(extract_two_named, no_match_index)\n\n pattern_one_named = r"(?P<group_name>[a-z])"\n extract_one_named = s.str.extract(pattern_one_named, expand=True)\n has_match_index = s.str.extractall(pattern_one_named)\n no_match_index = has_match_index.xs(0, level="match")\n tm.assert_frame_equal(extract_one_named, no_match_index)\n\n pattern_one_noname = r"([a-z])"\n extract_one_noname = s.str.extract(pattern_one_noname, expand=True)\n has_match_index = s.str.extractall(pattern_one_noname)\n no_match_index = has_match_index.xs(0, level="match")\n tm.assert_frame_equal(extract_one_noname, no_match_index)\n\n\ndef test_extractall_preserves_dtype():\n # Ensure that when extractall is called on a series with specific dtypes set, that\n # the dtype is preserved in the resulting DataFrame's column.\n pa = pytest.importorskip("pyarrow")\n\n result = Series(["abc", "ab"], dtype=ArrowDtype(pa.string())).str.extractall("(ab)")\n assert result.dtypes[0] == "string[pyarrow]"\n | .venv\Lib\site-packages\pandas\tests\strings\test_extract.py | test_extract.py | Python | 26,463 | 0.95 | 0.053867 | 0.119403 | react-lib | 521 | 2024-07-06T08:19:49.572035 | BSD-3-Clause | true | fc516068ade8a164f7246c61bc846cba |
from datetime import datetime\nimport re\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n Series,\n _testing as tm,\n)\nfrom pandas.tests.strings import (\n _convert_na_value,\n is_object_or_nan_string_dtype,\n)\n\n# --------------------------------------------------------------------------------------\n# str.contains\n# --------------------------------------------------------------------------------------\n\n\ndef test_contains(any_string_dtype):\n values = np.array(\n ["foo", np.nan, "fooommm__foo", "mmm_", "foommm[_]+bar"], dtype=np.object_\n )\n values = Series(values, dtype=any_string_dtype)\n pat = "mmm[_]+"\n\n result = values.str.contains(pat)\n if any_string_dtype == "str":\n # NaN propagates as False\n expected = Series([False, False, True, True, False], dtype=bool)\n else:\n expected_dtype = (\n "object" if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series(\n np.array([False, np.nan, True, True, False], dtype=np.object_),\n dtype=expected_dtype,\n )\n\n tm.assert_series_equal(result, expected)\n\n result = values.str.contains(pat, regex=False)\n if any_string_dtype == "str":\n expected = Series([False, False, False, False, True], dtype=bool)\n else:\n expected = Series(\n np.array([False, np.nan, False, False, True], dtype=np.object_),\n dtype=expected_dtype,\n )\n tm.assert_series_equal(result, expected)\n\n values = Series(\n np.array(["foo", "xyz", "fooommm__foo", "mmm_"], dtype=object),\n dtype=any_string_dtype,\n )\n result = values.str.contains(pat)\n expected_dtype = (\n np.bool_ if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series(np.array([False, False, True, True]), dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n # case insensitive using regex\n values = Series(\n np.array(["Foo", "xYz", "fOOomMm__fOo", "MMM_"], dtype=object),\n dtype=any_string_dtype,\n )\n\n result = 
values.str.contains("FOO|mmm", case=False)\n expected = Series(np.array([True, False, True, True]), dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n # case insensitive without regex\n result = values.str.contains("foo", regex=False, case=False)\n expected = Series(np.array([True, False, True, False]), dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n # unicode\n values = Series(\n np.array(["foo", np.nan, "fooommm__foo", "mmm_"], dtype=np.object_),\n dtype=any_string_dtype,\n )\n pat = "mmm[_]+"\n\n result = values.str.contains(pat)\n if any_string_dtype == "str":\n expected = Series([False, False, True, True], dtype=bool)\n else:\n expected_dtype = (\n "object" if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series(\n np.array([False, np.nan, True, True], dtype=np.object_),\n dtype=expected_dtype,\n )\n tm.assert_series_equal(result, expected)\n\n result = values.str.contains(pat, na=False)\n expected_dtype = (\n np.bool_ if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series(np.array([False, False, True, True]), dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n values = Series(\n np.array(["foo", "xyz", "fooommm__foo", "mmm_"], dtype=np.object_),\n dtype=any_string_dtype,\n )\n result = values.str.contains(pat)\n expected = Series(np.array([False, False, True, True]), dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_contains_object_mixed():\n mixed = Series(\n np.array(\n ["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],\n dtype=object,\n )\n )\n result = mixed.str.contains("o")\n expected = Series(\n np.array(\n [False, np.nan, False, np.nan, np.nan, True, None, np.nan, np.nan],\n dtype=np.object_,\n )\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_contains_na_kwarg_for_object_category():\n # gh 22158\n\n # na for category\n values = Series(["a", "b", "c", "a", np.nan], 
dtype="category")\n result = values.str.contains("a", na=True)\n expected = Series([True, False, False, True, True])\n tm.assert_series_equal(result, expected)\n\n result = values.str.contains("a", na=False)\n expected = Series([True, False, False, True, False])\n tm.assert_series_equal(result, expected)\n\n # na for objects\n values = Series(["a", "b", "c", "a", np.nan])\n result = values.str.contains("a", na=True)\n expected = Series([True, False, False, True, True])\n tm.assert_series_equal(result, expected)\n\n result = values.str.contains("a", na=False)\n expected = Series([True, False, False, True, False])\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "na, expected",\n [\n (None, pd.NA),\n (True, True),\n (False, False),\n (0, False),\n (3, True),\n (np.nan, pd.NA),\n ],\n)\n@pytest.mark.parametrize("regex", [True, False])\ndef test_contains_na_kwarg_for_nullable_string_dtype(\n nullable_string_dtype, na, expected, regex\n):\n # https://github.com/pandas-dev/pandas/pull/41025#issuecomment-824062416\n\n values = Series(["a", "b", "c", "a", np.nan], dtype=nullable_string_dtype)\n\n msg = (\n "Allowing a non-bool 'na' in obj.str.contains is deprecated and "\n "will raise in a future version"\n )\n warn = None\n if not pd.isna(na) and not isinstance(na, bool):\n warn = FutureWarning\n with tm.assert_produces_warning(warn, match=msg):\n result = values.str.contains("a", na=na, regex=regex)\n expected = Series([True, False, False, True, expected], dtype="boolean")\n tm.assert_series_equal(result, expected)\n\n\ndef test_contains_moar(any_string_dtype):\n # PR #1179\n s = Series(\n ["A", "B", "C", "Aaba", "Baca", "", np.nan, "CABA", "dog", "cat"],\n dtype=any_string_dtype,\n )\n\n result = s.str.contains("a")\n if any_string_dtype == "str":\n # NaN propagates as False\n expected_dtype = bool\n na_value = False\n else:\n expected_dtype = (\n "object" if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n na_value = 
np.nan\n expected = Series(\n [False, False, False, True, True, False, na_value, False, False, True],\n dtype=expected_dtype,\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.contains("a", case=False)\n expected = Series(\n [True, False, False, True, True, False, na_value, True, False, True],\n dtype=expected_dtype,\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.contains("Aa")\n expected = Series(\n [False, False, False, True, False, False, na_value, False, False, False],\n dtype=expected_dtype,\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.contains("ba")\n expected = Series(\n [False, False, False, True, False, False, na_value, False, False, False],\n dtype=expected_dtype,\n )\n tm.assert_series_equal(result, expected)\n\n result = s.str.contains("ba", case=False)\n expected = Series(\n [False, False, False, True, True, False, na_value, True, False, False],\n dtype=expected_dtype,\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_contains_nan(any_string_dtype):\n # PR #14171\n s = Series([np.nan, np.nan, np.nan], dtype=any_string_dtype)\n\n result = s.str.contains("foo", na=False)\n expected_dtype = (\n np.bool_ if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series([False, False, False], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n result = s.str.contains("foo", na=True)\n expected = Series([True, True, True], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n # TODO(infer_string)\n # this particular combination of events is broken on 2.3\n # would require cherry picking #58483, which in turn requires #57481\n # which introduce many behavioral changes\n if not (\n hasattr(any_string_dtype, "storage")\n and any_string_dtype.storage == "python"\n and any_string_dtype.na_value is np.nan\n ):\n msg = (\n "Allowing a non-bool 'na' in obj.str.contains is deprecated and "\n "will raise in a future version"\n )\n with 
tm.assert_produces_warning(FutureWarning, match=msg):\n result = s.str.contains("foo", na="foo")\n if any_string_dtype == "object":\n expected = Series(["foo", "foo", "foo"], dtype=np.object_)\n elif any_string_dtype.na_value is np.nan:\n expected = Series([True, True, True], dtype=np.bool_)\n else:\n expected = Series([True, True, True], dtype="boolean")\n tm.assert_series_equal(result, expected)\n\n result = s.str.contains("foo")\n if any_string_dtype == "str":\n # NaN propagates as False\n expected = Series([False, False, False], dtype=bool)\n else:\n expected_dtype = (\n "object" if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series([np.nan, np.nan, np.nan], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n\n# --------------------------------------------------------------------------------------\n# str.startswith\n# --------------------------------------------------------------------------------------\n\n\ndef test_startswith_endswith_validate_na(request, any_string_dtype):\n if (\n any_string_dtype == "string"\n and any_string_dtype.na_value is np.nan\n and any_string_dtype.storage == "python"\n ):\n request.applymarker(pytest.mark.xfail(reason="TODO(infer_string)"))\n # GH#59615\n ser = Series(\n ["om", np.nan, "foo_nom", "nom", "bar_foo", np.nan, "foo"],\n dtype=any_string_dtype,\n )\n\n msg = "Allowing a non-bool 'na' in obj.str.startswith is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n ser.str.startswith("kapow", na="baz")\n msg = "Allowing a non-bool 'na' in obj.str.endswith is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n ser.str.endswith("bar", na="baz")\n\n\n@pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning")\n@pytest.mark.parametrize("pat", ["foo", ("foo", "baz")])\n@pytest.mark.parametrize("dtype", ["object", "category"])\n@pytest.mark.parametrize("null_value", [None, np.nan, 
pd.NA])\n@pytest.mark.parametrize("na", [True, False])\ndef test_startswith(pat, dtype, null_value, na, using_infer_string):\n # add category dtype parametrizations for GH-36241\n values = Series(\n ["om", null_value, "foo_nom", "nom", "bar_foo", null_value, "foo"],\n dtype=dtype,\n )\n\n result = values.str.startswith(pat)\n exp = Series([False, np.nan, True, False, False, np.nan, True])\n if dtype == "object" and null_value is pd.NA:\n # GH#18463\n exp = exp.fillna(null_value)\n elif dtype == "object" and null_value is None:\n exp[exp.isna()] = None\n elif using_infer_string and dtype == "category":\n exp = exp.fillna(False).astype(bool)\n tm.assert_series_equal(result, exp)\n\n result = values.str.startswith(pat, na=na)\n exp = Series([False, na, True, False, False, na, True])\n tm.assert_series_equal(result, exp)\n\n # mixed\n mixed = np.array(\n ["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],\n dtype=np.object_,\n )\n rs = Series(mixed).str.startswith("f")\n xp = Series([False, np.nan, False, np.nan, np.nan, True, None, np.nan, np.nan])\n tm.assert_series_equal(rs, xp)\n\n\n@pytest.mark.parametrize("na", [None, True, False])\ndef test_startswith_string_dtype(any_string_dtype, na):\n values = Series(\n ["om", None, "foo_nom", "nom", "bar_foo", None, "foo", "regex", "rege."],\n dtype=any_string_dtype,\n )\n result = values.str.startswith("foo", na=na)\n\n expected_dtype = (\n (object if na is None else bool)\n if is_object_or_nan_string_dtype(any_string_dtype)\n else "boolean"\n )\n if any_string_dtype == "str":\n # NaN propagates as False\n expected_dtype = bool\n if na is None:\n na = False\n exp = Series(\n [False, na, True, False, False, na, True, False, False], dtype=expected_dtype\n )\n tm.assert_series_equal(result, exp)\n\n result = values.str.startswith("rege.", na=na)\n exp = Series(\n [False, na, False, False, False, na, False, False, True], dtype=expected_dtype\n )\n tm.assert_series_equal(result, exp)\n\n\n# 
--------------------------------------------------------------------------------------\n# str.endswith\n# --------------------------------------------------------------------------------------\n\n\n@pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning")\n@pytest.mark.parametrize("pat", ["foo", ("foo", "baz")])\n@pytest.mark.parametrize("dtype", ["object", "category"])\n@pytest.mark.parametrize("null_value", [None, np.nan, pd.NA])\n@pytest.mark.parametrize("na", [True, False])\ndef test_endswith(pat, dtype, null_value, na, using_infer_string):\n # add category dtype parametrizations for GH-36241\n values = Series(\n ["om", null_value, "foo_nom", "nom", "bar_foo", null_value, "foo"],\n dtype=dtype,\n )\n\n result = values.str.endswith(pat)\n exp = Series([False, np.nan, False, False, True, np.nan, True])\n if dtype == "object" and null_value is pd.NA:\n # GH#18463\n exp = exp.fillna(null_value)\n elif dtype == "object" and null_value is None:\n exp[exp.isna()] = None\n elif using_infer_string and dtype == "category":\n exp = exp.fillna(False).astype(bool)\n tm.assert_series_equal(result, exp)\n\n result = values.str.endswith(pat, na=na)\n exp = Series([False, na, False, False, True, na, True])\n tm.assert_series_equal(result, exp)\n\n # mixed\n mixed = np.array(\n ["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],\n dtype=object,\n )\n rs = Series(mixed).str.endswith("f")\n xp = Series([False, np.nan, False, np.nan, np.nan, False, None, np.nan, np.nan])\n tm.assert_series_equal(rs, xp)\n\n\n@pytest.mark.parametrize("na", [None, True, False])\ndef test_endswith_string_dtype(any_string_dtype, na):\n values = Series(\n ["om", None, "foo_nom", "nom", "bar_foo", None, "foo", "regex", "rege."],\n dtype=any_string_dtype,\n )\n result = values.str.endswith("foo", na=na)\n expected_dtype = (\n (object if na is None else bool)\n if is_object_or_nan_string_dtype(any_string_dtype)\n else "boolean"\n )\n if any_string_dtype == "str":\n # 
NaN propagates as False\n expected_dtype = bool\n if na is None:\n na = False\n exp = Series(\n [False, na, False, False, True, na, True, False, False], dtype=expected_dtype\n )\n tm.assert_series_equal(result, exp)\n\n result = values.str.endswith("rege.", na=na)\n exp = Series(\n [False, na, False, False, False, na, False, False, True], dtype=expected_dtype\n )\n tm.assert_series_equal(result, exp)\n\n\n# --------------------------------------------------------------------------------------\n# str.replace\n# --------------------------------------------------------------------------------------\n\n\ndef test_replace(any_string_dtype):\n ser = Series(["fooBAD__barBAD", np.nan], dtype=any_string_dtype)\n\n result = ser.str.replace("BAD[_]*", "", regex=True)\n expected = Series(["foobar", np.nan], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_replace_max_replacements(any_string_dtype):\n ser = Series(["fooBAD__barBAD", np.nan], dtype=any_string_dtype)\n\n expected = Series(["foobarBAD", np.nan], dtype=any_string_dtype)\n result = ser.str.replace("BAD[_]*", "", n=1, regex=True)\n tm.assert_series_equal(result, expected)\n\n expected = Series(["foo__barBAD", np.nan], dtype=any_string_dtype)\n result = ser.str.replace("BAD", "", n=1, regex=False)\n tm.assert_series_equal(result, expected)\n\n\ndef test_replace_mixed_object():\n ser = Series(\n ["aBAD", np.nan, "bBAD", True, datetime.today(), "fooBAD", None, 1, 2.0]\n )\n result = Series(ser).str.replace("BAD[_]*", "", regex=True)\n expected = Series(\n ["a", np.nan, "b", np.nan, np.nan, "foo", None, np.nan, np.nan], dtype=object\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_replace_unicode(any_string_dtype):\n ser = Series([b"abcd,\xc3\xa0".decode("utf-8")], dtype=any_string_dtype)\n expected = Series([b"abcd, \xc3\xa0".decode("utf-8")], dtype=any_string_dtype)\n result = ser.str.replace(r"(?<=\w),(?=\w)", ", ", flags=re.UNICODE, regex=True)\n 
tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("repl", [None, 3, {"a": "b"}])\n@pytest.mark.parametrize("data", [["a", "b", None], ["a", "b", "c", "ad"]])\ndef test_replace_wrong_repl_type_raises(any_string_dtype, index_or_series, repl, data):\n # https://github.com/pandas-dev/pandas/issues/13438\n msg = "repl must be a string or callable"\n obj = index_or_series(data, dtype=any_string_dtype)\n with pytest.raises(TypeError, match=msg):\n obj.str.replace("a", repl)\n\n\ndef test_replace_callable(any_string_dtype):\n # GH 15055\n ser = Series(["fooBAD__barBAD", np.nan], dtype=any_string_dtype)\n\n # test with callable\n repl = lambda m: m.group(0).swapcase()\n result = ser.str.replace("[a-z][A-Z]{2}", repl, n=2, regex=True)\n expected = Series(["foObaD__baRbaD", np.nan], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "repl", [lambda: None, lambda m, x: None, lambda m, x, y=None: None]\n)\ndef test_replace_callable_raises(any_string_dtype, repl):\n # GH 15055\n values = Series(["fooBAD__barBAD", np.nan], dtype=any_string_dtype)\n\n # test with wrong number of arguments, raising an error\n msg = (\n r"((takes)|(missing)) (?(2)from \d+ to )?\d+ "\n r"(?(3)required )positional arguments?"\n )\n with pytest.raises(TypeError, match=msg):\n values.str.replace("a", repl, regex=True)\n\n\ndef test_replace_callable_named_groups(any_string_dtype):\n # test regex named groups\n ser = Series(["Foo Bar Baz", np.nan], dtype=any_string_dtype)\n pat = r"(?P<first>\w+) (?P<middle>\w+) (?P<last>\w+)"\n repl = lambda m: m.group("middle").swapcase()\n result = ser.str.replace(pat, repl, regex=True)\n expected = Series(["bAR", np.nan], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_replace_compiled_regex(any_string_dtype):\n # GH 15446\n ser = Series(["fooBAD__barBAD", np.nan], dtype=any_string_dtype)\n\n # test with compiled regex\n pat = re.compile(r"BAD_*")\n result = 
ser.str.replace(pat, "", regex=True)\n expected = Series(["foobar", np.nan], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n result = ser.str.replace(pat, "", n=1, regex=True)\n expected = Series(["foobarBAD", np.nan], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_replace_compiled_regex_mixed_object():\n pat = re.compile(r"BAD_*")\n ser = Series(\n ["aBAD", np.nan, "bBAD", True, datetime.today(), "fooBAD", None, 1, 2.0]\n )\n result = Series(ser).str.replace(pat, "", regex=True)\n expected = Series(\n ["a", np.nan, "b", np.nan, np.nan, "foo", None, np.nan, np.nan], dtype=object\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_replace_compiled_regex_unicode(any_string_dtype):\n ser = Series([b"abcd,\xc3\xa0".decode("utf-8")], dtype=any_string_dtype)\n expected = Series([b"abcd, \xc3\xa0".decode("utf-8")], dtype=any_string_dtype)\n pat = re.compile(r"(?<=\w),(?=\w)", flags=re.UNICODE)\n result = ser.str.replace(pat, ", ", regex=True)\n tm.assert_series_equal(result, expected)\n\n\ndef test_replace_compiled_regex_raises(any_string_dtype):\n # case and flags provided to str.replace will have no effect\n # and will produce warnings\n ser = Series(["fooBAD__barBAD__bad", np.nan], dtype=any_string_dtype)\n pat = re.compile(r"BAD_*")\n\n msg = "case and flags cannot be set when pat is a compiled regex"\n\n with pytest.raises(ValueError, match=msg):\n ser.str.replace(pat, "", flags=re.IGNORECASE, regex=True)\n\n with pytest.raises(ValueError, match=msg):\n ser.str.replace(pat, "", case=False, regex=True)\n\n with pytest.raises(ValueError, match=msg):\n ser.str.replace(pat, "", case=True, regex=True)\n\n\ndef test_replace_compiled_regex_callable(any_string_dtype):\n # test with callable\n ser = Series(["fooBAD__barBAD", np.nan], dtype=any_string_dtype)\n repl = lambda m: m.group(0).swapcase()\n pat = re.compile("[a-z][A-Z]{2}")\n result = ser.str.replace(pat, repl, n=2, regex=True)\n expected = 
Series(["foObaD__baRbaD", np.nan], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "regex,expected", [(True, ["bao", "bao", np.nan]), (False, ["bao", "foo", np.nan])]\n)\ndef test_replace_literal(regex, expected, any_string_dtype):\n # GH16808 literal replace (regex=False vs regex=True)\n ser = Series(["f.o", "foo", np.nan], dtype=any_string_dtype)\n expected = Series(expected, dtype=any_string_dtype)\n result = ser.str.replace("f.", "ba", regex=regex)\n tm.assert_series_equal(result, expected)\n\n\ndef test_replace_literal_callable_raises(any_string_dtype):\n ser = Series([], dtype=any_string_dtype)\n repl = lambda m: m.group(0).swapcase()\n\n msg = "Cannot use a callable replacement when regex=False"\n with pytest.raises(ValueError, match=msg):\n ser.str.replace("abc", repl, regex=False)\n\n\ndef test_replace_literal_compiled_raises(any_string_dtype):\n ser = Series([], dtype=any_string_dtype)\n pat = re.compile("[a-z][A-Z]{2}")\n\n msg = "Cannot use a compiled regex as replacement pattern with regex=False"\n with pytest.raises(ValueError, match=msg):\n ser.str.replace(pat, "", regex=False)\n\n\ndef test_replace_moar(any_string_dtype):\n # PR #1179\n ser = Series(\n ["A", "B", "C", "Aaba", "Baca", "", np.nan, "CABA", "dog", "cat"],\n dtype=any_string_dtype,\n )\n\n result = ser.str.replace("A", "YYY")\n expected = Series(\n ["YYY", "B", "C", "YYYaba", "Baca", "", np.nan, "CYYYBYYY", "dog", "cat"],\n dtype=any_string_dtype,\n )\n tm.assert_series_equal(result, expected)\n\n result = ser.str.replace("A", "YYY", case=False)\n expected = Series(\n [\n "YYY",\n "B",\n "C",\n "YYYYYYbYYY",\n "BYYYcYYY",\n "",\n np.nan,\n "CYYYBYYY",\n "dog",\n "cYYYt",\n ],\n dtype=any_string_dtype,\n )\n tm.assert_series_equal(result, expected)\n\n result = ser.str.replace("^.a|dog", "XX-XX ", case=False, regex=True)\n expected = Series(\n [\n "A",\n "B",\n "C",\n "XX-XX ba",\n "XX-XX ca",\n "",\n np.nan,\n "XX-XX BA",\n "XX-XX ",\n 
"XX-XX t",\n ],\n dtype=any_string_dtype,\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_replace_not_case_sensitive_not_regex(any_string_dtype):\n # https://github.com/pandas-dev/pandas/issues/41602\n ser = Series(["A.", "a.", "Ab", "ab", np.nan], dtype=any_string_dtype)\n\n result = ser.str.replace("a", "c", case=False, regex=False)\n expected = Series(["c.", "c.", "cb", "cb", np.nan], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n result = ser.str.replace("a.", "c.", case=False, regex=False)\n expected = Series(["c.", "c.", "Ab", "ab", np.nan], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_replace_regex(any_string_dtype):\n # https://github.com/pandas-dev/pandas/pull/24809\n s = Series(["a", "b", "ac", np.nan, ""], dtype=any_string_dtype)\n result = s.str.replace("^.$", "a", regex=True)\n expected = Series(["a", "a", "ac", np.nan, ""], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("regex", [True, False])\ndef test_replace_regex_single_character(regex, any_string_dtype):\n # https://github.com/pandas-dev/pandas/pull/24809, enforced in 2.0\n # GH 24804\n s = Series(["a.b", ".", "b", np.nan, ""], dtype=any_string_dtype)\n\n result = s.str.replace(".", "a", regex=regex)\n if regex:\n expected = Series(["aaa", "a", "a", np.nan, ""], dtype=any_string_dtype)\n else:\n expected = Series(["aab", "a", "b", np.nan, ""], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\n# --------------------------------------------------------------------------------------\n# str.match\n# --------------------------------------------------------------------------------------\n\n\ndef test_match(any_string_dtype):\n if any_string_dtype == "str":\n # NaN propagates as False\n expected_dtype = bool\n na_value = False\n else:\n expected_dtype = (\n "object" if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n na_value = np.nan\n\n 
values = Series(["fooBAD__barBAD", np.nan, "foo"], dtype=any_string_dtype)\n result = values.str.match(".*(BAD[_]+).*(BAD)")\n expected = Series([True, na_value, False], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n values = Series(\n ["fooBAD__barBAD", "BAD_BADleroybrown", np.nan, "foo"], dtype=any_string_dtype\n )\n result = values.str.match(".*BAD[_]+.*BAD")\n expected = Series([True, True, na_value, False], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n result = values.str.match("BAD[_]+.*BAD")\n expected = Series([False, True, na_value, False], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n values = Series(\n ["fooBAD__barBAD", "^BAD_BADleroybrown", np.nan, "foo"], dtype=any_string_dtype\n )\n result = values.str.match("^BAD[_]+.*BAD")\n expected = Series([False, False, na_value, False], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n result = values.str.match("\\^BAD[_]+.*BAD")\n expected = Series([False, True, na_value, False], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_match_mixed_object():\n mixed = Series(\n [\n "aBAD_BAD",\n np.nan,\n "BAD_b_BAD",\n True,\n datetime.today(),\n "foo",\n None,\n 1,\n 2.0,\n ]\n )\n result = Series(mixed).str.match(".*(BAD[_]+).*(BAD)")\n expected = Series([True, np.nan, True, np.nan, np.nan, False, None, np.nan, np.nan])\n assert isinstance(result, Series)\n tm.assert_series_equal(result, expected)\n\n\ndef test_match_na_kwarg(any_string_dtype):\n # GH #6609\n s = Series(["a", "b", np.nan], dtype=any_string_dtype)\n\n result = s.str.match("a", na=False)\n expected_dtype = (\n np.bool_ if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series([True, False, False], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n result = s.str.match("a")\n if any_string_dtype == "str":\n # NaN propagates as False\n expected_dtype = bool\n na_value = False\n else:\n 
expected_dtype = (\n "object" if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n na_value = np.nan\n\n expected = Series([True, False, na_value], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_match_case_kwarg(any_string_dtype):\n values = Series(["ab", "AB", "abc", "ABC"], dtype=any_string_dtype)\n result = values.str.match("ab", case=False)\n expected_dtype = (\n np.bool_ if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series([True, True, True, True], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n\n# --------------------------------------------------------------------------------------\n# str.fullmatch\n# --------------------------------------------------------------------------------------\n\n\ndef test_fullmatch(any_string_dtype):\n # GH 32806\n ser = Series(\n ["fooBAD__barBAD", "BAD_BADleroybrown", np.nan, "foo"], dtype=any_string_dtype\n )\n result = ser.str.fullmatch(".*BAD[_]+.*BAD")\n if any_string_dtype == "str":\n # NaN propagates as False\n expected = Series([True, False, False, False], dtype=bool)\n else:\n expected_dtype = (\n "object" if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series([True, False, np.nan, False], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_fullmatch_dollar_literal(any_string_dtype):\n # GH 56652\n ser = Series(["foo", "foo$foo", np.nan, "foo$"], dtype=any_string_dtype)\n result = ser.str.fullmatch("foo\\$")\n if any_string_dtype == "str":\n # NaN propagates as False\n expected = Series([False, False, False, True], dtype=bool)\n else:\n expected_dtype = (\n "object" if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series([False, False, np.nan, True], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_fullmatch_na_kwarg(any_string_dtype):\n ser = Series(\n ["fooBAD__barBAD", 
"BAD_BADleroybrown", np.nan, "foo"], dtype=any_string_dtype\n )\n result = ser.str.fullmatch(".*BAD[_]+.*BAD", na=False)\n expected_dtype = (\n np.bool_ if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series([True, False, False, False], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_fullmatch_case_kwarg(any_string_dtype):\n ser = Series(["ab", "AB", "abc", "ABC"], dtype=any_string_dtype)\n expected_dtype = (\n np.bool_ if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n\n expected = Series([True, False, False, False], dtype=expected_dtype)\n\n result = ser.str.fullmatch("ab", case=True)\n tm.assert_series_equal(result, expected)\n\n expected = Series([True, True, False, False], dtype=expected_dtype)\n\n result = ser.str.fullmatch("ab", case=False)\n tm.assert_series_equal(result, expected)\n\n result = ser.str.fullmatch("ab", flags=re.IGNORECASE)\n tm.assert_series_equal(result, expected)\n\n\n# --------------------------------------------------------------------------------------\n# str.findall\n# --------------------------------------------------------------------------------------\n\n\ndef test_findall(any_string_dtype):\n ser = Series(["fooBAD__barBAD", np.nan, "foo", "BAD"], dtype=any_string_dtype)\n result = ser.str.findall("BAD[_]*")\n expected = Series([["BAD__", "BAD"], np.nan, [], ["BAD"]])\n expected = _convert_na_value(ser, expected)\n tm.assert_series_equal(result, expected)\n\n\ndef test_findall_mixed_object():\n ser = Series(\n [\n "fooBAD__barBAD",\n np.nan,\n "foo",\n True,\n datetime.today(),\n "BAD",\n None,\n 1,\n 2.0,\n ]\n )\n\n result = ser.str.findall("BAD[_]*")\n expected = Series(\n [\n ["BAD__", "BAD"],\n np.nan,\n [],\n np.nan,\n np.nan,\n ["BAD"],\n None,\n np.nan,\n np.nan,\n ]\n )\n\n tm.assert_series_equal(result, expected)\n\n\n# --------------------------------------------------------------------------------------\n# str.find\n# 
--------------------------------------------------------------------------------------\n\n\ndef test_find(any_string_dtype):\n ser = Series(\n ["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF", "XXXX"], dtype=any_string_dtype\n )\n expected_dtype = (\n np.int64 if is_object_or_nan_string_dtype(any_string_dtype) else "Int64"\n )\n\n result = ser.str.find("EF")\n expected = Series([4, 3, 1, 0, -1], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n expected = np.array([v.find("EF") for v in np.array(ser)], dtype=np.int64)\n tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected)\n\n result = ser.str.rfind("EF")\n expected = Series([4, 5, 7, 4, -1], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n expected = np.array([v.rfind("EF") for v in np.array(ser)], dtype=np.int64)\n tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected)\n\n result = ser.str.find("EF", 3)\n expected = Series([4, 3, 7, 4, -1], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n expected = np.array([v.find("EF", 3) for v in np.array(ser)], dtype=np.int64)\n tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected)\n\n result = ser.str.rfind("EF", 3)\n expected = Series([4, 5, 7, 4, -1], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n expected = np.array([v.rfind("EF", 3) for v in np.array(ser)], dtype=np.int64)\n tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected)\n\n result = ser.str.find("EF", 3, 6)\n expected = Series([4, 3, -1, 4, -1], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n expected = np.array([v.find("EF", 3, 6) for v in np.array(ser)], dtype=np.int64)\n tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected)\n\n result = ser.str.rfind("EF", 3, 6)\n expected = Series([4, 3, -1, 4, -1], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n expected = np.array([v.rfind("EF", 3, 6) for v in np.array(ser)], 
dtype=np.int64)\n tm.assert_numpy_array_equal(np.array(result, dtype=np.int64), expected)\n\n\ndef test_find_bad_arg_raises(any_string_dtype):\n ser = Series([], dtype=any_string_dtype)\n with pytest.raises(TypeError, match="expected a string object, not int"):\n ser.str.find(0)\n\n with pytest.raises(TypeError, match="expected a string object, not int"):\n ser.str.rfind(0)\n\n\ndef test_find_nan(any_string_dtype):\n ser = Series(\n ["ABCDEFG", np.nan, "DEFGHIJEF", np.nan, "XXXX"], dtype=any_string_dtype\n )\n expected_dtype = (\n np.float64 if is_object_or_nan_string_dtype(any_string_dtype) else "Int64"\n )\n\n result = ser.str.find("EF")\n expected = Series([4, np.nan, 1, np.nan, -1], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n result = ser.str.rfind("EF")\n expected = Series([4, np.nan, 7, np.nan, -1], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n result = ser.str.find("EF", 3)\n expected = Series([4, np.nan, 7, np.nan, -1], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n result = ser.str.rfind("EF", 3)\n expected = Series([4, np.nan, 7, np.nan, -1], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n result = ser.str.find("EF", 3, 6)\n expected = Series([4, np.nan, -1, np.nan, -1], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n result = ser.str.rfind("EF", 3, 6)\n expected = Series([4, np.nan, -1, np.nan, -1], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n\n# --------------------------------------------------------------------------------------\n# str.translate\n# --------------------------------------------------------------------------------------\n\n\n@pytest.mark.parametrize(\n "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]\n)\ndef test_translate(index_or_series, any_string_dtype, infer_string):\n obj = index_or_series(\n ["abcdefg", "abcc", "cdddfg", "cdefggg"], dtype=any_string_dtype\n )\n table = 
str.maketrans("abc", "cde")\n result = obj.str.translate(table)\n expected = index_or_series(\n ["cdedefg", "cdee", "edddfg", "edefggg"], dtype=any_string_dtype\n )\n tm.assert_equal(result, expected)\n\n\ndef test_translate_mixed_object():\n # Series with non-string values\n s = Series(["a", "b", "c", 1.2])\n table = str.maketrans("abc", "cde")\n expected = Series(["c", "d", "e", np.nan], dtype=object)\n result = s.str.translate(table)\n tm.assert_series_equal(result, expected)\n\n\n# --------------------------------------------------------------------------------------\n\n\ndef test_flags_kwarg(any_string_dtype):\n data = {\n "Dave": "dave@google.com",\n "Steve": "steve@gmail.com",\n "Rob": "rob@gmail.com",\n "Wes": np.nan,\n }\n data = Series(data, dtype=any_string_dtype)\n\n pat = r"([A-Z0-9._%+-]+)@([A-Z0-9.-]+)\.([A-Z]{2,4})"\n\n result = data.str.extract(pat, flags=re.IGNORECASE, expand=True)\n assert result.iloc[0].tolist() == ["dave", "google", "com"]\n\n result = data.str.match(pat, flags=re.IGNORECASE)\n assert result.iloc[0]\n\n result = data.str.fullmatch(pat, flags=re.IGNORECASE)\n assert result.iloc[0]\n\n result = data.str.findall(pat, flags=re.IGNORECASE)\n assert result.iloc[0][0] == ("dave", "google", "com")\n\n result = data.str.count(pat, flags=re.IGNORECASE)\n assert result.iloc[0] == 1\n\n msg = "has match groups"\n with tm.assert_produces_warning(UserWarning, match=msg):\n result = data.str.contains(pat, flags=re.IGNORECASE)\n assert result.iloc[0]\n | .venv\Lib\site-packages\pandas\tests\strings\test_find_replace.py | test_find_replace.py | Python | 37,896 | 0.95 | 0.089661 | 0.087937 | python-kit | 183 | 2025-01-16T14:04:44.275804 | GPL-3.0 | true | 384111b302835e081a199debf98b0a6a |
import numpy as np\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n _testing as tm,\n)\n\n\ndef test_get_dummies(any_string_dtype):\n s = Series(["a|b", "a|c", np.nan], dtype=any_string_dtype)\n result = s.str.get_dummies("|")\n expected = DataFrame([[1, 1, 0], [1, 0, 1], [0, 0, 0]], columns=list("abc"))\n tm.assert_frame_equal(result, expected)\n\n s = Series(["a;b", "a", 7], dtype=any_string_dtype)\n result = s.str.get_dummies(";")\n expected = DataFrame([[0, 1, 1], [0, 1, 0], [1, 0, 0]], columns=list("7ab"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_get_dummies_index():\n # GH9980, GH8028\n idx = Index(["a|b", "a|c", "b|c"])\n result = idx.str.get_dummies("|")\n\n expected = MultiIndex.from_tuples(\n [(1, 1, 0), (1, 0, 1), (0, 1, 1)], names=("a", "b", "c")\n )\n tm.assert_index_equal(result, expected)\n\n\ndef test_get_dummies_with_name_dummy(any_string_dtype):\n # GH 12180\n # Dummies named 'name' should work as expected\n s = Series(["a", "b,name", "b"], dtype=any_string_dtype)\n result = s.str.get_dummies(",")\n expected = DataFrame([[1, 0, 0], [0, 1, 1], [0, 1, 0]], columns=["a", "b", "name"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_get_dummies_with_name_dummy_index():\n # GH 12180\n # Dummies named 'name' should work as expected\n idx = Index(["a|b", "name|c", "b|name"])\n result = idx.str.get_dummies("|")\n\n expected = MultiIndex.from_tuples(\n [(1, 1, 0, 0), (0, 0, 1, 1), (0, 1, 0, 1)], names=("a", "b", "c", "name")\n )\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\strings\test_get_dummies.py | test_get_dummies.py | Python | 1,608 | 0.95 | 0.075472 | 0.121951 | awesome-app | 50 | 2024-08-06T14:40:16.489560 | BSD-3-Clause | true | d63d75854df7eb13c2668010710e1801 |
from datetime import datetime\nimport re\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n _testing as tm,\n)\nfrom pandas.tests.strings import (\n _convert_na_value,\n is_object_or_nan_string_dtype,\n)\n\n\n@pytest.mark.parametrize("method", ["split", "rsplit"])\ndef test_split(any_string_dtype, method):\n values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)\n\n result = getattr(values.str, method)("_")\n exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])\n exp = _convert_na_value(values, exp)\n tm.assert_series_equal(result, exp)\n\n\n@pytest.mark.parametrize("method", ["split", "rsplit"])\ndef test_split_more_than_one_char(any_string_dtype, method):\n # more than one char\n values = Series(["a__b__c", "c__d__e", np.nan, "f__g__h"], dtype=any_string_dtype)\n result = getattr(values.str, method)("__")\n exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])\n exp = _convert_na_value(values, exp)\n tm.assert_series_equal(result, exp)\n\n result = getattr(values.str, method)("__", expand=False)\n tm.assert_series_equal(result, exp)\n\n\ndef test_split_more_regex_split(any_string_dtype):\n # regex split\n values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)\n result = values.str.split("[,_]")\n exp = Series([["a", "b", "c"], ["c", "d", "e"], np.nan, ["f", "g", "h"]])\n exp = _convert_na_value(values, exp)\n tm.assert_series_equal(result, exp)\n\n\ndef test_split_regex(any_string_dtype):\n # GH 43563\n # explicit regex = True split\n values = Series("xxxjpgzzz.jpg", dtype=any_string_dtype)\n result = values.str.split(r"\.jpg", regex=True)\n exp = Series([["xxxjpgzzz", ""]])\n tm.assert_series_equal(result, exp)\n\n\ndef test_split_regex_explicit(any_string_dtype):\n # explicit regex = True split with compiled regex\n regex_pat = re.compile(r".jpg")\n values = Series("xxxjpgzzz.jpg", 
dtype=any_string_dtype)\n result = values.str.split(regex_pat)\n exp = Series([["xx", "zzz", ""]])\n tm.assert_series_equal(result, exp)\n\n # explicit regex = False split\n result = values.str.split(r"\.jpg", regex=False)\n exp = Series([["xxxjpgzzz.jpg"]])\n tm.assert_series_equal(result, exp)\n\n # non explicit regex split, pattern length == 1\n result = values.str.split(r".")\n exp = Series([["xxxjpgzzz", "jpg"]])\n tm.assert_series_equal(result, exp)\n\n # non explicit regex split, pattern length != 1\n result = values.str.split(r".jpg")\n exp = Series([["xx", "zzz", ""]])\n tm.assert_series_equal(result, exp)\n\n # regex=False with pattern compiled regex raises error\n with pytest.raises(\n ValueError,\n match="Cannot use a compiled regex as replacement pattern with regex=False",\n ):\n values.str.split(regex_pat, regex=False)\n\n\n@pytest.mark.parametrize("expand", [None, False])\n@pytest.mark.parametrize("method", ["split", "rsplit"])\ndef test_split_object_mixed(expand, method):\n mixed = Series(["a_b_c", np.nan, "d_e_f", True, datetime.today(), None, 1, 2.0])\n result = getattr(mixed.str, method)("_", expand=expand)\n exp = Series(\n [\n ["a", "b", "c"],\n np.nan,\n ["d", "e", "f"],\n np.nan,\n np.nan,\n None,\n np.nan,\n np.nan,\n ]\n )\n assert isinstance(result, Series)\n tm.assert_almost_equal(result, exp)\n\n\n@pytest.mark.parametrize("method", ["split", "rsplit"])\n@pytest.mark.parametrize("n", [None, 0])\ndef test_split_n(any_string_dtype, method, n):\n s = Series(["a b", pd.NA, "b c"], dtype=any_string_dtype)\n expected = Series([["a", "b"], pd.NA, ["b", "c"]])\n result = getattr(s.str, method)(" ", n=n)\n expected = _convert_na_value(s, expected)\n tm.assert_series_equal(result, expected)\n\n\ndef test_rsplit(any_string_dtype):\n # regex split is not supported by rsplit\n values = Series(["a,b_c", "c_d,e", np.nan, "f,g,h"], dtype=any_string_dtype)\n result = values.str.rsplit("[,_]")\n exp = Series([["a,b_c"], ["c_d,e"], np.nan, ["f,g,h"]])\n exp 
= _convert_na_value(values, exp)\n tm.assert_series_equal(result, exp)\n\n\ndef test_rsplit_max_number(any_string_dtype):\n # setting max number of splits, make sure it's from reverse\n values = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)\n result = values.str.rsplit("_", n=1)\n exp = Series([["a_b", "c"], ["c_d", "e"], np.nan, ["f_g", "h"]])\n exp = _convert_na_value(values, exp)\n tm.assert_series_equal(result, exp)\n\n\ndef test_split_blank_string(any_string_dtype):\n # expand blank split GH 20067\n values = Series([""], name="test", dtype=any_string_dtype)\n result = values.str.split(expand=True)\n exp = DataFrame([[]], dtype=any_string_dtype) # NOTE: this is NOT an empty df\n tm.assert_frame_equal(result, exp)\n\n\ndef test_split_blank_string_with_non_empty(any_string_dtype):\n values = Series(["a b c", "a b", "", " "], name="test", dtype=any_string_dtype)\n result = values.str.split(expand=True)\n exp = DataFrame(\n [\n ["a", "b", "c"],\n ["a", "b", None],\n [None, None, None],\n [None, None, None],\n ],\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, exp)\n\n\n@pytest.mark.parametrize("method", ["split", "rsplit"])\ndef test_split_noargs(any_string_dtype, method):\n # #1859\n s = Series(["Wes McKinney", "Travis Oliphant"], dtype=any_string_dtype)\n result = getattr(s.str, method)()\n expected = ["Travis", "Oliphant"]\n assert result[1] == expected\n\n\n@pytest.mark.parametrize(\n "data, pat",\n [\n (["bd asdf jfg", "kjasdflqw asdfnfk"], None),\n (["bd asdf jfg", "kjasdflqw asdfnfk"], "asdf"),\n (["bd_asdf_jfg", "kjasdflqw_asdfnfk"], "_"),\n ],\n)\n@pytest.mark.parametrize("n", [-1, 0])\ndef test_split_maxsplit(data, pat, any_string_dtype, n):\n # re.split 0, str.split -1\n s = Series(data, dtype=any_string_dtype)\n\n result = s.str.split(pat=pat, n=n)\n xp = s.str.split(pat=pat)\n tm.assert_series_equal(result, xp)\n\n\n@pytest.mark.parametrize(\n "data, pat, expected",\n [\n (\n ["split once", "split once too!"],\n 
None,\n Series({0: ["split", "once"], 1: ["split", "once too!"]}),\n ),\n (\n ["split_once", "split_once_too!"],\n "_",\n Series({0: ["split", "once"], 1: ["split", "once_too!"]}),\n ),\n ],\n)\ndef test_split_no_pat_with_nonzero_n(data, pat, expected, any_string_dtype):\n s = Series(data, dtype=any_string_dtype)\n result = s.str.split(pat=pat, n=1)\n tm.assert_series_equal(expected, result, check_index_type=False)\n\n\ndef test_split_to_dataframe_no_splits(any_string_dtype):\n s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)\n result = s.str.split("_", expand=True)\n exp = DataFrame({0: Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)})\n tm.assert_frame_equal(result, exp)\n\n\ndef test_split_to_dataframe(any_string_dtype):\n s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)\n result = s.str.split("_", expand=True)\n exp = DataFrame(\n {0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, exp)\n\n\ndef test_split_to_dataframe_unequal_splits(any_string_dtype):\n s = Series(\n ["some_unequal_splits", "one_of_these_things_is_not"], dtype=any_string_dtype\n )\n result = s.str.split("_", expand=True)\n exp = DataFrame(\n {\n 0: ["some", "one"],\n 1: ["unequal", "of"],\n 2: ["splits", "these"],\n 3: [None, "things"],\n 4: [None, "is"],\n 5: [None, "not"],\n },\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, exp)\n\n\ndef test_split_to_dataframe_with_index(any_string_dtype):\n s = Series(\n ["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype\n )\n result = s.str.split("_", expand=True)\n exp = DataFrame(\n {0: ["some", "with"], 1: ["splits", "index"]},\n index=["preserve", "me"],\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, exp)\n\n with pytest.raises(ValueError, match="expand must be"):\n s.str.split("_", expand="not_a_boolean")\n\n\ndef test_split_to_multiindex_expand_no_splits():\n # 
https://github.com/pandas-dev/pandas/issues/23677\n\n idx = Index(["nosplit", "alsonosplit", np.nan])\n result = idx.str.split("_", expand=True)\n exp = idx\n tm.assert_index_equal(result, exp)\n assert result.nlevels == 1\n\n\ndef test_split_to_multiindex_expand():\n idx = Index(["some_equal_splits", "with_no_nans", np.nan, None])\n result = idx.str.split("_", expand=True)\n exp = MultiIndex.from_tuples(\n [\n ("some", "equal", "splits"),\n ("with", "no", "nans"),\n [np.nan, np.nan, np.nan],\n [None, None, None],\n ]\n )\n tm.assert_index_equal(result, exp)\n assert result.nlevels == 3\n\n\ndef test_split_to_multiindex_expand_unequal_splits():\n idx = Index(["some_unequal_splits", "one_of_these_things_is_not", np.nan, None])\n result = idx.str.split("_", expand=True)\n exp = MultiIndex.from_tuples(\n [\n ("some", "unequal", "splits", np.nan, np.nan, np.nan),\n ("one", "of", "these", "things", "is", "not"),\n (np.nan, np.nan, np.nan, np.nan, np.nan, np.nan),\n (None, None, None, None, None, None),\n ]\n )\n tm.assert_index_equal(result, exp)\n assert result.nlevels == 6\n\n with pytest.raises(ValueError, match="expand must be"):\n idx.str.split("_", expand="not_a_boolean")\n\n\ndef test_rsplit_to_dataframe_expand_no_splits(any_string_dtype):\n s = Series(["nosplit", "alsonosplit"], dtype=any_string_dtype)\n result = s.str.rsplit("_", expand=True)\n exp = DataFrame({0: Series(["nosplit", "alsonosplit"])}, dtype=any_string_dtype)\n tm.assert_frame_equal(result, exp)\n\n\ndef test_rsplit_to_dataframe_expand(any_string_dtype):\n s = Series(["some_equal_splits", "with_no_nans"], dtype=any_string_dtype)\n result = s.str.rsplit("_", expand=True)\n exp = DataFrame(\n {0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, exp)\n\n result = s.str.rsplit("_", expand=True, n=2)\n exp = DataFrame(\n {0: ["some", "with"], 1: ["equal", "no"], 2: ["splits", "nans"]},\n dtype=any_string_dtype,\n )\n 
tm.assert_frame_equal(result, exp)\n\n result = s.str.rsplit("_", expand=True, n=1)\n exp = DataFrame(\n {0: ["some_equal", "with_no"], 1: ["splits", "nans"]}, dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, exp)\n\n\ndef test_rsplit_to_dataframe_expand_with_index(any_string_dtype):\n s = Series(\n ["some_splits", "with_index"], index=["preserve", "me"], dtype=any_string_dtype\n )\n result = s.str.rsplit("_", expand=True)\n exp = DataFrame(\n {0: ["some", "with"], 1: ["splits", "index"]},\n index=["preserve", "me"],\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, exp)\n\n\ndef test_rsplit_to_multiindex_expand_no_split():\n idx = Index(["nosplit", "alsonosplit"])\n result = idx.str.rsplit("_", expand=True)\n exp = idx\n tm.assert_index_equal(result, exp)\n assert result.nlevels == 1\n\n\ndef test_rsplit_to_multiindex_expand():\n idx = Index(["some_equal_splits", "with_no_nans"])\n result = idx.str.rsplit("_", expand=True)\n exp = MultiIndex.from_tuples([("some", "equal", "splits"), ("with", "no", "nans")])\n tm.assert_index_equal(result, exp)\n assert result.nlevels == 3\n\n\ndef test_rsplit_to_multiindex_expand_n():\n idx = Index(["some_equal_splits", "with_no_nans"])\n result = idx.str.rsplit("_", expand=True, n=1)\n exp = MultiIndex.from_tuples([("some_equal", "splits"), ("with_no", "nans")])\n tm.assert_index_equal(result, exp)\n assert result.nlevels == 2\n\n\ndef test_split_nan_expand(any_string_dtype):\n # gh-18450\n s = Series(["foo,bar,baz", np.nan], dtype=any_string_dtype)\n result = s.str.split(",", expand=True)\n exp = DataFrame(\n [["foo", "bar", "baz"], [np.nan, np.nan, np.nan]], dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, exp)\n\n # check that these are actually np.nan/pd.NA and not None\n # TODO see GH 18463\n # tm.assert_frame_equal does not differentiate\n if is_object_or_nan_string_dtype(any_string_dtype):\n assert all(np.isnan(x) for x in result.iloc[1])\n else:\n assert all(x is pd.NA for x in 
result.iloc[1])\n\n\ndef test_split_with_name_series(any_string_dtype):\n # GH 12617\n\n # should preserve name\n s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)\n res = s.str.split(",")\n exp = Series([["a", "b"], ["c", "d"]], name="xxx")\n tm.assert_series_equal(res, exp)\n\n res = s.str.split(",", expand=True)\n exp = DataFrame([["a", "b"], ["c", "d"]], dtype=any_string_dtype)\n tm.assert_frame_equal(res, exp)\n\n\ndef test_split_with_name_index():\n # GH 12617\n idx = Index(["a,b", "c,d"], name="xxx")\n res = idx.str.split(",")\n exp = Index([["a", "b"], ["c", "d"]], name="xxx")\n assert res.nlevels == 1\n tm.assert_index_equal(res, exp)\n\n res = idx.str.split(",", expand=True)\n exp = MultiIndex.from_tuples([("a", "b"), ("c", "d")])\n assert res.nlevels == 2\n tm.assert_index_equal(res, exp)\n\n\n@pytest.mark.parametrize(\n "method, exp",\n [\n [\n "partition",\n [\n ("a", "__", "b__c"),\n ("c", "__", "d__e"),\n np.nan,\n ("f", "__", "g__h"),\n None,\n ],\n ],\n [\n "rpartition",\n [\n ("a__b", "__", "c"),\n ("c__d", "__", "e"),\n np.nan,\n ("f__g", "__", "h"),\n None,\n ],\n ],\n ],\n)\ndef test_partition_series_more_than_one_char(method, exp, any_string_dtype):\n # https://github.com/pandas-dev/pandas/issues/23558\n # more than one char\n s = Series(["a__b__c", "c__d__e", np.nan, "f__g__h", None], dtype=any_string_dtype)\n result = getattr(s.str, method)("__", expand=False)\n expected = Series(exp)\n expected = _convert_na_value(s, expected)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "method, exp",\n [\n [\n "partition",\n [("a", " ", "b c"), ("c", " ", "d e"), np.nan, ("f", " ", "g h"), None],\n ],\n [\n "rpartition",\n [("a b", " ", "c"), ("c d", " ", "e"), np.nan, ("f g", " ", "h"), None],\n ],\n ],\n)\ndef test_partition_series_none(any_string_dtype, method, exp):\n # https://github.com/pandas-dev/pandas/issues/23558\n # None\n s = Series(["a b c", "c d e", np.nan, "f g h", None], dtype=any_string_dtype)\n 
result = getattr(s.str, method)(expand=False)\n expected = Series(exp)\n expected = _convert_na_value(s, expected)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "method, exp",\n [\n [\n "partition",\n [("abc", "", ""), ("cde", "", ""), np.nan, ("fgh", "", ""), None],\n ],\n [\n "rpartition",\n [("", "", "abc"), ("", "", "cde"), np.nan, ("", "", "fgh"), None],\n ],\n ],\n)\ndef test_partition_series_not_split(any_string_dtype, method, exp):\n # https://github.com/pandas-dev/pandas/issues/23558\n # Not split\n s = Series(["abc", "cde", np.nan, "fgh", None], dtype=any_string_dtype)\n result = getattr(s.str, method)("_", expand=False)\n expected = Series(exp)\n expected = _convert_na_value(s, expected)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "method, exp",\n [\n [\n "partition",\n [("a", "_", "b_c"), ("c", "_", "d_e"), np.nan, ("f", "_", "g_h")],\n ],\n [\n "rpartition",\n [("a_b", "_", "c"), ("c_d", "_", "e"), np.nan, ("f_g", "_", "h")],\n ],\n ],\n)\ndef test_partition_series_unicode(any_string_dtype, method, exp):\n # https://github.com/pandas-dev/pandas/issues/23558\n # unicode\n s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)\n\n result = getattr(s.str, method)("_", expand=False)\n expected = Series(exp)\n expected = _convert_na_value(s, expected)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("method", ["partition", "rpartition"])\ndef test_partition_series_stdlib(any_string_dtype, method):\n # https://github.com/pandas-dev/pandas/issues/23558\n # compare to standard lib\n s = Series(["A_B_C", "B_C_D", "E_F_G", "EFGHEF"], dtype=any_string_dtype)\n result = getattr(s.str, method)("_", expand=False).tolist()\n assert result == [getattr(v, method)("_") for v in s]\n\n\n@pytest.mark.parametrize(\n "method, expand, exp, exp_levels",\n [\n [\n "partition",\n False,\n np.array(\n [("a", "_", "b_c"), ("c", "_", "d_e"), ("f", "_", "g_h"), np.nan, None],\n 
dtype=object,\n ),\n 1,\n ],\n [\n "rpartition",\n False,\n np.array(\n [("a_b", "_", "c"), ("c_d", "_", "e"), ("f_g", "_", "h"), np.nan, None],\n dtype=object,\n ),\n 1,\n ],\n ],\n)\ndef test_partition_index(method, expand, exp, exp_levels):\n # https://github.com/pandas-dev/pandas/issues/23558\n\n values = Index(["a_b_c", "c_d_e", "f_g_h", np.nan, None])\n\n result = getattr(values.str, method)("_", expand=expand)\n exp = Index(exp)\n tm.assert_index_equal(result, exp)\n assert result.nlevels == exp_levels\n\n\n@pytest.mark.parametrize(\n "method, exp",\n [\n [\n "partition",\n {\n 0: ["a", "c", np.nan, "f", None],\n 1: ["_", "_", np.nan, "_", None],\n 2: ["b_c", "d_e", np.nan, "g_h", None],\n },\n ],\n [\n "rpartition",\n {\n 0: ["a_b", "c_d", np.nan, "f_g", None],\n 1: ["_", "_", np.nan, "_", None],\n 2: ["c", "e", np.nan, "h", None],\n },\n ],\n ],\n)\ndef test_partition_to_dataframe(any_string_dtype, method, exp):\n # https://github.com/pandas-dev/pandas/issues/23558\n\n s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)\n result = getattr(s.str, method)("_")\n expected = DataFrame(\n exp,\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "method, exp",\n [\n [\n "partition",\n {\n 0: ["a", "c", np.nan, "f", None],\n 1: ["_", "_", np.nan, "_", None],\n 2: ["b_c", "d_e", np.nan, "g_h", None],\n },\n ],\n [\n "rpartition",\n {\n 0: ["a_b", "c_d", np.nan, "f_g", None],\n 1: ["_", "_", np.nan, "_", None],\n 2: ["c", "e", np.nan, "h", None],\n },\n ],\n ],\n)\ndef test_partition_to_dataframe_from_series(any_string_dtype, method, exp):\n # https://github.com/pandas-dev/pandas/issues/23558\n s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h", None], dtype=any_string_dtype)\n result = getattr(s.str, method)("_", expand=True)\n expected = DataFrame(\n exp,\n dtype=any_string_dtype,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_partition_with_name(any_string_dtype):\n # GH 
12617\n\n s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)\n result = s.str.partition(",")\n expected = DataFrame(\n {0: ["a", "c"], 1: [",", ","], 2: ["b", "d"]}, dtype=any_string_dtype\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_partition_with_name_expand(any_string_dtype):\n # GH 12617\n # should preserve name\n s = Series(["a,b", "c,d"], name="xxx", dtype=any_string_dtype)\n result = s.str.partition(",", expand=False)\n expected = Series([("a", ",", "b"), ("c", ",", "d")], name="xxx")\n tm.assert_series_equal(result, expected)\n\n\ndef test_partition_index_with_name():\n idx = Index(["a,b", "c,d"], name="xxx")\n result = idx.str.partition(",")\n expected = MultiIndex.from_tuples([("a", ",", "b"), ("c", ",", "d")])\n assert result.nlevels == 3\n tm.assert_index_equal(result, expected)\n\n\ndef test_partition_index_with_name_expand_false():\n idx = Index(["a,b", "c,d"], name="xxx")\n # should preserve name\n result = idx.str.partition(",", expand=False)\n expected = Index(np.array([("a", ",", "b"), ("c", ",", "d")]), name="xxx")\n assert result.nlevels == 1\n tm.assert_index_equal(result, expected)\n\n\n@pytest.mark.parametrize("method", ["partition", "rpartition"])\ndef test_partition_sep_kwarg(any_string_dtype, method):\n # GH 22676; depr kwarg "pat" in favor of "sep"\n s = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)\n\n expected = getattr(s.str, method)(sep="_")\n result = getattr(s.str, method)("_")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_get():\n ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"])\n result = ser.str.split("_").str.get(1)\n expected = Series(["b", "d", np.nan, "g"], dtype=object)\n tm.assert_series_equal(result, expected)\n\n\ndef test_get_mixed_object():\n ser = Series(["a_b_c", np.nan, "c_d_e", True, datetime.today(), None, 1, 2.0])\n result = ser.str.split("_").str.get(1)\n expected = Series(\n ["b", np.nan, "d", np.nan, np.nan, None, np.nan, np.nan], dtype=object\n )\n 
tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("idx", [2, -3])\ndef test_get_bounds(idx):\n ser = Series(["1_2_3_4_5", "6_7_8_9_10", "11_12"])\n result = ser.str.split("_").str.get(idx)\n expected = Series(["3", "8", np.nan], dtype=object)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "idx, exp", [[2, [3, 3, np.nan, "b"]], [-1, [3, 3, np.nan, np.nan]]]\n)\ndef test_get_complex(idx, exp):\n # GH 20671, getting value not in dict raising `KeyError`\n ser = Series([(1, 2, 3), [1, 2, 3], {1, 2, 3}, {1: "a", 2: "b", 3: "c"}])\n\n result = ser.str.get(idx)\n expected = Series(exp)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("to_type", [tuple, list, np.array])\ndef test_get_complex_nested(to_type):\n ser = Series([to_type([to_type([1, 2])])])\n\n result = ser.str.get(0)\n expected = Series([to_type([1, 2])])\n tm.assert_series_equal(result, expected)\n\n result = ser.str.get(1)\n expected = Series([np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_get_strings(any_string_dtype):\n ser = Series(["a", "ab", np.nan, "abc"], dtype=any_string_dtype)\n result = ser.str.get(2)\n expected = Series([np.nan, np.nan, np.nan, "c"], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\strings\test_split_partition.py | test_split_partition.py | Python | 23,250 | 0.95 | 0.072207 | 0.067323 | awesome-app | 274 | 2024-03-24T08:26:09.657300 | Apache-2.0 | true | 2b91880292faed0d5fdd676ea53d8f04 |
from datetime import (\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.core.strings.accessor import StringMethods\nfrom pandas.tests.strings import is_object_or_nan_string_dtype\n\n\n@pytest.mark.parametrize("pattern", [0, True, Series(["foo", "bar"])])\ndef test_startswith_endswith_non_str_patterns(pattern):\n # GH3485\n ser = Series(["foo", "bar"])\n msg = f"expected a string or tuple, not {type(pattern).__name__}"\n with pytest.raises(TypeError, match=msg):\n ser.str.startswith(pattern)\n with pytest.raises(TypeError, match=msg):\n ser.str.endswith(pattern)\n\n\ndef test_iter_raises():\n # GH 54173\n ser = Series(["foo", "bar"])\n with pytest.raises(TypeError, match="'StringMethods' object is not iterable"):\n iter(ser.str)\n\n\n# test integer/float dtypes (inferred by constructor) and mixed\n\n\ndef test_count(any_string_dtype):\n ser = Series(["foo", "foofoo", np.nan, "foooofooofommmfoo"], dtype=any_string_dtype)\n result = ser.str.count("f[o]+")\n expected_dtype = (\n np.float64 if is_object_or_nan_string_dtype(any_string_dtype) else "Int64"\n )\n expected = Series([1, 2, np.nan, 4], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_count_mixed_object():\n ser = Series(\n ["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0],\n dtype=object,\n )\n result = ser.str.count("a")\n expected = Series([1, np.nan, 0, np.nan, np.nan, 0, np.nan, np.nan, np.nan])\n tm.assert_series_equal(result, expected)\n\n\ndef test_repeat(any_string_dtype):\n ser = Series(["a", "b", np.nan, "c", np.nan, "d"], dtype=any_string_dtype)\n\n result = ser.str.repeat(3)\n expected = Series(\n ["aaa", "bbb", np.nan, "ccc", np.nan, "ddd"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n\n result = ser.str.repeat([1, 2, 3, 4, 5, 6])\n expected = Series(\n ["a", "bb", np.nan, "cccc", np.nan, 
"dddddd"], dtype=any_string_dtype\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_repeat_mixed_object():\n ser = Series(["a", np.nan, "b", True, datetime.today(), "foo", None, 1, 2.0])\n result = ser.str.repeat(3)\n expected = Series(\n ["aaa", np.nan, "bbb", np.nan, np.nan, "foofoofoo", None, np.nan, np.nan],\n dtype=object,\n )\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("arg, repeat", [[None, 4], ["b", None]])\ndef test_repeat_with_null(any_string_dtype, arg, repeat):\n # GH: 31632\n ser = Series(["a", arg], dtype=any_string_dtype)\n result = ser.str.repeat([3, repeat])\n expected = Series(["aaa", None], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_empty_str_methods(any_string_dtype):\n empty_str = empty = Series(dtype=any_string_dtype)\n empty_inferred_str = Series(dtype="str")\n if is_object_or_nan_string_dtype(any_string_dtype):\n empty_int = Series(dtype="int64")\n empty_bool = Series(dtype=bool)\n else:\n empty_int = Series(dtype="Int64")\n empty_bool = Series(dtype="boolean")\n empty_object = Series(dtype=object)\n empty_bytes = Series(dtype=object)\n empty_df = DataFrame()\n\n # GH7241\n # (extract) on empty series\n\n tm.assert_series_equal(empty_str, empty.str.cat(empty))\n assert "" == empty.str.cat()\n tm.assert_series_equal(empty_str, empty.str.title())\n tm.assert_series_equal(empty_int, empty.str.count("a"))\n tm.assert_series_equal(empty_bool, empty.str.contains("a"))\n tm.assert_series_equal(empty_bool, empty.str.startswith("a"))\n tm.assert_series_equal(empty_bool, empty.str.endswith("a"))\n tm.assert_series_equal(empty_str, empty.str.lower())\n tm.assert_series_equal(empty_str, empty.str.upper())\n tm.assert_series_equal(empty_str, empty.str.replace("a", "b"))\n tm.assert_series_equal(empty_str, empty.str.repeat(3))\n tm.assert_series_equal(empty_bool, empty.str.match("^a"))\n tm.assert_frame_equal(\n DataFrame(columns=[0], dtype=any_string_dtype),\n 
empty.str.extract("()", expand=True),\n )\n tm.assert_frame_equal(\n DataFrame(columns=[0, 1], dtype=any_string_dtype),\n empty.str.extract("()()", expand=True),\n )\n tm.assert_series_equal(empty_str, empty.str.extract("()", expand=False))\n tm.assert_frame_equal(\n DataFrame(columns=[0, 1], dtype=any_string_dtype),\n empty.str.extract("()()", expand=False),\n )\n tm.assert_frame_equal(empty_df.set_axis([], axis=1), empty.str.get_dummies())\n tm.assert_series_equal(empty_str, empty_str.str.join(""))\n tm.assert_series_equal(empty_int, empty.str.len())\n tm.assert_series_equal(empty_object, empty_str.str.findall("a"))\n tm.assert_series_equal(empty_int, empty.str.find("a"))\n tm.assert_series_equal(empty_int, empty.str.rfind("a"))\n tm.assert_series_equal(empty_str, empty.str.pad(42))\n tm.assert_series_equal(empty_str, empty.str.center(42))\n tm.assert_series_equal(empty_object, empty.str.split("a"))\n tm.assert_series_equal(empty_object, empty.str.rsplit("a"))\n tm.assert_series_equal(empty_object, empty.str.partition("a", expand=False))\n tm.assert_frame_equal(empty_df, empty.str.partition("a"))\n tm.assert_series_equal(empty_object, empty.str.rpartition("a", expand=False))\n tm.assert_frame_equal(empty_df, empty.str.rpartition("a"))\n tm.assert_series_equal(empty_str, empty.str.slice(stop=1))\n tm.assert_series_equal(empty_str, empty.str.slice(step=1))\n tm.assert_series_equal(empty_str, empty.str.strip())\n tm.assert_series_equal(empty_str, empty.str.lstrip())\n tm.assert_series_equal(empty_str, empty.str.rstrip())\n tm.assert_series_equal(empty_str, empty.str.wrap(42))\n tm.assert_series_equal(empty_str, empty.str.get(0))\n tm.assert_series_equal(empty_inferred_str, empty_bytes.str.decode("ascii"))\n tm.assert_series_equal(empty_bytes, empty.str.encode("ascii"))\n # ismethods should always return boolean (GH 29624)\n tm.assert_series_equal(empty_bool, empty.str.isalnum())\n tm.assert_series_equal(empty_bool, empty.str.isalpha())\n 
tm.assert_series_equal(empty_bool, empty.str.isdigit())\n tm.assert_series_equal(empty_bool, empty.str.isspace())\n tm.assert_series_equal(empty_bool, empty.str.islower())\n tm.assert_series_equal(empty_bool, empty.str.isupper())\n tm.assert_series_equal(empty_bool, empty.str.istitle())\n tm.assert_series_equal(empty_bool, empty.str.isnumeric())\n tm.assert_series_equal(empty_bool, empty.str.isdecimal())\n tm.assert_series_equal(empty_str, empty.str.capitalize())\n tm.assert_series_equal(empty_str, empty.str.swapcase())\n tm.assert_series_equal(empty_str, empty.str.normalize("NFC"))\n\n table = str.maketrans("a", "b")\n tm.assert_series_equal(empty_str, empty.str.translate(table))\n\n\n@pytest.mark.parametrize(\n "method, expected",\n [\n ("isalnum", [True, True, True, True, True, False, True, True, False, False]),\n ("isalpha", [True, True, True, False, False, False, True, False, False, False]),\n (\n "isdigit",\n [False, False, False, True, False, False, False, True, False, False],\n ),\n (\n "isnumeric",\n [False, False, False, True, False, False, False, True, False, False],\n ),\n (\n "isspace",\n [False, False, False, False, False, False, False, False, False, True],\n ),\n (\n "islower",\n [False, True, False, False, False, False, False, False, False, False],\n ),\n (\n "isupper",\n [True, False, False, False, True, False, True, False, False, False],\n ),\n (\n "istitle",\n [True, False, True, False, True, False, False, False, False, False],\n ),\n ],\n)\ndef test_ismethods(method, expected, any_string_dtype):\n ser = Series(\n ["A", "b", "Xy", "4", "3A", "", "TT", "55", "-", " "], dtype=any_string_dtype\n )\n expected_dtype = (\n "bool" if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series(expected, dtype=expected_dtype)\n result = getattr(ser.str, method)()\n tm.assert_series_equal(result, expected)\n\n # compare with standard library\n expected_stdlib = [getattr(item, method)() for item in ser]\n assert list(result) == 
expected_stdlib\n\n # with missing value\n ser.iloc[[1, 2, 3, 4]] = np.nan\n result = getattr(ser.str, method)()\n if ser.dtype == "object":\n expected = expected.astype(object)\n expected.iloc[[1, 2, 3, 4]] = np.nan\n elif ser.dtype == "str":\n # NaN propagates as False\n expected.iloc[[1, 2, 3, 4]] = False\n else:\n # nullable dtypes propagate NaN\n expected.iloc[[1, 2, 3, 4]] = np.nan\n\n\n@pytest.mark.parametrize(\n "method, expected",\n [\n ("isnumeric", [False, True, True, False, True, True, False]),\n ("isdecimal", [False, True, False, False, False, True, False]),\n ],\n)\ndef test_isnumeric_unicode(method, expected, any_string_dtype):\n # 0x00bc: ¼ VULGAR FRACTION ONE QUARTER\n # 0x2605: ★ not number\n # 0x1378: ፸ ETHIOPIC NUMBER SEVENTY\n # 0xFF13: 3 Em 3 # noqa: RUF003\n ser = Series(\n ["A", "3", "¼", "★", "፸", "3", "four"], dtype=any_string_dtype # noqa: RUF001\n )\n expected_dtype = (\n "bool" if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series(expected, dtype=expected_dtype)\n result = getattr(ser.str, method)()\n tm.assert_series_equal(result, expected)\n\n # compare with standard library\n expected = [getattr(item, method)() for item in ser]\n assert list(result) == expected\n\n\n@pytest.mark.filterwarnings("ignore:Downcasting object dtype arrays:FutureWarning")\n@pytest.mark.parametrize(\n "method, expected",\n [\n ("isnumeric", [False, np.nan, True, False, np.nan, True, False]),\n ("isdecimal", [False, np.nan, False, False, np.nan, True, False]),\n ],\n)\ndef test_isnumeric_unicode_missing(method, expected, any_string_dtype):\n values = ["A", np.nan, "¼", "★", np.nan, "3", "four"] # noqa: RUF001\n ser = Series(values, dtype=any_string_dtype)\n if any_string_dtype == "str":\n # NaN propagates as False\n expected = Series(expected, dtype=object).fillna(False).astype(bool)\n else:\n expected_dtype = (\n "object" if is_object_or_nan_string_dtype(any_string_dtype) else "boolean"\n )\n expected = Series(expected, 
dtype=expected_dtype)\n result = getattr(ser.str, method)()\n tm.assert_series_equal(result, expected)\n\n\ndef test_spilt_join_roundtrip(any_string_dtype):\n ser = Series(["a_b_c", "c_d_e", np.nan, "f_g_h"], dtype=any_string_dtype)\n result = ser.str.split("_").str.join("_")\n expected = ser.astype(object)\n tm.assert_series_equal(result, expected)\n\n\ndef test_spilt_join_roundtrip_mixed_object():\n ser = Series(\n ["a_b", np.nan, "asdf_cas_asdf", True, datetime.today(), "foo", None, 1, 2.0]\n )\n result = ser.str.split("_").str.join("_")\n expected = Series(\n ["a_b", np.nan, "asdf_cas_asdf", np.nan, np.nan, "foo", None, np.nan, np.nan],\n dtype=object,\n )\n tm.assert_series_equal(result, expected)\n\n\ndef test_len(any_string_dtype):\n ser = Series(\n ["foo", "fooo", "fooooo", np.nan, "fooooooo", "foo\n", "あ"],\n dtype=any_string_dtype,\n )\n result = ser.str.len()\n expected_dtype = (\n "float64" if is_object_or_nan_string_dtype(any_string_dtype) else "Int64"\n )\n expected = Series([3, 4, 6, np.nan, 8, 4, 1], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_len_mixed():\n ser = Series(\n ["a_b", np.nan, "asdf_cas_asdf", True, datetime.today(), "foo", None, 1, 2.0]\n )\n result = ser.str.len()\n expected = Series([3, np.nan, 13, np.nan, np.nan, 3, np.nan, np.nan, np.nan])\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "method,sub,start,end,expected",\n [\n ("index", "EF", None, None, [4, 3, 1, 0]),\n ("rindex", "EF", None, None, [4, 5, 7, 4]),\n ("index", "EF", 3, None, [4, 3, 7, 4]),\n ("rindex", "EF", 3, None, [4, 5, 7, 4]),\n ("index", "E", 4, 8, [4, 5, 7, 4]),\n ("rindex", "E", 0, 5, [4, 3, 1, 4]),\n ],\n)\ndef test_index(method, sub, start, end, index_or_series, any_string_dtype, expected):\n obj = index_or_series(\n ["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"], dtype=any_string_dtype\n )\n expected_dtype = (\n np.int64 if is_object_or_nan_string_dtype(any_string_dtype) else "Int64"\n )\n 
expected = index_or_series(expected, dtype=expected_dtype)\n\n result = getattr(obj.str, method)(sub, start, end)\n\n if index_or_series is Series:\n tm.assert_series_equal(result, expected)\n else:\n tm.assert_index_equal(result, expected)\n\n # compare with standard library\n expected = [getattr(item, method)(sub, start, end) for item in obj]\n assert list(result) == expected\n\n\ndef test_index_not_found_raises(index_or_series, any_string_dtype):\n obj = index_or_series(\n ["ABCDEFG", "BCDEFEF", "DEFGHIJEF", "EFGHEF"], dtype=any_string_dtype\n )\n with pytest.raises(ValueError, match="substring not found"):\n obj.str.index("DE")\n\n\n@pytest.mark.parametrize("method", ["index", "rindex"])\ndef test_index_wrong_type_raises(index_or_series, any_string_dtype, method):\n obj = index_or_series([], dtype=any_string_dtype)\n msg = "expected a string object, not int"\n\n with pytest.raises(TypeError, match=msg):\n getattr(obj.str, method)(0)\n\n\n@pytest.mark.parametrize(\n "method, exp",\n [\n ["index", [1, 1, 0]],\n ["rindex", [3, 1, 2]],\n ],\n)\ndef test_index_missing(any_string_dtype, method, exp):\n ser = Series(["abcb", "ab", "bcbe", np.nan], dtype=any_string_dtype)\n expected_dtype = (\n np.float64 if is_object_or_nan_string_dtype(any_string_dtype) else "Int64"\n )\n\n result = getattr(ser.str, method)("b")\n expected = Series(exp + [np.nan], dtype=expected_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_pipe_failures(any_string_dtype):\n # #2119\n ser = Series(["A|B|C"], dtype=any_string_dtype)\n\n result = ser.str.split("|")\n expected = Series([["A", "B", "C"]], dtype=object)\n tm.assert_series_equal(result, expected)\n\n result = ser.str.replace("|", " ", regex=False)\n expected = Series(["A B C"], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "start, stop, step, expected",\n [\n (2, 5, None, ["foo", "bar", np.nan, "baz"]),\n (0, 3, -1, ["", "", np.nan, ""]),\n (None, None, -1, 
["owtoofaa", "owtrabaa", np.nan, "xuqzabaa"]),\n (None, 2, -1, ["owtoo", "owtra", np.nan, "xuqza"]),\n (3, 10, 2, ["oto", "ato", np.nan, "aqx"]),\n (3, 0, -1, ["ofa", "aba", np.nan, "aba"]),\n ],\n)\ndef test_slice(start, stop, step, expected, any_string_dtype):\n ser = Series(["aafootwo", "aabartwo", np.nan, "aabazqux"], dtype=any_string_dtype)\n result = ser.str.slice(start, stop, step)\n expected = Series(expected, dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "start, stop, step, expected",\n [\n (2, 5, None, ["foo", np.nan, "bar", np.nan, np.nan, None, np.nan, np.nan]),\n (4, 1, -1, ["oof", np.nan, "rab", np.nan, np.nan, None, np.nan, np.nan]),\n ],\n)\ndef test_slice_mixed_object(start, stop, step, expected):\n ser = Series(["aafootwo", np.nan, "aabartwo", True, datetime.today(), None, 1, 2.0])\n result = ser.str.slice(start, stop, step)\n expected = Series(expected, dtype=object)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "start,stop,repl,expected",\n [\n (2, 3, None, ["shrt", "a it longer", "evnlongerthanthat", "", np.nan]),\n (2, 3, "z", ["shzrt", "a zit longer", "evznlongerthanthat", "z", np.nan]),\n (2, 2, "z", ["shzort", "a zbit longer", "evzenlongerthanthat", "z", np.nan]),\n (2, 1, "z", ["shzort", "a zbit longer", "evzenlongerthanthat", "z", np.nan]),\n (-1, None, "z", ["shorz", "a bit longez", "evenlongerthanthaz", "z", np.nan]),\n (None, -2, "z", ["zrt", "zer", "zat", "z", np.nan]),\n (6, 8, "z", ["shortz", "a bit znger", "evenlozerthanthat", "z", np.nan]),\n (-10, 3, "z", ["zrt", "a zit longer", "evenlongzerthanthat", "z", np.nan]),\n ],\n)\ndef test_slice_replace(start, stop, repl, expected, any_string_dtype):\n ser = Series(\n ["short", "a bit longer", "evenlongerthanthat", "", np.nan],\n dtype=any_string_dtype,\n )\n expected = Series(expected, dtype=any_string_dtype)\n result = ser.str.slice_replace(start, stop, repl)\n tm.assert_series_equal(result, 
expected)\n\n\n@pytest.mark.parametrize(\n "method, exp",\n [\n ["strip", ["aa", "bb", np.nan, "cc"]],\n ["lstrip", ["aa ", "bb \n", np.nan, "cc "]],\n ["rstrip", [" aa", " bb", np.nan, "cc"]],\n ],\n)\ndef test_strip_lstrip_rstrip(any_string_dtype, method, exp):\n ser = Series([" aa ", " bb \n", np.nan, "cc "], dtype=any_string_dtype)\n\n result = getattr(ser.str, method)()\n expected = Series(exp, dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "method, exp",\n [\n ["strip", ["aa", np.nan, "bb"]],\n ["lstrip", ["aa ", np.nan, "bb \t\n"]],\n ["rstrip", [" aa", np.nan, " bb"]],\n ],\n)\ndef test_strip_lstrip_rstrip_mixed_object(method, exp):\n ser = Series([" aa ", np.nan, " bb \t\n", True, datetime.today(), None, 1, 2.0])\n\n result = getattr(ser.str, method)()\n expected = Series(exp + [np.nan, np.nan, None, np.nan, np.nan], dtype=object)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "method, exp",\n [\n ["strip", ["ABC", " BNSD", "LDFJH "]],\n ["lstrip", ["ABCxx", " BNSD", "LDFJH xx"]],\n ["rstrip", ["xxABC", "xx BNSD", "LDFJH "]],\n ],\n)\ndef test_strip_lstrip_rstrip_args(any_string_dtype, method, exp):\n ser = Series(["xxABCxx", "xx BNSD", "LDFJH xx"], dtype=any_string_dtype)\n\n result = getattr(ser.str, method)("x")\n expected = Series(exp, dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "prefix, expected", [("a", ["b", " b c", "bc"]), ("ab", ["", "a b c", "bc"])]\n)\ndef test_removeprefix(any_string_dtype, prefix, expected):\n ser = Series(["ab", "a b c", "bc"], dtype=any_string_dtype)\n result = ser.str.removeprefix(prefix)\n ser_expected = Series(expected, dtype=any_string_dtype)\n tm.assert_series_equal(result, ser_expected)\n\n\n@pytest.mark.parametrize(\n "suffix, expected", [("c", ["ab", "a b ", "b"]), ("bc", ["ab", "a b c", ""])]\n)\ndef test_removesuffix(any_string_dtype, suffix, expected):\n ser = Series(["ab", "a b 
c", "bc"], dtype=any_string_dtype)\n result = ser.str.removesuffix(suffix)\n ser_expected = Series(expected, dtype=any_string_dtype)\n tm.assert_series_equal(result, ser_expected)\n\n\ndef test_string_slice_get_syntax(any_string_dtype):\n ser = Series(\n ["YYY", "B", "C", "YYYYYYbYYY", "BYYYcYYY", np.nan, "CYYYBYYY", "dog", "cYYYt"],\n dtype=any_string_dtype,\n )\n\n result = ser.str[0]\n expected = ser.str.get(0)\n tm.assert_series_equal(result, expected)\n\n result = ser.str[:3]\n expected = ser.str.slice(stop=3)\n tm.assert_series_equal(result, expected)\n\n result = ser.str[2::-1]\n expected = ser.str.slice(start=2, step=-1)\n tm.assert_series_equal(result, expected)\n\n\ndef test_string_slice_out_of_bounds_nested():\n ser = Series([(1, 2), (1,), (3, 4, 5)])\n result = ser.str[1]\n expected = Series([2, np.nan, 4])\n tm.assert_series_equal(result, expected)\n\n\ndef test_string_slice_out_of_bounds(any_string_dtype):\n ser = Series(["foo", "b", "ba"], dtype=any_string_dtype)\n result = ser.str[1]\n expected = Series(["o", np.nan, "a"], dtype=any_string_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_encode_decode(any_string_dtype):\n ser = Series(["a", "b", "a\xe4"], dtype=any_string_dtype).str.encode("utf-8")\n result = ser.str.decode("utf-8")\n expected = Series(["a", "b", "a\xe4"], dtype="str")\n tm.assert_series_equal(result, expected)\n\n\ndef test_encode_errors_kwarg(any_string_dtype):\n ser = Series(["a", "b", "a\x9d"], dtype=any_string_dtype)\n\n msg = (\n r"'charmap' codec can't encode character '\\x9d' in position 1: "\n "character maps to <undefined>"\n )\n with pytest.raises(UnicodeEncodeError, match=msg):\n ser.str.encode("cp1252")\n\n result = ser.str.encode("cp1252", "ignore")\n expected = ser.map(lambda x: x.encode("cp1252", "ignore"))\n tm.assert_series_equal(result, expected)\n\n\ndef test_decode_errors_kwarg():\n ser = Series([b"a", b"b", b"a\x9d"])\n\n msg = (\n "'charmap' codec can't decode byte 0x9d in position 1: "\n 
"character maps to <undefined>"\n )\n with pytest.raises(UnicodeDecodeError, match=msg):\n ser.str.decode("cp1252")\n\n result = ser.str.decode("cp1252", "ignore")\n expected = ser.map(lambda x: x.decode("cp1252", "ignore")).astype("str")\n tm.assert_series_equal(result, expected)\n\n\ndef test_decode_string_dtype(string_dtype):\n # https://github.com/pandas-dev/pandas/pull/60940\n ser = Series([b"a", b"b"])\n result = ser.str.decode("utf-8", dtype=string_dtype)\n expected = Series(["a", "b"], dtype=string_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_decode_object_dtype(object_dtype):\n # https://github.com/pandas-dev/pandas/pull/60940\n ser = Series([b"a", rb"\ud800"])\n result = ser.str.decode("utf-8", dtype=object_dtype)\n expected = Series(["a", r"\ud800"], dtype=object_dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_decode_bad_dtype():\n # https://github.com/pandas-dev/pandas/pull/60940\n ser = Series([b"a", b"b"])\n msg = "dtype must be string or object, got dtype='int64'"\n with pytest.raises(ValueError, match=msg):\n ser.str.decode("utf-8", dtype="int64")\n\n\n@pytest.mark.parametrize(\n "form, expected",\n [\n ("NFKC", ["ABC", "ABC", "123", np.nan, "アイエ"]),\n ("NFC", ["ABC", "ABC", "123", np.nan, "アイエ"]), # noqa: RUF001\n ],\n)\ndef test_normalize(form, expected, any_string_dtype):\n ser = Series(\n ["ABC", "ABC", "123", np.nan, "アイエ"], # noqa: RUF001\n index=["a", "b", "c", "d", "e"],\n dtype=any_string_dtype,\n )\n expected = Series(expected, index=["a", "b", "c", "d", "e"], dtype=any_string_dtype)\n result = ser.str.normalize(form)\n tm.assert_series_equal(result, expected)\n\n\ndef test_normalize_bad_arg_raises(any_string_dtype):\n ser = Series(\n ["ABC", "ABC", "123", np.nan, "アイエ"], # noqa: RUF001\n index=["a", "b", "c", "d", "e"],\n dtype=any_string_dtype,\n )\n with pytest.raises(ValueError, match="invalid normalization form"):\n ser.str.normalize("xxx")\n\n\ndef test_normalize_index():\n idx = Index(["ABC", 
"123", "アイエ"]) # noqa: RUF001\n expected = Index(["ABC", "123", "アイエ"])\n result = idx.str.normalize("NFKC")\n tm.assert_index_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "values,inferred_type",\n [\n (["a", "b"], "string"),\n (["a", "b", 1], "mixed-integer"),\n (["a", "b", 1.3], "mixed"),\n (["a", "b", 1.3, 1], "mixed-integer"),\n (["aa", datetime(2011, 1, 1)], "mixed"),\n ],\n)\ndef test_index_str_accessor_visibility(values, inferred_type, index_or_series):\n obj = index_or_series(values)\n if index_or_series is Index:\n assert obj.inferred_type == inferred_type\n\n assert isinstance(obj.str, StringMethods)\n\n\n@pytest.mark.parametrize(\n "values,inferred_type",\n [\n ([1, np.nan], "floating"),\n ([datetime(2011, 1, 1)], "datetime64"),\n ([timedelta(1)], "timedelta64"),\n ],\n)\ndef test_index_str_accessor_non_string_values_raises(\n values, inferred_type, index_or_series\n):\n obj = index_or_series(values)\n if index_or_series is Index:\n assert obj.inferred_type == inferred_type\n\n msg = "Can only use .str accessor with string values"\n with pytest.raises(AttributeError, match=msg):\n obj.str\n\n\ndef test_index_str_accessor_multiindex_raises():\n # MultiIndex has mixed dtype, but not allow to use accessor\n idx = MultiIndex.from_tuples([("a", "b"), ("a", "b")])\n assert idx.inferred_type == "mixed"\n\n msg = "Can only use .str accessor with Index, not MultiIndex"\n with pytest.raises(AttributeError, match=msg):\n idx.str\n\n\ndef test_str_accessor_no_new_attributes(any_string_dtype):\n # https://github.com/pandas-dev/pandas/issues/10673\n ser = Series(list("aabbcde"), dtype=any_string_dtype)\n with pytest.raises(AttributeError, match="You cannot add any new attribute"):\n ser.str.xlabel = "a"\n\n\ndef test_cat_on_bytes_raises():\n lhs = Series(np.array(list("abc"), "S1").astype(object))\n rhs = Series(np.array(list("def"), "S1").astype(object))\n msg = "Cannot use .str.cat with values of inferred dtype 'bytes'"\n with pytest.raises(TypeError, 
match=msg):\n lhs.str.cat(rhs)\n\n\ndef test_str_accessor_in_apply_func():\n # https://github.com/pandas-dev/pandas/issues/38979\n df = DataFrame(zip("abc", "def"))\n expected = Series(["A/D", "B/E", "C/F"])\n result = df.apply(lambda f: "/".join(f.str.upper()), axis=1)\n tm.assert_series_equal(result, expected)\n\n\ndef test_zfill():\n # https://github.com/pandas-dev/pandas/issues/20868\n value = Series(["-1", "1", "1000", 10, np.nan])\n expected = Series(["-01", "001", "1000", np.nan, np.nan], dtype=object)\n tm.assert_series_equal(value.str.zfill(3), expected)\n\n value = Series(["-2", "+5"])\n expected = Series(["-0002", "+0005"])\n tm.assert_series_equal(value.str.zfill(5), expected)\n\n\ndef test_zfill_with_non_integer_argument():\n value = Series(["-2", "+5"])\n wid = "a"\n msg = f"width must be of integer type, not {type(wid).__name__}"\n with pytest.raises(TypeError, match=msg):\n value.str.zfill(wid)\n\n\ndef test_zfill_with_leading_sign():\n value = Series(["-cat", "-1", "+dog"])\n expected = Series(["-0cat", "-0001", "+0dog"])\n tm.assert_series_equal(value.str.zfill(5), expected)\n\n\ndef test_get_with_dict_label():\n # GH47911\n s = Series(\n [\n {"name": "Hello", "value": "World"},\n {"name": "Goodbye", "value": "Planet"},\n {"value": "Sea"},\n ]\n )\n result = s.str.get("name")\n expected = Series(["Hello", "Goodbye", None], dtype=object)\n tm.assert_series_equal(result, expected)\n result = s.str.get("value")\n expected = Series(["World", "Planet", "Sea"], dtype=object)\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_str_decode():\n # GH 22613\n result = Series([b"x", b"y"]).str.decode(encoding="UTF-8", errors="strict")\n expected = Series(["x", "y"], dtype="str")\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\strings\test_strings.py | test_strings.py | Python | 27,324 | 0.95 | 0.088689 | 0.043546 | react-lib | 306 | 2024-04-14T01:44:36.000692 | Apache-2.0 | true | 
dbbaa19091a27e0e085ae54e8a7bc868 |
import numpy as np\nimport pytest\n\nfrom pandas._libs import lib\n\nfrom pandas import (\n NA,\n DataFrame,\n Series,\n _testing as tm,\n option_context,\n)\n\n\ndef test_string_array(nullable_string_dtype, any_string_method):\n method_name, args, kwargs = any_string_method\n\n data = ["a", "bb", np.nan, "ccc"]\n a = Series(data, dtype=object)\n b = Series(data, dtype=nullable_string_dtype)\n\n if method_name == "decode":\n with pytest.raises(TypeError, match="a bytes-like object is required"):\n getattr(b.str, method_name)(*args, **kwargs)\n return\n\n expected = getattr(a.str, method_name)(*args, **kwargs)\n result = getattr(b.str, method_name)(*args, **kwargs)\n\n if isinstance(expected, Series):\n if expected.dtype == "object" and lib.is_string_array(\n expected.dropna().values,\n ):\n assert result.dtype == nullable_string_dtype\n result = result.astype(object)\n\n elif expected.dtype == "object" and lib.is_bool_array(\n expected.values, skipna=True\n ):\n assert result.dtype == "boolean"\n expected = expected.astype("boolean")\n\n elif expected.dtype == "bool":\n assert result.dtype == "boolean"\n result = result.astype("bool")\n\n elif expected.dtype == "float" and expected.isna().any():\n assert result.dtype == "Int64"\n result = result.astype("float")\n\n if expected.dtype == object:\n # GH#18463\n expected[expected.isna()] = NA\n\n elif isinstance(expected, DataFrame):\n columns = expected.select_dtypes(include="object").columns\n assert all(result[columns].dtypes == nullable_string_dtype)\n result[columns] = result[columns].astype(object)\n with option_context("future.no_silent_downcasting", True):\n expected[columns] = expected[columns].fillna(NA) # GH#18463\n\n tm.assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "method,expected",\n [\n ("count", [2, None]),\n ("find", [0, None]),\n ("index", [0, None]),\n ("rindex", [2, None]),\n ],\n)\ndef test_string_array_numeric_integer_array(nullable_string_dtype, method, expected):\n s = 
Series(["aba", None], dtype=nullable_string_dtype)\n result = getattr(s.str, method)("a")\n expected = Series(expected, dtype="Int64")\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "method,expected",\n [\n ("isdigit", [False, None, True]),\n ("isalpha", [True, None, False]),\n ("isalnum", [True, None, True]),\n ("isnumeric", [False, None, True]),\n ],\n)\ndef test_string_array_boolean_array(nullable_string_dtype, method, expected):\n s = Series(["a", None, "1"], dtype=nullable_string_dtype)\n result = getattr(s.str, method)()\n expected = Series(expected, dtype="boolean")\n tm.assert_series_equal(result, expected)\n\n\ndef test_string_array_extract(nullable_string_dtype):\n # https://github.com/pandas-dev/pandas/issues/30969\n # Only expand=False & multiple groups was failing\n\n a = Series(["a1", "b2", "cc"], dtype=nullable_string_dtype)\n b = Series(["a1", "b2", "cc"], dtype="object")\n pat = r"(\w)(\d)"\n\n result = a.str.extract(pat, expand=False)\n expected = b.str.extract(pat, expand=False)\n expected = expected.fillna(NA) # GH#18463\n assert all(result.dtypes == nullable_string_dtype)\n\n result = result.astype(object)\n tm.assert_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\strings\test_string_array.py | test_string_array.py | Python | 3,514 | 0.95 | 0.072072 | 0.034091 | python-kit | 531 | 2024-10-27T08:47:44.336589 | BSD-3-Clause | true | 7043618ddcee5ecfea2b18b092480025 |
import numpy as np\n\nimport pandas as pd\n\n\ndef is_object_or_nan_string_dtype(dtype):\n """\n Check if string-like dtype is following NaN semantics, i.e. is object\n dtype or a NaN-variant of the StringDtype.\n """\n return (isinstance(dtype, np.dtype) and dtype == "object") or (\n dtype.na_value is np.nan\n )\n\n\ndef _convert_na_value(ser, expected):\n if ser.dtype != object:\n if ser.dtype.na_value is np.nan:\n expected = expected.fillna(np.nan)\n else:\n # GH#18463\n expected = expected.fillna(pd.NA)\n return expected\n | .venv\Lib\site-packages\pandas\tests\strings\__init__.py | __init__.py | Python | 587 | 0.95 | 0.217391 | 0.055556 | node-utils | 941 | 2023-08-14T03:58:52.126454 | MIT | true | 0cd6003097dd3a81b8e8873a4a3bc10e |
\n\n | .venv\Lib\site-packages\pandas\tests\strings\__pycache__\conftest.cpython-313.pyc | conftest.cpython-313.pyc | Other | 4,002 | 0.95 | 0.063492 | 0 | python-kit | 405 | 2025-07-03T03:18:14.459841 | MIT | true | 815cbd83e87272c8d2c72251023c6ce4 |
\n\n | .venv\Lib\site-packages\pandas\tests\strings\__pycache__\test_api.cpython-313.pyc | test_api.cpython-313.pyc | Other | 7,376 | 0.95 | 0.031496 | 0.057851 | awesome-app | 246 | 2023-08-05T23:45:12.005777 | Apache-2.0 | true | d0c516ef0c92bc2dad923dc5dec29619 |
\n\n | .venv\Lib\site-packages\pandas\tests\strings\__pycache__\test_case_justify.cpython-313.pyc | test_case_justify.cpython-313.pyc | Other | 22,936 | 0.8 | 0 | 0.004167 | react-lib | 459 | 2025-05-06T06:15:59.637603 | GPL-3.0 | true | a9925c2ce046573fe0ca266eb532bc00 |
\n\n | .venv\Lib\site-packages\pandas\tests\strings\__pycache__\test_cat.cpython-313.pyc | test_cat.cpython-313.pyc | Other | 20,522 | 0.95 | 0 | 0.004098 | react-lib | 4 | 2024-04-12T08:25:59.137927 | MIT | true | e670f8e8272a13d8d76855a61b5a5334 |
\n\n | .venv\Lib\site-packages\pandas\tests\strings\__pycache__\test_extract.cpython-313.pyc | test_extract.cpython-313.pyc | Other | 30,967 | 0.95 | 0 | 0.002538 | awesome-app | 465 | 2023-08-22T10:28:36.572439 | Apache-2.0 | true | 9c6af4971d33d8eb660cc0c521da0c6a |
\n\n | .venv\Lib\site-packages\pandas\tests\strings\__pycache__\test_find_replace.cpython-313.pyc | test_find_replace.cpython-313.pyc | Other | 51,616 | 0.95 | 0 | 0.013314 | python-kit | 253 | 2023-10-09T16:26:59.115419 | GPL-3.0 | true | 38ef4940ef42ec5effea5db6e0780041 |
\n\n | .venv\Lib\site-packages\pandas\tests\strings\__pycache__\test_get_dummies.cpython-313.pyc | test_get_dummies.cpython-313.pyc | Other | 2,701 | 0.8 | 0 | 0.09375 | react-lib | 376 | 2025-03-02T05:22:24.846153 | MIT | true | 96db16939df8d94df2622a4680c638cb |
\n\n | .venv\Lib\site-packages\pandas\tests\strings\__pycache__\test_split_partition.cpython-313.pyc | test_split_partition.cpython-313.pyc | Other | 33,916 | 0.8 | 0 | 0.061538 | python-kit | 524 | 2024-10-31T18:38:36.463524 | BSD-3-Clause | true | 0997f19dc80525f003183d89cacf4369 |
\n\n | .venv\Lib\site-packages\pandas\tests\strings\__pycache__\test_strings.cpython-313.pyc | test_strings.cpython-313.pyc | Other | 44,349 | 0.8 | 0 | 0.017279 | react-lib | 194 | 2024-11-16T06:43:07.789254 | Apache-2.0 | true | 66a3930be8cdec794e2c2ead7e1df4d3 |
\n\n | .venv\Lib\site-packages\pandas\tests\strings\__pycache__\test_string_array.cpython-313.pyc | test_string_array.cpython-313.pyc | Other | 5,228 | 0.95 | 0 | 0.037037 | vue-tools | 488 | 2025-05-18T10:17:26.528275 | BSD-3-Clause | true | 22550fea90795a9fd1debb3f5a9cc988 |
\n\n | .venv\Lib\site-packages\pandas\tests\strings\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 1,215 | 0.7 | 0.083333 | 0 | awesome-app | 305 | 2023-10-28T08:45:33.604347 | MIT | true | add494a89dc8f036126f1a21006ca975 |
import decimal\n\nimport numpy as np\nfrom numpy import iinfo\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n ArrowDtype,\n DataFrame,\n Index,\n Series,\n option_context,\n to_numeric,\n)\nimport pandas._testing as tm\n\n\n@pytest.fixture(params=[None, "ignore", "raise", "coerce"])\ndef errors(request):\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef signed(request):\n return request.param\n\n\n@pytest.fixture(params=[lambda x: x, str], ids=["identity", "str"])\ndef transform(request):\n return request.param\n\n\n@pytest.fixture(params=[47393996303418497800, 100000000000000000000])\ndef large_val(request):\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef multiple_elts(request):\n return request.param\n\n\n@pytest.fixture(\n params=[\n (lambda x: Index(x, name="idx"), tm.assert_index_equal),\n (lambda x: Series(x, name="ser"), tm.assert_series_equal),\n (lambda x: np.array(Index(x).values), tm.assert_numpy_array_equal),\n ]\n)\ndef transform_assert_equal(request):\n return request.param\n\n\n@pytest.mark.parametrize(\n "input_kwargs,result_kwargs",\n [\n ({}, {"dtype": np.int64}),\n ({"errors": "coerce", "downcast": "integer"}, {"dtype": np.int8}),\n ],\n)\ndef test_empty(input_kwargs, result_kwargs):\n # see gh-16302\n ser = Series([], dtype=object)\n result = to_numeric(ser, **input_kwargs)\n\n expected = Series([], **result_kwargs)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]\n)\n@pytest.mark.parametrize("last_val", ["7", 7])\ndef test_series(last_val, infer_string):\n with option_context("future.infer_string", infer_string):\n ser = Series(["1", "-3.14", last_val])\n result = to_numeric(ser)\n\n expected = Series([1, -3.14, 7])\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data",\n [\n [1, 3, 4, 5],\n [1.0, 3.0, 4.0, 
5.0],\n # Bool is regarded as numeric.\n [True, False, True, True],\n ],\n)\ndef test_series_numeric(data):\n ser = Series(data, index=list("ABCD"), name="EFG")\n\n result = to_numeric(ser)\n tm.assert_series_equal(result, ser)\n\n\n@pytest.mark.parametrize(\n "data,msg",\n [\n ([1, -3.14, "apple"], 'Unable to parse string "apple" at position 2'),\n (\n ["orange", 1, -3.14, "apple"],\n 'Unable to parse string "orange" at position 0',\n ),\n ],\n)\ndef test_error(data, msg):\n ser = Series(data)\n\n with pytest.raises(ValueError, match=msg):\n to_numeric(ser, errors="raise")\n\n\n@pytest.mark.parametrize(\n "errors,exp_data", [("ignore", [1, -3.14, "apple"]), ("coerce", [1, -3.14, np.nan])]\n)\n@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning")\ndef test_ignore_error(errors, exp_data):\n ser = Series([1, -3.14, "apple"])\n result = to_numeric(ser, errors=errors)\n\n expected = Series(exp_data)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "errors,exp",\n [\n ("raise", 'Unable to parse string "apple" at position 2'),\n ("ignore", [True, False, "apple"]),\n # Coerces to float.\n ("coerce", [1.0, 0.0, np.nan]),\n ],\n)\n@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning")\ndef test_bool_handling(errors, exp):\n ser = Series([True, False, "apple"])\n\n if isinstance(exp, str):\n with pytest.raises(ValueError, match=exp):\n to_numeric(ser, errors=errors)\n else:\n result = to_numeric(ser, errors=errors)\n expected = Series(exp)\n\n tm.assert_series_equal(result, expected)\n\n\ndef test_list():\n ser = ["1", "-3.14", "7"]\n res = to_numeric(ser)\n\n expected = np.array([1, -3.14, 7])\n tm.assert_numpy_array_equal(res, expected)\n\n\n@pytest.mark.parametrize(\n "data,arr_kwargs",\n [\n ([1, 3, 4, 5], {"dtype": np.int64}),\n ([1.0, 3.0, 4.0, 5.0], {}),\n # Boolean is regarded as numeric.\n ([True, False, True, True], {}),\n ],\n)\ndef test_list_numeric(data, arr_kwargs):\n result = 
to_numeric(data)\n expected = np.array(data, **arr_kwargs)\n tm.assert_numpy_array_equal(result, expected)\n\n\n@pytest.mark.parametrize("kwargs", [{"dtype": "O"}, {}])\ndef test_numeric(kwargs):\n data = [1, -3.14, 7]\n\n ser = Series(data, **kwargs)\n result = to_numeric(ser)\n\n expected = Series(data)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "columns",\n [\n # One column.\n "a",\n # Multiple columns.\n ["a", "b"],\n ],\n)\ndef test_numeric_df_columns(columns):\n # see gh-14827\n df = DataFrame(\n {\n "a": [1.2, decimal.Decimal(3.14), decimal.Decimal("infinity"), "0.1"],\n "b": [1.0, 2.0, 3.0, 4.0],\n }\n )\n\n expected = DataFrame({"a": [1.2, 3.14, np.inf, 0.1], "b": [1.0, 2.0, 3.0, 4.0]})\n\n df_copy = df.copy()\n df_copy[columns] = df_copy[columns].apply(to_numeric)\n\n tm.assert_frame_equal(df_copy, expected)\n\n\n@pytest.mark.parametrize(\n "data,exp_data",\n [\n (\n [[decimal.Decimal(3.14), 1.0], decimal.Decimal(1.6), 0.1],\n [[3.14, 1.0], 1.6, 0.1],\n ),\n ([np.array([decimal.Decimal(3.14), 1.0]), 0.1], [[3.14, 1.0], 0.1]),\n ],\n)\ndef test_numeric_embedded_arr_likes(data, exp_data):\n # Test to_numeric with embedded lists and arrays\n df = DataFrame({"a": data})\n df["a"] = df["a"].apply(to_numeric)\n\n expected = DataFrame({"a": exp_data})\n tm.assert_frame_equal(df, expected)\n\n\ndef test_all_nan():\n ser = Series(["a", "b", "c"])\n result = to_numeric(ser, errors="coerce")\n\n expected = Series([np.nan, np.nan, np.nan])\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning")\ndef test_type_check(errors):\n # see gh-11776\n df = DataFrame({"a": [1, -3.14, 7], "b": ["4", "5", "6"]})\n kwargs = {"errors": errors} if errors is not None else {}\n with pytest.raises(TypeError, match="1-d array"):\n to_numeric(df, **kwargs)\n\n\n@pytest.mark.parametrize("val", [1, 1.1, 20001])\ndef test_scalar(val, signed, transform):\n val = -val if signed else 
val\n assert to_numeric(transform(val)) == float(val)\n\n\n@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning")\ndef test_really_large_scalar(large_val, signed, transform, errors):\n # see gh-24910\n kwargs = {"errors": errors} if errors is not None else {}\n val = -large_val if signed else large_val\n\n val = transform(val)\n val_is_string = isinstance(val, str)\n\n if val_is_string and errors in (None, "raise"):\n msg = "Integer out of range. at position 0"\n with pytest.raises(ValueError, match=msg):\n to_numeric(val, **kwargs)\n else:\n expected = float(val) if (errors == "coerce" and val_is_string) else val\n tm.assert_almost_equal(to_numeric(val, **kwargs), expected)\n\n\n@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning")\ndef test_really_large_in_arr(large_val, signed, transform, multiple_elts, errors):\n # see gh-24910\n kwargs = {"errors": errors} if errors is not None else {}\n val = -large_val if signed else large_val\n val = transform(val)\n\n extra_elt = "string"\n arr = [val] + multiple_elts * [extra_elt]\n\n val_is_string = isinstance(val, str)\n coercing = errors == "coerce"\n\n if errors in (None, "raise") and (val_is_string or multiple_elts):\n if val_is_string:\n msg = "Integer out of range. 
at position 0"\n else:\n msg = 'Unable to parse string "string" at position 1'\n\n with pytest.raises(ValueError, match=msg):\n to_numeric(arr, **kwargs)\n else:\n result = to_numeric(arr, **kwargs)\n\n exp_val = float(val) if (coercing and val_is_string) else val\n expected = [exp_val]\n\n if multiple_elts:\n if coercing:\n expected.append(np.nan)\n exp_dtype = float\n else:\n expected.append(extra_elt)\n exp_dtype = object\n else:\n exp_dtype = float if isinstance(exp_val, (int, float)) else object\n\n tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype))\n\n\n@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning")\ndef test_really_large_in_arr_consistent(large_val, signed, multiple_elts, errors):\n # see gh-24910\n #\n # Even if we discover that we have to hold float, does not mean\n # we should be lenient on subsequent elements that fail to be integer.\n kwargs = {"errors": errors} if errors is not None else {}\n arr = [str(-large_val if signed else large_val)]\n\n if multiple_elts:\n arr.insert(0, large_val)\n\n if errors in (None, "raise"):\n index = int(multiple_elts)\n msg = f"Integer out of range. 
at position {index}"\n\n with pytest.raises(ValueError, match=msg):\n to_numeric(arr, **kwargs)\n else:\n result = to_numeric(arr, **kwargs)\n\n if errors == "coerce":\n expected = [float(i) for i in arr]\n exp_dtype = float\n else:\n expected = arr\n exp_dtype = object\n\n tm.assert_almost_equal(result, np.array(expected, dtype=exp_dtype))\n\n\n@pytest.mark.parametrize(\n "errors,checker",\n [\n ("raise", 'Unable to parse string "fail" at position 0'),\n ("ignore", lambda x: x == "fail"),\n ("coerce", lambda x: np.isnan(x)),\n ],\n)\n@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning")\ndef test_scalar_fail(errors, checker):\n scalar = "fail"\n\n if isinstance(checker, str):\n with pytest.raises(ValueError, match=checker):\n to_numeric(scalar, errors=errors)\n else:\n assert checker(to_numeric(scalar, errors=errors))\n\n\n@pytest.mark.parametrize("data", [[1, 2, 3], [1.0, np.nan, 3, np.nan]])\ndef test_numeric_dtypes(data, transform_assert_equal):\n transform, assert_equal = transform_assert_equal\n data = transform(data)\n\n result = to_numeric(data)\n assert_equal(result, data)\n\n\n@pytest.mark.parametrize(\n "data,exp",\n [\n (["1", "2", "3"], np.array([1, 2, 3], dtype="int64")),\n (["1.5", "2.7", "3.4"], np.array([1.5, 2.7, 3.4])),\n ],\n)\ndef test_str(data, exp, transform_assert_equal):\n transform, assert_equal = transform_assert_equal\n result = to_numeric(transform(data))\n\n expected = transform(exp)\n assert_equal(result, expected)\n\n\ndef test_datetime_like(tz_naive_fixture, transform_assert_equal):\n transform, assert_equal = transform_assert_equal\n idx = pd.date_range("20130101", periods=3, tz=tz_naive_fixture)\n\n result = to_numeric(transform(idx))\n expected = transform(idx.asi8)\n assert_equal(result, expected)\n\n\ndef test_timedelta(transform_assert_equal):\n transform, assert_equal = transform_assert_equal\n idx = pd.timedelta_range("1 days", periods=3, freq="D")\n\n result = to_numeric(transform(idx))\n 
expected = transform(idx.asi8)\n assert_equal(result, expected)\n\n\ndef test_period(request, transform_assert_equal):\n transform, assert_equal = transform_assert_equal\n\n idx = pd.period_range("2011-01", periods=3, freq="M", name="")\n inp = transform(idx)\n\n if not isinstance(inp, Index):\n request.applymarker(\n pytest.mark.xfail(reason="Missing PeriodDtype support in to_numeric")\n )\n result = to_numeric(inp)\n expected = transform(idx.asi8)\n assert_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "errors,expected",\n [\n ("raise", "Invalid object type at position 0"),\n ("ignore", Series([[10.0, 2], 1.0, "apple"])),\n ("coerce", Series([np.nan, 1.0, np.nan])),\n ],\n)\n@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning")\ndef test_non_hashable(errors, expected):\n # see gh-13324\n ser = Series([[10.0, 2], 1.0, "apple"])\n\n if isinstance(expected, str):\n with pytest.raises(TypeError, match=expected):\n to_numeric(ser, errors=errors)\n else:\n result = to_numeric(ser, errors=errors)\n tm.assert_series_equal(result, expected)\n\n\ndef test_downcast_invalid_cast():\n # see gh-13352\n data = ["1", 2, 3]\n invalid_downcast = "unsigned-integer"\n msg = "invalid downcasting method provided"\n\n with pytest.raises(ValueError, match=msg):\n to_numeric(data, downcast=invalid_downcast)\n\n\ndef test_errors_invalid_value():\n # see gh-26466\n data = ["1", 2, 3]\n invalid_error_value = "invalid"\n msg = "invalid error value specified"\n\n with pytest.raises(ValueError, match=msg):\n to_numeric(data, errors=invalid_error_value)\n\n\n@pytest.mark.parametrize(\n "data",\n [\n ["1", 2, 3],\n [1, 2, 3],\n np.array(["1970-01-02", "1970-01-03", "1970-01-04"], dtype="datetime64[D]"),\n ],\n)\n@pytest.mark.parametrize(\n "kwargs,exp_dtype",\n [\n # Basic function tests.\n ({}, np.int64),\n ({"downcast": None}, np.int64),\n # Support below np.float32 is rare and far between.\n ({"downcast": "float"}, np.dtype(np.float32).char),\n # Basic 
dtype support.\n ({"downcast": "unsigned"}, np.dtype(np.typecodes["UnsignedInteger"][0])),\n ],\n)\ndef test_downcast_basic(data, kwargs, exp_dtype):\n # see gh-13352\n result = to_numeric(data, **kwargs)\n expected = np.array([1, 2, 3], dtype=exp_dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n\n@pytest.mark.parametrize("signed_downcast", ["integer", "signed"])\n@pytest.mark.parametrize(\n "data",\n [\n ["1", 2, 3],\n [1, 2, 3],\n np.array(["1970-01-02", "1970-01-03", "1970-01-04"], dtype="datetime64[D]"),\n ],\n)\ndef test_signed_downcast(data, signed_downcast):\n # see gh-13352\n smallest_int_dtype = np.dtype(np.typecodes["Integer"][0])\n expected = np.array([1, 2, 3], dtype=smallest_int_dtype)\n\n res = to_numeric(data, downcast=signed_downcast)\n tm.assert_numpy_array_equal(res, expected)\n\n\ndef test_ignore_downcast_invalid_data():\n # If we can't successfully cast the given\n # data to a numeric dtype, do not bother\n # with the downcast parameter.\n data = ["foo", 2, 3]\n expected = np.array(data, dtype=object)\n\n msg = "errors='ignore' is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = to_numeric(data, errors="ignore", downcast="unsigned")\n tm.assert_numpy_array_equal(res, expected)\n\n\ndef test_ignore_downcast_neg_to_unsigned():\n # Cannot cast to an unsigned integer\n # because we have a negative number.\n data = ["-1", 2, 3]\n expected = np.array([-1, 2, 3], dtype=np.int64)\n\n res = to_numeric(data, downcast="unsigned")\n tm.assert_numpy_array_equal(res, expected)\n\n\n# Warning in 32 bit platforms\n@pytest.mark.filterwarnings("ignore:invalid value encountered in cast:RuntimeWarning")\n@pytest.mark.parametrize("downcast", ["integer", "signed", "unsigned"])\n@pytest.mark.parametrize(\n "data,expected",\n [\n (["1.1", 2, 3], np.array([1.1, 2, 3], dtype=np.float64)),\n (\n [10000.0, 20000, 3000, 40000.36, 50000, 50000.00],\n np.array(\n [10000.0, 20000, 3000, 40000.36, 50000, 50000.00], dtype=np.float64\n 
),\n ),\n ],\n)\ndef test_ignore_downcast_cannot_convert_float(data, expected, downcast):\n # Cannot cast to an integer (signed or unsigned)\n # because we have a float number.\n res = to_numeric(data, downcast=downcast)\n tm.assert_numpy_array_equal(res, expected)\n\n\n@pytest.mark.parametrize(\n "downcast,expected_dtype",\n [("integer", np.int16), ("signed", np.int16), ("unsigned", np.uint16)],\n)\ndef test_downcast_not8bit(downcast, expected_dtype):\n # the smallest integer dtype need not be np.(u)int8\n data = ["256", 257, 258]\n\n expected = np.array([256, 257, 258], dtype=expected_dtype)\n res = to_numeric(data, downcast=downcast)\n tm.assert_numpy_array_equal(res, expected)\n\n\n@pytest.mark.parametrize(\n "dtype,downcast,min_max",\n [\n ("int8", "integer", [iinfo(np.int8).min, iinfo(np.int8).max]),\n ("int16", "integer", [iinfo(np.int16).min, iinfo(np.int16).max]),\n ("int32", "integer", [iinfo(np.int32).min, iinfo(np.int32).max]),\n ("int64", "integer", [iinfo(np.int64).min, iinfo(np.int64).max]),\n ("uint8", "unsigned", [iinfo(np.uint8).min, iinfo(np.uint8).max]),\n ("uint16", "unsigned", [iinfo(np.uint16).min, iinfo(np.uint16).max]),\n ("uint32", "unsigned", [iinfo(np.uint32).min, iinfo(np.uint32).max]),\n ("uint64", "unsigned", [iinfo(np.uint64).min, iinfo(np.uint64).max]),\n ("int16", "integer", [iinfo(np.int8).min, iinfo(np.int8).max + 1]),\n ("int32", "integer", [iinfo(np.int16).min, iinfo(np.int16).max + 1]),\n ("int64", "integer", [iinfo(np.int32).min, iinfo(np.int32).max + 1]),\n ("int16", "integer", [iinfo(np.int8).min - 1, iinfo(np.int16).max]),\n ("int32", "integer", [iinfo(np.int16).min - 1, iinfo(np.int32).max]),\n ("int64", "integer", [iinfo(np.int32).min - 1, iinfo(np.int64).max]),\n ("uint16", "unsigned", [iinfo(np.uint8).min, iinfo(np.uint8).max + 1]),\n ("uint32", "unsigned", [iinfo(np.uint16).min, iinfo(np.uint16).max + 1]),\n ("uint64", "unsigned", [iinfo(np.uint32).min, iinfo(np.uint32).max + 1]),\n ],\n)\ndef 
test_downcast_limits(dtype, downcast, min_max):\n # see gh-14404: test the limits of each downcast.\n series = to_numeric(Series(min_max), downcast=downcast)\n assert series.dtype == dtype\n\n\ndef test_downcast_float64_to_float32():\n # GH-43693: Check float64 preservation when >= 16,777,217\n series = Series([16777217.0, np.finfo(np.float64).max, np.nan], dtype=np.float64)\n result = to_numeric(series, downcast="float")\n\n assert series.dtype == result.dtype\n\n\n@pytest.mark.parametrize(\n "ser,expected",\n [\n (\n Series([0, 9223372036854775808]),\n Series([0, 9223372036854775808], dtype=np.uint64),\n )\n ],\n)\ndef test_downcast_uint64(ser, expected):\n # see gh-14422:\n # BUG: to_numeric doesn't work uint64 numbers\n\n result = to_numeric(ser, downcast="unsigned")\n\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data,exp_data",\n [\n (\n [200, 300, "", "NaN", 30000000000000000000],\n [200, 300, np.nan, np.nan, 30000000000000000000],\n ),\n (\n ["12345678901234567890", "1234567890", "ITEM"],\n [12345678901234567890, 1234567890, np.nan],\n ),\n ],\n)\ndef test_coerce_uint64_conflict(data, exp_data):\n # see gh-17007 and gh-17125\n #\n # Still returns float despite the uint64-nan conflict,\n # which would normally force the casting to object.\n result = to_numeric(Series(data), errors="coerce")\n expected = Series(exp_data, dtype=float)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "errors,exp",\n [\n ("ignore", Series(["12345678901234567890", "1234567890", "ITEM"])),\n ("raise", "Unable to parse string"),\n ],\n)\n@pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning")\ndef test_non_coerce_uint64_conflict(errors, exp):\n # see gh-17007 and gh-17125\n #\n # For completeness.\n ser = Series(["12345678901234567890", "1234567890", "ITEM"])\n\n if isinstance(exp, str):\n with pytest.raises(ValueError, match=exp):\n to_numeric(ser, errors=errors)\n else:\n result = to_numeric(ser, 
errors=errors)\n tm.assert_series_equal(result, ser)\n\n\n@pytest.mark.parametrize("dc1", ["integer", "float", "unsigned"])\n@pytest.mark.parametrize("dc2", ["integer", "float", "unsigned"])\ndef test_downcast_empty(dc1, dc2):\n # GH32493\n\n tm.assert_numpy_array_equal(\n to_numeric([], downcast=dc1),\n to_numeric([], downcast=dc2),\n check_dtype=False,\n )\n\n\ndef test_failure_to_convert_uint64_string_to_NaN():\n # GH 32394\n result = to_numeric("uint64", errors="coerce")\n assert np.isnan(result)\n\n ser = Series([32, 64, np.nan])\n result = to_numeric(Series(["32", "64", "uint64"]), errors="coerce")\n tm.assert_series_equal(result, ser)\n\n\n@pytest.mark.parametrize(\n "strrep",\n [\n "243.164",\n "245.968",\n "249.585",\n "259.745",\n "265.742",\n "272.567",\n "279.196",\n "280.366",\n "275.034",\n "271.351",\n "272.889",\n "270.627",\n "280.828",\n "290.383",\n "308.153",\n "319.945",\n "336.0",\n "344.09",\n "351.385",\n "356.178",\n "359.82",\n "361.03",\n "367.701",\n "380.812",\n "387.98",\n "391.749",\n "391.171",\n "385.97",\n "385.345",\n "386.121",\n "390.996",\n "399.734",\n "413.073",\n "421.532",\n "430.221",\n "437.092",\n "439.746",\n "446.01",\n "451.191",\n "460.463",\n "469.779",\n "472.025",\n "479.49",\n "474.864",\n "467.54",\n "471.978",\n ],\n)\ndef test_precision_float_conversion(strrep):\n # GH 31364\n result = to_numeric(strrep)\n\n assert result == float(strrep)\n\n\n@pytest.mark.parametrize(\n "values, expected",\n [\n (["1", "2", None], Series([1, 2, np.nan], dtype="Int64")),\n (["1", "2", "3"], Series([1, 2, 3], dtype="Int64")),\n (["1", "2", 3], Series([1, 2, 3], dtype="Int64")),\n (["1", "2", 3.5], Series([1, 2, 3.5], dtype="Float64")),\n (["1", None, 3.5], Series([1, np.nan, 3.5], dtype="Float64")),\n (["1", "2", "3.5"], Series([1, 2, 3.5], dtype="Float64")),\n ],\n)\ndef test_to_numeric_from_nullable_string(values, nullable_string_dtype, expected):\n # https://github.com/pandas-dev/pandas/issues/37262\n s = Series(values, 
dtype=nullable_string_dtype)\n result = to_numeric(s)\n tm.assert_series_equal(result, expected)\n\n\ndef test_to_numeric_from_nullable_string_coerce(nullable_string_dtype):\n # GH#52146\n values = ["a", "1"]\n ser = Series(values, dtype=nullable_string_dtype)\n result = to_numeric(ser, errors="coerce")\n expected = Series([pd.NA, 1], dtype="Int64")\n tm.assert_series_equal(result, expected)\n\n\ndef test_to_numeric_from_nullable_string_ignore(nullable_string_dtype):\n # GH#52146\n values = ["a", "1"]\n ser = Series(values, dtype=nullable_string_dtype)\n expected = ser.copy()\n msg = "errors='ignore' is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = to_numeric(ser, errors="ignore")\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data, input_dtype, downcast, expected_dtype",\n (\n ([1, 1], "Int64", "integer", "Int8"),\n ([1.0, pd.NA], "Float64", "integer", "Int8"),\n ([1.0, 1.1], "Float64", "integer", "Float64"),\n ([1, pd.NA], "Int64", "integer", "Int8"),\n ([450, 300], "Int64", "integer", "Int16"),\n ([1, 1], "Float64", "integer", "Int8"),\n ([np.iinfo(np.int64).max - 1, 1], "Int64", "integer", "Int64"),\n ([1, 1], "Int64", "signed", "Int8"),\n ([1.0, 1.0], "Float32", "signed", "Int8"),\n ([1.0, 1.1], "Float64", "signed", "Float64"),\n ([1, pd.NA], "Int64", "signed", "Int8"),\n ([450, -300], "Int64", "signed", "Int16"),\n ([np.iinfo(np.uint64).max - 1, 1], "UInt64", "signed", "UInt64"),\n ([1, 1], "Int64", "unsigned", "UInt8"),\n ([1.0, 1.0], "Float32", "unsigned", "UInt8"),\n ([1.0, 1.1], "Float64", "unsigned", "Float64"),\n ([1, pd.NA], "Int64", "unsigned", "UInt8"),\n ([450, -300], "Int64", "unsigned", "Int64"),\n ([-1, -1], "Int32", "unsigned", "Int32"),\n ([1, 1], "Float64", "float", "Float32"),\n ([1, 1.1], "Float64", "float", "Float32"),\n ([1, 1], "Float32", "float", "Float32"),\n ([1, 1.1], "Float32", "float", "Float32"),\n ),\n)\ndef test_downcast_nullable_numeric(data, input_dtype, 
downcast, expected_dtype):\n arr = pd.array(data, dtype=input_dtype)\n result = to_numeric(arr, downcast=downcast)\n expected = pd.array(data, dtype=expected_dtype)\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_downcast_nullable_mask_is_copied():\n # GH38974\n\n arr = pd.array([1, 2, pd.NA], dtype="Int64")\n\n result = to_numeric(arr, downcast="integer")\n expected = pd.array([1, 2, pd.NA], dtype="Int8")\n tm.assert_extension_array_equal(result, expected)\n\n arr[1] = pd.NA # should not modify result\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_to_numeric_scientific_notation():\n # GH 15898\n result = to_numeric("1.7e+308")\n expected = np.float64(1.7e308)\n assert result == expected\n\n\n@pytest.mark.parametrize("val", [9876543210.0, 2.0**128])\ndef test_to_numeric_large_float_not_downcast_to_float_32(val):\n # GH 19729\n expected = Series([val])\n result = to_numeric(expected, downcast="float")\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "val, dtype", [(1, "Int64"), (1.5, "Float64"), (True, "boolean")]\n)\ndef test_to_numeric_dtype_backend(val, dtype):\n # GH#50505\n ser = Series([val], dtype=object)\n result = to_numeric(ser, dtype_backend="numpy_nullable")\n expected = Series([val], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "val, dtype",\n [\n (1, "Int64"),\n (1.5, "Float64"),\n (True, "boolean"),\n (1, "int64[pyarrow]"),\n (1.5, "float64[pyarrow]"),\n (True, "bool[pyarrow]"),\n ],\n)\ndef test_to_numeric_dtype_backend_na(val, dtype):\n # GH#50505\n if "pyarrow" in dtype:\n pytest.importorskip("pyarrow")\n dtype_backend = "pyarrow"\n else:\n dtype_backend = "numpy_nullable"\n ser = Series([val, None], dtype=object)\n result = to_numeric(ser, dtype_backend=dtype_backend)\n expected = Series([val, pd.NA], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "val, dtype, downcast",\n [\n (1, "Int8", 
"integer"),\n (1.5, "Float32", "float"),\n (1, "Int8", "signed"),\n (1, "int8[pyarrow]", "integer"),\n (1.5, "float[pyarrow]", "float"),\n (1, "int8[pyarrow]", "signed"),\n ],\n)\ndef test_to_numeric_dtype_backend_downcasting(val, dtype, downcast):\n # GH#50505\n if "pyarrow" in dtype:\n pytest.importorskip("pyarrow")\n dtype_backend = "pyarrow"\n else:\n dtype_backend = "numpy_nullable"\n ser = Series([val, None], dtype=object)\n result = to_numeric(ser, dtype_backend=dtype_backend, downcast=downcast)\n expected = Series([val, pd.NA], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "smaller, dtype_backend",\n [["UInt8", "numpy_nullable"], ["uint8[pyarrow]", "pyarrow"]],\n)\ndef test_to_numeric_dtype_backend_downcasting_uint(smaller, dtype_backend):\n # GH#50505\n if dtype_backend == "pyarrow":\n pytest.importorskip("pyarrow")\n ser = Series([1, pd.NA], dtype="UInt64")\n result = to_numeric(ser, dtype_backend=dtype_backend, downcast="unsigned")\n expected = Series([1, pd.NA], dtype=smaller)\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "dtype",\n [\n "Int64",\n "UInt64",\n "Float64",\n "boolean",\n "int64[pyarrow]",\n "uint64[pyarrow]",\n "float64[pyarrow]",\n "bool[pyarrow]",\n ],\n)\ndef test_to_numeric_dtype_backend_already_nullable(dtype):\n # GH#50505\n if "pyarrow" in dtype:\n pytest.importorskip("pyarrow")\n ser = Series([1, pd.NA], dtype=dtype)\n result = to_numeric(ser, dtype_backend="numpy_nullable")\n expected = Series([1, pd.NA], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_to_numeric_dtype_backend_error(dtype_backend):\n # GH#50505\n ser = Series(["a", "b", ""])\n expected = ser.copy()\n with pytest.raises(ValueError, match="Unable to parse string"):\n to_numeric(ser, dtype_backend=dtype_backend)\n\n msg = "errors='ignore' is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = to_numeric(ser, dtype_backend=dtype_backend, 
errors="ignore")\n tm.assert_series_equal(result, expected)\n\n result = to_numeric(ser, dtype_backend=dtype_backend, errors="coerce")\n if dtype_backend == "pyarrow":\n dtype = "double[pyarrow]"\n else:\n dtype = "Float64"\n expected = Series([np.nan, np.nan, np.nan], dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n\ndef test_invalid_dtype_backend():\n ser = Series([1, 2, 3])\n msg = (\n "dtype_backend numpy is invalid, only 'numpy_nullable' and "\n "'pyarrow' are allowed."\n )\n with pytest.raises(ValueError, match=msg):\n to_numeric(ser, dtype_backend="numpy")\n\n\ndef test_coerce_pyarrow_backend():\n # GH 52588\n pa = pytest.importorskip("pyarrow")\n ser = Series(list("12x"), dtype=ArrowDtype(pa.string()))\n result = to_numeric(ser, errors="coerce", dtype_backend="pyarrow")\n expected = Series([1, 2, None], dtype=ArrowDtype(pa.int64()))\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\tools\test_to_numeric.py | test_to_numeric.py | Python | 29,480 | 0.95 | 0.095092 | 0.073935 | python-kit | 476 | 2024-07-30T06:10:22.657575 | MIT | true | 773e3eb3faf97e98733680455f7b88ef |
from datetime import time\nimport locale\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import PY311\n\nfrom pandas import Series\nimport pandas._testing as tm\nfrom pandas.core.tools.times import to_time\n\n# The tests marked with this are locale-dependent.\n# They pass, except when the machine locale is zh_CN or it_IT.\nfails_on_non_english = pytest.mark.xfail(\n locale.getlocale()[0] in ("zh_CN", "it_IT"),\n reason="fail on a CI build with LC_ALL=zh_CN.utf8/it_IT.utf8",\n strict=False,\n)\n\n\nclass TestToTime:\n @pytest.mark.parametrize(\n "time_string",\n [\n "14:15",\n "1415",\n pytest.param("2:15pm", marks=fails_on_non_english),\n pytest.param("0215pm", marks=fails_on_non_english),\n "14:15:00",\n "141500",\n pytest.param("2:15:00pm", marks=fails_on_non_english),\n pytest.param("021500pm", marks=fails_on_non_english),\n time(14, 15),\n ],\n )\n def test_parsers_time(self, time_string):\n # GH#11818\n assert to_time(time_string) == time(14, 15)\n\n def test_odd_format(self):\n new_string = "14.15"\n msg = r"Cannot convert arg \['14\.15'\] to a time"\n if not PY311:\n with pytest.raises(ValueError, match=msg):\n to_time(new_string)\n assert to_time(new_string, format="%H.%M") == time(14, 15)\n\n def test_arraylike(self):\n arg = ["14:15", "20:20"]\n expected_arr = [time(14, 15), time(20, 20)]\n assert to_time(arg) == expected_arr\n assert to_time(arg, format="%H:%M") == expected_arr\n assert to_time(arg, infer_time_format=True) == expected_arr\n assert to_time(arg, format="%I:%M%p", errors="coerce") == [None, None]\n\n msg = "errors='ignore' is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = to_time(arg, format="%I:%M%p", errors="ignore")\n tm.assert_numpy_array_equal(res, np.array(arg, dtype=np.object_))\n\n msg = "Cannot convert.+to a time with given format"\n with pytest.raises(ValueError, match=msg):\n to_time(arg, format="%I:%M%p", errors="raise")\n\n tm.assert_series_equal(\n to_time(Series(arg, name="test")), 
Series(expected_arr, name="test")\n )\n\n res = to_time(np.array(arg))\n assert isinstance(res, list)\n assert res == expected_arr\n | .venv\Lib\site-packages\pandas\tests\tools\test_to_time.py | test_to_time.py | Python | 2,417 | 0.95 | 0.069444 | 0.05 | awesome-app | 848 | 2024-09-09T06:15:01.891264 | MIT | true | 66950fce7d8661ed3cc3f40977330421 |
from datetime import (\n time,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import IS64\nfrom pandas.errors import OutOfBoundsTimedelta\n\nimport pandas as pd\nfrom pandas import (\n Series,\n TimedeltaIndex,\n isna,\n to_timedelta,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import TimedeltaArray\n\n\nclass TestTimedeltas:\n def test_to_timedelta_dt64_raises(self):\n # Passing datetime64-dtype data to TimedeltaIndex is no longer\n # supported GH#29794\n msg = r"dtype datetime64\[ns\] cannot be converted to timedelta64\[ns\]"\n\n ser = Series([pd.NaT])\n with pytest.raises(TypeError, match=msg):\n to_timedelta(ser)\n with pytest.raises(TypeError, match=msg):\n ser.to_frame().apply(to_timedelta)\n\n @pytest.mark.parametrize("readonly", [True, False])\n def test_to_timedelta_readonly(self, readonly):\n # GH#34857\n arr = np.array([], dtype=object)\n if readonly:\n arr.setflags(write=False)\n result = to_timedelta(arr)\n expected = to_timedelta([])\n tm.assert_index_equal(result, expected)\n\n def test_to_timedelta_null(self):\n result = to_timedelta(["", ""])\n assert isna(result).all()\n\n def test_to_timedelta_same_np_timedelta64(self):\n # pass thru\n result = to_timedelta(np.array([np.timedelta64(1, "s")]))\n expected = pd.Index(np.array([np.timedelta64(1, "s")]))\n tm.assert_index_equal(result, expected)\n\n def test_to_timedelta_series(self):\n # Series\n expected = Series([timedelta(days=1), timedelta(days=1, seconds=1)])\n result = to_timedelta(Series(["1d", "1days 00:00:01"]))\n tm.assert_series_equal(result, expected)\n\n def test_to_timedelta_units(self):\n # with units\n result = TimedeltaIndex(\n [np.timedelta64(0, "ns"), np.timedelta64(10, "s").astype("m8[ns]")]\n )\n expected = to_timedelta([0, 10], unit="s")\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "dtype, unit",\n [\n ["int64", "s"],\n ["int64", "m"],\n ["int64", "h"],\n ["timedelta64[s]", "s"],\n ["timedelta64[D]", "D"],\n 
],\n )\n def test_to_timedelta_units_dtypes(self, dtype, unit):\n # arrays of various dtypes\n arr = np.array([1] * 5, dtype=dtype)\n result = to_timedelta(arr, unit=unit)\n exp_dtype = "m8[ns]" if dtype == "int64" else "m8[s]"\n expected = TimedeltaIndex([np.timedelta64(1, unit)] * 5, dtype=exp_dtype)\n tm.assert_index_equal(result, expected)\n\n def test_to_timedelta_oob_non_nano(self):\n arr = np.array([pd.NaT._value + 1], dtype="timedelta64[m]")\n\n msg = (\n "Cannot convert -9223372036854775807 minutes to "\n r"timedelta64\[s\] without overflow"\n )\n with pytest.raises(OutOfBoundsTimedelta, match=msg):\n to_timedelta(arr)\n\n with pytest.raises(OutOfBoundsTimedelta, match=msg):\n TimedeltaIndex(arr)\n\n with pytest.raises(OutOfBoundsTimedelta, match=msg):\n TimedeltaArray._from_sequence(arr, dtype="m8[s]")\n\n @pytest.mark.parametrize(\n "arg", [np.arange(10).reshape(2, 5), pd.DataFrame(np.arange(10).reshape(2, 5))]\n )\n @pytest.mark.parametrize("errors", ["ignore", "raise", "coerce"])\n @pytest.mark.filterwarnings("ignore:errors='ignore' is deprecated:FutureWarning")\n def test_to_timedelta_dataframe(self, arg, errors):\n # GH 11776\n with pytest.raises(TypeError, match="1-d array"):\n to_timedelta(arg, errors=errors)\n\n def test_to_timedelta_invalid_errors(self):\n # bad value for errors parameter\n msg = "errors must be one of"\n with pytest.raises(ValueError, match=msg):\n to_timedelta(["foo"], errors="never")\n\n @pytest.mark.parametrize("arg", [[1, 2], 1])\n def test_to_timedelta_invalid_unit(self, arg):\n # these will error\n msg = "invalid unit abbreviation: foo"\n with pytest.raises(ValueError, match=msg):\n to_timedelta(arg, unit="foo")\n\n def test_to_timedelta_time(self):\n # time not supported ATM\n msg = (\n "Value must be Timedelta, string, integer, float, timedelta or convertible"\n )\n with pytest.raises(ValueError, match=msg):\n to_timedelta(time(second=1))\n assert to_timedelta(time(second=1), errors="coerce") is pd.NaT\n\n def 
test_to_timedelta_bad_value(self):\n msg = "Could not convert 'foo' to NumPy timedelta"\n with pytest.raises(ValueError, match=msg):\n to_timedelta(["foo", "bar"])\n\n def test_to_timedelta_bad_value_coerce(self):\n tm.assert_index_equal(\n TimedeltaIndex([pd.NaT, pd.NaT]),\n to_timedelta(["foo", "bar"], errors="coerce"),\n )\n\n tm.assert_index_equal(\n TimedeltaIndex(["1 day", pd.NaT, "1 min"]),\n to_timedelta(["1 day", "bar", "1 min"], errors="coerce"),\n )\n\n def test_to_timedelta_invalid_errors_ignore(self):\n # gh-13613: these should not error because errors='ignore'\n msg = "errors='ignore' is deprecated"\n invalid_data = "apple"\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = to_timedelta(invalid_data, errors="ignore")\n assert invalid_data == result\n\n invalid_data = ["apple", "1 days"]\n expected = np.array(invalid_data, dtype=object)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = to_timedelta(invalid_data, errors="ignore")\n tm.assert_numpy_array_equal(expected, result)\n\n invalid_data = pd.Index(["apple", "1 days"])\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = to_timedelta(invalid_data, errors="ignore")\n tm.assert_index_equal(invalid_data, result)\n\n invalid_data = Series(["apple", "1 days"])\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = to_timedelta(invalid_data, errors="ignore")\n tm.assert_series_equal(invalid_data, result)\n\n @pytest.mark.parametrize(\n "val, errors",\n [\n ("1M", True),\n ("1 M", True),\n ("1Y", True),\n ("1 Y", True),\n ("1y", True),\n ("1 y", True),\n ("1m", False),\n ("1 m", False),\n ("1 day", False),\n ("2day", False),\n ],\n )\n def test_unambiguous_timedelta_values(self, val, errors):\n # GH36666 Deprecate use of strings denoting units with 'M', 'Y', 'm' or 'y'\n # in pd.to_timedelta\n msg = "Units 'M', 'Y' and 'y' do not represent unambiguous timedelta"\n if errors:\n with pytest.raises(ValueError, match=msg):\n 
to_timedelta(val)\n else:\n # check it doesn't raise\n to_timedelta(val)\n\n def test_to_timedelta_via_apply(self):\n # GH 5458\n expected = Series([np.timedelta64(1, "s")])\n result = Series(["00:00:01"]).apply(to_timedelta)\n tm.assert_series_equal(result, expected)\n\n result = Series([to_timedelta("00:00:01")])\n tm.assert_series_equal(result, expected)\n\n def test_to_timedelta_inference_without_warning(self):\n # GH#41731 inference produces a warning in the Series constructor,\n # but _not_ in to_timedelta\n vals = ["00:00:01", pd.NaT]\n with tm.assert_produces_warning(None):\n result = to_timedelta(vals)\n\n expected = TimedeltaIndex([pd.Timedelta(seconds=1), pd.NaT])\n tm.assert_index_equal(result, expected)\n\n def test_to_timedelta_on_missing_values(self):\n # GH5438\n timedelta_NaT = np.timedelta64("NaT")\n\n actual = to_timedelta(Series(["00:00:01", np.nan]))\n expected = Series(\n [np.timedelta64(1000000000, "ns"), timedelta_NaT],\n dtype=f"{tm.ENDIAN}m8[ns]",\n )\n tm.assert_series_equal(actual, expected)\n\n ser = Series(["00:00:01", pd.NaT], dtype="m8[ns]")\n actual = to_timedelta(ser)\n tm.assert_series_equal(actual, expected)\n\n @pytest.mark.parametrize("val", [np.nan, pd.NaT, pd.NA])\n def test_to_timedelta_on_missing_values_scalar(self, val):\n actual = to_timedelta(val)\n assert actual._value == np.timedelta64("NaT").astype("int64")\n\n @pytest.mark.parametrize("val", [np.nan, pd.NaT, pd.NA])\n def test_to_timedelta_on_missing_values_list(self, val):\n actual = to_timedelta([val])\n assert actual[0]._value == np.timedelta64("NaT").astype("int64")\n\n @pytest.mark.xfail(not IS64, reason="Floating point error")\n def test_to_timedelta_float(self):\n # https://github.com/pandas-dev/pandas/issues/25077\n arr = np.arange(0, 1, 1e-6)[-10:]\n result = to_timedelta(arr, unit="s")\n expected_asi8 = np.arange(999990000, 10**9, 1000, dtype="int64")\n tm.assert_numpy_array_equal(result.asi8, expected_asi8)\n\n def 
test_to_timedelta_coerce_strings_unit(self):\n arr = np.array([1, 2, "error"], dtype=object)\n result = to_timedelta(arr, unit="ns", errors="coerce")\n expected = to_timedelta([1, 2, pd.NaT], unit="ns")\n tm.assert_index_equal(result, expected)\n\n def test_to_timedelta_ignore_strings_unit(self):\n arr = np.array([1, 2, "error"], dtype=object)\n msg = "errors='ignore' is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = to_timedelta(arr, unit="ns", errors="ignore")\n tm.assert_numpy_array_equal(result, arr)\n\n @pytest.mark.parametrize(\n "expected_val, result_val", [[timedelta(days=2), 2], [None, None]]\n )\n def test_to_timedelta_nullable_int64_dtype(self, expected_val, result_val):\n # GH 35574\n expected = Series([timedelta(days=1), expected_val])\n result = to_timedelta(Series([1, result_val], dtype="Int64"), unit="days")\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n ("input", "expected"),\n [\n ("8:53:08.71800000001", "8:53:08.718"),\n ("8:53:08.718001", "8:53:08.718001"),\n ("8:53:08.7180000001", "8:53:08.7180000001"),\n ("-8:53:08.71800000001", "-8:53:08.718"),\n ("8:53:08.7180000089", "8:53:08.718000008"),\n ],\n )\n @pytest.mark.parametrize("func", [pd.Timedelta, to_timedelta])\n def test_to_timedelta_precision_over_nanos(self, input, expected, func):\n # GH: 36738\n expected = pd.Timedelta(expected)\n result = func(input)\n assert result == expected\n\n def test_to_timedelta_zerodim(self, fixed_now_ts):\n # ndarray.item() incorrectly returns int for dt64[ns] and td64[ns]\n dt64 = fixed_now_ts.to_datetime64()\n arg = np.array(dt64)\n\n msg = (\n "Value must be Timedelta, string, integer, float, timedelta "\n "or convertible, not datetime64"\n )\n with pytest.raises(ValueError, match=msg):\n to_timedelta(arg)\n\n arg2 = arg.view("m8[ns]")\n result = to_timedelta(arg2)\n assert isinstance(result, pd.Timedelta)\n assert result._value == dt64.view("i8")\n\n def test_to_timedelta_numeric_ea(self, 
any_numeric_ea_dtype):\n # GH#48796\n ser = Series([1, pd.NA], dtype=any_numeric_ea_dtype)\n result = to_timedelta(ser)\n expected = Series([pd.Timedelta(1, unit="ns"), pd.NaT])\n tm.assert_series_equal(result, expected)\n\n def test_to_timedelta_fraction(self):\n result = to_timedelta(1.0 / 3, unit="h")\n expected = pd.Timedelta("0 days 00:19:59.999999998")\n assert result == expected\n\n\ndef test_from_numeric_arrow_dtype(any_numeric_ea_dtype):\n # GH 52425\n pytest.importorskip("pyarrow")\n ser = Series([1, 2], dtype=f"{any_numeric_ea_dtype.lower()}[pyarrow]")\n result = to_timedelta(ser)\n expected = Series([1, 2], dtype="timedelta64[ns]")\n tm.assert_series_equal(result, expected)\n\n\n@pytest.mark.parametrize("unit", ["ns", "ms"])\ndef test_from_timedelta_arrow_dtype(unit):\n # GH 54298\n pytest.importorskip("pyarrow")\n expected = Series([timedelta(1)], dtype=f"duration[{unit}][pyarrow]")\n result = to_timedelta(expected)\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\tools\test_to_timedelta.py | test_to_timedelta.py | Python | 12,454 | 0.95 | 0.108824 | 0.090909 | awesome-app | 82 | 2024-01-06T16:43:41.332556 | Apache-2.0 | true | d44493a4c50ebc75719c0d62fad5601a |
\n\n | .venv\Lib\site-packages\pandas\tests\tools\__pycache__\test_to_numeric.cpython-313.pyc | test_to_numeric.cpython-313.pyc | Other | 42,220 | 0.95 | 0 | 0.005128 | react-lib | 838 | 2025-04-17T02:42:01.383805 | BSD-3-Clause | true | c862bc7e50ac3a26228b5bcf18bb7b66 |
\n\n | .venv\Lib\site-packages\pandas\tests\tools\__pycache__\test_to_time.cpython-313.pyc | test_to_time.cpython-313.pyc | Other | 3,956 | 0.8 | 0 | 0 | awesome-app | 322 | 2025-03-14T14:26:58.984001 | GPL-3.0 | true | 327dd554ee0b4b5dc3ddea410cc33c92 |
\n\n | .venv\Lib\site-packages\pandas\tests\tools\__pycache__\test_to_timedelta.cpython-313.pyc | test_to_timedelta.cpython-313.pyc | Other | 21,193 | 0.95 | 0 | 0.004184 | awesome-app | 287 | 2025-02-04T12:26:56.509080 | MIT | true | 04d266debffe4950e684b5cf1dd3d030 |
\n\n | .venv\Lib\site-packages\pandas\tests\tools\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 193 | 0.7 | 0 | 0 | react-lib | 525 | 2024-06-18T13:23:45.248162 | GPL-3.0 | true | 7e177ad62c4d76885d4db62ca86da7b9 |
import pytest\n\nfrom pandas._libs.tslibs import offsets\n\nfrom pandas.tseries.frequencies import (\n is_subperiod,\n is_superperiod,\n)\n\n\n@pytest.mark.parametrize(\n "p1,p2,expected",\n [\n # Input validation.\n (offsets.MonthEnd(), None, False),\n (offsets.YearEnd(), None, False),\n (None, offsets.YearEnd(), False),\n (None, offsets.MonthEnd(), False),\n (None, None, False),\n (offsets.YearEnd(), offsets.MonthEnd(), True),\n (offsets.Hour(), offsets.Minute(), True),\n (offsets.Second(), offsets.Milli(), True),\n (offsets.Milli(), offsets.Micro(), True),\n (offsets.Micro(), offsets.Nano(), True),\n ],\n)\ndef test_super_sub_symmetry(p1, p2, expected):\n assert is_superperiod(p1, p2) is expected\n assert is_subperiod(p2, p1) is expected\n | .venv\Lib\site-packages\pandas\tests\tseries\frequencies\test_frequencies.py | test_frequencies.py | Python | 821 | 0.95 | 0.034483 | 0.04 | node-utils | 695 | 2024-08-30T05:10:12.546683 | Apache-2.0 | true | bf0274eea728ae564dca2cfb98f3e868 |
import numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import (\n Period,\n to_offset,\n)\n\n\n@pytest.mark.parametrize(\n "freqstr,exp_freqstr",\n [("D", "D"), ("W", "D"), ("ME", "D"), ("s", "s"), ("min", "s"), ("h", "s")],\n)\ndef test_get_to_timestamp_base(freqstr, exp_freqstr):\n off = to_offset(freqstr)\n per = Period._from_ordinal(1, off)\n exp_code = to_offset(exp_freqstr)._period_dtype_code\n\n result_code = per._dtype._get_to_timestamp_base()\n assert result_code == exp_code\n\n\n@pytest.mark.parametrize(\n "args,expected",\n [\n ((1.5, "min"), (90, "s")),\n ((62.4, "min"), (3744, "s")),\n ((1.04, "h"), (3744, "s")),\n ((1, "D"), (1, "D")),\n ((0.342931, "h"), (1234551600, "us")),\n ((1.2345, "D"), (106660800, "ms")),\n ],\n)\ndef test_resolution_bumping(args, expected):\n # see gh-14378\n off = to_offset(str(args[0]) + args[1])\n assert off.n == expected[0]\n assert off._prefix == expected[1]\n\n\n@pytest.mark.parametrize(\n "args",\n [\n (0.5, "ns"),\n # Too much precision in the input can prevent.\n (0.3429324798798269273987982, "h"),\n ],\n)\ndef test_cat(args):\n msg = "Invalid frequency"\n\n with pytest.raises(ValueError, match=msg):\n to_offset(str(args[0]) + args[1])\n\n\n@pytest.mark.parametrize(\n "freqstr,expected",\n [\n ("1h", "2021-01-01T09:00:00"),\n ("1D", "2021-01-02T08:00:00"),\n ("1W", "2021-01-03T08:00:00"),\n ("1ME", "2021-01-31T08:00:00"),\n ("1YE", "2021-12-31T08:00:00"),\n ],\n)\ndef test_compatibility(freqstr, expected):\n ts_np = np.datetime64("2021-01-01T08:00:00.00")\n do = to_offset(freqstr)\n assert ts_np + do == np.datetime64(expected)\n | .venv\Lib\site-packages\pandas\tests\tseries\frequencies\test_freq_code.py | test_freq_code.py | Python | 1,727 | 0.95 | 0.057971 | 0.034483 | python-kit | 490 | 2024-11-07T02:13:01.844077 | GPL-3.0 | true | 1de0ad20d0e6e1fdf95784171c7da0d1 |
from datetime import (\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.ccalendar import (\n DAYS,\n MONTHS,\n)\nfrom pandas._libs.tslibs.offsets import _get_offset\nfrom pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG\nfrom pandas.compat import is_platform_windows\n\nfrom pandas import (\n DatetimeIndex,\n Index,\n RangeIndex,\n Series,\n Timestamp,\n date_range,\n period_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n DatetimeArray,\n TimedeltaArray,\n)\nfrom pandas.core.tools.datetimes import to_datetime\n\nfrom pandas.tseries import (\n frequencies,\n offsets,\n)\n\n\n@pytest.fixture(\n params=[\n (timedelta(1), "D"),\n (timedelta(hours=1), "h"),\n (timedelta(minutes=1), "min"),\n (timedelta(seconds=1), "s"),\n (np.timedelta64(1, "ns"), "ns"),\n (timedelta(microseconds=1), "us"),\n (timedelta(microseconds=1000), "ms"),\n ]\n)\ndef base_delta_code_pair(request):\n return request.param\n\n\nfreqs = (\n [f"QE-{month}" for month in MONTHS]\n + [f"{annual}-{month}" for annual in ["YE", "BYE"] for month in MONTHS]\n + ["ME", "BME", "BMS"]\n + [f"WOM-{count}{day}" for count in range(1, 5) for day in DAYS]\n + [f"W-{day}" for day in DAYS]\n)\n\n\n@pytest.mark.parametrize("freq", freqs)\n@pytest.mark.parametrize("periods", [5, 7])\ndef test_infer_freq_range(periods, freq):\n freq = freq.upper()\n\n gen = date_range("1/1/2000", periods=periods, freq=freq)\n index = DatetimeIndex(gen.values)\n\n if not freq.startswith("QE-"):\n assert frequencies.infer_freq(index) == gen.freqstr\n else:\n inf_freq = frequencies.infer_freq(index)\n is_dec_range = inf_freq == "QE-DEC" and gen.freqstr in (\n "QE",\n "QE-DEC",\n "QE-SEP",\n "QE-JUN",\n "QE-MAR",\n )\n is_nov_range = inf_freq == "QE-NOV" and gen.freqstr in (\n "QE-NOV",\n "QE-AUG",\n "QE-MAY",\n "QE-FEB",\n )\n is_oct_range = inf_freq == "QE-OCT" and gen.freqstr in (\n "QE-OCT",\n "QE-JUL",\n "QE-APR",\n "QE-JAN",\n )\n assert is_dec_range or 
is_nov_range or is_oct_range\n\n\ndef test_raise_if_period_index():\n index = period_range(start="1/1/1990", periods=20, freq="M")\n msg = "Check the `freq` attribute instead of using infer_freq"\n\n with pytest.raises(TypeError, match=msg):\n frequencies.infer_freq(index)\n\n\ndef test_raise_if_too_few():\n index = DatetimeIndex(["12/31/1998", "1/3/1999"])\n msg = "Need at least 3 dates to infer frequency"\n\n with pytest.raises(ValueError, match=msg):\n frequencies.infer_freq(index)\n\n\ndef test_business_daily():\n index = DatetimeIndex(["01/01/1999", "1/4/1999", "1/5/1999"])\n assert frequencies.infer_freq(index) == "B"\n\n\ndef test_business_daily_look_alike():\n # see gh-16624\n #\n # Do not infer "B when "weekend" (2-day gap) in wrong place.\n index = DatetimeIndex(["12/31/1998", "1/3/1999", "1/4/1999"])\n assert frequencies.infer_freq(index) is None\n\n\ndef test_day_corner():\n index = DatetimeIndex(["1/1/2000", "1/2/2000", "1/3/2000"])\n assert frequencies.infer_freq(index) == "D"\n\n\ndef test_non_datetime_index():\n dates = to_datetime(["1/1/2000", "1/2/2000", "1/3/2000"])\n assert frequencies.infer_freq(dates) == "D"\n\n\ndef test_fifth_week_of_month_infer():\n # see gh-9425\n #\n # Only attempt to infer up to WOM-4.\n index = DatetimeIndex(["2014-03-31", "2014-06-30", "2015-03-30"])\n assert frequencies.infer_freq(index) is None\n\n\ndef test_week_of_month_fake():\n # All of these dates are on same day\n # of week and are 4 or 5 weeks apart.\n index = DatetimeIndex(["2013-08-27", "2013-10-01", "2013-10-29", "2013-11-26"])\n assert frequencies.infer_freq(index) != "WOM-4TUE"\n\n\ndef test_fifth_week_of_month():\n # see gh-9425\n #\n # Only supports freq up to WOM-4.\n msg = (\n "Of the four parameters: start, end, periods, "\n "and freq, exactly three must be specified"\n )\n\n with pytest.raises(ValueError, match=msg):\n date_range("2014-01-01", freq="WOM-5MON")\n\n\ndef test_monthly_ambiguous():\n rng = DatetimeIndex(["1/31/2000", "2/29/2000", 
"3/31/2000"])\n assert rng.inferred_freq == "ME"\n\n\ndef test_annual_ambiguous():\n rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"])\n assert rng.inferred_freq == "YE-JAN"\n\n\n@pytest.mark.parametrize("count", range(1, 5))\ndef test_infer_freq_delta(base_delta_code_pair, count):\n b = Timestamp(datetime.now())\n base_delta, code = base_delta_code_pair\n\n inc = base_delta * count\n index = DatetimeIndex([b + inc * j for j in range(3)])\n\n exp_freq = f"{count:d}{code}" if count > 1 else code\n assert frequencies.infer_freq(index) == exp_freq\n\n\n@pytest.mark.parametrize(\n "constructor",\n [\n lambda now, delta: DatetimeIndex(\n [now + delta * 7] + [now + delta * j for j in range(3)]\n ),\n lambda now, delta: DatetimeIndex(\n [now + delta * j for j in range(3)] + [now + delta * 7]\n ),\n ],\n)\ndef test_infer_freq_custom(base_delta_code_pair, constructor):\n b = Timestamp(datetime.now())\n base_delta, _ = base_delta_code_pair\n\n index = constructor(b, base_delta)\n assert frequencies.infer_freq(index) is None\n\n\n@pytest.mark.parametrize(\n "freq,expected", [("Q", "QE-DEC"), ("Q-NOV", "QE-NOV"), ("Q-OCT", "QE-OCT")]\n)\ndef test_infer_freq_index(freq, expected):\n rng = period_range("1959Q2", "2009Q3", freq=freq)\n with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):\n rng = Index(rng.to_timestamp("D", how="e").astype(object))\n\n assert rng.inferred_freq == expected\n\n\n@pytest.mark.parametrize(\n "expected,dates",\n list(\n {\n "YS-JAN": ["2009-01-01", "2010-01-01", "2011-01-01", "2012-01-01"],\n "QE-OCT": ["2009-01-31", "2009-04-30", "2009-07-31", "2009-10-31"],\n "ME": ["2010-11-30", "2010-12-31", "2011-01-31", "2011-02-28"],\n "W-SAT": ["2010-12-25", "2011-01-01", "2011-01-08", "2011-01-15"],\n "D": ["2011-01-01", "2011-01-02", "2011-01-03", "2011-01-04"],\n "h": [\n "2011-12-31 22:00",\n "2011-12-31 23:00",\n "2012-01-01 00:00",\n "2012-01-01 01:00",\n ],\n }.items()\n ),\n)\n@pytest.mark.parametrize("unit", ["s", "ms", 
"us", "ns"])\ndef test_infer_freq_tz(tz_naive_fixture, expected, dates, unit):\n # see gh-7310, GH#55609\n tz = tz_naive_fixture\n idx = DatetimeIndex(dates, tz=tz).as_unit(unit)\n assert idx.inferred_freq == expected\n\n\ndef test_infer_freq_tz_series(tz_naive_fixture):\n # infer_freq should work with both tz-naive and tz-aware series. See gh-52456\n tz = tz_naive_fixture\n idx = date_range("2021-01-01", "2021-01-04", tz=tz)\n series = idx.to_series().reset_index(drop=True)\n inferred_freq = frequencies.infer_freq(series)\n assert inferred_freq == "D"\n\n\n@pytest.mark.parametrize(\n "date_pair",\n [\n ["2013-11-02", "2013-11-5"], # Fall DST\n ["2014-03-08", "2014-03-11"], # Spring DST\n ["2014-01-01", "2014-01-03"], # Regular Time\n ],\n)\n@pytest.mark.parametrize(\n "freq",\n ["h", "3h", "10min", "3601s", "3600001ms", "3600000001us", "3600000000001ns"],\n)\ndef test_infer_freq_tz_transition(tz_naive_fixture, date_pair, freq):\n # see gh-8772\n tz = tz_naive_fixture\n idx = date_range(date_pair[0], date_pair[1], freq=freq, tz=tz)\n assert idx.inferred_freq == freq\n\n\ndef test_infer_freq_tz_transition_custom():\n index = date_range("2013-11-03", periods=5, freq="3h").tz_localize(\n "America/Chicago"\n )\n assert index.inferred_freq is None\n\n\n@pytest.mark.parametrize(\n "data,expected",\n [\n # Hourly freq in a day must result in "h"\n (\n [\n "2014-07-01 09:00",\n "2014-07-01 10:00",\n "2014-07-01 11:00",\n "2014-07-01 12:00",\n "2014-07-01 13:00",\n "2014-07-01 14:00",\n ],\n "h",\n ),\n (\n [\n "2014-07-01 09:00",\n "2014-07-01 10:00",\n "2014-07-01 11:00",\n "2014-07-01 12:00",\n "2014-07-01 13:00",\n "2014-07-01 14:00",\n "2014-07-01 15:00",\n "2014-07-01 16:00",\n "2014-07-02 09:00",\n "2014-07-02 10:00",\n "2014-07-02 11:00",\n ],\n "bh",\n ),\n (\n [\n "2014-07-04 09:00",\n "2014-07-04 10:00",\n "2014-07-04 11:00",\n "2014-07-04 12:00",\n "2014-07-04 13:00",\n "2014-07-04 14:00",\n "2014-07-04 15:00",\n "2014-07-04 16:00",\n "2014-07-07 09:00",\n 
"2014-07-07 10:00",\n "2014-07-07 11:00",\n ],\n "bh",\n ),\n (\n [\n "2014-07-04 09:00",\n "2014-07-04 10:00",\n "2014-07-04 11:00",\n "2014-07-04 12:00",\n "2014-07-04 13:00",\n "2014-07-04 14:00",\n "2014-07-04 15:00",\n "2014-07-04 16:00",\n "2014-07-07 09:00",\n "2014-07-07 10:00",\n "2014-07-07 11:00",\n "2014-07-07 12:00",\n "2014-07-07 13:00",\n "2014-07-07 14:00",\n "2014-07-07 15:00",\n "2014-07-07 16:00",\n "2014-07-08 09:00",\n "2014-07-08 10:00",\n "2014-07-08 11:00",\n "2014-07-08 12:00",\n "2014-07-08 13:00",\n "2014-07-08 14:00",\n "2014-07-08 15:00",\n "2014-07-08 16:00",\n ],\n "bh",\n ),\n ],\n)\ndef test_infer_freq_business_hour(data, expected):\n # see gh-7905\n idx = DatetimeIndex(data)\n assert idx.inferred_freq == expected\n\n\ndef test_not_monotonic():\n rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"])\n rng = rng[::-1]\n\n assert rng.inferred_freq == "-1YE-JAN"\n\n\ndef test_non_datetime_index2():\n rng = DatetimeIndex(["1/31/2000", "1/31/2001", "1/31/2002"])\n vals = rng.to_pydatetime()\n\n result = frequencies.infer_freq(vals)\n assert result == rng.inferred_freq\n\n\n@pytest.mark.parametrize(\n "idx",\n [\n Index(np.arange(5), dtype=np.int64),\n Index(np.arange(5), dtype=np.float64),\n period_range("2020-01-01", periods=5),\n RangeIndex(5),\n ],\n)\ndef test_invalid_index_types(idx):\n # see gh-48439\n msg = "|".join(\n [\n "cannot infer freq from a non-convertible",\n "Check the `freq` attribute instead of using infer_freq",\n ]\n )\n\n with pytest.raises(TypeError, match=msg):\n frequencies.infer_freq(idx)\n\n\n@pytest.mark.skipif(is_platform_windows(), reason="see gh-10822: Windows issue")\ndef test_invalid_index_types_unicode():\n # see gh-10822\n #\n # Odd error message on conversions to datetime for unicode.\n msg = "Unknown datetime string format"\n\n with pytest.raises(ValueError, match=msg):\n frequencies.infer_freq(Index(["ZqgszYBfuL"]))\n\n\ndef test_string_datetime_like_compat():\n # see gh-6463\n data = 
["2004-01", "2004-02", "2004-03", "2004-04"]\n\n expected = frequencies.infer_freq(data)\n result = frequencies.infer_freq(Index(data))\n\n assert result == expected\n\n\ndef test_series():\n # see gh-6407\n s = Series(date_range("20130101", "20130110"))\n inferred = frequencies.infer_freq(s)\n assert inferred == "D"\n\n\n@pytest.mark.parametrize("end", [10, 10.0])\ndef test_series_invalid_type(end):\n # see gh-6407\n msg = "cannot infer freq from a non-convertible dtype on a Series"\n s = Series(np.arange(end))\n\n with pytest.raises(TypeError, match=msg):\n frequencies.infer_freq(s)\n\n\ndef test_series_inconvertible_string(using_infer_string):\n # see gh-6407\n if using_infer_string:\n msg = "cannot infer freq from"\n\n with pytest.raises(TypeError, match=msg):\n frequencies.infer_freq(Series(["foo", "bar"]))\n else:\n msg = "Unknown datetime string format"\n\n with pytest.raises(ValueError, match=msg):\n frequencies.infer_freq(Series(["foo", "bar"]))\n\n\n@pytest.mark.parametrize("freq", [None, "ms"])\ndef test_series_period_index(freq):\n # see gh-6407\n #\n # Cannot infer on PeriodIndex\n msg = "cannot infer freq from a non-convertible dtype on a Series"\n s = Series(period_range("2013", periods=10, freq=freq))\n\n with pytest.raises(TypeError, match=msg):\n frequencies.infer_freq(s)\n\n\n@pytest.mark.parametrize("freq", ["ME", "ms", "s"])\ndef test_series_datetime_index(freq):\n s = Series(date_range("20130101", periods=10, freq=freq))\n inferred = frequencies.infer_freq(s)\n assert inferred == freq\n\n\n@pytest.mark.parametrize(\n "offset_func",\n [\n _get_offset,\n lambda freq: date_range("2011-01-01", periods=5, freq=freq),\n ],\n)\n@pytest.mark.parametrize(\n "freq",\n [\n "WEEKDAY",\n "EOM",\n "W@MON",\n "W@TUE",\n "W@WED",\n "W@THU",\n "W@FRI",\n "W@SAT",\n "W@SUN",\n "QE@JAN",\n "QE@FEB",\n "QE@MAR",\n "YE@JAN",\n "YE@FEB",\n "YE@MAR",\n "YE@APR",\n "YE@MAY",\n "YE@JUN",\n "YE@JUL",\n "YE@AUG",\n "YE@SEP",\n "YE@OCT",\n "YE@NOV",\n "YE@DEC",\n 
"YE@JAN",\n "WOM@1MON",\n "WOM@2MON",\n "WOM@3MON",\n "WOM@4MON",\n "WOM@1TUE",\n "WOM@2TUE",\n "WOM@3TUE",\n "WOM@4TUE",\n "WOM@1WED",\n "WOM@2WED",\n "WOM@3WED",\n "WOM@4WED",\n "WOM@1THU",\n "WOM@2THU",\n "WOM@3THU",\n "WOM@4THU",\n "WOM@1FRI",\n "WOM@2FRI",\n "WOM@3FRI",\n "WOM@4FRI",\n ],\n)\ndef test_legacy_offset_warnings(offset_func, freq):\n with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):\n offset_func(freq)\n\n\ndef test_ms_vs_capital_ms():\n left = _get_offset("ms")\n right = _get_offset("MS")\n\n assert left == offsets.Milli()\n assert right == offsets.MonthBegin()\n\n\ndef test_infer_freq_non_nano():\n arr = np.arange(10).astype(np.int64).view("M8[s]")\n dta = DatetimeArray._simple_new(arr, dtype=arr.dtype)\n res = frequencies.infer_freq(dta)\n assert res == "s"\n\n arr2 = arr.view("m8[ms]")\n tda = TimedeltaArray._simple_new(arr2, dtype=arr2.dtype)\n res2 = frequencies.infer_freq(tda)\n assert res2 == "ms"\n\n\ndef test_infer_freq_non_nano_tzaware(tz_aware_fixture):\n tz = tz_aware_fixture\n\n dti = date_range("2016-01-01", periods=365, freq="B", tz=tz)\n dta = dti._data.as_unit("s")\n\n res = frequencies.infer_freq(dta)\n assert res == "B"\n | .venv\Lib\site-packages\pandas\tests\tseries\frequencies\test_inference.py | test_inference.py | Python | 15,111 | 0.95 | 0.086022 | 0.058824 | vue-tools | 472 | 2023-08-08T07:10:30.087151 | BSD-3-Clause | true | 0f75ae6e84361605434231091ebf29db |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\frequencies\__pycache__\test_frequencies.cpython-313.pyc | test_frequencies.cpython-313.pyc | Other | 1,513 | 0.7 | 0 | 0 | node-utils | 64 | 2025-01-28T02:01:12.235833 | BSD-3-Clause | true | 4f3dd2d5aafdf3af2692ba88c62b8c6e |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\frequencies\__pycache__\test_freq_code.cpython-313.pyc | test_freq_code.cpython-313.pyc | Other | 2,876 | 0.8 | 0 | 0 | react-lib | 294 | 2025-03-13T06:55:55.831189 | BSD-3-Clause | true | a3ab280480854aaee0fe95eb5f9edbf0 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\frequencies\__pycache__\test_inference.cpython-313.pyc | test_inference.cpython-313.pyc | Other | 21,008 | 0.8 | 0 | 0.027778 | awesome-app | 311 | 2025-03-13T14:13:45.402875 | MIT | true | 7816769ffdd5697e9c735a6c39faee85 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\frequencies\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 207 | 0.7 | 0 | 0 | awesome-app | 291 | 2024-08-20T09:24:32.174066 | BSD-3-Clause | true | c85d706c6ccaf492db8fe5bd7ae345df |
from datetime import datetime\n\nimport pytest\n\nfrom pandas import (\n DatetimeIndex,\n offsets,\n to_datetime,\n)\nimport pandas._testing as tm\n\nfrom pandas.tseries.holiday import (\n AbstractHolidayCalendar,\n Holiday,\n Timestamp,\n USFederalHolidayCalendar,\n USLaborDay,\n USThanksgivingDay,\n get_calendar,\n)\n\n\n@pytest.mark.parametrize(\n "transform", [lambda x: x, lambda x: x.strftime("%Y-%m-%d"), lambda x: Timestamp(x)]\n)\ndef test_calendar(transform):\n start_date = datetime(2012, 1, 1)\n end_date = datetime(2012, 12, 31)\n\n calendar = USFederalHolidayCalendar()\n holidays = calendar.holidays(transform(start_date), transform(end_date))\n\n expected = [\n datetime(2012, 1, 2),\n datetime(2012, 1, 16),\n datetime(2012, 2, 20),\n datetime(2012, 5, 28),\n datetime(2012, 7, 4),\n datetime(2012, 9, 3),\n datetime(2012, 10, 8),\n datetime(2012, 11, 12),\n datetime(2012, 11, 22),\n datetime(2012, 12, 25),\n ]\n\n assert list(holidays.to_pydatetime()) == expected\n\n\ndef test_calendar_caching():\n # see gh-9552.\n\n class TestCalendar(AbstractHolidayCalendar):\n def __init__(self, name=None, rules=None) -> None:\n super().__init__(name=name, rules=rules)\n\n jan1 = TestCalendar(rules=[Holiday("jan1", year=2015, month=1, day=1)])\n jan2 = TestCalendar(rules=[Holiday("jan2", year=2015, month=1, day=2)])\n\n # Getting holidays for Jan 1 should not alter results for Jan 2.\n expected = DatetimeIndex(["01-Jan-2015"]).as_unit("ns")\n tm.assert_index_equal(jan1.holidays(), expected)\n\n expected2 = DatetimeIndex(["02-Jan-2015"]).as_unit("ns")\n tm.assert_index_equal(jan2.holidays(), expected2)\n\n\ndef test_calendar_observance_dates():\n # see gh-11477\n us_fed_cal = get_calendar("USFederalHolidayCalendar")\n holidays0 = us_fed_cal.holidays(\n datetime(2015, 7, 3), datetime(2015, 7, 3)\n ) # <-- same start and end dates\n holidays1 = us_fed_cal.holidays(\n datetime(2015, 7, 3), datetime(2015, 7, 6)\n ) # <-- different start and end dates\n holidays2 = 
us_fed_cal.holidays(\n datetime(2015, 7, 3), datetime(2015, 7, 3)\n ) # <-- same start and end dates\n\n # These should all produce the same result.\n #\n # In addition, calling with different start and end\n # dates should not alter the output if we call the\n # function again with the same start and end date.\n tm.assert_index_equal(holidays0, holidays1)\n tm.assert_index_equal(holidays0, holidays2)\n\n\ndef test_rule_from_name():\n us_fed_cal = get_calendar("USFederalHolidayCalendar")\n assert us_fed_cal.rule_from_name("Thanksgiving Day") == USThanksgivingDay\n\n\ndef test_calendar_2031():\n # See gh-27790\n #\n # Labor Day 2031 is on September 1. Saturday before is August 30.\n # Next working day after August 30 ought to be Tuesday, September 2.\n\n class testCalendar(AbstractHolidayCalendar):\n rules = [USLaborDay]\n\n cal = testCalendar()\n workDay = offsets.CustomBusinessDay(calendar=cal)\n Sat_before_Labor_Day_2031 = to_datetime("2031-08-30")\n next_working_day = Sat_before_Labor_Day_2031 + 0 * workDay\n assert next_working_day == to_datetime("2031-09-02")\n\n\ndef test_no_holidays_calendar():\n # Test for issue #31415\n\n class NoHolidaysCalendar(AbstractHolidayCalendar):\n pass\n\n cal = NoHolidaysCalendar()\n holidays = cal.holidays(Timestamp("01-Jan-2020"), Timestamp("01-Jan-2021"))\n empty_index = DatetimeIndex([]) # Type is DatetimeIndex since return_name=False\n tm.assert_index_equal(holidays, empty_index)\n | .venv\Lib\site-packages\pandas\tests\tseries\holiday\test_calendar.py | test_calendar.py | Python | 3,622 | 0.95 | 0.12605 | 0.141304 | python-kit | 752 | 2023-08-23T13:35:07.642834 | Apache-2.0 | true | 304acb764dd4cc1abc566d01791447a7 |
from datetime import datetime\n\nfrom pandas import DatetimeIndex\nimport pandas._testing as tm\n\nfrom pandas.tseries.holiday import (\n AbstractHolidayCalendar,\n USFederalHolidayCalendar,\n USMartinLutherKingJr,\n USMemorialDay,\n)\n\n\ndef test_no_mlk_before_1986():\n # see gh-10278\n class MLKCalendar(AbstractHolidayCalendar):\n rules = [USMartinLutherKingJr]\n\n holidays = MLKCalendar().holidays(start="1984", end="1988").to_pydatetime().tolist()\n\n # Testing to make sure holiday is not incorrectly observed before 1986.\n assert holidays == [datetime(1986, 1, 20, 0, 0), datetime(1987, 1, 19, 0, 0)]\n\n\ndef test_memorial_day():\n class MemorialDay(AbstractHolidayCalendar):\n rules = [USMemorialDay]\n\n holidays = MemorialDay().holidays(start="1971", end="1980").to_pydatetime().tolist()\n\n # Fixes 5/31 error and checked manually against Wikipedia.\n assert holidays == [\n datetime(1971, 5, 31, 0, 0),\n datetime(1972, 5, 29, 0, 0),\n datetime(1973, 5, 28, 0, 0),\n datetime(1974, 5, 27, 0, 0),\n datetime(1975, 5, 26, 0, 0),\n datetime(1976, 5, 31, 0, 0),\n datetime(1977, 5, 30, 0, 0),\n datetime(1978, 5, 29, 0, 0),\n datetime(1979, 5, 28, 0, 0),\n ]\n\n\ndef test_federal_holiday_inconsistent_returntype():\n # GH 49075 test case\n # Instantiate two calendars to rule out _cache\n cal1 = USFederalHolidayCalendar()\n cal2 = USFederalHolidayCalendar()\n\n results_2018 = cal1.holidays(start=datetime(2018, 8, 1), end=datetime(2018, 8, 31))\n results_2019 = cal2.holidays(start=datetime(2019, 8, 1), end=datetime(2019, 8, 31))\n expected_results = DatetimeIndex([], dtype="datetime64[ns]", freq=None)\n\n # Check against expected results to ensure both date\n # ranges generate expected results as per GH49075 submission\n tm.assert_index_equal(results_2018, expected_results)\n tm.assert_index_equal(results_2019, expected_results)\n | .venv\Lib\site-packages\pandas\tests\tseries\holiday\test_federal.py | test_federal.py | Python | 1,948 | 0.95 | 0.086207 | 0.159091 | 
awesome-app | 261 | 2025-03-28T06:56:54.877190 | GPL-3.0 | true | 577df326d1c014abfc7d3627fea3a7d6 |
from datetime import datetime\n\nimport pytest\nfrom pytz import utc\n\nfrom pandas import (\n DatetimeIndex,\n Series,\n)\nimport pandas._testing as tm\n\nfrom pandas.tseries.holiday import (\n MO,\n SA,\n AbstractHolidayCalendar,\n DateOffset,\n EasterMonday,\n GoodFriday,\n Holiday,\n HolidayCalendarFactory,\n Timestamp,\n USColumbusDay,\n USFederalHolidayCalendar,\n USLaborDay,\n USMartinLutherKingJr,\n USMemorialDay,\n USPresidentsDay,\n USThanksgivingDay,\n get_calendar,\n next_monday,\n)\n\n\n@pytest.mark.parametrize(\n "holiday,start_date,end_date,expected",\n [\n (\n USMemorialDay,\n datetime(2011, 1, 1),\n datetime(2020, 12, 31),\n [\n datetime(2011, 5, 30),\n datetime(2012, 5, 28),\n datetime(2013, 5, 27),\n datetime(2014, 5, 26),\n datetime(2015, 5, 25),\n datetime(2016, 5, 30),\n datetime(2017, 5, 29),\n datetime(2018, 5, 28),\n datetime(2019, 5, 27),\n datetime(2020, 5, 25),\n ],\n ),\n (\n Holiday("July 4th Eve", month=7, day=3),\n "2001-01-01",\n "2003-03-03",\n [Timestamp("2001-07-03 00:00:00"), Timestamp("2002-07-03 00:00:00")],\n ),\n (\n Holiday("July 4th Eve", month=7, day=3, days_of_week=(0, 1, 2, 3)),\n "2001-01-01",\n "2008-03-03",\n [\n Timestamp("2001-07-03 00:00:00"),\n Timestamp("2002-07-03 00:00:00"),\n Timestamp("2003-07-03 00:00:00"),\n Timestamp("2006-07-03 00:00:00"),\n Timestamp("2007-07-03 00:00:00"),\n ],\n ),\n (\n EasterMonday,\n datetime(2011, 1, 1),\n datetime(2020, 12, 31),\n [\n Timestamp("2011-04-25 00:00:00"),\n Timestamp("2012-04-09 00:00:00"),\n Timestamp("2013-04-01 00:00:00"),\n Timestamp("2014-04-21 00:00:00"),\n Timestamp("2015-04-06 00:00:00"),\n Timestamp("2016-03-28 00:00:00"),\n Timestamp("2017-04-17 00:00:00"),\n Timestamp("2018-04-02 00:00:00"),\n Timestamp("2019-04-22 00:00:00"),\n Timestamp("2020-04-13 00:00:00"),\n ],\n ),\n (\n GoodFriday,\n datetime(2011, 1, 1),\n datetime(2020, 12, 31),\n [\n Timestamp("2011-04-22 00:00:00"),\n Timestamp("2012-04-06 00:00:00"),\n Timestamp("2013-03-29 00:00:00"),\n 
Timestamp("2014-04-18 00:00:00"),\n Timestamp("2015-04-03 00:00:00"),\n Timestamp("2016-03-25 00:00:00"),\n Timestamp("2017-04-14 00:00:00"),\n Timestamp("2018-03-30 00:00:00"),\n Timestamp("2019-04-19 00:00:00"),\n Timestamp("2020-04-10 00:00:00"),\n ],\n ),\n (\n USThanksgivingDay,\n datetime(2011, 1, 1),\n datetime(2020, 12, 31),\n [\n datetime(2011, 11, 24),\n datetime(2012, 11, 22),\n datetime(2013, 11, 28),\n datetime(2014, 11, 27),\n datetime(2015, 11, 26),\n datetime(2016, 11, 24),\n datetime(2017, 11, 23),\n datetime(2018, 11, 22),\n datetime(2019, 11, 28),\n datetime(2020, 11, 26),\n ],\n ),\n ],\n)\ndef test_holiday_dates(holiday, start_date, end_date, expected):\n assert list(holiday.dates(start_date, end_date)) == expected\n\n # Verify that timezone info is preserved.\n assert list(\n holiday.dates(\n utc.localize(Timestamp(start_date)), utc.localize(Timestamp(end_date))\n )\n ) == [utc.localize(dt) for dt in expected]\n\n\n@pytest.mark.parametrize(\n "holiday,start,expected",\n [\n (USMemorialDay, datetime(2015, 7, 1), []),\n (USMemorialDay, "2015-05-25", [Timestamp("2015-05-25")]),\n (USLaborDay, datetime(2015, 7, 1), []),\n (USLaborDay, "2015-09-07", [Timestamp("2015-09-07")]),\n (USColumbusDay, datetime(2015, 7, 1), []),\n (USColumbusDay, "2015-10-12", [Timestamp("2015-10-12")]),\n (USThanksgivingDay, datetime(2015, 7, 1), []),\n (USThanksgivingDay, "2015-11-26", [Timestamp("2015-11-26")]),\n (USMartinLutherKingJr, datetime(2015, 7, 1), []),\n (USMartinLutherKingJr, "2015-01-19", [Timestamp("2015-01-19")]),\n (USPresidentsDay, datetime(2015, 7, 1), []),\n (USPresidentsDay, "2015-02-16", [Timestamp("2015-02-16")]),\n (GoodFriday, datetime(2015, 7, 1), []),\n (GoodFriday, "2015-04-03", [Timestamp("2015-04-03")]),\n (EasterMonday, "2015-04-06", [Timestamp("2015-04-06")]),\n (EasterMonday, datetime(2015, 7, 1), []),\n (EasterMonday, "2015-04-05", []),\n ("New Year's Day", "2015-01-01", [Timestamp("2015-01-01")]),\n ("New Year's Day", "2010-12-31", 
[Timestamp("2010-12-31")]),\n ("New Year's Day", datetime(2015, 7, 1), []),\n ("New Year's Day", "2011-01-01", []),\n ("Independence Day", "2015-07-03", [Timestamp("2015-07-03")]),\n ("Independence Day", datetime(2015, 7, 1), []),\n ("Independence Day", "2015-07-04", []),\n ("Veterans Day", "2012-11-12", [Timestamp("2012-11-12")]),\n ("Veterans Day", datetime(2015, 7, 1), []),\n ("Veterans Day", "2012-11-11", []),\n ("Christmas Day", "2011-12-26", [Timestamp("2011-12-26")]),\n ("Christmas Day", datetime(2015, 7, 1), []),\n ("Christmas Day", "2011-12-25", []),\n ("Juneteenth National Independence Day", "2020-06-19", []),\n (\n "Juneteenth National Independence Day",\n "2021-06-18",\n [Timestamp("2021-06-18")],\n ),\n ("Juneteenth National Independence Day", "2022-06-19", []),\n (\n "Juneteenth National Independence Day",\n "2022-06-20",\n [Timestamp("2022-06-20")],\n ),\n ],\n)\ndef test_holidays_within_dates(holiday, start, expected):\n # see gh-11477\n #\n # Fix holiday behavior where holiday.dates returned dates outside\n # start/end date, or observed rules could not be applied because the\n # holiday was not in the original date range (e.g., 7/4/2015 -> 7/3/2015).\n if isinstance(holiday, str):\n calendar = get_calendar("USFederalHolidayCalendar")\n holiday = calendar.rule_from_name(holiday)\n\n assert list(holiday.dates(start, start)) == expected\n\n # Verify that timezone info is preserved.\n assert list(\n holiday.dates(utc.localize(Timestamp(start)), utc.localize(Timestamp(start)))\n ) == [utc.localize(dt) for dt in expected]\n\n\n@pytest.mark.parametrize(\n "transform", [lambda x: x.strftime("%Y-%m-%d"), lambda x: Timestamp(x)]\n)\ndef test_argument_types(transform):\n start_date = datetime(2011, 1, 1)\n end_date = datetime(2020, 12, 31)\n\n holidays = USThanksgivingDay.dates(start_date, end_date)\n holidays2 = USThanksgivingDay.dates(transform(start_date), transform(end_date))\n tm.assert_index_equal(holidays, holidays2)\n\n\n@pytest.mark.parametrize(\n 
"name,kwargs",\n [\n ("One-Time", {"year": 2012, "month": 5, "day": 28}),\n (\n "Range",\n {\n "month": 5,\n "day": 28,\n "start_date": datetime(2012, 1, 1),\n "end_date": datetime(2012, 12, 31),\n "offset": DateOffset(weekday=MO(1)),\n },\n ),\n ],\n)\ndef test_special_holidays(name, kwargs):\n base_date = [datetime(2012, 5, 28)]\n holiday = Holiday(name, **kwargs)\n\n start_date = datetime(2011, 1, 1)\n end_date = datetime(2020, 12, 31)\n\n assert base_date == holiday.dates(start_date, end_date)\n\n\ndef test_get_calendar():\n class TestCalendar(AbstractHolidayCalendar):\n rules = []\n\n calendar = get_calendar("TestCalendar")\n assert TestCalendar == type(calendar)\n\n\ndef test_factory():\n class_1 = HolidayCalendarFactory(\n "MemorialDay", AbstractHolidayCalendar, USMemorialDay\n )\n class_2 = HolidayCalendarFactory(\n "Thanksgiving", AbstractHolidayCalendar, USThanksgivingDay\n )\n class_3 = HolidayCalendarFactory("Combined", class_1, class_2)\n\n assert len(class_1.rules) == 1\n assert len(class_2.rules) == 1\n assert len(class_3.rules) == 2\n\n\ndef test_both_offset_observance_raises():\n # see gh-10217\n msg = "Cannot use both offset and observance"\n with pytest.raises(NotImplementedError, match=msg):\n Holiday(\n "Cyber Monday",\n month=11,\n day=1,\n offset=[DateOffset(weekday=SA(4))],\n observance=next_monday,\n )\n\n\ndef test_half_open_interval_with_observance():\n # Prompted by GH 49075\n # Check for holidays that have a half-open date interval where\n # they have either a start_date or end_date defined along\n # with a defined observance pattern to make sure that the return type\n # for Holiday.dates() remains consistent before & after the year that\n # marks the 'edge' of the half-open date interval.\n\n holiday_1 = Holiday(\n "Arbitrary Holiday - start 2022-03-14",\n start_date=datetime(2022, 3, 14),\n month=3,\n day=14,\n observance=next_monday,\n )\n holiday_2 = Holiday(\n "Arbitrary Holiday 2 - end 2022-03-20",\n end_date=datetime(2022, 3, 
20),\n month=3,\n day=20,\n observance=next_monday,\n )\n\n class TestHolidayCalendar(AbstractHolidayCalendar):\n rules = [\n USMartinLutherKingJr,\n holiday_1,\n holiday_2,\n USLaborDay,\n ]\n\n start = Timestamp("2022-08-01")\n end = Timestamp("2022-08-31")\n year_offset = DateOffset(years=5)\n expected_results = DatetimeIndex([], dtype="datetime64[ns]", freq=None)\n test_cal = TestHolidayCalendar()\n\n date_interval_low = test_cal.holidays(start - year_offset, end - year_offset)\n date_window_edge = test_cal.holidays(start, end)\n date_interval_high = test_cal.holidays(start + year_offset, end + year_offset)\n\n tm.assert_index_equal(date_interval_low, expected_results)\n tm.assert_index_equal(date_window_edge, expected_results)\n tm.assert_index_equal(date_interval_high, expected_results)\n\n\ndef test_holidays_with_timezone_specified_but_no_occurences():\n # GH 54580\n # _apply_rule() in holiday.py was silently dropping timezones if you passed it\n # an empty list of holiday dates that had timezone information\n start_date = Timestamp("2018-01-01", tz="America/Chicago")\n end_date = Timestamp("2018-01-11", tz="America/Chicago")\n test_case = USFederalHolidayCalendar().holidays(\n start_date, end_date, return_name=True\n )\n expected_results = Series("New Year's Day", index=[start_date])\n expected_results.index = expected_results.index.as_unit("ns")\n\n tm.assert_equal(test_case, expected_results)\n | .venv\Lib\site-packages\pandas\tests\tseries\holiday\test_holiday.py | test_holiday.py | Python | 11,173 | 0.95 | 0.051205 | 0.057239 | awesome-app | 487 | 2024-12-28T18:04:22.800976 | GPL-3.0 | true | b1602164ddc4ec30ac09002c54ff7f1b |
"""
Tests for the observance helpers in ``pandas.tseries.holiday``.

Every helper maps a nominal holiday date onto the date it is actually
observed.  All cases are pinned to one fixed reference week in April 2014
(Wednesday the 9th through the following Wednesday the 16th).
"""
from datetime import datetime

import pytest

from pandas.tseries.holiday import (
    after_nearest_workday,
    before_nearest_workday,
    nearest_workday,
    next_monday,
    next_monday_or_tuesday,
    next_workday,
    previous_friday,
    previous_workday,
    sunday_to_monday,
    weekend_to_monday,
)

# Fixed reference week shared by every case below.
_WED = datetime(2014, 4, 9)
_THU = datetime(2014, 4, 10)
_FRI = datetime(2014, 4, 11)
_SAT = datetime(2014, 4, 12)
_SUN = datetime(2014, 4, 13)
_MON = datetime(2014, 4, 14)
_TUE = datetime(2014, 4, 15)
_WED_NEXT = datetime(2014, 4, 16)


@pytest.mark.parametrize("day", (_SAT, _SUN))
def test_next_monday(day):
    """Weekend days roll forward to the following Monday."""
    assert next_monday(day) == _MON


@pytest.mark.parametrize(
    "day,expected", ((_SAT, _MON), (_SUN, _TUE), (_MON, _TUE))
)
def test_next_monday_or_tuesday(day, expected):
    """Saturday -> Monday; Sunday and Monday -> Tuesday."""
    assert next_monday_or_tuesday(day) == expected


@pytest.mark.parametrize("day", (_SAT, _SUN))
def test_previous_friday(day):
    """Weekend days roll back to the preceding Friday."""
    assert previous_friday(day) == _FRI


def test_sunday_to_monday():
    """A Sunday moves to the next Monday."""
    assert sunday_to_monday(_SUN) == _MON


@pytest.mark.parametrize(
    "day,expected", ((_SAT, _FRI), (_SUN, _MON), (_MON, _MON))
)
def test_nearest_workday(day, expected):
    """Saturday snaps back to Friday, Sunday forward to Monday."""
    assert nearest_workday(day) == expected


@pytest.mark.parametrize(
    "day,expected", ((_SAT, _MON), (_SUN, _MON), (_MON, _MON))
)
def test_weekend_to_monday(day, expected):
    """Both weekend days are observed on the following Monday."""
    assert weekend_to_monday(day) == expected


@pytest.mark.parametrize(
    "day,expected",
    (
        (_WED, _THU),
        (_THU, _FRI),
        (_SAT, _MON),
        (_SUN, _MON),
        (_MON, _TUE),
        (_TUE, _WED_NEXT),  # Wednesday of the same week as the Tuesday
    ),
)
def test_next_workday(day, expected):
    """Advance to the next weekday, skipping over the weekend."""
    assert next_workday(day) == expected


@pytest.mark.parametrize(
    "day,expected", ((_SAT, _FRI), (_SUN, _FRI), (_TUE, _MON))
)
def test_previous_workday(day, expected):
    """Step back to the previous weekday, skipping over the weekend."""
    assert previous_workday(day) == expected


@pytest.mark.parametrize(
    "day,expected",
    (
        (_THU, _WED),
        (_FRI, _THU),
        (_SAT, _THU),
        (_SUN, _FRI),
        (_MON, _FRI),  # Friday of the preceding week
        (_TUE, _MON),
        (_WED_NEXT, _TUE),  # Tuesday of the same week as that Wednesday
    ),
)
def test_before_nearest_workday(day, expected):
    """Workday immediately before the nearest-workday observance."""
    assert before_nearest_workday(day) == expected


@pytest.mark.parametrize(
    "day,expected", ((_SAT, _MON), (_SUN, _TUE), (_FRI, _MON))
)
def test_after_nearest_workday(day, expected):
    """Workday immediately after the nearest-workday observance."""
    assert after_nearest_workday(day) == expected
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\holiday\__pycache__\test_calendar.cpython-313.pyc | test_calendar.cpython-313.pyc | Other | 5,750 | 0.8 | 0 | 0 | vue-tools | 782 | 2023-12-19T16:23:12.218810 | MIT | true | 529afdd2909fea1dc50f2ca8fda548ca |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\holiday\__pycache__\test_federal.cpython-313.pyc | test_federal.cpython-313.pyc | Other | 3,072 | 0.8 | 0 | 0 | react-lib | 771 | 2024-07-19T10:35:22.053704 | Apache-2.0 | true | 3ffe5839202f8107597a4f8088523c24 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\holiday\__pycache__\test_holiday.cpython-313.pyc | test_holiday.cpython-313.pyc | Other | 12,533 | 0.8 | 0 | 0 | awesome-app | 693 | 2025-06-27T00:42:54.335486 | MIT | true | aa1370fc76031cee48993ef0737d3c24 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\holiday\__pycache__\test_observance.cpython-313.pyc | test_observance.cpython-313.pyc | Other | 4,113 | 0.8 | 0 | 0 | node-utils | 527 | 2024-07-28T00:47:05.080508 | MIT | true | 4b756712b698ae67e02bda936a929e7e |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\holiday\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 203 | 0.7 | 0 | 0 | react-lib | 463 | 2024-10-08T22:15:44.438783 | MIT | true | 099639ada2724ca92e80bcf911454c0f |
"""\nAssertion helpers and base class for offsets tests\n"""\nfrom __future__ import annotations\n\n\ndef assert_offset_equal(offset, base, expected):\n actual = offset + base\n actual_swapped = base + offset\n actual_apply = offset._apply(base)\n try:\n assert actual == expected\n assert actual_swapped == expected\n assert actual_apply == expected\n except AssertionError as err:\n raise AssertionError(\n f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"\n f"\nAt Date: {base}"\n ) from err\n\n\ndef assert_is_on_offset(offset, date, expected):\n actual = offset.is_on_offset(date)\n assert actual == expected, (\n f"\nExpected: {expected}\nActual: {actual}\nFor Offset: {offset})"\n f"\nAt Date: {date}"\n )\n\n\nclass WeekDay:\n MON = 0\n TUE = 1\n WED = 2\n THU = 3\n FRI = 4\n SAT = 5\n SUN = 6\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\common.py | common.py | Python | 900 | 0.85 | 0.162162 | 0 | python-kit | 431 | 2023-10-16T14:12:05.934503 | MIT | true | fa26a8e2967591c52c399376b3b193a7 |
"""\nTests for offsets.BDay\n"""\nfrom __future__ import annotations\n\nfrom datetime import (\n date,\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.offsets import (\n ApplyTypeError,\n BDay,\n BMonthEnd,\n)\n\nfrom pandas import (\n DatetimeIndex,\n Timedelta,\n _testing as tm,\n)\nfrom pandas.tests.tseries.offsets.common import (\n assert_is_on_offset,\n assert_offset_equal,\n)\n\nfrom pandas.tseries import offsets\n\n\n@pytest.fixture\ndef dt():\n return datetime(2008, 1, 1)\n\n\n@pytest.fixture\ndef _offset():\n return BDay\n\n\n@pytest.fixture\ndef offset(_offset):\n return _offset()\n\n\n@pytest.fixture\ndef offset2(_offset):\n return _offset(2)\n\n\nclass TestBusinessDay:\n def test_different_normalize_equals(self, _offset, offset2):\n # GH#21404 changed __eq__ to return False when `normalize` does not match\n offset = _offset()\n offset2 = _offset(normalize=True)\n assert offset != offset2\n\n def test_repr(self, offset, offset2):\n assert repr(offset) == "<BusinessDay>"\n assert repr(offset2) == "<2 * BusinessDays>"\n\n expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"\n assert repr(offset + timedelta(1)) == expected\n\n def test_with_offset(self, dt, offset):\n offset = offset + timedelta(hours=2)\n\n assert (dt + offset) == datetime(2008, 1, 2, 2)\n\n @pytest.mark.parametrize(\n "td",\n [\n Timedelta(hours=2),\n Timedelta(hours=2).to_pytimedelta(),\n Timedelta(hours=2).to_timedelta64(),\n ],\n ids=lambda x: type(x),\n )\n def test_with_offset_index(self, td, dt, offset):\n dti = DatetimeIndex([dt])\n expected = DatetimeIndex([datetime(2008, 1, 2, 2)])\n\n result = dti + (td + offset)\n tm.assert_index_equal(result, expected)\n\n result = dti + (offset + td)\n tm.assert_index_equal(result, expected)\n\n def test_eq(self, offset2):\n assert offset2 == offset2\n\n def test_hash(self, offset2):\n assert hash(offset2) == hash(offset2)\n\n def test_add_datetime(self, dt, offset2):\n assert offset2 + dt 
== datetime(2008, 1, 3)\n assert offset2 + np.datetime64("2008-01-01 00:00:00") == datetime(2008, 1, 3)\n\n def testRollback1(self, dt, _offset):\n assert _offset(10).rollback(dt) == dt\n\n def testRollback2(self, _offset):\n assert _offset(10).rollback(datetime(2008, 1, 5)) == datetime(2008, 1, 4)\n\n def testRollforward1(self, dt, _offset):\n assert _offset(10).rollforward(dt) == dt\n\n def testRollforward2(self, _offset):\n assert _offset(10).rollforward(datetime(2008, 1, 5)) == datetime(2008, 1, 7)\n\n def test_roll_date_object(self, offset):\n dt = date(2012, 9, 15)\n\n result = offset.rollback(dt)\n assert result == datetime(2012, 9, 14)\n\n result = offset.rollforward(dt)\n assert result == datetime(2012, 9, 17)\n\n offset = offsets.Day()\n result = offset.rollback(dt)\n assert result == datetime(2012, 9, 15)\n\n result = offset.rollforward(dt)\n assert result == datetime(2012, 9, 15)\n\n @pytest.mark.parametrize(\n "dt, expected",\n [\n (datetime(2008, 1, 1), True),\n (datetime(2008, 1, 5), False),\n ],\n )\n def test_is_on_offset(self, offset, dt, expected):\n assert_is_on_offset(offset, dt, expected)\n\n apply_cases: list[tuple[int, dict[datetime, datetime]]] = [\n (\n 1,\n {\n datetime(2008, 1, 1): datetime(2008, 1, 2),\n datetime(2008, 1, 4): datetime(2008, 1, 7),\n datetime(2008, 1, 5): datetime(2008, 1, 7),\n datetime(2008, 1, 6): datetime(2008, 1, 7),\n datetime(2008, 1, 7): datetime(2008, 1, 8),\n },\n ),\n (\n 2,\n {\n datetime(2008, 1, 1): datetime(2008, 1, 3),\n datetime(2008, 1, 4): datetime(2008, 1, 8),\n datetime(2008, 1, 5): datetime(2008, 1, 8),\n datetime(2008, 1, 6): datetime(2008, 1, 8),\n datetime(2008, 1, 7): datetime(2008, 1, 9),\n },\n ),\n (\n -1,\n {\n datetime(2008, 1, 1): datetime(2007, 12, 31),\n datetime(2008, 1, 4): datetime(2008, 1, 3),\n datetime(2008, 1, 5): datetime(2008, 1, 4),\n datetime(2008, 1, 6): datetime(2008, 1, 4),\n datetime(2008, 1, 7): datetime(2008, 1, 4),\n datetime(2008, 1, 8): datetime(2008, 1, 7),\n },\n 
),\n (\n -2,\n {\n datetime(2008, 1, 1): datetime(2007, 12, 28),\n datetime(2008, 1, 4): datetime(2008, 1, 2),\n datetime(2008, 1, 5): datetime(2008, 1, 3),\n datetime(2008, 1, 6): datetime(2008, 1, 3),\n datetime(2008, 1, 7): datetime(2008, 1, 3),\n datetime(2008, 1, 8): datetime(2008, 1, 4),\n datetime(2008, 1, 9): datetime(2008, 1, 7),\n },\n ),\n (\n 0,\n {\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 1, 4): datetime(2008, 1, 4),\n datetime(2008, 1, 5): datetime(2008, 1, 7),\n datetime(2008, 1, 6): datetime(2008, 1, 7),\n datetime(2008, 1, 7): datetime(2008, 1, 7),\n },\n ),\n ]\n\n @pytest.mark.parametrize("case", apply_cases)\n def test_apply(self, case, _offset):\n n, cases = case\n offset = _offset(n)\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n def test_apply_large_n(self, _offset):\n dt = datetime(2012, 10, 23)\n\n result = dt + _offset(10)\n assert result == datetime(2012, 11, 6)\n\n result = dt + _offset(100) - _offset(100)\n assert result == dt\n\n off = _offset() * 6\n rs = datetime(2012, 1, 1) - off\n xp = datetime(2011, 12, 23)\n assert rs == xp\n\n st = datetime(2011, 12, 18)\n rs = st + off\n xp = datetime(2011, 12, 26)\n assert rs == xp\n\n off = _offset() * 10\n rs = datetime(2014, 1, 5) + off # see #5890\n xp = datetime(2014, 1, 17)\n assert rs == xp\n\n def test_apply_corner(self, _offset):\n if _offset is BDay:\n msg = "Only know how to combine business day with datetime or timedelta"\n else:\n msg = (\n "Only know how to combine trading day "\n "with datetime, datetime64 or timedelta"\n )\n with pytest.raises(ApplyTypeError, match=msg):\n _offset()._apply(BMonthEnd())\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_business_day.py | test_business_day.py | Python | 6,808 | 0.95 | 0.101695 | 0.005208 | awesome-app | 417 | 2024-07-05T11:15:03.289942 | Apache-2.0 | true | 456bf0482d2f5f95597293b243b361af |
"""\nTests for offsets.BusinessHour\n"""\nfrom __future__ import annotations\n\nfrom datetime import (\n datetime,\n time as dt_time,\n)\n\nimport pytest\n\nfrom pandas._libs.tslibs import (\n Timedelta,\n Timestamp,\n)\nfrom pandas._libs.tslibs.offsets import (\n BDay,\n BusinessHour,\n Nano,\n)\n\nfrom pandas import (\n DatetimeIndex,\n _testing as tm,\n date_range,\n)\nfrom pandas.tests.tseries.offsets.common import assert_offset_equal\n\n\n@pytest.fixture\ndef dt():\n return datetime(2014, 7, 1, 10, 00)\n\n\n@pytest.fixture\ndef _offset():\n return BusinessHour\n\n\n@pytest.fixture\ndef offset1():\n return BusinessHour()\n\n\n@pytest.fixture\ndef offset2():\n return BusinessHour(n=3)\n\n\n@pytest.fixture\ndef offset3():\n return BusinessHour(n=-1)\n\n\n@pytest.fixture\ndef offset4():\n return BusinessHour(n=-4)\n\n\n@pytest.fixture\ndef offset5():\n return BusinessHour(start=dt_time(11, 0), end=dt_time(14, 30))\n\n\n@pytest.fixture\ndef offset6():\n return BusinessHour(start="20:00", end="05:00")\n\n\n@pytest.fixture\ndef offset7():\n return BusinessHour(n=-2, start=dt_time(21, 30), end=dt_time(6, 30))\n\n\n@pytest.fixture\ndef offset8():\n return BusinessHour(start=["09:00", "13:00"], end=["12:00", "17:00"])\n\n\n@pytest.fixture\ndef offset9():\n return BusinessHour(n=3, start=["09:00", "22:00"], end=["13:00", "03:00"])\n\n\n@pytest.fixture\ndef offset10():\n return BusinessHour(n=-1, start=["23:00", "13:00"], end=["02:00", "17:00"])\n\n\nclass TestBusinessHour:\n @pytest.mark.parametrize(\n "start,end,match",\n [\n (\n dt_time(11, 0, 5),\n "17:00",\n "time data must be specified only with hour and minute",\n ),\n ("AAA", "17:00", "time data must match '%H:%M' format"),\n ("14:00:05", "17:00", "time data must match '%H:%M' format"),\n ([], "17:00", "Must include at least 1 start time"),\n ("09:00", [], "Must include at least 1 end time"),\n (\n ["09:00", "11:00"],\n "17:00",\n "number of starting time and ending time must be the same",\n ),\n (\n ["09:00", 
"11:00"],\n ["10:00"],\n "number of starting time and ending time must be the same",\n ),\n (\n ["09:00", "11:00"],\n ["12:00", "20:00"],\n r"invalid starting and ending time\(s\): opening hours should not "\n "touch or overlap with one another",\n ),\n (\n ["12:00", "20:00"],\n ["09:00", "11:00"],\n r"invalid starting and ending time\(s\): opening hours should not "\n "touch or overlap with one another",\n ),\n ],\n )\n def test_constructor_errors(self, start, end, match):\n with pytest.raises(ValueError, match=match):\n BusinessHour(start=start, end=end)\n\n def test_different_normalize_equals(self, _offset):\n # GH#21404 changed __eq__ to return False when `normalize` does not match\n offset = _offset()\n offset2 = _offset(normalize=True)\n assert offset != offset2\n\n def test_repr(\n self,\n offset1,\n offset2,\n offset3,\n offset4,\n offset5,\n offset6,\n offset7,\n offset8,\n offset9,\n offset10,\n ):\n assert repr(offset1) == "<BusinessHour: bh=09:00-17:00>"\n assert repr(offset2) == "<3 * BusinessHours: bh=09:00-17:00>"\n assert repr(offset3) == "<-1 * BusinessHour: bh=09:00-17:00>"\n assert repr(offset4) == "<-4 * BusinessHours: bh=09:00-17:00>"\n\n assert repr(offset5) == "<BusinessHour: bh=11:00-14:30>"\n assert repr(offset6) == "<BusinessHour: bh=20:00-05:00>"\n assert repr(offset7) == "<-2 * BusinessHours: bh=21:30-06:30>"\n assert repr(offset8) == "<BusinessHour: bh=09:00-12:00,13:00-17:00>"\n assert repr(offset9) == "<3 * BusinessHours: bh=09:00-13:00,22:00-03:00>"\n assert repr(offset10) == "<-1 * BusinessHour: bh=13:00-17:00,23:00-02:00>"\n\n def test_with_offset(self, dt):\n expected = Timestamp("2014-07-01 13:00")\n\n assert dt + BusinessHour() * 3 == expected\n assert dt + BusinessHour(n=3) == expected\n\n @pytest.mark.parametrize(\n "offset_name",\n ["offset1", "offset2", "offset3", "offset4", "offset8", "offset9", "offset10"],\n )\n def test_eq_attribute(self, offset_name, request):\n offset = request.getfixturevalue(offset_name)\n assert 
offset == offset\n\n @pytest.mark.parametrize(\n "offset1,offset2",\n [\n (BusinessHour(start="09:00"), BusinessHour()),\n (\n BusinessHour(start=["23:00", "13:00"], end=["12:00", "17:00"]),\n BusinessHour(start=["13:00", "23:00"], end=["17:00", "12:00"]),\n ),\n ],\n )\n def test_eq(self, offset1, offset2):\n assert offset1 == offset2\n\n @pytest.mark.parametrize(\n "offset1,offset2",\n [\n (BusinessHour(), BusinessHour(-1)),\n (BusinessHour(start="09:00"), BusinessHour(start="09:01")),\n (\n BusinessHour(start="09:00", end="17:00"),\n BusinessHour(start="17:00", end="09:01"),\n ),\n (\n BusinessHour(start=["13:00", "23:00"], end=["18:00", "07:00"]),\n BusinessHour(start=["13:00", "23:00"], end=["17:00", "12:00"]),\n ),\n ],\n )\n def test_neq(self, offset1, offset2):\n assert offset1 != offset2\n\n @pytest.mark.parametrize(\n "offset_name",\n ["offset1", "offset2", "offset3", "offset4", "offset8", "offset9", "offset10"],\n )\n def test_hash(self, offset_name, request):\n offset = request.getfixturevalue(offset_name)\n assert offset == offset\n\n def test_add_datetime(\n self,\n dt,\n offset1,\n offset2,\n offset3,\n offset4,\n offset8,\n offset9,\n offset10,\n ):\n assert offset1 + dt == datetime(2014, 7, 1, 11)\n assert offset2 + dt == datetime(2014, 7, 1, 13)\n assert offset3 + dt == datetime(2014, 6, 30, 17)\n assert offset4 + dt == datetime(2014, 6, 30, 14)\n assert offset8 + dt == datetime(2014, 7, 1, 11)\n assert offset9 + dt == datetime(2014, 7, 1, 22)\n assert offset10 + dt == datetime(2014, 7, 1, 1)\n\n def test_sub(self, dt, offset2, _offset):\n off = offset2\n msg = "Cannot subtract datetime from offset"\n with pytest.raises(TypeError, match=msg):\n off - dt\n assert 2 * off - off == off\n\n assert dt - offset2 == dt + _offset(-3)\n\n def test_multiply_by_zero(self, dt, offset1, offset2):\n assert dt - 0 * offset1 == dt\n assert dt + 0 * offset1 == dt\n assert dt - 0 * offset2 == dt\n assert dt + 0 * offset2 == dt\n\n def testRollback1(\n self,\n dt,\n 
_offset,\n offset1,\n offset2,\n offset3,\n offset4,\n offset5,\n offset6,\n offset7,\n offset8,\n offset9,\n offset10,\n ):\n assert offset1.rollback(dt) == dt\n assert offset2.rollback(dt) == dt\n assert offset3.rollback(dt) == dt\n assert offset4.rollback(dt) == dt\n assert offset5.rollback(dt) == datetime(2014, 6, 30, 14, 30)\n assert offset6.rollback(dt) == datetime(2014, 7, 1, 5, 0)\n assert offset7.rollback(dt) == datetime(2014, 7, 1, 6, 30)\n assert offset8.rollback(dt) == dt\n assert offset9.rollback(dt) == dt\n assert offset10.rollback(dt) == datetime(2014, 7, 1, 2)\n\n datet = datetime(2014, 7, 1, 0)\n assert offset1.rollback(datet) == datetime(2014, 6, 30, 17)\n assert offset2.rollback(datet) == datetime(2014, 6, 30, 17)\n assert offset3.rollback(datet) == datetime(2014, 6, 30, 17)\n assert offset4.rollback(datet) == datetime(2014, 6, 30, 17)\n assert offset5.rollback(datet) == datetime(2014, 6, 30, 14, 30)\n assert offset6.rollback(datet) == datet\n assert offset7.rollback(datet) == datet\n assert offset8.rollback(datet) == datetime(2014, 6, 30, 17)\n assert offset9.rollback(datet) == datet\n assert offset10.rollback(datet) == datet\n\n assert _offset(5).rollback(dt) == dt\n\n def testRollback2(self, _offset):\n assert _offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(\n 2014, 7, 4, 17, 0\n )\n\n def testRollforward1(\n self,\n dt,\n _offset,\n offset1,\n offset2,\n offset3,\n offset4,\n offset5,\n offset6,\n offset7,\n offset8,\n offset9,\n offset10,\n ):\n assert offset1.rollforward(dt) == dt\n assert offset2.rollforward(dt) == dt\n assert offset3.rollforward(dt) == dt\n assert offset4.rollforward(dt) == dt\n assert offset5.rollforward(dt) == datetime(2014, 7, 1, 11, 0)\n assert offset6.rollforward(dt) == datetime(2014, 7, 1, 20, 0)\n assert offset7.rollforward(dt) == datetime(2014, 7, 1, 21, 30)\n assert offset8.rollforward(dt) == dt\n assert offset9.rollforward(dt) == dt\n assert offset10.rollforward(dt) == datetime(2014, 7, 1, 13)\n\n 
datet = datetime(2014, 7, 1, 0)\n assert offset1.rollforward(datet) == datetime(2014, 7, 1, 9)\n assert offset2.rollforward(datet) == datetime(2014, 7, 1, 9)\n assert offset3.rollforward(datet) == datetime(2014, 7, 1, 9)\n assert offset4.rollforward(datet) == datetime(2014, 7, 1, 9)\n assert offset5.rollforward(datet) == datetime(2014, 7, 1, 11)\n assert offset6.rollforward(datet) == datet\n assert offset7.rollforward(datet) == datet\n assert offset8.rollforward(datet) == datetime(2014, 7, 1, 9)\n assert offset9.rollforward(datet) == datet\n assert offset10.rollforward(datet) == datet\n\n assert _offset(5).rollforward(dt) == dt\n\n def testRollforward2(self, _offset):\n assert _offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(\n 2014, 7, 7, 9\n )\n\n def test_roll_date_object(self):\n offset = BusinessHour()\n\n dt = datetime(2014, 7, 6, 15, 0)\n\n result = offset.rollback(dt)\n assert result == datetime(2014, 7, 4, 17)\n\n result = offset.rollforward(dt)\n assert result == datetime(2014, 7, 7, 9)\n\n normalize_cases = []\n normalize_cases.append(\n (\n BusinessHour(normalize=True),\n {\n datetime(2014, 7, 1, 8): datetime(2014, 7, 1),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 2),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 2),\n datetime(2014, 7, 1, 23): datetime(2014, 7, 2),\n datetime(2014, 7, 1, 0): datetime(2014, 7, 1),\n datetime(2014, 7, 4, 15): datetime(2014, 7, 4),\n datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),\n datetime(2014, 7, 5, 23): datetime(2014, 7, 7),\n datetime(2014, 7, 6, 10): datetime(2014, 7, 7),\n },\n )\n )\n\n normalize_cases.append(\n (\n BusinessHour(-1, normalize=True),\n {\n datetime(2014, 7, 1, 8): datetime(2014, 6, 30),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 1),\n datetime(2014, 7, 1, 10): datetime(2014, 6, 30),\n datetime(2014, 7, 1, 0): datetime(2014, 6, 30),\n datetime(2014, 7, 7, 10): 
datetime(2014, 7, 4),\n datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),\n datetime(2014, 7, 5, 23): datetime(2014, 7, 4),\n datetime(2014, 7, 6, 10): datetime(2014, 7, 4),\n },\n )\n )\n\n normalize_cases.append(\n (\n BusinessHour(1, normalize=True, start="17:00", end="04:00"),\n {\n datetime(2014, 7, 1, 8): datetime(2014, 7, 1),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1),\n datetime(2014, 7, 1, 23): datetime(2014, 7, 2),\n datetime(2014, 7, 2, 2): datetime(2014, 7, 2),\n datetime(2014, 7, 2, 3): datetime(2014, 7, 2),\n datetime(2014, 7, 4, 23): datetime(2014, 7, 5),\n datetime(2014, 7, 5, 2): datetime(2014, 7, 5),\n datetime(2014, 7, 7, 2): datetime(2014, 7, 7),\n datetime(2014, 7, 7, 17): datetime(2014, 7, 7),\n },\n )\n )\n\n @pytest.mark.parametrize("case", normalize_cases)\n def test_normalize(self, case):\n offset, cases = case\n for dt, expected in cases.items():\n assert offset._apply(dt) == expected\n\n on_offset_cases = []\n on_offset_cases.append(\n (\n BusinessHour(),\n {\n datetime(2014, 7, 1, 9): True,\n datetime(2014, 7, 1, 8, 59): False,\n datetime(2014, 7, 1, 8): False,\n datetime(2014, 7, 1, 17): True,\n datetime(2014, 7, 1, 17, 1): False,\n datetime(2014, 7, 1, 18): False,\n datetime(2014, 7, 5, 9): False,\n datetime(2014, 7, 6, 12): False,\n },\n )\n )\n\n on_offset_cases.append(\n (\n BusinessHour(start="10:00", end="15:00"),\n {\n datetime(2014, 7, 1, 9): False,\n datetime(2014, 7, 1, 10): True,\n datetime(2014, 7, 1, 15): True,\n datetime(2014, 7, 1, 15, 1): False,\n datetime(2014, 7, 5, 12): False,\n datetime(2014, 7, 6, 12): False,\n },\n )\n )\n\n on_offset_cases.append(\n (\n BusinessHour(start="19:00", end="05:00"),\n {\n datetime(2014, 7, 1, 9, 0): False,\n datetime(2014, 7, 1, 10, 0): False,\n datetime(2014, 7, 1, 15): False,\n datetime(2014, 7, 1, 15, 1): False,\n datetime(2014, 7, 5, 12, 0): False,\n datetime(2014, 7, 6, 12, 0): False,\n datetime(2014, 7, 1, 19, 0): True,\n datetime(2014, 7, 2, 0, 0): True,\n datetime(2014, 
7, 4, 23): True,\n datetime(2014, 7, 5, 1): True,\n datetime(2014, 7, 5, 5, 0): True,\n datetime(2014, 7, 6, 23, 0): False,\n datetime(2014, 7, 7, 3, 0): False,\n },\n )\n )\n\n on_offset_cases.append(\n (\n BusinessHour(start=["09:00", "13:00"], end=["12:00", "17:00"]),\n {\n datetime(2014, 7, 1, 9): True,\n datetime(2014, 7, 1, 8, 59): False,\n datetime(2014, 7, 1, 8): False,\n datetime(2014, 7, 1, 17): True,\n datetime(2014, 7, 1, 17, 1): False,\n datetime(2014, 7, 1, 18): False,\n datetime(2014, 7, 5, 9): False,\n datetime(2014, 7, 6, 12): False,\n datetime(2014, 7, 1, 12, 30): False,\n },\n )\n )\n\n on_offset_cases.append(\n (\n BusinessHour(start=["19:00", "23:00"], end=["21:00", "05:00"]),\n {\n datetime(2014, 7, 1, 9, 0): False,\n datetime(2014, 7, 1, 10, 0): False,\n datetime(2014, 7, 1, 15): False,\n datetime(2014, 7, 1, 15, 1): False,\n datetime(2014, 7, 5, 12, 0): False,\n datetime(2014, 7, 6, 12, 0): False,\n datetime(2014, 7, 1, 19, 0): True,\n datetime(2014, 7, 2, 0, 0): True,\n datetime(2014, 7, 4, 23): True,\n datetime(2014, 7, 5, 1): True,\n datetime(2014, 7, 5, 5, 0): True,\n datetime(2014, 7, 6, 23, 0): False,\n datetime(2014, 7, 7, 3, 0): False,\n datetime(2014, 7, 4, 22): False,\n },\n )\n )\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, cases = case\n for dt, expected in cases.items():\n assert offset.is_on_offset(dt) == expected\n\n apply_cases = [\n (\n BusinessHour(),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),\n datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),\n datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),\n datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 9),\n datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 2, 9, 30, 15),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 10),\n datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 12),\n # out of business hours\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 
10),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),\n datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),\n # saturday\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),\n datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),\n },\n ),\n (\n BusinessHour(4),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),\n datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),\n datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 11),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 12),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 13),\n datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 15),\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 13),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 13),\n datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),\n datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),\n },\n ),\n (\n BusinessHour(-1),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 10),\n datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 12),\n datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 14),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 15),\n datetime(2014, 7, 1, 10): datetime(2014, 6, 30, 17),\n datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 15, 30, 15),\n datetime(2014, 7, 1, 9, 30, 15): datetime(2014, 6, 30, 16, 30, 15),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 16),\n datetime(2014, 7, 1, 5): datetime(2014, 6, 30, 16),\n datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 10),\n # out of business hours\n datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 16),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 16),\n datetime(2014, 7, 2, 
23): datetime(2014, 7, 2, 16),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 16),\n # saturday\n datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 16),\n datetime(2014, 7, 7, 9): datetime(2014, 7, 4, 16),\n datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 16, 30),\n datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 16, 30, 30),\n },\n ),\n (\n BusinessHour(-4),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 15),\n datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),\n datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 11),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 12),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),\n datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),\n datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),\n datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 13),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),\n datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 13),\n datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 13, 30),\n datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 13, 30, 30),\n },\n ),\n (\n BusinessHour(start="13:00", end="16:00"),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),\n datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),\n datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 13),\n datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 14),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 14),\n datetime(2014, 7, 1, 15, 30, 15): datetime(2014, 7, 2, 13, 30, 15),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 14),\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),\n },\n ),\n (\n BusinessHour(n=2, start="13:00", end="16:00"),\n {\n datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 15),\n datetime(2014, 7, 2, 14): datetime(2014, 7, 3, 13),\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),\n datetime(2014, 7, 2, 14, 30): datetime(2014, 7, 3, 13, 
30),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 15),\n datetime(2014, 7, 4, 14, 30): datetime(2014, 7, 7, 13, 30),\n datetime(2014, 7, 4, 14, 30, 30): datetime(2014, 7, 7, 13, 30, 30),\n },\n ),\n (\n BusinessHour(n=-1, start="13:00", end="16:00"),\n {\n datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 15),\n datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 15),\n datetime(2014, 7, 2, 14): datetime(2014, 7, 1, 16),\n datetime(2014, 7, 2, 15): datetime(2014, 7, 2, 14),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 15),\n datetime(2014, 7, 2, 16): datetime(2014, 7, 2, 15),\n datetime(2014, 7, 2, 13, 30, 15): datetime(2014, 7, 1, 15, 30, 15),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 15),\n datetime(2014, 7, 7, 11): datetime(2014, 7, 4, 15),\n },\n ),\n (\n BusinessHour(n=-3, start="10:00", end="16:00"),\n {\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 13),\n datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 11),\n datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 13),\n datetime(2014, 7, 2, 13): datetime(2014, 7, 1, 16),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 13),\n datetime(2014, 7, 2, 11, 30): datetime(2014, 7, 1, 14, 30),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 13),\n datetime(2014, 7, 4, 10): datetime(2014, 7, 3, 13),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 13),\n datetime(2014, 7, 4, 16): datetime(2014, 7, 4, 13),\n datetime(2014, 7, 4, 12, 30): datetime(2014, 7, 3, 15, 30),\n datetime(2014, 7, 4, 12, 30, 30): datetime(2014, 7, 3, 15, 30, 30),\n },\n ),\n (\n BusinessHour(start="19:00", end="05:00"),\n {\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 20),\n datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 20),\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 20),\n datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 20),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 20),\n datetime(2014, 7, 2, 4, 30): datetime(2014, 7, 2, 
19, 30),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 1),\n datetime(2014, 7, 4, 10): datetime(2014, 7, 4, 20),\n datetime(2014, 7, 4, 23): datetime(2014, 7, 5, 0),\n datetime(2014, 7, 5, 0): datetime(2014, 7, 5, 1),\n datetime(2014, 7, 5, 4): datetime(2014, 7, 7, 19),\n datetime(2014, 7, 5, 4, 30): datetime(2014, 7, 7, 19, 30),\n datetime(2014, 7, 5, 4, 30, 30): datetime(2014, 7, 7, 19, 30, 30),\n },\n ),\n (\n BusinessHour(n=-1, start="19:00", end="05:00"),\n {\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),\n datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),\n datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),\n datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),\n datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),\n datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),\n datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),\n datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),\n datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 3),\n datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 5, 4, 30),\n datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 5, 4, 30, 30),\n },\n ),\n (\n BusinessHour(n=4, start="00:00", end="23:00"),\n {\n datetime(2014, 7, 3, 22): datetime(2014, 7, 4, 3),\n datetime(2014, 7, 4, 22): datetime(2014, 7, 7, 3),\n datetime(2014, 7, 3, 22, 30): datetime(2014, 7, 4, 3, 30),\n datetime(2014, 7, 3, 22, 20): datetime(2014, 7, 4, 3, 20),\n datetime(2014, 7, 4, 22, 30, 30): datetime(2014, 7, 7, 3, 30, 30),\n datetime(2014, 7, 4, 22, 30, 20): datetime(2014, 7, 7, 3, 30, 20),\n },\n ),\n (\n BusinessHour(n=-4, start="00:00", end="23:00"),\n {\n datetime(2014, 7, 4, 3): datetime(2014, 7, 3, 22),\n datetime(2014, 7, 7, 3): datetime(2014, 7, 4, 22),\n datetime(2014, 7, 4, 3, 30): datetime(2014, 7, 3, 22, 30),\n datetime(2014, 7, 4, 3, 20): datetime(2014, 7, 3, 22, 20),\n datetime(2014, 7, 7, 3, 
30, 30): datetime(2014, 7, 4, 22, 30, 30),\n datetime(2014, 7, 7, 3, 30, 20): datetime(2014, 7, 4, 22, 30, 20),\n },\n ),\n (\n BusinessHour(start=["09:00", "14:00"], end=["12:00", "18:00"]),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 14),\n datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),\n datetime(2014, 7, 1, 19): datetime(2014, 7, 2, 10),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 17),\n datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 1, 17, 30, 15),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 9),\n datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 14),\n # out of business hours\n datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 15),\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 10),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),\n datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),\n # saturday\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 9),\n datetime(2014, 7, 4, 17, 30): datetime(2014, 7, 7, 9, 30),\n datetime(2014, 7, 4, 17, 30, 30): datetime(2014, 7, 7, 9, 30, 30),\n },\n ),\n (\n BusinessHour(n=4, start=["09:00", "14:00"], end=["12:00", "18:00"]),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 17),\n datetime(2014, 7, 1, 13): datetime(2014, 7, 2, 9),\n datetime(2014, 7, 1, 15): datetime(2014, 7, 2, 10),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 2, 11),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 2, 14),\n datetime(2014, 7, 2, 11): datetime(2014, 7, 2, 17),\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 15),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 15),\n datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 15),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 15),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 15),\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 14),\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 11, 30),\n datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 11, 30, 
30),\n },\n ),\n (\n BusinessHour(n=-4, start=["09:00", "14:00"], end=["12:00", "18:00"]),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 6, 30, 16),\n datetime(2014, 7, 1, 13): datetime(2014, 6, 30, 17),\n datetime(2014, 7, 1, 15): datetime(2014, 6, 30, 18),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 1, 10),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 11),\n datetime(2014, 7, 2, 11): datetime(2014, 7, 1, 16),\n datetime(2014, 7, 2, 8): datetime(2014, 7, 1, 12),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 12),\n datetime(2014, 7, 2, 23): datetime(2014, 7, 2, 12),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 12),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 4, 12),\n datetime(2014, 7, 4, 18): datetime(2014, 7, 4, 12),\n datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 4, 14, 30),\n datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 4, 14, 30, 30),\n },\n ),\n (\n BusinessHour(n=-1, start=["19:00", "03:00"], end=["01:00", "05:00"]),\n {\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1, 4),\n datetime(2014, 7, 2, 14): datetime(2014, 7, 2, 4),\n datetime(2014, 7, 2, 8): datetime(2014, 7, 2, 4),\n datetime(2014, 7, 2, 13): datetime(2014, 7, 2, 4),\n datetime(2014, 7, 2, 20): datetime(2014, 7, 2, 5),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 2, 4),\n datetime(2014, 7, 2, 4): datetime(2014, 7, 2, 1),\n datetime(2014, 7, 2, 19, 30): datetime(2014, 7, 2, 4, 30),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 2, 23),\n datetime(2014, 7, 3, 6): datetime(2014, 7, 3, 4),\n datetime(2014, 7, 4, 23): datetime(2014, 7, 4, 22),\n datetime(2014, 7, 5, 0): datetime(2014, 7, 4, 23),\n datetime(2014, 7, 5, 4): datetime(2014, 7, 5, 0),\n datetime(2014, 7, 7, 3, 30): datetime(2014, 7, 5, 0, 30),\n datetime(2014, 7, 7, 19, 30): datetime(2014, 7, 7, 4, 30),\n datetime(2014, 7, 7, 19, 30, 30): datetime(2014, 7, 7, 4, 30, 30),\n },\n ),\n ]\n\n # long business hours (see gh-26381)\n\n # multiple business hours\n\n @pytest.mark.parametrize("case", apply_cases)\n def 
test_apply(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n apply_large_n_cases = [\n (\n # A week later\n BusinessHour(40),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 7, 8, 11),\n datetime(2014, 7, 1, 13): datetime(2014, 7, 8, 13),\n datetime(2014, 7, 1, 15): datetime(2014, 7, 8, 15),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 8, 16),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 9, 9),\n datetime(2014, 7, 2, 11): datetime(2014, 7, 9, 11),\n datetime(2014, 7, 2, 8): datetime(2014, 7, 9, 9),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 10, 9),\n datetime(2014, 7, 2, 23): datetime(2014, 7, 10, 9),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 10, 9),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 9),\n datetime(2014, 7, 4, 18): datetime(2014, 7, 14, 9),\n datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 14, 9, 30),\n datetime(2014, 7, 7, 9, 30, 30): datetime(2014, 7, 14, 9, 30, 30),\n },\n ),\n (\n # 3 days and 1 hour before\n BusinessHour(-25),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),\n datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 12),\n datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 16),\n datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 17),\n datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),\n datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 16),\n datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 16),\n datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 16),\n datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 16),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 16),\n datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 16),\n datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 16, 30),\n datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),\n },\n ),\n (\n # 5 days and 3 hours later\n BusinessHour(28, start="21:00", end="02:00"),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),\n datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 1),\n datetime(2014, 7, 
1, 23): datetime(2014, 7, 9, 21),\n datetime(2014, 7, 2, 2): datetime(2014, 7, 10, 0),\n datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),\n datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),\n datetime(2014, 7, 4, 2): datetime(2014, 7, 12, 0),\n datetime(2014, 7, 4, 3): datetime(2014, 7, 12, 0),\n datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 15, 0),\n datetime(2014, 7, 6, 18): datetime(2014, 7, 15, 0),\n datetime(2014, 7, 7, 1): datetime(2014, 7, 15, 0),\n datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),\n },\n ),\n (\n # large n for multiple opening hours (3 days and 1 hour before)\n BusinessHour(n=-25, start=["09:00", "14:00"], end=["12:00", "19:00"]),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 6, 26, 10),\n datetime(2014, 7, 1, 13): datetime(2014, 6, 26, 11),\n datetime(2014, 7, 1, 9): datetime(2014, 6, 25, 18),\n datetime(2014, 7, 1, 10): datetime(2014, 6, 25, 19),\n datetime(2014, 7, 3, 11): datetime(2014, 6, 30, 10),\n datetime(2014, 7, 3, 8): datetime(2014, 6, 27, 18),\n datetime(2014, 7, 3, 19): datetime(2014, 6, 30, 18),\n datetime(2014, 7, 3, 23): datetime(2014, 6, 30, 18),\n datetime(2014, 7, 4, 9): datetime(2014, 6, 30, 18),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 1, 18),\n datetime(2014, 7, 6, 18): datetime(2014, 7, 1, 18),\n datetime(2014, 7, 7, 9, 30): datetime(2014, 7, 1, 18, 30),\n datetime(2014, 7, 7, 10, 30, 30): datetime(2014, 7, 2, 9, 30, 30),\n },\n ),\n (\n # 5 days and 3 hours later\n BusinessHour(28, start=["21:00", "03:00"], end=["01:00", "04:00"]),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 7, 9, 0),\n datetime(2014, 7, 1, 22): datetime(2014, 7, 9, 3),\n datetime(2014, 7, 1, 23): datetime(2014, 7, 9, 21),\n datetime(2014, 7, 2, 2): datetime(2014, 7, 9, 23),\n datetime(2014, 7, 3, 21): datetime(2014, 7, 11, 0),\n datetime(2014, 7, 4, 1): datetime(2014, 7, 11, 23),\n datetime(2014, 7, 4, 2): datetime(2014, 7, 11, 23),\n datetime(2014, 7, 4, 3): datetime(2014, 
7, 11, 23),\n datetime(2014, 7, 4, 21): datetime(2014, 7, 12, 0),\n datetime(2014, 7, 5, 0): datetime(2014, 7, 14, 22),\n datetime(2014, 7, 5, 1): datetime(2014, 7, 14, 23),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 14, 23),\n datetime(2014, 7, 6, 18): datetime(2014, 7, 14, 23),\n datetime(2014, 7, 7, 1): datetime(2014, 7, 14, 23),\n datetime(2014, 7, 7, 23, 30): datetime(2014, 7, 15, 21, 30),\n },\n ),\n ]\n\n @pytest.mark.parametrize("case", apply_large_n_cases)\n def test_apply_large_n(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n def test_apply_nanoseconds(self):\n tests = [\n (\n BusinessHour(),\n {\n Timestamp("2014-07-04 15:00")\n + Nano(5): Timestamp("2014-07-04 16:00")\n + Nano(5),\n Timestamp("2014-07-04 16:00")\n + Nano(5): Timestamp("2014-07-07 09:00")\n + Nano(5),\n Timestamp("2014-07-04 16:00")\n - Nano(5): Timestamp("2014-07-04 17:00")\n - Nano(5),\n },\n ),\n (\n BusinessHour(-1),\n {\n Timestamp("2014-07-04 15:00")\n + Nano(5): Timestamp("2014-07-04 14:00")\n + Nano(5),\n Timestamp("2014-07-04 10:00")\n + Nano(5): Timestamp("2014-07-04 09:00")\n + Nano(5),\n Timestamp("2014-07-04 10:00")\n - Nano(5): Timestamp("2014-07-03 17:00")\n - Nano(5),\n },\n ),\n ]\n\n for offset, cases in tests:\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n @pytest.mark.parametrize("td_unit", ["s", "ms", "us", "ns"])\n @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])\n def test_bday_ignores_timedeltas(self, unit, td_unit):\n # GH#55608\n idx = date_range("2010/02/01", "2010/02/10", freq="12h", unit=unit)\n td = Timedelta(3, unit="h").as_unit(td_unit)\n off = BDay(offset=td)\n t1 = idx + off\n\n exp_unit = tm.get_finest_unit(td.unit, idx.unit)\n\n expected = DatetimeIndex(\n [\n "2010-02-02 03:00:00",\n "2010-02-02 15:00:00",\n "2010-02-03 03:00:00",\n "2010-02-03 15:00:00",\n "2010-02-04 03:00:00",\n "2010-02-04 15:00:00",\n "2010-02-05 
03:00:00",\n "2010-02-05 15:00:00",\n "2010-02-08 03:00:00",\n "2010-02-08 15:00:00",\n "2010-02-08 03:00:00",\n "2010-02-08 15:00:00",\n "2010-02-08 03:00:00",\n "2010-02-08 15:00:00",\n "2010-02-09 03:00:00",\n "2010-02-09 15:00:00",\n "2010-02-10 03:00:00",\n "2010-02-10 15:00:00",\n "2010-02-11 03:00:00",\n ],\n freq=None,\n ).as_unit(exp_unit)\n tm.assert_index_equal(t1, expected)\n\n # TODO(GH#55564): as_unit will be unnecessary\n pointwise = DatetimeIndex([x + off for x in idx]).as_unit(exp_unit)\n tm.assert_index_equal(pointwise, expected)\n\n def test_add_bday_offset_nanos(self):\n # GH#55608\n idx = date_range("2010/02/01", "2010/02/10", freq="12h", unit="ns")\n off = BDay(offset=Timedelta(3, unit="ns"))\n\n result = idx + off\n expected = DatetimeIndex([x + off for x in idx])\n tm.assert_index_equal(result, expected)\n\n\nclass TestOpeningTimes:\n # opening time should be affected by sign of n, not by n's value and end\n opening_time_cases = [\n (\n [\n BusinessHour(),\n BusinessHour(n=2),\n BusinessHour(n=4),\n BusinessHour(end="10:00"),\n BusinessHour(n=2, end="4:00"),\n BusinessHour(n=4, end="15:00"),\n ],\n {\n datetime(2014, 7, 1, 11): (\n datetime(2014, 7, 2, 9),\n datetime(2014, 7, 1, 9),\n ),\n datetime(2014, 7, 1, 18): (\n datetime(2014, 7, 2, 9),\n datetime(2014, 7, 1, 9),\n ),\n datetime(2014, 7, 1, 23): (\n datetime(2014, 7, 2, 9),\n datetime(2014, 7, 1, 9),\n ),\n datetime(2014, 7, 2, 8): (\n datetime(2014, 7, 2, 9),\n datetime(2014, 7, 1, 9),\n ),\n # if timestamp is on opening time, next opening time is\n # as it is\n datetime(2014, 7, 2, 9): (\n datetime(2014, 7, 2, 9),\n datetime(2014, 7, 2, 9),\n ),\n datetime(2014, 7, 2, 10): (\n datetime(2014, 7, 3, 9),\n datetime(2014, 7, 2, 9),\n ),\n # 2014-07-05 is saturday\n datetime(2014, 7, 5, 10): (\n datetime(2014, 7, 7, 9),\n datetime(2014, 7, 4, 9),\n ),\n datetime(2014, 7, 4, 10): (\n datetime(2014, 7, 7, 9),\n datetime(2014, 7, 4, 9),\n ),\n datetime(2014, 7, 4, 23): (\n datetime(2014, 7, 
7, 9),\n datetime(2014, 7, 4, 9),\n ),\n datetime(2014, 7, 6, 10): (\n datetime(2014, 7, 7, 9),\n datetime(2014, 7, 4, 9),\n ),\n datetime(2014, 7, 7, 5): (\n datetime(2014, 7, 7, 9),\n datetime(2014, 7, 4, 9),\n ),\n datetime(2014, 7, 7, 9, 1): (\n datetime(2014, 7, 8, 9),\n datetime(2014, 7, 7, 9),\n ),\n },\n ),\n (\n [\n BusinessHour(start="11:15"),\n BusinessHour(n=2, start="11:15"),\n BusinessHour(n=3, start="11:15"),\n BusinessHour(start="11:15", end="10:00"),\n BusinessHour(n=2, start="11:15", end="4:00"),\n BusinessHour(n=3, start="11:15", end="15:00"),\n ],\n {\n datetime(2014, 7, 1, 11): (\n datetime(2014, 7, 1, 11, 15),\n datetime(2014, 6, 30, 11, 15),\n ),\n datetime(2014, 7, 1, 18): (\n datetime(2014, 7, 2, 11, 15),\n datetime(2014, 7, 1, 11, 15),\n ),\n datetime(2014, 7, 1, 23): (\n datetime(2014, 7, 2, 11, 15),\n datetime(2014, 7, 1, 11, 15),\n ),\n datetime(2014, 7, 2, 8): (\n datetime(2014, 7, 2, 11, 15),\n datetime(2014, 7, 1, 11, 15),\n ),\n datetime(2014, 7, 2, 9): (\n datetime(2014, 7, 2, 11, 15),\n datetime(2014, 7, 1, 11, 15),\n ),\n datetime(2014, 7, 2, 10): (\n datetime(2014, 7, 2, 11, 15),\n datetime(2014, 7, 1, 11, 15),\n ),\n datetime(2014, 7, 2, 11, 15): (\n datetime(2014, 7, 2, 11, 15),\n datetime(2014, 7, 2, 11, 15),\n ),\n datetime(2014, 7, 2, 11, 15, 1): (\n datetime(2014, 7, 3, 11, 15),\n datetime(2014, 7, 2, 11, 15),\n ),\n datetime(2014, 7, 5, 10): (\n datetime(2014, 7, 7, 11, 15),\n datetime(2014, 7, 4, 11, 15),\n ),\n datetime(2014, 7, 4, 10): (\n datetime(2014, 7, 4, 11, 15),\n datetime(2014, 7, 3, 11, 15),\n ),\n datetime(2014, 7, 4, 23): (\n datetime(2014, 7, 7, 11, 15),\n datetime(2014, 7, 4, 11, 15),\n ),\n datetime(2014, 7, 6, 10): (\n datetime(2014, 7, 7, 11, 15),\n datetime(2014, 7, 4, 11, 15),\n ),\n datetime(2014, 7, 7, 5): (\n datetime(2014, 7, 7, 11, 15),\n datetime(2014, 7, 4, 11, 15),\n ),\n datetime(2014, 7, 7, 9, 1): (\n datetime(2014, 7, 7, 11, 15),\n datetime(2014, 7, 4, 11, 15),\n ),\n },\n ),\n (\n [\n 
BusinessHour(-1),\n BusinessHour(n=-2),\n BusinessHour(n=-4),\n BusinessHour(n=-1, end="10:00"),\n BusinessHour(n=-2, end="4:00"),\n BusinessHour(n=-4, end="15:00"),\n ],\n {\n datetime(2014, 7, 1, 11): (\n datetime(2014, 7, 1, 9),\n datetime(2014, 7, 2, 9),\n ),\n datetime(2014, 7, 1, 18): (\n datetime(2014, 7, 1, 9),\n datetime(2014, 7, 2, 9),\n ),\n datetime(2014, 7, 1, 23): (\n datetime(2014, 7, 1, 9),\n datetime(2014, 7, 2, 9),\n ),\n datetime(2014, 7, 2, 8): (\n datetime(2014, 7, 1, 9),\n datetime(2014, 7, 2, 9),\n ),\n datetime(2014, 7, 2, 9): (\n datetime(2014, 7, 2, 9),\n datetime(2014, 7, 2, 9),\n ),\n datetime(2014, 7, 2, 10): (\n datetime(2014, 7, 2, 9),\n datetime(2014, 7, 3, 9),\n ),\n datetime(2014, 7, 5, 10): (\n datetime(2014, 7, 4, 9),\n datetime(2014, 7, 7, 9),\n ),\n datetime(2014, 7, 4, 10): (\n datetime(2014, 7, 4, 9),\n datetime(2014, 7, 7, 9),\n ),\n datetime(2014, 7, 4, 23): (\n datetime(2014, 7, 4, 9),\n datetime(2014, 7, 7, 9),\n ),\n datetime(2014, 7, 6, 10): (\n datetime(2014, 7, 4, 9),\n datetime(2014, 7, 7, 9),\n ),\n datetime(2014, 7, 7, 5): (\n datetime(2014, 7, 4, 9),\n datetime(2014, 7, 7, 9),\n ),\n datetime(2014, 7, 7, 9): (\n datetime(2014, 7, 7, 9),\n datetime(2014, 7, 7, 9),\n ),\n datetime(2014, 7, 7, 9, 1): (\n datetime(2014, 7, 7, 9),\n datetime(2014, 7, 8, 9),\n ),\n },\n ),\n (\n [\n BusinessHour(start="17:00", end="05:00"),\n BusinessHour(n=3, start="17:00", end="03:00"),\n ],\n {\n datetime(2014, 7, 1, 11): (\n datetime(2014, 7, 1, 17),\n datetime(2014, 6, 30, 17),\n ),\n datetime(2014, 7, 1, 18): (\n datetime(2014, 7, 2, 17),\n datetime(2014, 7, 1, 17),\n ),\n datetime(2014, 7, 1, 23): (\n datetime(2014, 7, 2, 17),\n datetime(2014, 7, 1, 17),\n ),\n datetime(2014, 7, 2, 8): (\n datetime(2014, 7, 2, 17),\n datetime(2014, 7, 1, 17),\n ),\n datetime(2014, 7, 2, 9): (\n datetime(2014, 7, 2, 17),\n datetime(2014, 7, 1, 17),\n ),\n datetime(2014, 7, 4, 17): (\n datetime(2014, 7, 4, 17),\n datetime(2014, 7, 4, 17),\n ),\n 
datetime(2014, 7, 5, 10): (\n datetime(2014, 7, 7, 17),\n datetime(2014, 7, 4, 17),\n ),\n datetime(2014, 7, 4, 10): (\n datetime(2014, 7, 4, 17),\n datetime(2014, 7, 3, 17),\n ),\n datetime(2014, 7, 4, 23): (\n datetime(2014, 7, 7, 17),\n datetime(2014, 7, 4, 17),\n ),\n datetime(2014, 7, 6, 10): (\n datetime(2014, 7, 7, 17),\n datetime(2014, 7, 4, 17),\n ),\n datetime(2014, 7, 7, 5): (\n datetime(2014, 7, 7, 17),\n datetime(2014, 7, 4, 17),\n ),\n datetime(2014, 7, 7, 17, 1): (\n datetime(2014, 7, 8, 17),\n datetime(2014, 7, 7, 17),\n ),\n },\n ),\n (\n [\n BusinessHour(-1, start="17:00", end="05:00"),\n BusinessHour(n=-2, start="17:00", end="03:00"),\n ],\n {\n datetime(2014, 7, 1, 11): (\n datetime(2014, 6, 30, 17),\n datetime(2014, 7, 1, 17),\n ),\n datetime(2014, 7, 1, 18): (\n datetime(2014, 7, 1, 17),\n datetime(2014, 7, 2, 17),\n ),\n datetime(2014, 7, 1, 23): (\n datetime(2014, 7, 1, 17),\n datetime(2014, 7, 2, 17),\n ),\n datetime(2014, 7, 2, 8): (\n datetime(2014, 7, 1, 17),\n datetime(2014, 7, 2, 17),\n ),\n datetime(2014, 7, 2, 9): (\n datetime(2014, 7, 1, 17),\n datetime(2014, 7, 2, 17),\n ),\n datetime(2014, 7, 2, 16, 59): (\n datetime(2014, 7, 1, 17),\n datetime(2014, 7, 2, 17),\n ),\n datetime(2014, 7, 5, 10): (\n datetime(2014, 7, 4, 17),\n datetime(2014, 7, 7, 17),\n ),\n datetime(2014, 7, 4, 10): (\n datetime(2014, 7, 3, 17),\n datetime(2014, 7, 4, 17),\n ),\n datetime(2014, 7, 4, 23): (\n datetime(2014, 7, 4, 17),\n datetime(2014, 7, 7, 17),\n ),\n datetime(2014, 7, 6, 10): (\n datetime(2014, 7, 4, 17),\n datetime(2014, 7, 7, 17),\n ),\n datetime(2014, 7, 7, 5): (\n datetime(2014, 7, 4, 17),\n datetime(2014, 7, 7, 17),\n ),\n datetime(2014, 7, 7, 18): (\n datetime(2014, 7, 7, 17),\n datetime(2014, 7, 8, 17),\n ),\n },\n ),\n (\n [\n BusinessHour(start=["11:15", "15:00"], end=["13:00", "20:00"]),\n BusinessHour(n=3, start=["11:15", "15:00"], end=["12:00", "20:00"]),\n BusinessHour(start=["11:15", "15:00"], end=["13:00", "17:00"]),\n 
BusinessHour(n=2, start=["11:15", "15:00"], end=["12:00", "03:00"]),\n BusinessHour(n=3, start=["11:15", "15:00"], end=["13:00", "16:00"]),\n ],\n {\n datetime(2014, 7, 1, 11): (\n datetime(2014, 7, 1, 11, 15),\n datetime(2014, 6, 30, 15),\n ),\n datetime(2014, 7, 1, 18): (\n datetime(2014, 7, 2, 11, 15),\n datetime(2014, 7, 1, 15),\n ),\n datetime(2014, 7, 1, 23): (\n datetime(2014, 7, 2, 11, 15),\n datetime(2014, 7, 1, 15),\n ),\n datetime(2014, 7, 2, 8): (\n datetime(2014, 7, 2, 11, 15),\n datetime(2014, 7, 1, 15),\n ),\n datetime(2014, 7, 2, 9): (\n datetime(2014, 7, 2, 11, 15),\n datetime(2014, 7, 1, 15),\n ),\n datetime(2014, 7, 2, 10): (\n datetime(2014, 7, 2, 11, 15),\n datetime(2014, 7, 1, 15),\n ),\n datetime(2014, 7, 2, 11, 15): (\n datetime(2014, 7, 2, 11, 15),\n datetime(2014, 7, 2, 11, 15),\n ),\n datetime(2014, 7, 2, 11, 15, 1): (\n datetime(2014, 7, 2, 15),\n datetime(2014, 7, 2, 11, 15),\n ),\n datetime(2014, 7, 5, 10): (\n datetime(2014, 7, 7, 11, 15),\n datetime(2014, 7, 4, 15),\n ),\n datetime(2014, 7, 4, 10): (\n datetime(2014, 7, 4, 11, 15),\n datetime(2014, 7, 3, 15),\n ),\n datetime(2014, 7, 4, 23): (\n datetime(2014, 7, 7, 11, 15),\n datetime(2014, 7, 4, 15),\n ),\n datetime(2014, 7, 6, 10): (\n datetime(2014, 7, 7, 11, 15),\n datetime(2014, 7, 4, 15),\n ),\n datetime(2014, 7, 7, 5): (\n datetime(2014, 7, 7, 11, 15),\n datetime(2014, 7, 4, 15),\n ),\n datetime(2014, 7, 7, 9, 1): (\n datetime(2014, 7, 7, 11, 15),\n datetime(2014, 7, 4, 15),\n ),\n datetime(2014, 7, 7, 12): (\n datetime(2014, 7, 7, 15),\n datetime(2014, 7, 7, 11, 15),\n ),\n },\n ),\n (\n [\n BusinessHour(n=-1, start=["17:00", "08:00"], end=["05:00", "10:00"]),\n BusinessHour(n=-2, start=["08:00", "17:00"], end=["10:00", "03:00"]),\n ],\n {\n datetime(2014, 7, 1, 11): (\n datetime(2014, 7, 1, 8),\n datetime(2014, 7, 1, 17),\n ),\n datetime(2014, 7, 1, 18): (\n datetime(2014, 7, 1, 17),\n datetime(2014, 7, 2, 8),\n ),\n datetime(2014, 7, 1, 23): (\n datetime(2014, 7, 1, 17),\n 
datetime(2014, 7, 2, 8),\n ),\n datetime(2014, 7, 2, 8): (\n datetime(2014, 7, 2, 8),\n datetime(2014, 7, 2, 8),\n ),\n datetime(2014, 7, 2, 9): (\n datetime(2014, 7, 2, 8),\n datetime(2014, 7, 2, 17),\n ),\n datetime(2014, 7, 2, 16, 59): (\n datetime(2014, 7, 2, 8),\n datetime(2014, 7, 2, 17),\n ),\n datetime(2014, 7, 5, 10): (\n datetime(2014, 7, 4, 17),\n datetime(2014, 7, 7, 8),\n ),\n datetime(2014, 7, 4, 10): (\n datetime(2014, 7, 4, 8),\n datetime(2014, 7, 4, 17),\n ),\n datetime(2014, 7, 4, 23): (\n datetime(2014, 7, 4, 17),\n datetime(2014, 7, 7, 8),\n ),\n datetime(2014, 7, 6, 10): (\n datetime(2014, 7, 4, 17),\n datetime(2014, 7, 7, 8),\n ),\n datetime(2014, 7, 7, 5): (\n datetime(2014, 7, 4, 17),\n datetime(2014, 7, 7, 8),\n ),\n datetime(2014, 7, 7, 18): (\n datetime(2014, 7, 7, 17),\n datetime(2014, 7, 8, 8),\n ),\n },\n ),\n ]\n\n @pytest.mark.parametrize("case", opening_time_cases)\n def test_opening_time(self, case):\n _offsets, cases = case\n for offset in _offsets:\n for dt, (exp_next, exp_prev) in cases.items():\n assert offset._next_opening_time(dt) == exp_next\n assert offset._prev_opening_time(dt) == exp_prev\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_business_hour.py | test_business_hour.py | Python | 58,452 | 0.75 | 0.035294 | 0.015407 | react-lib | 193 | 2025-03-24T14:05:04.987634 | BSD-3-Clause | true | d4e21eaa80d852affee0ce45711464f2 |
"""\nTests for the following offsets:\n- BMonthBegin\n- BMonthEnd\n"""\nfrom __future__ import annotations\n\nfrom datetime import datetime\n\nimport pytest\n\nimport pandas as pd\nfrom pandas.tests.tseries.offsets.common import (\n assert_is_on_offset,\n assert_offset_equal,\n)\n\nfrom pandas.tseries.offsets import (\n BMonthBegin,\n BMonthEnd,\n)\n\n\n@pytest.mark.parametrize("n", [-2, 1])\n@pytest.mark.parametrize(\n "cls",\n [\n BMonthBegin,\n BMonthEnd,\n ],\n)\ndef test_apply_index(cls, n):\n offset = cls(n=n)\n rng = pd.date_range(start="1/1/2000", periods=100000, freq="min")\n ser = pd.Series(rng)\n\n res = rng + offset\n assert res.freq is None # not retained\n assert res[0] == rng[0] + offset\n assert res[-1] == rng[-1] + offset\n res2 = ser + offset\n # apply_index is only for indexes, not series, so no res2_v2\n assert res2.iloc[0] == ser.iloc[0] + offset\n assert res2.iloc[-1] == ser.iloc[-1] + offset\n\n\nclass TestBMonthBegin:\n def test_offsets_compare_equal(self):\n # root cause of #456\n offset1 = BMonthBegin()\n offset2 = BMonthBegin()\n assert not offset1 != offset2\n\n offset_cases = []\n offset_cases.append(\n (\n BMonthBegin(),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 1),\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2006, 12, 29): datetime(2007, 1, 1),\n datetime(2006, 12, 31): datetime(2007, 1, 1),\n datetime(2006, 9, 1): datetime(2006, 10, 2),\n datetime(2007, 1, 1): datetime(2007, 2, 1),\n datetime(2006, 12, 1): datetime(2007, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n BMonthBegin(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2006, 10, 2): datetime(2006, 10, 2),\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2006, 12, 29): datetime(2007, 1, 1),\n datetime(2006, 12, 31): datetime(2007, 1, 1),\n datetime(2006, 9, 15): datetime(2006, 10, 2),\n },\n )\n )\n\n offset_cases.append(\n (\n BMonthBegin(2),\n {\n datetime(2008, 1, 1): datetime(2008, 3, 3),\n datetime(2008, 1, 15): 
datetime(2008, 3, 3),\n datetime(2006, 12, 29): datetime(2007, 2, 1),\n datetime(2006, 12, 31): datetime(2007, 2, 1),\n datetime(2007, 1, 1): datetime(2007, 3, 1),\n datetime(2006, 11, 1): datetime(2007, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n BMonthBegin(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 1),\n datetime(2008, 6, 30): datetime(2008, 6, 2),\n datetime(2008, 6, 1): datetime(2008, 5, 1),\n datetime(2008, 3, 10): datetime(2008, 3, 3),\n datetime(2008, 12, 31): datetime(2008, 12, 1),\n datetime(2006, 12, 29): datetime(2006, 12, 1),\n datetime(2006, 12, 30): datetime(2006, 12, 1),\n datetime(2007, 1, 1): datetime(2006, 12, 1),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (BMonthBegin(), datetime(2007, 12, 31), False),\n (BMonthBegin(), datetime(2008, 1, 1), True),\n (BMonthBegin(), datetime(2001, 4, 2), True),\n (BMonthBegin(), datetime(2008, 3, 3), True),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\nclass TestBMonthEnd:\n def test_normalize(self):\n dt = datetime(2007, 1, 1, 3)\n\n result = dt + BMonthEnd(normalize=True)\n expected = dt.replace(hour=0) + BMonthEnd()\n assert result == expected\n\n def test_offsets_compare_equal(self):\n # root cause of #456\n offset1 = BMonthEnd()\n offset2 = BMonthEnd()\n assert not offset1 != offset2\n\n offset_cases = []\n offset_cases.append(\n (\n BMonthEnd(),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 2, 29),\n datetime(2006, 12, 29): datetime(2007, 1, 31),\n datetime(2006, 12, 31): datetime(2007, 1, 31),\n datetime(2007, 1, 1): datetime(2007, 1, 31),\n datetime(2006, 12, 1): datetime(2006, 12, 29),\n },\n )\n )\n\n offset_cases.append(\n (\n 
BMonthEnd(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 1, 31),\n datetime(2006, 12, 29): datetime(2006, 12, 29),\n datetime(2006, 12, 31): datetime(2007, 1, 31),\n datetime(2007, 1, 1): datetime(2007, 1, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n BMonthEnd(2),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 29),\n datetime(2008, 1, 31): datetime(2008, 3, 31),\n datetime(2006, 12, 29): datetime(2007, 2, 28),\n datetime(2006, 12, 31): datetime(2007, 2, 28),\n datetime(2007, 1, 1): datetime(2007, 2, 28),\n datetime(2006, 11, 1): datetime(2006, 12, 29),\n },\n )\n )\n\n offset_cases.append(\n (\n BMonthEnd(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 29),\n datetime(2008, 6, 30): datetime(2008, 5, 30),\n datetime(2008, 12, 31): datetime(2008, 11, 28),\n datetime(2006, 12, 29): datetime(2006, 11, 30),\n datetime(2006, 12, 30): datetime(2006, 12, 29),\n datetime(2007, 1, 1): datetime(2006, 12, 29),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (BMonthEnd(), datetime(2007, 12, 31), True),\n (BMonthEnd(), datetime(2008, 1, 1), False),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_business_month.py | test_business_month.py | Python | 6,717 | 0.95 | 0.064516 | 0.015789 | node-utils | 946 | 2024-08-19T08:39:43.472320 | BSD-3-Clause | true | cc49f74bd2ff9c271c4d98d490755f42 |
"""\nTests for the following offsets:\n- BQuarterBegin\n- BQuarterEnd\n"""\nfrom __future__ import annotations\n\nfrom datetime import datetime\n\nimport pytest\n\nimport pandas._testing as tm\nfrom pandas.tests.tseries.offsets.common import (\n assert_is_on_offset,\n assert_offset_equal,\n)\n\nfrom pandas.tseries.offsets import (\n BQuarterBegin,\n BQuarterEnd,\n)\n\n\ndef test_quarterly_dont_normalize():\n date = datetime(2012, 3, 31, 5, 30)\n\n offsets = (BQuarterEnd, BQuarterBegin)\n\n for klass in offsets:\n result = date + klass()\n assert result.time() == date.time()\n\n\n@pytest.mark.parametrize("offset", [BQuarterBegin(), BQuarterEnd()])\ndef test_on_offset(offset):\n dates = [\n datetime(2016, m, d)\n for m in [10, 11, 12]\n for d in [1, 2, 3, 28, 29, 30, 31]\n if not (m == 11 and d == 31)\n ]\n for date in dates:\n res = offset.is_on_offset(date)\n slow_version = date == (date + offset) - offset\n assert res == slow_version\n\n\nclass TestBQuarterBegin:\n def test_repr(self):\n expected = "<BusinessQuarterBegin: startingMonth=3>"\n assert repr(BQuarterBegin()) == expected\n expected = "<BusinessQuarterBegin: startingMonth=3>"\n assert repr(BQuarterBegin(startingMonth=3)) == expected\n expected = "<BusinessQuarterBegin: startingMonth=1>"\n assert repr(BQuarterBegin(startingMonth=1)) == expected\n\n def test_is_anchored(self):\n msg = "BQuarterBegin.is_anchored is deprecated "\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert BQuarterBegin(startingMonth=1).is_anchored()\n assert BQuarterBegin().is_anchored()\n assert not BQuarterBegin(2, startingMonth=1).is_anchored()\n\n def test_offset_corner_case(self):\n # corner\n offset = BQuarterBegin(n=-1, startingMonth=1)\n assert datetime(2007, 4, 3) + offset == datetime(2007, 4, 2)\n\n offset_cases = []\n offset_cases.append(\n (\n BQuarterBegin(startingMonth=1),\n {\n datetime(2008, 1, 1): datetime(2008, 4, 1),\n datetime(2008, 1, 31): datetime(2008, 4, 1),\n datetime(2008, 2, 15): 
datetime(2008, 4, 1),\n datetime(2008, 2, 29): datetime(2008, 4, 1),\n datetime(2008, 3, 15): datetime(2008, 4, 1),\n datetime(2008, 3, 31): datetime(2008, 4, 1),\n datetime(2008, 4, 15): datetime(2008, 7, 1),\n datetime(2007, 3, 15): datetime(2007, 4, 2),\n datetime(2007, 2, 28): datetime(2007, 4, 2),\n datetime(2007, 1, 1): datetime(2007, 4, 2),\n datetime(2007, 4, 15): datetime(2007, 7, 2),\n datetime(2007, 7, 1): datetime(2007, 7, 2),\n datetime(2007, 4, 1): datetime(2007, 4, 2),\n datetime(2007, 4, 2): datetime(2007, 7, 2),\n datetime(2008, 4, 30): datetime(2008, 7, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterBegin(startingMonth=2),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 1),\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2008, 1, 15): datetime(2008, 2, 1),\n datetime(2008, 2, 29): datetime(2008, 5, 1),\n datetime(2008, 3, 15): datetime(2008, 5, 1),\n datetime(2008, 3, 31): datetime(2008, 5, 1),\n datetime(2008, 4, 15): datetime(2008, 5, 1),\n datetime(2008, 8, 15): datetime(2008, 11, 3),\n datetime(2008, 9, 15): datetime(2008, 11, 3),\n datetime(2008, 11, 1): datetime(2008, 11, 3),\n datetime(2008, 4, 30): datetime(2008, 5, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterBegin(startingMonth=1, n=0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2007, 12, 31): datetime(2008, 1, 1),\n datetime(2008, 2, 15): datetime(2008, 4, 1),\n datetime(2008, 2, 29): datetime(2008, 4, 1),\n datetime(2008, 1, 15): datetime(2008, 4, 1),\n datetime(2008, 2, 27): datetime(2008, 4, 1),\n datetime(2008, 3, 15): datetime(2008, 4, 1),\n datetime(2007, 4, 1): datetime(2007, 4, 2),\n datetime(2007, 4, 2): datetime(2007, 4, 2),\n datetime(2007, 7, 1): datetime(2007, 7, 2),\n datetime(2007, 4, 15): datetime(2007, 7, 2),\n datetime(2007, 7, 2): datetime(2007, 7, 2),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterBegin(startingMonth=1, n=-1),\n {\n datetime(2008, 1, 1): datetime(2007, 10, 1),\n datetime(2008, 1, 31): datetime(2008, 
1, 1),\n datetime(2008, 2, 15): datetime(2008, 1, 1),\n datetime(2008, 2, 29): datetime(2008, 1, 1),\n datetime(2008, 3, 15): datetime(2008, 1, 1),\n datetime(2008, 3, 31): datetime(2008, 1, 1),\n datetime(2008, 4, 15): datetime(2008, 4, 1),\n datetime(2007, 7, 3): datetime(2007, 7, 2),\n datetime(2007, 4, 3): datetime(2007, 4, 2),\n datetime(2007, 7, 2): datetime(2007, 4, 2),\n datetime(2008, 4, 1): datetime(2008, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterBegin(startingMonth=1, n=2),\n {\n datetime(2008, 1, 1): datetime(2008, 7, 1),\n datetime(2008, 1, 15): datetime(2008, 7, 1),\n datetime(2008, 2, 29): datetime(2008, 7, 1),\n datetime(2008, 3, 15): datetime(2008, 7, 1),\n datetime(2007, 3, 31): datetime(2007, 7, 2),\n datetime(2007, 4, 15): datetime(2007, 10, 1),\n datetime(2008, 4, 30): datetime(2008, 10, 1),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n\nclass TestBQuarterEnd:\n def test_repr(self):\n expected = "<BusinessQuarterEnd: startingMonth=3>"\n assert repr(BQuarterEnd()) == expected\n expected = "<BusinessQuarterEnd: startingMonth=3>"\n assert repr(BQuarterEnd(startingMonth=3)) == expected\n expected = "<BusinessQuarterEnd: startingMonth=1>"\n assert repr(BQuarterEnd(startingMonth=1)) == expected\n\n def test_is_anchored(self):\n msg = "BQuarterEnd.is_anchored is deprecated "\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert BQuarterEnd(startingMonth=1).is_anchored()\n assert BQuarterEnd().is_anchored()\n assert not BQuarterEnd(2, startingMonth=1).is_anchored()\n\n def test_offset_corner_case(self):\n # corner\n offset = BQuarterEnd(n=-1, startingMonth=1)\n assert datetime(2010, 1, 31) + offset == datetime(2010, 1, 29)\n\n offset_cases = []\n offset_cases.append(\n (\n BQuarterEnd(startingMonth=1),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n 
datetime(2008, 1, 31): datetime(2008, 4, 30),\n datetime(2008, 2, 15): datetime(2008, 4, 30),\n datetime(2008, 2, 29): datetime(2008, 4, 30),\n datetime(2008, 3, 15): datetime(2008, 4, 30),\n datetime(2008, 3, 31): datetime(2008, 4, 30),\n datetime(2008, 4, 15): datetime(2008, 4, 30),\n datetime(2008, 4, 30): datetime(2008, 7, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterEnd(startingMonth=2),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 29),\n datetime(2008, 1, 31): datetime(2008, 2, 29),\n datetime(2008, 2, 15): datetime(2008, 2, 29),\n datetime(2008, 2, 29): datetime(2008, 5, 30),\n datetime(2008, 3, 15): datetime(2008, 5, 30),\n datetime(2008, 3, 31): datetime(2008, 5, 30),\n datetime(2008, 4, 15): datetime(2008, 5, 30),\n datetime(2008, 4, 30): datetime(2008, 5, 30),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterEnd(startingMonth=1, n=0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 1, 31),\n datetime(2008, 2, 15): datetime(2008, 4, 30),\n datetime(2008, 2, 29): datetime(2008, 4, 30),\n datetime(2008, 3, 15): datetime(2008, 4, 30),\n datetime(2008, 3, 31): datetime(2008, 4, 30),\n datetime(2008, 4, 15): datetime(2008, 4, 30),\n datetime(2008, 4, 30): datetime(2008, 4, 30),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterEnd(startingMonth=1, n=-1),\n {\n datetime(2008, 1, 1): datetime(2007, 10, 31),\n datetime(2008, 1, 31): datetime(2007, 10, 31),\n datetime(2008, 2, 15): datetime(2008, 1, 31),\n datetime(2008, 2, 29): datetime(2008, 1, 31),\n datetime(2008, 3, 15): datetime(2008, 1, 31),\n datetime(2008, 3, 31): datetime(2008, 1, 31),\n datetime(2008, 4, 15): datetime(2008, 1, 31),\n datetime(2008, 4, 30): datetime(2008, 1, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n BQuarterEnd(startingMonth=1, n=2),\n {\n datetime(2008, 1, 31): datetime(2008, 7, 31),\n datetime(2008, 2, 15): datetime(2008, 7, 31),\n datetime(2008, 2, 29): datetime(2008, 7, 31),\n datetime(2008, 3, 15): datetime(2008, 7, 
31),\n datetime(2008, 3, 31): datetime(2008, 7, 31),\n datetime(2008, 4, 15): datetime(2008, 7, 31),\n datetime(2008, 4, 30): datetime(2008, 10, 31),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (BQuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),\n (BQuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),\n (BQuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),\n (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),\n (BQuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),\n (BQuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),\n (BQuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),\n (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),\n (BQuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),\n (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), True),\n (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),\n (BQuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),\n (BQuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),\n (BQuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),\n (BQuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),\n (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), True),\n (BQuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), False),\n (BQuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),\n (BQuarterEnd(1, 
startingMonth=3), datetime(2008, 5, 30), False),\n (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), True),\n (BQuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), False),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_business_quarter.py | test_business_quarter.py | Python | 12,591 | 0.95 | 0.066667 | 0.007117 | vue-tools | 919 | 2023-07-20T04:36:12.745785 | MIT | true | 55f779a92e2ab6b5429dfc8dbe7b2a14 |
"""\nTests for the following offsets:\n- BYearBegin\n- BYearEnd\n"""\nfrom __future__ import annotations\n\nfrom datetime import datetime\n\nimport pytest\n\nfrom pandas.tests.tseries.offsets.common import (\n assert_is_on_offset,\n assert_offset_equal,\n)\n\nfrom pandas.tseries.offsets import (\n BYearBegin,\n BYearEnd,\n)\n\n\nclass TestBYearBegin:\n def test_misspecified(self):\n msg = "Month must go from 1 to 12"\n with pytest.raises(ValueError, match=msg):\n BYearBegin(month=13)\n with pytest.raises(ValueError, match=msg):\n BYearEnd(month=13)\n\n offset_cases = []\n offset_cases.append(\n (\n BYearBegin(),\n {\n datetime(2008, 1, 1): datetime(2009, 1, 1),\n datetime(2008, 6, 30): datetime(2009, 1, 1),\n datetime(2008, 12, 31): datetime(2009, 1, 1),\n datetime(2011, 1, 1): datetime(2011, 1, 3),\n datetime(2011, 1, 3): datetime(2012, 1, 2),\n datetime(2005, 12, 30): datetime(2006, 1, 2),\n datetime(2005, 12, 31): datetime(2006, 1, 2),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearBegin(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 6, 30): datetime(2009, 1, 1),\n datetime(2008, 12, 31): datetime(2009, 1, 1),\n datetime(2005, 12, 30): datetime(2006, 1, 2),\n datetime(2005, 12, 31): datetime(2006, 1, 2),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearBegin(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 1, 2),\n datetime(2009, 1, 4): datetime(2009, 1, 1),\n datetime(2009, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 6, 30): datetime(2008, 1, 1),\n datetime(2008, 12, 31): datetime(2008, 1, 1),\n datetime(2006, 12, 29): datetime(2006, 1, 2),\n datetime(2006, 12, 30): datetime(2006, 1, 2),\n datetime(2006, 1, 1): datetime(2005, 1, 3),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearBegin(-2),\n {\n datetime(2007, 1, 1): datetime(2005, 1, 3),\n datetime(2007, 6, 30): datetime(2006, 1, 2),\n datetime(2008, 12, 31): datetime(2007, 1, 1),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n 
offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n\nclass TestBYearEnd:\n offset_cases = []\n offset_cases.append(\n (\n BYearEnd(),\n {\n datetime(2008, 1, 1): datetime(2008, 12, 31),\n datetime(2008, 6, 30): datetime(2008, 12, 31),\n datetime(2008, 12, 31): datetime(2009, 12, 31),\n datetime(2005, 12, 30): datetime(2006, 12, 29),\n datetime(2005, 12, 31): datetime(2006, 12, 29),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearEnd(0),\n {\n datetime(2008, 1, 1): datetime(2008, 12, 31),\n datetime(2008, 6, 30): datetime(2008, 12, 31),\n datetime(2008, 12, 31): datetime(2008, 12, 31),\n datetime(2005, 12, 31): datetime(2006, 12, 29),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearEnd(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 29),\n datetime(2008, 6, 30): datetime(2007, 12, 31),\n datetime(2008, 12, 31): datetime(2007, 12, 31),\n datetime(2006, 12, 29): datetime(2005, 12, 30),\n datetime(2006, 12, 30): datetime(2006, 12, 29),\n datetime(2007, 1, 1): datetime(2006, 12, 29),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearEnd(-2),\n {\n datetime(2007, 1, 1): datetime(2005, 12, 30),\n datetime(2008, 6, 30): datetime(2006, 12, 29),\n datetime(2008, 12, 31): datetime(2006, 12, 29),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (BYearEnd(), datetime(2007, 12, 31), True),\n (BYearEnd(), datetime(2008, 1, 1), False),\n (BYearEnd(), datetime(2006, 12, 31), False),\n (BYearEnd(), datetime(2006, 12, 29), True),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\nclass TestBYearEndLagged:\n def test_bad_month_fail(self):\n msg = "Month must go from 1 to 12"\n with pytest.raises(ValueError, match=msg):\n 
BYearEnd(month=13)\n with pytest.raises(ValueError, match=msg):\n BYearEnd(month=0)\n\n offset_cases = []\n offset_cases.append(\n (\n BYearEnd(month=6),\n {\n datetime(2008, 1, 1): datetime(2008, 6, 30),\n datetime(2007, 6, 30): datetime(2008, 6, 30),\n },\n )\n )\n\n offset_cases.append(\n (\n BYearEnd(n=-1, month=6),\n {\n datetime(2008, 1, 1): datetime(2007, 6, 29),\n datetime(2007, 6, 30): datetime(2007, 6, 29),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n def test_roll(self):\n offset = BYearEnd(month=6)\n date = datetime(2009, 11, 30)\n\n assert offset.rollforward(date) == datetime(2010, 6, 30)\n assert offset.rollback(date) == datetime(2009, 6, 30)\n\n on_offset_cases = [\n (BYearEnd(month=2), datetime(2007, 2, 28), True),\n (BYearEnd(month=6), datetime(2007, 6, 30), False),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_business_year.py | test_business_year.py | Python | 6,436 | 0.85 | 0.069767 | 0 | react-lib | 908 | 2023-11-05T09:13:00.557521 | GPL-3.0 | true | c6d21601afa733b062b20d5d2886ae4e |
from datetime import datetime\n\nfrom dateutil.tz.tz import tzlocal\nimport pytest\n\nfrom pandas._libs.tslibs import (\n OutOfBoundsDatetime,\n Timestamp,\n)\nfrom pandas.compat import (\n IS64,\n is_platform_windows,\n)\n\nfrom pandas.tseries.offsets import (\n FY5253,\n BDay,\n BMonthBegin,\n BMonthEnd,\n BQuarterBegin,\n BQuarterEnd,\n BusinessHour,\n BYearBegin,\n BYearEnd,\n CBMonthBegin,\n CBMonthEnd,\n CDay,\n CustomBusinessHour,\n DateOffset,\n FY5253Quarter,\n LastWeekOfMonth,\n MonthBegin,\n MonthEnd,\n QuarterEnd,\n SemiMonthBegin,\n SemiMonthEnd,\n Week,\n WeekOfMonth,\n YearBegin,\n YearEnd,\n)\n\n\ndef _get_offset(klass, value=1, normalize=False):\n # create instance from offset class\n if klass is FY5253:\n klass = klass(\n n=value,\n startingMonth=1,\n weekday=1,\n variation="last",\n normalize=normalize,\n )\n elif klass is FY5253Quarter:\n klass = klass(\n n=value,\n startingMonth=1,\n weekday=1,\n qtr_with_extra_week=1,\n variation="last",\n normalize=normalize,\n )\n elif klass is LastWeekOfMonth:\n klass = klass(n=value, weekday=5, normalize=normalize)\n elif klass is WeekOfMonth:\n klass = klass(n=value, week=1, weekday=5, normalize=normalize)\n elif klass is Week:\n klass = klass(n=value, weekday=5, normalize=normalize)\n elif klass is DateOffset:\n klass = klass(days=value, normalize=normalize)\n else:\n klass = klass(value, normalize=normalize)\n return klass\n\n\n@pytest.fixture(\n params=[\n BDay,\n BusinessHour,\n BMonthEnd,\n BMonthBegin,\n BQuarterEnd,\n BQuarterBegin,\n BYearEnd,\n BYearBegin,\n CDay,\n CustomBusinessHour,\n CBMonthEnd,\n CBMonthBegin,\n MonthEnd,\n MonthBegin,\n SemiMonthBegin,\n SemiMonthEnd,\n QuarterEnd,\n LastWeekOfMonth,\n WeekOfMonth,\n Week,\n YearBegin,\n YearEnd,\n FY5253,\n FY5253Quarter,\n DateOffset,\n ]\n)\ndef _offset(request):\n return request.param\n\n\n@pytest.fixture\ndef dt(_offset):\n if _offset in (CBMonthBegin, CBMonthEnd, BDay):\n return Timestamp(2008, 1, 1)\n elif _offset is 
(CustomBusinessHour, BusinessHour):\n return Timestamp(2014, 7, 1, 10, 00)\n return Timestamp(2008, 1, 2)\n\n\ndef test_apply_out_of_range(request, tz_naive_fixture, _offset):\n tz = tz_naive_fixture\n\n # try to create an out-of-bounds result timestamp; if we can't create\n # the offset skip\n try:\n if _offset in (BusinessHour, CustomBusinessHour):\n # Using 10000 in BusinessHour fails in tz check because of DST\n # difference\n offset = _get_offset(_offset, value=100000)\n else:\n offset = _get_offset(_offset, value=10000)\n\n result = Timestamp("20080101") + offset\n assert isinstance(result, datetime)\n assert result.tzinfo is None\n\n # Check tz is preserved\n t = Timestamp("20080101", tz=tz)\n result = t + offset\n assert isinstance(result, datetime)\n if tz is not None:\n assert t.tzinfo is not None\n\n if isinstance(tz, tzlocal) and not IS64 and _offset is not DateOffset:\n # If we hit OutOfBoundsDatetime on non-64 bit machines\n # we'll drop out of the try clause before the next test\n request.applymarker(\n pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038")\n )\n elif (\n isinstance(tz, tzlocal)\n and is_platform_windows()\n and _offset in (QuarterEnd, BQuarterBegin, BQuarterEnd)\n ):\n request.applymarker(\n pytest.mark.xfail(reason="After GH#49737 t.tzinfo is None on CI")\n )\n assert str(t.tzinfo) == str(result.tzinfo)\n\n except OutOfBoundsDatetime:\n pass\n except (ValueError, KeyError):\n # we are creating an invalid offset\n # so ignore\n pass\n\n\ndef test_offsets_compare_equal(_offset):\n # root cause of GH#456: __ne__ was not implemented\n offset1 = _offset()\n offset2 = _offset()\n assert not offset1 != offset2\n assert offset1 == offset2\n\n\n@pytest.mark.parametrize(\n "date, offset2",\n [\n [Timestamp(2008, 1, 1), BDay(2)],\n [Timestamp(2014, 7, 1, 10, 00), BusinessHour(n=3)],\n [\n Timestamp(2014, 7, 1, 10),\n CustomBusinessHour(\n holidays=["2014-06-27", Timestamp(2014, 6, 30), Timestamp("2014-07-02")]\n ),\n ],\n 
[Timestamp(2008, 1, 2), SemiMonthEnd(2)],\n [Timestamp(2008, 1, 2), SemiMonthBegin(2)],\n [Timestamp(2008, 1, 2), Week(2)],\n [Timestamp(2008, 1, 2), WeekOfMonth(2)],\n [Timestamp(2008, 1, 2), LastWeekOfMonth(2)],\n ],\n)\ndef test_rsub(date, offset2):\n assert date - offset2 == (-offset2)._apply(date)\n\n\n@pytest.mark.parametrize(\n "date, offset2",\n [\n [Timestamp(2008, 1, 1), BDay(2)],\n [Timestamp(2014, 7, 1, 10, 00), BusinessHour(n=3)],\n [\n Timestamp(2014, 7, 1, 10),\n CustomBusinessHour(\n holidays=["2014-06-27", Timestamp(2014, 6, 30), Timestamp("2014-07-02")]\n ),\n ],\n [Timestamp(2008, 1, 2), SemiMonthEnd(2)],\n [Timestamp(2008, 1, 2), SemiMonthBegin(2)],\n [Timestamp(2008, 1, 2), Week(2)],\n [Timestamp(2008, 1, 2), WeekOfMonth(2)],\n [Timestamp(2008, 1, 2), LastWeekOfMonth(2)],\n ],\n)\ndef test_radd(date, offset2):\n assert date + offset2 == offset2 + date\n\n\n@pytest.mark.parametrize(\n "date, offset_box, offset2",\n [\n [Timestamp(2008, 1, 1), BDay, BDay(2)],\n [Timestamp(2008, 1, 2), SemiMonthEnd, SemiMonthEnd(2)],\n [Timestamp(2008, 1, 2), SemiMonthBegin, SemiMonthBegin(2)],\n [Timestamp(2008, 1, 2), Week, Week(2)],\n [Timestamp(2008, 1, 2), WeekOfMonth, WeekOfMonth(2)],\n [Timestamp(2008, 1, 2), LastWeekOfMonth, LastWeekOfMonth(2)],\n ],\n)\ndef test_sub(date, offset_box, offset2):\n off = offset2\n msg = "Cannot subtract datetime from offset"\n with pytest.raises(TypeError, match=msg):\n off - date\n\n assert 2 * off - off == off\n assert date - offset2 == date + offset_box(-2)\n assert date - offset2 == date - (2 * off - off)\n\n\n@pytest.mark.parametrize(\n "offset_box, offset1",\n [\n [BDay, BDay()],\n [LastWeekOfMonth, LastWeekOfMonth()],\n [WeekOfMonth, WeekOfMonth()],\n [Week, Week()],\n [SemiMonthBegin, SemiMonthBegin()],\n [SemiMonthEnd, SemiMonthEnd()],\n [CustomBusinessHour, CustomBusinessHour(weekmask="Tue Wed Thu Fri")],\n [BusinessHour, BusinessHour()],\n ],\n)\ndef test_Mult1(offset_box, offset1):\n dt = Timestamp(2008, 1, 2)\n 
assert dt + 10 * offset1 == dt + offset_box(10)\n assert dt + 5 * offset1 == dt + offset_box(5)\n\n\ndef test_compare_str(_offset):\n # GH#23524\n # comparing to strings that cannot be cast to DateOffsets should\n # not raise for __eq__ or __ne__\n off = _get_offset(_offset)\n\n assert not off == "infer"\n assert off != "foo"\n # Note: inequalities are only implemented for Tick subclasses;\n # tests for this are in test_ticks\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_common.py | test_common.py | Python | 7,406 | 0.95 | 0.085821 | 0.067227 | awesome-app | 845 | 2024-03-23T20:38:44.068166 | BSD-3-Clause | true | ff398e1fcfef2fc0b3697044797dce97 |
"""\nTests for offsets.CustomBusinessDay / CDay\n"""\nfrom datetime import (\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.offsets import CDay\n\nfrom pandas import (\n _testing as tm,\n read_pickle,\n)\nfrom pandas.tests.tseries.offsets.common import assert_offset_equal\n\nfrom pandas.tseries.holiday import USFederalHolidayCalendar\n\n\n@pytest.fixture\ndef offset():\n return CDay()\n\n\n@pytest.fixture\ndef offset2():\n return CDay(2)\n\n\nclass TestCustomBusinessDay:\n def test_repr(self, offset, offset2):\n assert repr(offset) == "<CustomBusinessDay>"\n assert repr(offset2) == "<2 * CustomBusinessDays>"\n\n expected = "<BusinessDay: offset=datetime.timedelta(days=1)>"\n assert repr(offset + timedelta(1)) == expected\n\n def test_holidays(self):\n # Define a TradingDay offset\n holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]\n tday = CDay(holidays=holidays)\n for year in range(2012, 2015):\n dt = datetime(year, 4, 30)\n xp = datetime(year, 5, 2)\n rs = dt + tday\n assert rs == xp\n\n def test_weekmask(self):\n weekmask_saudi = "Sat Sun Mon Tue Wed" # Thu-Fri Weekend\n weekmask_uae = "1111001" # Fri-Sat Weekend\n weekmask_egypt = [1, 1, 1, 1, 0, 0, 1] # Fri-Sat Weekend\n bday_saudi = CDay(weekmask=weekmask_saudi)\n bday_uae = CDay(weekmask=weekmask_uae)\n bday_egypt = CDay(weekmask=weekmask_egypt)\n dt = datetime(2013, 5, 1)\n xp_saudi = datetime(2013, 5, 4)\n xp_uae = datetime(2013, 5, 2)\n xp_egypt = datetime(2013, 5, 2)\n assert xp_saudi == dt + bday_saudi\n assert xp_uae == dt + bday_uae\n assert xp_egypt == dt + bday_egypt\n xp2 = datetime(2013, 5, 5)\n assert xp2 == dt + 2 * bday_saudi\n assert xp2 == dt + 2 * bday_uae\n assert xp2 == dt + 2 * bday_egypt\n\n def test_weekmask_and_holidays(self):\n weekmask_egypt = "Sun Mon Tue Wed Thu" # Fri-Sat Weekend\n holidays = ["2012-05-01", datetime(2013, 5, 1), np.datetime64("2014-05-01")]\n bday_egypt = CDay(holidays=holidays, 
weekmask=weekmask_egypt)\n dt = datetime(2013, 4, 30)\n xp_egypt = datetime(2013, 5, 5)\n assert xp_egypt == dt + 2 * bday_egypt\n\n @pytest.mark.filterwarnings("ignore:Non:pandas.errors.PerformanceWarning")\n def test_calendar(self):\n calendar = USFederalHolidayCalendar()\n dt = datetime(2014, 1, 17)\n assert_offset_equal(CDay(calendar=calendar), dt, datetime(2014, 1, 21))\n\n def test_roundtrip_pickle(self, offset, offset2):\n def _check_roundtrip(obj):\n unpickled = tm.round_trip_pickle(obj)\n assert unpickled == obj\n\n _check_roundtrip(offset)\n _check_roundtrip(offset2)\n _check_roundtrip(offset * 2)\n\n def test_pickle_compat_0_14_1(self, datapath):\n hdays = [datetime(2013, 1, 1) for ele in range(4)]\n pth = datapath("tseries", "offsets", "data", "cday-0.14.1.pickle")\n cday0_14_1 = read_pickle(pth)\n cday = CDay(holidays=hdays)\n assert cday == cday0_14_1\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_custom_business_day.py | test_custom_business_day.py | Python | 3,180 | 0.95 | 0.142857 | 0.0125 | node-utils | 272 | 2024-10-25T10:21:20.399356 | Apache-2.0 | true | 7b0d1e25de05d93018bb96c79e7dff2c |
"""\nTests for offsets.CustomBusinessHour\n"""\nfrom __future__ import annotations\n\nfrom datetime import (\n datetime,\n time as dt_time,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import Timestamp\nfrom pandas._libs.tslibs.offsets import (\n BusinessHour,\n CustomBusinessHour,\n Nano,\n)\n\nfrom pandas.tests.tseries.offsets.common import assert_offset_equal\n\nfrom pandas.tseries.holiday import USFederalHolidayCalendar\n\nholidays = ["2014-06-27", datetime(2014, 6, 30), np.datetime64("2014-07-02")]\n\n\n@pytest.fixture\ndef dt():\n return datetime(2014, 7, 1, 10, 00)\n\n\n@pytest.fixture\ndef _offset():\n return CustomBusinessHour\n\n\n# 2014 Calendar to check custom holidays\n# Sun Mon Tue Wed Thu Fri Sat\n# 6/22 23 24 25 26 27 28\n# 29 30 7/1 2 3 4 5\n# 6 7 8 9 10 11 12\n@pytest.fixture\ndef offset1():\n return CustomBusinessHour(weekmask="Tue Wed Thu Fri")\n\n\n@pytest.fixture\ndef offset2():\n return CustomBusinessHour(holidays=holidays)\n\n\nclass TestCustomBusinessHour:\n def test_constructor_errors(self):\n msg = "time data must be specified only with hour and minute"\n with pytest.raises(ValueError, match=msg):\n CustomBusinessHour(start=dt_time(11, 0, 5))\n msg = "time data must match '%H:%M' format"\n with pytest.raises(ValueError, match=msg):\n CustomBusinessHour(start="AAA")\n msg = "time data must match '%H:%M' format"\n with pytest.raises(ValueError, match=msg):\n CustomBusinessHour(start="14:00:05")\n\n def test_different_normalize_equals(self, _offset):\n # GH#21404 changed __eq__ to return False when `normalize` does not match\n offset = _offset()\n offset2 = _offset(normalize=True)\n assert offset != offset2\n\n def test_repr(self, offset1, offset2):\n assert repr(offset1) == "<CustomBusinessHour: cbh=09:00-17:00>"\n assert repr(offset2) == "<CustomBusinessHour: cbh=09:00-17:00>"\n\n def test_with_offset(self, dt):\n expected = Timestamp("2014-07-01 13:00")\n\n assert dt + CustomBusinessHour() * 3 == expected\n assert 
dt + CustomBusinessHour(n=3) == expected\n\n def test_eq(self, offset1, offset2):\n for offset in [offset1, offset2]:\n assert offset == offset\n\n assert CustomBusinessHour() != CustomBusinessHour(-1)\n assert CustomBusinessHour(start="09:00") == CustomBusinessHour()\n assert CustomBusinessHour(start="09:00") != CustomBusinessHour(start="09:01")\n assert CustomBusinessHour(start="09:00", end="17:00") != CustomBusinessHour(\n start="17:00", end="09:01"\n )\n\n assert CustomBusinessHour(weekmask="Tue Wed Thu Fri") != CustomBusinessHour(\n weekmask="Mon Tue Wed Thu Fri"\n )\n assert CustomBusinessHour(holidays=["2014-06-27"]) != CustomBusinessHour(\n holidays=["2014-06-28"]\n )\n\n def test_hash(self, offset1, offset2):\n assert hash(offset1) == hash(offset1)\n assert hash(offset2) == hash(offset2)\n\n def test_add_dateime(self, dt, offset1, offset2):\n assert offset1 + dt == datetime(2014, 7, 1, 11)\n assert offset2 + dt == datetime(2014, 7, 1, 11)\n\n def testRollback1(self, dt, offset1, offset2):\n assert offset1.rollback(dt) == dt\n assert offset2.rollback(dt) == dt\n\n d = datetime(2014, 7, 1, 0)\n\n # 2014/07/01 is Tuesday, 06/30 is Monday(holiday)\n assert offset1.rollback(d) == datetime(2014, 6, 27, 17)\n\n # 2014/6/30 and 2014/6/27 are holidays\n assert offset2.rollback(d) == datetime(2014, 6, 26, 17)\n\n def testRollback2(self, _offset):\n assert _offset(-3).rollback(datetime(2014, 7, 5, 15, 0)) == datetime(\n 2014, 7, 4, 17, 0\n )\n\n def testRollforward1(self, dt, offset1, offset2):\n assert offset1.rollforward(dt) == dt\n assert offset2.rollforward(dt) == dt\n\n d = datetime(2014, 7, 1, 0)\n assert offset1.rollforward(d) == datetime(2014, 7, 1, 9)\n assert offset2.rollforward(d) == datetime(2014, 7, 1, 9)\n\n def testRollforward2(self, _offset):\n assert _offset(-3).rollforward(datetime(2014, 7, 5, 16, 0)) == datetime(\n 2014, 7, 7, 9\n )\n\n def test_roll_date_object(self):\n offset = BusinessHour()\n\n dt = datetime(2014, 7, 6, 15, 0)\n\n result = 
offset.rollback(dt)\n assert result == datetime(2014, 7, 4, 17)\n\n result = offset.rollforward(dt)\n assert result == datetime(2014, 7, 7, 9)\n\n normalize_cases = [\n (\n CustomBusinessHour(normalize=True, holidays=holidays),\n {\n datetime(2014, 7, 1, 8): datetime(2014, 7, 1),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 3),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 3),\n datetime(2014, 7, 1, 23): datetime(2014, 7, 3),\n datetime(2014, 7, 1, 0): datetime(2014, 7, 1),\n datetime(2014, 7, 4, 15): datetime(2014, 7, 4),\n datetime(2014, 7, 4, 15, 59): datetime(2014, 7, 4),\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7),\n datetime(2014, 7, 5, 23): datetime(2014, 7, 7),\n datetime(2014, 7, 6, 10): datetime(2014, 7, 7),\n },\n ),\n (\n CustomBusinessHour(-1, normalize=True, holidays=holidays),\n {\n datetime(2014, 7, 1, 8): datetime(2014, 6, 26),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 1),\n datetime(2014, 7, 1, 10): datetime(2014, 6, 26),\n datetime(2014, 7, 1, 0): datetime(2014, 6, 26),\n datetime(2014, 7, 7, 10): datetime(2014, 7, 4),\n datetime(2014, 7, 7, 10, 1): datetime(2014, 7, 7),\n datetime(2014, 7, 5, 23): datetime(2014, 7, 4),\n datetime(2014, 7, 6, 10): datetime(2014, 7, 4),\n },\n ),\n (\n CustomBusinessHour(\n 1, normalize=True, start="17:00", end="04:00", holidays=holidays\n ),\n {\n datetime(2014, 7, 1, 8): datetime(2014, 7, 1),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 1),\n datetime(2014, 7, 1, 23): datetime(2014, 7, 2),\n datetime(2014, 7, 2, 2): datetime(2014, 7, 2),\n datetime(2014, 7, 2, 3): datetime(2014, 7, 3),\n datetime(2014, 7, 4, 23): datetime(2014, 7, 5),\n datetime(2014, 7, 5, 2): datetime(2014, 7, 5),\n datetime(2014, 7, 7, 2): datetime(2014, 7, 7),\n datetime(2014, 7, 7, 17): datetime(2014, 7, 7),\n },\n ),\n ]\n\n @pytest.mark.parametrize("norm_cases", normalize_cases)\n def test_normalize(self, norm_cases):\n offset, cases = norm_cases\n for dt, expected in 
cases.items():\n assert offset._apply(dt) == expected\n\n @pytest.mark.parametrize(\n "dt, expected",\n [\n [datetime(2014, 7, 1, 9), False],\n [datetime(2014, 7, 1, 10), True],\n [datetime(2014, 7, 1, 15), True],\n [datetime(2014, 7, 1, 15, 1), False],\n [datetime(2014, 7, 5, 12), False],\n [datetime(2014, 7, 6, 12), False],\n ],\n )\n def test_is_on_offset(self, dt, expected):\n offset = CustomBusinessHour(start="10:00", end="15:00", holidays=holidays)\n assert offset.is_on_offset(dt) == expected\n\n apply_cases = [\n (\n CustomBusinessHour(holidays=holidays),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 12),\n datetime(2014, 7, 1, 13): datetime(2014, 7, 1, 14),\n datetime(2014, 7, 1, 15): datetime(2014, 7, 1, 16),\n datetime(2014, 7, 1, 19): datetime(2014, 7, 3, 10),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 9),\n datetime(2014, 7, 1, 16, 30, 15): datetime(2014, 7, 3, 9, 30, 15),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 10),\n datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 10),\n # out of business hours\n datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 10),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 10),\n datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 10),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 10),\n # saturday\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 10),\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 10),\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 9, 30),\n datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 9, 30, 30),\n },\n ),\n (\n CustomBusinessHour(4, holidays=holidays),\n {\n datetime(2014, 7, 1, 11): datetime(2014, 7, 1, 15),\n datetime(2014, 7, 1, 13): datetime(2014, 7, 3, 9),\n datetime(2014, 7, 1, 15): datetime(2014, 7, 3, 11),\n datetime(2014, 7, 1, 16): datetime(2014, 7, 3, 12),\n datetime(2014, 7, 1, 17): datetime(2014, 7, 3, 13),\n datetime(2014, 7, 2, 11): datetime(2014, 7, 3, 13),\n datetime(2014, 7, 2, 8): datetime(2014, 7, 3, 13),\n datetime(2014, 7, 2, 19): datetime(2014, 7, 3, 
13),\n datetime(2014, 7, 2, 23): datetime(2014, 7, 3, 13),\n datetime(2014, 7, 3, 0): datetime(2014, 7, 3, 13),\n datetime(2014, 7, 5, 15): datetime(2014, 7, 7, 13),\n datetime(2014, 7, 4, 17): datetime(2014, 7, 7, 13),\n datetime(2014, 7, 4, 16, 30): datetime(2014, 7, 7, 12, 30),\n datetime(2014, 7, 4, 16, 30, 30): datetime(2014, 7, 7, 12, 30, 30),\n },\n ),\n ]\n\n @pytest.mark.parametrize("apply_case", apply_cases)\n def test_apply(self, apply_case):\n offset, cases = apply_case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n nano_cases = [\n (\n CustomBusinessHour(holidays=holidays),\n {\n Timestamp("2014-07-01 15:00")\n + Nano(5): Timestamp("2014-07-01 16:00")\n + Nano(5),\n Timestamp("2014-07-01 16:00")\n + Nano(5): Timestamp("2014-07-03 09:00")\n + Nano(5),\n Timestamp("2014-07-01 16:00")\n - Nano(5): Timestamp("2014-07-01 17:00")\n - Nano(5),\n },\n ),\n (\n CustomBusinessHour(-1, holidays=holidays),\n {\n Timestamp("2014-07-01 15:00")\n + Nano(5): Timestamp("2014-07-01 14:00")\n + Nano(5),\n Timestamp("2014-07-01 10:00")\n + Nano(5): Timestamp("2014-07-01 09:00")\n + Nano(5),\n Timestamp("2014-07-01 10:00")\n - Nano(5): Timestamp("2014-06-26 17:00")\n - Nano(5),\n },\n ),\n ]\n\n @pytest.mark.parametrize("nano_case", nano_cases)\n def test_apply_nanoseconds(self, nano_case):\n offset, cases = nano_case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n def test_us_federal_holiday_with_datetime(self):\n # GH 16867\n bhour_us = CustomBusinessHour(calendar=USFederalHolidayCalendar())\n t0 = datetime(2014, 1, 17, 15)\n result = t0 + bhour_us * 8\n expected = Timestamp("2014-01-21 15:00:00")\n assert result == expected\n\n\n@pytest.mark.parametrize(\n "weekmask, expected_time, mult",\n [\n ["Mon Tue Wed Thu Fri Sat", "2018-11-10 09:00:00", 10],\n ["Tue Wed Thu Fri Sat", "2018-11-13 08:00:00", 18],\n ],\n)\ndef test_custom_businesshour_weekmask_and_holidays(weekmask, expected_time, 
mult):\n # GH 23542\n holidays = ["2018-11-09"]\n bh = CustomBusinessHour(\n start="08:00", end="17:00", weekmask=weekmask, holidays=holidays\n )\n result = Timestamp("2018-11-08 08:00") + mult * bh\n expected = Timestamp(expected_time)\n assert result == expected\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_custom_business_hour.py | test_custom_business_hour.py | Python | 12,312 | 0.95 | 0.085106 | 0.042553 | python-kit | 669 | 2025-05-05T18:41:05.587399 | Apache-2.0 | true | a75330546e0156534b351b14e2da45d3 |
"""\nTests for the following offsets:\n- CustomBusinessMonthBase\n- CustomBusinessMonthBegin\n- CustomBusinessMonthEnd\n"""\nfrom __future__ import annotations\n\nfrom datetime import (\n date,\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.offsets import (\n CBMonthBegin,\n CBMonthEnd,\n CDay,\n)\n\nimport pandas._testing as tm\nfrom pandas.tests.tseries.offsets.common import (\n assert_is_on_offset,\n assert_offset_equal,\n)\n\nfrom pandas.tseries import offsets\n\n\n@pytest.fixture\ndef dt():\n return datetime(2008, 1, 1)\n\n\nclass TestCommonCBM:\n @pytest.mark.parametrize("offset2", [CBMonthBegin(2), CBMonthEnd(2)])\n def test_eq(self, offset2):\n assert offset2 == offset2\n\n @pytest.mark.parametrize("offset2", [CBMonthBegin(2), CBMonthEnd(2)])\n def test_hash(self, offset2):\n assert hash(offset2) == hash(offset2)\n\n @pytest.mark.parametrize("_offset", [CBMonthBegin, CBMonthEnd])\n def test_roundtrip_pickle(self, _offset):\n def _check_roundtrip(obj):\n unpickled = tm.round_trip_pickle(obj)\n assert unpickled == obj\n\n _check_roundtrip(_offset())\n _check_roundtrip(_offset(2))\n _check_roundtrip(_offset() * 2)\n\n @pytest.mark.parametrize("_offset", [CBMonthBegin, CBMonthEnd])\n def test_copy(self, _offset):\n # GH 17452\n off = _offset(weekmask="Mon Wed Fri")\n assert off == off.copy()\n\n\nclass TestCustomBusinessMonthBegin:\n @pytest.fixture\n def _offset(self):\n return CBMonthBegin\n\n @pytest.fixture\n def offset(self):\n return CBMonthBegin()\n\n @pytest.fixture\n def offset2(self):\n return CBMonthBegin(2)\n\n def test_different_normalize_equals(self, _offset):\n # GH#21404 changed __eq__ to return False when `normalize` does not match\n offset = _offset()\n offset2 = _offset(normalize=True)\n assert offset != offset2\n\n def test_repr(self, offset, offset2):\n assert repr(offset) == "<CustomBusinessMonthBegin>"\n assert repr(offset2) == "<2 * CustomBusinessMonthBegins>"\n\n def test_add_datetime(self, 
dt, offset2):\n assert offset2 + dt == datetime(2008, 3, 3)\n\n def testRollback1(self):\n assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)\n\n def testRollback2(self, dt):\n assert CBMonthBegin(10).rollback(dt) == datetime(2008, 1, 1)\n\n def testRollforward1(self, dt):\n assert CBMonthBegin(10).rollforward(dt) == datetime(2008, 1, 1)\n\n def test_roll_date_object(self):\n offset = CBMonthBegin()\n\n dt = date(2012, 9, 15)\n\n result = offset.rollback(dt)\n assert result == datetime(2012, 9, 3)\n\n result = offset.rollforward(dt)\n assert result == datetime(2012, 10, 1)\n\n offset = offsets.Day()\n result = offset.rollback(dt)\n assert result == datetime(2012, 9, 15)\n\n result = offset.rollforward(dt)\n assert result == datetime(2012, 9, 15)\n\n on_offset_cases = [\n (CBMonthBegin(), datetime(2008, 1, 1), True),\n (CBMonthBegin(), datetime(2008, 1, 31), False),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n apply_cases = [\n (\n CBMonthBegin(),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 1),\n datetime(2008, 2, 7): datetime(2008, 3, 3),\n },\n ),\n (\n 2 * CBMonthBegin(),\n {\n datetime(2008, 1, 1): datetime(2008, 3, 3),\n datetime(2008, 2, 7): datetime(2008, 4, 1),\n },\n ),\n (\n -CBMonthBegin(),\n {\n datetime(2008, 1, 1): datetime(2007, 12, 3),\n datetime(2008, 2, 8): datetime(2008, 2, 1),\n },\n ),\n (\n -2 * CBMonthBegin(),\n {\n datetime(2008, 1, 1): datetime(2007, 11, 1),\n datetime(2008, 2, 9): datetime(2008, 1, 1),\n },\n ),\n (\n CBMonthBegin(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 1, 7): datetime(2008, 2, 1),\n },\n ),\n ]\n\n @pytest.mark.parametrize("case", apply_cases)\n def test_apply(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n def test_apply_large_n(self):\n dt = datetime(2012, 10, 
23)\n\n result = dt + CBMonthBegin(10)\n assert result == datetime(2013, 8, 1)\n\n result = dt + CDay(100) - CDay(100)\n assert result == dt\n\n off = CBMonthBegin() * 6\n rs = datetime(2012, 1, 1) - off\n xp = datetime(2011, 7, 1)\n assert rs == xp\n\n st = datetime(2011, 12, 18)\n rs = st + off\n\n xp = datetime(2012, 6, 1)\n assert rs == xp\n\n def test_holidays(self):\n # Define a TradingDay offset\n holidays = ["2012-02-01", datetime(2012, 2, 2), np.datetime64("2012-03-01")]\n bm_offset = CBMonthBegin(holidays=holidays)\n dt = datetime(2012, 1, 1)\n\n assert dt + bm_offset == datetime(2012, 1, 2)\n assert dt + 2 * bm_offset == datetime(2012, 2, 3)\n\n @pytest.mark.parametrize(\n "case",\n [\n (\n CBMonthBegin(n=1, offset=timedelta(days=5)),\n {\n datetime(2021, 3, 1): datetime(2021, 4, 1) + timedelta(days=5),\n datetime(2021, 4, 17): datetime(2021, 5, 3) + timedelta(days=5),\n },\n ),\n (\n CBMonthBegin(n=2, offset=timedelta(days=40)),\n {\n datetime(2021, 3, 10): datetime(2021, 5, 3) + timedelta(days=40),\n datetime(2021, 4, 30): datetime(2021, 6, 1) + timedelta(days=40),\n },\n ),\n (\n CBMonthBegin(n=1, offset=timedelta(days=-5)),\n {\n datetime(2021, 3, 1): datetime(2021, 4, 1) - timedelta(days=5),\n datetime(2021, 4, 11): datetime(2021, 5, 3) - timedelta(days=5),\n },\n ),\n (\n -2 * CBMonthBegin(n=1, offset=timedelta(days=10)),\n {\n datetime(2021, 3, 1): datetime(2021, 1, 1) + timedelta(days=10),\n datetime(2021, 4, 3): datetime(2021, 3, 1) + timedelta(days=10),\n },\n ),\n (\n CBMonthBegin(n=0, offset=timedelta(days=1)),\n {\n datetime(2021, 3, 2): datetime(2021, 4, 1) + timedelta(days=1),\n datetime(2021, 4, 1): datetime(2021, 4, 1) + timedelta(days=1),\n },\n ),\n (\n CBMonthBegin(\n n=1, holidays=["2021-04-01", "2021-04-02"], offset=timedelta(days=1)\n ),\n {\n datetime(2021, 3, 2): datetime(2021, 4, 5) + timedelta(days=1),\n },\n ),\n ],\n )\n def test_apply_with_extra_offset(self, case):\n offset, cases = case\n for base, expected in 
cases.items():\n assert_offset_equal(offset, base, expected)\n\n\nclass TestCustomBusinessMonthEnd:\n @pytest.fixture\n def _offset(self):\n return CBMonthEnd\n\n @pytest.fixture\n def offset(self):\n return CBMonthEnd()\n\n @pytest.fixture\n def offset2(self):\n return CBMonthEnd(2)\n\n def test_different_normalize_equals(self, _offset):\n # GH#21404 changed __eq__ to return False when `normalize` does not match\n offset = _offset()\n offset2 = _offset(normalize=True)\n assert offset != offset2\n\n def test_repr(self, offset, offset2):\n assert repr(offset) == "<CustomBusinessMonthEnd>"\n assert repr(offset2) == "<2 * CustomBusinessMonthEnds>"\n\n def test_add_datetime(self, dt, offset2):\n assert offset2 + dt == datetime(2008, 2, 29)\n\n def testRollback1(self):\n assert CDay(10).rollback(datetime(2007, 12, 31)) == datetime(2007, 12, 31)\n\n def testRollback2(self, dt):\n assert CBMonthEnd(10).rollback(dt) == datetime(2007, 12, 31)\n\n def testRollforward1(self, dt):\n assert CBMonthEnd(10).rollforward(dt) == datetime(2008, 1, 31)\n\n def test_roll_date_object(self):\n offset = CBMonthEnd()\n\n dt = date(2012, 9, 15)\n\n result = offset.rollback(dt)\n assert result == datetime(2012, 8, 31)\n\n result = offset.rollforward(dt)\n assert result == datetime(2012, 9, 28)\n\n offset = offsets.Day()\n result = offset.rollback(dt)\n assert result == datetime(2012, 9, 15)\n\n result = offset.rollforward(dt)\n assert result == datetime(2012, 9, 15)\n\n on_offset_cases = [\n (CBMonthEnd(), datetime(2008, 1, 31), True),\n (CBMonthEnd(), datetime(2008, 1, 1), False),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n apply_cases = [\n (\n CBMonthEnd(),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 2, 7): datetime(2008, 2, 29),\n },\n ),\n (\n 2 * CBMonthEnd(),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 29),\n datetime(2008, 2, 7): 
datetime(2008, 3, 31),\n },\n ),\n (\n -CBMonthEnd(),\n {\n datetime(2008, 1, 1): datetime(2007, 12, 31),\n datetime(2008, 2, 8): datetime(2008, 1, 31),\n },\n ),\n (\n -2 * CBMonthEnd(),\n {\n datetime(2008, 1, 1): datetime(2007, 11, 30),\n datetime(2008, 2, 9): datetime(2007, 12, 31),\n },\n ),\n (\n CBMonthEnd(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 2, 7): datetime(2008, 2, 29),\n },\n ),\n ]\n\n @pytest.mark.parametrize("case", apply_cases)\n def test_apply(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n def test_apply_large_n(self):\n dt = datetime(2012, 10, 23)\n\n result = dt + CBMonthEnd(10)\n assert result == datetime(2013, 7, 31)\n\n result = dt + CDay(100) - CDay(100)\n assert result == dt\n\n off = CBMonthEnd() * 6\n rs = datetime(2012, 1, 1) - off\n xp = datetime(2011, 7, 29)\n assert rs == xp\n\n st = datetime(2011, 12, 18)\n rs = st + off\n xp = datetime(2012, 5, 31)\n assert rs == xp\n\n def test_holidays(self):\n # Define a TradingDay offset\n holidays = ["2012-01-31", datetime(2012, 2, 28), np.datetime64("2012-02-29")]\n bm_offset = CBMonthEnd(holidays=holidays)\n dt = datetime(2012, 1, 1)\n assert dt + bm_offset == datetime(2012, 1, 30)\n assert dt + 2 * bm_offset == datetime(2012, 2, 27)\n\n @pytest.mark.parametrize(\n "case",\n [\n (\n CBMonthEnd(n=1, offset=timedelta(days=5)),\n {\n datetime(2021, 3, 1): datetime(2021, 3, 31) + timedelta(days=5),\n datetime(2021, 4, 17): datetime(2021, 4, 30) + timedelta(days=5),\n },\n ),\n (\n CBMonthEnd(n=2, offset=timedelta(days=40)),\n {\n datetime(2021, 3, 10): datetime(2021, 4, 30) + timedelta(days=40),\n datetime(2021, 4, 30): datetime(2021, 6, 30) + timedelta(days=40),\n },\n ),\n (\n CBMonthEnd(n=1, offset=timedelta(days=-5)),\n {\n datetime(2021, 3, 1): datetime(2021, 3, 31) - timedelta(days=5),\n datetime(2021, 4, 11): datetime(2021, 4, 30) - timedelta(days=5),\n },\n ),\n (\n -2 * 
CBMonthEnd(n=1, offset=timedelta(days=10)),\n {\n datetime(2021, 3, 1): datetime(2021, 1, 29) + timedelta(days=10),\n datetime(2021, 4, 3): datetime(2021, 2, 26) + timedelta(days=10),\n },\n ),\n (\n CBMonthEnd(n=0, offset=timedelta(days=1)),\n {\n datetime(2021, 3, 2): datetime(2021, 3, 31) + timedelta(days=1),\n datetime(2021, 4, 1): datetime(2021, 4, 30) + timedelta(days=1),\n },\n ),\n (\n CBMonthEnd(n=1, holidays=["2021-03-31"], offset=timedelta(days=1)),\n {\n datetime(2021, 3, 2): datetime(2021, 3, 30) + timedelta(days=1),\n },\n ),\n ],\n )\n def test_apply_with_extra_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_custom_business_month.py | test_custom_business_month.py | Python | 13,362 | 0.95 | 0.100686 | 0.013587 | vue-tools | 300 | 2024-04-06T07:46:19.443232 | Apache-2.0 | true | 5426b98095e22e2e7b3689d473c4287f |
"""\nTests for DateOffset additions over Daylight Savings Time\n"""\nfrom datetime import timedelta\n\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs import Timestamp\nfrom pandas._libs.tslibs.offsets import (\n BMonthBegin,\n BMonthEnd,\n BQuarterBegin,\n BQuarterEnd,\n BYearBegin,\n BYearEnd,\n CBMonthBegin,\n CBMonthEnd,\n CustomBusinessDay,\n DateOffset,\n Day,\n MonthBegin,\n MonthEnd,\n QuarterBegin,\n QuarterEnd,\n SemiMonthBegin,\n SemiMonthEnd,\n Week,\n YearBegin,\n YearEnd,\n)\nfrom pandas.errors import PerformanceWarning\n\nfrom pandas import DatetimeIndex\nimport pandas._testing as tm\nfrom pandas.util.version import Version\n\n# error: Module has no attribute "__version__"\npytz_version = Version(pytz.__version__) # type: ignore[attr-defined]\n\n\ndef get_utc_offset_hours(ts):\n # take a Timestamp and compute total hours of utc offset\n o = ts.utcoffset()\n return (o.days * 24 * 3600 + o.seconds) / 3600.0\n\n\nclass TestDST:\n # one microsecond before the DST transition\n ts_pre_fallback = "2013-11-03 01:59:59.999999"\n ts_pre_springfwd = "2013-03-10 01:59:59.999999"\n\n # test both basic names and dateutil timezones\n timezone_utc_offsets = {\n "US/Eastern": {"utc_offset_daylight": -4, "utc_offset_standard": -5},\n "dateutil/US/Pacific": {"utc_offset_daylight": -7, "utc_offset_standard": -8},\n }\n valid_date_offsets_singular = [\n "weekday",\n "day",\n "hour",\n "minute",\n "second",\n "microsecond",\n ]\n valid_date_offsets_plural = [\n "weeks",\n "days",\n "hours",\n "minutes",\n "seconds",\n "milliseconds",\n "microseconds",\n ]\n\n def _test_all_offsets(self, n, **kwds):\n valid_offsets = (\n self.valid_date_offsets_plural\n if n > 1\n else self.valid_date_offsets_singular\n )\n\n for name in valid_offsets:\n self._test_offset(offset_name=name, offset_n=n, **kwds)\n\n def _test_offset(self, offset_name, offset_n, tstart, expected_utc_offset):\n offset = DateOffset(**{offset_name: offset_n})\n\n if (\n offset_name in ["hour", "minute", 
"second", "microsecond"]\n and offset_n == 1\n and tstart == Timestamp("2013-11-03 01:59:59.999999-0500", tz="US/Eastern")\n ):\n # This addition results in an ambiguous wall time\n err_msg = {\n "hour": "2013-11-03 01:59:59.999999",\n "minute": "2013-11-03 01:01:59.999999",\n "second": "2013-11-03 01:59:01.999999",\n "microsecond": "2013-11-03 01:59:59.000001",\n }[offset_name]\n with pytest.raises(pytz.AmbiguousTimeError, match=err_msg):\n tstart + offset\n # While we're here, let's check that we get the same behavior in a\n # vectorized path\n dti = DatetimeIndex([tstart])\n warn_msg = "Non-vectorized DateOffset"\n with pytest.raises(pytz.AmbiguousTimeError, match=err_msg):\n with tm.assert_produces_warning(PerformanceWarning, match=warn_msg):\n dti + offset\n return\n\n t = tstart + offset\n if expected_utc_offset is not None:\n assert get_utc_offset_hours(t) == expected_utc_offset\n\n if offset_name == "weeks":\n # dates should match\n assert t.date() == timedelta(days=7 * offset.kwds["weeks"]) + tstart.date()\n # expect the same day of week, hour of day, minute, second, ...\n assert (\n t.dayofweek == tstart.dayofweek\n and t.hour == tstart.hour\n and t.minute == tstart.minute\n and t.second == tstart.second\n )\n elif offset_name == "days":\n # dates should match\n assert timedelta(offset.kwds["days"]) + tstart.date() == t.date()\n # expect the same hour of day, minute, second, ...\n assert (\n t.hour == tstart.hour\n and t.minute == tstart.minute\n and t.second == tstart.second\n )\n elif offset_name in self.valid_date_offsets_singular:\n # expect the singular offset value to match between tstart and t\n datepart_offset = getattr(\n t, offset_name if offset_name != "weekday" else "dayofweek"\n )\n assert datepart_offset == offset.kwds[offset_name]\n else:\n # the offset should be the same as if it was done in UTC\n assert t == (tstart.tz_convert("UTC") + offset).tz_convert("US/Pacific")\n\n def _make_timestamp(self, string, hrs_offset, tz):\n if hrs_offset 
>= 0:\n offset_string = f"{hrs_offset:02d}00"\n else:\n offset_string = f"-{(hrs_offset * -1):02}00"\n return Timestamp(string + offset_string).tz_convert(tz)\n\n def test_springforward_plural(self):\n # test moving from standard to daylight savings\n for tz, utc_offsets in self.timezone_utc_offsets.items():\n hrs_pre = utc_offsets["utc_offset_standard"]\n hrs_post = utc_offsets["utc_offset_daylight"]\n self._test_all_offsets(\n n=3,\n tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),\n expected_utc_offset=hrs_post,\n )\n\n def test_fallback_singular(self):\n # in the case of singular offsets, we don't necessarily know which utc\n # offset the new Timestamp will wind up in (the tz for 1 month may be\n # different from 1 second) so we don't specify an expected_utc_offset\n for tz, utc_offsets in self.timezone_utc_offsets.items():\n hrs_pre = utc_offsets["utc_offset_standard"]\n self._test_all_offsets(\n n=1,\n tstart=self._make_timestamp(self.ts_pre_fallback, hrs_pre, tz),\n expected_utc_offset=None,\n )\n\n def test_springforward_singular(self):\n for tz, utc_offsets in self.timezone_utc_offsets.items():\n hrs_pre = utc_offsets["utc_offset_standard"]\n self._test_all_offsets(\n n=1,\n tstart=self._make_timestamp(self.ts_pre_springfwd, hrs_pre, tz),\n expected_utc_offset=None,\n )\n\n offset_classes = {\n MonthBegin: ["11/2/2012", "12/1/2012"],\n MonthEnd: ["11/2/2012", "11/30/2012"],\n BMonthBegin: ["11/2/2012", "12/3/2012"],\n BMonthEnd: ["11/2/2012", "11/30/2012"],\n CBMonthBegin: ["11/2/2012", "12/3/2012"],\n CBMonthEnd: ["11/2/2012", "11/30/2012"],\n SemiMonthBegin: ["11/2/2012", "11/15/2012"],\n SemiMonthEnd: ["11/2/2012", "11/15/2012"],\n Week: ["11/2/2012", "11/9/2012"],\n YearBegin: ["11/2/2012", "1/1/2013"],\n YearEnd: ["11/2/2012", "12/31/2012"],\n BYearBegin: ["11/2/2012", "1/1/2013"],\n BYearEnd: ["11/2/2012", "12/31/2012"],\n QuarterBegin: ["11/2/2012", "12/1/2012"],\n QuarterEnd: ["11/2/2012", "12/31/2012"],\n BQuarterBegin: 
["11/2/2012", "12/3/2012"],\n BQuarterEnd: ["11/2/2012", "12/31/2012"],\n Day: ["11/4/2012", "11/4/2012 23:00"],\n }.items()\n\n @pytest.mark.parametrize("tup", offset_classes)\n def test_all_offset_classes(self, tup):\n offset, test_values = tup\n\n first = Timestamp(test_values[0], tz="US/Eastern") + offset()\n second = Timestamp(test_values[1], tz="US/Eastern")\n assert first == second\n\n\n@pytest.mark.parametrize(\n "original_dt, target_dt, offset, tz",\n [\n pytest.param(\n Timestamp("1900-01-01"),\n Timestamp("1905-07-01"),\n MonthBegin(66),\n "Africa/Lagos",\n marks=pytest.mark.xfail(\n pytz_version < Version("2020.5") or pytz_version == Version("2022.2"),\n reason="GH#41906: pytz utc transition dates changed",\n ),\n ),\n (\n Timestamp("2021-10-01 01:15"),\n Timestamp("2021-10-31 01:15"),\n MonthEnd(1),\n "Europe/London",\n ),\n (\n Timestamp("2010-12-05 02:59"),\n Timestamp("2010-10-31 02:59"),\n SemiMonthEnd(-3),\n "Europe/Paris",\n ),\n (\n Timestamp("2021-10-31 01:20"),\n Timestamp("2021-11-07 01:20"),\n CustomBusinessDay(2, weekmask="Sun Mon"),\n "US/Eastern",\n ),\n (\n Timestamp("2020-04-03 01:30"),\n Timestamp("2020-11-01 01:30"),\n YearBegin(1, month=11),\n "America/Chicago",\n ),\n ],\n)\ndef test_nontick_offset_with_ambiguous_time_error(original_dt, target_dt, offset, tz):\n # .apply for non-Tick offsets throws AmbiguousTimeError when the target dt\n # is dst-ambiguous\n localized_dt = original_dt.tz_localize(tz)\n\n msg = f"Cannot infer dst time from {target_dt}, try using the 'ambiguous' argument"\n with pytest.raises(pytz.AmbiguousTimeError, match=msg):\n localized_dt + offset\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_dst.py | test_dst.py | Python | 9,139 | 0.95 | 0.096154 | 0.080851 | node-utils | 177 | 2024-01-18T06:53:09.513358 | MIT | true | 3cdca9c2180471105ce0197d83b5f95b |
"""\nTests for the following offsets:\n- Easter\n"""\nfrom __future__ import annotations\n\nfrom datetime import datetime\n\nimport pytest\n\nfrom pandas.tests.tseries.offsets.common import assert_offset_equal\n\nfrom pandas.tseries.offsets import Easter\n\n\nclass TestEaster:\n @pytest.mark.parametrize(\n "offset,date,expected",\n [\n (Easter(), datetime(2010, 1, 1), datetime(2010, 4, 4)),\n (Easter(), datetime(2010, 4, 5), datetime(2011, 4, 24)),\n (Easter(2), datetime(2010, 1, 1), datetime(2011, 4, 24)),\n (Easter(), datetime(2010, 4, 4), datetime(2011, 4, 24)),\n (Easter(2), datetime(2010, 4, 4), datetime(2012, 4, 8)),\n (-Easter(), datetime(2011, 1, 1), datetime(2010, 4, 4)),\n (-Easter(), datetime(2010, 4, 5), datetime(2010, 4, 4)),\n (-Easter(2), datetime(2011, 1, 1), datetime(2009, 4, 12)),\n (-Easter(), datetime(2010, 4, 4), datetime(2009, 4, 12)),\n (-Easter(2), datetime(2010, 4, 4), datetime(2008, 3, 23)),\n ],\n )\n def test_offset(self, offset, date, expected):\n assert_offset_equal(offset, date, expected)\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_easter.py | test_easter.py | Python | 1,150 | 0.85 | 0.090909 | 0 | react-lib | 175 | 2025-01-13T02:07:24.081546 | MIT | true | ec63a30f8b867858a7337e83e1eb0f26 |
"""\nTests for Fiscal Year and Fiscal Quarter offset classes\n"""\nfrom datetime import datetime\n\nfrom dateutil.relativedelta import relativedelta\nimport pytest\n\nfrom pandas import Timestamp\nimport pandas._testing as tm\nfrom pandas.tests.tseries.offsets.common import (\n WeekDay,\n assert_is_on_offset,\n assert_offset_equal,\n)\n\nfrom pandas.tseries.offsets import (\n FY5253,\n FY5253Quarter,\n)\n\n\ndef makeFY5253LastOfMonthQuarter(*args, **kwds):\n return FY5253Quarter(*args, variation="last", **kwds)\n\n\ndef makeFY5253NearestEndMonthQuarter(*args, **kwds):\n return FY5253Quarter(*args, variation="nearest", **kwds)\n\n\ndef makeFY5253NearestEndMonth(*args, **kwds):\n return FY5253(*args, variation="nearest", **kwds)\n\n\ndef makeFY5253LastOfMonth(*args, **kwds):\n return FY5253(*args, variation="last", **kwds)\n\n\ndef test_get_offset_name():\n assert (\n makeFY5253LastOfMonthQuarter(\n weekday=1, startingMonth=3, qtr_with_extra_week=4\n ).freqstr\n == "REQ-L-MAR-TUE-4"\n )\n assert (\n makeFY5253NearestEndMonthQuarter(\n weekday=1, startingMonth=3, qtr_with_extra_week=3\n ).freqstr\n == "REQ-N-MAR-TUE-3"\n )\n\n\nclass TestFY5253LastOfMonth:\n offset_lom_sat_aug = makeFY5253LastOfMonth(1, startingMonth=8, weekday=WeekDay.SAT)\n offset_lom_sat_sep = makeFY5253LastOfMonth(1, startingMonth=9, weekday=WeekDay.SAT)\n\n on_offset_cases = [\n # From Wikipedia (see:\n # https://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar#Last_Saturday_of_the_month_at_fiscal_year_end)\n (offset_lom_sat_aug, datetime(2006, 8, 26), True),\n (offset_lom_sat_aug, datetime(2007, 8, 25), True),\n (offset_lom_sat_aug, datetime(2008, 8, 30), True),\n (offset_lom_sat_aug, datetime(2009, 8, 29), True),\n (offset_lom_sat_aug, datetime(2010, 8, 28), True),\n (offset_lom_sat_aug, datetime(2011, 8, 27), True),\n (offset_lom_sat_aug, datetime(2012, 8, 25), True),\n (offset_lom_sat_aug, datetime(2013, 8, 31), True),\n (offset_lom_sat_aug, datetime(2014, 8, 30), True),\n 
(offset_lom_sat_aug, datetime(2015, 8, 29), True),\n (offset_lom_sat_aug, datetime(2016, 8, 27), True),\n (offset_lom_sat_aug, datetime(2017, 8, 26), True),\n (offset_lom_sat_aug, datetime(2018, 8, 25), True),\n (offset_lom_sat_aug, datetime(2019, 8, 31), True),\n (offset_lom_sat_aug, datetime(2006, 8, 27), False),\n (offset_lom_sat_aug, datetime(2007, 8, 28), False),\n (offset_lom_sat_aug, datetime(2008, 8, 31), False),\n (offset_lom_sat_aug, datetime(2009, 8, 30), False),\n (offset_lom_sat_aug, datetime(2010, 8, 29), False),\n (offset_lom_sat_aug, datetime(2011, 8, 28), False),\n (offset_lom_sat_aug, datetime(2006, 8, 25), False),\n (offset_lom_sat_aug, datetime(2007, 8, 24), False),\n (offset_lom_sat_aug, datetime(2008, 8, 29), False),\n (offset_lom_sat_aug, datetime(2009, 8, 28), False),\n (offset_lom_sat_aug, datetime(2010, 8, 27), False),\n (offset_lom_sat_aug, datetime(2011, 8, 26), False),\n (offset_lom_sat_aug, datetime(2019, 8, 30), False),\n # From GMCR (see for example:\n # http://yahoo.brand.edgar-online.com/Default.aspx?\n # companyid=3184&formtypeID=7)\n (offset_lom_sat_sep, datetime(2010, 9, 25), True),\n (offset_lom_sat_sep, datetime(2011, 9, 24), True),\n (offset_lom_sat_sep, datetime(2012, 9, 29), True),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n def test_apply(self):\n offset_lom_aug_sat = makeFY5253LastOfMonth(startingMonth=8, weekday=WeekDay.SAT)\n offset_lom_aug_sat_1 = makeFY5253LastOfMonth(\n n=1, startingMonth=8, weekday=WeekDay.SAT\n )\n\n date_seq_lom_aug_sat = [\n datetime(2006, 8, 26),\n datetime(2007, 8, 25),\n datetime(2008, 8, 30),\n datetime(2009, 8, 29),\n datetime(2010, 8, 28),\n datetime(2011, 8, 27),\n datetime(2012, 8, 25),\n datetime(2013, 8, 31),\n datetime(2014, 8, 30),\n datetime(2015, 8, 29),\n datetime(2016, 8, 27),\n ]\n\n tests = [\n (offset_lom_aug_sat, date_seq_lom_aug_sat),\n 
(offset_lom_aug_sat_1, date_seq_lom_aug_sat),\n (offset_lom_aug_sat, [datetime(2006, 8, 25)] + date_seq_lom_aug_sat),\n (offset_lom_aug_sat_1, [datetime(2006, 8, 27)] + date_seq_lom_aug_sat[1:]),\n (\n makeFY5253LastOfMonth(n=-1, startingMonth=8, weekday=WeekDay.SAT),\n list(reversed(date_seq_lom_aug_sat)),\n ),\n ]\n for test in tests:\n offset, data = test\n current = data[0]\n for datum in data[1:]:\n current = current + offset\n assert current == datum\n\n\nclass TestFY5253NearestEndMonth:\n def test_get_year_end(self):\n assert makeFY5253NearestEndMonth(\n startingMonth=8, weekday=WeekDay.SAT\n ).get_year_end(datetime(2013, 1, 1)) == datetime(2013, 8, 31)\n assert makeFY5253NearestEndMonth(\n startingMonth=8, weekday=WeekDay.SUN\n ).get_year_end(datetime(2013, 1, 1)) == datetime(2013, 9, 1)\n assert makeFY5253NearestEndMonth(\n startingMonth=8, weekday=WeekDay.FRI\n ).get_year_end(datetime(2013, 1, 1)) == datetime(2013, 8, 30)\n\n offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12, variation="nearest")\n assert offset_n.get_year_end(datetime(2012, 1, 1)) == datetime(2013, 1, 1)\n assert offset_n.get_year_end(datetime(2012, 1, 10)) == datetime(2013, 1, 1)\n\n assert offset_n.get_year_end(datetime(2013, 1, 1)) == datetime(2013, 12, 31)\n assert offset_n.get_year_end(datetime(2013, 1, 2)) == datetime(2013, 12, 31)\n assert offset_n.get_year_end(datetime(2013, 1, 3)) == datetime(2013, 12, 31)\n assert offset_n.get_year_end(datetime(2013, 1, 10)) == datetime(2013, 12, 31)\n\n JNJ = FY5253(n=1, startingMonth=12, weekday=6, variation="nearest")\n assert JNJ.get_year_end(datetime(2006, 1, 1)) == datetime(2006, 12, 31)\n\n offset_lom_aug_sat = makeFY5253NearestEndMonth(\n 1, startingMonth=8, weekday=WeekDay.SAT\n )\n offset_lom_aug_thu = makeFY5253NearestEndMonth(\n 1, startingMonth=8, weekday=WeekDay.THU\n )\n offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12, variation="nearest")\n\n on_offset_cases = [\n # From Wikipedia (see:\n # 
https://en.wikipedia.org/wiki/4%E2%80%934%E2%80%935_calendar\n # #Saturday_nearest_the_end_of_month)\n # 2006-09-02 2006 September 2\n # 2007-09-01 2007 September 1\n # 2008-08-30 2008 August 30 (leap year)\n # 2009-08-29 2009 August 29\n # 2010-08-28 2010 August 28\n # 2011-09-03 2011 September 3\n # 2012-09-01 2012 September 1 (leap year)\n # 2013-08-31 2013 August 31\n # 2014-08-30 2014 August 30\n # 2015-08-29 2015 August 29\n # 2016-09-03 2016 September 3 (leap year)\n # 2017-09-02 2017 September 2\n # 2018-09-01 2018 September 1\n # 2019-08-31 2019 August 31\n (offset_lom_aug_sat, datetime(2006, 9, 2), True),\n (offset_lom_aug_sat, datetime(2007, 9, 1), True),\n (offset_lom_aug_sat, datetime(2008, 8, 30), True),\n (offset_lom_aug_sat, datetime(2009, 8, 29), True),\n (offset_lom_aug_sat, datetime(2010, 8, 28), True),\n (offset_lom_aug_sat, datetime(2011, 9, 3), True),\n (offset_lom_aug_sat, datetime(2016, 9, 3), True),\n (offset_lom_aug_sat, datetime(2017, 9, 2), True),\n (offset_lom_aug_sat, datetime(2018, 9, 1), True),\n (offset_lom_aug_sat, datetime(2019, 8, 31), True),\n (offset_lom_aug_sat, datetime(2006, 8, 27), False),\n (offset_lom_aug_sat, datetime(2007, 8, 28), False),\n (offset_lom_aug_sat, datetime(2008, 8, 31), False),\n (offset_lom_aug_sat, datetime(2009, 8, 30), False),\n (offset_lom_aug_sat, datetime(2010, 8, 29), False),\n (offset_lom_aug_sat, datetime(2011, 8, 28), False),\n (offset_lom_aug_sat, datetime(2006, 8, 25), False),\n (offset_lom_aug_sat, datetime(2007, 8, 24), False),\n (offset_lom_aug_sat, datetime(2008, 8, 29), False),\n (offset_lom_aug_sat, datetime(2009, 8, 28), False),\n (offset_lom_aug_sat, datetime(2010, 8, 27), False),\n (offset_lom_aug_sat, datetime(2011, 8, 26), False),\n (offset_lom_aug_sat, datetime(2019, 8, 30), False),\n # From Micron, see:\n # http://google.brand.edgar-online.com/?sym=MU&formtypeID=7\n (offset_lom_aug_thu, datetime(2012, 8, 30), True),\n (offset_lom_aug_thu, datetime(2011, 9, 1), True),\n (offset_n, 
datetime(2012, 12, 31), False),\n (offset_n, datetime(2013, 1, 1), True),\n (offset_n, datetime(2013, 1, 2), False),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n def test_apply(self):\n date_seq_nem_8_sat = [\n datetime(2006, 9, 2),\n datetime(2007, 9, 1),\n datetime(2008, 8, 30),\n datetime(2009, 8, 29),\n datetime(2010, 8, 28),\n datetime(2011, 9, 3),\n ]\n\n JNJ = [\n datetime(2005, 1, 2),\n datetime(2006, 1, 1),\n datetime(2006, 12, 31),\n datetime(2007, 12, 30),\n datetime(2008, 12, 28),\n datetime(2010, 1, 3),\n datetime(2011, 1, 2),\n datetime(2012, 1, 1),\n datetime(2012, 12, 30),\n ]\n\n DEC_SAT = FY5253(n=-1, startingMonth=12, weekday=5, variation="nearest")\n\n tests = [\n (\n makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT),\n date_seq_nem_8_sat,\n ),\n (\n makeFY5253NearestEndMonth(n=1, startingMonth=8, weekday=WeekDay.SAT),\n date_seq_nem_8_sat,\n ),\n (\n makeFY5253NearestEndMonth(startingMonth=8, weekday=WeekDay.SAT),\n [datetime(2006, 9, 1)] + date_seq_nem_8_sat,\n ),\n (\n makeFY5253NearestEndMonth(n=1, startingMonth=8, weekday=WeekDay.SAT),\n [datetime(2006, 9, 3)] + date_seq_nem_8_sat[1:],\n ),\n (\n makeFY5253NearestEndMonth(n=-1, startingMonth=8, weekday=WeekDay.SAT),\n list(reversed(date_seq_nem_8_sat)),\n ),\n (\n makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN),\n JNJ,\n ),\n (\n makeFY5253NearestEndMonth(n=-1, startingMonth=12, weekday=WeekDay.SUN),\n list(reversed(JNJ)),\n ),\n (\n makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN),\n [datetime(2005, 1, 2), datetime(2006, 1, 1)],\n ),\n (\n makeFY5253NearestEndMonth(n=1, startingMonth=12, weekday=WeekDay.SUN),\n [datetime(2006, 1, 2), datetime(2006, 12, 31)],\n ),\n (DEC_SAT, [datetime(2013, 1, 15), datetime(2012, 12, 29)]),\n ]\n for test in tests:\n offset, data = test\n current = data[0]\n for datum in 
data[1:]:\n current = current + offset\n assert current == datum\n\n\nclass TestFY5253LastOfMonthQuarter:\n def test_is_anchored(self):\n msg = "FY5253Quarter.is_anchored is deprecated "\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert makeFY5253LastOfMonthQuarter(\n startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4\n ).is_anchored()\n assert makeFY5253LastOfMonthQuarter(\n weekday=WeekDay.SAT, startingMonth=3, qtr_with_extra_week=4\n ).is_anchored()\n assert not makeFY5253LastOfMonthQuarter(\n 2, startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4\n ).is_anchored()\n\n def test_equality(self):\n assert makeFY5253LastOfMonthQuarter(\n startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4\n ) == makeFY5253LastOfMonthQuarter(\n startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4\n )\n assert makeFY5253LastOfMonthQuarter(\n startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4\n ) != makeFY5253LastOfMonthQuarter(\n startingMonth=1, weekday=WeekDay.SUN, qtr_with_extra_week=4\n )\n assert makeFY5253LastOfMonthQuarter(\n startingMonth=1, weekday=WeekDay.SAT, qtr_with_extra_week=4\n ) != makeFY5253LastOfMonthQuarter(\n startingMonth=2, weekday=WeekDay.SAT, qtr_with_extra_week=4\n )\n\n def test_offset(self):\n offset = makeFY5253LastOfMonthQuarter(\n 1, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4\n )\n offset2 = makeFY5253LastOfMonthQuarter(\n 2, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4\n )\n offset4 = makeFY5253LastOfMonthQuarter(\n 4, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4\n )\n\n offset_neg1 = makeFY5253LastOfMonthQuarter(\n -1, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4\n )\n offset_neg2 = makeFY5253LastOfMonthQuarter(\n -2, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4\n )\n\n GMCR = [\n datetime(2010, 3, 27),\n datetime(2010, 6, 26),\n datetime(2010, 9, 25),\n datetime(2010, 12, 25),\n datetime(2011, 3, 26),\n 
datetime(2011, 6, 25),\n datetime(2011, 9, 24),\n datetime(2011, 12, 24),\n datetime(2012, 3, 24),\n datetime(2012, 6, 23),\n datetime(2012, 9, 29),\n datetime(2012, 12, 29),\n datetime(2013, 3, 30),\n datetime(2013, 6, 29),\n ]\n\n assert_offset_equal(offset, base=GMCR[0], expected=GMCR[1])\n assert_offset_equal(\n offset, base=GMCR[0] + relativedelta(days=-1), expected=GMCR[0]\n )\n assert_offset_equal(offset, base=GMCR[1], expected=GMCR[2])\n\n assert_offset_equal(offset2, base=GMCR[0], expected=GMCR[2])\n assert_offset_equal(offset4, base=GMCR[0], expected=GMCR[4])\n\n assert_offset_equal(offset_neg1, base=GMCR[-1], expected=GMCR[-2])\n assert_offset_equal(\n offset_neg1, base=GMCR[-1] + relativedelta(days=+1), expected=GMCR[-1]\n )\n assert_offset_equal(offset_neg2, base=GMCR[-1], expected=GMCR[-3])\n\n date = GMCR[0] + relativedelta(days=-1)\n for expected in GMCR:\n assert_offset_equal(offset, date, expected)\n date = date + offset\n\n date = GMCR[-1] + relativedelta(days=+1)\n for expected in reversed(GMCR):\n assert_offset_equal(offset_neg1, date, expected)\n date = date + offset_neg1\n\n lomq_aug_sat_4 = makeFY5253LastOfMonthQuarter(\n 1, startingMonth=8, weekday=WeekDay.SAT, qtr_with_extra_week=4\n )\n lomq_sep_sat_4 = makeFY5253LastOfMonthQuarter(\n 1, startingMonth=9, weekday=WeekDay.SAT, qtr_with_extra_week=4\n )\n\n on_offset_cases = [\n # From Wikipedia\n (lomq_aug_sat_4, datetime(2006, 8, 26), True),\n (lomq_aug_sat_4, datetime(2007, 8, 25), True),\n (lomq_aug_sat_4, datetime(2008, 8, 30), True),\n (lomq_aug_sat_4, datetime(2009, 8, 29), True),\n (lomq_aug_sat_4, datetime(2010, 8, 28), True),\n (lomq_aug_sat_4, datetime(2011, 8, 27), True),\n (lomq_aug_sat_4, datetime(2019, 8, 31), True),\n (lomq_aug_sat_4, datetime(2006, 8, 27), False),\n (lomq_aug_sat_4, datetime(2007, 8, 28), False),\n (lomq_aug_sat_4, datetime(2008, 8, 31), False),\n (lomq_aug_sat_4, datetime(2009, 8, 30), False),\n (lomq_aug_sat_4, datetime(2010, 8, 29), False),\n 
(lomq_aug_sat_4, datetime(2011, 8, 28), False),\n (lomq_aug_sat_4, datetime(2006, 8, 25), False),\n (lomq_aug_sat_4, datetime(2007, 8, 24), False),\n (lomq_aug_sat_4, datetime(2008, 8, 29), False),\n (lomq_aug_sat_4, datetime(2009, 8, 28), False),\n (lomq_aug_sat_4, datetime(2010, 8, 27), False),\n (lomq_aug_sat_4, datetime(2011, 8, 26), False),\n (lomq_aug_sat_4, datetime(2019, 8, 30), False),\n # From GMCR\n (lomq_sep_sat_4, datetime(2010, 9, 25), True),\n (lomq_sep_sat_4, datetime(2011, 9, 24), True),\n (lomq_sep_sat_4, datetime(2012, 9, 29), True),\n (lomq_sep_sat_4, datetime(2013, 6, 29), True),\n (lomq_sep_sat_4, datetime(2012, 6, 23), True),\n (lomq_sep_sat_4, datetime(2012, 6, 30), False),\n (lomq_sep_sat_4, datetime(2013, 3, 30), True),\n (lomq_sep_sat_4, datetime(2012, 3, 24), True),\n (lomq_sep_sat_4, datetime(2012, 12, 29), True),\n (lomq_sep_sat_4, datetime(2011, 12, 24), True),\n # INTC (extra week in Q1)\n # See: http://www.intc.com/releasedetail.cfm?ReleaseID=542844\n (\n makeFY5253LastOfMonthQuarter(\n 1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1\n ),\n datetime(2011, 4, 2),\n True,\n ),\n # see: http://google.brand.edgar-online.com/?sym=INTC&formtypeID=7\n (\n makeFY5253LastOfMonthQuarter(\n 1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1\n ),\n datetime(2012, 12, 29),\n True,\n ),\n (\n makeFY5253LastOfMonthQuarter(\n 1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1\n ),\n datetime(2011, 12, 31),\n True,\n ),\n (\n makeFY5253LastOfMonthQuarter(\n 1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1\n ),\n datetime(2010, 12, 25),\n True,\n ),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n def test_year_has_extra_week(self):\n # End of long Q1\n assert makeFY5253LastOfMonthQuarter(\n 1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1\n 
).year_has_extra_week(datetime(2011, 4, 2))\n\n # Start of long Q1\n assert makeFY5253LastOfMonthQuarter(\n 1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1\n ).year_has_extra_week(datetime(2010, 12, 26))\n\n # End of year before year with long Q1\n assert not makeFY5253LastOfMonthQuarter(\n 1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1\n ).year_has_extra_week(datetime(2010, 12, 25))\n\n for year in [\n x for x in range(1994, 2011 + 1) if x not in [2011, 2005, 2000, 1994]\n ]:\n assert not makeFY5253LastOfMonthQuarter(\n 1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1\n ).year_has_extra_week(datetime(year, 4, 2))\n\n # Other long years\n assert makeFY5253LastOfMonthQuarter(\n 1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1\n ).year_has_extra_week(datetime(2005, 4, 2))\n\n assert makeFY5253LastOfMonthQuarter(\n 1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1\n ).year_has_extra_week(datetime(2000, 4, 2))\n\n assert makeFY5253LastOfMonthQuarter(\n 1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1\n ).year_has_extra_week(datetime(1994, 4, 2))\n\n def test_get_weeks(self):\n sat_dec_1 = makeFY5253LastOfMonthQuarter(\n 1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=1\n )\n sat_dec_4 = makeFY5253LastOfMonthQuarter(\n 1, startingMonth=12, weekday=WeekDay.SAT, qtr_with_extra_week=4\n )\n\n assert sat_dec_1.get_weeks(datetime(2011, 4, 2)) == [14, 13, 13, 13]\n assert sat_dec_4.get_weeks(datetime(2011, 4, 2)) == [13, 13, 13, 14]\n assert sat_dec_1.get_weeks(datetime(2010, 12, 25)) == [13, 13, 13, 13]\n\n\nclass TestFY5253NearestEndMonthQuarter:\n offset_nem_sat_aug_4 = makeFY5253NearestEndMonthQuarter(\n 1, startingMonth=8, weekday=WeekDay.SAT, qtr_with_extra_week=4\n )\n offset_nem_thu_aug_4 = makeFY5253NearestEndMonthQuarter(\n 1, startingMonth=8, weekday=WeekDay.THU, qtr_with_extra_week=4\n )\n offset_n = FY5253(weekday=WeekDay.TUE, startingMonth=12, 
variation="nearest")\n\n on_offset_cases = [\n # From Wikipedia\n (offset_nem_sat_aug_4, datetime(2006, 9, 2), True),\n (offset_nem_sat_aug_4, datetime(2007, 9, 1), True),\n (offset_nem_sat_aug_4, datetime(2008, 8, 30), True),\n (offset_nem_sat_aug_4, datetime(2009, 8, 29), True),\n (offset_nem_sat_aug_4, datetime(2010, 8, 28), True),\n (offset_nem_sat_aug_4, datetime(2011, 9, 3), True),\n (offset_nem_sat_aug_4, datetime(2016, 9, 3), True),\n (offset_nem_sat_aug_4, datetime(2017, 9, 2), True),\n (offset_nem_sat_aug_4, datetime(2018, 9, 1), True),\n (offset_nem_sat_aug_4, datetime(2019, 8, 31), True),\n (offset_nem_sat_aug_4, datetime(2006, 8, 27), False),\n (offset_nem_sat_aug_4, datetime(2007, 8, 28), False),\n (offset_nem_sat_aug_4, datetime(2008, 8, 31), False),\n (offset_nem_sat_aug_4, datetime(2009, 8, 30), False),\n (offset_nem_sat_aug_4, datetime(2010, 8, 29), False),\n (offset_nem_sat_aug_4, datetime(2011, 8, 28), False),\n (offset_nem_sat_aug_4, datetime(2006, 8, 25), False),\n (offset_nem_sat_aug_4, datetime(2007, 8, 24), False),\n (offset_nem_sat_aug_4, datetime(2008, 8, 29), False),\n (offset_nem_sat_aug_4, datetime(2009, 8, 28), False),\n (offset_nem_sat_aug_4, datetime(2010, 8, 27), False),\n (offset_nem_sat_aug_4, datetime(2011, 8, 26), False),\n (offset_nem_sat_aug_4, datetime(2019, 8, 30), False),\n # From Micron, see:\n # http://google.brand.edgar-online.com/?sym=MU&formtypeID=7\n (offset_nem_thu_aug_4, datetime(2012, 8, 30), True),\n (offset_nem_thu_aug_4, datetime(2011, 9, 1), True),\n # See: http://google.brand.edgar-online.com/?sym=MU&formtypeID=13\n (offset_nem_thu_aug_4, datetime(2013, 5, 30), True),\n (offset_nem_thu_aug_4, datetime(2013, 2, 28), True),\n (offset_nem_thu_aug_4, datetime(2012, 11, 29), True),\n (offset_nem_thu_aug_4, datetime(2012, 5, 31), True),\n (offset_nem_thu_aug_4, datetime(2007, 3, 1), True),\n (offset_nem_thu_aug_4, datetime(1994, 3, 3), True),\n (offset_n, datetime(2012, 12, 31), False),\n (offset_n, datetime(2013, 
1, 1), True),\n (offset_n, datetime(2013, 1, 2), False),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n def test_offset(self):\n offset = makeFY5253NearestEndMonthQuarter(\n 1, startingMonth=8, weekday=WeekDay.THU, qtr_with_extra_week=4\n )\n\n MU = [\n datetime(2012, 5, 31),\n datetime(2012, 8, 30),\n datetime(2012, 11, 29),\n datetime(2013, 2, 28),\n datetime(2013, 5, 30),\n ]\n\n date = MU[0] + relativedelta(days=-1)\n for expected in MU:\n assert_offset_equal(offset, date, expected)\n date = date + offset\n\n assert_offset_equal(offset, datetime(2012, 5, 31), datetime(2012, 8, 30))\n assert_offset_equal(offset, datetime(2012, 5, 30), datetime(2012, 5, 31))\n\n offset2 = FY5253Quarter(\n weekday=5, startingMonth=12, variation="last", qtr_with_extra_week=4\n )\n\n assert_offset_equal(offset2, datetime(2013, 1, 15), datetime(2013, 3, 30))\n\n\ndef test_bunched_yearends():\n # GH#14774 cases with two fiscal year-ends in the same calendar-year\n fy = FY5253(n=1, weekday=5, startingMonth=12, variation="nearest")\n dt = Timestamp("2004-01-01")\n assert fy.rollback(dt) == Timestamp("2002-12-28")\n assert (-fy)._apply(dt) == Timestamp("2002-12-28")\n assert dt - fy == Timestamp("2002-12-28")\n\n assert fy.rollforward(dt) == Timestamp("2004-01-03")\n assert fy._apply(dt) == Timestamp("2004-01-03")\n assert fy + dt == Timestamp("2004-01-03")\n assert dt + fy == Timestamp("2004-01-03")\n\n # Same thing, but starting from a Timestamp in the previous year.\n dt = Timestamp("2003-12-31")\n assert fy.rollback(dt) == Timestamp("2002-12-28")\n assert (-fy)._apply(dt) == Timestamp("2002-12-28")\n assert dt - fy == Timestamp("2002-12-28")\n\n\ndef test_fy5253_last_onoffset():\n # GH#18877 dates on the year-end but not normalized to midnight\n offset = FY5253(n=-5, startingMonth=5, variation="last", weekday=0)\n ts = Timestamp("1984-05-28 
06:29:43.955911354+0200", tz="Europe/San_Marino")\n fast = offset.is_on_offset(ts)\n slow = (ts + offset) - offset == ts\n assert fast == slow\n\n\ndef test_fy5253_nearest_onoffset():\n # GH#18877 dates on the year-end but not normalized to midnight\n offset = FY5253(n=3, startingMonth=7, variation="nearest", weekday=2)\n ts = Timestamp("2032-07-28 00:12:59.035729419+0000", tz="Africa/Dakar")\n fast = offset.is_on_offset(ts)\n slow = (ts + offset) - offset == ts\n assert fast == slow\n\n\ndef test_fy5253qtr_onoffset_nearest():\n # GH#19036\n ts = Timestamp("1985-09-02 23:57:46.232550356-0300", tz="Atlantic/Bermuda")\n offset = FY5253Quarter(\n n=3, qtr_with_extra_week=1, startingMonth=2, variation="nearest", weekday=0\n )\n fast = offset.is_on_offset(ts)\n slow = (ts + offset) - offset == ts\n assert fast == slow\n\n\ndef test_fy5253qtr_onoffset_last():\n # GH#19036\n offset = FY5253Quarter(\n n=-2, qtr_with_extra_week=1, startingMonth=7, variation="last", weekday=2\n )\n ts = Timestamp("2011-01-26 19:03:40.331096129+0200", tz="Africa/Windhoek")\n slow = (ts + offset) - offset == ts\n fast = offset.is_on_offset(ts)\n assert fast == slow\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_fiscal.py | test_fiscal.py | Python | 26,732 | 0.95 | 0.059451 | 0.074394 | react-lib | 47 | 2025-06-09T00:28:15.990033 | Apache-2.0 | true | b751a98d7780b2f667268f2c96687ffe |
"""\nTests for offset behavior with indices.\n"""\nimport pytest\n\nfrom pandas import (\n Series,\n date_range,\n)\n\nfrom pandas.tseries.offsets import (\n BMonthBegin,\n BMonthEnd,\n BQuarterBegin,\n BQuarterEnd,\n BYearBegin,\n BYearEnd,\n MonthBegin,\n MonthEnd,\n QuarterBegin,\n QuarterEnd,\n YearBegin,\n YearEnd,\n)\n\n\n@pytest.mark.parametrize("n", [-2, 1])\n@pytest.mark.parametrize(\n "cls",\n [\n MonthBegin,\n MonthEnd,\n BMonthBegin,\n BMonthEnd,\n QuarterBegin,\n QuarterEnd,\n BQuarterBegin,\n BQuarterEnd,\n YearBegin,\n YearEnd,\n BYearBegin,\n BYearEnd,\n ],\n)\ndef test_apply_index(cls, n):\n offset = cls(n=n)\n rng = date_range(start="1/1/2000", periods=100000, freq="min")\n ser = Series(rng)\n\n res = rng + offset\n assert res.freq is None # not retained\n assert res[0] == rng[0] + offset\n assert res[-1] == rng[-1] + offset\n res2 = ser + offset\n # apply_index is only for indexes, not series, so no res2_v2\n assert res2.iloc[0] == ser.iloc[0] + offset\n assert res2.iloc[-1] == ser.iloc[-1] + offset\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_index.py | test_index.py | Python | 1,147 | 0.95 | 0.052632 | 0.019231 | vue-tools | 40 | 2025-06-03T16:01:52.174651 | Apache-2.0 | true | c89c987e2c4a45b1442f1b78faa74f75 |
"""\nTests for the following offsets:\n- SemiMonthBegin\n- SemiMonthEnd\n- MonthBegin\n- MonthEnd\n"""\nfrom __future__ import annotations\n\nfrom datetime import datetime\n\nimport pytest\n\nfrom pandas._libs.tslibs import Timestamp\nfrom pandas._libs.tslibs.offsets import (\n MonthBegin,\n MonthEnd,\n SemiMonthBegin,\n SemiMonthEnd,\n)\n\nfrom pandas import (\n DatetimeIndex,\n Series,\n _testing as tm,\n)\nfrom pandas.tests.tseries.offsets.common import (\n assert_is_on_offset,\n assert_offset_equal,\n)\n\n\nclass TestSemiMonthEnd:\n def test_offset_whole_year(self):\n dates = (\n datetime(2007, 12, 31),\n datetime(2008, 1, 15),\n datetime(2008, 1, 31),\n datetime(2008, 2, 15),\n datetime(2008, 2, 29),\n datetime(2008, 3, 15),\n datetime(2008, 3, 31),\n datetime(2008, 4, 15),\n datetime(2008, 4, 30),\n datetime(2008, 5, 15),\n datetime(2008, 5, 31),\n datetime(2008, 6, 15),\n datetime(2008, 6, 30),\n datetime(2008, 7, 15),\n datetime(2008, 7, 31),\n datetime(2008, 8, 15),\n datetime(2008, 8, 31),\n datetime(2008, 9, 15),\n datetime(2008, 9, 30),\n datetime(2008, 10, 15),\n datetime(2008, 10, 31),\n datetime(2008, 11, 15),\n datetime(2008, 11, 30),\n datetime(2008, 12, 15),\n datetime(2008, 12, 31),\n )\n\n for base, exp_date in zip(dates[:-1], dates[1:]):\n assert_offset_equal(SemiMonthEnd(), base, exp_date)\n\n # ensure .apply_index works as expected\n shift = DatetimeIndex(dates[:-1])\n with tm.assert_produces_warning(None):\n # GH#22535 check that we don't get a FutureWarning from adding\n # an integer array to PeriodIndex\n result = SemiMonthEnd() + shift\n\n exp = DatetimeIndex(dates[1:])\n tm.assert_index_equal(result, exp)\n\n offset_cases = []\n offset_cases.append(\n (\n SemiMonthEnd(),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 15),\n datetime(2008, 1, 15): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 2, 15),\n datetime(2006, 12, 14): datetime(2006, 12, 15),\n datetime(2006, 12, 29): datetime(2006, 12, 31),\n datetime(2006, 12, 
31): datetime(2007, 1, 15),\n datetime(2007, 1, 1): datetime(2007, 1, 15),\n datetime(2006, 12, 1): datetime(2006, 12, 15),\n datetime(2006, 12, 15): datetime(2006, 12, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n SemiMonthEnd(day_of_month=20),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 20),\n datetime(2008, 1, 15): datetime(2008, 1, 20),\n datetime(2008, 1, 21): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 2, 20),\n datetime(2006, 12, 14): datetime(2006, 12, 20),\n datetime(2006, 12, 29): datetime(2006, 12, 31),\n datetime(2006, 12, 31): datetime(2007, 1, 20),\n datetime(2007, 1, 1): datetime(2007, 1, 20),\n datetime(2006, 12, 1): datetime(2006, 12, 20),\n datetime(2006, 12, 15): datetime(2006, 12, 20),\n },\n )\n )\n\n offset_cases.append(\n (\n SemiMonthEnd(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 15),\n datetime(2008, 1, 16): datetime(2008, 1, 31),\n datetime(2008, 1, 15): datetime(2008, 1, 15),\n datetime(2008, 1, 31): datetime(2008, 1, 31),\n datetime(2006, 12, 29): datetime(2006, 12, 31),\n datetime(2006, 12, 31): datetime(2006, 12, 31),\n datetime(2007, 1, 1): datetime(2007, 1, 15),\n },\n )\n )\n\n offset_cases.append(\n (\n SemiMonthEnd(0, day_of_month=16),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 16),\n datetime(2008, 1, 16): datetime(2008, 1, 16),\n datetime(2008, 1, 15): datetime(2008, 1, 16),\n datetime(2008, 1, 31): datetime(2008, 1, 31),\n datetime(2006, 12, 29): datetime(2006, 12, 31),\n datetime(2006, 12, 31): datetime(2006, 12, 31),\n datetime(2007, 1, 1): datetime(2007, 1, 16),\n },\n )\n )\n\n offset_cases.append(\n (\n SemiMonthEnd(2),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 2, 29),\n datetime(2006, 12, 29): datetime(2007, 1, 15),\n datetime(2006, 12, 31): datetime(2007, 1, 31),\n datetime(2007, 1, 1): datetime(2007, 1, 31),\n datetime(2007, 1, 16): datetime(2007, 2, 15),\n datetime(2006, 11, 1): datetime(2006, 11, 30),\n },\n )\n )\n\n 
offset_cases.append(\n (\n SemiMonthEnd(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 31),\n datetime(2008, 6, 30): datetime(2008, 6, 15),\n datetime(2008, 12, 31): datetime(2008, 12, 15),\n datetime(2006, 12, 29): datetime(2006, 12, 15),\n datetime(2006, 12, 30): datetime(2006, 12, 15),\n datetime(2007, 1, 1): datetime(2006, 12, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n SemiMonthEnd(-1, day_of_month=4),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 31),\n datetime(2007, 1, 4): datetime(2006, 12, 31),\n datetime(2008, 6, 30): datetime(2008, 6, 4),\n datetime(2008, 12, 31): datetime(2008, 12, 4),\n datetime(2006, 12, 5): datetime(2006, 12, 4),\n datetime(2006, 12, 30): datetime(2006, 12, 4),\n datetime(2007, 1, 1): datetime(2006, 12, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n SemiMonthEnd(-2),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 15),\n datetime(2008, 6, 30): datetime(2008, 5, 31),\n datetime(2008, 3, 15): datetime(2008, 2, 15),\n datetime(2008, 12, 31): datetime(2008, 11, 30),\n datetime(2006, 12, 29): datetime(2006, 11, 30),\n datetime(2006, 12, 14): datetime(2006, 11, 15),\n datetime(2007, 1, 1): datetime(2006, 12, 15),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_apply_index(self, case):\n # https://github.com/pandas-dev/pandas/issues/34580\n offset, cases = case\n shift = DatetimeIndex(cases.keys())\n exp = DatetimeIndex(cases.values())\n\n with tm.assert_produces_warning(None):\n # GH#22535 check that we don't get a FutureWarning from adding\n # an integer array to PeriodIndex\n result = offset + shift\n tm.assert_index_equal(result, exp)\n\n on_offset_cases = [\n (datetime(2007, 12, 31), True),\n (datetime(2007, 12, 15), True),\n (datetime(2007, 12, 14), False),\n (datetime(2007, 12, 1), False),\n (datetime(2008, 
2, 29), True),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n dt, expected = case\n assert_is_on_offset(SemiMonthEnd(), dt, expected)\n\n @pytest.mark.parametrize("klass", [Series, DatetimeIndex])\n def test_vectorized_offset_addition(self, klass):\n shift = klass(\n [\n Timestamp("2000-01-15 00:15:00", tz="US/Central"),\n Timestamp("2000-02-15", tz="US/Central"),\n ],\n name="a",\n )\n\n with tm.assert_produces_warning(None):\n # GH#22535 check that we don't get a FutureWarning from adding\n # an integer array to PeriodIndex\n result = shift + SemiMonthEnd()\n result2 = SemiMonthEnd() + shift\n\n exp = klass(\n [\n Timestamp("2000-01-31 00:15:00", tz="US/Central"),\n Timestamp("2000-02-29", tz="US/Central"),\n ],\n name="a",\n )\n tm.assert_equal(result, exp)\n tm.assert_equal(result2, exp)\n\n shift = klass(\n [\n Timestamp("2000-01-01 00:15:00", tz="US/Central"),\n Timestamp("2000-02-01", tz="US/Central"),\n ],\n name="a",\n )\n\n with tm.assert_produces_warning(None):\n # GH#22535 check that we don't get a FutureWarning from adding\n # an integer array to PeriodIndex\n result = shift + SemiMonthEnd()\n result2 = SemiMonthEnd() + shift\n\n exp = klass(\n [\n Timestamp("2000-01-15 00:15:00", tz="US/Central"),\n Timestamp("2000-02-15", tz="US/Central"),\n ],\n name="a",\n )\n tm.assert_equal(result, exp)\n tm.assert_equal(result2, exp)\n\n\nclass TestSemiMonthBegin:\n def test_offset_whole_year(self):\n dates = (\n datetime(2007, 12, 15),\n datetime(2008, 1, 1),\n datetime(2008, 1, 15),\n datetime(2008, 2, 1),\n datetime(2008, 2, 15),\n datetime(2008, 3, 1),\n datetime(2008, 3, 15),\n datetime(2008, 4, 1),\n datetime(2008, 4, 15),\n datetime(2008, 5, 1),\n datetime(2008, 5, 15),\n datetime(2008, 6, 1),\n datetime(2008, 6, 15),\n datetime(2008, 7, 1),\n datetime(2008, 7, 15),\n datetime(2008, 8, 1),\n datetime(2008, 8, 15),\n datetime(2008, 9, 1),\n datetime(2008, 9, 15),\n datetime(2008, 10, 1),\n datetime(2008, 
10, 15),\n datetime(2008, 11, 1),\n datetime(2008, 11, 15),\n datetime(2008, 12, 1),\n datetime(2008, 12, 15),\n )\n\n for base, exp_date in zip(dates[:-1], dates[1:]):\n assert_offset_equal(SemiMonthBegin(), base, exp_date)\n\n # ensure .apply_index works as expected\n shift = DatetimeIndex(dates[:-1])\n with tm.assert_produces_warning(None):\n # GH#22535 check that we don't get a FutureWarning from adding\n # an integer array to PeriodIndex\n result = SemiMonthBegin() + shift\n\n exp = DatetimeIndex(dates[1:])\n tm.assert_index_equal(result, exp)\n\n offset_cases = [\n (\n SemiMonthBegin(),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 15),\n datetime(2008, 1, 15): datetime(2008, 2, 1),\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2006, 12, 14): datetime(2006, 12, 15),\n datetime(2006, 12, 29): datetime(2007, 1, 1),\n datetime(2006, 12, 31): datetime(2007, 1, 1),\n datetime(2007, 1, 1): datetime(2007, 1, 15),\n datetime(2006, 12, 1): datetime(2006, 12, 15),\n datetime(2006, 12, 15): datetime(2007, 1, 1),\n },\n ),\n (\n SemiMonthBegin(day_of_month=20),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 20),\n datetime(2008, 1, 15): datetime(2008, 1, 20),\n datetime(2008, 1, 21): datetime(2008, 2, 1),\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2006, 12, 14): datetime(2006, 12, 20),\n datetime(2006, 12, 29): datetime(2007, 1, 1),\n datetime(2006, 12, 31): datetime(2007, 1, 1),\n datetime(2007, 1, 1): datetime(2007, 1, 20),\n datetime(2006, 12, 1): datetime(2006, 12, 20),\n datetime(2006, 12, 15): datetime(2006, 12, 20),\n },\n ),\n (\n SemiMonthBegin(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 1, 16): datetime(2008, 2, 1),\n datetime(2008, 1, 15): datetime(2008, 1, 15),\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2006, 12, 29): datetime(2007, 1, 1),\n datetime(2006, 12, 2): datetime(2006, 12, 15),\n datetime(2007, 1, 1): datetime(2007, 1, 1),\n },\n ),\n (\n SemiMonthBegin(0, day_of_month=16),\n {\n 
datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 1, 16): datetime(2008, 1, 16),\n datetime(2008, 1, 15): datetime(2008, 1, 16),\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2006, 12, 29): datetime(2007, 1, 1),\n datetime(2006, 12, 31): datetime(2007, 1, 1),\n datetime(2007, 1, 5): datetime(2007, 1, 16),\n datetime(2007, 1, 1): datetime(2007, 1, 1),\n },\n ),\n (\n SemiMonthBegin(2),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 1),\n datetime(2008, 1, 31): datetime(2008, 2, 15),\n datetime(2006, 12, 1): datetime(2007, 1, 1),\n datetime(2006, 12, 29): datetime(2007, 1, 15),\n datetime(2006, 12, 15): datetime(2007, 1, 15),\n datetime(2007, 1, 1): datetime(2007, 2, 1),\n datetime(2007, 1, 16): datetime(2007, 2, 15),\n datetime(2006, 11, 1): datetime(2006, 12, 1),\n },\n ),\n (\n SemiMonthBegin(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 15),\n datetime(2008, 6, 30): datetime(2008, 6, 15),\n datetime(2008, 6, 14): datetime(2008, 6, 1),\n datetime(2008, 12, 31): datetime(2008, 12, 15),\n datetime(2006, 12, 29): datetime(2006, 12, 15),\n datetime(2006, 12, 15): datetime(2006, 12, 1),\n datetime(2007, 1, 1): datetime(2006, 12, 15),\n },\n ),\n (\n SemiMonthBegin(-1, day_of_month=4),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 4),\n datetime(2007, 1, 4): datetime(2007, 1, 1),\n datetime(2008, 6, 30): datetime(2008, 6, 4),\n datetime(2008, 12, 31): datetime(2008, 12, 4),\n datetime(2006, 12, 5): datetime(2006, 12, 4),\n datetime(2006, 12, 30): datetime(2006, 12, 4),\n datetime(2006, 12, 2): datetime(2006, 12, 1),\n datetime(2007, 1, 1): datetime(2006, 12, 4),\n },\n ),\n (\n SemiMonthBegin(-2),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 1),\n datetime(2008, 6, 30): datetime(2008, 6, 1),\n datetime(2008, 6, 14): datetime(2008, 5, 15),\n datetime(2008, 12, 31): datetime(2008, 12, 1),\n datetime(2006, 12, 29): datetime(2006, 12, 1),\n datetime(2006, 12, 15): datetime(2006, 11, 15),\n datetime(2007, 1, 1): datetime(2006, 12, 1),\n },\n 
),\n ]\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_apply_index(self, case):\n offset, cases = case\n shift = DatetimeIndex(cases.keys())\n\n with tm.assert_produces_warning(None):\n # GH#22535 check that we don't get a FutureWarning from adding\n # an integer array to PeriodIndex\n result = offset + shift\n\n exp = DatetimeIndex(cases.values())\n tm.assert_index_equal(result, exp)\n\n on_offset_cases = [\n (datetime(2007, 12, 1), True),\n (datetime(2007, 12, 15), True),\n (datetime(2007, 12, 14), False),\n (datetime(2007, 12, 31), False),\n (datetime(2008, 2, 15), True),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n dt, expected = case\n assert_is_on_offset(SemiMonthBegin(), dt, expected)\n\n @pytest.mark.parametrize("klass", [Series, DatetimeIndex])\n def test_vectorized_offset_addition(self, klass):\n shift = klass(\n [\n Timestamp("2000-01-15 00:15:00", tz="US/Central"),\n Timestamp("2000-02-15", tz="US/Central"),\n ],\n name="a",\n )\n with tm.assert_produces_warning(None):\n # GH#22535 check that we don't get a FutureWarning from adding\n # an integer array to PeriodIndex\n result = shift + SemiMonthBegin()\n result2 = SemiMonthBegin() + shift\n\n exp = klass(\n [\n Timestamp("2000-02-01 00:15:00", tz="US/Central"),\n Timestamp("2000-03-01", tz="US/Central"),\n ],\n name="a",\n )\n tm.assert_equal(result, exp)\n tm.assert_equal(result2, exp)\n\n shift = klass(\n [\n Timestamp("2000-01-01 00:15:00", tz="US/Central"),\n Timestamp("2000-02-01", tz="US/Central"),\n ],\n name="a",\n )\n with tm.assert_produces_warning(None):\n # GH#22535 check that we don't get a FutureWarning from adding\n # an integer array to PeriodIndex\n result = shift + SemiMonthBegin()\n result2 = SemiMonthBegin() + shift\n\n exp = 
klass(\n [\n Timestamp("2000-01-15 00:15:00", tz="US/Central"),\n Timestamp("2000-02-15", tz="US/Central"),\n ],\n name="a",\n )\n tm.assert_equal(result, exp)\n tm.assert_equal(result2, exp)\n\n\nclass TestMonthBegin:\n offset_cases = []\n # NOTE: I'm not entirely happy with the logic here for Begin -ss\n # see thread 'offset conventions' on the ML\n offset_cases.append(\n (\n MonthBegin(),\n {\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2008, 2, 1): datetime(2008, 3, 1),\n datetime(2006, 12, 31): datetime(2007, 1, 1),\n datetime(2006, 12, 1): datetime(2007, 1, 1),\n datetime(2007, 1, 31): datetime(2007, 2, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n MonthBegin(0),\n {\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2006, 12, 3): datetime(2007, 1, 1),\n datetime(2007, 1, 31): datetime(2007, 2, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n MonthBegin(2),\n {\n datetime(2008, 2, 29): datetime(2008, 4, 1),\n datetime(2008, 1, 31): datetime(2008, 3, 1),\n datetime(2006, 12, 31): datetime(2007, 2, 1),\n datetime(2007, 12, 28): datetime(2008, 2, 1),\n datetime(2007, 1, 1): datetime(2007, 3, 1),\n datetime(2006, 11, 1): datetime(2007, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n MonthBegin(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 1),\n datetime(2008, 5, 31): datetime(2008, 5, 1),\n datetime(2008, 12, 31): datetime(2008, 12, 1),\n datetime(2006, 12, 29): datetime(2006, 12, 1),\n datetime(2006, 1, 2): datetime(2006, 1, 1),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n\nclass TestMonthEnd:\n def test_day_of_month(self):\n dt = datetime(2007, 1, 1)\n offset = MonthEnd()\n\n result = dt + offset\n assert result == Timestamp(2007, 1, 31)\n\n result = result + offset\n assert result == Timestamp(2007, 2, 28)\n\n def 
test_normalize(self):\n dt = datetime(2007, 1, 1, 3)\n\n result = dt + MonthEnd(normalize=True)\n expected = dt.replace(hour=0) + MonthEnd()\n assert result == expected\n\n offset_cases = []\n offset_cases.append(\n (\n MonthEnd(),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 2, 29),\n datetime(2006, 12, 29): datetime(2006, 12, 31),\n datetime(2006, 12, 31): datetime(2007, 1, 31),\n datetime(2007, 1, 1): datetime(2007, 1, 31),\n datetime(2006, 12, 1): datetime(2006, 12, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n MonthEnd(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 1, 31),\n datetime(2006, 12, 29): datetime(2006, 12, 31),\n datetime(2006, 12, 31): datetime(2006, 12, 31),\n datetime(2007, 1, 1): datetime(2007, 1, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n MonthEnd(2),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 29),\n datetime(2008, 1, 31): datetime(2008, 3, 31),\n datetime(2006, 12, 29): datetime(2007, 1, 31),\n datetime(2006, 12, 31): datetime(2007, 2, 28),\n datetime(2007, 1, 1): datetime(2007, 2, 28),\n datetime(2006, 11, 1): datetime(2006, 12, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n MonthEnd(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 31),\n datetime(2008, 6, 30): datetime(2008, 5, 31),\n datetime(2008, 12, 31): datetime(2008, 11, 30),\n datetime(2006, 12, 29): datetime(2006, 11, 30),\n datetime(2006, 12, 30): datetime(2006, 11, 30),\n datetime(2007, 1, 1): datetime(2006, 12, 31),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (MonthEnd(), datetime(2007, 12, 31), True),\n (MonthEnd(), datetime(2008, 1, 1), False),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n 
assert_is_on_offset(offset, dt, expected)\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_month.py | test_month.py | Python | 23,243 | 0.95 | 0.040541 | 0.034826 | vue-tools | 618 | 2023-09-21T03:16:35.714501 | GPL-3.0 | true | f1c3367e0328b6c3288a45c9689f6a1c |
"""\nTests of pandas.tseries.offsets\n"""\nfrom __future__ import annotations\n\nfrom datetime import (\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import (\n NaT,\n Timedelta,\n Timestamp,\n conversion,\n timezones,\n)\nimport pandas._libs.tslibs.offsets as liboffsets\nfrom pandas._libs.tslibs.offsets import (\n _get_offset,\n _offset_map,\n to_offset,\n)\nfrom pandas._libs.tslibs.period import INVALID_FREQ_ERR_MSG\nfrom pandas.errors import PerformanceWarning\n\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Series,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.tests.tseries.offsets.common import WeekDay\n\nfrom pandas.tseries import offsets\nfrom pandas.tseries.offsets import (\n FY5253,\n BDay,\n BMonthEnd,\n BusinessHour,\n CustomBusinessDay,\n CustomBusinessHour,\n CustomBusinessMonthBegin,\n CustomBusinessMonthEnd,\n DateOffset,\n Easter,\n FY5253Quarter,\n LastWeekOfMonth,\n MonthBegin,\n Nano,\n Tick,\n Week,\n WeekOfMonth,\n)\n\n_ARITHMETIC_DATE_OFFSET = [\n "years",\n "months",\n "weeks",\n "days",\n "hours",\n "minutes",\n "seconds",\n "milliseconds",\n "microseconds",\n]\n\n\ndef _create_offset(klass, value=1, normalize=False):\n # create instance from offset class\n if klass is FY5253:\n klass = klass(\n n=value,\n startingMonth=1,\n weekday=1,\n variation="last",\n normalize=normalize,\n )\n elif klass is FY5253Quarter:\n klass = klass(\n n=value,\n startingMonth=1,\n weekday=1,\n qtr_with_extra_week=1,\n variation="last",\n normalize=normalize,\n )\n elif klass is LastWeekOfMonth:\n klass = klass(n=value, weekday=5, normalize=normalize)\n elif klass is WeekOfMonth:\n klass = klass(n=value, week=1, weekday=5, normalize=normalize)\n elif klass is Week:\n klass = klass(n=value, weekday=5, normalize=normalize)\n elif klass is DateOffset:\n klass = klass(days=value, normalize=normalize)\n else:\n klass = klass(value, normalize=normalize)\n return klass\n\n\n@pytest.fixture(\n params=[\n 
getattr(offsets, o)\n for o in offsets.__all__\n if issubclass(getattr(offsets, o), liboffsets.MonthOffset)\n and o != "MonthOffset"\n ]\n)\ndef month_classes(request):\n """\n Fixture for month based datetime offsets available for a time series.\n """\n return request.param\n\n\n@pytest.fixture(\n params=[\n getattr(offsets, o) for o in offsets.__all__ if o not in ("Tick", "BaseOffset")\n ]\n)\ndef offset_types(request):\n """\n Fixture for all the datetime offsets available for a time series.\n """\n return request.param\n\n\n@pytest.fixture\ndef dt():\n return Timestamp(datetime(2008, 1, 2))\n\n\n@pytest.fixture\ndef expecteds():\n # executed value created by _create_offset\n # are applied to 2011/01/01 09:00 (Saturday)\n # used for .apply and .rollforward\n return {\n "Day": Timestamp("2011-01-02 09:00:00"),\n "DateOffset": Timestamp("2011-01-02 09:00:00"),\n "BusinessDay": Timestamp("2011-01-03 09:00:00"),\n "CustomBusinessDay": Timestamp("2011-01-03 09:00:00"),\n "CustomBusinessMonthEnd": Timestamp("2011-01-31 09:00:00"),\n "CustomBusinessMonthBegin": Timestamp("2011-01-03 09:00:00"),\n "MonthBegin": Timestamp("2011-02-01 09:00:00"),\n "BusinessMonthBegin": Timestamp("2011-01-03 09:00:00"),\n "MonthEnd": Timestamp("2011-01-31 09:00:00"),\n "SemiMonthEnd": Timestamp("2011-01-15 09:00:00"),\n "SemiMonthBegin": Timestamp("2011-01-15 09:00:00"),\n "BusinessMonthEnd": Timestamp("2011-01-31 09:00:00"),\n "YearBegin": Timestamp("2012-01-01 09:00:00"),\n "BYearBegin": Timestamp("2011-01-03 09:00:00"),\n "YearEnd": Timestamp("2011-12-31 09:00:00"),\n "BYearEnd": Timestamp("2011-12-30 09:00:00"),\n "QuarterBegin": Timestamp("2011-03-01 09:00:00"),\n "BQuarterBegin": Timestamp("2011-03-01 09:00:00"),\n "QuarterEnd": Timestamp("2011-03-31 09:00:00"),\n "BQuarterEnd": Timestamp("2011-03-31 09:00:00"),\n "BusinessHour": Timestamp("2011-01-03 10:00:00"),\n "CustomBusinessHour": Timestamp("2011-01-03 10:00:00"),\n "WeekOfMonth": Timestamp("2011-01-08 09:00:00"),\n 
"LastWeekOfMonth": Timestamp("2011-01-29 09:00:00"),\n "FY5253Quarter": Timestamp("2011-01-25 09:00:00"),\n "FY5253": Timestamp("2011-01-25 09:00:00"),\n "Week": Timestamp("2011-01-08 09:00:00"),\n "Easter": Timestamp("2011-04-24 09:00:00"),\n "Hour": Timestamp("2011-01-01 10:00:00"),\n "Minute": Timestamp("2011-01-01 09:01:00"),\n "Second": Timestamp("2011-01-01 09:00:01"),\n "Milli": Timestamp("2011-01-01 09:00:00.001000"),\n "Micro": Timestamp("2011-01-01 09:00:00.000001"),\n "Nano": Timestamp("2011-01-01T09:00:00.000000001"),\n }\n\n\nclass TestCommon:\n def test_immutable(self, offset_types):\n # GH#21341 check that __setattr__ raises\n offset = _create_offset(offset_types)\n msg = "objects is not writable|DateOffset objects are immutable"\n with pytest.raises(AttributeError, match=msg):\n offset.normalize = True\n with pytest.raises(AttributeError, match=msg):\n offset.n = 91\n\n def test_return_type(self, offset_types):\n offset = _create_offset(offset_types)\n\n # make sure that we are returning a Timestamp\n result = Timestamp("20080101") + offset\n assert isinstance(result, Timestamp)\n\n # make sure that we are returning NaT\n assert NaT + offset is NaT\n assert offset + NaT is NaT\n\n assert NaT - offset is NaT\n assert (-offset)._apply(NaT) is NaT\n\n def test_offset_n(self, offset_types):\n offset = _create_offset(offset_types)\n assert offset.n == 1\n\n neg_offset = offset * -1\n assert neg_offset.n == -1\n\n mul_offset = offset * 3\n assert mul_offset.n == 3\n\n def test_offset_timedelta64_arg(self, offset_types):\n # check that offset._validate_n raises TypeError on a timedelt64\n # object\n off = _create_offset(offset_types)\n\n td64 = np.timedelta64(4567, "s")\n with pytest.raises(TypeError, match="argument must be an integer"):\n type(off)(n=td64, **off.kwds)\n\n def test_offset_mul_ndarray(self, offset_types):\n off = _create_offset(offset_types)\n\n expected = np.array([[off, off * 2], [off * 3, off * 4]])\n\n result = np.array([[1, 2], [3, 
4]]) * off\n tm.assert_numpy_array_equal(result, expected)\n\n result = off * np.array([[1, 2], [3, 4]])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_offset_freqstr(self, offset_types):\n offset = _create_offset(offset_types)\n\n freqstr = offset.freqstr\n if freqstr not in ("<Easter>", "<DateOffset: days=1>", "LWOM-SAT"):\n code = _get_offset(freqstr)\n assert offset.rule_code == code\n\n def _check_offsetfunc_works(self, offset, funcname, dt, expected, normalize=False):\n if normalize and issubclass(offset, Tick):\n # normalize=True disallowed for Tick subclasses GH#21427\n return\n\n offset_s = _create_offset(offset, normalize=normalize)\n func = getattr(offset_s, funcname)\n\n result = func(dt)\n assert isinstance(result, Timestamp)\n assert result == expected\n\n result = func(Timestamp(dt))\n assert isinstance(result, Timestamp)\n assert result == expected\n\n # see gh-14101\n ts = Timestamp(dt) + Nano(5)\n # test nanosecond is preserved\n with tm.assert_produces_warning(None):\n result = func(ts)\n\n assert isinstance(result, Timestamp)\n if normalize is False:\n assert result == expected + Nano(5)\n else:\n assert result == expected\n\n if isinstance(dt, np.datetime64):\n # test tz when input is datetime or Timestamp\n return\n\n for tz in [\n None,\n "UTC",\n "Asia/Tokyo",\n "US/Eastern",\n "dateutil/Asia/Tokyo",\n "dateutil/US/Pacific",\n ]:\n expected_localize = expected.tz_localize(tz)\n tz_obj = timezones.maybe_get_tz(tz)\n dt_tz = conversion.localize_pydatetime(dt, tz_obj)\n\n result = func(dt_tz)\n assert isinstance(result, Timestamp)\n assert result == expected_localize\n\n result = func(Timestamp(dt, tz=tz))\n assert isinstance(result, Timestamp)\n assert result == expected_localize\n\n # see gh-14101\n ts = Timestamp(dt, tz=tz) + Nano(5)\n # test nanosecond is preserved\n with tm.assert_produces_warning(None):\n result = func(ts)\n assert isinstance(result, Timestamp)\n if normalize is False:\n assert result == expected_localize + 
Nano(5)\n else:\n assert result == expected_localize\n\n def test_apply(self, offset_types, expecteds):\n sdt = datetime(2011, 1, 1, 9, 0)\n ndt = np.datetime64("2011-01-01 09:00")\n\n expected = expecteds[offset_types.__name__]\n expected_norm = Timestamp(expected.date())\n\n for dt in [sdt, ndt]:\n self._check_offsetfunc_works(offset_types, "_apply", dt, expected)\n\n self._check_offsetfunc_works(\n offset_types, "_apply", dt, expected_norm, normalize=True\n )\n\n def test_rollforward(self, offset_types, expecteds):\n expecteds = expecteds.copy()\n\n # result will not be changed if the target is on the offset\n no_changes = [\n "Day",\n "MonthBegin",\n "SemiMonthBegin",\n "YearBegin",\n "Week",\n "Hour",\n "Minute",\n "Second",\n "Milli",\n "Micro",\n "Nano",\n "DateOffset",\n ]\n for n in no_changes:\n expecteds[n] = Timestamp("2011/01/01 09:00")\n\n expecteds["BusinessHour"] = Timestamp("2011-01-03 09:00:00")\n expecteds["CustomBusinessHour"] = Timestamp("2011-01-03 09:00:00")\n\n # but be changed when normalize=True\n norm_expected = expecteds.copy()\n for k in norm_expected:\n norm_expected[k] = Timestamp(norm_expected[k].date())\n\n normalized = {\n "Day": Timestamp("2011-01-02 00:00:00"),\n "DateOffset": Timestamp("2011-01-02 00:00:00"),\n "MonthBegin": Timestamp("2011-02-01 00:00:00"),\n "SemiMonthBegin": Timestamp("2011-01-15 00:00:00"),\n "YearBegin": Timestamp("2012-01-01 00:00:00"),\n "Week": Timestamp("2011-01-08 00:00:00"),\n "Hour": Timestamp("2011-01-01 00:00:00"),\n "Minute": Timestamp("2011-01-01 00:00:00"),\n "Second": Timestamp("2011-01-01 00:00:00"),\n "Milli": Timestamp("2011-01-01 00:00:00"),\n "Micro": Timestamp("2011-01-01 00:00:00"),\n }\n norm_expected.update(normalized)\n\n sdt = datetime(2011, 1, 1, 9, 0)\n ndt = np.datetime64("2011-01-01 09:00")\n\n for dt in [sdt, ndt]:\n expected = expecteds[offset_types.__name__]\n self._check_offsetfunc_works(offset_types, "rollforward", dt, expected)\n expected = 
norm_expected[offset_types.__name__]\n self._check_offsetfunc_works(\n offset_types, "rollforward", dt, expected, normalize=True\n )\n\n def test_rollback(self, offset_types):\n expecteds = {\n "BusinessDay": Timestamp("2010-12-31 09:00:00"),\n "CustomBusinessDay": Timestamp("2010-12-31 09:00:00"),\n "CustomBusinessMonthEnd": Timestamp("2010-12-31 09:00:00"),\n "CustomBusinessMonthBegin": Timestamp("2010-12-01 09:00:00"),\n "BusinessMonthBegin": Timestamp("2010-12-01 09:00:00"),\n "MonthEnd": Timestamp("2010-12-31 09:00:00"),\n "SemiMonthEnd": Timestamp("2010-12-31 09:00:00"),\n "BusinessMonthEnd": Timestamp("2010-12-31 09:00:00"),\n "BYearBegin": Timestamp("2010-01-01 09:00:00"),\n "YearEnd": Timestamp("2010-12-31 09:00:00"),\n "BYearEnd": Timestamp("2010-12-31 09:00:00"),\n "QuarterBegin": Timestamp("2010-12-01 09:00:00"),\n "BQuarterBegin": Timestamp("2010-12-01 09:00:00"),\n "QuarterEnd": Timestamp("2010-12-31 09:00:00"),\n "BQuarterEnd": Timestamp("2010-12-31 09:00:00"),\n "BusinessHour": Timestamp("2010-12-31 17:00:00"),\n "CustomBusinessHour": Timestamp("2010-12-31 17:00:00"),\n "WeekOfMonth": Timestamp("2010-12-11 09:00:00"),\n "LastWeekOfMonth": Timestamp("2010-12-25 09:00:00"),\n "FY5253Quarter": Timestamp("2010-10-26 09:00:00"),\n "FY5253": Timestamp("2010-01-26 09:00:00"),\n "Easter": Timestamp("2010-04-04 09:00:00"),\n }\n\n # result will not be changed if the target is on the offset\n for n in [\n "Day",\n "MonthBegin",\n "SemiMonthBegin",\n "YearBegin",\n "Week",\n "Hour",\n "Minute",\n "Second",\n "Milli",\n "Micro",\n "Nano",\n "DateOffset",\n ]:\n expecteds[n] = Timestamp("2011/01/01 09:00")\n\n # but be changed when normalize=True\n norm_expected = expecteds.copy()\n for k in norm_expected:\n norm_expected[k] = Timestamp(norm_expected[k].date())\n\n normalized = {\n "Day": Timestamp("2010-12-31 00:00:00"),\n "DateOffset": Timestamp("2010-12-31 00:00:00"),\n "MonthBegin": Timestamp("2010-12-01 00:00:00"),\n "SemiMonthBegin": Timestamp("2010-12-15 
00:00:00"),\n "YearBegin": Timestamp("2010-01-01 00:00:00"),\n "Week": Timestamp("2010-12-25 00:00:00"),\n "Hour": Timestamp("2011-01-01 00:00:00"),\n "Minute": Timestamp("2011-01-01 00:00:00"),\n "Second": Timestamp("2011-01-01 00:00:00"),\n "Milli": Timestamp("2011-01-01 00:00:00"),\n "Micro": Timestamp("2011-01-01 00:00:00"),\n }\n norm_expected.update(normalized)\n\n sdt = datetime(2011, 1, 1, 9, 0)\n ndt = np.datetime64("2011-01-01 09:00")\n\n for dt in [sdt, ndt]:\n expected = expecteds[offset_types.__name__]\n self._check_offsetfunc_works(offset_types, "rollback", dt, expected)\n\n expected = norm_expected[offset_types.__name__]\n self._check_offsetfunc_works(\n offset_types, "rollback", dt, expected, normalize=True\n )\n\n def test_is_on_offset(self, offset_types, expecteds):\n dt = expecteds[offset_types.__name__]\n offset_s = _create_offset(offset_types)\n assert offset_s.is_on_offset(dt)\n\n # when normalize=True, is_on_offset checks time is 00:00:00\n if issubclass(offset_types, Tick):\n # normalize=True disallowed for Tick subclasses GH#21427\n return\n offset_n = _create_offset(offset_types, normalize=True)\n assert not offset_n.is_on_offset(dt)\n\n if offset_types in (BusinessHour, CustomBusinessHour):\n # In default BusinessHour (9:00-17:00), normalized time\n # cannot be in business hour range\n return\n date = datetime(dt.year, dt.month, dt.day)\n assert offset_n.is_on_offset(date)\n\n def test_add(self, offset_types, tz_naive_fixture, expecteds):\n tz = tz_naive_fixture\n dt = datetime(2011, 1, 1, 9, 0)\n\n offset_s = _create_offset(offset_types)\n expected = expecteds[offset_types.__name__]\n\n result_dt = dt + offset_s\n result_ts = Timestamp(dt) + offset_s\n for result in [result_dt, result_ts]:\n assert isinstance(result, Timestamp)\n assert result == expected\n\n expected_localize = expected.tz_localize(tz)\n result = Timestamp(dt, tz=tz) + offset_s\n assert isinstance(result, Timestamp)\n assert result == expected_localize\n\n # 
normalize=True, disallowed for Tick subclasses GH#21427\n if issubclass(offset_types, Tick):\n return\n offset_s = _create_offset(offset_types, normalize=True)\n expected = Timestamp(expected.date())\n\n result_dt = dt + offset_s\n result_ts = Timestamp(dt) + offset_s\n for result in [result_dt, result_ts]:\n assert isinstance(result, Timestamp)\n assert result == expected\n\n expected_localize = expected.tz_localize(tz)\n result = Timestamp(dt, tz=tz) + offset_s\n assert isinstance(result, Timestamp)\n assert result == expected_localize\n\n def test_add_empty_datetimeindex(self, offset_types, tz_naive_fixture):\n # GH#12724, GH#30336\n offset_s = _create_offset(offset_types)\n\n dti = DatetimeIndex([], tz=tz_naive_fixture).as_unit("ns")\n\n warn = None\n if isinstance(\n offset_s,\n (\n Easter,\n WeekOfMonth,\n LastWeekOfMonth,\n CustomBusinessDay,\n BusinessHour,\n CustomBusinessHour,\n CustomBusinessMonthBegin,\n CustomBusinessMonthEnd,\n FY5253,\n FY5253Quarter,\n ),\n ):\n # We don't have an optimized apply_index\n warn = PerformanceWarning\n\n # stacklevel checking is slow, and we have ~800 of variants of this\n # test, so let's only check the stacklevel in a subset of them\n check_stacklevel = tz_naive_fixture is None\n with tm.assert_produces_warning(warn, check_stacklevel=check_stacklevel):\n result = dti + offset_s\n tm.assert_index_equal(result, dti)\n with tm.assert_produces_warning(warn, check_stacklevel=check_stacklevel):\n result = offset_s + dti\n tm.assert_index_equal(result, dti)\n\n dta = dti._data\n with tm.assert_produces_warning(warn, check_stacklevel=check_stacklevel):\n result = dta + offset_s\n tm.assert_equal(result, dta)\n with tm.assert_produces_warning(warn, check_stacklevel=check_stacklevel):\n result = offset_s + dta\n tm.assert_equal(result, dta)\n\n def test_pickle_roundtrip(self, offset_types):\n off = _create_offset(offset_types)\n res = tm.round_trip_pickle(off)\n assert off == res\n if type(off) is not DateOffset:\n for attr in 
off._attributes:\n if attr == "calendar":\n # np.busdaycalendar __eq__ will return False;\n # we check holidays and weekmask attrs so are OK\n continue\n # Make sure nothings got lost from _params (which __eq__) is based on\n assert getattr(off, attr) == getattr(res, attr)\n\n def test_pickle_dateoffset_odd_inputs(self):\n # GH#34511\n off = DateOffset(months=12)\n res = tm.round_trip_pickle(off)\n assert off == res\n\n base_dt = datetime(2020, 1, 1)\n assert base_dt + off == base_dt + res\n\n def test_offsets_hashable(self, offset_types):\n # GH: 37267\n off = _create_offset(offset_types)\n assert hash(off) is not None\n\n # TODO: belongs in arithmetic tests?\n @pytest.mark.filterwarnings(\n "ignore:Non-vectorized DateOffset being applied to Series or DatetimeIndex"\n )\n @pytest.mark.parametrize("unit", ["s", "ms", "us"])\n def test_add_dt64_ndarray_non_nano(self, offset_types, unit):\n # check that the result with non-nano matches nano\n off = _create_offset(offset_types)\n\n dti = date_range("2016-01-01", periods=35, freq="D", unit=unit)\n\n result = (dti + off)._with_freq(None)\n\n exp_unit = unit\n if isinstance(off, Tick) and off._creso > dti._data._creso:\n # cast to higher reso like we would with Timedelta scalar\n exp_unit = Timedelta(off).unit\n # TODO(GH#55564): as_unit will be unnecessary\n expected = DatetimeIndex([x + off for x in dti]).as_unit(exp_unit)\n\n tm.assert_index_equal(result, expected)\n\n\nclass TestDateOffset:\n def setup_method(self):\n _offset_map.clear()\n\n def test_repr(self):\n repr(DateOffset())\n repr(DateOffset(2))\n repr(2 * DateOffset())\n repr(2 * DateOffset(months=2))\n\n def test_mul(self):\n assert DateOffset(2) == 2 * DateOffset(1)\n assert DateOffset(2) == DateOffset(1) * 2\n\n @pytest.mark.parametrize("kwd", sorted(liboffsets._relativedelta_kwds))\n def test_constructor(self, kwd, request):\n if kwd == "millisecond":\n request.applymarker(\n pytest.mark.xfail(\n raises=NotImplementedError,\n reason="Constructing 
DateOffset object with `millisecond` is not "\n "yet supported.",\n )\n )\n offset = DateOffset(**{kwd: 2})\n assert offset.kwds == {kwd: 2}\n assert getattr(offset, kwd) == 2\n\n def test_default_constructor(self, dt):\n assert (dt + DateOffset(2)) == datetime(2008, 1, 4)\n\n def test_is_anchored(self):\n msg = "DateOffset.is_anchored is deprecated "\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert not DateOffset(2).is_anchored()\n assert DateOffset(1).is_anchored()\n\n def test_copy(self):\n assert DateOffset(months=2).copy() == DateOffset(months=2)\n assert DateOffset(milliseconds=1).copy() == DateOffset(milliseconds=1)\n\n @pytest.mark.parametrize(\n "arithmatic_offset_type, expected",\n zip(\n _ARITHMETIC_DATE_OFFSET,\n [\n "2009-01-02",\n "2008-02-02",\n "2008-01-09",\n "2008-01-03",\n "2008-01-02 01:00:00",\n "2008-01-02 00:01:00",\n "2008-01-02 00:00:01",\n "2008-01-02 00:00:00.001000000",\n "2008-01-02 00:00:00.000001000",\n ],\n ),\n )\n def test_add(self, arithmatic_offset_type, expected, dt):\n assert DateOffset(**{arithmatic_offset_type: 1}) + dt == Timestamp(expected)\n assert dt + DateOffset(**{arithmatic_offset_type: 1}) == Timestamp(expected)\n\n @pytest.mark.parametrize(\n "arithmatic_offset_type, expected",\n zip(\n _ARITHMETIC_DATE_OFFSET,\n [\n "2007-01-02",\n "2007-12-02",\n "2007-12-26",\n "2008-01-01",\n "2008-01-01 23:00:00",\n "2008-01-01 23:59:00",\n "2008-01-01 23:59:59",\n "2008-01-01 23:59:59.999000000",\n "2008-01-01 23:59:59.999999000",\n ],\n ),\n )\n def test_sub(self, arithmatic_offset_type, expected, dt):\n assert dt - DateOffset(**{arithmatic_offset_type: 1}) == Timestamp(expected)\n with pytest.raises(TypeError, match="Cannot subtract datetime from offset"):\n DateOffset(**{arithmatic_offset_type: 1}) - dt\n\n @pytest.mark.parametrize(\n "arithmatic_offset_type, n, expected",\n zip(\n _ARITHMETIC_DATE_OFFSET,\n range(1, 10),\n [\n "2009-01-02",\n "2008-03-02",\n "2008-01-23",\n "2008-01-06",\n "2008-01-02 
05:00:00",\n "2008-01-02 00:06:00",\n "2008-01-02 00:00:07",\n "2008-01-02 00:00:00.008000000",\n "2008-01-02 00:00:00.000009000",\n ],\n ),\n )\n def test_mul_add(self, arithmatic_offset_type, n, expected, dt):\n assert DateOffset(**{arithmatic_offset_type: 1}) * n + dt == Timestamp(expected)\n assert n * DateOffset(**{arithmatic_offset_type: 1}) + dt == Timestamp(expected)\n assert dt + DateOffset(**{arithmatic_offset_type: 1}) * n == Timestamp(expected)\n assert dt + n * DateOffset(**{arithmatic_offset_type: 1}) == Timestamp(expected)\n\n @pytest.mark.parametrize(\n "arithmatic_offset_type, n, expected",\n zip(\n _ARITHMETIC_DATE_OFFSET,\n range(1, 10),\n [\n "2007-01-02",\n "2007-11-02",\n "2007-12-12",\n "2007-12-29",\n "2008-01-01 19:00:00",\n "2008-01-01 23:54:00",\n "2008-01-01 23:59:53",\n "2008-01-01 23:59:59.992000000",\n "2008-01-01 23:59:59.999991000",\n ],\n ),\n )\n def test_mul_sub(self, arithmatic_offset_type, n, expected, dt):\n assert dt - DateOffset(**{arithmatic_offset_type: 1}) * n == Timestamp(expected)\n assert dt - n * DateOffset(**{arithmatic_offset_type: 1}) == Timestamp(expected)\n\n def test_leap_year(self):\n d = datetime(2008, 1, 31)\n assert (d + DateOffset(months=1)) == datetime(2008, 2, 29)\n\n def test_eq(self):\n offset1 = DateOffset(days=1)\n offset2 = DateOffset(days=365)\n\n assert offset1 != offset2\n\n assert DateOffset(milliseconds=3) != DateOffset(milliseconds=7)\n\n @pytest.mark.parametrize(\n "offset_kwargs, expected_arg",\n [\n ({"microseconds": 1, "milliseconds": 1}, "2022-01-01 00:00:00.001001"),\n ({"seconds": 1, "milliseconds": 1}, "2022-01-01 00:00:01.001"),\n ({"minutes": 1, "milliseconds": 1}, "2022-01-01 00:01:00.001"),\n ({"hours": 1, "milliseconds": 1}, "2022-01-01 01:00:00.001"),\n ({"days": 1, "milliseconds": 1}, "2022-01-02 00:00:00.001"),\n ({"weeks": 1, "milliseconds": 1}, "2022-01-08 00:00:00.001"),\n ({"months": 1, "milliseconds": 1}, "2022-02-01 00:00:00.001"),\n ({"years": 1, "milliseconds": 1}, 
"2023-01-01 00:00:00.001"),\n ],\n )\n def test_milliseconds_combination(self, offset_kwargs, expected_arg):\n # GH 49897\n offset = DateOffset(**offset_kwargs)\n ts = Timestamp("2022-01-01")\n result = ts + offset\n expected = Timestamp(expected_arg)\n\n assert result == expected\n\n def test_offset_invalid_arguments(self):\n msg = "^Invalid argument/s or bad combination of arguments"\n with pytest.raises(ValueError, match=msg):\n DateOffset(picoseconds=1)\n\n\nclass TestOffsetNames:\n def test_get_offset_name(self):\n assert BDay().freqstr == "B"\n assert BDay(2).freqstr == "2B"\n assert BMonthEnd().freqstr == "BME"\n assert Week(weekday=0).freqstr == "W-MON"\n assert Week(weekday=1).freqstr == "W-TUE"\n assert Week(weekday=2).freqstr == "W-WED"\n assert Week(weekday=3).freqstr == "W-THU"\n assert Week(weekday=4).freqstr == "W-FRI"\n\n assert LastWeekOfMonth(weekday=WeekDay.SUN).freqstr == "LWOM-SUN"\n\n\ndef test_get_offset():\n with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):\n _get_offset("gibberish")\n with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):\n _get_offset("QS-JAN-B")\n\n pairs = [\n ("B", BDay()),\n ("b", BDay()),\n ("bme", BMonthEnd()),\n ("Bme", BMonthEnd()),\n ("W-MON", Week(weekday=0)),\n ("W-TUE", Week(weekday=1)),\n ("W-WED", Week(weekday=2)),\n ("W-THU", Week(weekday=3)),\n ("W-FRI", Week(weekday=4)),\n ]\n\n for name, expected in pairs:\n offset = _get_offset(name)\n assert offset == expected, (\n f"Expected {repr(name)} to yield {repr(expected)} "\n f"(actual: {repr(offset)})"\n )\n\n\ndef test_get_offset_legacy():\n pairs = [("w@Sat", Week(weekday=5))]\n for name, expected in pairs:\n with pytest.raises(ValueError, match=INVALID_FREQ_ERR_MSG):\n _get_offset(name)\n\n\nclass TestOffsetAliases:\n def setup_method(self):\n _offset_map.clear()\n\n def test_alias_equality(self):\n for k, v in _offset_map.items():\n if v is None:\n continue\n assert k == v.copy()\n\n def test_rule_code(self):\n lst = ["ME", "MS", "BME", 
"BMS", "D", "B", "h", "min", "s", "ms", "us"]\n for k in lst:\n assert k == _get_offset(k).rule_code\n # should be cached - this is kind of an internals test...\n assert k in _offset_map\n assert k == (_get_offset(k) * 3).rule_code\n\n suffix_lst = ["MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"]\n base = "W"\n for v in suffix_lst:\n alias = "-".join([base, v])\n assert alias == _get_offset(alias).rule_code\n assert alias == (_get_offset(alias) * 5).rule_code\n\n suffix_lst = [\n "JAN",\n "FEB",\n "MAR",\n "APR",\n "MAY",\n "JUN",\n "JUL",\n "AUG",\n "SEP",\n "OCT",\n "NOV",\n "DEC",\n ]\n base_lst = ["YE", "YS", "BYE", "BYS", "QE", "QS", "BQE", "BQS"]\n for base in base_lst:\n for v in suffix_lst:\n alias = "-".join([base, v])\n assert alias == _get_offset(alias).rule_code\n assert alias == (_get_offset(alias) * 5).rule_code\n\n\ndef test_freq_offsets():\n off = BDay(1, offset=timedelta(0, 1800))\n assert off.freqstr == "B+30Min"\n\n off = BDay(1, offset=timedelta(0, -1800))\n assert off.freqstr == "B-30Min"\n\n\nclass TestReprNames:\n def test_str_for_named_is_name(self):\n # look at all the amazing combinations!\n month_prefixes = ["YE", "YS", "BYE", "BYS", "QE", "BQE", "BQS", "QS"]\n names = [\n prefix + "-" + month\n for prefix in month_prefixes\n for month in [\n "JAN",\n "FEB",\n "MAR",\n "APR",\n "MAY",\n "JUN",\n "JUL",\n "AUG",\n "SEP",\n "OCT",\n "NOV",\n "DEC",\n ]\n ]\n days = ["MON", "TUE", "WED", "THU", "FRI", "SAT", "SUN"]\n names += ["W-" + day for day in days]\n names += ["WOM-" + week + day for week in ("1", "2", "3", "4") for day in days]\n _offset_map.clear()\n for name in names:\n offset = _get_offset(name)\n assert offset.freqstr == name\n\n\n# ---------------------------------------------------------------------\n\n\ndef test_valid_default_arguments(offset_types):\n # GH#19142 check that the calling the constructors without passing\n # any keyword arguments produce valid offsets\n cls = offset_types\n 
cls()\n\n\n@pytest.mark.parametrize("kwd", sorted(liboffsets._relativedelta_kwds))\ndef test_valid_month_attributes(kwd, month_classes):\n # GH#18226\n cls = month_classes\n # check that we cannot create e.g. MonthEnd(weeks=3)\n msg = rf"__init__\(\) got an unexpected keyword argument '{kwd}'"\n with pytest.raises(TypeError, match=msg):\n cls(**{kwd: 3})\n\n\ndef test_month_offset_name(month_classes):\n # GH#33757 off.name with n != 1 should not raise AttributeError\n obj = month_classes(1)\n obj2 = month_classes(2)\n assert obj2.name == obj.name\n\n\n@pytest.mark.parametrize("kwd", sorted(liboffsets._relativedelta_kwds))\ndef test_valid_relativedelta_kwargs(kwd, request):\n if kwd == "millisecond":\n request.applymarker(\n pytest.mark.xfail(\n raises=NotImplementedError,\n reason="Constructing DateOffset object with `millisecond` is not "\n "yet supported.",\n )\n )\n # Check that all the arguments specified in liboffsets._relativedelta_kwds\n # are in fact valid relativedelta keyword args\n DateOffset(**{kwd: 1})\n\n\n@pytest.mark.parametrize("kwd", sorted(liboffsets._relativedelta_kwds))\ndef test_valid_tick_attributes(kwd, tick_classes):\n # GH#18226\n cls = tick_classes\n # check that we cannot create e.g. 
Hour(weeks=3)\n msg = rf"__init__\(\) got an unexpected keyword argument '{kwd}'"\n with pytest.raises(TypeError, match=msg):\n cls(**{kwd: 3})\n\n\ndef test_validate_n_error():\n with pytest.raises(TypeError, match="argument must be an integer"):\n DateOffset(n="Doh!")\n\n with pytest.raises(TypeError, match="argument must be an integer"):\n MonthBegin(n=timedelta(1))\n\n with pytest.raises(TypeError, match="argument must be an integer"):\n BDay(n=np.array([1, 2], dtype=np.int64))\n\n\ndef test_require_integers(offset_types):\n cls = offset_types\n with pytest.raises(ValueError, match="argument must be an integer"):\n cls(n=1.5)\n\n\ndef test_tick_normalize_raises(tick_classes):\n # check that trying to create a Tick object with normalize=True raises\n # GH#21427\n cls = tick_classes\n msg = "Tick offset with `normalize=True` are not allowed."\n with pytest.raises(ValueError, match=msg):\n cls(n=3, normalize=True)\n\n\n@pytest.mark.parametrize(\n "offset_kwargs, expected_arg",\n [\n ({"nanoseconds": 1}, "1970-01-01 00:00:00.000000001"),\n ({"nanoseconds": 5}, "1970-01-01 00:00:00.000000005"),\n ({"nanoseconds": -1}, "1969-12-31 23:59:59.999999999"),\n ({"microseconds": 1}, "1970-01-01 00:00:00.000001"),\n ({"microseconds": -1}, "1969-12-31 23:59:59.999999"),\n ({"seconds": 1}, "1970-01-01 00:00:01"),\n ({"seconds": -1}, "1969-12-31 23:59:59"),\n ({"minutes": 1}, "1970-01-01 00:01:00"),\n ({"minutes": -1}, "1969-12-31 23:59:00"),\n ({"hours": 1}, "1970-01-01 01:00:00"),\n ({"hours": -1}, "1969-12-31 23:00:00"),\n ({"days": 1}, "1970-01-02 00:00:00"),\n ({"days": -1}, "1969-12-31 00:00:00"),\n ({"weeks": 1}, "1970-01-08 00:00:00"),\n ({"weeks": -1}, "1969-12-25 00:00:00"),\n ({"months": 1}, "1970-02-01 00:00:00"),\n ({"months": -1}, "1969-12-01 00:00:00"),\n ({"years": 1}, "1971-01-01 00:00:00"),\n ({"years": -1}, "1969-01-01 00:00:00"),\n ],\n)\ndef test_dateoffset_add_sub(offset_kwargs, expected_arg):\n offset = DateOffset(**offset_kwargs)\n ts = Timestamp(0)\n 
result = ts + offset\n expected = Timestamp(expected_arg)\n assert result == expected\n result -= offset\n assert result == ts\n result = offset + ts\n assert result == expected\n\n\ndef test_dateoffset_add_sub_timestamp_with_nano():\n offset = DateOffset(minutes=2, nanoseconds=9)\n ts = Timestamp(4)\n result = ts + offset\n expected = Timestamp("1970-01-01 00:02:00.000000013")\n assert result == expected\n result -= offset\n assert result == ts\n result = offset + ts\n assert result == expected\n\n offset2 = DateOffset(minutes=2, nanoseconds=9, hour=1)\n assert offset2._use_relativedelta\n with tm.assert_produces_warning(None):\n # no warning about Discarding nonzero nanoseconds\n result2 = ts + offset2\n expected2 = Timestamp("1970-01-01 01:02:00.000000013")\n assert result2 == expected2\n\n\n@pytest.mark.parametrize(\n "attribute",\n [\n "hours",\n "days",\n "weeks",\n "months",\n "years",\n ],\n)\ndef test_dateoffset_immutable(attribute):\n offset = DateOffset(**{attribute: 0})\n msg = "DateOffset objects are immutable"\n with pytest.raises(AttributeError, match=msg):\n setattr(offset, attribute, 5)\n\n\ndef test_dateoffset_misc():\n oset = offsets.DateOffset(months=2, days=4)\n # it works\n oset.freqstr\n\n assert not offsets.DateOffset(months=2) == 2\n\n\n@pytest.mark.parametrize("n", [-1, 1, 3])\ndef test_construct_int_arg_no_kwargs_assumed_days(n):\n # GH 45890, 45643\n offset = DateOffset(n)\n assert offset._offset == timedelta(1)\n result = Timestamp(2022, 1, 2) + offset\n expected = Timestamp(2022, 1, 2 + n)\n assert result == expected\n\n\n@pytest.mark.parametrize(\n "offset, expected",\n [\n (\n DateOffset(minutes=7, nanoseconds=18),\n Timestamp("2022-01-01 00:07:00.000000018"),\n ),\n (DateOffset(nanoseconds=3), Timestamp("2022-01-01 00:00:00.000000003")),\n ],\n)\ndef test_dateoffset_add_sub_timestamp_series_with_nano(offset, expected):\n # GH 47856\n start_time = Timestamp("2022-01-01")\n teststamp = start_time\n testseries = Series([start_time])\n 
testseries = testseries + offset\n assert testseries[0] == expected\n testseries -= offset\n assert testseries[0] == teststamp\n testseries = offset + testseries\n assert testseries[0] == expected\n\n\n@pytest.mark.parametrize(\n "n_months, scaling_factor, start_timestamp, expected_timestamp",\n [\n (1, 2, "2020-01-30", "2020-03-30"),\n (2, 1, "2020-01-30", "2020-03-30"),\n (1, 0, "2020-01-30", "2020-01-30"),\n (2, 0, "2020-01-30", "2020-01-30"),\n (1, -1, "2020-01-30", "2019-12-30"),\n (2, -1, "2020-01-30", "2019-11-30"),\n ],\n)\ndef test_offset_multiplication(\n n_months, scaling_factor, start_timestamp, expected_timestamp\n):\n # GH 47953\n mo1 = DateOffset(months=n_months)\n\n startscalar = Timestamp(start_timestamp)\n startarray = Series([startscalar])\n\n resultscalar = startscalar + (mo1 * scaling_factor)\n resultarray = startarray + (mo1 * scaling_factor)\n\n expectedscalar = Timestamp(expected_timestamp)\n expectedarray = Series([expectedscalar])\n assert resultscalar == expectedscalar\n\n tm.assert_series_equal(resultarray, expectedarray)\n\n\ndef test_dateoffset_operations_on_dataframes():\n # GH 47953\n df = DataFrame({"T": [Timestamp("2019-04-30")], "D": [DateOffset(months=1)]})\n frameresult1 = df["T"] + 26 * df["D"]\n df2 = DataFrame(\n {\n "T": [Timestamp("2019-04-30"), Timestamp("2019-04-30")],\n "D": [DateOffset(months=1), DateOffset(months=1)],\n }\n )\n expecteddate = Timestamp("2021-06-30")\n with tm.assert_produces_warning(PerformanceWarning):\n frameresult2 = df2["T"] + 26 * df2["D"]\n\n assert frameresult1[0] == expecteddate\n assert frameresult2[0] == expecteddate\n\n\ndef test_is_yqm_start_end():\n freq_m = to_offset("ME")\n bm = to_offset("BME")\n qfeb = to_offset("QE-FEB")\n qsfeb = to_offset("QS-FEB")\n bq = to_offset("BQE")\n bqs_apr = to_offset("BQS-APR")\n as_nov = to_offset("YS-NOV")\n\n tests = [\n (freq_m.is_month_start(Timestamp("2013-06-01")), 1),\n (bm.is_month_start(Timestamp("2013-06-01")), 0),\n 
(freq_m.is_month_start(Timestamp("2013-06-03")), 0),\n (bm.is_month_start(Timestamp("2013-06-03")), 1),\n (qfeb.is_month_end(Timestamp("2013-02-28")), 1),\n (qfeb.is_quarter_end(Timestamp("2013-02-28")), 1),\n (qfeb.is_year_end(Timestamp("2013-02-28")), 1),\n (qfeb.is_month_start(Timestamp("2013-03-01")), 1),\n (qfeb.is_quarter_start(Timestamp("2013-03-01")), 1),\n (qfeb.is_year_start(Timestamp("2013-03-01")), 1),\n (qsfeb.is_month_end(Timestamp("2013-03-31")), 1),\n (qsfeb.is_quarter_end(Timestamp("2013-03-31")), 0),\n (qsfeb.is_year_end(Timestamp("2013-03-31")), 0),\n (qsfeb.is_month_start(Timestamp("2013-02-01")), 1),\n (qsfeb.is_quarter_start(Timestamp("2013-02-01")), 1),\n (qsfeb.is_year_start(Timestamp("2013-02-01")), 1),\n (bq.is_month_end(Timestamp("2013-06-30")), 0),\n (bq.is_quarter_end(Timestamp("2013-06-30")), 0),\n (bq.is_year_end(Timestamp("2013-06-30")), 0),\n (bq.is_month_end(Timestamp("2013-06-28")), 1),\n (bq.is_quarter_end(Timestamp("2013-06-28")), 1),\n (bq.is_year_end(Timestamp("2013-06-28")), 0),\n (bqs_apr.is_month_end(Timestamp("2013-06-30")), 0),\n (bqs_apr.is_quarter_end(Timestamp("2013-06-30")), 0),\n (bqs_apr.is_year_end(Timestamp("2013-06-30")), 0),\n (bqs_apr.is_month_end(Timestamp("2013-06-28")), 1),\n (bqs_apr.is_quarter_end(Timestamp("2013-06-28")), 1),\n (bqs_apr.is_year_end(Timestamp("2013-03-29")), 1),\n (as_nov.is_year_start(Timestamp("2013-11-01")), 1),\n (as_nov.is_year_end(Timestamp("2013-10-31")), 1),\n (Timestamp("2012-02-01").days_in_month, 29),\n (Timestamp("2013-02-01").days_in_month, 28),\n ]\n\n for ts, value in tests:\n assert ts == value\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_offsets.py | test_offsets.py | Python | 40,623 | 0.95 | 0.104641 | 0.057312 | python-kit | 477 | 2024-05-16T02:33:06.914227 | GPL-3.0 | true | 0c873b842ba0d2f9e282050874abf494 |
"""\nBehavioral based tests for offsets and date_range.\n\nThis file is adapted from https://github.com/pandas-dev/pandas/pull/18761 -\nwhich was more ambitious but less idiomatic in its use of Hypothesis.\n\nYou may wish to consult the previous version for inspiration on further\ntests, or when trying to pin down the bugs exposed by the tests below.\n"""\nfrom hypothesis import (\n assume,\n given,\n)\nimport pytest\nimport pytz\n\nimport pandas as pd\nfrom pandas._testing._hypothesis import (\n DATETIME_JAN_1_1900_OPTIONAL_TZ,\n YQM_OFFSET,\n)\n\n# ----------------------------------------------------------------\n# Offset-specific behaviour tests\n\n\n@pytest.mark.arm_slow\n@given(DATETIME_JAN_1_1900_OPTIONAL_TZ, YQM_OFFSET)\ndef test_on_offset_implementations(dt, offset):\n assume(not offset.normalize)\n # check that the class-specific implementations of is_on_offset match\n # the general case definition:\n # (dt + offset) - offset == dt\n try:\n compare = (dt + offset) - offset\n except (pytz.NonExistentTimeError, pytz.AmbiguousTimeError):\n # When dt + offset does not exist or is DST-ambiguous, assume(False) to\n # indicate to hypothesis that this is not a valid test case\n # DST-ambiguous example (GH41906):\n # dt = datetime.datetime(1900, 1, 1, tzinfo=pytz.timezone('Africa/Kinshasa'))\n # offset = MonthBegin(66)\n assume(False)\n\n assert offset.is_on_offset(dt) == (compare == dt)\n\n\n@given(YQM_OFFSET)\ndef test_shift_across_dst(offset):\n # GH#18319 check that 1) timezone is correctly normalized and\n # 2) that hour is not incorrectly changed by this normalization\n assume(not offset.normalize)\n\n # Note that dti includes a transition across DST boundary\n dti = pd.date_range(\n start="2017-10-30 12:00:00", end="2017-11-06", freq="D", tz="US/Eastern"\n )\n assert (dti.hour == 12).all() # we haven't screwed up yet\n\n res = dti + offset\n assert (res.hour == 12).all()\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_offsets_properties.py | 
test_offsets_properties.py | Python | 1,971 | 0.95 | 0.1 | 0.265306 | python-kit | 204 | 2024-11-07T13:21:40.348066 | Apache-2.0 | true | b24ec95eff425a6602e73c83e16a1292 |
"""\nTests for the following offsets:\n- QuarterBegin\n- QuarterEnd\n"""\nfrom __future__ import annotations\n\nfrom datetime import datetime\n\nimport pytest\n\nimport pandas._testing as tm\nfrom pandas.tests.tseries.offsets.common import (\n assert_is_on_offset,\n assert_offset_equal,\n)\n\nfrom pandas.tseries.offsets import (\n QuarterBegin,\n QuarterEnd,\n)\n\n\n@pytest.mark.parametrize("klass", (QuarterBegin, QuarterEnd))\ndef test_quarterly_dont_normalize(klass):\n date = datetime(2012, 3, 31, 5, 30)\n result = date + klass()\n assert result.time() == date.time()\n\n\n@pytest.mark.parametrize("offset", [QuarterBegin(), QuarterEnd()])\n@pytest.mark.parametrize(\n "date",\n [\n datetime(2016, m, d)\n for m in [10, 11, 12]\n for d in [1, 2, 3, 28, 29, 30, 31]\n if not (m == 11 and d == 31)\n ],\n)\ndef test_on_offset(offset, date):\n res = offset.is_on_offset(date)\n slow_version = date == (date + offset) - offset\n assert res == slow_version\n\n\nclass TestQuarterBegin:\n def test_repr(self):\n expected = "<QuarterBegin: startingMonth=3>"\n assert repr(QuarterBegin()) == expected\n expected = "<QuarterBegin: startingMonth=3>"\n assert repr(QuarterBegin(startingMonth=3)) == expected\n expected = "<QuarterBegin: startingMonth=1>"\n assert repr(QuarterBegin(startingMonth=1)) == expected\n\n def test_is_anchored(self):\n msg = "QuarterBegin.is_anchored is deprecated "\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert QuarterBegin(startingMonth=1).is_anchored()\n assert QuarterBegin().is_anchored()\n assert not QuarterBegin(2, startingMonth=1).is_anchored()\n\n def test_offset_corner_case(self):\n # corner\n offset = QuarterBegin(n=-1, startingMonth=1)\n assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 1)\n\n offset_cases = []\n offset_cases.append(\n (\n QuarterBegin(startingMonth=1),\n {\n datetime(2007, 12, 1): datetime(2008, 1, 1),\n datetime(2008, 1, 1): datetime(2008, 4, 1),\n datetime(2008, 2, 15): datetime(2008, 4, 1),\n 
datetime(2008, 2, 29): datetime(2008, 4, 1),\n datetime(2008, 3, 15): datetime(2008, 4, 1),\n datetime(2008, 3, 31): datetime(2008, 4, 1),\n datetime(2008, 4, 15): datetime(2008, 7, 1),\n datetime(2008, 4, 1): datetime(2008, 7, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterBegin(startingMonth=2),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 1),\n datetime(2008, 1, 31): datetime(2008, 2, 1),\n datetime(2008, 1, 15): datetime(2008, 2, 1),\n datetime(2008, 2, 29): datetime(2008, 5, 1),\n datetime(2008, 3, 15): datetime(2008, 5, 1),\n datetime(2008, 3, 31): datetime(2008, 5, 1),\n datetime(2008, 4, 15): datetime(2008, 5, 1),\n datetime(2008, 4, 30): datetime(2008, 5, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterBegin(startingMonth=1, n=0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 12, 1): datetime(2009, 1, 1),\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 2, 15): datetime(2008, 4, 1),\n datetime(2008, 2, 29): datetime(2008, 4, 1),\n datetime(2008, 3, 15): datetime(2008, 4, 1),\n datetime(2008, 3, 31): datetime(2008, 4, 1),\n datetime(2008, 4, 15): datetime(2008, 7, 1),\n datetime(2008, 4, 30): datetime(2008, 7, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterBegin(startingMonth=1, n=-1),\n {\n datetime(2008, 1, 1): datetime(2007, 10, 1),\n datetime(2008, 1, 31): datetime(2008, 1, 1),\n datetime(2008, 2, 15): datetime(2008, 1, 1),\n datetime(2008, 2, 29): datetime(2008, 1, 1),\n datetime(2008, 3, 15): datetime(2008, 1, 1),\n datetime(2008, 3, 31): datetime(2008, 1, 1),\n datetime(2008, 4, 15): datetime(2008, 4, 1),\n datetime(2008, 4, 30): datetime(2008, 4, 1),\n datetime(2008, 7, 1): datetime(2008, 4, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterBegin(startingMonth=1, n=2),\n {\n datetime(2008, 1, 1): datetime(2008, 7, 1),\n datetime(2008, 2, 15): datetime(2008, 7, 1),\n datetime(2008, 2, 29): datetime(2008, 7, 1),\n datetime(2008, 3, 15): datetime(2008, 7, 1),\n datetime(2008, 3, 31): 
datetime(2008, 7, 1),\n datetime(2008, 4, 15): datetime(2008, 10, 1),\n datetime(2008, 4, 1): datetime(2008, 10, 1),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n\nclass TestQuarterEnd:\n def test_repr(self):\n expected = "<QuarterEnd: startingMonth=3>"\n assert repr(QuarterEnd()) == expected\n expected = "<QuarterEnd: startingMonth=3>"\n assert repr(QuarterEnd(startingMonth=3)) == expected\n expected = "<QuarterEnd: startingMonth=1>"\n assert repr(QuarterEnd(startingMonth=1)) == expected\n\n def test_is_anchored(self):\n msg = "QuarterEnd.is_anchored is deprecated "\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert QuarterEnd(startingMonth=1).is_anchored()\n assert QuarterEnd().is_anchored()\n assert not QuarterEnd(2, startingMonth=1).is_anchored()\n\n def test_offset_corner_case(self):\n # corner\n offset = QuarterEnd(n=-1, startingMonth=1)\n assert datetime(2010, 2, 1) + offset == datetime(2010, 1, 31)\n\n offset_cases = []\n offset_cases.append(\n (\n QuarterEnd(startingMonth=1),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 4, 30),\n datetime(2008, 2, 15): datetime(2008, 4, 30),\n datetime(2008, 2, 29): datetime(2008, 4, 30),\n datetime(2008, 3, 15): datetime(2008, 4, 30),\n datetime(2008, 3, 31): datetime(2008, 4, 30),\n datetime(2008, 4, 15): datetime(2008, 4, 30),\n datetime(2008, 4, 30): datetime(2008, 7, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterEnd(startingMonth=2),\n {\n datetime(2008, 1, 1): datetime(2008, 2, 29),\n datetime(2008, 1, 31): datetime(2008, 2, 29),\n datetime(2008, 2, 15): datetime(2008, 2, 29),\n datetime(2008, 2, 29): datetime(2008, 5, 31),\n datetime(2008, 3, 15): datetime(2008, 5, 31),\n datetime(2008, 3, 31): datetime(2008, 5, 31),\n datetime(2008, 4, 15): datetime(2008, 5, 31),\n datetime(2008, 
4, 30): datetime(2008, 5, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterEnd(startingMonth=1, n=0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 31),\n datetime(2008, 1, 31): datetime(2008, 1, 31),\n datetime(2008, 2, 15): datetime(2008, 4, 30),\n datetime(2008, 2, 29): datetime(2008, 4, 30),\n datetime(2008, 3, 15): datetime(2008, 4, 30),\n datetime(2008, 3, 31): datetime(2008, 4, 30),\n datetime(2008, 4, 15): datetime(2008, 4, 30),\n datetime(2008, 4, 30): datetime(2008, 4, 30),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterEnd(startingMonth=1, n=-1),\n {\n datetime(2008, 1, 1): datetime(2007, 10, 31),\n datetime(2008, 1, 31): datetime(2007, 10, 31),\n datetime(2008, 2, 15): datetime(2008, 1, 31),\n datetime(2008, 2, 29): datetime(2008, 1, 31),\n datetime(2008, 3, 15): datetime(2008, 1, 31),\n datetime(2008, 3, 31): datetime(2008, 1, 31),\n datetime(2008, 4, 15): datetime(2008, 1, 31),\n datetime(2008, 4, 30): datetime(2008, 1, 31),\n datetime(2008, 7, 1): datetime(2008, 4, 30),\n },\n )\n )\n\n offset_cases.append(\n (\n QuarterEnd(startingMonth=1, n=2),\n {\n datetime(2008, 1, 31): datetime(2008, 7, 31),\n datetime(2008, 2, 15): datetime(2008, 7, 31),\n datetime(2008, 2, 29): datetime(2008, 7, 31),\n datetime(2008, 3, 15): datetime(2008, 7, 31),\n datetime(2008, 3, 31): datetime(2008, 7, 31),\n datetime(2008, 4, 15): datetime(2008, 7, 31),\n datetime(2008, 4, 30): datetime(2008, 10, 31),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (QuarterEnd(1, startingMonth=1), datetime(2008, 1, 31), True),\n (QuarterEnd(1, startingMonth=1), datetime(2007, 12, 31), False),\n (QuarterEnd(1, startingMonth=1), datetime(2008, 2, 29), False),\n (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 30), False),\n (QuarterEnd(1, startingMonth=1), datetime(2007, 3, 31), False),\n 
(QuarterEnd(1, startingMonth=1), datetime(2008, 4, 30), True),\n (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 30), False),\n (QuarterEnd(1, startingMonth=1), datetime(2008, 5, 31), False),\n (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 29), False),\n (QuarterEnd(1, startingMonth=1), datetime(2007, 6, 30), False),\n (QuarterEnd(1, startingMonth=2), datetime(2008, 1, 31), False),\n (QuarterEnd(1, startingMonth=2), datetime(2007, 12, 31), False),\n (QuarterEnd(1, startingMonth=2), datetime(2008, 2, 29), True),\n (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 30), False),\n (QuarterEnd(1, startingMonth=2), datetime(2007, 3, 31), False),\n (QuarterEnd(1, startingMonth=2), datetime(2008, 4, 30), False),\n (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 30), False),\n (QuarterEnd(1, startingMonth=2), datetime(2008, 5, 31), True),\n (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 29), False),\n (QuarterEnd(1, startingMonth=2), datetime(2007, 6, 30), False),\n (QuarterEnd(1, startingMonth=3), datetime(2008, 1, 31), False),\n (QuarterEnd(1, startingMonth=3), datetime(2007, 12, 31), True),\n (QuarterEnd(1, startingMonth=3), datetime(2008, 2, 29), False),\n (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 30), False),\n (QuarterEnd(1, startingMonth=3), datetime(2007, 3, 31), True),\n (QuarterEnd(1, startingMonth=3), datetime(2008, 4, 30), False),\n (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 30), False),\n (QuarterEnd(1, startingMonth=3), datetime(2008, 5, 31), False),\n (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 29), False),\n (QuarterEnd(1, startingMonth=3), datetime(2007, 6, 30), True),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_quarter.py | test_quarter.py | Python | 11,839 | 0.95 | 0.062706 | 0.00738 | vue-tools | 186 | 2024-03-01T02:58:00.651042 | 
MIT | true | b3e74daf3198067007ed3ceca3463289 |
"""\nTests for offsets.Tick and subclasses\n"""\nfrom datetime import (\n datetime,\n timedelta,\n)\n\nfrom hypothesis import (\n assume,\n example,\n given,\n)\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.offsets import delta_to_tick\nfrom pandas.errors import OutOfBoundsTimedelta\n\nfrom pandas import (\n Timedelta,\n Timestamp,\n)\nimport pandas._testing as tm\nfrom pandas._testing._hypothesis import INT_NEG_999_TO_POS_999\nfrom pandas.tests.tseries.offsets.common import assert_offset_equal\n\nfrom pandas.tseries import offsets\nfrom pandas.tseries.offsets import (\n Hour,\n Micro,\n Milli,\n Minute,\n Nano,\n Second,\n)\n\n# ---------------------------------------------------------------------\n# Test Helpers\n\ntick_classes = [Hour, Minute, Second, Milli, Micro, Nano]\n\n\n# ---------------------------------------------------------------------\n\n\ndef test_apply_ticks():\n result = offsets.Hour(3) + offsets.Hour(4)\n exp = offsets.Hour(7)\n assert result == exp\n\n\ndef test_delta_to_tick():\n delta = timedelta(3)\n\n tick = delta_to_tick(delta)\n assert tick == offsets.Day(3)\n\n td = Timedelta(nanoseconds=5)\n tick = delta_to_tick(td)\n assert tick == Nano(5)\n\n\n@pytest.mark.parametrize("cls", tick_classes)\n@example(n=2, m=3)\n@example(n=800, m=300)\n@example(n=1000, m=5)\n@given(n=INT_NEG_999_TO_POS_999, m=INT_NEG_999_TO_POS_999)\ndef test_tick_add_sub(cls, n, m):\n # For all Tick subclasses and all integers n, m, we should have\n # tick(n) + tick(m) == tick(n+m)\n # tick(n) - tick(m) == tick(n-m)\n left = cls(n)\n right = cls(m)\n expected = cls(n + m)\n\n assert left + right == expected\n\n expected = cls(n - m)\n assert left - right == expected\n\n\n@pytest.mark.arm_slow\n@pytest.mark.parametrize("cls", tick_classes)\n@example(n=2, m=3)\n@given(n=INT_NEG_999_TO_POS_999, m=INT_NEG_999_TO_POS_999)\ndef test_tick_equality(cls, n, m):\n assume(m != n)\n # tick == tock iff tick.n == tock.n\n left = cls(n)\n right = cls(m)\n assert left 
!= right\n\n right = cls(n)\n assert left == right\n assert not left != right\n\n if n != 0:\n assert cls(n) != cls(-n)\n\n\n# ---------------------------------------------------------------------\n\n\ndef test_Hour():\n assert_offset_equal(Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 1))\n assert_offset_equal(Hour(-1), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))\n assert_offset_equal(2 * Hour(), datetime(2010, 1, 1), datetime(2010, 1, 1, 2))\n assert_offset_equal(-1 * Hour(), datetime(2010, 1, 1, 1), datetime(2010, 1, 1))\n\n assert Hour(3) + Hour(2) == Hour(5)\n assert Hour(3) - Hour(2) == Hour()\n\n assert Hour(4) != Hour(1)\n\n\ndef test_Minute():\n assert_offset_equal(Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 1))\n assert_offset_equal(Minute(-1), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))\n assert_offset_equal(2 * Minute(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 2))\n assert_offset_equal(-1 * Minute(), datetime(2010, 1, 1, 0, 1), datetime(2010, 1, 1))\n\n assert Minute(3) + Minute(2) == Minute(5)\n assert Minute(3) - Minute(2) == Minute()\n assert Minute(5) != Minute()\n\n\ndef test_Second():\n assert_offset_equal(Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 1))\n assert_offset_equal(Second(-1), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1))\n assert_offset_equal(\n 2 * Second(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 2)\n )\n assert_offset_equal(\n -1 * Second(), datetime(2010, 1, 1, 0, 0, 1), datetime(2010, 1, 1)\n )\n\n assert Second(3) + Second(2) == Second(5)\n assert Second(3) - Second(2) == Second()\n\n\ndef test_Millisecond():\n assert_offset_equal(\n Milli(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1000)\n )\n assert_offset_equal(\n Milli(-1), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1)\n )\n assert_offset_equal(\n Milli(2), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2000)\n )\n assert_offset_equal(\n 2 * Milli(), datetime(2010, 1, 1), datetime(2010, 
1, 1, 0, 0, 0, 2000)\n )\n assert_offset_equal(\n -1 * Milli(), datetime(2010, 1, 1, 0, 0, 0, 1000), datetime(2010, 1, 1)\n )\n\n assert Milli(3) + Milli(2) == Milli(5)\n assert Milli(3) - Milli(2) == Milli()\n\n\ndef test_MillisecondTimestampArithmetic():\n assert_offset_equal(\n Milli(), Timestamp("2010-01-01"), Timestamp("2010-01-01 00:00:00.001")\n )\n assert_offset_equal(\n Milli(-1), Timestamp("2010-01-01 00:00:00.001"), Timestamp("2010-01-01")\n )\n\n\ndef test_Microsecond():\n assert_offset_equal(Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 1))\n assert_offset_equal(\n Micro(-1), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1)\n )\n\n assert_offset_equal(\n 2 * Micro(), datetime(2010, 1, 1), datetime(2010, 1, 1, 0, 0, 0, 2)\n )\n assert_offset_equal(\n -1 * Micro(), datetime(2010, 1, 1, 0, 0, 0, 1), datetime(2010, 1, 1)\n )\n\n assert Micro(3) + Micro(2) == Micro(5)\n assert Micro(3) - Micro(2) == Micro()\n\n\ndef test_NanosecondGeneric():\n timestamp = Timestamp(datetime(2010, 1, 1))\n assert timestamp.nanosecond == 0\n\n result = timestamp + Nano(10)\n assert result.nanosecond == 10\n\n reverse_result = Nano(10) + timestamp\n assert reverse_result.nanosecond == 10\n\n\ndef test_Nanosecond():\n timestamp = Timestamp(datetime(2010, 1, 1))\n assert_offset_equal(Nano(), timestamp, timestamp + np.timedelta64(1, "ns"))\n assert_offset_equal(Nano(-1), timestamp + np.timedelta64(1, "ns"), timestamp)\n assert_offset_equal(2 * Nano(), timestamp, timestamp + np.timedelta64(2, "ns"))\n assert_offset_equal(-1 * Nano(), timestamp + np.timedelta64(1, "ns"), timestamp)\n\n assert Nano(3) + Nano(2) == Nano(5)\n assert Nano(3) - Nano(2) == Nano()\n\n # GH9284\n assert Nano(1) + Nano(10) == Nano(11)\n assert Nano(5) + Micro(1) == Nano(1005)\n assert Micro(5) + Nano(1) == Nano(5001)\n\n\n@pytest.mark.parametrize(\n "kls, expected",\n [\n (Hour, Timedelta(hours=5)),\n (Minute, Timedelta(hours=2, minutes=3)),\n (Second, Timedelta(hours=2, seconds=3)),\n 
(Milli, Timedelta(hours=2, milliseconds=3)),\n (Micro, Timedelta(hours=2, microseconds=3)),\n (Nano, Timedelta(hours=2, nanoseconds=3)),\n ],\n)\ndef test_tick_addition(kls, expected):\n offset = kls(3)\n td = Timedelta(hours=2)\n\n for other in [td, td.to_pytimedelta(), td.to_timedelta64()]:\n result = offset + other\n assert isinstance(result, Timedelta)\n assert result == expected\n\n result = other + offset\n assert isinstance(result, Timedelta)\n assert result == expected\n\n\ndef test_tick_delta_overflow():\n # GH#55503 raise OutOfBoundsTimedelta, not OverflowError\n tick = offsets.Day(10**9)\n msg = "Cannot cast 1000000000 days 00:00:00 to unit='ns' without overflow"\n depr_msg = "Day.delta is deprecated"\n with pytest.raises(OutOfBoundsTimedelta, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n tick.delta\n\n\n@pytest.mark.parametrize("cls", tick_classes)\ndef test_tick_division(cls):\n off = cls(10)\n\n assert off / cls(5) == 2\n assert off / 2 == cls(5)\n assert off / 2.0 == cls(5)\n\n assert off / off._as_pd_timedelta == 1\n assert off / off._as_pd_timedelta.to_timedelta64() == 1\n\n assert off / Nano(1) == off._as_pd_timedelta / Nano(1)._as_pd_timedelta\n\n if cls is not Nano:\n # A case where we end up with a smaller class\n result = off / 1000\n assert isinstance(result, offsets.Tick)\n assert not isinstance(result, cls)\n assert result._as_pd_timedelta == off._as_pd_timedelta / 1000\n\n if cls._nanos_inc < Timedelta(seconds=1)._value:\n # Case where we end up with a bigger class\n result = off / 0.001\n assert isinstance(result, offsets.Tick)\n assert not isinstance(result, cls)\n assert result._as_pd_timedelta == off._as_pd_timedelta / 0.001\n\n\ndef test_tick_mul_float():\n off = Micro(2)\n\n # Case where we retain type\n result = off * 1.5\n expected = Micro(3)\n assert result == expected\n assert isinstance(result, Micro)\n\n # Case where we bump up to the next type\n result = off * 1.25\n expected = Nano(2500)\n 
assert result == expected\n assert isinstance(result, Nano)\n\n\n@pytest.mark.parametrize("cls", tick_classes)\ndef test_tick_rdiv(cls):\n off = cls(10)\n delta = off._as_pd_timedelta\n td64 = delta.to_timedelta64()\n instance__type = ".".join([cls.__module__, cls.__name__])\n msg = (\n "unsupported operand type\\(s\\) for \\/: 'int'|'float' and "\n f"'{instance__type}'"\n )\n\n with pytest.raises(TypeError, match=msg):\n 2 / off\n with pytest.raises(TypeError, match=msg):\n 2.0 / off\n\n assert (td64 * 2.5) / off == 2.5\n\n if cls is not Nano:\n # skip pytimedelta for Nano since it gets dropped\n assert (delta.to_pytimedelta() * 2) / off == 2\n\n result = np.array([2 * td64, td64]) / off\n expected = np.array([2.0, 1.0])\n tm.assert_numpy_array_equal(result, expected)\n\n\n@pytest.mark.parametrize("cls1", tick_classes)\n@pytest.mark.parametrize("cls2", tick_classes)\ndef test_tick_zero(cls1, cls2):\n assert cls1(0) == cls2(0)\n assert cls1(0) + cls2(0) == cls1(0)\n\n if cls1 is not Nano:\n assert cls1(2) + cls2(0) == cls1(2)\n\n if cls1 is Nano:\n assert cls1(2) + Nano(0) == cls1(2)\n\n\n@pytest.mark.parametrize("cls", tick_classes)\ndef test_tick_equalities(cls):\n assert cls() == cls(1)\n\n\n@pytest.mark.parametrize("cls", tick_classes)\ndef test_tick_offset(cls):\n msg = f"{cls.__name__}.is_anchored is deprecated "\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert not cls().is_anchored()\n\n\n@pytest.mark.parametrize("cls", tick_classes)\ndef test_compare_ticks(cls):\n three = cls(3)\n four = cls(4)\n\n assert three < cls(4)\n assert cls(3) < four\n assert four > cls(3)\n assert cls(4) > three\n assert cls(3) == cls(3)\n assert cls(3) != cls(4)\n\n\n@pytest.mark.parametrize("cls", tick_classes)\ndef test_compare_ticks_to_strs(cls):\n # GH#23524\n off = cls(19)\n\n # These tests should work with any strings, but we particularly are\n # interested in "infer" as that comparison is convenient to make in\n # Datetime/Timedelta Array/Index 
constructors\n assert not off == "infer"\n assert not "foo" == off\n\n instance_type = ".".join([cls.__module__, cls.__name__])\n msg = (\n "'<'|'<='|'>'|'>=' not supported between instances of "\n f"'str' and '{instance_type}'|'{instance_type}' and 'str'"\n )\n\n for left, right in [("infer", off), (off, "infer")]:\n with pytest.raises(TypeError, match=msg):\n left < right\n with pytest.raises(TypeError, match=msg):\n left <= right\n with pytest.raises(TypeError, match=msg):\n left > right\n with pytest.raises(TypeError, match=msg):\n left >= right\n\n\n@pytest.mark.parametrize("cls", tick_classes)\ndef test_compare_ticks_to_timedeltalike(cls):\n off = cls(19)\n\n td = off._as_pd_timedelta\n\n others = [td, td.to_timedelta64()]\n if cls is not Nano:\n others.append(td.to_pytimedelta())\n\n for other in others:\n assert off == other\n assert not off != other\n assert not off < other\n assert not off > other\n assert off <= other\n assert off >= other\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_ticks.py | test_ticks.py | Python | 11,548 | 0.95 | 0.093827 | 0.061489 | react-lib | 538 | 2024-08-26T18:08:01.989849 | BSD-3-Clause | true | 8eeb9f4a7b440251a79263f120cf51e5 |
"""\nTests for the following offsets:\n- Week\n- WeekOfMonth\n- LastWeekOfMonth\n"""\nfrom __future__ import annotations\n\nfrom datetime import (\n datetime,\n timedelta,\n)\n\nimport pytest\n\nfrom pandas._libs.tslibs import Timestamp\nfrom pandas._libs.tslibs.offsets import (\n Day,\n LastWeekOfMonth,\n Week,\n WeekOfMonth,\n)\n\nimport pandas._testing as tm\nfrom pandas.tests.tseries.offsets.common import (\n WeekDay,\n assert_is_on_offset,\n assert_offset_equal,\n)\n\n\nclass TestWeek:\n def test_repr(self):\n assert repr(Week(weekday=0)) == "<Week: weekday=0>"\n assert repr(Week(n=-1, weekday=0)) == "<-1 * Week: weekday=0>"\n assert repr(Week(n=-2, weekday=0)) == "<-2 * Weeks: weekday=0>"\n\n def test_corner(self):\n with pytest.raises(ValueError, match="Day must be"):\n Week(weekday=7)\n\n with pytest.raises(ValueError, match="Day must be"):\n Week(weekday=-1)\n\n def test_is_anchored(self):\n msg = "Week.is_anchored is deprecated "\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert Week(weekday=0).is_anchored()\n assert not Week().is_anchored()\n assert not Week(2, weekday=2).is_anchored()\n assert not Week(2).is_anchored()\n\n offset_cases = []\n # not business week\n offset_cases.append(\n (\n Week(),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 8),\n datetime(2008, 1, 4): datetime(2008, 1, 11),\n datetime(2008, 1, 5): datetime(2008, 1, 12),\n datetime(2008, 1, 6): datetime(2008, 1, 13),\n datetime(2008, 1, 7): datetime(2008, 1, 14),\n },\n )\n )\n\n # Mon\n offset_cases.append(\n (\n Week(weekday=0),\n {\n datetime(2007, 12, 31): datetime(2008, 1, 7),\n datetime(2008, 1, 4): datetime(2008, 1, 7),\n datetime(2008, 1, 5): datetime(2008, 1, 7),\n datetime(2008, 1, 6): datetime(2008, 1, 7),\n datetime(2008, 1, 7): datetime(2008, 1, 14),\n },\n )\n )\n\n # n=0 -> roll forward. 
Mon\n offset_cases.append(\n (\n Week(0, weekday=0),\n {\n datetime(2007, 12, 31): datetime(2007, 12, 31),\n datetime(2008, 1, 4): datetime(2008, 1, 7),\n datetime(2008, 1, 5): datetime(2008, 1, 7),\n datetime(2008, 1, 6): datetime(2008, 1, 7),\n datetime(2008, 1, 7): datetime(2008, 1, 7),\n },\n )\n )\n\n # n=0 -> roll forward. Mon\n offset_cases.append(\n (\n Week(-2, weekday=1),\n {\n datetime(2010, 4, 6): datetime(2010, 3, 23),\n datetime(2010, 4, 8): datetime(2010, 3, 30),\n datetime(2010, 4, 5): datetime(2010, 3, 23),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n @pytest.mark.parametrize("weekday", range(7))\n def test_is_on_offset(self, weekday):\n offset = Week(weekday=weekday)\n\n for day in range(1, 8):\n date = datetime(2008, 1, day)\n expected = day % 7 == weekday\n assert_is_on_offset(offset, date, expected)\n\n @pytest.mark.parametrize(\n "n,date",\n [\n (2, "1862-01-13 09:03:34.873477378+0210"),\n (-2, "1856-10-24 16:18:36.556360110-0717"),\n ],\n )\n def test_is_on_offset_weekday_none(self, n, date):\n # GH 18510 Week with weekday = None, normalize = False\n # should always be is_on_offset\n offset = Week(n=n, weekday=None)\n ts = Timestamp(date, tz="Africa/Lusaka")\n fast = offset.is_on_offset(ts)\n slow = (ts + offset) - offset == ts\n assert fast == slow\n\n def test_week_add_invalid(self):\n # Week with weekday should raise TypeError and _not_ AttributeError\n # when adding invalid offset\n offset = Week(weekday=1)\n other = Day()\n with pytest.raises(TypeError, match="Cannot add"):\n offset + other\n\n\nclass TestWeekOfMonth:\n def test_constructor(self):\n with pytest.raises(ValueError, match="^Week"):\n WeekOfMonth(n=1, week=4, weekday=0)\n\n with pytest.raises(ValueError, match="^Week"):\n WeekOfMonth(n=1, week=-1, weekday=0)\n\n with pytest.raises(ValueError, match="^Day"):\n 
WeekOfMonth(n=1, week=0, weekday=-1)\n\n with pytest.raises(ValueError, match="^Day"):\n WeekOfMonth(n=1, week=0, weekday=-7)\n\n def test_repr(self):\n assert (\n repr(WeekOfMonth(weekday=1, week=2)) == "<WeekOfMonth: week=2, weekday=1>"\n )\n\n def test_offset(self):\n date1 = datetime(2011, 1, 4) # 1st Tuesday of Month\n date2 = datetime(2011, 1, 11) # 2nd Tuesday of Month\n date3 = datetime(2011, 1, 18) # 3rd Tuesday of Month\n date4 = datetime(2011, 1, 25) # 4th Tuesday of Month\n\n # see for loop for structure\n test_cases = [\n (-2, 2, 1, date1, datetime(2010, 11, 16)),\n (-2, 2, 1, date2, datetime(2010, 11, 16)),\n (-2, 2, 1, date3, datetime(2010, 11, 16)),\n (-2, 2, 1, date4, datetime(2010, 12, 21)),\n (-1, 2, 1, date1, datetime(2010, 12, 21)),\n (-1, 2, 1, date2, datetime(2010, 12, 21)),\n (-1, 2, 1, date3, datetime(2010, 12, 21)),\n (-1, 2, 1, date4, datetime(2011, 1, 18)),\n (0, 0, 1, date1, datetime(2011, 1, 4)),\n (0, 0, 1, date2, datetime(2011, 2, 1)),\n (0, 0, 1, date3, datetime(2011, 2, 1)),\n (0, 0, 1, date4, datetime(2011, 2, 1)),\n (0, 1, 1, date1, datetime(2011, 1, 11)),\n (0, 1, 1, date2, datetime(2011, 1, 11)),\n (0, 1, 1, date3, datetime(2011, 2, 8)),\n (0, 1, 1, date4, datetime(2011, 2, 8)),\n (0, 0, 1, date1, datetime(2011, 1, 4)),\n (0, 1, 1, date2, datetime(2011, 1, 11)),\n (0, 2, 1, date3, datetime(2011, 1, 18)),\n (0, 3, 1, date4, datetime(2011, 1, 25)),\n (1, 0, 0, date1, datetime(2011, 2, 7)),\n (1, 0, 0, date2, datetime(2011, 2, 7)),\n (1, 0, 0, date3, datetime(2011, 2, 7)),\n (1, 0, 0, date4, datetime(2011, 2, 7)),\n (1, 0, 1, date1, datetime(2011, 2, 1)),\n (1, 0, 1, date2, datetime(2011, 2, 1)),\n (1, 0, 1, date3, datetime(2011, 2, 1)),\n (1, 0, 1, date4, datetime(2011, 2, 1)),\n (1, 0, 2, date1, datetime(2011, 1, 5)),\n (1, 0, 2, date2, datetime(2011, 2, 2)),\n (1, 0, 2, date3, datetime(2011, 2, 2)),\n (1, 0, 2, date4, datetime(2011, 2, 2)),\n (1, 2, 1, date1, datetime(2011, 1, 18)),\n (1, 2, 1, date2, datetime(2011, 1, 18)),\n 
(1, 2, 1, date3, datetime(2011, 2, 15)),\n (1, 2, 1, date4, datetime(2011, 2, 15)),\n (2, 2, 1, date1, datetime(2011, 2, 15)),\n (2, 2, 1, date2, datetime(2011, 2, 15)),\n (2, 2, 1, date3, datetime(2011, 3, 15)),\n (2, 2, 1, date4, datetime(2011, 3, 15)),\n ]\n\n for n, week, weekday, dt, expected in test_cases:\n offset = WeekOfMonth(n, week=week, weekday=weekday)\n assert_offset_equal(offset, dt, expected)\n\n # try subtracting\n result = datetime(2011, 2, 1) - WeekOfMonth(week=1, weekday=2)\n assert result == datetime(2011, 1, 12)\n\n result = datetime(2011, 2, 3) - WeekOfMonth(week=0, weekday=2)\n assert result == datetime(2011, 2, 2)\n\n on_offset_cases = [\n (0, 0, datetime(2011, 2, 7), True),\n (0, 0, datetime(2011, 2, 6), False),\n (0, 0, datetime(2011, 2, 14), False),\n (1, 0, datetime(2011, 2, 14), True),\n (0, 1, datetime(2011, 2, 1), True),\n (0, 1, datetime(2011, 2, 8), False),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n week, weekday, dt, expected = case\n offset = WeekOfMonth(week=week, weekday=weekday)\n assert offset.is_on_offset(dt) == expected\n\n @pytest.mark.parametrize(\n "n,week,date,tz",\n [\n (2, 2, "1916-05-15 01:14:49.583410462+0422", "Asia/Qyzylorda"),\n (-3, 1, "1980-12-08 03:38:52.878321185+0500", "Asia/Oral"),\n ],\n )\n def test_is_on_offset_nanoseconds(self, n, week, date, tz):\n # GH 18864\n # Make sure that nanoseconds don't trip up is_on_offset (and with it apply)\n offset = WeekOfMonth(n=n, week=week, weekday=0)\n ts = Timestamp(date, tz=tz)\n fast = offset.is_on_offset(ts)\n slow = (ts + offset) - offset == ts\n assert fast == slow\n\n\nclass TestLastWeekOfMonth:\n def test_constructor(self):\n with pytest.raises(ValueError, match="^N cannot be 0"):\n LastWeekOfMonth(n=0, weekday=1)\n\n with pytest.raises(ValueError, match="^Day"):\n LastWeekOfMonth(n=1, weekday=-1)\n\n with pytest.raises(ValueError, match="^Day"):\n LastWeekOfMonth(n=1, weekday=7)\n\n def 
test_offset(self):\n # Saturday\n last_sat = datetime(2013, 8, 31)\n next_sat = datetime(2013, 9, 28)\n offset_sat = LastWeekOfMonth(n=1, weekday=5)\n\n one_day_before = last_sat + timedelta(days=-1)\n assert one_day_before + offset_sat == last_sat\n\n one_day_after = last_sat + timedelta(days=+1)\n assert one_day_after + offset_sat == next_sat\n\n # Test On that day\n assert last_sat + offset_sat == next_sat\n\n # Thursday\n\n offset_thur = LastWeekOfMonth(n=1, weekday=3)\n last_thurs = datetime(2013, 1, 31)\n next_thurs = datetime(2013, 2, 28)\n\n one_day_before = last_thurs + timedelta(days=-1)\n assert one_day_before + offset_thur == last_thurs\n\n one_day_after = last_thurs + timedelta(days=+1)\n assert one_day_after + offset_thur == next_thurs\n\n # Test on that day\n assert last_thurs + offset_thur == next_thurs\n\n three_before = last_thurs + timedelta(days=-3)\n assert three_before + offset_thur == last_thurs\n\n two_after = last_thurs + timedelta(days=+2)\n assert two_after + offset_thur == next_thurs\n\n offset_sunday = LastWeekOfMonth(n=1, weekday=WeekDay.SUN)\n assert datetime(2013, 7, 31) + offset_sunday == datetime(2013, 8, 25)\n\n on_offset_cases = [\n (WeekDay.SUN, datetime(2013, 1, 27), True),\n (WeekDay.SAT, datetime(2013, 3, 30), True),\n (WeekDay.MON, datetime(2013, 2, 18), False), # Not the last Mon\n (WeekDay.SUN, datetime(2013, 2, 25), False), # Not a SUN\n (WeekDay.MON, datetime(2013, 2, 25), True),\n (WeekDay.SAT, datetime(2013, 11, 30), True),\n (WeekDay.SAT, datetime(2006, 8, 26), True),\n (WeekDay.SAT, datetime(2007, 8, 25), True),\n (WeekDay.SAT, datetime(2008, 8, 30), True),\n (WeekDay.SAT, datetime(2009, 8, 29), True),\n (WeekDay.SAT, datetime(2010, 8, 28), True),\n (WeekDay.SAT, datetime(2011, 8, 27), True),\n (WeekDay.SAT, datetime(2019, 8, 31), True),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n weekday, dt, expected = case\n offset = LastWeekOfMonth(weekday=weekday)\n assert 
offset.is_on_offset(dt) == expected\n\n @pytest.mark.parametrize(\n "n,weekday,date,tz",\n [\n (4, 6, "1917-05-27 20:55:27.084284178+0200", "Europe/Warsaw"),\n (-4, 5, "2005-08-27 05:01:42.799392561-0500", "America/Rainy_River"),\n ],\n )\n def test_last_week_of_month_on_offset(self, n, weekday, date, tz):\n # GH 19036, GH 18977 _adjust_dst was incorrect for LastWeekOfMonth\n offset = LastWeekOfMonth(n=n, weekday=weekday)\n ts = Timestamp(date, tz=tz)\n slow = (ts + offset) - offset == ts\n fast = offset.is_on_offset(ts)\n assert fast == slow\n\n def test_repr(self):\n assert (\n repr(LastWeekOfMonth(n=2, weekday=1)) == "<2 * LastWeekOfMonths: weekday=1>"\n )\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_week.py | test_week.py | Python | 12,330 | 0.95 | 0.079772 | 0.057047 | node-utils | 52 | 2024-10-26T06:59:22.270930 | Apache-2.0 | true | 748a295db78548aa0418b9b7d45c8009 |
"""\nTests for the following offsets:\n- YearBegin\n- YearEnd\n"""\nfrom __future__ import annotations\n\nfrom datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas import Timestamp\nfrom pandas.tests.tseries.offsets.common import (\n assert_is_on_offset,\n assert_offset_equal,\n)\n\nfrom pandas.tseries.offsets import (\n YearBegin,\n YearEnd,\n)\n\n\nclass TestYearBegin:\n def test_misspecified(self):\n with pytest.raises(ValueError, match="Month must go from 1 to 12"):\n YearBegin(month=13)\n\n offset_cases = []\n offset_cases.append(\n (\n YearBegin(),\n {\n datetime(2008, 1, 1): datetime(2009, 1, 1),\n datetime(2008, 6, 30): datetime(2009, 1, 1),\n datetime(2008, 12, 31): datetime(2009, 1, 1),\n datetime(2005, 12, 30): datetime(2006, 1, 1),\n datetime(2005, 12, 31): datetime(2006, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(0),\n {\n datetime(2008, 1, 1): datetime(2008, 1, 1),\n datetime(2008, 6, 30): datetime(2009, 1, 1),\n datetime(2008, 12, 31): datetime(2009, 1, 1),\n datetime(2005, 12, 30): datetime(2006, 1, 1),\n datetime(2005, 12, 31): datetime(2006, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(3),\n {\n datetime(2008, 1, 1): datetime(2011, 1, 1),\n datetime(2008, 6, 30): datetime(2011, 1, 1),\n datetime(2008, 12, 31): datetime(2011, 1, 1),\n datetime(2005, 12, 30): datetime(2008, 1, 1),\n datetime(2005, 12, 31): datetime(2008, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 1, 1),\n datetime(2007, 1, 15): datetime(2007, 1, 1),\n datetime(2008, 6, 30): datetime(2008, 1, 1),\n datetime(2008, 12, 31): datetime(2008, 1, 1),\n datetime(2006, 12, 29): datetime(2006, 1, 1),\n datetime(2006, 12, 30): datetime(2006, 1, 1),\n datetime(2007, 1, 1): datetime(2006, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(-2),\n {\n datetime(2007, 1, 1): datetime(2005, 1, 1),\n datetime(2008, 6, 30): datetime(2007, 1, 1),\n datetime(2008, 12, 31): 
datetime(2007, 1, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(month=4),\n {\n datetime(2007, 4, 1): datetime(2008, 4, 1),\n datetime(2007, 4, 15): datetime(2008, 4, 1),\n datetime(2007, 3, 1): datetime(2007, 4, 1),\n datetime(2007, 12, 15): datetime(2008, 4, 1),\n datetime(2012, 1, 31): datetime(2012, 4, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(0, month=4),\n {\n datetime(2007, 4, 1): datetime(2007, 4, 1),\n datetime(2007, 3, 1): datetime(2007, 4, 1),\n datetime(2007, 12, 15): datetime(2008, 4, 1),\n datetime(2012, 1, 31): datetime(2012, 4, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(4, month=4),\n {\n datetime(2007, 4, 1): datetime(2011, 4, 1),\n datetime(2007, 4, 15): datetime(2011, 4, 1),\n datetime(2007, 3, 1): datetime(2010, 4, 1),\n datetime(2007, 12, 15): datetime(2011, 4, 1),\n datetime(2012, 1, 31): datetime(2015, 4, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(-1, month=4),\n {\n datetime(2007, 4, 1): datetime(2006, 4, 1),\n datetime(2007, 3, 1): datetime(2006, 4, 1),\n datetime(2007, 12, 15): datetime(2007, 4, 1),\n datetime(2012, 1, 31): datetime(2011, 4, 1),\n },\n )\n )\n\n offset_cases.append(\n (\n YearBegin(-3, month=4),\n {\n datetime(2007, 4, 1): datetime(2004, 4, 1),\n datetime(2007, 3, 1): datetime(2004, 4, 1),\n datetime(2007, 12, 15): datetime(2005, 4, 1),\n datetime(2012, 1, 31): datetime(2009, 4, 1),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (YearBegin(), datetime(2007, 1, 3), False),\n (YearBegin(), datetime(2008, 1, 1), True),\n (YearBegin(), datetime(2006, 12, 31), False),\n (YearBegin(), datetime(2006, 1, 2), False),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\nclass 
TestYearEnd:\n def test_misspecified(self):\n with pytest.raises(ValueError, match="Month must go from 1 to 12"):\n YearEnd(month=13)\n\n offset_cases = []\n offset_cases.append(\n (\n YearEnd(),\n {\n datetime(2008, 1, 1): datetime(2008, 12, 31),\n datetime(2008, 6, 30): datetime(2008, 12, 31),\n datetime(2008, 12, 31): datetime(2009, 12, 31),\n datetime(2005, 12, 30): datetime(2005, 12, 31),\n datetime(2005, 12, 31): datetime(2006, 12, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n YearEnd(0),\n {\n datetime(2008, 1, 1): datetime(2008, 12, 31),\n datetime(2008, 6, 30): datetime(2008, 12, 31),\n datetime(2008, 12, 31): datetime(2008, 12, 31),\n datetime(2005, 12, 30): datetime(2005, 12, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n YearEnd(-1),\n {\n datetime(2007, 1, 1): datetime(2006, 12, 31),\n datetime(2008, 6, 30): datetime(2007, 12, 31),\n datetime(2008, 12, 31): datetime(2007, 12, 31),\n datetime(2006, 12, 29): datetime(2005, 12, 31),\n datetime(2006, 12, 30): datetime(2005, 12, 31),\n datetime(2007, 1, 1): datetime(2006, 12, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n YearEnd(-2),\n {\n datetime(2007, 1, 1): datetime(2005, 12, 31),\n datetime(2008, 6, 30): datetime(2006, 12, 31),\n datetime(2008, 12, 31): datetime(2006, 12, 31),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (YearEnd(), datetime(2007, 12, 31), True),\n (YearEnd(), datetime(2008, 1, 1), False),\n (YearEnd(), datetime(2006, 12, 31), True),\n (YearEnd(), datetime(2006, 12, 29), False),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\nclass TestYearEndDiffMonth:\n offset_cases = []\n offset_cases.append(\n (\n YearEnd(month=3),\n {\n datetime(2008, 1, 1): datetime(2008, 3, 31),\n 
datetime(2008, 2, 15): datetime(2008, 3, 31),\n datetime(2008, 3, 31): datetime(2009, 3, 31),\n datetime(2008, 3, 30): datetime(2008, 3, 31),\n datetime(2005, 3, 31): datetime(2006, 3, 31),\n datetime(2006, 7, 30): datetime(2007, 3, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n YearEnd(0, month=3),\n {\n datetime(2008, 1, 1): datetime(2008, 3, 31),\n datetime(2008, 2, 28): datetime(2008, 3, 31),\n datetime(2008, 3, 31): datetime(2008, 3, 31),\n datetime(2005, 3, 30): datetime(2005, 3, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n YearEnd(-1, month=3),\n {\n datetime(2007, 1, 1): datetime(2006, 3, 31),\n datetime(2008, 2, 28): datetime(2007, 3, 31),\n datetime(2008, 3, 31): datetime(2007, 3, 31),\n datetime(2006, 3, 29): datetime(2005, 3, 31),\n datetime(2006, 3, 30): datetime(2005, 3, 31),\n datetime(2007, 3, 1): datetime(2006, 3, 31),\n },\n )\n )\n\n offset_cases.append(\n (\n YearEnd(-2, month=3),\n {\n datetime(2007, 1, 1): datetime(2005, 3, 31),\n datetime(2008, 6, 30): datetime(2007, 3, 31),\n datetime(2008, 3, 31): datetime(2006, 3, 31),\n },\n )\n )\n\n @pytest.mark.parametrize("case", offset_cases)\n def test_offset(self, case):\n offset, cases = case\n for base, expected in cases.items():\n assert_offset_equal(offset, base, expected)\n\n on_offset_cases = [\n (YearEnd(month=3), datetime(2007, 3, 31), True),\n (YearEnd(month=3), datetime(2008, 1, 1), False),\n (YearEnd(month=3), datetime(2006, 3, 31), True),\n (YearEnd(month=3), datetime(2006, 3, 29), False),\n ]\n\n @pytest.mark.parametrize("case", on_offset_cases)\n def test_is_on_offset(self, case):\n offset, dt, expected = case\n assert_is_on_offset(offset, dt, expected)\n\n\ndef test_add_out_of_pydatetime_range():\n # GH#50348 don't raise in Timestamp.replace\n ts = Timestamp(np.datetime64("-20000-12-31"))\n off = YearEnd()\n\n result = ts + off\n # TODO(cython3): "arg: datetime" annotation will impose\n # datetime limitations on Timestamp. 
The fused type below works in cy3\n # ctypedef fused datetimelike:\n # _Timestamp\n # datetime\n # expected = Timestamp(np.datetime64("-19999-12-31"))\n # assert result == expected\n assert result.year in (-19999, 1973)\n assert result.month == 12\n assert result.day == 31\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\test_year.py | test_year.py | Python | 10,455 | 0.95 | 0.047198 | 0.026667 | node-utils | 190 | 2024-08-20T12:27:37.970666 | MIT | true | 93963bb39df728857849ce372baed659 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\common.cpython-313.pyc | common.cpython-313.pyc | Other | 1,573 | 0.95 | 0.066667 | 0 | python-kit | 362 | 2024-07-28T07:08:44.219051 | BSD-3-Clause | true | 958703f998bad41df2a943ab874bec76 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_business_day.cpython-313.pyc | test_business_day.cpython-313.pyc | Other | 10,632 | 0.8 | 0.010989 | 0.045455 | react-lib | 218 | 2024-10-28T23:13:47.185801 | Apache-2.0 | true | 4e764cffe1c218e3545b0b1ce4ca266d |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_business_hour.cpython-313.pyc | test_business_hour.cpython-313.pyc | Other | 57,560 | 0.8 | 0.001311 | 0.005362 | react-lib | 835 | 2025-05-31T17:46:56.392400 | MIT | true | 6a0e4bdcd0e3e15822d67b485b61dcf2 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_business_month.cpython-313.pyc | test_business_month.cpython-313.pyc | Other | 8,504 | 0.8 | 0.010204 | 0 | awesome-app | 974 | 2024-03-27T00:20:13.045230 | MIT | true | a2d8bd71d69a7c01631ad909c74fac2a |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_business_quarter.cpython-313.pyc | test_business_quarter.cpython-313.pyc | Other | 14,574 | 0.8 | 0.0025 | 0 | vue-tools | 325 | 2024-07-27T06:01:52.831326 | Apache-2.0 | true | 4c8a1c8ebf31631b82db086ba7e65a7d |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_business_year.cpython-313.pyc | test_business_year.cpython-313.pyc | Other | 8,570 | 0.8 | 0.009174 | 0 | node-utils | 236 | 2023-09-25T11:41:39.831580 | MIT | true | 922a94ffec4d243c1bb0d4e73eebcd64 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_common.cpython-313.pyc | test_common.cpython-313.pyc | Other | 8,215 | 0.8 | 0 | 0 | react-lib | 15 | 2024-02-28T10:18:26.761667 | Apache-2.0 | true | f7392311e66acb73c4e1b35b4e3a1311 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_custom_business_day.cpython-313.pyc | test_custom_business_day.cpython-313.pyc | Other | 5,445 | 0.8 | 0.02 | 0 | react-lib | 269 | 2024-10-15T18:03:20.664714 | BSD-3-Clause | true | 66c4830a0465231cc64751706b0df876 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_custom_business_hour.cpython-313.pyc | test_custom_business_hour.cpython-313.pyc | Other | 16,105 | 0.8 | 0.005714 | 0.006061 | react-lib | 864 | 2024-11-27T05:37:12.946210 | Apache-2.0 | true | 1a2eba70b3289e5244fbfb082e869cc0 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_custom_business_month.cpython-313.pyc | test_custom_business_month.cpython-313.pyc | Other | 18,991 | 0.8 | 0.004255 | 0.017167 | vue-tools | 303 | 2024-04-14T19:59:43.127448 | GPL-3.0 | true | 0266fc9416dd40a7025d6e74754b924c |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_dst.cpython-313.pyc | test_dst.cpython-313.pyc | Other | 9,962 | 0.8 | 0.020619 | 0.032609 | vue-tools | 150 | 2024-11-04T08:24:05.638506 | GPL-3.0 | true | e53889c5f077c97a5f21a87672c27e5e |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_easter.cpython-313.pyc | test_easter.cpython-313.pyc | Other | 1,984 | 0.8 | 0.043478 | 0 | react-lib | 211 | 2024-08-09T18:09:42.890434 | GPL-3.0 | true | 62293f4fb2e551ff90b3fcea53362339 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_fiscal.cpython-313.pyc | test_fiscal.cpython-313.pyc | Other | 27,431 | 0.8 | 0.00232 | 0 | awesome-app | 425 | 2023-08-01T18:59:20.967175 | GPL-3.0 | true | 858bb88dd290f6602af5bd2ad6cf6286 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_index.cpython-313.pyc | test_index.cpython-313.pyc | Other | 1,681 | 0.8 | 0.034483 | 0 | vue-tools | 502 | 2024-05-09T22:33:59.668215 | Apache-2.0 | true | 7bb9bb6914bd4e9536f277627ea95330 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_month.cpython-313.pyc | test_month.cpython-313.pyc | Other | 24,887 | 0.8 | 0.003534 | 0 | react-lib | 803 | 2023-07-18T14:27:53.444979 | GPL-3.0 | true | c0757ca9c43b7f0d4e3dd146692defec |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_offsets.cpython-313.pyc | test_offsets.cpython-313.pyc | Other | 50,957 | 0.95 | 0.00639 | 0.008197 | node-utils | 490 | 2023-11-17T16:33:23.485808 | Apache-2.0 | true | cbc1498418441c366c28f654cf813b94 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_offsets_properties.cpython-313.pyc | test_offsets_properties.cpython-313.pyc | Other | 2,157 | 0.8 | 0.068966 | 0 | awesome-app | 517 | 2023-09-07T19:41:30.605685 | BSD-3-Clause | true | 1c7b2c484eeb202ceff5180f9518a58f |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_quarter.cpython-313.pyc | test_quarter.cpython-313.pyc | Other | 13,907 | 0.8 | 0.002933 | 0.003003 | vue-tools | 870 | 2024-12-28T11:19:36.224174 | BSD-3-Clause | true | 1607fe0ad4db6834329e90a575721510 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_ticks.cpython-313.pyc | test_ticks.cpython-313.pyc | Other | 19,308 | 0.8 | 0.012346 | 0 | react-lib | 442 | 2024-03-25T21:17:22.539789 | BSD-3-Clause | true | 0ea1287d127935b135e79a7c85903fd9 |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_week.cpython-313.pyc | test_week.cpython-313.pyc | Other | 17,193 | 0.8 | 0.003922 | 0.012048 | vue-tools | 955 | 2025-03-30T01:40:40.800884 | MIT | true | 62e237b9a90bd032a68434c00fc69b0d |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\test_year.cpython-313.pyc | test_year.cpython-313.pyc | Other | 11,889 | 0.8 | 0.007407 | 0 | awesome-app | 783 | 2024-08-30T00:43:22.198743 | Apache-2.0 | true | 937fa86b8bffd0a317421b8e2e77565f |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\offsets\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 203 | 0.7 | 0 | 0 | awesome-app | 922 | 2025-01-04T03:46:02.120621 | Apache-2.0 | true | 23aa15cc99f765e93810804b7c00788d |
\n\n | .venv\Lib\site-packages\pandas\tests\tseries\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 195 | 0.7 | 0 | 0 | vue-tools | 850 | 2025-02-18T13:54:42.857810 | BSD-3-Clause | true | b4ac12bf4057ba9093434e3f72512f4b |
"""Tests that the tslibs API is locked down"""\n\nfrom pandas._libs import tslibs\n\n\ndef test_namespace():\n submodules = [\n "base",\n "ccalendar",\n "conversion",\n "dtypes",\n "fields",\n "nattype",\n "np_datetime",\n "offsets",\n "parsing",\n "period",\n "strptime",\n "vectorized",\n "timedeltas",\n "timestamps",\n "timezones",\n "tzconversion",\n ]\n\n api = [\n "BaseOffset",\n "NaT",\n "NaTType",\n "iNaT",\n "nat_strings",\n "OutOfBoundsDatetime",\n "OutOfBoundsTimedelta",\n "Period",\n "IncompatibleFrequency",\n "Resolution",\n "Tick",\n "Timedelta",\n "dt64arr_to_periodarr",\n "Timestamp",\n "is_date_array_normalized",\n "ints_to_pydatetime",\n "normalize_i8_timestamps",\n "get_resolution",\n "delta_to_nanoseconds",\n "ints_to_pytimedelta",\n "localize_pydatetime",\n "tz_convert_from_utc",\n "tz_convert_from_utc_single",\n "to_offset",\n "tz_compare",\n "is_unitless",\n "astype_overflowsafe",\n "get_unit_from_dtype",\n "periods_per_day",\n "periods_per_second",\n "guess_datetime_format",\n "add_overflowsafe",\n "get_supported_dtype",\n "is_supported_dtype",\n ]\n\n expected = set(submodules + api)\n names = [x for x in dir(tslibs) if not x.startswith("__")]\n assert set(names) == expected\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_api.py | test_api.py | Python | 1,525 | 0.85 | 0.046154 | 0 | vue-tools | 352 | 2025-04-02T13:07:47.907348 | MIT | true | 6f3bd813f9f8f0a66900eeb0755108a6 |
from datetime import (\n date,\n datetime,\n timedelta,\n timezone,\n)\n\nfrom dateutil.tz.tz import tzoffset\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import (\n NaT,\n iNaT,\n tslib,\n)\nfrom pandas._libs.tslibs.dtypes import NpyDatetimeUnit\nfrom pandas._libs.tslibs.np_datetime import OutOfBoundsDatetime\n\nfrom pandas import Timestamp\nimport pandas._testing as tm\n\ncreso_infer = NpyDatetimeUnit.NPY_FR_GENERIC.value\n\n\nclass TestArrayToDatetimeResolutionInference:\n # TODO: tests that include tzs, ints\n\n def test_infer_all_nat(self):\n arr = np.array([NaT, np.nan], dtype=object)\n result, tz = tslib.array_to_datetime(arr, creso=creso_infer)\n assert tz is None\n assert result.dtype == "M8[s]"\n\n def test_infer_homogeoneous_datetimes(self):\n dt = datetime(2023, 10, 27, 18, 3, 5, 678000)\n arr = np.array([dt, dt, dt], dtype=object)\n result, tz = tslib.array_to_datetime(arr, creso=creso_infer)\n assert tz is None\n expected = np.array([dt, dt, dt], dtype="M8[us]")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_infer_homogeoneous_date_objects(self):\n dt = datetime(2023, 10, 27, 18, 3, 5, 678000)\n dt2 = dt.date()\n arr = np.array([None, dt2, dt2, dt2], dtype=object)\n result, tz = tslib.array_to_datetime(arr, creso=creso_infer)\n assert tz is None\n expected = np.array([np.datetime64("NaT"), dt2, dt2, dt2], dtype="M8[s]")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_infer_homogeoneous_dt64(self):\n dt = datetime(2023, 10, 27, 18, 3, 5, 678000)\n dt64 = np.datetime64(dt, "ms")\n arr = np.array([None, dt64, dt64, dt64], dtype=object)\n result, tz = tslib.array_to_datetime(arr, creso=creso_infer)\n assert tz is None\n expected = np.array([np.datetime64("NaT"), dt64, dt64, dt64], dtype="M8[ms]")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_infer_homogeoneous_timestamps(self):\n dt = datetime(2023, 10, 27, 18, 3, 5, 678000)\n ts = Timestamp(dt).as_unit("ns")\n arr = np.array([None, ts, ts, ts], 
dtype=object)\n result, tz = tslib.array_to_datetime(arr, creso=creso_infer)\n assert tz is None\n expected = np.array([np.datetime64("NaT")] + [ts.asm8] * 3, dtype="M8[ns]")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_infer_homogeoneous_datetimes_strings(self):\n item = "2023-10-27 18:03:05.678000"\n arr = np.array([None, item, item, item], dtype=object)\n result, tz = tslib.array_to_datetime(arr, creso=creso_infer)\n assert tz is None\n expected = np.array([np.datetime64("NaT"), item, item, item], dtype="M8[us]")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_infer_heterogeneous(self):\n dtstr = "2023-10-27 18:03:05.678000"\n\n arr = np.array([dtstr, dtstr[:-3], dtstr[:-7], None], dtype=object)\n result, tz = tslib.array_to_datetime(arr, creso=creso_infer)\n assert tz is None\n expected = np.array(arr, dtype="M8[us]")\n tm.assert_numpy_array_equal(result, expected)\n\n result, tz = tslib.array_to_datetime(arr[::-1], creso=creso_infer)\n assert tz is None\n tm.assert_numpy_array_equal(result, expected[::-1])\n\n @pytest.mark.parametrize(\n "item", [float("nan"), NaT.value, float(NaT.value), "NaT", ""]\n )\n def test_infer_with_nat_int_float_str(self, item):\n # floats/ints get inferred to nanos *unless* they are NaN/iNaT,\n # similar NaT string gets treated like NaT scalar (ignored for resolution)\n dt = datetime(2023, 11, 15, 15, 5, 6)\n\n arr = np.array([dt, item], dtype=object)\n result, tz = tslib.array_to_datetime(arr, creso=creso_infer)\n assert tz is None\n expected = np.array([dt, np.datetime64("NaT")], dtype="M8[us]")\n tm.assert_numpy_array_equal(result, expected)\n\n result2, tz2 = tslib.array_to_datetime(arr[::-1], creso=creso_infer)\n assert tz2 is None\n tm.assert_numpy_array_equal(result2, expected[::-1])\n\n\nclass TestArrayToDatetimeWithTZResolutionInference:\n def test_array_to_datetime_with_tz_resolution(self):\n tz = tzoffset("custom", 3600)\n vals = np.array(["2016-01-01 02:03:04.567", NaT], dtype=object)\n res = 
tslib.array_to_datetime_with_tz(vals, tz, False, False, creso_infer)\n assert res.dtype == "M8[ms]"\n\n vals2 = np.array([datetime(2016, 1, 1, 2, 3, 4), NaT], dtype=object)\n res2 = tslib.array_to_datetime_with_tz(vals2, tz, False, False, creso_infer)\n assert res2.dtype == "M8[us]"\n\n vals3 = np.array([NaT, np.datetime64(12345, "s")], dtype=object)\n res3 = tslib.array_to_datetime_with_tz(vals3, tz, False, False, creso_infer)\n assert res3.dtype == "M8[s]"\n\n def test_array_to_datetime_with_tz_resolution_all_nat(self):\n tz = tzoffset("custom", 3600)\n vals = np.array(["NaT"], dtype=object)\n res = tslib.array_to_datetime_with_tz(vals, tz, False, False, creso_infer)\n assert res.dtype == "M8[s]"\n\n vals2 = np.array([NaT, NaT], dtype=object)\n res2 = tslib.array_to_datetime_with_tz(vals2, tz, False, False, creso_infer)\n assert res2.dtype == "M8[s]"\n\n\n@pytest.mark.parametrize(\n "data,expected",\n [\n (\n ["01-01-2013", "01-02-2013"],\n [\n "2013-01-01T00:00:00.000000000",\n "2013-01-02T00:00:00.000000000",\n ],\n ),\n (\n ["Mon Sep 16 2013", "Tue Sep 17 2013"],\n [\n "2013-09-16T00:00:00.000000000",\n "2013-09-17T00:00:00.000000000",\n ],\n ),\n ],\n)\ndef test_parsing_valid_dates(data, expected):\n arr = np.array(data, dtype=object)\n result, _ = tslib.array_to_datetime(arr)\n\n expected = np.array(expected, dtype="M8[ns]")\n tm.assert_numpy_array_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "dt_string, expected_tz",\n [\n ["01-01-2013 08:00:00+08:00", 480],\n ["2013-01-01T08:00:00.000000000+0800", 480],\n ["2012-12-31T16:00:00.000000000-0800", -480],\n ["12-31-2012 23:00:00-01:00", -60],\n ],\n)\ndef test_parsing_timezone_offsets(dt_string, expected_tz):\n # All of these datetime strings with offsets are equivalent\n # to the same datetime after the timezone offset is added.\n arr = np.array(["01-01-2013 00:00:00"], dtype=object)\n expected, _ = tslib.array_to_datetime(arr)\n\n arr = np.array([dt_string], dtype=object)\n result, result_tz = 
tslib.array_to_datetime(arr)\n\n tm.assert_numpy_array_equal(result, expected)\n assert result_tz == timezone(timedelta(minutes=expected_tz))\n\n\ndef test_parsing_non_iso_timezone_offset():\n dt_string = "01-01-2013T00:00:00.000000000+0000"\n arr = np.array([dt_string], dtype=object)\n\n with tm.assert_produces_warning(None):\n # GH#50949 should not get tzlocal-deprecation warning here\n result, result_tz = tslib.array_to_datetime(arr)\n expected = np.array([np.datetime64("2013-01-01 00:00:00.000000000")])\n\n tm.assert_numpy_array_equal(result, expected)\n assert result_tz is timezone.utc\n\n\ndef test_parsing_different_timezone_offsets():\n # see gh-17697\n data = ["2015-11-18 15:30:00+05:30", "2015-11-18 15:30:00+06:30"]\n data = np.array(data, dtype=object)\n\n msg = "parsing datetimes with mixed time zones will raise an error"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result, result_tz = tslib.array_to_datetime(data)\n expected = np.array(\n [\n datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 19800)),\n datetime(2015, 11, 18, 15, 30, tzinfo=tzoffset(None, 23400)),\n ],\n dtype=object,\n )\n\n tm.assert_numpy_array_equal(result, expected)\n assert result_tz is None\n\n\n@pytest.mark.parametrize(\n "data", [["-352.737091", "183.575577"], ["1", "2", "3", "4", "5"]]\n)\ndef test_number_looking_strings_not_into_datetime(data):\n # see gh-4601\n #\n # These strings don't look like datetimes, so\n # they shouldn't be attempted to be converted.\n arr = np.array(data, dtype=object)\n result, _ = tslib.array_to_datetime(arr, errors="ignore")\n\n tm.assert_numpy_array_equal(result, arr)\n\n\n@pytest.mark.parametrize(\n "invalid_date",\n [\n date(1000, 1, 1),\n datetime(1000, 1, 1),\n "1000-01-01",\n "Jan 1, 1000",\n np.datetime64("1000-01-01"),\n ],\n)\n@pytest.mark.parametrize("errors", ["coerce", "raise"])\ndef test_coerce_outside_ns_bounds(invalid_date, errors):\n arr = np.array([invalid_date], dtype="object")\n kwargs = {"values": arr, 
"errors": errors}\n\n if errors == "raise":\n msg = "^Out of bounds nanosecond timestamp: .*, at position 0$"\n\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n tslib.array_to_datetime(**kwargs)\n else: # coerce.\n result, _ = tslib.array_to_datetime(**kwargs)\n expected = np.array([iNaT], dtype="M8[ns]")\n\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_coerce_outside_ns_bounds_one_valid():\n arr = np.array(["1/1/1000", "1/1/2000"], dtype=object)\n result, _ = tslib.array_to_datetime(arr, errors="coerce")\n\n expected = [iNaT, "2000-01-01T00:00:00.000000000"]\n expected = np.array(expected, dtype="M8[ns]")\n\n tm.assert_numpy_array_equal(result, expected)\n\n\n@pytest.mark.parametrize("errors", ["ignore", "coerce"])\ndef test_coerce_of_invalid_datetimes(errors):\n arr = np.array(["01-01-2013", "not_a_date", "1"], dtype=object)\n kwargs = {"values": arr, "errors": errors}\n\n if errors == "ignore":\n # Without coercing, the presence of any invalid\n # dates prevents any values from being converted.\n result, _ = tslib.array_to_datetime(**kwargs)\n tm.assert_numpy_array_equal(result, arr)\n else: # coerce.\n # With coercing, the invalid dates becomes iNaT\n result, _ = tslib.array_to_datetime(arr, errors="coerce")\n expected = ["2013-01-01T00:00:00.000000000", iNaT, iNaT]\n\n tm.assert_numpy_array_equal(result, np.array(expected, dtype="M8[ns]"))\n\n\ndef test_to_datetime_barely_out_of_bounds():\n # see gh-19382, gh-19529\n #\n # Close enough to bounds that dropping nanos\n # would result in an in-bounds datetime.\n arr = np.array(["2262-04-11 23:47:16.854775808"], dtype=object)\n msg = "^Out of bounds nanosecond timestamp: 2262-04-11 23:47:16, at position 0$"\n\n with pytest.raises(tslib.OutOfBoundsDatetime, match=msg):\n tslib.array_to_datetime(arr)\n\n\n@pytest.mark.parametrize(\n "timestamp",\n [\n # Close enough to bounds that scaling micros to nanos overflows\n # but adding nanos would result in an in-bounds datetime.\n 
"1677-09-21T00:12:43.145224193",\n "1677-09-21T00:12:43.145224999",\n # this always worked\n "1677-09-21T00:12:43.145225000",\n ],\n)\ndef test_to_datetime_barely_inside_bounds(timestamp):\n # see gh-57150\n result, _ = tslib.array_to_datetime(np.array([timestamp], dtype=object))\n tm.assert_numpy_array_equal(result, np.array([timestamp], dtype="M8[ns]"))\n\n\nclass SubDatetime(datetime):\n pass\n\n\n@pytest.mark.parametrize(\n "data,expected",\n [\n ([SubDatetime(2000, 1, 1)], ["2000-01-01T00:00:00.000000000"]),\n ([datetime(2000, 1, 1)], ["2000-01-01T00:00:00.000000000"]),\n ([Timestamp(2000, 1, 1)], ["2000-01-01T00:00:00.000000000"]),\n ],\n)\ndef test_datetime_subclass(data, expected):\n # GH 25851\n # ensure that subclassed datetime works with\n # array_to_datetime\n\n arr = np.array(data, dtype=object)\n result, _ = tslib.array_to_datetime(arr)\n\n expected = np.array(expected, dtype="M8[ns]")\n tm.assert_numpy_array_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_array_to_datetime.py | test_array_to_datetime.py | Python | 11,871 | 0.95 | 0.080119 | 0.092251 | vue-tools | 693 | 2023-11-03T16:34:03.457934 | BSD-3-Clause | true | f6851fbbce9b2bc76e8094e254cf5465 |
from datetime import (\n date,\n datetime,\n)\n\nfrom hypothesis import given\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import ccalendar\n\nfrom pandas._testing._hypothesis import DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ\n\n\n@pytest.mark.parametrize(\n "date_tuple,expected",\n [\n ((2001, 3, 1), 60),\n ((2004, 3, 1), 61),\n ((1907, 12, 31), 365), # End-of-year, non-leap year.\n ((2004, 12, 31), 366), # End-of-year, leap year.\n ],\n)\ndef test_get_day_of_year_numeric(date_tuple, expected):\n assert ccalendar.get_day_of_year(*date_tuple) == expected\n\n\ndef test_get_day_of_year_dt():\n dt = datetime.fromordinal(1 + np.random.default_rng(2).integers(365 * 4000))\n result = ccalendar.get_day_of_year(dt.year, dt.month, dt.day)\n\n expected = (dt - dt.replace(month=1, day=1)).days + 1\n assert result == expected\n\n\n@pytest.mark.parametrize(\n "input_date_tuple, expected_iso_tuple",\n [\n [(2020, 1, 1), (2020, 1, 3)],\n [(2019, 12, 31), (2020, 1, 2)],\n [(2019, 12, 30), (2020, 1, 1)],\n [(2009, 12, 31), (2009, 53, 4)],\n [(2010, 1, 1), (2009, 53, 5)],\n [(2010, 1, 3), (2009, 53, 7)],\n [(2010, 1, 4), (2010, 1, 1)],\n [(2006, 1, 1), (2005, 52, 7)],\n [(2005, 12, 31), (2005, 52, 6)],\n [(2008, 12, 28), (2008, 52, 7)],\n [(2008, 12, 29), (2009, 1, 1)],\n ],\n)\ndef test_dt_correct_iso_8601_year_week_and_day(input_date_tuple, expected_iso_tuple):\n result = ccalendar.get_iso_calendar(*input_date_tuple)\n expected_from_date_isocalendar = date(*input_date_tuple).isocalendar()\n assert result == expected_from_date_isocalendar\n assert result == expected_iso_tuple\n\n\n@given(DATETIME_IN_PD_TIMESTAMP_RANGE_NO_TZ)\ndef test_isocalendar(dt):\n expected = dt.isocalendar()\n result = ccalendar.get_iso_calendar(dt.year, dt.month, dt.day)\n assert result == expected\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_ccalendar.py | test_ccalendar.py | Python | 1,903 | 0.95 | 0.063492 | 0 | react-lib | 554 | 2024-06-24T06:17:12.884619 | MIT | true | 
29842422899fea9fefbaa855f7755cd1 |
from datetime import datetime\n\nimport numpy as np\nimport pytest\nfrom pytz import UTC\n\nfrom pandas._libs.tslibs import (\n OutOfBoundsTimedelta,\n astype_overflowsafe,\n conversion,\n iNaT,\n timezones,\n tz_convert_from_utc,\n tzconversion,\n)\n\nfrom pandas import (\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\n\n\ndef _compare_utc_to_local(tz_didx):\n def f(x):\n return tzconversion.tz_convert_from_utc_single(x, tz_didx.tz)\n\n result = tz_convert_from_utc(tz_didx.asi8, tz_didx.tz)\n expected = np.vectorize(f)(tz_didx.asi8)\n\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef _compare_local_to_utc(tz_didx, naive_didx):\n # Check that tz_localize behaves the same vectorized and pointwise.\n err1 = err2 = None\n try:\n result = tzconversion.tz_localize_to_utc(naive_didx.asi8, tz_didx.tz)\n err1 = None\n except Exception as err:\n err1 = err\n\n try:\n expected = naive_didx.map(lambda x: x.tz_localize(tz_didx.tz)).asi8\n except Exception as err:\n err2 = err\n\n if err1 is not None:\n assert type(err1) == type(err2)\n else:\n assert err2 is None\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_tz_localize_to_utc_copies():\n # GH#46460\n arr = np.arange(5, dtype="i8")\n result = tz_convert_from_utc(arr, tz=UTC)\n tm.assert_numpy_array_equal(result, arr)\n assert not np.shares_memory(arr, result)\n\n result = tz_convert_from_utc(arr, tz=None)\n tm.assert_numpy_array_equal(result, arr)\n assert not np.shares_memory(arr, result)\n\n\ndef test_tz_convert_single_matches_tz_convert_hourly(tz_aware_fixture):\n tz = tz_aware_fixture\n tz_didx = date_range("2014-03-01", "2015-01-10", freq="h", tz=tz)\n naive_didx = date_range("2014-03-01", "2015-01-10", freq="h")\n\n _compare_utc_to_local(tz_didx)\n _compare_local_to_utc(tz_didx, naive_didx)\n\n\n@pytest.mark.parametrize("freq", ["D", "YE"])\ndef test_tz_convert_single_matches_tz_convert(tz_aware_fixture, freq):\n tz = tz_aware_fixture\n tz_didx = date_range("2018-01-01", "2020-01-01", 
freq=freq, tz=tz)\n naive_didx = date_range("2018-01-01", "2020-01-01", freq=freq)\n\n _compare_utc_to_local(tz_didx)\n _compare_local_to_utc(tz_didx, naive_didx)\n\n\n@pytest.mark.parametrize(\n "arr",\n [\n pytest.param(np.array([], dtype=np.int64), id="empty"),\n pytest.param(np.array([iNaT], dtype=np.int64), id="all_nat"),\n ],\n)\ndef test_tz_convert_corner(arr):\n result = tz_convert_from_utc(arr, timezones.maybe_get_tz("Asia/Tokyo"))\n tm.assert_numpy_array_equal(result, arr)\n\n\ndef test_tz_convert_readonly():\n # GH#35530\n arr = np.array([0], dtype=np.int64)\n arr.setflags(write=False)\n result = tz_convert_from_utc(arr, UTC)\n tm.assert_numpy_array_equal(result, arr)\n\n\n@pytest.mark.parametrize("copy", [True, False])\n@pytest.mark.parametrize("dtype", ["M8[ns]", "M8[s]"])\ndef test_length_zero_copy(dtype, copy):\n arr = np.array([], dtype=dtype)\n result = astype_overflowsafe(arr, copy=copy, dtype=np.dtype("M8[ns]"))\n if copy:\n assert not np.shares_memory(result, arr)\n elif arr.dtype == result.dtype:\n assert result is arr\n else:\n assert not np.shares_memory(result, arr)\n\n\ndef test_ensure_datetime64ns_bigendian():\n # GH#29684\n arr = np.array([np.datetime64(1, "ms")], dtype=">M8[ms]")\n result = astype_overflowsafe(arr, dtype=np.dtype("M8[ns]"))\n\n expected = np.array([np.datetime64(1, "ms")], dtype="M8[ns]")\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_ensure_timedelta64ns_overflows():\n arr = np.arange(10).astype("m8[Y]") * 100\n msg = r"Cannot convert 300 years to timedelta64\[ns\] without overflow"\n with pytest.raises(OutOfBoundsTimedelta, match=msg):\n astype_overflowsafe(arr, dtype=np.dtype("m8[ns]"))\n\n\nclass SubDatetime(datetime):\n pass\n\n\n@pytest.mark.parametrize(\n "dt, expected",\n [\n pytest.param(\n Timestamp("2000-01-01"), Timestamp("2000-01-01", tz=UTC), id="timestamp"\n ),\n pytest.param(\n datetime(2000, 1, 1), datetime(2000, 1, 1, tzinfo=UTC), id="datetime"\n ),\n pytest.param(\n SubDatetime(2000, 1, 
1),\n SubDatetime(2000, 1, 1, tzinfo=UTC),\n id="subclassed_datetime",\n ),\n ],\n)\ndef test_localize_pydatetime_dt_types(dt, expected):\n # GH 25851\n # ensure that subclassed datetime works with\n # localize_pydatetime\n result = conversion.localize_pydatetime(dt, UTC)\n assert result == expected\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_conversion.py | test_conversion.py | Python | 4,555 | 0.95 | 0.10625 | 0.056 | python-kit | 741 | 2024-10-21T21:10:25.526019 | MIT | true | 9a9e0cd9b5ca769a6258ef937f242576 |
import numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import fields\n\nimport pandas._testing as tm\n\n\n@pytest.fixture\ndef dtindex():\n dtindex = np.arange(5, dtype=np.int64) * 10**9 * 3600 * 24 * 32\n dtindex.flags.writeable = False\n return dtindex\n\n\ndef test_get_date_name_field_readonly(dtindex):\n # https://github.com/vaexio/vaex/issues/357\n # fields functions shouldn't raise when we pass read-only data\n result = fields.get_date_name_field(dtindex, "month_name")\n expected = np.array(["January", "February", "March", "April", "May"], dtype=object)\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_get_date_field_readonly(dtindex):\n result = fields.get_date_field(dtindex, "Y")\n expected = np.array([1970, 1970, 1970, 1970, 1970], dtype=np.int32)\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_get_start_end_field_readonly(dtindex):\n result = fields.get_start_end_field(dtindex, "is_month_start", None)\n expected = np.array([True, False, False, False, False], dtype=np.bool_)\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_get_timedelta_field_readonly(dtindex):\n # treat dtindex as timedeltas for this next one\n result = fields.get_timedelta_field(dtindex, "seconds")\n expected = np.array([0] * 5, dtype=np.int32)\n tm.assert_numpy_array_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_fields.py | test_fields.py | Python | 1,352 | 0.95 | 0.15 | 0.107143 | awesome-app | 626 | 2025-03-31T17:33:09.597584 | Apache-2.0 | true | 79e0d4561ae34cac27e4369802f24fca |
import pytest\n\nfrom pandas._libs.tslibs.parsing import get_rule_month\n\nfrom pandas.tseries import offsets\n\n\n@pytest.mark.parametrize(\n "obj,expected",\n [\n ("W", "DEC"),\n (offsets.Week().freqstr, "DEC"),\n ("D", "DEC"),\n (offsets.Day().freqstr, "DEC"),\n ("Q", "DEC"),\n (offsets.QuarterEnd(startingMonth=12).freqstr, "DEC"),\n ("Q-JAN", "JAN"),\n (offsets.QuarterEnd(startingMonth=1).freqstr, "JAN"),\n ("Y-DEC", "DEC"),\n (offsets.YearEnd().freqstr, "DEC"),\n ("Y-MAY", "MAY"),\n (offsets.YearEnd(month=5).freqstr, "MAY"),\n ],\n)\ndef test_get_rule_month(obj, expected):\n result = get_rule_month(obj)\n assert result == expected\n | .venv\Lib\site-packages\pandas\tests\tslibs\test_libfrequencies.py | test_libfrequencies.py | Python | 717 | 0.85 | 0.037037 | 0 | python-kit | 538 | 2023-10-19T05:56:10.358737 | BSD-3-Clause | true | 8ac7b3f1d7e2acd80726dd9cd3fc5558 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.