content stringlengths 1 103k ⌀ | path stringlengths 8 216 | filename stringlengths 2 179 | language stringclasses 15
values | size_bytes int64 2 189k | quality_score float64 0.5 0.95 | complexity float64 0 1 | documentation_ratio float64 0 1 | repository stringclasses 5
values | stars int64 0 1k | created_date stringdate 2023-07-10 19:21:08 2025-07-09 19:11:45 | license stringclasses 4
values | is_test bool 2
classes | file_hash stringlengths 32 32 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_pipe.cpython-313.pyc | test_pipe.cpython-313.pyc | Other | 4,071 | 0.8 | 0 | 0 | python-kit | 75 | 2024-11-24T02:48:04.954147 | MIT | true | 5955cba7f8cbac8398eac11091789e0d |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_raises.cpython-313.pyc | test_raises.cpython-313.pyc | Other | 23,857 | 0.95 | 0.100775 | 0.007843 | vue-tools | 555 | 2025-05-09T01:08:09.229265 | MIT | true | f64088a18a735fc7b2483411986d0619 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_reductions.cpython-313.pyc | test_reductions.cpython-313.pyc | Other | 57,993 | 0.75 | 0.002736 | 0.028736 | python-kit | 834 | 2024-11-12T20:01:19.651289 | Apache-2.0 | true | 43e63ac3973382c42e09b17c4730258d |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\test_timegrouper.cpython-313.pyc | test_timegrouper.cpython-313.pyc | Other | 43,163 | 0.95 | 0.002179 | 0.006772 | awesome-app | 216 | 2024-01-21T21:05:02.874252 | Apache-2.0 | true | 8adf9f5989f66ce356ce8c203f3858f3 |
\n\n | .venv\Lib\site-packages\pandas\tests\groupby\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 856 | 0.85 | 0.125 | 0 | react-lib | 372 | 2025-03-15T15:26:09.919305 | GPL-3.0 | true | bb89366c372dfbb0b2d317d9262cfef9 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n Series,\n array,\n)\n\n\n@pytest.fixture(params=[None, False])\ndef sort(request):\n """\n Valid values for the 'sort' parameter used in the Index\n setops methods (intersection, union, etc.)\n\n Caution:\n Don't confuse this one with the "sort" fixture used\n for DataFrame.append or concat. That one has\n parameters [True, False].\n\n We can't combine them as sort=True is not permitted\n in the Index setops methods.\n """\n return request.param\n\n\n@pytest.fixture(params=["D", "3D", "-3D", "h", "2h", "-2h", "min", "2min", "s", "-3s"])\ndef freq_sample(request):\n """\n Valid values for 'freq' parameter used to create date_range and\n timedelta_range..\n """\n return request.param\n\n\n@pytest.fixture(params=[list, tuple, np.array, array, Series])\ndef listlike_box(request):\n """\n Types that may be passed as the indexer to searchsorted.\n """\n return request.param\n | .venv\Lib\site-packages\pandas\tests\indexes\conftest.py | conftest.py | Python | 987 | 0.85 | 0.146341 | 0 | react-lib | 971 | 2023-08-14T22:25:55.137197 | GPL-3.0 | true | b4acc2d9d44c8ce98c6ec6c4dcf630db |
"""\nTests that can be parametrized over _any_ Index object.\n"""\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import InvalidIndexError\n\nimport pandas._testing as tm\n\n\ndef test_boolean_context_compat(index):\n # GH#7897\n with pytest.raises(ValueError, match="The truth value of a"):\n if index:\n pass\n\n with pytest.raises(ValueError, match="The truth value of a"):\n bool(index)\n\n\ndef test_sort(index):\n msg = "cannot sort an Index object in-place, use sort_values instead"\n with pytest.raises(TypeError, match=msg):\n index.sort()\n\n\ndef test_hash_error(index):\n with pytest.raises(TypeError, match=f"unhashable type: '{type(index).__name__}'"):\n hash(index)\n\n\ndef test_mutability(index):\n if not len(index):\n pytest.skip("Test doesn't make sense for empty index")\n msg = "Index does not support mutable operations"\n with pytest.raises(TypeError, match=msg):\n index[0] = index[0]\n\n\n@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\ndef test_map_identity_mapping(index, request):\n # GH#12766\n\n result = index.map(lambda x: x)\n if index.dtype == object and result.dtype in [bool, "string"]:\n assert (index == result).all()\n # TODO: could work that into the 'exact="equiv"'?\n return # FIXME: doesn't belong in this file anymore!\n tm.assert_index_equal(result, index, exact="equiv")\n\n\ndef test_wrong_number_names(index):\n names = index.nlevels * ["apple", "banana", "carrot"]\n with pytest.raises(ValueError, match="^Length"):\n index.names = names\n\n\ndef test_view_preserves_name(index):\n assert index.view().name == index.name\n\n\ndef test_ravel(index):\n # GH#19956 ravel returning ndarray is deprecated, in 2.0 returns a view on self\n res = index.ravel()\n tm.assert_index_equal(res, index)\n\n\nclass TestConversion:\n def test_to_series(self, index):\n # assert that we are creating a copy of the index\n\n ser = index.to_series()\n assert ser.values is not index.values\n assert ser.index 
is not index\n assert ser.name == index.name\n\n def test_to_series_with_arguments(self, index):\n # GH#18699\n\n # index kwarg\n ser = index.to_series(index=index)\n\n assert ser.values is not index.values\n assert ser.index is index\n assert ser.name == index.name\n\n # name kwarg\n ser = index.to_series(name="__test")\n\n assert ser.values is not index.values\n assert ser.index is not index\n assert ser.name != index.name\n\n def test_tolist_matches_list(self, index):\n assert index.tolist() == list(index)\n\n\nclass TestRoundTrips:\n def test_pickle_roundtrip(self, index):\n result = tm.round_trip_pickle(index)\n tm.assert_index_equal(result, index, exact=True)\n if result.nlevels > 1:\n # GH#8367 round-trip with timezone\n assert index.equal_levels(result)\n\n def test_pickle_preserves_name(self, index):\n original_name, index.name = index.name, "foo"\n unpickled = tm.round_trip_pickle(index)\n assert index.equals(unpickled)\n index.name = original_name\n\n\nclass TestIndexing:\n def test_get_loc_listlike_raises_invalid_index_error(self, index):\n # and never TypeError\n key = np.array([0, 1], dtype=np.intp)\n\n with pytest.raises(InvalidIndexError, match=r"\[0 1\]"):\n index.get_loc(key)\n\n with pytest.raises(InvalidIndexError, match=r"\[False True\]"):\n index.get_loc(key.astype(bool))\n\n def test_getitem_ellipsis(self, index):\n # GH#21282\n result = index[...]\n assert result.equals(index)\n assert result is not index\n\n def test_slice_keeps_name(self, index):\n assert index.name == index[1:].name\n\n @pytest.mark.parametrize("item", [101, "no_int", 2.5])\n def test_getitem_error(self, index, item):\n msg = "|".join(\n [\n r"index 101 is out of bounds for axis 0 with size [\d]+",\n re.escape(\n "only integers, slices (`:`), ellipsis (`...`), "\n "numpy.newaxis (`None`) and integer or boolean arrays "\n "are valid indices"\n ),\n "index out of bounds", # string[pyarrow]\n ]\n )\n with pytest.raises(IndexError, match=msg):\n index[item]\n\n\nclass 
TestRendering:\n def test_str(self, index):\n # test the string repr\n index.name = "foo"\n assert "'foo'" in str(index)\n assert type(index).__name__ in str(index)\n\n\nclass TestReductions:\n def test_argmax_axis_invalid(self, index):\n # GH#23081\n msg = r"`axis` must be fewer than the number of dimensions \(1\)"\n with pytest.raises(ValueError, match=msg):\n index.argmax(axis=1)\n with pytest.raises(ValueError, match=msg):\n index.argmin(axis=2)\n with pytest.raises(ValueError, match=msg):\n index.min(axis=-2)\n with pytest.raises(ValueError, match=msg):\n index.max(axis=-3)\n | .venv\Lib\site-packages\pandas\tests\indexes\test_any_index.py | test_any_index.py | Python | 5,143 | 0.95 | 0.174419 | 0.101563 | awesome-app | 516 | 2023-09-10T14:12:43.522540 | BSD-3-Clause | true | 4aa5e9fd98ade790b9a539e3ec8cef23 |
from collections import defaultdict\nfrom datetime import datetime\nfrom functools import partial\nimport math\nimport operator\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import IS64\nfrom pandas.errors import InvalidIndexError\nimport pandas.util._test_decorators as td\n\nfrom pandas.core.dtypes.common import (\n is_any_real_numeric_dtype,\n is_numeric_dtype,\n is_object_dtype,\n)\n\nimport pandas as pd\nfrom pandas import (\n CategoricalIndex,\n DataFrame,\n DatetimeIndex,\n IntervalIndex,\n PeriodIndex,\n RangeIndex,\n Series,\n TimedeltaIndex,\n date_range,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.indexes.api import (\n Index,\n MultiIndex,\n _get_combined_index,\n ensure_index,\n ensure_index_from_sequences,\n)\n\n\nclass TestIndex:\n @pytest.fixture\n def simple_index(self) -> Index:\n return Index(list("abcde"))\n\n def test_can_hold_identifiers(self, simple_index):\n index = simple_index\n key = index[0]\n assert index._can_hold_identifiers_and_holds_name(key) is True\n\n @pytest.mark.parametrize("index", ["datetime"], indirect=True)\n def test_new_axis(self, index):\n # TODO: a bunch of scattered tests check this deprecation is enforced.\n # de-duplicate/centralize them.\n with pytest.raises(ValueError, match="Multi-dimensional indexing"):\n # GH#30588 multi-dimensional indexing deprecated\n index[None, :]\n\n def test_constructor_regular(self, index):\n tm.assert_contains_all(index, index)\n\n @pytest.mark.parametrize("index", ["string"], indirect=True)\n def test_constructor_casting(self, index):\n # casting\n arr = np.array(index)\n new_index = Index(arr)\n tm.assert_contains_all(arr, new_index)\n tm.assert_index_equal(index, new_index)\n\n def test_constructor_copy(self, using_infer_string):\n index = Index(list("abc"), name="name")\n arr = np.array(index)\n new_index = Index(arr, copy=True, name="name")\n assert isinstance(new_index, Index)\n assert new_index.name == "name"\n if 
using_infer_string:\n tm.assert_extension_array_equal(\n new_index.values, pd.array(arr, dtype="str")\n )\n else:\n tm.assert_numpy_array_equal(arr, new_index.values)\n arr[0] = "SOMEBIGLONGSTRING"\n assert new_index[0] != "SOMEBIGLONGSTRING"\n\n @pytest.mark.parametrize("cast_as_obj", [True, False])\n @pytest.mark.parametrize(\n "index",\n [\n date_range(\n "2015-01-01 10:00",\n freq="D",\n periods=3,\n tz="US/Eastern",\n name="Green Eggs & Ham",\n ), # DTI with tz\n date_range("2015-01-01 10:00", freq="D", periods=3), # DTI no tz\n timedelta_range("1 days", freq="D", periods=3), # td\n period_range("2015-01-01", freq="D", periods=3), # period\n ],\n )\n def test_constructor_from_index_dtlike(self, cast_as_obj, index):\n if cast_as_obj:\n with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):\n result = Index(index.astype(object))\n else:\n result = Index(index)\n\n tm.assert_index_equal(result, index)\n\n if isinstance(index, DatetimeIndex):\n assert result.tz == index.tz\n if cast_as_obj:\n # GH#23524 check that Index(dti, dtype=object) does not\n # incorrectly raise ValueError, and that nanoseconds are not\n # dropped\n index += pd.Timedelta(nanoseconds=50)\n result = Index(index, dtype=object)\n assert result.dtype == np.object_\n assert list(result) == list(index)\n\n @pytest.mark.parametrize(\n "index,has_tz",\n [\n (\n date_range("2015-01-01 10:00", freq="D", periods=3, tz="US/Eastern"),\n True,\n ), # datetimetz\n (timedelta_range("1 days", freq="D", periods=3), False), # td\n (period_range("2015-01-01", freq="D", periods=3), False), # period\n ],\n )\n def test_constructor_from_series_dtlike(self, index, has_tz):\n result = Index(Series(index))\n tm.assert_index_equal(result, index)\n\n if has_tz:\n assert result.tz == index.tz\n\n def test_constructor_from_series_freq(self):\n # GH 6273\n # create from a series, passing a freq\n dts = ["1-1-1990", "2-1-1990", "3-1-1990", "4-1-1990", "5-1-1990"]\n expected = DatetimeIndex(dts, 
freq="MS")\n\n s = Series(pd.to_datetime(dts))\n result = DatetimeIndex(s, freq="MS")\n\n tm.assert_index_equal(result, expected)\n\n def test_constructor_from_frame_series_freq(self, using_infer_string):\n # GH 6273\n # create from a series, passing a freq\n dts = ["1-1-1990", "2-1-1990", "3-1-1990", "4-1-1990", "5-1-1990"]\n expected = DatetimeIndex(dts, freq="MS")\n\n df = DataFrame(np.random.default_rng(2).random((5, 3)))\n df["date"] = dts\n result = DatetimeIndex(df["date"], freq="MS")\n dtype = object if not using_infer_string else "str"\n assert df["date"].dtype == dtype\n expected.name = "date"\n tm.assert_index_equal(result, expected)\n\n expected = Series(dts, name="date")\n tm.assert_series_equal(df["date"], expected)\n\n # GH 6274\n # infer freq of same\n if not using_infer_string:\n # Doesn't work with arrow strings\n freq = pd.infer_freq(df["date"])\n assert freq == "MS"\n\n def test_constructor_int_dtype_nan(self):\n # see gh-15187\n data = [np.nan]\n expected = Index(data, dtype=np.float64)\n result = Index(data, dtype="float")\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "klass,dtype,na_val",\n [\n (Index, np.float64, np.nan),\n (DatetimeIndex, "datetime64[ns]", pd.NaT),\n ],\n )\n def test_index_ctor_infer_nan_nat(self, klass, dtype, na_val):\n # GH 13467\n na_list = [na_val, na_val]\n expected = klass(na_list)\n assert expected.dtype == dtype\n\n result = Index(na_list)\n tm.assert_index_equal(result, expected)\n\n result = Index(np.array(na_list))\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "vals,dtype",\n [\n ([1, 2, 3, 4, 5], "int"),\n ([1.1, np.nan, 2.2, 3.0], "float"),\n (["A", "B", "C", np.nan], "obj"),\n ],\n )\n def test_constructor_simple_new(self, vals, dtype):\n index = Index(vals, name=dtype)\n result = index._simple_new(index.values, dtype)\n tm.assert_index_equal(result, index)\n\n @pytest.mark.parametrize("attr", ["values", "asi8"])\n @pytest.mark.parametrize("klass", 
[Index, DatetimeIndex])\n def test_constructor_dtypes_datetime(self, tz_naive_fixture, attr, klass):\n # Test constructing with a datetimetz dtype\n # .values produces numpy datetimes, so these are considered naive\n # .asi8 produces integers, so these are considered epoch timestamps\n # ^the above will be true in a later version. Right now we `.view`\n # the i8 values as NS_DTYPE, effectively treating them as wall times.\n index = date_range("2011-01-01", periods=5)\n arg = getattr(index, attr)\n index = index.tz_localize(tz_naive_fixture)\n dtype = index.dtype\n\n # As of 2.0 astype raises on dt64.astype(dt64tz)\n err = tz_naive_fixture is not None\n msg = "Cannot use .astype to convert from timezone-naive dtype to"\n\n if attr == "asi8":\n result = DatetimeIndex(arg).tz_localize(tz_naive_fixture)\n tm.assert_index_equal(result, index)\n elif klass is Index:\n with pytest.raises(TypeError, match="unexpected keyword"):\n klass(arg, tz=tz_naive_fixture)\n else:\n result = klass(arg, tz=tz_naive_fixture)\n tm.assert_index_equal(result, index)\n\n if attr == "asi8":\n if err:\n with pytest.raises(TypeError, match=msg):\n DatetimeIndex(arg).astype(dtype)\n else:\n result = DatetimeIndex(arg).astype(dtype)\n tm.assert_index_equal(result, index)\n else:\n result = klass(arg, dtype=dtype)\n tm.assert_index_equal(result, index)\n\n if attr == "asi8":\n result = DatetimeIndex(list(arg)).tz_localize(tz_naive_fixture)\n tm.assert_index_equal(result, index)\n elif klass is Index:\n with pytest.raises(TypeError, match="unexpected keyword"):\n klass(arg, tz=tz_naive_fixture)\n else:\n result = klass(list(arg), tz=tz_naive_fixture)\n tm.assert_index_equal(result, index)\n\n if attr == "asi8":\n if err:\n with pytest.raises(TypeError, match=msg):\n DatetimeIndex(list(arg)).astype(dtype)\n else:\n result = DatetimeIndex(list(arg)).astype(dtype)\n tm.assert_index_equal(result, index)\n else:\n result = klass(list(arg), dtype=dtype)\n tm.assert_index_equal(result, index)\n\n 
@pytest.mark.parametrize("attr", ["values", "asi8"])\n @pytest.mark.parametrize("klass", [Index, TimedeltaIndex])\n def test_constructor_dtypes_timedelta(self, attr, klass):\n index = timedelta_range("1 days", periods=5)\n index = index._with_freq(None) # won't be preserved by constructors\n dtype = index.dtype\n\n values = getattr(index, attr)\n\n result = klass(values, dtype=dtype)\n tm.assert_index_equal(result, index)\n\n result = klass(list(values), dtype=dtype)\n tm.assert_index_equal(result, index)\n\n @pytest.mark.parametrize("value", [[], iter([]), (_ for _ in [])])\n @pytest.mark.parametrize(\n "klass",\n [\n Index,\n CategoricalIndex,\n DatetimeIndex,\n TimedeltaIndex,\n ],\n )\n def test_constructor_empty(self, value, klass):\n empty = klass(value)\n assert isinstance(empty, klass)\n assert not len(empty)\n\n @pytest.mark.parametrize(\n "empty,klass",\n [\n (PeriodIndex([], freq="D"), PeriodIndex),\n (PeriodIndex(iter([]), freq="D"), PeriodIndex),\n (PeriodIndex((_ for _ in []), freq="D"), PeriodIndex),\n (RangeIndex(step=1), RangeIndex),\n (MultiIndex(levels=[[1, 2], ["blue", "red"]], codes=[[], []]), MultiIndex),\n ],\n )\n def test_constructor_empty_special(self, empty, klass):\n assert isinstance(empty, klass)\n assert not len(empty)\n\n @pytest.mark.parametrize(\n "index",\n [\n "datetime",\n "float64",\n "float32",\n "int64",\n "int32",\n "period",\n "range",\n "repeats",\n "timedelta",\n "tuples",\n "uint64",\n "uint32",\n ],\n indirect=True,\n )\n def test_view_with_args(self, index):\n index.view("i8")\n\n @pytest.mark.parametrize(\n "index",\n [\n "string",\n pytest.param("categorical", marks=pytest.mark.xfail(reason="gh-25464")),\n "bool-object",\n "bool-dtype",\n "empty",\n ],\n indirect=True,\n )\n def test_view_with_args_object_array_raises(self, index):\n if index.dtype == bool:\n msg = "When changing to a larger dtype"\n with pytest.raises(ValueError, match=msg):\n index.view("i8")\n else:\n msg = (\n r"Cannot change data-type for array 
of references\.|"\n r"Cannot change data-type for object array\.|"\n r"Cannot change data-type for array of strings\.|"\n )\n with pytest.raises(TypeError, match=msg):\n index.view("i8")\n\n @pytest.mark.parametrize(\n "index",\n ["int64", "int32", "range"],\n indirect=True,\n )\n def test_astype(self, index):\n casted = index.astype("i8")\n\n # it works!\n casted.get_loc(5)\n\n # pass on name\n index.name = "foobar"\n casted = index.astype("i8")\n assert casted.name == "foobar"\n\n def test_equals_object(self):\n # same\n assert Index(["a", "b", "c"]).equals(Index(["a", "b", "c"]))\n\n @pytest.mark.parametrize(\n "comp", [Index(["a", "b"]), Index(["a", "b", "d"]), ["a", "b", "c"]]\n )\n def test_not_equals_object(self, comp):\n assert not Index(["a", "b", "c"]).equals(comp)\n\n def test_identical(self):\n # index\n i1 = Index(["a", "b", "c"])\n i2 = Index(["a", "b", "c"])\n\n assert i1.identical(i2)\n\n i1 = i1.rename("foo")\n assert i1.equals(i2)\n assert not i1.identical(i2)\n\n i2 = i2.rename("foo")\n assert i1.identical(i2)\n\n i3 = Index([("a", "a"), ("a", "b"), ("b", "a")])\n i4 = Index([("a", "a"), ("a", "b"), ("b", "a")], tupleize_cols=False)\n assert not i3.identical(i4)\n\n def test_is_(self):\n ind = Index(range(10))\n assert ind.is_(ind)\n assert ind.is_(ind.view().view().view().view())\n assert not ind.is_(Index(range(10)))\n assert not ind.is_(ind.copy())\n assert not ind.is_(ind.copy(deep=False))\n assert not ind.is_(ind[:])\n assert not ind.is_(np.array(range(10)))\n\n # quasi-implementation dependent\n assert ind.is_(ind.view())\n ind2 = ind.view()\n ind2.name = "bob"\n assert ind.is_(ind2)\n assert ind2.is_(ind)\n # doesn't matter if Indices are *actually* views of underlying data,\n assert not ind.is_(Index(ind.values))\n arr = np.array(range(1, 11))\n ind1 = Index(arr, copy=False)\n ind2 = Index(arr, copy=False)\n assert not ind1.is_(ind2)\n\n def test_asof_numeric_vs_bool_raises(self):\n left = Index([1, 2, 3])\n right = Index([True, False], 
dtype=object)\n\n msg = "Cannot compare dtypes int64 and bool"\n with pytest.raises(TypeError, match=msg):\n left.asof(right[0])\n # TODO: should right.asof(left[0]) also raise?\n\n with pytest.raises(InvalidIndexError, match=re.escape(str(right))):\n left.asof(right)\n\n with pytest.raises(InvalidIndexError, match=re.escape(str(left))):\n right.asof(left)\n\n @pytest.mark.parametrize("index", ["string"], indirect=True)\n def test_booleanindex(self, index):\n bool_index = np.ones(len(index), dtype=bool)\n bool_index[5:30:2] = False\n\n sub_index = index[bool_index]\n\n for i, val in enumerate(sub_index):\n assert sub_index.get_loc(val) == i\n\n sub_index = index[list(bool_index)]\n for i, val in enumerate(sub_index):\n assert sub_index.get_loc(val) == i\n\n def test_fancy(self, simple_index):\n index = simple_index\n sl = index[[1, 2, 3]]\n for i in sl:\n assert i == sl[sl.get_loc(i)]\n\n @pytest.mark.parametrize(\n "index",\n ["string", "int64", "int32", "uint64", "uint32", "float64", "float32"],\n indirect=True,\n )\n @pytest.mark.parametrize("dtype", [int, np.bool_])\n def test_empty_fancy(self, index, dtype, request, using_infer_string):\n if dtype is np.bool_ and using_infer_string and index.dtype == "string":\n request.applymarker(pytest.mark.xfail(reason="numpy behavior is buggy"))\n empty_arr = np.array([], dtype=dtype)\n empty_index = type(index)([], dtype=index.dtype)\n\n assert index[[]].identical(empty_index)\n if dtype == np.bool_:\n with tm.assert_produces_warning(FutureWarning, match="is deprecated"):\n assert index[empty_arr].identical(empty_index)\n else:\n assert index[empty_arr].identical(empty_index)\n\n @pytest.mark.parametrize(\n "index",\n ["string", "int64", "int32", "uint64", "uint32", "float64", "float32"],\n indirect=True,\n )\n def test_empty_fancy_raises(self, index):\n # DatetimeIndex is excluded, because it overrides getitem and should\n # be tested separately.\n empty_farr = np.array([], dtype=np.float64)\n empty_index = 
type(index)([], dtype=index.dtype)\n\n assert index[[]].identical(empty_index)\n # np.ndarray only accepts ndarray of int & bool dtypes, so should Index\n msg = r"arrays used as indices must be of integer"\n with pytest.raises(IndexError, match=msg):\n index[empty_farr]\n\n def test_union_dt_as_obj(self, simple_index):\n # TODO: Replace with fixturesult\n index = simple_index\n date_index = date_range("2019-01-01", periods=10)\n first_cat = index.union(date_index)\n second_cat = index.union(index)\n\n appended = Index(np.append(index, date_index.astype("O")))\n\n tm.assert_index_equal(first_cat, appended)\n tm.assert_index_equal(second_cat, index)\n tm.assert_contains_all(index, first_cat)\n tm.assert_contains_all(index, second_cat)\n tm.assert_contains_all(date_index, first_cat)\n\n def test_map_with_tuples(self):\n # GH 12766\n\n # Test that returning a single tuple from an Index\n # returns an Index.\n index = Index(np.arange(3), dtype=np.int64)\n result = index.map(lambda x: (x,))\n expected = Index([(i,) for i in index])\n tm.assert_index_equal(result, expected)\n\n # Test that returning a tuple from a map of a single index\n # returns a MultiIndex object.\n result = index.map(lambda x: (x, x == 1))\n expected = MultiIndex.from_tuples([(i, i == 1) for i in index])\n tm.assert_index_equal(result, expected)\n\n def test_map_with_tuples_mi(self):\n # Test that returning a single object from a MultiIndex\n # returns an Index.\n first_level = ["foo", "bar", "baz"]\n multi_index = MultiIndex.from_tuples(zip(first_level, [1, 2, 3]))\n reduced_index = multi_index.map(lambda x: x[0])\n tm.assert_index_equal(reduced_index, Index(first_level))\n\n @pytest.mark.parametrize(\n "index",\n [\n date_range("2020-01-01", freq="D", periods=10),\n period_range("2020-01-01", freq="D", periods=10),\n timedelta_range("1 day", periods=10),\n ],\n )\n def test_map_tseries_indices_return_index(self, index):\n expected = Index([1] * 10)\n result = index.map(lambda x: 1)\n 
tm.assert_index_equal(expected, result)\n\n def test_map_tseries_indices_accsr_return_index(self):\n date_index = DatetimeIndex(\n date_range("2020-01-01", periods=24, freq="h"), name="hourly"\n )\n result = date_index.map(lambda x: x.hour)\n expected = Index(np.arange(24, dtype="int64"), name="hourly")\n tm.assert_index_equal(result, expected, exact=True)\n\n @pytest.mark.parametrize(\n "mapper",\n [\n lambda values, index: {i: e for e, i in zip(values, index)},\n lambda values, index: Series(values, index),\n ],\n )\n def test_map_dictlike_simple(self, mapper):\n # GH 12756\n expected = Index(["foo", "bar", "baz"])\n index = Index(np.arange(3), dtype=np.int64)\n result = index.map(mapper(expected.values, index))\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "mapper",\n [\n lambda values, index: {i: e for e, i in zip(values, index)},\n lambda values, index: Series(values, index),\n ],\n )\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_map_dictlike(self, index, mapper, request):\n # GH 12756\n if isinstance(index, CategoricalIndex):\n pytest.skip("Tested in test_categorical")\n elif not index.is_unique:\n pytest.skip("Cannot map duplicated index")\n\n rng = np.arange(len(index), 0, -1, dtype=np.int64)\n\n if index.empty:\n # to match proper result coercion for uints\n expected = Index([])\n elif is_numeric_dtype(index.dtype):\n expected = index._constructor(rng, dtype=index.dtype)\n elif type(index) is Index and index.dtype != object:\n # i.e. 
EA-backed, for now just Nullable\n expected = Index(rng, dtype=index.dtype)\n else:\n expected = Index(rng)\n\n result = index.map(mapper(expected, index))\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "mapper",\n [Series(["foo", 2.0, "baz"], index=[0, 2, -1]), {0: "foo", 2: 2.0, -1: "baz"}],\n )\n def test_map_with_non_function_missing_values(self, mapper):\n # GH 12756\n expected = Index([2.0, np.nan, "foo"])\n result = Index([2, 1, 0]).map(mapper)\n\n tm.assert_index_equal(expected, result)\n\n def test_map_na_exclusion(self):\n index = Index([1.5, np.nan, 3, np.nan, 5])\n\n result = index.map(lambda x: x * 2, na_action="ignore")\n expected = index * 2\n tm.assert_index_equal(result, expected)\n\n def test_map_defaultdict(self):\n index = Index([1, 2, 3])\n default_dict = defaultdict(lambda: "blank")\n default_dict[1] = "stuff"\n result = index.map(default_dict)\n expected = Index(["stuff", "blank", "blank"])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("name,expected", [("foo", "foo"), ("bar", None)])\n def test_append_empty_preserve_name(self, name, expected):\n left = Index([], name="foo")\n right = Index([1, 2, 3], name=name)\n\n msg = "The behavior of array concatenation with empty entries is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = left.append(right)\n assert result.name == expected\n\n @pytest.mark.parametrize(\n "index, expected",\n [\n ("string", False),\n ("bool-object", False),\n ("bool-dtype", False),\n ("categorical", False),\n ("int64", True),\n ("int32", True),\n ("uint64", True),\n ("uint32", True),\n ("datetime", False),\n ("float64", True),\n ("float32", True),\n ],\n indirect=["index"],\n )\n def test_is_numeric(self, index, expected):\n assert is_any_real_numeric_dtype(index) is expected\n\n @pytest.mark.parametrize(\n "index, expected",\n [\n ("string", True),\n ("bool-object", True),\n ("bool-dtype", False),\n ("categorical", False),\n ("int64", 
False),\n ("int32", False),\n ("uint64", False),\n ("uint32", False),\n ("datetime", False),\n ("float64", False),\n ("float32", False),\n ],\n indirect=["index"],\n )\n def test_is_object(self, index, expected, using_infer_string):\n if using_infer_string and index.dtype == "string" and expected:\n expected = False\n assert is_object_dtype(index) is expected\n\n def test_summary(self, index):\n index._summary()\n\n def test_format_bug(self):\n # GH 14626\n # windows has different precision on datetime.datetime.now (it doesn't\n # include us since the default for Timestamp shows these but Index\n # formatting does not we are skipping)\n now = datetime.now()\n msg = r"Index\.format is deprecated"\n\n if not str(now).endswith("000"):\n index = Index([now])\n with tm.assert_produces_warning(FutureWarning, match=msg):\n formatted = index.format()\n expected = [str(index[0])]\n assert formatted == expected\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n Index([]).format()\n\n @pytest.mark.parametrize("vals", [[1, 2.0 + 3.0j, 4.0], ["a", "b", "c"]])\n def test_format_missing(self, vals, nulls_fixture):\n # 2845\n vals = list(vals) # Copy for each iteration\n vals.append(nulls_fixture)\n index = Index(vals, dtype=object)\n # TODO: case with complex dtype?\n\n msg = r"Index\.format is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n formatted = index.format()\n null_repr = "NaN" if isinstance(nulls_fixture, float) else str(nulls_fixture)\n expected = [str(index[0]), str(index[1]), str(index[2]), null_repr]\n\n assert formatted == expected\n assert index[3] is nulls_fixture\n\n @pytest.mark.parametrize("op", ["any", "all"])\n def test_logical_compat(self, op, simple_index):\n index = simple_index\n left = getattr(index, op)()\n assert left == getattr(index.values, op)()\n right = getattr(index.to_series(), op)()\n # left might not match right exactly in e.g. 
string cases where the\n # because we use np.any/all instead of .any/all\n assert bool(left) == bool(right)\n\n @pytest.mark.parametrize(\n "index", ["string", "int64", "int32", "float64", "float32"], indirect=True\n )\n def test_drop_by_str_label(self, index):\n n = len(index)\n drop = index[list(range(5, 10))]\n dropped = index.drop(drop)\n\n expected = index[list(range(5)) + list(range(10, n))]\n tm.assert_index_equal(dropped, expected)\n\n dropped = index.drop(index[0])\n expected = index[1:]\n tm.assert_index_equal(dropped, expected)\n\n @pytest.mark.parametrize(\n "index", ["string", "int64", "int32", "float64", "float32"], indirect=True\n )\n @pytest.mark.parametrize("keys", [["foo", "bar"], ["1", "bar"]])\n def test_drop_by_str_label_raises_missing_keys(self, index, keys):\n with pytest.raises(KeyError, match=""):\n index.drop(keys)\n\n @pytest.mark.parametrize(\n "index", ["string", "int64", "int32", "float64", "float32"], indirect=True\n )\n def test_drop_by_str_label_errors_ignore(self, index):\n n = len(index)\n drop = index[list(range(5, 10))]\n mixed = drop.tolist() + ["foo"]\n dropped = index.drop(mixed, errors="ignore")\n\n expected = index[list(range(5)) + list(range(10, n))]\n tm.assert_index_equal(dropped, expected)\n\n dropped = index.drop(["foo", "bar"], errors="ignore")\n expected = index[list(range(n))]\n tm.assert_index_equal(dropped, expected)\n\n def test_drop_by_numeric_label_loc(self):\n # TODO: Parametrize numeric and str tests after self.strIndex fixture\n index = Index([1, 2, 3])\n dropped = index.drop(1)\n expected = Index([2, 3])\n\n tm.assert_index_equal(dropped, expected)\n\n def test_drop_by_numeric_label_raises_missing_keys(self):\n index = Index([1, 2, 3])\n with pytest.raises(KeyError, match=""):\n index.drop([3, 4])\n\n @pytest.mark.parametrize(\n "key,expected", [(4, Index([1, 2, 3])), ([3, 4, 5], Index([1, 2]))]\n )\n def test_drop_by_numeric_label_errors_ignore(self, key, expected):\n index = Index([1, 2, 3])\n dropped = 
index.drop(key, errors="ignore")\n\n tm.assert_index_equal(dropped, expected)\n\n @pytest.mark.parametrize(\n "values",\n [["a", "b", ("c", "d")], ["a", ("c", "d"), "b"], [("c", "d"), "a", "b"]],\n )\n @pytest.mark.parametrize("to_drop", [[("c", "d"), "a"], ["a", ("c", "d")]])\n def test_drop_tuple(self, values, to_drop):\n # GH 18304\n index = Index(values)\n expected = Index(["b"], dtype=object)\n\n result = index.drop(to_drop)\n tm.assert_index_equal(result, expected)\n\n removed = index.drop(to_drop[0])\n for drop_me in to_drop[1], [to_drop[1]]:\n result = removed.drop(drop_me)\n tm.assert_index_equal(result, expected)\n\n removed = index.drop(to_drop[1])\n msg = rf"\"\[{re.escape(to_drop[1].__repr__())}\] not found in axis\""\n for drop_me in to_drop[1], [to_drop[1]]:\n with pytest.raises(KeyError, match=msg):\n removed.drop(drop_me)\n\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_drop_with_duplicates_in_index(self, index):\n # GH38051\n if len(index) == 0 or isinstance(index, MultiIndex):\n pytest.skip("Test doesn't make sense for empty MultiIndex")\n if isinstance(index, IntervalIndex) and not IS64:\n pytest.skip("Cannot test IntervalIndex with int64 dtype on 32 bit platform")\n index = index.unique().repeat(2)\n expected = index[2:]\n result = index.drop(index[0])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "attr",\n [\n "is_monotonic_increasing",\n "is_monotonic_decreasing",\n "_is_strictly_monotonic_increasing",\n "_is_strictly_monotonic_decreasing",\n ],\n )\n def test_is_monotonic_incomparable(self, attr):\n index = Index([5, datetime.now(), 7])\n assert not getattr(index, attr)\n\n @pytest.mark.parametrize("values", [["foo", "bar", "quux"], {"foo", "bar", "quux"}])\n @pytest.mark.parametrize(\n "index,expected",\n [\n (Index(["qux", "baz", "foo", "bar"]), np.array([False, False, True, True])),\n (Index([]), np.array([], dtype=bool)), # empty\n ],\n )\n def test_isin(self, 
values, index, expected):\n result = index.isin(values)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_isin_nan_common_object(\n self, nulls_fixture, nulls_fixture2, using_infer_string\n ):\n # Test cartesian product of null fixtures and ensure that we don't\n # mangle the various types (save a corner case with PyPy)\n idx = Index(["a", nulls_fixture])\n\n # all nans are the same\n if (\n isinstance(nulls_fixture, float)\n and isinstance(nulls_fixture2, float)\n and math.isnan(nulls_fixture)\n and math.isnan(nulls_fixture2)\n ):\n tm.assert_numpy_array_equal(\n idx.isin([nulls_fixture2]),\n np.array([False, True]),\n )\n\n elif nulls_fixture is nulls_fixture2: # should preserve NA type\n tm.assert_numpy_array_equal(\n idx.isin([nulls_fixture2]),\n np.array([False, True]),\n )\n\n elif using_infer_string and idx.dtype == "string":\n tm.assert_numpy_array_equal(\n idx.isin([nulls_fixture2]),\n np.array([False, True]),\n )\n\n else:\n tm.assert_numpy_array_equal(\n idx.isin([nulls_fixture2]),\n np.array([False, False]),\n )\n\n def test_isin_nan_common_float64(self, nulls_fixture, float_numpy_dtype):\n dtype = float_numpy_dtype\n\n if nulls_fixture is pd.NaT or nulls_fixture is pd.NA:\n # Check 1) that we cannot construct a float64 Index with this value\n # and 2) that with an NaN we do not have .isin(nulls_fixture)\n msg = (\n r"float\(\) argument must be a string or a (real )?number, "\n f"not {repr(type(nulls_fixture).__name__)}"\n )\n with pytest.raises(TypeError, match=msg):\n Index([1.0, nulls_fixture], dtype=dtype)\n\n idx = Index([1.0, np.nan], dtype=dtype)\n assert not idx.isin([nulls_fixture]).any()\n return\n\n idx = Index([1.0, nulls_fixture], dtype=dtype)\n res = idx.isin([np.nan])\n tm.assert_numpy_array_equal(res, np.array([False, True]))\n\n # we cannot compare NaT with NaN\n res = idx.isin([pd.NaT])\n tm.assert_numpy_array_equal(res, np.array([False, False]))\n\n @pytest.mark.parametrize("level", [0, -1])\n @pytest.mark.parametrize(\n 
"index",\n [\n Index(["qux", "baz", "foo", "bar"]),\n Index([1.0, 2.0, 3.0, 4.0], dtype=np.float64),\n ],\n )\n def test_isin_level_kwarg(self, level, index):\n values = index.tolist()[-2:] + ["nonexisting"]\n\n expected = np.array([False, False, True, True])\n tm.assert_numpy_array_equal(expected, index.isin(values, level=level))\n\n index.name = "foobar"\n tm.assert_numpy_array_equal(expected, index.isin(values, level="foobar"))\n\n def test_isin_level_kwarg_bad_level_raises(self, index):\n for level in [10, index.nlevels, -(index.nlevels + 1)]:\n with pytest.raises(IndexError, match="Too many levels"):\n index.isin([], level=level)\n\n @pytest.mark.parametrize("label", [1.0, "foobar", "xyzzy", np.nan])\n def test_isin_level_kwarg_bad_label_raises(self, label, index):\n if isinstance(index, MultiIndex):\n index = index.rename(["foo", "bar"] + index.names[2:])\n msg = f"'Level {label} not found'"\n else:\n index = index.rename("foo")\n msg = rf"Requested level \({label}\) does not match index name \(foo\)"\n with pytest.raises(KeyError, match=msg):\n index.isin([], level=label)\n\n @pytest.mark.parametrize("empty", [[], Series(dtype=object), np.array([])])\n def test_isin_empty(self, empty):\n # see gh-16991\n index = Index(["a", "b"])\n expected = np.array([False, False])\n\n result = index.isin(empty)\n tm.assert_numpy_array_equal(expected, result)\n\n def test_isin_string_null(self, string_dtype_no_object):\n # GH#55821\n index = Index(["a", "b"], dtype=string_dtype_no_object)\n result = index.isin([None])\n expected = np.array([False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n "values",\n [\n [1, 2, 3, 4],\n [1.0, 2.0, 3.0, 4.0],\n [True, True, True, True],\n ["foo", "bar", "baz", "qux"],\n date_range("2018-01-01", freq="D", periods=4),\n ],\n )\n def test_boolean_cmp(self, values):\n index = Index(values)\n result = index == values\n expected = np.array([True, True, True, True], dtype=bool)\n\n 
tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("index", ["string"], indirect=True)\n @pytest.mark.parametrize("name,level", [(None, 0), ("a", "a")])\n def test_get_level_values(self, index, name, level):\n expected = index.copy()\n if name:\n expected.name = name\n\n result = expected.get_level_values(level)\n tm.assert_index_equal(result, expected)\n\n def test_slice_keep_name(self):\n index = Index(["a", "b"], name="asdf")\n assert index.name == index[1:].name\n\n @pytest.mark.parametrize(\n "index",\n [\n "string",\n "datetime",\n "int64",\n "int32",\n "uint64",\n "uint32",\n "float64",\n "float32",\n ],\n indirect=True,\n )\n def test_join_self(self, index, join_type):\n result = index.join(index, how=join_type)\n expected = index\n if join_type == "outer":\n expected = expected.sort_values()\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("method", ["strip", "rstrip", "lstrip"])\n def test_str_attribute(self, method):\n # GH9068\n index = Index([" jack", "jill ", " jesse ", "frank"])\n expected = Index([getattr(str, method)(x) for x in index.values])\n\n result = getattr(index.str, method)()\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "index",\n [\n Index(range(5)),\n date_range("2020-01-01", periods=10),\n MultiIndex.from_tuples([("foo", "1"), ("bar", "3")]),\n period_range(start="2000", end="2010", freq="Y"),\n ],\n )\n def test_str_attribute_raises(self, index):\n with pytest.raises(AttributeError, match="only use .str accessor"):\n index.str.repeat(2)\n\n @pytest.mark.parametrize(\n "expand,expected",\n [\n (None, Index([["a", "b", "c"], ["d", "e"], ["f"]])),\n (False, Index([["a", "b", "c"], ["d", "e"], ["f"]])),\n (\n True,\n MultiIndex.from_tuples(\n [("a", "b", "c"), ("d", "e", np.nan), ("f", np.nan, np.nan)]\n ),\n ),\n ],\n )\n def test_str_split(self, expand, expected):\n index = Index(["a b c", "d e", "f"])\n if expand is not None:\n result = 
index.str.split(expand=expand)\n else:\n result = index.str.split()\n\n tm.assert_index_equal(result, expected)\n\n def test_str_bool_return(self):\n # test boolean case, should return np.array instead of boolean Index\n index = Index(["a1", "a2", "b1", "b2"])\n result = index.str.startswith("a")\n expected = np.array([True, True, False, False])\n\n tm.assert_numpy_array_equal(result, expected)\n assert isinstance(result, np.ndarray)\n\n def test_str_bool_series_indexing(self):\n index = Index(["a1", "a2", "b1", "b2"])\n s = Series(range(4), index=index)\n\n result = s[s.index.str.startswith("a")]\n expected = Series(range(2), index=["a1", "a2"])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "index,expected", [(Index(list("abcd")), True), (Index(range(4)), False)]\n )\n def test_tab_completion(self, index, expected):\n # GH 9910\n result = "str" in dir(index)\n assert result == expected\n\n def test_indexing_doesnt_change_class(self):\n index = Index([1, 2, 3, "a", "b", "c"])\n\n assert index[1:3].identical(Index([2, 3], dtype=np.object_))\n assert index[[0, 1]].identical(Index([1, 2], dtype=np.object_))\n\n def test_outer_join_sort(self):\n left_index = Index(np.random.default_rng(2).permutation(15))\n right_index = date_range("2020-01-01", periods=10)\n\n with tm.assert_produces_warning(RuntimeWarning):\n result = left_index.join(right_index, how="outer")\n\n with tm.assert_produces_warning(RuntimeWarning):\n expected = left_index.astype(object).union(right_index.astype(object))\n\n tm.assert_index_equal(result, expected)\n\n def test_take_fill_value(self):\n # GH 12631\n index = Index(list("ABC"), name="xxx")\n result = index.take(np.array([1, 0, -1]))\n expected = Index(list("BAC"), name="xxx")\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = index.take(np.array([1, 0, -1]), fill_value=True)\n expected = Index(["B", "A", np.nan], name="xxx")\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n 
result = index.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
        expected = Index(["B", "A", "C"], name="xxx")
        tm.assert_index_equal(result, expected)

    def test_take_fill_value_none_raises(self):
        """take with allow_fill=True/fill_value set rejects indexers below -1."""
        index = Index(list("ABC"), name="xxx")
        msg = (
            "When allow_fill=True and fill_value is not None, "
            "all indices must be >= -1"
        )

        # only -1 is a valid "missing" marker in this mode; -2/-5 must raise
        with pytest.raises(ValueError, match=msg):
            index.take(np.array([1, 0, -2]), fill_value=True)
        with pytest.raises(ValueError, match=msg):
            index.take(np.array([1, 0, -5]), fill_value=True)

    def test_take_bad_bounds_raises(self):
        """Out-of-bounds positional indexers raise IndexError."""
        index = Index(list("ABC"), name="xxx")
        with pytest.raises(IndexError, match="out of bounds"):
            index.take(np.array([1, -5]))

    @pytest.mark.parametrize("name", [None, "foobar"])
    @pytest.mark.parametrize(
        "labels",
        [
            [],
            np.array([]),
            ["A", "B", "C"],
            ["C", "B", "A"],
            np.array(["A", "B", "C"]),
            np.array(["C", "B", "A"]),
            # Must preserve name even if dtype changes
            date_range("20130101", periods=3).values,
            date_range("20130101", periods=3).tolist(),
        ],
    )
    def test_reindex_preserves_name_if_target_is_list_or_ndarray(self, name, labels):
        """reindex keeps the original index's name regardless of target type."""
        # GH6552
        index = Index([0, 1, 2])
        index.name = name
        assert index.reindex(labels)[0].name == name

    @pytest.mark.parametrize("labels", [[], np.array([]), np.array([], dtype=np.int64)])
    def test_reindex_preserves_type_if_target_is_empty_list_or_array(self, labels):
        """reindex against an empty list/ndarray keeps the source dtype."""
        # GH7774
        index = Index(list("abc"))
        assert index.reindex(labels)[0].dtype.type == index.dtype.type

    @pytest.mark.parametrize(
        "labels,dtype",
        [
            (DatetimeIndex([]), np.datetime64),
        ],
    )
    def test_reindex_doesnt_preserve_type_if_target_is_empty_index(self, labels, dtype):
        """reindex against an empty *Index* adopts the target's dtype instead."""
        # GH7774
        index = Index(list("abc"))
        assert index.reindex(labels)[0].dtype.type == dtype

    def test_reindex_doesnt_preserve_type_if_target_is_empty_index_numeric(
        self, any_real_numpy_dtype
    ):
        # GH7774
        dtype = any_real_numpy_dtype
        index = 
Index(list("abc"))\n labels = Index([], dtype=dtype)\n assert index.reindex(labels)[0].dtype == dtype\n\n def test_reindex_no_type_preserve_target_empty_mi(self):\n index = Index(list("abc"))\n result = index.reindex(\n MultiIndex([Index([], np.int64), Index([], np.float64)], [[], []])\n )[0]\n assert result.levels[0].dtype.type == np.int64\n assert result.levels[1].dtype.type == np.float64\n\n def test_reindex_ignoring_level(self):\n # GH#35132\n idx = Index([1, 2, 3], name="x")\n idx2 = Index([1, 2, 3, 4], name="x")\n expected = Index([1, 2, 3, 4], name="x")\n result, _ = idx.reindex(idx2, level="x")\n tm.assert_index_equal(result, expected)\n\n def test_groupby(self):\n index = Index(range(5))\n result = index.groupby(np.array([1, 1, 2, 2, 2]))\n expected = {1: Index([0, 1]), 2: Index([2, 3, 4])}\n\n tm.assert_dict_equal(result, expected)\n\n @pytest.mark.parametrize(\n "mi,expected",\n [\n (MultiIndex.from_tuples([(1, 2), (4, 5)]), np.array([True, True])),\n (MultiIndex.from_tuples([(1, 2), (4, 6)]), np.array([True, False])),\n ],\n )\n def test_equals_op_multiindex(self, mi, expected):\n # GH9785\n # test comparisons of multiindex\n df = DataFrame(\n [3, 6],\n columns=["c"],\n index=MultiIndex.from_arrays([[1, 4], [2, 5]], names=["a", "b"]),\n )\n\n result = df.index == mi\n tm.assert_numpy_array_equal(result, expected)\n\n def test_equals_op_multiindex_identify(self):\n df = DataFrame(\n [3, 6],\n columns=["c"],\n index=MultiIndex.from_arrays([[1, 4], [2, 5]], names=["a", "b"]),\n )\n\n result = df.index == df.index\n expected = np.array([True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n "index",\n [\n MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)]),\n Index(["foo", "bar", "baz"]),\n ],\n )\n def test_equals_op_mismatched_multiindex_raises(self, index):\n df = DataFrame(\n [3, 6],\n columns=["c"],\n index=MultiIndex.from_arrays([[1, 4], [2, 5]], names=["a", "b"]),\n )\n\n with pytest.raises(ValueError, 
match="Lengths must match"):\n df.index == index\n\n def test_equals_op_index_vs_mi_same_length(self, using_infer_string):\n mi = MultiIndex.from_tuples([(1, 2), (4, 5), (8, 9)])\n index = Index(["foo", "bar", "baz"])\n\n result = mi == index\n expected = np.array([False, False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n "dt_conv, arg",\n [\n (pd.to_datetime, ["2000-01-01", "2000-01-02"]),\n (pd.to_timedelta, ["01:02:03", "01:02:04"]),\n ],\n )\n def test_dt_conversion_preserves_name(self, dt_conv, arg):\n # GH 10875\n index = Index(arg, name="label")\n assert index.name == dt_conv(index).name\n\n def test_cached_properties_not_settable(self):\n index = Index([1, 2, 3])\n with pytest.raises(AttributeError, match="Can't set attribute"):\n index.is_unique = False\n\n def test_tab_complete_warning(self, ip):\n # https://github.com/pandas-dev/pandas/issues/16409\n pytest.importorskip("IPython", minversion="6.0.0")\n from IPython.core.completer import provisionalcompleter\n\n code = "import pandas as pd; idx = pd.Index([1, 2])"\n ip.run_cell(code)\n\n # GH 31324 newer jedi version raises Deprecation warning;\n # appears resolved 2021-02-02\n with tm.assert_produces_warning(None, raise_on_extra_warnings=False):\n with provisionalcompleter("ignore"):\n list(ip.Completer.completions("idx.", 4))\n\n def test_contains_method_removed(self, index):\n # GH#30103 method removed for all types except IntervalIndex\n if isinstance(index, IntervalIndex):\n index.contains(1)\n else:\n msg = f"'{type(index).__name__}' object has no attribute 'contains'"\n with pytest.raises(AttributeError, match=msg):\n index.contains(1)\n\n def test_sortlevel(self):\n index = Index([5, 4, 3, 2, 1])\n with pytest.raises(Exception, match="ascending must be a single bool value or"):\n index.sortlevel(ascending="True")\n\n with pytest.raises(\n Exception, match="ascending must be a list of bool values of length 1"\n ):\n index.sortlevel(ascending=[True, 
True])

        with pytest.raises(Exception, match="ascending must be a bool value"):
            index.sortlevel(ascending=["True"])

        # ascending may be a scalar bool or a length-1 list of bools;
        # both spellings must produce the same ordering
        expected = Index([1, 2, 3, 4, 5])
        result = index.sortlevel(ascending=[True])
        tm.assert_index_equal(result[0], expected)

        expected = Index([1, 2, 3, 4, 5])
        result = index.sortlevel(ascending=True)
        tm.assert_index_equal(result[0], expected)

        expected = Index([5, 4, 3, 2, 1])
        result = index.sortlevel(ascending=False)
        tm.assert_index_equal(result[0], expected)

    def test_sortlevel_na_position(self):
        """sortlevel honors na_position="first" on a flat Index."""
        # GH#51612
        idx = Index([1, np.nan])
        result = idx.sortlevel(na_position="first")[0]
        expected = Index([np.nan, 1])
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize(
        "periods, expected_results",
        [
            (1, [np.nan, 10, 10, 10, 10]),
            (2, [np.nan, np.nan, 20, 20, 20]),
            (3, [np.nan, np.nan, np.nan, 30, 30]),
        ],
    )
    def test_index_diff(self, periods, expected_results):
        """Index.diff gives element-wise differences, NaN-padded at the front."""
        # GH#19708
        idx = Index([10, 20, 30, 40, 50])
        result = idx.diff(periods)
        expected = Index(expected_results)

        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize(
        "decimals, expected_results",
        [
            (0, [1.0, 2.0, 3.0]),
            (1, [1.2, 2.3, 3.5]),
            (2, [1.23, 2.35, 3.46]),
        ],
    )
    def test_index_round(self, decimals, expected_results):
        """Index.round rounds each element to the requested decimals."""
        # GH#19708
        idx = Index([1.234, 2.345, 3.456])
        result = idx.round(decimals)
        expected = Index(expected_results)

        tm.assert_index_equal(result, expected)


class TestMixedIntIndex:
    # Mostly the tests from common.py for which the results differ
    # in py2 and py3 because ints and strings are uncomparable in py3
    # (GH 13514)
    @pytest.fixture
    def simple_index(self) -> Index:
        # object-dtype Index mixing ints and strings
        return Index([0, "a", 1, "b", 2, "c"])

    def test_argsort(self, simple_index):
        # int/str ordering is undefined in py3, so argsort must raise
        index = simple_index
        with pytest.raises(TypeError, match="'>|<' not supported"):
            index.argsort()

    def test_numpy_argsort(self, simple_index):
        index = simple_index
        with pytest.raises(TypeError, 
match="'>|<' not supported"):\n np.argsort(index)\n\n def test_copy_name(self, simple_index):\n # Check that "name" argument passed at initialization is honoured\n # GH12309\n index = simple_index\n\n first = type(index)(index, copy=True, name="mario")\n second = type(first)(first, copy=False)\n\n # Even though "copy=False", we want a new object.\n assert first is not second\n tm.assert_index_equal(first, second)\n\n assert first.name == "mario"\n assert second.name == "mario"\n\n s1 = Series(2, index=first)\n s2 = Series(3, index=second[:-1])\n\n s3 = s1 * s2\n\n assert s3.index.name == "mario"\n\n def test_copy_name2(self):\n # Check that adding a "name" parameter to the copy is honored\n # GH14302\n index = Index([1, 2], name="MyName")\n index1 = index.copy()\n\n tm.assert_index_equal(index, index1)\n\n index2 = index.copy(name="NewName")\n tm.assert_index_equal(index, index2, check_names=False)\n assert index.name == "MyName"\n assert index2.name == "NewName"\n\n def test_unique_na(self):\n idx = Index([2, np.nan, 2, 1], name="my_index")\n expected = Index([2, np.nan, 1], name="my_index")\n result = idx.unique()\n tm.assert_index_equal(result, expected)\n\n def test_logical_compat(self, simple_index):\n index = simple_index\n assert index.all() == index.values.all()\n assert index.any() == index.values.any()\n\n @pytest.mark.parametrize("how", ["any", "all"])\n @pytest.mark.parametrize("dtype", [None, object, "category"])\n @pytest.mark.parametrize(\n "vals,expected",\n [\n ([1, 2, 3], [1, 2, 3]),\n ([1.0, 2.0, 3.0], [1.0, 2.0, 3.0]),\n ([1.0, 2.0, np.nan, 3.0], [1.0, 2.0, 3.0]),\n (["A", "B", "C"], ["A", "B", "C"]),\n (["A", np.nan, "B", "C"], ["A", "B", "C"]),\n ],\n )\n def test_dropna(self, how, dtype, vals, expected):\n # GH 6194\n index = Index(vals, dtype=dtype)\n result = index.dropna(how=how)\n expected = Index(expected, dtype=dtype)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("how", ["any", "all"])\n 
@pytest.mark.parametrize(\n "index,expected",\n [\n (\n DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),\n DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),\n ),\n (\n DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03", pd.NaT]),\n DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"]),\n ),\n (\n TimedeltaIndex(["1 days", "2 days", "3 days"]),\n TimedeltaIndex(["1 days", "2 days", "3 days"]),\n ),\n (\n TimedeltaIndex([pd.NaT, "1 days", "2 days", "3 days", pd.NaT]),\n TimedeltaIndex(["1 days", "2 days", "3 days"]),\n ),\n (\n PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),\n PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),\n ),\n (\n PeriodIndex(["2012-02", "2012-04", "NaT", "2012-05"], freq="M"),\n PeriodIndex(["2012-02", "2012-04", "2012-05"], freq="M"),\n ),\n ],\n )\n def test_dropna_dt_like(self, how, index, expected):\n result = index.dropna(how=how)\n tm.assert_index_equal(result, expected)\n\n def test_dropna_invalid_how_raises(self):\n msg = "invalid how option: xxx"\n with pytest.raises(ValueError, match=msg):\n Index([1, 2, 3]).dropna(how="xxx")\n\n @pytest.mark.parametrize(\n "index",\n [\n Index([np.nan]),\n Index([np.nan, 1]),\n Index([1, 2, np.nan]),\n Index(["a", "b", np.nan]),\n pd.to_datetime(["NaT"]),\n pd.to_datetime(["NaT", "2000-01-01"]),\n pd.to_datetime(["2000-01-01", "NaT", "2000-01-02"]),\n pd.to_timedelta(["1 day", "NaT"]),\n ],\n )\n def test_is_monotonic_na(self, index):\n assert index.is_monotonic_increasing is False\n assert index.is_monotonic_decreasing is False\n assert index._is_strictly_monotonic_increasing is False\n assert index._is_strictly_monotonic_decreasing is False\n\n @pytest.mark.parametrize("dtype", ["f8", "m8[ns]", "M8[us]"])\n @pytest.mark.parametrize("unique_first", [True, False])\n def test_is_monotonic_unique_na(self, dtype, unique_first):\n # GH 55755\n index = Index([None, 1, 1], dtype=dtype)\n if unique_first:\n assert index.is_unique is False\n assert 
index.is_monotonic_increasing is False\n assert index.is_monotonic_decreasing is False\n else:\n assert index.is_monotonic_increasing is False\n assert index.is_monotonic_decreasing is False\n assert index.is_unique is False\n\n def test_int_name_format(self, frame_or_series):\n index = Index(["a", "b", "c"], name=0)\n result = frame_or_series(list(range(3)), index=index)\n assert "0" in repr(result)\n\n def test_str_to_bytes_raises(self):\n # GH 26447\n index = Index([str(x) for x in range(10)])\n msg = "^'str' object cannot be interpreted as an integer$"\n with pytest.raises(TypeError, match=msg):\n bytes(index)\n\n @pytest.mark.filterwarnings("ignore:elementwise comparison failed:FutureWarning")\n def test_index_with_tuple_bool(self):\n # GH34123\n # TODO: also this op right now produces FutureWarning from numpy\n # https://github.com/numpy/numpy/issues/11521\n idx = Index([("a", "b"), ("b", "c"), ("c", "a")])\n result = idx == ("c", "a")\n expected = np.array([False, False, True])\n tm.assert_numpy_array_equal(result, expected)\n\n\nclass TestIndexUtils:\n @pytest.mark.parametrize(\n "data, names, expected",\n [\n ([[1, 2, 3]], None, Index([1, 2, 3])),\n ([[1, 2, 3]], ["name"], Index([1, 2, 3], name="name")),\n (\n [["a", "a"], ["c", "d"]],\n None,\n MultiIndex([["a"], ["c", "d"]], [[0, 0], [0, 1]]),\n ),\n (\n [["a", "a"], ["c", "d"]],\n ["L1", "L2"],\n MultiIndex([["a"], ["c", "d"]], [[0, 0], [0, 1]], names=["L1", "L2"]),\n ),\n ],\n )\n def test_ensure_index_from_sequences(self, data, names, expected):\n result = ensure_index_from_sequences(data, names)\n tm.assert_index_equal(result, expected)\n\n def test_ensure_index_mixed_closed_intervals(self):\n # GH27172\n intervals = [\n pd.Interval(0, 1, closed="left"),\n pd.Interval(1, 2, closed="right"),\n pd.Interval(2, 3, closed="neither"),\n pd.Interval(3, 4, closed="both"),\n ]\n result = ensure_index(intervals)\n expected = Index(intervals, dtype=object)\n tm.assert_index_equal(result, expected)\n\n def 
test_ensure_index_uint64(self):\n # with both 0 and a large-uint64, np.array will infer to float64\n # https://github.com/numpy/numpy/issues/19146\n # but a more accurate choice would be uint64\n values = [0, np.iinfo(np.uint64).max]\n\n result = ensure_index(values)\n assert list(result) == values\n\n expected = Index(values, dtype="uint64")\n tm.assert_index_equal(result, expected)\n\n def test_get_combined_index(self):\n result = _get_combined_index([])\n expected = Index([])\n tm.assert_index_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "opname",\n [\n "eq",\n "ne",\n "le",\n "lt",\n "ge",\n "gt",\n "add",\n "radd",\n "sub",\n "rsub",\n "mul",\n "rmul",\n "truediv",\n "rtruediv",\n "floordiv",\n "rfloordiv",\n "pow",\n "rpow",\n "mod",\n "divmod",\n ],\n)\ndef test_generated_op_names(opname, index):\n opname = f"__{opname}__"\n method = getattr(index, opname)\n assert method.__name__ == opname\n\n\n@pytest.mark.parametrize(\n "klass",\n [\n partial(CategoricalIndex, data=[1]),\n partial(DatetimeIndex, data=["2020-01-01"]),\n partial(PeriodIndex, data=["2020-01-01"]),\n partial(TimedeltaIndex, data=["1 day"]),\n partial(RangeIndex, data=range(1)),\n partial(IntervalIndex, data=[pd.Interval(0, 1)]),\n partial(Index, data=["a"], dtype=object),\n partial(MultiIndex, levels=[1], codes=[0]),\n ],\n)\ndef test_index_subclass_constructor_wrong_kwargs(klass):\n # GH #19348\n with pytest.raises(TypeError, match="unexpected keyword argument"):\n klass(foo="bar")\n\n\ndef test_deprecated_fastpath():\n msg = "[Uu]nexpected keyword argument"\n with pytest.raises(TypeError, match=msg):\n Index(np.array(["a", "b"], dtype=object), name="test", fastpath=True)\n\n with pytest.raises(TypeError, match=msg):\n Index(np.array([1, 2, 3], dtype="int64"), name="test", fastpath=True)\n\n with pytest.raises(TypeError, match=msg):\n RangeIndex(0, 5, 2, name="test", fastpath=True)\n\n with pytest.raises(TypeError, match=msg):\n CategoricalIndex(["a", "b", "c"], name="test", 
fastpath=True)\n\n\ndef test_shape_of_invalid_index():\n # Pre-2.0, it was possible to create "invalid" index objects backed by\n # a multi-dimensional array (see https://github.com/pandas-dev/pandas/issues/27125\n # about this). However, as long as this is not solved in general,this test ensures\n # that the returned shape is consistent with this underlying array for\n # compat with matplotlib (see https://github.com/pandas-dev/pandas/issues/27775)\n idx = Index([0, 1, 2, 3])\n with pytest.raises(ValueError, match="Multi-dimensional indexing"):\n # GH#30588 multi-dimensional indexing deprecated\n idx[:, None]\n\n\n@pytest.mark.parametrize("dtype", [None, np.int64, np.uint64, np.float64])\ndef test_validate_1d_input(dtype):\n # GH#27125 check that we do not have >1-dimensional input\n msg = "Index data must be 1-dimensional"\n\n arr = np.arange(8).reshape(2, 2, 2)\n with pytest.raises(ValueError, match=msg):\n Index(arr, dtype=dtype)\n\n df = DataFrame(arr.reshape(4, 2))\n with pytest.raises(ValueError, match=msg):\n Index(df, dtype=dtype)\n\n # GH#13601 trying to assign a multi-dimensional array to an index is not allowed\n ser = Series(0, range(4))\n with pytest.raises(ValueError, match=msg):\n ser.index = np.array([[2, 3]] * 4, dtype=dtype)\n\n\n@pytest.mark.parametrize(\n "klass, extra_kwargs",\n [\n [Index, {}],\n *[[lambda x: Index(x, dtype=dtyp), {}] for dtyp in tm.ALL_REAL_NUMPY_DTYPES],\n [DatetimeIndex, {}],\n [TimedeltaIndex, {}],\n [PeriodIndex, {"freq": "Y"}],\n ],\n)\ndef test_construct_from_memoryview(klass, extra_kwargs):\n # GH 13120\n result = klass(memoryview(np.arange(2000, 2005)), **extra_kwargs)\n expected = klass(list(range(2000, 2005)), **extra_kwargs)\n tm.assert_index_equal(result, expected, exact=True)\n\n\n@pytest.mark.parametrize("op", [operator.lt, operator.gt])\ndef test_nan_comparison_same_object(op):\n # GH#47105\n idx = Index([np.nan])\n expected = np.array([False])\n\n result = op(idx, idx)\n tm.assert_numpy_array_equal(result, 
expected)\n\n result = op(idx, idx.copy())\n tm.assert_numpy_array_equal(result, expected)\n\n\n@td.skip_if_no("pyarrow")\ndef test_is_monotonic_pyarrow_list_type():\n # GH 57333\n import pyarrow as pa\n\n idx = Index([[1], [2, 3]], dtype=pd.ArrowDtype(pa.list_(pa.int64())))\n assert not idx.is_monotonic_increasing\n assert not idx.is_monotonic_decreasing\n | .venv\Lib\site-packages\pandas\tests\indexes\test_base.py | test_base.py | Python | 60,527 | 0.75 | 0.107266 | 0.078947 | awesome-app | 216 | 2024-10-20T08:50:33.417332 | BSD-3-Clause | true | 561e3dd846f2466fa293c40a04a1dc4d |
"""\nCollection of tests asserting things that should be true for\nany index subclass except for MultiIndex. Makes use of the `index_flat`\nfixture defined in pandas/conftest.py.\n"""\nfrom copy import (\n copy,\n deepcopy,\n)\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import IS64\nfrom pandas.compat.numpy import np_version_gte1p25\n\nfrom pandas.core.dtypes.common import (\n is_integer_dtype,\n is_numeric_dtype,\n)\n\nimport pandas as pd\nfrom pandas import (\n CategoricalIndex,\n MultiIndex,\n PeriodIndex,\n RangeIndex,\n)\nimport pandas._testing as tm\n\n\nclass TestCommon:\n @pytest.mark.parametrize("name", [None, "new_name"])\n def test_to_frame(self, name, index_flat, using_copy_on_write):\n # see GH#15230, GH#22580\n idx = index_flat\n\n if name:\n idx_name = name\n else:\n idx_name = idx.name or 0\n\n df = idx.to_frame(name=idx_name)\n\n assert df.index is idx\n assert len(df.columns) == 1\n assert df.columns[0] == idx_name\n if not using_copy_on_write:\n assert df[idx_name].values is not idx.values\n\n df = idx.to_frame(index=False, name=idx_name)\n assert df.index is not idx\n\n def test_droplevel(self, index_flat):\n # GH 21115\n # MultiIndex is tested separately in test_multi.py\n index = index_flat\n\n assert index.droplevel([]).equals(index)\n\n for level in [index.name, [index.name]]:\n if isinstance(index.name, tuple) and level is index.name:\n # GH 21121 : droplevel with tuple name\n continue\n msg = (\n "Cannot remove 1 levels from an index with 1 levels: at least one "\n "level must be left."\n )\n with pytest.raises(ValueError, match=msg):\n index.droplevel(level)\n\n for level in "wrong", ["wrong"]:\n with pytest.raises(\n KeyError,\n match=r"'Requested level \(wrong\) does not match index name \(None\)'",\n ):\n index.droplevel(level)\n\n def test_constructor_non_hashable_name(self, index_flat):\n # GH 20527\n index = index_flat\n\n message = "Index.name must be a hashable type"\n renamed = [["1"]]\n\n # With 
.rename()\n with pytest.raises(TypeError, match=message):\n index.rename(name=renamed)\n\n # With .set_names()\n with pytest.raises(TypeError, match=message):\n index.set_names(names=renamed)\n\n def test_constructor_unwraps_index(self, index_flat):\n a = index_flat\n # Passing dtype is necessary for Index([True, False], dtype=object)\n # case.\n b = type(a)(a, dtype=a.dtype)\n tm.assert_equal(a._data, b._data)\n\n def test_to_flat_index(self, index_flat):\n # 22866\n index = index_flat\n\n result = index.to_flat_index()\n tm.assert_index_equal(result, index)\n\n def test_set_name_methods(self, index_flat):\n # MultiIndex tested separately\n index = index_flat\n new_name = "This is the new name for this index"\n\n original_name = index.name\n new_ind = index.set_names([new_name])\n assert new_ind.name == new_name\n assert index.name == original_name\n res = index.rename(new_name, inplace=True)\n\n # should return None\n assert res is None\n assert index.name == new_name\n assert index.names == [new_name]\n with pytest.raises(ValueError, match="Level must be None"):\n index.set_names("a", level=0)\n\n # rename in place just leaves tuples and other containers alone\n name = ("A", "B")\n index.rename(name, inplace=True)\n assert index.name == name\n assert index.names == [name]\n\n @pytest.mark.xfail\n def test_set_names_single_label_no_level(self, index_flat):\n with pytest.raises(TypeError, match="list-like"):\n # should still fail even if it would be the right length\n index_flat.set_names("a")\n\n def test_copy_and_deepcopy(self, index_flat):\n index = index_flat\n\n for func in (copy, deepcopy):\n idx_copy = func(index)\n assert idx_copy is not index\n assert idx_copy.equals(index)\n\n new_copy = index.copy(deep=True, name="banana")\n assert new_copy.name == "banana"\n\n @pytest.mark.filterwarnings(r"ignore:Dtype inference:FutureWarning")\n def test_copy_name(self, index_flat):\n # GH#12309: Check that the "name" argument\n # passed at initialization is 
honored.\n index = index_flat\n\n first = type(index)(index, copy=True, name="mario")\n second = type(first)(first, copy=False)\n\n # Even though "copy=False", we want a new object.\n assert first is not second\n tm.assert_index_equal(first, second)\n\n # Not using tm.assert_index_equal() since names differ.\n assert index.equals(first)\n\n assert first.name == "mario"\n assert second.name == "mario"\n\n # TODO: belongs in series arithmetic tests?\n s1 = pd.Series(2, index=first)\n s2 = pd.Series(3, index=second[:-1])\n # See GH#13365\n s3 = s1 * s2\n assert s3.index.name == "mario"\n\n def test_copy_name2(self, index_flat):\n # GH#35592\n index = index_flat\n\n assert index.copy(name="mario").name == "mario"\n\n with pytest.raises(ValueError, match="Length of new names must be 1, got 2"):\n index.copy(name=["mario", "luigi"])\n\n msg = f"{type(index).__name__}.name must be a hashable type"\n with pytest.raises(TypeError, match=msg):\n index.copy(name=[["mario"]])\n\n def test_unique_level(self, index_flat):\n # don't test a MultiIndex here (as its tested separated)\n index = index_flat\n\n # GH 17896\n expected = index.drop_duplicates()\n for level in [0, index.name, None]:\n result = index.unique(level=level)\n tm.assert_index_equal(result, expected)\n\n msg = "Too many levels: Index has only 1 level, not 4"\n with pytest.raises(IndexError, match=msg):\n index.unique(level=3)\n\n msg = (\n rf"Requested level \(wrong\) does not match index name "\n rf"\({re.escape(index.name.__repr__())}\)"\n )\n with pytest.raises(KeyError, match=msg):\n index.unique(level="wrong")\n\n def test_unique(self, index_flat):\n # MultiIndex tested separately\n index = index_flat\n if not len(index):\n pytest.skip("Skip check for empty Index and MultiIndex")\n\n idx = index[[0] * 5]\n idx_unique = index[[0]]\n\n # We test against `idx_unique`, so first we make sure it's unique\n # and doesn't contain nans.\n assert idx_unique.is_unique is True\n try:\n assert idx_unique.hasnans is 
False\n except NotImplementedError:\n pass\n\n result = idx.unique()\n tm.assert_index_equal(result, idx_unique)\n\n # nans:\n if not index._can_hold_na:\n pytest.skip("Skip na-check if index cannot hold na")\n\n vals = index._values[[0] * 5]\n vals[0] = np.nan\n\n vals_unique = vals[:2]\n idx_nan = index._shallow_copy(vals)\n idx_unique_nan = index._shallow_copy(vals_unique)\n assert idx_unique_nan.is_unique is True\n\n assert idx_nan.dtype == index.dtype\n assert idx_unique_nan.dtype == index.dtype\n\n expected = idx_unique_nan\n for pos, i in enumerate([idx_nan, idx_unique_nan]):\n result = i.unique()\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.filterwarnings("ignore:Period with BDay freq:FutureWarning")\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_searchsorted_monotonic(self, index_flat, request):\n # GH17271\n index = index_flat\n # not implemented for tuple searches in MultiIndex\n # or Intervals searches in IntervalIndex\n if isinstance(index, pd.IntervalIndex):\n mark = pytest.mark.xfail(\n reason="IntervalIndex.searchsorted does not support Interval arg",\n raises=NotImplementedError,\n )\n request.applymarker(mark)\n\n # nothing to test if the index is empty\n if index.empty:\n pytest.skip("Skip check for empty Index")\n value = index[0]\n\n # determine the expected results (handle dupes for 'right')\n expected_left, expected_right = 0, (index == value).argmin()\n if expected_right == 0:\n # all values are the same, expected_right should be length\n expected_right = len(index)\n\n # test _searchsorted_monotonic in all cases\n # test searchsorted only for increasing\n if index.is_monotonic_increasing:\n ssm_left = index._searchsorted_monotonic(value, side="left")\n assert expected_left == ssm_left\n\n ssm_right = index._searchsorted_monotonic(value, side="right")\n assert expected_right == ssm_right\n\n ss_left = index.searchsorted(value, side="left")\n assert expected_left == ss_left\n\n 
ss_right = index.searchsorted(value, side="right")\n assert expected_right == ss_right\n\n elif index.is_monotonic_decreasing:\n ssm_left = index._searchsorted_monotonic(value, side="left")\n assert expected_left == ssm_left\n\n ssm_right = index._searchsorted_monotonic(value, side="right")\n assert expected_right == ssm_right\n else:\n # non-monotonic should raise.\n msg = "index must be monotonic increasing or decreasing"\n with pytest.raises(ValueError, match=msg):\n index._searchsorted_monotonic(value, side="left")\n\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_drop_duplicates(self, index_flat, keep):\n # MultiIndex is tested separately\n index = index_flat\n if isinstance(index, RangeIndex):\n pytest.skip(\n "RangeIndex is tested in test_drop_duplicates_no_duplicates "\n "as it cannot hold duplicates"\n )\n if len(index) == 0:\n pytest.skip(\n "empty index is tested in test_drop_duplicates_no_duplicates "\n "as it cannot hold duplicates"\n )\n\n # make unique index\n holder = type(index)\n unique_values = list(set(index))\n dtype = index.dtype if is_numeric_dtype(index) else None\n unique_idx = holder(unique_values, dtype=dtype)\n\n # make duplicated index\n n = len(unique_idx)\n duplicated_selection = np.random.default_rng(2).choice(n, int(n * 1.5))\n idx = holder(unique_idx.values[duplicated_selection])\n\n # Series.duplicated is tested separately\n expected_duplicated = (\n pd.Series(duplicated_selection).duplicated(keep=keep).values\n )\n tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected_duplicated)\n\n # Series.drop_duplicates is tested separately\n expected_dropped = holder(pd.Series(idx).drop_duplicates(keep=keep))\n tm.assert_index_equal(idx.drop_duplicates(keep=keep), expected_dropped)\n\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_drop_duplicates_no_duplicates(self, index_flat):\n # MultiIndex is tested separately\n index = 
index_flat\n\n # make unique index\n if isinstance(index, RangeIndex):\n # RangeIndex cannot have duplicates\n unique_idx = index\n else:\n holder = type(index)\n unique_values = list(set(index))\n dtype = index.dtype if is_numeric_dtype(index) else None\n unique_idx = holder(unique_values, dtype=dtype)\n\n # check on unique index\n expected_duplicated = np.array([False] * len(unique_idx), dtype="bool")\n tm.assert_numpy_array_equal(unique_idx.duplicated(), expected_duplicated)\n result_dropped = unique_idx.drop_duplicates()\n tm.assert_index_equal(result_dropped, unique_idx)\n # validate shallow copy\n assert result_dropped is not unique_idx\n\n def test_drop_duplicates_inplace(self, index):\n msg = r"drop_duplicates\(\) got an unexpected keyword argument"\n with pytest.raises(TypeError, match=msg):\n index.drop_duplicates(inplace=True)\n\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_has_duplicates(self, index_flat):\n # MultiIndex tested separately in:\n # tests/indexes/multi/test_unique_and_duplicates.\n index = index_flat\n holder = type(index)\n if not len(index) or isinstance(index, RangeIndex):\n # MultiIndex tested separately in:\n # tests/indexes/multi/test_unique_and_duplicates.\n # RangeIndex is unique by definition.\n pytest.skip("Skip check for empty Index, MultiIndex, and RangeIndex")\n\n idx = holder([index[0]] * 5)\n assert idx.is_unique is False\n assert idx.has_duplicates is True\n\n @pytest.mark.parametrize(\n "dtype",\n ["int64", "uint64", "float64", "category", "datetime64[ns]", "timedelta64[ns]"],\n )\n def test_astype_preserves_name(self, index, dtype):\n # https://github.com/pandas-dev/pandas/issues/32013\n if isinstance(index, MultiIndex):\n index.names = ["idx" + str(i) for i in range(index.nlevels)]\n else:\n index.name = "idx"\n\n warn = None\n if index.dtype.kind == "c" and dtype in ["float64", "int64", "uint64"]:\n # imaginary components discarded\n if np_version_gte1p25:\n warn = 
np.exceptions.ComplexWarning\n else:\n warn = np.ComplexWarning\n\n is_pyarrow_str = str(index.dtype) == "string[pyarrow]" and dtype == "category"\n try:\n # Some of these conversions cannot succeed so we use a try / except\n with tm.assert_produces_warning(\n warn,\n raise_on_extra_warnings=is_pyarrow_str,\n check_stacklevel=False,\n ):\n result = index.astype(dtype)\n except (ValueError, TypeError, NotImplementedError, SystemError):\n return\n\n if isinstance(index, MultiIndex):\n assert result.names == index.names\n else:\n assert result.name == index.name\n\n def test_hasnans_isnans(self, index_flat):\n # GH#11343, added tests for hasnans / isnans\n index = index_flat\n\n # cases in indices doesn't include NaN\n idx = index.copy(deep=True)\n expected = np.array([False] * len(idx), dtype=bool)\n tm.assert_numpy_array_equal(idx._isnan, expected)\n assert idx.hasnans is False\n\n idx = index.copy(deep=True)\n values = idx._values\n\n if len(index) == 0:\n return\n elif is_integer_dtype(index.dtype):\n return\n elif index.dtype == bool:\n # values[1] = np.nan below casts to True!\n return\n\n values[1] = np.nan\n\n idx = type(index)(values)\n\n expected = np.array([False] * len(idx), dtype=bool)\n expected[1] = True\n tm.assert_numpy_array_equal(idx._isnan, expected)\n assert idx.hasnans is True\n\n\n@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n@pytest.mark.parametrize("na_position", [None, "middle"])\ndef test_sort_values_invalid_na_position(index_with_missing, na_position):\n with pytest.raises(ValueError, match=f"invalid na_position: {na_position}"):\n index_with_missing.sort_values(na_position=na_position)\n\n\n@pytest.mark.fails_arm_wheels\n@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n@pytest.mark.parametrize("na_position", ["first", "last"])\ndef test_sort_values_with_missing(index_with_missing, na_position, request):\n # GH 35584. 
Test that sort_values works with missing values,\n # sort non-missing and place missing according to na_position\n\n if isinstance(index_with_missing, CategoricalIndex):\n request.applymarker(\n pytest.mark.xfail(\n reason="missing value sorting order not well-defined", strict=False\n )\n )\n\n missing_count = np.sum(index_with_missing.isna())\n not_na_vals = index_with_missing[index_with_missing.notna()].values\n sorted_values = np.sort(not_na_vals)\n if na_position == "first":\n sorted_values = np.concatenate([[None] * missing_count, sorted_values])\n else:\n sorted_values = np.concatenate([sorted_values, [None] * missing_count])\n\n # Explicitly pass dtype needed for Index backed by EA e.g. IntegerArray\n expected = type(index_with_missing)(sorted_values, dtype=index_with_missing.dtype)\n\n result = index_with_missing.sort_values(na_position=na_position)\n tm.assert_index_equal(result, expected)\n\n\ndef test_ndarray_compat_properties(index):\n if isinstance(index, PeriodIndex) and not IS64:\n pytest.skip("Overflow")\n idx = index\n assert idx.T.equals(idx)\n assert idx.transpose().equals(idx)\n\n values = idx.values\n\n assert idx.shape == values.shape\n assert idx.ndim == values.ndim\n assert idx.size == values.size\n\n if not isinstance(index, (RangeIndex, MultiIndex)):\n # These two are not backed by an ndarray\n assert idx.nbytes == values.nbytes\n\n # test for validity\n idx.nbytes\n idx.values.nbytes\n\n\ndef test_compare_read_only_array():\n # GH#57130\n arr = np.array([], dtype=object)\n arr.flags.writeable = False\n idx = pd.Index(arr)\n result = idx > 69\n assert result.dtype == bool\n | .venv\Lib\site-packages\pandas\tests\indexes\test_common.py | test_common.py | Python | 17,972 | 0.95 | 0.1423 | 0.152174 | node-utils | 729 | 2025-02-21T18:43:10.221557 | GPL-3.0 | true | a2bcb6c6f440af5909d43da158c1c1ae |
""" generic datetimelike tests """\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\nclass TestDatetimeLike:\n @pytest.fixture(\n params=[\n pd.period_range("20130101", periods=5, freq="D"),\n pd.TimedeltaIndex(\n [\n "0 days 01:00:00",\n "1 days 01:00:00",\n "2 days 01:00:00",\n "3 days 01:00:00",\n "4 days 01:00:00",\n ],\n dtype="timedelta64[ns]",\n freq="D",\n ),\n pd.DatetimeIndex(\n ["2013-01-01", "2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05"],\n dtype="datetime64[ns]",\n freq="D",\n ),\n ]\n )\n def simple_index(self, request):\n return request.param\n\n def test_isin(self, simple_index):\n index = simple_index[:4]\n result = index.isin(index)\n assert result.all()\n\n result = index.isin(list(index))\n assert result.all()\n\n result = index.isin([index[2], 5])\n expected = np.array([False, False, True, False])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_argsort_matches_array(self, simple_index):\n idx = simple_index\n idx = idx.insert(1, pd.NaT)\n\n result = idx.argsort()\n expected = idx._data.argsort()\n tm.assert_numpy_array_equal(result, expected)\n\n def test_can_hold_identifiers(self, simple_index):\n idx = simple_index\n key = idx[0]\n assert idx._can_hold_identifiers_and_holds_name(key) is False\n\n def test_shift_identity(self, simple_index):\n idx = simple_index\n tm.assert_index_equal(idx, idx.shift(0))\n\n def test_shift_empty(self, simple_index):\n # GH#14811\n idx = simple_index[:0]\n tm.assert_index_equal(idx, idx.shift(1))\n\n def test_str(self, simple_index):\n # test the string repr\n idx = simple_index.copy()\n idx.name = "foo"\n assert f"length={len(idx)}" not in str(idx)\n assert "'foo'" in str(idx)\n assert type(idx).__name__ in str(idx)\n\n if hasattr(idx, "tz"):\n if idx.tz is not None:\n assert idx.tz in str(idx)\n if isinstance(idx, pd.PeriodIndex):\n assert f"dtype='period[{idx.freqstr}]'" in str(idx)\n else:\n assert f"freq='{idx.freqstr}'" in str(idx)\n\n def 
test_view(self, simple_index):\n idx = simple_index\n\n idx_view = idx.view("i8")\n result = type(simple_index)(idx)\n tm.assert_index_equal(result, idx)\n\n msg = "Passing a type in .*Index.view is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n idx_view = idx.view(type(simple_index))\n result = type(simple_index)(idx)\n tm.assert_index_equal(result, idx_view)\n\n def test_map_callable(self, simple_index):\n index = simple_index\n expected = index + index.freq\n result = index.map(lambda x: x + index.freq)\n tm.assert_index_equal(result, expected)\n\n # map to NaT\n result = index.map(lambda x: pd.NaT if x == index[0] else x)\n expected = pd.Index([pd.NaT] + index[1:].tolist())\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "mapper",\n [\n lambda values, index: {i: e for e, i in zip(values, index)},\n lambda values, index: pd.Series(values, index, dtype=object),\n ],\n )\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_map_dictlike(self, mapper, simple_index):\n index = simple_index\n expected = index + index.freq\n\n # don't compare the freqs\n if isinstance(expected, (pd.DatetimeIndex, pd.TimedeltaIndex)):\n expected = expected._with_freq(None)\n\n result = index.map(mapper(expected, index))\n tm.assert_index_equal(result, expected)\n\n expected = pd.Index([pd.NaT] + index[1:].tolist())\n result = index.map(mapper(expected, index))\n tm.assert_index_equal(result, expected)\n\n # empty map; these map to np.nan because we cannot know\n # to re-infer things\n expected = pd.Index([np.nan] * len(index))\n result = index.map(mapper([], []))\n tm.assert_index_equal(result, expected)\n\n def test_getitem_preserves_freq(self, simple_index):\n index = simple_index\n assert index.freq is not None\n\n result = index[:]\n assert result.freq == index.freq\n\n def test_where_cast_str(self, simple_index):\n index = simple_index\n\n mask = np.ones(len(index), dtype=bool)\n 
mask[-1] = False\n\n result = index.where(mask, str(index[0]))\n expected = index.where(mask, index[0])\n tm.assert_index_equal(result, expected)\n\n result = index.where(mask, [str(index[0])])\n tm.assert_index_equal(result, expected)\n\n expected = index.astype(object).where(mask, "foo")\n result = index.where(mask, "foo")\n tm.assert_index_equal(result, expected)\n\n result = index.where(mask, ["foo"])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("unit", ["ns", "us", "ms", "s"])\n def test_diff(self, unit):\n # GH 55080\n dti = pd.to_datetime([10, 20, 30], unit=unit).as_unit(unit)\n result = dti.diff(1)\n expected = pd.to_timedelta([pd.NaT, 10, 10], unit=unit).as_unit(unit)\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\test_datetimelike.py | test_datetimelike.py | Python | 5,598 | 0.95 | 0.116959 | 0.050725 | react-lib | 247 | 2025-04-08T19:37:34.542074 | GPL-3.0 | true | 9f807d4594e8a140f0441614c0920245 |
import re\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import index as libindex\n\nimport pandas as pd\n\n\n@pytest.fixture(\n params=[\n (libindex.Int64Engine, np.int64),\n (libindex.Int32Engine, np.int32),\n (libindex.Int16Engine, np.int16),\n (libindex.Int8Engine, np.int8),\n (libindex.UInt64Engine, np.uint64),\n (libindex.UInt32Engine, np.uint32),\n (libindex.UInt16Engine, np.uint16),\n (libindex.UInt8Engine, np.uint8),\n (libindex.Float64Engine, np.float64),\n (libindex.Float32Engine, np.float32),\n ],\n ids=lambda x: x[0].__name__,\n)\ndef numeric_indexing_engine_type_and_dtype(request):\n return request.param\n\n\nclass TestDatetimeEngine:\n @pytest.mark.parametrize(\n "scalar",\n [\n pd.Timedelta(pd.Timestamp("2016-01-01").asm8.view("m8[ns]")),\n pd.Timestamp("2016-01-01")._value,\n pd.Timestamp("2016-01-01").to_pydatetime(),\n pd.Timestamp("2016-01-01").to_datetime64(),\n ],\n )\n def test_not_contains_requires_timestamp(self, scalar):\n dti1 = pd.date_range("2016-01-01", periods=3)\n dti2 = dti1.insert(1, pd.NaT) # non-monotonic\n dti3 = dti1.insert(3, dti1[0]) # non-unique\n dti4 = pd.date_range("2016-01-01", freq="ns", periods=2_000_000)\n dti5 = dti4.insert(0, dti4[0]) # over size threshold, not unique\n\n msg = "|".join([re.escape(str(scalar)), re.escape(repr(scalar))])\n for dti in [dti1, dti2, dti3, dti4, dti5]:\n with pytest.raises(TypeError, match=msg):\n scalar in dti._engine\n\n with pytest.raises(KeyError, match=msg):\n dti._engine.get_loc(scalar)\n\n\nclass TestTimedeltaEngine:\n @pytest.mark.parametrize(\n "scalar",\n [\n pd.Timestamp(pd.Timedelta(days=42).asm8.view("datetime64[ns]")),\n pd.Timedelta(days=42)._value,\n pd.Timedelta(days=42).to_pytimedelta(),\n pd.Timedelta(days=42).to_timedelta64(),\n ],\n )\n def test_not_contains_requires_timedelta(self, scalar):\n tdi1 = pd.timedelta_range("42 days", freq="9h", periods=1234)\n tdi2 = tdi1.insert(1, pd.NaT) # non-monotonic\n tdi3 = tdi1.insert(3, tdi1[0]) # non-unique\n tdi4 = 
pd.timedelta_range("42 days", freq="ns", periods=2_000_000)\n tdi5 = tdi4.insert(0, tdi4[0]) # over size threshold, not unique\n\n msg = "|".join([re.escape(str(scalar)), re.escape(repr(scalar))])\n for tdi in [tdi1, tdi2, tdi3, tdi4, tdi5]:\n with pytest.raises(TypeError, match=msg):\n scalar in tdi._engine\n\n with pytest.raises(KeyError, match=msg):\n tdi._engine.get_loc(scalar)\n\n\nclass TestNumericEngine:\n def test_is_monotonic(self, numeric_indexing_engine_type_and_dtype):\n engine_type, dtype = numeric_indexing_engine_type_and_dtype\n num = 1000\n arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype)\n\n # monotonic increasing\n engine = engine_type(arr)\n assert engine.is_monotonic_increasing is True\n assert engine.is_monotonic_decreasing is False\n\n # monotonic decreasing\n engine = engine_type(arr[::-1])\n assert engine.is_monotonic_increasing is False\n assert engine.is_monotonic_decreasing is True\n\n # neither monotonic increasing or decreasing\n arr = np.array([1] * num + [2] * num + [1] * num, dtype=dtype)\n engine = engine_type(arr[::-1])\n assert engine.is_monotonic_increasing is False\n assert engine.is_monotonic_decreasing is False\n\n def test_is_unique(self, numeric_indexing_engine_type_and_dtype):\n engine_type, dtype = numeric_indexing_engine_type_and_dtype\n\n # unique\n arr = np.array([1, 3, 2], dtype=dtype)\n engine = engine_type(arr)\n assert engine.is_unique is True\n\n # not unique\n arr = np.array([1, 2, 1], dtype=dtype)\n engine = engine_type(arr)\n assert engine.is_unique is False\n\n def test_get_loc(self, numeric_indexing_engine_type_and_dtype):\n engine_type, dtype = numeric_indexing_engine_type_and_dtype\n\n # unique\n arr = np.array([1, 2, 3], dtype=dtype)\n engine = engine_type(arr)\n assert engine.get_loc(2) == 1\n\n # monotonic\n num = 1000\n arr = np.array([1] * num + [2] * num + [3] * num, dtype=dtype)\n engine = engine_type(arr)\n assert engine.get_loc(2) == slice(1000, 2000)\n\n # not monotonic\n arr = 
np.array([1, 2, 3] * num, dtype=dtype)\n engine = engine_type(arr)\n expected = np.array([False, True, False] * num, dtype=bool)\n result = engine.get_loc(2)\n assert (result == expected).all()\n\n\nclass TestObjectEngine:\n engine_type = libindex.ObjectEngine\n dtype = np.object_\n values = list("abc")\n\n def test_is_monotonic(self):\n num = 1000\n arr = np.array(["a"] * num + ["a"] * num + ["c"] * num, dtype=self.dtype)\n\n # monotonic increasing\n engine = self.engine_type(arr)\n assert engine.is_monotonic_increasing is True\n assert engine.is_monotonic_decreasing is False\n\n # monotonic decreasing\n engine = self.engine_type(arr[::-1])\n assert engine.is_monotonic_increasing is False\n assert engine.is_monotonic_decreasing is True\n\n # neither monotonic increasing or decreasing\n arr = np.array(["a"] * num + ["b"] * num + ["a"] * num, dtype=self.dtype)\n engine = self.engine_type(arr[::-1])\n assert engine.is_monotonic_increasing is False\n assert engine.is_monotonic_decreasing is False\n\n def test_is_unique(self):\n # unique\n arr = np.array(self.values, dtype=self.dtype)\n engine = self.engine_type(arr)\n assert engine.is_unique is True\n\n # not unique\n arr = np.array(["a", "b", "a"], dtype=self.dtype)\n engine = self.engine_type(arr)\n assert engine.is_unique is False\n\n def test_get_loc(self):\n # unique\n arr = np.array(self.values, dtype=self.dtype)\n engine = self.engine_type(arr)\n assert engine.get_loc("b") == 1\n\n # monotonic\n num = 1000\n arr = np.array(["a"] * num + ["b"] * num + ["c"] * num, dtype=self.dtype)\n engine = self.engine_type(arr)\n assert engine.get_loc("b") == slice(1000, 2000)\n\n # not monotonic\n arr = np.array(self.values * num, dtype=self.dtype)\n engine = self.engine_type(arr)\n expected = np.array([False, True, False] * num, dtype=bool)\n result = engine.get_loc("b")\n assert (result == expected).all()\n | .venv\Lib\site-packages\pandas\tests\indexes\test_engines.py | test_engines.py | Python | 6,699 | 0.95 | 0.078125 | 
0.102564 | awesome-app | 249 | 2024-12-24T17:34:25.117462 | MIT | true | 1ccff6e68590564fb59543306843b6fc |
import re\n\nimport pytest\n\nfrom pandas.core.indexes.frozen import FrozenList\n\n\n@pytest.fixture\ndef lst():\n return [1, 2, 3, 4, 5]\n\n\n@pytest.fixture\ndef container(lst):\n return FrozenList(lst)\n\n\n@pytest.fixture\ndef unicode_container():\n return FrozenList(["\u05d0", "\u05d1", "c"])\n\n\nclass TestFrozenList:\n def check_mutable_error(self, *args, **kwargs):\n # Pass whatever function you normally would to pytest.raises\n # (after the Exception kind).\n mutable_regex = re.compile("does not support mutable operations")\n msg = "'(_s)?re.(SRE_)?Pattern' object is not callable"\n with pytest.raises(TypeError, match=msg):\n mutable_regex(*args, **kwargs)\n\n def test_no_mutable_funcs(self, container):\n def setitem():\n container[0] = 5\n\n self.check_mutable_error(setitem)\n\n def setslice():\n container[1:2] = 3\n\n self.check_mutable_error(setslice)\n\n def delitem():\n del container[0]\n\n self.check_mutable_error(delitem)\n\n def delslice():\n del container[0:3]\n\n self.check_mutable_error(delslice)\n\n mutable_methods = ("extend", "pop", "remove", "insert")\n\n for meth in mutable_methods:\n self.check_mutable_error(getattr(container, meth))\n\n def test_slicing_maintains_type(self, container, lst):\n result = container[1:2]\n expected = lst[1:2]\n self.check_result(result, expected)\n\n def check_result(self, result, expected):\n assert isinstance(result, FrozenList)\n assert result == expected\n\n def test_string_methods_dont_fail(self, container):\n repr(container)\n str(container)\n bytes(container)\n\n def test_tricky_container(self, unicode_container):\n repr(unicode_container)\n str(unicode_container)\n\n def test_add(self, container, lst):\n result = container + (1, 2, 3)\n expected = FrozenList(lst + [1, 2, 3])\n self.check_result(result, expected)\n\n result = (1, 2, 3) + container\n expected = FrozenList([1, 2, 3] + lst)\n self.check_result(result, expected)\n\n def test_iadd(self, container, lst):\n q = r = container\n\n q += [5]\n 
self.check_result(q, lst + [5])\n\n # Other shouldn't be mutated.\n self.check_result(r, lst)\n\n def test_union(self, container, lst):\n result = container.union((1, 2, 3))\n expected = FrozenList(lst + [1, 2, 3])\n self.check_result(result, expected)\n\n def test_difference(self, container):\n result = container.difference([2])\n expected = FrozenList([1, 3, 4, 5])\n self.check_result(result, expected)\n\n def test_difference_dupe(self):\n result = FrozenList([1, 2, 3, 2]).difference([2])\n expected = FrozenList([1, 3])\n self.check_result(result, expected)\n\n def test_tricky_container_to_bytes_raises(self, unicode_container):\n # GH 26447\n msg = "^'str' object cannot be interpreted as an integer$"\n with pytest.raises(TypeError, match=msg):\n bytes(unicode_container)\n | .venv\Lib\site-packages\pandas\tests\indexes\test_frozen.py | test_frozen.py | Python | 3,125 | 0.95 | 0.19469 | 0.05 | python-kit | 859 | 2024-12-24T15:06:12.017056 | GPL-3.0 | true | 02ebf553c0a9abf777cd96361a786e23 |
"""\ntest_indexing tests the following Index methods:\n __getitem__\n get_loc\n get_value\n __contains__\n take\n where\n get_indexer\n get_indexer_for\n slice_locs\n asof_locs\n\nThe corresponding tests.indexes.[index_type].test_indexing files\ncontain tests for the corresponding methods specific to those Index subclasses.\n"""\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import InvalidIndexError\n\nfrom pandas.core.dtypes.common import (\n is_float_dtype,\n is_scalar,\n)\n\nfrom pandas import (\n NA,\n DatetimeIndex,\n Index,\n IntervalIndex,\n MultiIndex,\n NaT,\n PeriodIndex,\n TimedeltaIndex,\n)\nimport pandas._testing as tm\n\n\nclass TestTake:\n def test_take_invalid_kwargs(self, index):\n indices = [1, 2]\n\n msg = r"take\(\) got an unexpected keyword argument 'foo'"\n with pytest.raises(TypeError, match=msg):\n index.take(indices, foo=2)\n\n msg = "the 'out' parameter is not supported"\n with pytest.raises(ValueError, match=msg):\n index.take(indices, out=indices)\n\n msg = "the 'mode' parameter is not supported"\n with pytest.raises(ValueError, match=msg):\n index.take(indices, mode="clip")\n\n def test_take(self, index):\n indexer = [4, 3, 0, 2]\n if len(index) < 5:\n pytest.skip("Test doesn't make sense since not enough elements")\n\n result = index.take(indexer)\n expected = index[indexer]\n assert result.equals(expected)\n\n if not isinstance(index, (DatetimeIndex, PeriodIndex, TimedeltaIndex)):\n # GH 10791\n msg = r"'(.*Index)' object has no attribute 'freq'"\n with pytest.raises(AttributeError, match=msg):\n index.freq\n\n def test_take_indexer_type(self):\n # GH#42875\n integer_index = Index([0, 1, 2, 3])\n scalar_index = 1\n msg = "Expected indices to be array-like"\n with pytest.raises(TypeError, match=msg):\n integer_index.take(scalar_index)\n\n def test_take_minus1_without_fill(self, index):\n # -1 does not get treated as NA unless allow_fill=True is passed\n if len(index) == 0:\n # Test is not applicable\n pytest.skip("Test 
doesn't make sense for empty index")\n\n result = index.take([0, 0, -1])\n\n expected = index.take([0, 0, len(index) - 1])\n tm.assert_index_equal(result, expected)\n\n\nclass TestContains:\n @pytest.mark.parametrize(\n "index,val",\n [\n (Index([0, 1, 2]), 2),\n (Index([0, 1, "2"]), "2"),\n (Index([0, 1, 2, np.inf, 4]), 4),\n (Index([0, 1, 2, np.nan, 4]), 4),\n (Index([0, 1, 2, np.inf]), np.inf),\n (Index([0, 1, 2, np.nan]), np.nan),\n ],\n )\n def test_index_contains(self, index, val):\n assert val in index\n\n @pytest.mark.parametrize(\n "index,val",\n [\n (Index([0, 1, 2]), "2"),\n (Index([0, 1, "2"]), 2),\n (Index([0, 1, 2, np.inf]), 4),\n (Index([0, 1, 2, np.nan]), 4),\n (Index([0, 1, 2, np.inf]), np.nan),\n (Index([0, 1, 2, np.nan]), np.inf),\n # Checking if np.inf in int64 Index should not cause an OverflowError\n # Related to GH 16957\n (Index([0, 1, 2], dtype=np.int64), np.inf),\n (Index([0, 1, 2], dtype=np.int64), np.nan),\n (Index([0, 1, 2], dtype=np.uint64), np.inf),\n (Index([0, 1, 2], dtype=np.uint64), np.nan),\n ],\n )\n def test_index_not_contains(self, index, val):\n assert val not in index\n\n @pytest.mark.parametrize(\n "index,val", [(Index([0, 1, "2"]), 0), (Index([0, 1, "2"]), "2")]\n )\n def test_mixed_index_contains(self, index, val):\n # GH#19860\n assert val in index\n\n @pytest.mark.parametrize(\n "index,val", [(Index([0, 1, "2"]), "1"), (Index([0, 1, "2"]), 2)]\n )\n def test_mixed_index_not_contains(self, index, val):\n # GH#19860\n assert val not in index\n\n def test_contains_with_float_index(self, any_real_numpy_dtype):\n # GH#22085\n dtype = any_real_numpy_dtype\n data = [0, 1, 2, 3] if not is_float_dtype(dtype) else [0.1, 1.1, 2.2, 3.3]\n index = Index(data, dtype=dtype)\n\n if not is_float_dtype(index.dtype):\n assert 1.1 not in index\n assert 1.0 in index\n assert 1 in index\n else:\n assert 1.1 in index\n assert 1.0 not in index\n assert 1 not in index\n\n def test_contains_requires_hashable_raises(self, index):\n if 
isinstance(index, MultiIndex):\n return # TODO: do we want this to raise?\n\n msg = "unhashable type: 'list'"\n with pytest.raises(TypeError, match=msg):\n [] in index\n\n msg = "|".join(\n [\n r"unhashable type: 'dict'",\n r"must be real number, not dict",\n r"an integer is required",\n r"\{\}",\n r"pandas\._libs\.interval\.IntervalTree' is not iterable",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n {} in index._engine\n\n\nclass TestGetLoc:\n def test_get_loc_non_hashable(self, index):\n with pytest.raises(InvalidIndexError, match="[0, 1]"):\n index.get_loc([0, 1])\n\n def test_get_loc_non_scalar_hashable(self, index):\n # GH52877\n from enum import Enum\n\n class E(Enum):\n X1 = "x1"\n\n assert not is_scalar(E.X1)\n\n exc = KeyError\n msg = "<E.X1: 'x1'>"\n if isinstance(\n index,\n (\n DatetimeIndex,\n TimedeltaIndex,\n PeriodIndex,\n IntervalIndex,\n ),\n ):\n # TODO: make these more consistent?\n exc = InvalidIndexError\n msg = "E.X1"\n with pytest.raises(exc, match=msg):\n index.get_loc(E.X1)\n\n def test_get_loc_generator(self, index):\n exc = KeyError\n if isinstance(\n index,\n (\n DatetimeIndex,\n TimedeltaIndex,\n PeriodIndex,\n IntervalIndex,\n MultiIndex,\n ),\n ):\n # TODO: make these more consistent?\n exc = InvalidIndexError\n with pytest.raises(exc, match="generator object"):\n # MultiIndex specifically checks for generator; others for scalar\n index.get_loc(x for x in range(5))\n\n def test_get_loc_masked_duplicated_na(self):\n # GH#48411\n idx = Index([1, 2, NA, NA], dtype="Int64")\n result = idx.get_loc(NA)\n expected = np.array([False, False, True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n\nclass TestGetIndexer:\n def test_get_indexer_base(self, index):\n if index._index_as_unique:\n expected = np.arange(index.size, dtype=np.intp)\n actual = index.get_indexer(index)\n tm.assert_numpy_array_equal(expected, actual)\n else:\n msg = "Reindexing only valid with uniquely valued Index objects"\n with 
pytest.raises(InvalidIndexError, match=msg):\n index.get_indexer(index)\n\n with pytest.raises(ValueError, match="Invalid fill method"):\n index.get_indexer(index, method="invalid")\n\n def test_get_indexer_consistency(self, index):\n # See GH#16819\n\n if index._index_as_unique:\n indexer = index.get_indexer(index[0:2])\n assert isinstance(indexer, np.ndarray)\n assert indexer.dtype == np.intp\n else:\n msg = "Reindexing only valid with uniquely valued Index objects"\n with pytest.raises(InvalidIndexError, match=msg):\n index.get_indexer(index[0:2])\n\n indexer, _ = index.get_indexer_non_unique(index[0:2])\n assert isinstance(indexer, np.ndarray)\n assert indexer.dtype == np.intp\n\n def test_get_indexer_masked_duplicated_na(self):\n # GH#48411\n idx = Index([1, 2, NA, NA], dtype="Int64")\n result = idx.get_indexer_for(Index([1, NA], dtype="Int64"))\n expected = np.array([0, 2, 3], dtype=result.dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n\nclass TestConvertSliceIndexer:\n def test_convert_almost_null_slice(self, index):\n # slice with None at both ends, but not step\n\n key = slice(None, None, "foo")\n\n if isinstance(index, IntervalIndex):\n msg = "label-based slicing with step!=1 is not supported for IntervalIndex"\n with pytest.raises(ValueError, match=msg):\n index._convert_slice_indexer(key, "loc")\n else:\n msg = "'>=' not supported between instances of 'str' and 'int'"\n with pytest.raises(TypeError, match=msg):\n index._convert_slice_indexer(key, "loc")\n\n\nclass TestPutmask:\n def test_putmask_with_wrong_mask(self, index):\n # GH#18368\n if not len(index):\n pytest.skip("Test doesn't make sense for empty index")\n\n fill = index[0]\n\n msg = "putmask: mask and data must be the same size"\n with pytest.raises(ValueError, match=msg):\n index.putmask(np.ones(len(index) + 1, np.bool_), fill)\n\n with pytest.raises(ValueError, match=msg):\n index.putmask(np.ones(len(index) - 1, np.bool_), fill)\n\n with pytest.raises(ValueError, match=msg):\n 
index.putmask("foo", fill)\n\n\n@pytest.mark.parametrize(\n "idx", [Index([1, 2, 3]), Index([0.1, 0.2, 0.3]), Index(["a", "b", "c"])]\n)\ndef test_getitem_deprecated_float(idx):\n # https://github.com/pandas-dev/pandas/issues/34191\n\n msg = "Indexing with a float is no longer supported"\n with pytest.raises(IndexError, match=msg):\n idx[1.0]\n\n\n@pytest.mark.parametrize(\n "idx,target,expected",\n [\n ([np.nan, "var1", np.nan], [np.nan], np.array([0, 2], dtype=np.intp)),\n (\n [np.nan, "var1", np.nan],\n [np.nan, "var1"],\n np.array([0, 2, 1], dtype=np.intp),\n ),\n (\n np.array([np.nan, "var1", np.nan], dtype=object),\n [np.nan],\n np.array([0, 2], dtype=np.intp),\n ),\n (\n DatetimeIndex(["2020-08-05", NaT, NaT]),\n [NaT],\n np.array([1, 2], dtype=np.intp),\n ),\n (["a", "b", "a", np.nan], [np.nan], np.array([3], dtype=np.intp)),\n (\n np.array(["b", np.nan, float("NaN"), "b"], dtype=object),\n Index([np.nan], dtype=object),\n np.array([1, 2], dtype=np.intp),\n ),\n ],\n)\ndef test_get_indexer_non_unique_multiple_nans(idx, target, expected):\n # GH 35392\n axis = Index(idx)\n actual = axis.get_indexer_for(target)\n tm.assert_numpy_array_equal(actual, expected)\n\n\ndef test_get_indexer_non_unique_nans_in_object_dtype_target(nulls_fixture):\n idx = Index([1.0, 2.0])\n target = Index([1, nulls_fixture], dtype="object")\n\n result_idx, result_missing = idx.get_indexer_non_unique(target)\n tm.assert_numpy_array_equal(result_idx, np.array([0, -1], dtype=np.intp))\n tm.assert_numpy_array_equal(result_missing, np.array([1], dtype=np.intp))\n | .venv\Lib\site-packages\pandas\tests\indexes\test_indexing.py | test_indexing.py | Python | 11,309 | 0.95 | 0.137255 | 0.067114 | vue-tools | 744 | 2023-11-25T17:45:59.110920 | MIT | true | ff499e99451ee6a7617e18358e619a69 |
"""\nTests for the Index constructor conducting inference.\n"""\nfrom datetime import (\n datetime,\n timedelta,\n timezone,\n)\nfrom decimal import Decimal\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs.timezones import maybe_get_tz\n\nfrom pandas import (\n NA,\n Categorical,\n CategoricalIndex,\n DatetimeIndex,\n Index,\n IntervalIndex,\n MultiIndex,\n NaT,\n PeriodIndex,\n Series,\n TimedeltaIndex,\n Timestamp,\n array,\n date_range,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\n\n\nclass TestIndexConstructorInference:\n def test_object_all_bools(self):\n # GH#49594 match Series behavior on ndarray[object] of all bools\n arr = np.array([True, False], dtype=object)\n res = Index(arr)\n assert res.dtype == object\n\n # since the point is matching Series behavior, let's double check\n assert Series(arr).dtype == object\n\n def test_object_all_complex(self):\n # GH#49594 match Series behavior on ndarray[object] of all complex\n arr = np.array([complex(1), complex(2)], dtype=object)\n res = Index(arr)\n assert res.dtype == object\n\n # since the point is matching Series behavior, let's double check\n assert Series(arr).dtype == object\n\n @pytest.mark.parametrize("val", [NaT, None, np.nan, float("nan")])\n def test_infer_nat(self, val):\n # GH#49340 all NaT/None/nan and at least 1 NaT -> datetime64[ns],\n # matching Series behavior\n values = [NaT, val]\n\n idx = Index(values)\n assert idx.dtype == "datetime64[ns]" and idx.isna().all()\n\n idx = Index(values[::-1])\n assert idx.dtype == "datetime64[ns]" and idx.isna().all()\n\n idx = Index(np.array(values, dtype=object))\n assert idx.dtype == "datetime64[ns]" and idx.isna().all()\n\n idx = Index(np.array(values, dtype=object)[::-1])\n assert idx.dtype == "datetime64[ns]" and idx.isna().all()\n\n @pytest.mark.parametrize("na_value", [None, np.nan])\n @pytest.mark.parametrize("vtype", [list, tuple, iter])\n def test_construction_list_tuples_nan(self, na_value, vtype):\n # 
GH#18505 : valid tuples containing NaN\n values = [(1, "two"), (3.0, na_value)]\n result = Index(vtype(values))\n expected = MultiIndex.from_tuples(values)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "dtype",\n [int, "int64", "int32", "int16", "int8", "uint64", "uint32", "uint16", "uint8"],\n )\n def test_constructor_int_dtype_float(self, dtype):\n # GH#18400\n expected = Index([0, 1, 2, 3], dtype=dtype)\n result = Index([0.0, 1.0, 2.0, 3.0], dtype=dtype)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("cast_index", [True, False])\n @pytest.mark.parametrize(\n "vals", [[True, False, True], np.array([True, False, True], dtype=bool)]\n )\n def test_constructor_dtypes_to_object(self, cast_index, vals):\n if cast_index:\n index = Index(vals, dtype=bool)\n else:\n index = Index(vals)\n\n assert type(index) is Index\n assert index.dtype == bool\n\n def test_constructor_categorical_to_object(self):\n # GH#32167 Categorical data and dtype=object should return object-dtype\n ci = CategoricalIndex(range(5))\n result = Index(ci, dtype=object)\n assert not isinstance(result, CategoricalIndex)\n\n def test_constructor_infer_periodindex(self):\n xp = period_range("2012-1-1", freq="M", periods=3)\n rs = Index(xp)\n tm.assert_index_equal(rs, xp)\n assert isinstance(rs, PeriodIndex)\n\n def test_from_list_of_periods(self):\n rng = period_range("1/1/2000", periods=20, freq="D")\n periods = list(rng)\n\n result = Index(periods)\n assert isinstance(result, PeriodIndex)\n\n @pytest.mark.parametrize("pos", [0, 1])\n @pytest.mark.parametrize(\n "klass,dtype,ctor",\n [\n (DatetimeIndex, "datetime64[ns]", np.datetime64("nat")),\n (TimedeltaIndex, "timedelta64[ns]", np.timedelta64("nat")),\n ],\n )\n def test_constructor_infer_nat_dt_like(\n self, pos, klass, dtype, ctor, nulls_fixture, request\n ):\n if isinstance(nulls_fixture, Decimal):\n # We dont cast these to datetime64/timedelta64\n pytest.skip(\n f"We don't cast 
{type(nulls_fixture).__name__} to "\n "datetime64/timedelta64"\n )\n\n expected = klass([NaT, NaT])\n assert expected.dtype == dtype\n data = [ctor]\n data.insert(pos, nulls_fixture)\n\n warn = None\n if nulls_fixture is NA:\n expected = Index([NA, NaT])\n mark = pytest.mark.xfail(reason="Broken with np.NaT ctor; see GH 31884")\n request.applymarker(mark)\n # GH#35942 numpy will emit a DeprecationWarning within the\n # assert_index_equal calls. Since we can't do anything\n # about it until GH#31884 is fixed, we suppress that warning.\n warn = DeprecationWarning\n\n result = Index(data)\n\n with tm.assert_produces_warning(warn):\n tm.assert_index_equal(result, expected)\n\n result = Index(np.array(data, dtype=object))\n\n with tm.assert_produces_warning(warn):\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("swap_objs", [True, False])\n def test_constructor_mixed_nat_objs_infers_object(self, swap_objs):\n # mixed np.datetime64/timedelta64 nat results in object\n data = [np.datetime64("nat"), np.timedelta64("nat")]\n if swap_objs:\n data = data[::-1]\n\n expected = Index(data, dtype=object)\n tm.assert_index_equal(Index(data), expected)\n tm.assert_index_equal(Index(np.array(data, dtype=object)), expected)\n\n @pytest.mark.parametrize("swap_objs", [True, False])\n def test_constructor_datetime_and_datetime64(self, swap_objs):\n data = [Timestamp(2021, 6, 8, 9, 42), np.datetime64("now")]\n if swap_objs:\n data = data[::-1]\n expected = DatetimeIndex(data)\n\n tm.assert_index_equal(Index(data), expected)\n tm.assert_index_equal(Index(np.array(data, dtype=object)), expected)\n\n def test_constructor_datetimes_mixed_tzs(self):\n # https://github.com/pandas-dev/pandas/pull/55793/files#r1383719998\n tz = maybe_get_tz("US/Central")\n dt1 = datetime(2020, 1, 1, tzinfo=tz)\n dt2 = datetime(2020, 1, 1, tzinfo=timezone.utc)\n result = Index([dt1, dt2])\n expected = Index([dt1, dt2], dtype=object)\n tm.assert_index_equal(result, expected)\n\n\nclass 
TestDtypeEnforced:\n # check we don't silently ignore the dtype keyword\n\n def test_constructor_object_dtype_with_ea_data(self, any_numeric_ea_dtype):\n # GH#45206\n arr = array([0], dtype=any_numeric_ea_dtype)\n\n idx = Index(arr, dtype=object)\n assert idx.dtype == object\n\n @pytest.mark.parametrize("dtype", [object, "float64", "uint64", "category"])\n def test_constructor_range_values_mismatched_dtype(self, dtype):\n rng = Index(range(5))\n\n result = Index(rng, dtype=dtype)\n assert result.dtype == dtype\n\n result = Index(range(5), dtype=dtype)\n assert result.dtype == dtype\n\n @pytest.mark.parametrize("dtype", [object, "float64", "uint64", "category"])\n def test_constructor_categorical_values_mismatched_non_ea_dtype(self, dtype):\n cat = Categorical([1, 2, 3])\n\n result = Index(cat, dtype=dtype)\n assert result.dtype == dtype\n\n def test_constructor_categorical_values_mismatched_dtype(self):\n dti = date_range("2016-01-01", periods=3)\n cat = Categorical(dti)\n result = Index(cat, dti.dtype)\n tm.assert_index_equal(result, dti)\n\n dti2 = dti.tz_localize("Asia/Tokyo")\n cat2 = Categorical(dti2)\n result = Index(cat2, dti2.dtype)\n tm.assert_index_equal(result, dti2)\n\n ii = IntervalIndex.from_breaks(range(5))\n cat3 = Categorical(ii)\n result = Index(cat3, dtype=ii.dtype)\n tm.assert_index_equal(result, ii)\n\n def test_constructor_ea_values_mismatched_categorical_dtype(self):\n dti = date_range("2016-01-01", periods=3)\n result = Index(dti, dtype="category")\n expected = CategoricalIndex(dti)\n tm.assert_index_equal(result, expected)\n\n dti2 = date_range("2016-01-01", periods=3, tz="US/Pacific")\n result = Index(dti2, dtype="category")\n expected = CategoricalIndex(dti2)\n tm.assert_index_equal(result, expected)\n\n def test_constructor_period_values_mismatched_dtype(self):\n pi = period_range("2016-01-01", periods=3, freq="D")\n result = Index(pi, dtype="category")\n expected = CategoricalIndex(pi)\n tm.assert_index_equal(result, expected)\n\n def 
test_constructor_timedelta64_values_mismatched_dtype(self):\n # check we don't silently ignore the dtype keyword\n tdi = timedelta_range("4 Days", periods=5)\n result = Index(tdi, dtype="category")\n expected = CategoricalIndex(tdi)\n tm.assert_index_equal(result, expected)\n\n def test_constructor_interval_values_mismatched_dtype(self):\n dti = date_range("2016-01-01", periods=3)\n ii = IntervalIndex.from_breaks(dti)\n result = Index(ii, dtype="category")\n expected = CategoricalIndex(ii)\n tm.assert_index_equal(result, expected)\n\n def test_constructor_datetime64_values_mismatched_period_dtype(self):\n dti = date_range("2016-01-01", periods=3)\n result = Index(dti, dtype="Period[D]")\n expected = dti.to_period("D")\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", ["int64", "uint64"])\n def test_constructor_int_dtype_nan_raises(self, dtype):\n # see GH#15187\n data = [np.nan]\n msg = "cannot convert"\n with pytest.raises(ValueError, match=msg):\n Index(data, dtype=dtype)\n\n @pytest.mark.parametrize(\n "vals",\n [\n [1, 2, 3],\n np.array([1, 2, 3]),\n np.array([1, 2, 3], dtype=int),\n # below should coerce\n [1.0, 2.0, 3.0],\n np.array([1.0, 2.0, 3.0], dtype=float),\n ],\n )\n def test_constructor_dtypes_to_int(self, vals, any_int_numpy_dtype):\n dtype = any_int_numpy_dtype\n index = Index(vals, dtype=dtype)\n assert index.dtype == dtype\n\n @pytest.mark.parametrize(\n "vals",\n [\n [1, 2, 3],\n [1.0, 2.0, 3.0],\n np.array([1.0, 2.0, 3.0]),\n np.array([1, 2, 3], dtype=int),\n np.array([1.0, 2.0, 3.0], dtype=float),\n ],\n )\n def test_constructor_dtypes_to_float(self, vals, float_numpy_dtype):\n dtype = float_numpy_dtype\n index = Index(vals, dtype=dtype)\n assert index.dtype == dtype\n\n @pytest.mark.parametrize(\n "vals",\n [\n [1, 2, 3],\n np.array([1, 2, 3], dtype=int),\n np.array(["2011-01-01", "2011-01-02"], dtype="datetime64[ns]"),\n [datetime(2011, 1, 1), datetime(2011, 1, 2)],\n ],\n )\n def 
test_constructor_dtypes_to_categorical(self, vals):\n index = Index(vals, dtype="category")\n assert isinstance(index, CategoricalIndex)\n\n @pytest.mark.parametrize("cast_index", [True, False])\n @pytest.mark.parametrize(\n "vals",\n [\n Index(np.array([np.datetime64("2011-01-01"), np.datetime64("2011-01-02")])),\n Index([datetime(2011, 1, 1), datetime(2011, 1, 2)]),\n ],\n )\n def test_constructor_dtypes_to_datetime(self, cast_index, vals):\n if cast_index:\n index = Index(vals, dtype=object)\n assert isinstance(index, Index)\n assert index.dtype == object\n else:\n index = Index(vals)\n assert isinstance(index, DatetimeIndex)\n\n @pytest.mark.parametrize("cast_index", [True, False])\n @pytest.mark.parametrize(\n "vals",\n [\n np.array([np.timedelta64(1, "D"), np.timedelta64(1, "D")]),\n [timedelta(1), timedelta(1)],\n ],\n )\n def test_constructor_dtypes_to_timedelta(self, cast_index, vals):\n if cast_index:\n index = Index(vals, dtype=object)\n assert isinstance(index, Index)\n assert index.dtype == object\n else:\n index = Index(vals)\n assert isinstance(index, TimedeltaIndex)\n\n def test_pass_timedeltaindex_to_index(self):\n rng = timedelta_range("1 days", "10 days")\n idx = Index(rng, dtype=object)\n\n expected = Index(rng.to_pytimedelta(), dtype=object)\n\n tm.assert_numpy_array_equal(idx.values, expected.values)\n\n def test_pass_datetimeindex_to_index(self):\n # GH#1396\n rng = date_range("1/1/2000", "3/1/2000")\n idx = Index(rng, dtype=object)\n\n expected = Index(rng.to_pydatetime(), dtype=object)\n\n tm.assert_numpy_array_equal(idx.values, expected.values)\n\n\nclass TestIndexConstructorUnwrapping:\n # Test passing different arraylike values to pd.Index\n\n @pytest.mark.parametrize("klass", [Index, DatetimeIndex])\n def test_constructor_from_series_dt64(self, klass):\n stamps = [Timestamp("20110101"), Timestamp("20120101"), Timestamp("20130101")]\n expected = DatetimeIndex(stamps)\n ser = Series(stamps)\n result = klass(ser)\n 
tm.assert_index_equal(result, expected)\n\n def test_constructor_no_pandas_array(self):\n ser = Series([1, 2, 3])\n result = Index(ser.array)\n expected = Index([1, 2, 3])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "array",\n [\n np.arange(5),\n np.array(["a", "b", "c"]),\n date_range("2000-01-01", periods=3).values,\n ],\n )\n def test_constructor_ndarray_like(self, array):\n # GH#5460#issuecomment-44474502\n # it should be possible to convert any object that satisfies the numpy\n # ndarray interface directly into an Index\n class ArrayLike:\n def __init__(self, array) -> None:\n self.array = array\n\n def __array__(self, dtype=None, copy=None) -> np.ndarray:\n return self.array\n\n expected = Index(array)\n result = Index(ArrayLike(array))\n tm.assert_index_equal(result, expected)\n\n\nclass TestIndexConstructionErrors:\n def test_constructor_overflow_int64(self):\n # see GH#15832\n msg = (\n "The elements provided in the data cannot "\n "all be casted to the dtype int64"\n )\n with pytest.raises(OverflowError, match=msg):\n Index([np.iinfo(np.uint64).max - 1], dtype="int64")\n | .venv\Lib\site-packages\pandas\tests\indexes\test_index_new.py | test_index_new.py | Python | 14,923 | 0.95 | 0.113426 | 0.072222 | awesome-app | 750 | 2025-06-16T21:40:46.344337 | MIT | true | 0cedb6511b605efebaf9c2f2eb05517e |
import numpy as np\nimport pytest\n\nfrom pandas import (\n CategoricalIndex,\n DatetimeIndex,\n Index,\n PeriodIndex,\n TimedeltaIndex,\n isna,\n)\nimport pandas._testing as tm\nfrom pandas.api.types import (\n is_complex_dtype,\n is_numeric_dtype,\n)\nfrom pandas.core.arrays import BooleanArray\nfrom pandas.core.indexes.datetimelike import DatetimeIndexOpsMixin\n\n\ndef test_numpy_ufuncs_out(index):\n result = index == index\n\n out = np.empty(index.shape, dtype=bool)\n np.equal(index, index, out=out)\n tm.assert_numpy_array_equal(out, result)\n\n if not index._is_multi:\n # same thing on the ExtensionArray\n out = np.empty(index.shape, dtype=bool)\n np.equal(index.array, index.array, out=out)\n tm.assert_numpy_array_equal(out, result)\n\n\n@pytest.mark.parametrize(\n "func",\n [\n np.exp,\n np.exp2,\n np.expm1,\n np.log,\n np.log2,\n np.log10,\n np.log1p,\n np.sqrt,\n np.sin,\n np.cos,\n np.tan,\n np.arcsin,\n np.arccos,\n np.arctan,\n np.sinh,\n np.cosh,\n np.tanh,\n np.arcsinh,\n np.arccosh,\n np.arctanh,\n np.deg2rad,\n np.rad2deg,\n ],\n ids=lambda x: x.__name__,\n)\ndef test_numpy_ufuncs_basic(index, func):\n # test ufuncs of numpy, see:\n # https://numpy.org/doc/stable/reference/ufuncs.html\n\n if isinstance(index, DatetimeIndexOpsMixin):\n with tm.external_error_raised((TypeError, AttributeError)):\n with np.errstate(all="ignore"):\n func(index)\n elif is_numeric_dtype(index) and not (\n is_complex_dtype(index) and func in [np.deg2rad, np.rad2deg]\n ):\n # coerces to float (e.g. 
np.sin)\n with np.errstate(all="ignore"):\n result = func(index)\n arr_result = func(index.values)\n if arr_result.dtype == np.float16:\n arr_result = arr_result.astype(np.float32)\n exp = Index(arr_result, name=index.name)\n\n tm.assert_index_equal(result, exp)\n if isinstance(index.dtype, np.dtype) and is_numeric_dtype(index):\n if is_complex_dtype(index):\n assert result.dtype == index.dtype\n elif index.dtype in ["bool", "int8", "uint8"]:\n assert result.dtype in ["float16", "float32"]\n elif index.dtype in ["int16", "uint16", "float32"]:\n assert result.dtype == "float32"\n else:\n assert result.dtype == "float64"\n else:\n # e.g. np.exp with Int64 -> Float64\n assert type(result) is Index\n # raise AttributeError or TypeError\n elif len(index) == 0:\n pass\n else:\n with tm.external_error_raised((TypeError, AttributeError)):\n with np.errstate(all="ignore"):\n func(index)\n\n\n@pytest.mark.parametrize(\n "func", [np.isfinite, np.isinf, np.isnan, np.signbit], ids=lambda x: x.__name__\n)\ndef test_numpy_ufuncs_other(index, func):\n # test ufuncs of numpy, see:\n # https://numpy.org/doc/stable/reference/ufuncs.html\n if isinstance(index, (DatetimeIndex, TimedeltaIndex)):\n if func in (np.isfinite, np.isinf, np.isnan):\n # numpy 1.18 changed isinf and isnan to not raise on dt64/td64\n result = func(index)\n assert isinstance(result, np.ndarray)\n\n out = np.empty(index.shape, dtype=bool)\n func(index, out=out)\n tm.assert_numpy_array_equal(out, result)\n else:\n with tm.external_error_raised(TypeError):\n func(index)\n\n elif isinstance(index, PeriodIndex):\n with tm.external_error_raised(TypeError):\n func(index)\n\n elif is_numeric_dtype(index) and not (\n is_complex_dtype(index) and func is np.signbit\n ):\n # Results in bool array\n result = func(index)\n if not isinstance(index.dtype, np.dtype):\n # e.g. 
Int64 we expect to get BooleanArray back\n assert isinstance(result, BooleanArray)\n else:\n assert isinstance(result, np.ndarray)\n\n out = np.empty(index.shape, dtype=bool)\n func(index, out=out)\n\n if not isinstance(index.dtype, np.dtype):\n tm.assert_numpy_array_equal(out, result._data)\n else:\n tm.assert_numpy_array_equal(out, result)\n\n elif len(index) == 0:\n pass\n else:\n with tm.external_error_raised(TypeError):\n func(index)\n\n\n@pytest.mark.parametrize("func", [np.maximum, np.minimum])\ndef test_numpy_ufuncs_reductions(index, func, request):\n # TODO: overlap with tests.series.test_ufunc.test_reductions\n if len(index) == 0:\n pytest.skip("Test doesn't make sense for empty index.")\n\n if isinstance(index, CategoricalIndex) and index.dtype.ordered is False:\n with pytest.raises(TypeError, match="is not ordered for"):\n func.reduce(index)\n return\n else:\n result = func.reduce(index)\n\n if func is np.maximum:\n expected = index.max(skipna=False)\n else:\n expected = index.min(skipna=False)\n # TODO: do we have cases both with and without NAs?\n\n assert type(result) is type(expected)\n if isna(result):\n assert isna(expected)\n else:\n assert result == expected\n\n\n@pytest.mark.parametrize("func", [np.bitwise_and, np.bitwise_or, np.bitwise_xor])\ndef test_numpy_ufuncs_bitwise(func):\n # https://github.com/pandas-dev/pandas/issues/46769\n idx1 = Index([1, 2, 3, 4], dtype="int64")\n idx2 = Index([3, 4, 5, 6], dtype="int64")\n\n with tm.assert_produces_warning(None):\n result = func(idx1, idx2)\n\n expected = Index(func(idx1.values, idx2.values))\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\test_numpy_compat.py | test_numpy_compat.py | Python | 5,776 | 0.95 | 0.10582 | 0.08589 | awesome-app | 586 | 2024-12-24T18:18:14.202867 | BSD-3-Clause | true | 65c1ae663cc0309956ccb1e04a5ed653 |
from __future__ import annotations\n\nfrom datetime import datetime\nimport weakref\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import Timestamp\n\nfrom pandas.core.dtypes.common import (\n is_integer_dtype,\n is_numeric_dtype,\n)\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\n\nimport pandas as pd\nfrom pandas import (\n CategoricalIndex,\n DatetimeIndex,\n DatetimeTZDtype,\n Index,\n IntervalIndex,\n MultiIndex,\n PeriodIndex,\n RangeIndex,\n Series,\n StringDtype,\n TimedeltaIndex,\n isna,\n period_range,\n)\nimport pandas._testing as tm\nimport pandas.core.algorithms as algos\nfrom pandas.core.arrays import BaseMaskedArray\n\n\nclass TestBase:\n @pytest.fixture(\n params=[\n RangeIndex(start=0, stop=20, step=2),\n Index(np.arange(5, dtype=np.float64)),\n Index(np.arange(5, dtype=np.float32)),\n Index(np.arange(5, dtype=np.uint64)),\n Index(range(0, 20, 2), dtype=np.int64),\n Index(range(0, 20, 2), dtype=np.int32),\n Index(range(0, 20, 2), dtype=np.int16),\n Index(range(0, 20, 2), dtype=np.int8),\n Index(list("abcde")),\n Index([0, "a", 1, "b", 2, "c"]),\n period_range("20130101", periods=5, freq="D"),\n TimedeltaIndex(\n [\n "0 days 01:00:00",\n "1 days 01:00:00",\n "2 days 01:00:00",\n "3 days 01:00:00",\n "4 days 01:00:00",\n ],\n dtype="timedelta64[ns]",\n freq="D",\n ),\n DatetimeIndex(\n ["2013-01-01", "2013-01-02", "2013-01-03", "2013-01-04", "2013-01-05"],\n dtype="datetime64[ns]",\n freq="D",\n ),\n IntervalIndex.from_breaks(range(11), closed="right"),\n ]\n )\n def simple_index(self, request):\n return request.param\n\n def test_pickle_compat_construction(self, simple_index):\n # need an object to create with\n if isinstance(simple_index, RangeIndex):\n pytest.skip("RangeIndex() is a valid constructor")\n msg = "|".join(\n [\n r"Index\(\.\.\.\) must be called with a collection of some "\n r"kind, None was passed",\n r"DatetimeIndex\(\) must be called with a collection of some "\n r"kind, None was passed",\n 
r"TimedeltaIndex\(\) must be called with a collection of some "\n r"kind, None was passed",\n r"__new__\(\) missing 1 required positional argument: 'data'",\n r"__new__\(\) takes at least 2 arguments \(1 given\)",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n type(simple_index)()\n\n def test_shift(self, simple_index):\n # GH8083 test the base class for shift\n if isinstance(simple_index, (DatetimeIndex, TimedeltaIndex, PeriodIndex)):\n pytest.skip("Tested in test_ops/test_arithmetic")\n idx = simple_index\n msg = (\n f"This method is only implemented for DatetimeIndex, PeriodIndex and "\n f"TimedeltaIndex; Got type {type(idx).__name__}"\n )\n with pytest.raises(NotImplementedError, match=msg):\n idx.shift(1)\n with pytest.raises(NotImplementedError, match=msg):\n idx.shift(1, 2)\n\n def test_constructor_name_unhashable(self, simple_index):\n # GH#29069 check that name is hashable\n # See also same-named test in tests.series.test_constructors\n idx = simple_index\n with pytest.raises(TypeError, match="Index.name must be a hashable type"):\n type(idx)(idx, name=[])\n\n def test_create_index_existing_name(self, simple_index):\n # GH11193, when an existing index is passed, and a new name is not\n # specified, the new index should inherit the previous object name\n expected = simple_index.copy()\n if not isinstance(expected, MultiIndex):\n expected.name = "foo"\n result = Index(expected)\n tm.assert_index_equal(result, expected)\n\n result = Index(expected, name="bar")\n expected.name = "bar"\n tm.assert_index_equal(result, expected)\n else:\n expected.names = ["foo", "bar"]\n result = Index(expected)\n tm.assert_index_equal(\n result,\n Index(\n Index(\n [\n ("foo", "one"),\n ("foo", "two"),\n ("bar", "one"),\n ("baz", "two"),\n ("qux", "one"),\n ("qux", "two"),\n ],\n dtype="object",\n ),\n names=["foo", "bar"],\n ),\n )\n\n result = Index(expected, names=["A", "B"])\n tm.assert_index_equal(\n result,\n Index(\n Index(\n [\n ("foo", "one"),\n ("foo", 
"two"),\n ("bar", "one"),\n ("baz", "two"),\n ("qux", "one"),\n ("qux", "two"),\n ],\n dtype="object",\n ),\n names=["A", "B"],\n ),\n )\n\n def test_numeric_compat(self, simple_index):\n idx = simple_index\n # Check that this doesn't cover MultiIndex case, if/when it does,\n # we can remove multi.test_compat.test_numeric_compat\n assert not isinstance(idx, MultiIndex)\n if type(idx) is Index:\n pytest.skip("Not applicable for Index")\n if is_numeric_dtype(simple_index.dtype) or isinstance(\n simple_index, TimedeltaIndex\n ):\n pytest.skip("Tested elsewhere.")\n\n typ = type(idx._data).__name__\n cls = type(idx).__name__\n lmsg = "|".join(\n [\n rf"unsupported operand type\(s\) for \*: '{typ}' and 'int'",\n "cannot perform (__mul__|__truediv__|__floordiv__) with "\n f"this index type: ({cls}|{typ})",\n ]\n )\n with pytest.raises(TypeError, match=lmsg):\n idx * 1\n rmsg = "|".join(\n [\n rf"unsupported operand type\(s\) for \*: 'int' and '{typ}'",\n "cannot perform (__rmul__|__rtruediv__|__rfloordiv__) with "\n f"this index type: ({cls}|{typ})",\n ]\n )\n with pytest.raises(TypeError, match=rmsg):\n 1 * idx\n\n div_err = lmsg.replace("*", "/")\n with pytest.raises(TypeError, match=div_err):\n idx / 1\n div_err = rmsg.replace("*", "/")\n with pytest.raises(TypeError, match=div_err):\n 1 / idx\n\n floordiv_err = lmsg.replace("*", "//")\n with pytest.raises(TypeError, match=floordiv_err):\n idx // 1\n floordiv_err = rmsg.replace("*", "//")\n with pytest.raises(TypeError, match=floordiv_err):\n 1 // idx\n\n def test_logical_compat(self, simple_index):\n if simple_index.dtype in (object, "string"):\n pytest.skip("Tested elsewhere.")\n idx = simple_index\n if idx.dtype.kind in "iufcbm":\n assert idx.all() == idx._values.all()\n assert idx.all() == idx.to_series().all()\n assert idx.any() == idx._values.any()\n assert idx.any() == idx.to_series().any()\n else:\n msg = "cannot perform (any|all)"\n if isinstance(idx, IntervalIndex):\n msg = (\n r"'IntervalArray' with dtype 
interval\[.*\] does "\n "not support reduction '(any|all)'"\n )\n with pytest.raises(TypeError, match=msg):\n idx.all()\n with pytest.raises(TypeError, match=msg):\n idx.any()\n\n def test_repr_roundtrip(self, simple_index):\n if isinstance(simple_index, IntervalIndex):\n pytest.skip(f"Not a valid repr for {type(simple_index).__name__}")\n idx = simple_index\n tm.assert_index_equal(eval(repr(idx)), idx)\n\n def test_repr_max_seq_item_setting(self, simple_index):\n # GH10182\n if isinstance(simple_index, IntervalIndex):\n pytest.skip(f"Not a valid repr for {type(simple_index).__name__}")\n idx = simple_index\n idx = idx.repeat(50)\n with pd.option_context("display.max_seq_items", None):\n repr(idx)\n assert "..." not in str(idx)\n\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_ensure_copied_data(self, index):\n # Check the "copy" argument of each Index.__new__ is honoured\n # GH12309\n init_kwargs = {}\n if isinstance(index, PeriodIndex):\n # Needs "freq" specification:\n init_kwargs["freq"] = index.freq\n elif isinstance(index, (RangeIndex, MultiIndex, CategoricalIndex)):\n pytest.skip(\n "RangeIndex cannot be initialized from data, "\n "MultiIndex and CategoricalIndex are tested separately"\n )\n elif index.dtype == object and index.inferred_type in ["boolean", "string"]:\n init_kwargs["dtype"] = index.dtype\n\n index_type = type(index)\n result = index_type(index.values, copy=True, **init_kwargs)\n if isinstance(index.dtype, DatetimeTZDtype):\n result = result.tz_localize("UTC").tz_convert(index.tz)\n if isinstance(index, (DatetimeIndex, TimedeltaIndex)):\n index = index._with_freq(None)\n\n tm.assert_index_equal(index, result)\n\n if isinstance(index, PeriodIndex):\n # .values an object array of Period, thus copied\n depr_msg = "The 'ordinal' keyword in PeriodIndex is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n result = index_type(ordinal=index.asi8, copy=False, 
**init_kwargs)\n tm.assert_numpy_array_equal(index.asi8, result.asi8, check_same="same")\n elif isinstance(index, IntervalIndex):\n # checked in test_interval.py\n pass\n elif type(index) is Index and not isinstance(index.dtype, np.dtype):\n result = index_type(index.values, copy=False, **init_kwargs)\n tm.assert_index_equal(result, index)\n\n if isinstance(index._values, BaseMaskedArray):\n assert np.shares_memory(index._values._data, result._values._data)\n tm.assert_numpy_array_equal(\n index._values._data, result._values._data, check_same="same"\n )\n assert np.shares_memory(index._values._mask, result._values._mask)\n tm.assert_numpy_array_equal(\n index._values._mask, result._values._mask, check_same="same"\n )\n elif (\n isinstance(index.dtype, StringDtype) and index.dtype.storage == "python"\n ):\n assert np.shares_memory(index._values._ndarray, result._values._ndarray)\n tm.assert_numpy_array_equal(\n index._values._ndarray, result._values._ndarray, check_same="same"\n )\n elif (\n isinstance(index.dtype, StringDtype)\n and index.dtype.storage == "pyarrow"\n ):\n assert tm.shares_memory(result._values, index._values)\n else:\n raise NotImplementedError(index.dtype)\n else:\n result = index_type(index.values, copy=False, **init_kwargs)\n tm.assert_numpy_array_equal(index.values, result.values, check_same="same")\n\n def test_memory_usage(self, index):\n index._engine.clear_mapping()\n result = index.memory_usage()\n if index.empty:\n # we report 0 for no-length\n assert result == 0\n return\n\n # non-zero length\n index.get_loc(index[0])\n result2 = index.memory_usage()\n result3 = index.memory_usage(deep=True)\n\n # RangeIndex, IntervalIndex\n # don't have engines\n # Index[EA] has engine but it does not have a Hashtable .mapping\n if not isinstance(index, (RangeIndex, IntervalIndex)) and not (\n type(index) is Index and not isinstance(index.dtype, np.dtype)\n ):\n assert result2 > result\n\n if index.inferred_type == "object":\n assert result3 > 
        result2

    def test_argsort(self, index):
        # Index.argsort should agree with numpy's argsort on the underlying values.
        if isinstance(index, CategoricalIndex):
            pytest.skip(f"{type(self).__name__} separately tested")

        result = index.argsort()
        expected = np.array(index).argsort()
        tm.assert_numpy_array_equal(result, expected, check_dtype=False)

    def test_numpy_argsort(self, index):
        # np.argsort dispatched onto an Index should match Index.argsort,
        # including the 'kind' keyword.
        result = np.argsort(index)
        expected = index.argsort()
        tm.assert_numpy_array_equal(result, expected)

        result = np.argsort(index, kind="mergesort")
        expected = index.argsort(kind="mergesort")
        tm.assert_numpy_array_equal(result, expected)

        # these are the only two types that perform
        # pandas compatibility input validation - the
        # rest already perform separate (or no) such
        # validation via their 'values' attribute as
        # defined in pandas.core.indexes/base.py - they
        # cannot be changed at the moment due to
        # backwards compatibility concerns
        if isinstance(index, (CategoricalIndex, RangeIndex)):
            msg = "the 'axis' parameter is not supported"
            with pytest.raises(ValueError, match=msg):
                np.argsort(index, axis=1)

            msg = "the 'order' parameter is not supported"
            with pytest.raises(ValueError, match=msg):
                np.argsort(index, order=("a", "b"))

    def test_repeat(self, simple_index):
        # Index.repeat with a scalar and with an array of per-element counts
        # should match repeating the underlying values.
        rep = 2
        idx = simple_index.copy()
        new_index_cls = idx._constructor
        expected = new_index_cls(idx.values.repeat(rep), name=idx.name)
        tm.assert_index_equal(idx.repeat(rep), expected)

        idx = simple_index
        rep = np.arange(len(idx))
        expected = new_index_cls(idx.values.repeat(rep), name=idx.name)
        tm.assert_index_equal(idx.repeat(rep), expected)

    def test_numpy_repeat(self, simple_index):
        # np.repeat dispatched onto an Index matches Index.repeat; the 'axis'
        # keyword is rejected for pandas compatibility.
        rep = 2
        idx = simple_index
        expected = idx.repeat(rep)
        tm.assert_index_equal(np.repeat(idx, rep), expected)

        msg = "the 'axis' parameter is not supported"
        with pytest.raises(ValueError, match=msg):
            np.repeat(idx, rep, axis=0)

    def test_where(self, listlike_box, simple_index):
        # Index.where with an all-True cond returns the index unchanged;
        # a False entry is replaced by the index's NA value.
        if isinstance(simple_index, (IntervalIndex, PeriodIndex)) or is_numeric_dtype(
            simple_index.dtype
        ):
            pytest.skip("Tested elsewhere.")
        klass = listlike_box

        idx = simple_index
        if isinstance(idx, (DatetimeIndex, TimedeltaIndex)):
            # where does not preserve freq
            idx = idx._with_freq(None)

        cond = [True] * len(idx)
        result = idx.where(klass(cond))
        expected = idx
        tm.assert_index_equal(result, expected)

        cond = [False] + [True] * len(idx[1:])
        expected = Index([idx._na_value] + idx[1:].tolist(), dtype=idx.dtype)
        result = idx.where(klass(cond))
        tm.assert_index_equal(result, expected)

    def test_insert_base(self, index):
        # Inserting an existing element at position 0 of a trimmed slice
        # reproduces the original leading slice.
        trimmed = index[1:4]

        if not len(index):
            pytest.skip("Not applicable for empty index")

        # test 0th element
        warn = None
        if index.dtype == object and index.inferred_type == "boolean":
            # GH#51363
            warn = FutureWarning
        msg = "The behavior of Index.insert with object-dtype is deprecated"
        with tm.assert_produces_warning(warn, match=msg):
            result = trimmed.insert(0, index[0])
        assert index[0:4].equals(result)

    def test_insert_out_of_bounds(self, index, using_infer_string):
        # TypeError/IndexError matches what np.insert raises in these cases

        if len(index) > 0:
            err = TypeError
        else:
            err = IndexError
        if len(index) == 0:
            # 0 vs 0.5 in error message varies with numpy version
            msg = "index (0|0.5) is out of bounds for axis 0 with size 0"
        else:
            msg = "slice indices must be integers or None or have an __index__ method"

        if using_infer_string and (
            index.dtype == "string" or index.dtype == "category"  # noqa: PLR1714
        ):
            msg = "loc must be an integer between"

        with pytest.raises(err, match=msg):
            index.insert(0.5, "foo")

        msg = "|".join(
            [
                r"index -?\d+ is out of bounds for axis 0 with size \d+",
                "loc must be an integer between",
            ]
        )
        with pytest.raises(IndexError, match=msg):
            index.insert(len(index) + 1, 1)

        with pytest.raises(IndexError, match=msg):
            index.insert(-len(index) - 1, 1)

    def test_delete_base(self, index):
        # Index.delete at the first/last position, with name retention,
        # and an IndexError for an out-of-bounds location.
        if not len(index):
            pytest.skip("Not applicable for empty index")

        if isinstance(index, RangeIndex):
            # tested in class
            pytest.skip(f"{type(self).__name__} tested elsewhere")

        expected = index[1:]
        result = index.delete(0)
        assert result.equals(expected)
        assert result.name == expected.name

        expected = index[:-1]
        result = index.delete(-1)
        assert result.equals(expected)
        assert result.name == expected.name

        length = len(index)
        msg = f"index {length} is out of bounds for axis 0 with size {length}"
        with pytest.raises(IndexError, match=msg):
            index.delete(length)

    @pytest.mark.filterwarnings(r"ignore:Dtype inference:FutureWarning")
    @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
    def test_equals(self, index):
        # Index.equals: True against itself/copies/object-cast versions,
        # False against plain lists, ndarrays, and Series.
        if isinstance(index, IntervalIndex):
            pytest.skip(f"{type(index).__name__} tested elsewhere")

        is_ea_idx = type(index) is Index and not isinstance(index.dtype, np.dtype)

        assert index.equals(index)
        assert index.equals(index.copy())
        if not is_ea_idx:
            # doesn't hold for e.g. IntegerDtype
            assert index.equals(index.astype(object))

        assert not index.equals(list(index))
        assert not index.equals(np.array(index))

        # Cannot pass in non-int64 dtype to RangeIndex
        if not isinstance(index, RangeIndex) and not is_ea_idx:
            same_values = Index(index, dtype=object)
            assert index.equals(same_values)
            assert same_values.equals(index)

        if index.nlevels == 1:
            # do not test MultiIndex
            assert not index.equals(Series(index))

    def test_equals_op(self, simple_index):
        # GH9947, GH10637
        # Elementwise == against Index / ndarray / Series of matching and
        # mismatched lengths; mismatched lengths must raise ValueError.
        index_a = simple_index

        n = len(index_a)
        index_b = index_a[0:-1]
        index_c = index_a[0:-1].append(index_a[-2:-1])
        index_d = index_a[0:1]

        msg = "Lengths must match|could not be broadcast"
        with pytest.raises(ValueError, match=msg):
            index_a == index_b
        expected1 = np.array([True] * n)
        expected2 = np.array([True] * (n - 1) + [False])
        tm.assert_numpy_array_equal(index_a == index_a, expected1)
        tm.assert_numpy_array_equal(index_a == index_c, expected2)

        # test comparisons with numpy arrays
        array_a = np.array(index_a)
        array_b = np.array(index_a[0:-1])
        array_c = np.array(index_a[0:-1].append(index_a[-2:-1]))
        array_d = np.array(index_a[0:1])
        with pytest.raises(ValueError, match=msg):
            index_a == array_b
        tm.assert_numpy_array_equal(index_a == array_a, expected1)
        tm.assert_numpy_array_equal(index_a == array_c, expected2)

        # test comparisons with Series
        series_a = Series(array_a)
        series_b = Series(array_b)
        series_c = Series(array_c)
        series_d = Series(array_d)
        with pytest.raises(ValueError, match=msg):
            index_a == series_b

        tm.assert_numpy_array_equal(index_a == series_a, expected1)
        tm.assert_numpy_array_equal(index_a == series_c, expected2)

        # cases where length is 1 for one of them
        with pytest.raises(ValueError, match="Lengths must match"):
            index_a == index_d
        with pytest.raises(ValueError, match="Lengths must match"):
            index_a == series_d
        with pytest.raises(ValueError, match="Lengths must match"):
            index_a == array_d
        msg = "Can only compare identically-labeled Series objects"
        with pytest.raises(ValueError, match=msg):
            series_a == series_d
        with pytest.raises(ValueError, match="Lengths must match"):
            series_a == array_d

        # comparing with a scalar should broadcast; note that we are excluding
        # MultiIndex because in this case each item in the index is a tuple of
        # length 2, and therefore is considered an array of length 2 in the
        # comparison instead of a scalar
        if not isinstance(index_a, MultiIndex):
            expected3 = np.array([False] * (len(index_a) - 2) + [True, False])
            # assuming the 2nd to last item is unique in the data
            item = index_a[-2]
            tm.assert_numpy_array_equal(index_a == item, expected3)
            tm.assert_series_equal(series_a == item, Series(expected3))

    def test_format(self, simple_index):
        # GH35439
        # Deprecated Index.format should return str() of each element.
        if is_numeric_dtype(simple_index.dtype) or isinstance(
            simple_index, DatetimeIndex
        ):
            pytest.skip("Tested elsewhere.")
        idx = simple_index
        expected = [str(x) for x in idx]
        msg = r"Index\.format is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            assert idx.format() == expected

    def test_format_empty(self, simple_index):
        # GH35712
        if isinstance(simple_index, (PeriodIndex, RangeIndex)):
            pytest.skip("Tested elsewhere")
        empty_idx = type(simple_index)([])
        msg = r"Index\.format is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            assert empty_idx.format() == []
        with tm.assert_produces_warning(FutureWarning, match=msg):
            assert empty_idx.format(name=True) == [""]

    def test_fillna(self, index):
        # GH 11343
        # fillna on an index without NAs is a no-op copy; list values and
        # the deprecated 'downcast' keyword are rejected.
        if len(index) == 0:
            pytest.skip("Not relevant for empty index")
        elif index.dtype == bool:
            pytest.skip(f"{index.dtype} cannot hold NAs")
        elif isinstance(index, Index) and is_integer_dtype(index.dtype):
            pytest.skip(f"Not relevant for Index with {index.dtype}")
        elif isinstance(index, MultiIndex):
            idx = index.copy(deep=True)
            msg = "isna is not defined for MultiIndex"
            with pytest.raises(NotImplementedError, match=msg):
                idx.fillna(idx[0])
        else:
            idx = index.copy(deep=True)
            result = idx.fillna(idx[0])
            tm.assert_index_equal(result, idx)
            assert result is not idx

            msg = "'value' must be a scalar, passed: "
            with pytest.raises(TypeError, match=msg):
                idx.fillna([idx[0]])

            idx = index.copy(deep=True)
            values = idx._values

            values[1] = np.nan

            idx = type(index)(values)

            msg = "does not support 'downcast'"
            msg2 = r"The 'downcast' keyword in .*Index\.fillna is deprecated"
            with tm.assert_produces_warning(FutureWarning, match=msg2):
                with pytest.raises(NotImplementedError, match=msg):
                    # For now at least, we only raise if there are NAs present
                    idx.fillna(idx[0], downcast="infer")

            expected = np.array([False] * len(idx), dtype=bool)
            expected[1] = True
            tm.assert_numpy_array_equal(idx._isnan, expected)
            assert idx.hasnans is True

    def test_nulls(self, index):
        # this is really a smoke test for the methods
        # as these are adequately tested for function elsewhere
        if len(index) == 0:
            tm.assert_numpy_array_equal(index.isna(), np.array([], dtype=bool))
        elif isinstance(index, MultiIndex):
            idx = index.copy()
            msg = "isna is not defined for MultiIndex"
            with pytest.raises(NotImplementedError, match=msg):
                idx.isna()
        elif not index.hasnans:
            tm.assert_numpy_array_equal(index.isna(), np.zeros(len(index), dtype=bool))
            tm.assert_numpy_array_equal(index.notna(), np.ones(len(index), dtype=bool))
        else:
            result = isna(index)
            tm.assert_numpy_array_equal(index.isna(), result)
            tm.assert_numpy_array_equal(index.notna(), ~result)

    def test_empty(self, simple_index):
        # GH 15270
        idx = simple_index
        assert not idx.empty
        assert idx[:0].empty

    def test_join_self_unique(self, join_type, simple_index):
        # Joining a unique index with itself returns the index (sorted for
        # an outer join).
        idx = simple_index
        if idx.is_unique:
            joined = idx.join(idx, how=join_type)
            expected = simple_index
            if join_type == "outer":
                expected = algos.safe_sort(expected)
            tm.assert_index_equal(joined, expected)

    def test_map(self, simple_index):
        # callable
        if isinstance(simple_index, (TimedeltaIndex, PeriodIndex)):
            pytest.skip("Tested elsewhere.")
        idx = simple_index

        result = idx.map(lambda x: x)
        # RangeIndex are equivalent to the similar Index with int64 dtype
        tm.assert_index_equal(result, idx, exact="equiv")

    @pytest.mark.parametrize(
        "mapper",
        [
            lambda values, index: {i: e for e, i in zip(values, index)},
            lambda values, index: Series(values, index),
        ],
    )
    @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
    def test_map_dictlike(self, mapper, simple_index, request):
        # Index.map with dict-like mappers: an identity mapping round-trips,
        # and missing keys map to NaN.
        idx = simple_index
        if isinstance(idx, (DatetimeIndex, TimedeltaIndex, PeriodIndex)):
            pytest.skip("Tested elsewhere.")

        identity = mapper(idx.values, idx)

        result = idx.map(identity)
        # RangeIndex are equivalent to the similar Index with int64 dtype
        tm.assert_index_equal(result, idx, exact="equiv")

        # empty mappable
        dtype = None
        if idx.dtype.kind == "f":
            dtype = idx.dtype

        expected = Index([np.nan] * len(idx), dtype=dtype)
        result = idx.map(mapper(expected, idx))
        tm.assert_index_equal(result, expected)

    def test_map_str(self, simple_index):
        # GH 31202
        if isinstance(simple_index, CategoricalIndex):
            pytest.skip("See test_map.py")
        idx = simple_index
        result = idx.map(str)
        expected = Index([str(x) for x in idx])
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("copy", [True, False])
    @pytest.mark.parametrize("name", [None, "foo"])
    @pytest.mark.parametrize("ordered", [True, False])
    def test_astype_category(self, copy, name, ordered, simple_index):
        # GH 18630
        idx = simple_index
        if name:
            idx = idx.rename(name)

        # standard categories
        dtype = CategoricalDtype(ordered=ordered)
        result = idx.astype(dtype, copy=copy)
        expected = CategoricalIndex(idx, name=name, ordered=ordered)
        tm.assert_index_equal(result, expected, exact=True)

        # non-standard categories
        dtype = CategoricalDtype(idx.unique().tolist()[:-1], ordered)
        result = idx.astype(dtype, copy=copy)
        expected = CategoricalIndex(idx, name=name, dtype=dtype)
        tm.assert_index_equal(result, expected, exact=True)

        if ordered is False:
            # dtype='category' defaults to ordered=False, so only test once
            result = idx.astype("category", copy=copy)
            expected = CategoricalIndex(idx, name=name)
            tm.assert_index_equal(result, expected, exact=True)

    def test_is_unique(self, simple_index):
        # is_unique: True for deduplicated/empty/single-NA indexes,
        # False once any value (including NA) is duplicated.
        # initialize a unique index
        index = simple_index.drop_duplicates()
        assert index.is_unique is True

        # empty index should be unique
        index_empty = index[:0]
        assert index_empty.is_unique is True

        # test basic dupes
        index_dup = index.insert(0, index[0])
        assert index_dup.is_unique is False

        # single NA should be unique
        index_na = index.insert(0, np.nan)
        assert index_na.is_unique is True

        # multiple NA should not be unique
        index_na_dup = index_na.insert(0, np.nan)
        assert index_na_dup.is_unique is False

    @pytest.mark.arm_slow
    def test_engine_reference_cycle(self, simple_index):
        # GH27585
        # Materializing ._engine must not create a reference cycle that keeps
        # the index alive after deletion.
        index = simple_index.copy()
        ref = weakref.ref(index)
        index._engine
        del index
        assert ref() is None

    def test_getitem_2d_deprecated(self, simple_index):
        # GH#30588, GH#31479
        if isinstance(simple_index, IntervalIndex):
            pytest.skip("Tested elsewhere")
        idx = simple_index
        msg = "Multi-dimensional indexing|too many|only"
        with pytest.raises((ValueError, IndexError), match=msg):
            idx[:, None]

        if not isinstance(idx, RangeIndex):
            # GH#44051 RangeIndex already raised pre-2.0 with a different message
            with pytest.raises((ValueError, IndexError), match=msg):
                idx[True]
            with pytest.raises((ValueError, IndexError), match=msg):
                idx[False]
        else:
            msg = "only integers, slices"
            with pytest.raises(IndexError, match=msg):
                idx[True]
            with pytest.raises(IndexError, match=msg):
                idx[False]

    def test_copy_shares_cache(self, simple_index):
        # GH32898, GH36840
        idx = simple_index
        idx.get_loc(idx[0])  # populates the _cache.
        copy = idx.copy()

        assert copy._cache is idx._cache

    def test_shallow_copy_shares_cache(self, simple_index):
        # GH32669, GH36840
        idx = simple_index
        idx.get_loc(idx[0])  # populates the _cache.
        shallow_copy = idx._view()

        assert shallow_copy._cache is idx._cache

        shallow_copy = idx._shallow_copy(idx._data)
        assert shallow_copy._cache is not idx._cache
        assert shallow_copy._cache == {}

    def test_index_groupby(self, simple_index):
        # Index.groupby with NaN/NaT group labels: NA-labeled positions are
        # dropped from the result.
        idx = simple_index[:5]
        to_groupby = np.array([1, 2, np.nan, 2, 1])
        tm.assert_dict_equal(
            idx.groupby(to_groupby), {1.0: idx[[0, 4]], 2.0: idx[[1, 3]]}
        )

        to_groupby = DatetimeIndex(
            [
                datetime(2011, 11, 1),
                datetime(2011, 12, 1),
                pd.NaT,
                datetime(2011, 12, 1),
                datetime(2011, 11, 1),
            ],
            tz="UTC",
        ).values

        ex_keys = [Timestamp("2011-11-01"), Timestamp("2011-12-01")]
        expected = {ex_keys[0]: idx[[0, 4]], ex_keys[1]: idx[[1, 3]]}
        tm.assert_dict_equal(idx.groupby(to_groupby), expected)

    def test_append_preserves_dtype(self, simple_index):
        # In particular Index with dtype float32
        index = simple_index
        N = len(index)

        result = index.append(index)
        assert result.dtype == index.dtype
        tm.assert_index_equal(result[:N], index, check_exact=True)
        tm.assert_index_equal(result[N:], index, check_exact=True)

        alt = index.take(list(range(N)) * 2)
        tm.assert_index_equal(result, alt, check_exact=True)

    def test_inv(self, simple_index, using_infer_string):
        # ~Index works for integer dtypes and raises TypeError otherwise,
        # matching the corresponding Series behavior in both cases.
        idx = simple_index

        if idx.dtype.kind in ["i", "u"]:
            res = ~idx
            expected = Index(~idx.values, name=idx.name)
            tm.assert_index_equal(res, expected)

            # check that we are matching Series behavior
            res2 = ~Series(idx)
            tm.assert_series_equal(res2, Series(expected))
        else:
            if idx.dtype.kind == "f":
                msg = "ufunc 'invert' not supported for the input types"
            else:
                msg = "bad operand|__invert__ is not supported for string dtype"
            with pytest.raises(TypeError, match=msg):
                ~idx

            # check that we get the same behavior with Series
            with pytest.raises(TypeError, match=msg):
                ~Series(idx)

    def test_is_boolean_is_deprecated(self, simple_index):
        # GH50042
        idx = simple_index
        with tm.assert_produces_warning(FutureWarning):
            idx.is_boolean()

    def test_is_floating_is_deprecated(self, simple_index):
        # GH50042
        idx = simple_index
        with tm.assert_produces_warning(FutureWarning):
            idx.is_floating()

    def test_is_integer_is_deprecated(self, simple_index):
        # GH50042
        idx = simple_index
        with tm.assert_produces_warning(FutureWarning):
            idx.is_integer()

    def test_holds_integer_deprecated(self, simple_index):
        # GH50243
        idx = simple_index
        msg = f"{type(idx).__name__}.holds_integer is deprecated. "
        with tm.assert_produces_warning(FutureWarning, match=msg):
            idx.holds_integer()

    def test_is_numeric_is_deprecated(self, simple_index):
        # GH50042
        idx = simple_index
        with tm.assert_produces_warning(
            FutureWarning,
            match=f"{type(idx).__name__}.is_numeric is deprecated. ",
        ):
            idx.is_numeric()

    def test_is_categorical_is_deprecated(self, simple_index):
        # GH50042
        idx = simple_index
        with tm.assert_produces_warning(
            FutureWarning,
            match=r"Use pandas\.api\.types\.is_categorical_dtype instead",
        ):
            idx.is_categorical()

    def test_is_interval_is_deprecated(self, simple_index):
        # GH50042
        idx = simple_index
        with tm.assert_produces_warning(FutureWarning):
            idx.is_interval()

    def test_is_object_is_deprecated(self, simple_index):
        # GH50042
        idx = simple_index
        with tm.assert_produces_warning(FutureWarning):
            idx.is_object()


class TestNumericBase:
    """
    Shared tests for numeric index flavors (RangeIndex and Index backed by
    int/uint/float numpy dtypes), parametrized via the `simple_index` fixture.
    """

    @pytest.fixture(
        params=[
            RangeIndex(start=0, stop=20, step=2),
            Index(np.arange(5, dtype=np.float64)),
            Index(np.arange(5, dtype=np.float32)),
            Index(np.arange(5, dtype=np.uint64)),
            Index(range(0, 20, 2), dtype=np.int64),
            Index(range(0, 20, 2), dtype=np.int32),
            Index(range(0, 20, 2), dtype=np.int16),
            Index(range(0, 20, 2), dtype=np.int8),
        ]
    )
    def simple_index(self, request):
        return request.param

    def test_constructor_unwraps_index(self, simple_index):
        # Constructing an Index from another Index should reuse the values,
        # not wrap the Index object itself.
        if isinstance(simple_index, RangeIndex):
            pytest.skip("Tested elsewhere.")
        index_cls = type(simple_index)
        dtype = simple_index.dtype

        idx = Index([1, 2], dtype=dtype)
        result = index_cls(idx)
        expected = np.array([1, 2], dtype=idx.dtype)
        tm.assert_numpy_array_equal(result._data, expected)

    def test_can_hold_identifiers(self, simple_index):
        # Numeric labels can never be used as attribute-style identifiers.
        idx = simple_index
        key = idx[0]
        assert idx._can_hold_identifiers_and_holds_name(key) is False

    def test_view(self, simple_index):
        # Index.view preserves the name; viewing as an Index subclass is
        # deprecated.
        if isinstance(simple_index, RangeIndex):
            pytest.skip("Tested elsewhere.")
        index_cls = type(simple_index)
        dtype = simple_index.dtype

        idx = index_cls([], dtype=dtype, name="Foo")
        idx_view = idx.view()
        assert idx_view.name == "Foo"

        idx_view = idx.view(dtype)
        tm.assert_index_equal(idx, index_cls(idx_view, name="Foo"), exact=True)

        msg = "Passing a type in .*Index.view is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            idx_view = idx.view(index_cls)
        tm.assert_index_equal(idx, index_cls(idx_view, name="Foo"), exact=True)

    def test_format(self, simple_index):
        # GH35439
        # Numeric Index.format right-pads entries to a uniform width.
        if isinstance(simple_index, DatetimeIndex):
            pytest.skip("Tested elsewhere")
        idx = simple_index
        max_width = max(len(str(x)) for x in idx)
        expected = [str(x).ljust(max_width) for x in idx]
        msg = r"Index\.format is deprecated"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            assert idx.format() == expected

    def test_insert_non_na(self, simple_index):
        # GH#43921 inserting an element that we know we can hold should
        # not change dtype or type (except for RangeIndex)
        index = simple_index

        result = index.insert(0, index[0])

        expected = Index([index[0]] + list(index), dtype=index.dtype)
        tm.assert_index_equal(result, expected, exact=True)

    def test_insert_na(self, nulls_fixture, simple_index):
        # GH 18295 (test missing)
        index = simple_index
        na_val = nulls_fixture

        if na_val is pd.NaT:
            expected = Index([index[0], pd.NaT] + list(index[1:]), dtype=object)
        else:
            expected = Index([index[0], np.nan] + list(index[1:]))
            # GH#43921 we preserve float dtype
            if index.dtype.kind == "f":
                expected = Index(expected, dtype=index.dtype)

        result = index.insert(1, na_val)
        tm.assert_index_equal(result, expected, exact=True)

    def test_arithmetic_explicit_conversions(self, simple_index):
        # GH 8608
        # add/sub are overridden explicitly for Float/Int Index
        index_cls = type(simple_index)
        if index_cls is RangeIndex:
            idx = RangeIndex(5)
        else:
            idx = index_cls(np.arange(5, dtype="int64"))

        # float conversions
        arr = np.arange(5, dtype="int64") * 3.2
        expected = Index(arr, dtype=np.float64)
        fidx = idx * 3.2
        tm.assert_index_equal(fidx, expected)
        fidx = 3.2 * idx
        tm.assert_index_equal(fidx, expected)

        # interops with numpy arrays
        expected = Index(arr, dtype=np.float64)
        a = np.zeros(5, dtype="float64")
        result = fidx - a
        tm.assert_index_equal(result, expected)

        expected = Index(-arr, dtype=np.float64)
        a = np.zeros(5, dtype="float64")
        result = a - fidx
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize("complex_dtype", [np.complex64, np.complex128])
    def test_astype_to_complex(self, complex_dtype, simple_index):
        # Casting a numeric index to complex produces a plain Index with the
        # requested complex dtype.
        result = simple_index.astype(complex_dtype)

        assert type(result) is Index and result.dtype == complex_dtype

    def test_cast_string(self, simple_index):
        # Numeric strings should be parsed when an explicit numeric dtype is
        # given to the constructor.
        if isinstance(simple_index, RangeIndex):
            pytest.skip("casting of strings not relevant for RangeIndex")
        result = type(simple_index)(["0", "1", "2"], dtype=simple_index.dtype)
        expected = type(simple_index)([0, 1, 2], dtype=simple_index.dtype)
        tm.assert_index_equal(result, expected)
"""\nThe tests in this package are to ensure the proper resultant dtypes of\nset operations.\n"""\nfrom datetime import datetime\nimport operator\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import lib\n\nfrom pandas.core.dtypes.cast import find_common_type\n\nfrom pandas import (\n CategoricalDtype,\n CategoricalIndex,\n DatetimeTZDtype,\n Index,\n MultiIndex,\n PeriodDtype,\n RangeIndex,\n Series,\n Timestamp,\n)\nimport pandas._testing as tm\nfrom pandas.api.types import (\n is_signed_integer_dtype,\n pandas_dtype,\n)\n\n\ndef equal_contents(arr1, arr2) -> bool:\n """\n Checks if the set of unique elements of arr1 and arr2 are equivalent.\n """\n return frozenset(arr1) == frozenset(arr2)\n\n\n@pytest.fixture(\n params=tm.ALL_REAL_NUMPY_DTYPES\n + [\n "object",\n "category",\n "datetime64[ns]",\n "timedelta64[ns]",\n ]\n)\ndef any_dtype_for_small_pos_integer_indexes(request):\n """\n Dtypes that can be given to an Index with small positive integers.\n\n This means that for any dtype `x` in the params list, `Index([1, 2, 3], dtype=x)` is\n valid and gives the correct Index (sub-)class.\n """\n return request.param\n\n\ndef test_union_same_types(index):\n # Union with a non-unique, non-monotonic index raises error\n # Only needed for bool index factory\n idx1 = index.sort_values()\n idx2 = index.sort_values()\n assert idx1.union(idx2).dtype == idx1.dtype\n\n\ndef test_union_different_types(index_flat, index_flat2, request):\n # This test only considers combinations of indices\n # GH 23525\n idx1 = index_flat\n idx2 = index_flat2\n\n if (\n not idx1.is_unique\n and not idx2.is_unique\n and idx1.dtype.kind == "i"\n and idx2.dtype.kind == "b"\n ) or (\n not idx2.is_unique\n and not idx1.is_unique\n and idx2.dtype.kind == "i"\n and idx1.dtype.kind == "b"\n ):\n # Each condition had idx[1|2].is_monotonic_decreasing\n # but failed when e.g.\n # idx1 = Index(\n # [True, True, True, True, True, True, True, True, False, False], dtype='bool'\n # )\n # idx2 = 
Index([0, 0, 1, 1, 2, 2], dtype='int64')\n mark = pytest.mark.xfail(\n reason="GH#44000 True==1", raises=ValueError, strict=False\n )\n request.applymarker(mark)\n\n common_dtype = find_common_type([idx1.dtype, idx2.dtype])\n\n warn = None\n msg = "'<' not supported between"\n if not len(idx1) or not len(idx2):\n pass\n elif (idx1.dtype.kind == "c" and (not lib.is_np_dtype(idx2.dtype, "iufc"))) or (\n idx2.dtype.kind == "c" and (not lib.is_np_dtype(idx1.dtype, "iufc"))\n ):\n # complex objects non-sortable\n warn = RuntimeWarning\n elif (\n isinstance(idx1.dtype, PeriodDtype) and isinstance(idx2.dtype, CategoricalDtype)\n ) or (\n isinstance(idx2.dtype, PeriodDtype) and isinstance(idx1.dtype, CategoricalDtype)\n ):\n warn = FutureWarning\n msg = r"PeriodDtype\[B\] is deprecated"\n mark = pytest.mark.xfail(\n reason="Warning not produced on all builds",\n raises=AssertionError,\n strict=False,\n )\n request.applymarker(mark)\n\n any_uint64 = np.uint64 in (idx1.dtype, idx2.dtype)\n idx1_signed = is_signed_integer_dtype(idx1.dtype)\n idx2_signed = is_signed_integer_dtype(idx2.dtype)\n\n # Union with a non-unique, non-monotonic index raises error\n # This applies to the boolean index\n idx1 = idx1.sort_values()\n idx2 = idx2.sort_values()\n\n with tm.assert_produces_warning(warn, match=msg):\n res1 = idx1.union(idx2)\n res2 = idx2.union(idx1)\n\n if any_uint64 and (idx1_signed or idx2_signed):\n assert res1.dtype == np.dtype("O")\n assert res2.dtype == np.dtype("O")\n else:\n assert res1.dtype == common_dtype\n assert res2.dtype == common_dtype\n\n\n@pytest.mark.parametrize(\n "idx1,idx2",\n [\n (Index(np.arange(5), dtype=np.int64), RangeIndex(5)),\n (Index(np.arange(5), dtype=np.float64), Index(np.arange(5), dtype=np.int64)),\n (Index(np.arange(5), dtype=np.float64), RangeIndex(5)),\n (Index(np.arange(5), dtype=np.float64), Index(np.arange(5), dtype=np.uint64)),\n ],\n)\ndef test_compatible_inconsistent_pairs(idx1, idx2):\n # GH 23525\n res1 = idx1.union(idx2)\n res2 
= idx2.union(idx1)\n\n assert res1.dtype in (idx1.dtype, idx2.dtype)\n assert res2.dtype in (idx1.dtype, idx2.dtype)\n\n\n@pytest.mark.parametrize(\n "left, right, expected",\n [\n ("int64", "int64", "int64"),\n ("int64", "uint64", "object"),\n ("int64", "float64", "float64"),\n ("uint64", "float64", "float64"),\n ("uint64", "uint64", "uint64"),\n ("float64", "float64", "float64"),\n ("datetime64[ns]", "int64", "object"),\n ("datetime64[ns]", "uint64", "object"),\n ("datetime64[ns]", "float64", "object"),\n ("datetime64[ns, CET]", "int64", "object"),\n ("datetime64[ns, CET]", "uint64", "object"),\n ("datetime64[ns, CET]", "float64", "object"),\n ("Period[D]", "int64", "object"),\n ("Period[D]", "uint64", "object"),\n ("Period[D]", "float64", "object"),\n ],\n)\n@pytest.mark.parametrize("names", [("foo", "foo", "foo"), ("foo", "bar", None)])\ndef test_union_dtypes(left, right, expected, names):\n left = pandas_dtype(left)\n right = pandas_dtype(right)\n a = Index([], dtype=left, name=names[0])\n b = Index([], dtype=right, name=names[1])\n result = a.union(b)\n assert result.dtype == expected\n assert result.name == names[2]\n\n # Testing name retention\n # TODO: pin down desired dtype; do we want it to be commutative?\n result = a.intersection(b)\n assert result.name == names[2]\n\n\n@pytest.mark.parametrize("values", [[1, 2, 2, 3], [3, 3]])\ndef test_intersection_duplicates(values):\n # GH#31326\n a = Index(values)\n b = Index([3, 3])\n result = a.intersection(b)\n expected = Index([3])\n tm.assert_index_equal(result, expected)\n\n\nclass TestSetOps:\n # Set operation tests shared by all indexes in the `index` fixture\n @pytest.mark.parametrize("case", [0.5, "xxx"])\n @pytest.mark.parametrize(\n "method", ["intersection", "union", "difference", "symmetric_difference"]\n )\n def test_set_ops_error_cases(self, case, method, index):\n # non-iterable input\n msg = "Input must be Index or array-like"\n with pytest.raises(TypeError, match=msg):\n getattr(index, 
method)(case)\n\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_intersection_base(self, index):\n if isinstance(index, CategoricalIndex):\n pytest.skip(f"Not relevant for {type(index).__name__}")\n\n first = index[:5].unique()\n second = index[:3].unique()\n intersect = first.intersection(second)\n tm.assert_index_equal(intersect, second)\n\n if isinstance(index.dtype, DatetimeTZDtype):\n # The second.values below will drop tz, so the rest of this test\n # is not applicable.\n return\n\n # GH#10149\n cases = [second.to_numpy(), second.to_series(), second.to_list()]\n for case in cases:\n result = first.intersection(case)\n assert equal_contents(result, second)\n\n if isinstance(index, MultiIndex):\n msg = "other must be a MultiIndex or a list of tuples"\n with pytest.raises(TypeError, match=msg):\n first.intersection([1, 2, 3])\n\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_union_base(self, index):\n index = index.unique()\n first = index[3:]\n second = index[:5]\n everything = index\n\n union = first.union(second)\n tm.assert_index_equal(union.sort_values(), everything.sort_values())\n\n if isinstance(index.dtype, DatetimeTZDtype):\n # The second.values below will drop tz, so the rest of this test\n # is not applicable.\n return\n\n # GH#10149\n cases = [second.to_numpy(), second.to_series(), second.to_list()]\n for case in cases:\n result = first.union(case)\n assert equal_contents(result, everything)\n\n if isinstance(index, MultiIndex):\n msg = "other must be a MultiIndex or a list of tuples"\n with pytest.raises(TypeError, match=msg):\n first.union([1, 2, 3])\n\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_difference_base(self, sort, index):\n first = index[2:]\n second = index[:4]\n if index.inferred_type == "boolean":\n # i think (TODO: be sure) there assumptions baked in about\n # the index fixture that 
don't hold here?\n answer = set(first).difference(set(second))\n elif isinstance(index, CategoricalIndex):\n answer = []\n else:\n answer = index[4:]\n result = first.difference(second, sort)\n assert equal_contents(result, answer)\n\n # GH#10149\n cases = [second.to_numpy(), second.to_series(), second.to_list()]\n for case in cases:\n result = first.difference(case, sort)\n assert equal_contents(result, answer)\n\n if isinstance(index, MultiIndex):\n msg = "other must be a MultiIndex or a list of tuples"\n with pytest.raises(TypeError, match=msg):\n first.difference([1, 2, 3], sort)\n\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_symmetric_difference(self, index, using_infer_string, request):\n if (\n using_infer_string\n and index.dtype == "object"\n and index.inferred_type == "string"\n ):\n request.applymarker(pytest.mark.xfail(reason="TODO: infer_string"))\n if isinstance(index, CategoricalIndex):\n pytest.skip(f"Not relevant for {type(index).__name__}")\n if len(index) < 2:\n pytest.skip("Too few values for test")\n if index[0] in index[1:] or index[-1] in index[:-1]:\n # index fixture has e.g. 
an index of bools that does not satisfy this,\n # another with [0, 0, 1, 1, 2, 2]\n pytest.skip("Index values no not satisfy test condition.")\n\n first = index[1:]\n second = index[:-1]\n answer = index[[0, -1]]\n result = first.symmetric_difference(second)\n tm.assert_index_equal(result.sort_values(), answer.sort_values())\n\n # GH#10149\n cases = [second.to_numpy(), second.to_series(), second.to_list()]\n for case in cases:\n result = first.symmetric_difference(case)\n assert equal_contents(result, answer)\n\n if isinstance(index, MultiIndex):\n msg = "other must be a MultiIndex or a list of tuples"\n with pytest.raises(TypeError, match=msg):\n first.symmetric_difference([1, 2, 3])\n\n @pytest.mark.parametrize(\n "fname, sname, expected_name",\n [\n ("A", "A", "A"),\n ("A", "B", None),\n ("A", None, None),\n (None, "B", None),\n (None, None, None),\n ],\n )\n def test_corner_union(self, index_flat, fname, sname, expected_name):\n # GH#9943, GH#9862\n # Test unions with various name combinations\n # Do not test MultiIndex or repeats\n if not index_flat.is_unique:\n index = index_flat.unique()\n else:\n index = index_flat\n\n # Test copy.union(copy)\n first = index.copy().set_names(fname)\n second = index.copy().set_names(sname)\n union = first.union(second)\n expected = index.copy().set_names(expected_name)\n tm.assert_index_equal(union, expected)\n\n # Test copy.union(empty)\n first = index.copy().set_names(fname)\n second = index.drop(index).set_names(sname)\n union = first.union(second)\n expected = index.copy().set_names(expected_name)\n tm.assert_index_equal(union, expected)\n\n # Test empty.union(copy)\n first = index.drop(index).set_names(fname)\n second = index.copy().set_names(sname)\n union = first.union(second)\n expected = index.copy().set_names(expected_name)\n tm.assert_index_equal(union, expected)\n\n # Test empty.union(empty)\n first = index.drop(index).set_names(fname)\n second = index.drop(index).set_names(sname)\n union = first.union(second)\n 
expected = index.drop(index).set_names(expected_name)\n tm.assert_index_equal(union, expected)\n\n @pytest.mark.parametrize(\n "fname, sname, expected_name",\n [\n ("A", "A", "A"),\n ("A", "B", None),\n ("A", None, None),\n (None, "B", None),\n (None, None, None),\n ],\n )\n def test_union_unequal(self, index_flat, fname, sname, expected_name):\n if not index_flat.is_unique:\n index = index_flat.unique()\n else:\n index = index_flat\n\n # test copy.union(subset) - need sort for unicode and string\n first = index.copy().set_names(fname)\n second = index[1:].set_names(sname)\n union = first.union(second).sort_values()\n expected = index.set_names(expected_name).sort_values()\n tm.assert_index_equal(union, expected)\n\n @pytest.mark.parametrize(\n "fname, sname, expected_name",\n [\n ("A", "A", "A"),\n ("A", "B", None),\n ("A", None, None),\n (None, "B", None),\n (None, None, None),\n ],\n )\n def test_corner_intersect(self, index_flat, fname, sname, expected_name):\n # GH#35847\n # Test intersections with various name combinations\n if not index_flat.is_unique:\n index = index_flat.unique()\n else:\n index = index_flat\n\n # Test copy.intersection(copy)\n first = index.copy().set_names(fname)\n second = index.copy().set_names(sname)\n intersect = first.intersection(second)\n expected = index.copy().set_names(expected_name)\n tm.assert_index_equal(intersect, expected)\n\n # Test copy.intersection(empty)\n first = index.copy().set_names(fname)\n second = index.drop(index).set_names(sname)\n intersect = first.intersection(second)\n expected = index.drop(index).set_names(expected_name)\n tm.assert_index_equal(intersect, expected)\n\n # Test empty.intersection(copy)\n first = index.drop(index).set_names(fname)\n second = index.copy().set_names(sname)\n intersect = first.intersection(second)\n expected = index.drop(index).set_names(expected_name)\n tm.assert_index_equal(intersect, expected)\n\n # Test empty.intersection(empty)\n first = index.drop(index).set_names(fname)\n 
second = index.drop(index).set_names(sname)\n intersect = first.intersection(second)\n expected = index.drop(index).set_names(expected_name)\n tm.assert_index_equal(intersect, expected)\n\n @pytest.mark.parametrize(\n "fname, sname, expected_name",\n [\n ("A", "A", "A"),\n ("A", "B", None),\n ("A", None, None),\n (None, "B", None),\n (None, None, None),\n ],\n )\n def test_intersect_unequal(self, index_flat, fname, sname, expected_name):\n if not index_flat.is_unique:\n index = index_flat.unique()\n else:\n index = index_flat\n\n # test copy.intersection(subset) - need sort for unicode and string\n first = index.copy().set_names(fname)\n second = index[1:].set_names(sname)\n intersect = first.intersection(second).sort_values()\n expected = index[1:].set_names(expected_name).sort_values()\n tm.assert_index_equal(intersect, expected)\n\n @pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n def test_intersection_name_retention_with_nameless(self, index):\n if isinstance(index, MultiIndex):\n index = index.rename(list(range(index.nlevels)))\n else:\n index = index.rename("foo")\n\n other = np.asarray(index)\n\n result = index.intersection(other)\n assert result.name == index.name\n\n # empty other, same dtype\n result = index.intersection(other[:0])\n assert result.name == index.name\n\n # empty `self`\n result = index[:0].intersection(other)\n assert result.name == index.name\n\n def test_difference_preserves_type_empty(self, index, sort):\n # GH#20040\n # If taking difference of a set and itself, it\n # needs to preserve the type of the index\n if not index.is_unique:\n pytest.skip("Not relevant since index is not unique")\n result = index.difference(index, sort=sort)\n expected = index[:0]\n tm.assert_index_equal(result, expected, exact=True)\n\n def test_difference_name_retention_equals(self, index, names):\n if isinstance(index, MultiIndex):\n names = [[x] * index.nlevels for x in names]\n index = index.rename(names[0])\n other = 
index.rename(names[1])\n\n assert index.equals(other)\n\n result = index.difference(other)\n expected = index[:0].rename(names[2])\n tm.assert_index_equal(result, expected)\n\n def test_intersection_difference_match_empty(self, index, sort):\n # GH#20040\n # Test that the intersection of an index with an\n # empty index produces the same index as the difference\n # of an index with itself. Test for all types\n if not index.is_unique:\n pytest.skip("Not relevant because index is not unique")\n inter = index.intersection(index[:0])\n diff = index.difference(index, sort=sort)\n tm.assert_index_equal(inter, diff, exact=True)\n\n\n@pytest.mark.filterwarnings("ignore:invalid value encountered in cast:RuntimeWarning")\n@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")\n@pytest.mark.parametrize(\n "method", ["intersection", "union", "difference", "symmetric_difference"]\n)\ndef test_setop_with_categorical(index_flat, sort, method):\n # MultiIndex tested separately in tests.indexes.multi.test_setops\n index = index_flat\n\n other = index.astype("category")\n exact = "equiv" if isinstance(index, RangeIndex) else True\n\n result = getattr(index, method)(other, sort=sort)\n expected = getattr(index, method)(index, sort=sort)\n tm.assert_index_equal(result, expected, exact=exact)\n\n result = getattr(index, method)(other[:5], sort=sort)\n expected = getattr(index, method)(index[:5], sort=sort)\n tm.assert_index_equal(result, expected, exact=exact)\n\n\ndef test_intersection_duplicates_all_indexes(index):\n # GH#38743\n if index.empty:\n # No duplicates in empty indexes\n pytest.skip("Not relevant for empty Index")\n\n idx = index\n idx_non_unique = idx[[0, 0, 1, 2]]\n\n assert idx.intersection(idx_non_unique).equals(idx_non_unique.intersection(idx))\n assert idx.intersection(idx_non_unique).is_unique\n\n\ndef test_union_duplicate_index_subsets_of_each_other(\n any_dtype_for_small_pos_integer_indexes,\n):\n # GH#31326\n dtype = 
any_dtype_for_small_pos_integer_indexes\n a = Index([1, 2, 2, 3], dtype=dtype)\n b = Index([3, 3, 4], dtype=dtype)\n\n expected = Index([1, 2, 2, 3, 3, 4], dtype=dtype)\n if isinstance(a, CategoricalIndex):\n expected = Index([1, 2, 2, 3, 3, 4])\n result = a.union(b)\n tm.assert_index_equal(result, expected)\n result = a.union(b, sort=False)\n tm.assert_index_equal(result, expected)\n\n\ndef test_union_with_duplicate_index_and_non_monotonic(\n any_dtype_for_small_pos_integer_indexes,\n):\n # GH#36289\n dtype = any_dtype_for_small_pos_integer_indexes\n a = Index([1, 0, 0], dtype=dtype)\n b = Index([0, 1], dtype=dtype)\n expected = Index([0, 0, 1], dtype=dtype)\n\n result = a.union(b)\n tm.assert_index_equal(result, expected)\n\n result = b.union(a)\n tm.assert_index_equal(result, expected)\n\n\ndef test_union_duplicate_index_different_dtypes():\n # GH#36289\n a = Index([1, 2, 2, 3])\n b = Index(["1", "0", "0"])\n expected = Index([1, 2, 2, 3, "1", "0", "0"])\n result = a.union(b, sort=False)\n tm.assert_index_equal(result, expected)\n\n\ndef test_union_same_value_duplicated_in_both():\n # GH#36289\n a = Index([0, 0, 1])\n b = Index([0, 0, 1, 2])\n result = a.union(b)\n expected = Index([0, 0, 1, 2])\n tm.assert_index_equal(result, expected)\n\n\n@pytest.mark.parametrize("dup", [1, np.nan])\ndef test_union_nan_in_both(dup):\n # GH#36289\n a = Index([np.nan, 1, 2, 2])\n b = Index([np.nan, dup, 1, 2])\n result = a.union(b, sort=False)\n expected = Index([np.nan, dup, 1.0, 2.0, 2.0])\n tm.assert_index_equal(result, expected)\n\n\ndef test_union_rangeindex_sort_true():\n # GH 53490\n idx1 = RangeIndex(1, 100, 6)\n idx2 = RangeIndex(1, 50, 3)\n result = idx1.union(idx2, sort=True)\n expected = Index(\n [\n 1,\n 4,\n 7,\n 10,\n 13,\n 16,\n 19,\n 22,\n 25,\n 28,\n 31,\n 34,\n 37,\n 40,\n 43,\n 46,\n 49,\n 55,\n 61,\n 67,\n 73,\n 79,\n 85,\n 91,\n 97,\n ]\n )\n tm.assert_index_equal(result, expected)\n\n\ndef test_union_with_duplicate_index_not_subset_and_non_monotonic(\n 
any_dtype_for_small_pos_integer_indexes,\n):\n # GH#36289\n dtype = any_dtype_for_small_pos_integer_indexes\n a = Index([1, 0, 2], dtype=dtype)\n b = Index([0, 0, 1], dtype=dtype)\n expected = Index([0, 0, 1, 2], dtype=dtype)\n if isinstance(a, CategoricalIndex):\n expected = Index([0, 0, 1, 2])\n\n result = a.union(b)\n tm.assert_index_equal(result, expected)\n\n result = b.union(a)\n tm.assert_index_equal(result, expected)\n\n\ndef test_union_int_categorical_with_nan():\n ci = CategoricalIndex([1, 2, np.nan])\n assert ci.categories.dtype.kind == "i"\n\n idx = Index([1, 2])\n\n result = idx.union(ci)\n expected = Index([1, 2, np.nan], dtype=np.float64)\n tm.assert_index_equal(result, expected)\n\n result = ci.union(idx)\n tm.assert_index_equal(result, expected)\n\n\nclass TestSetOpsUnsorted:\n # These may eventually belong in a dtype-specific test_setops, or\n # parametrized over a more general fixture\n def test_intersect_str_dates(self):\n dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]\n\n index1 = Index(dt_dates, dtype=object)\n index2 = Index(["aa"], dtype=object)\n result = index2.intersection(index1)\n\n expected = Index([], dtype=object)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("index", ["string"], indirect=True)\n def test_intersection(self, index, sort):\n first = index[:20]\n second = index[:10]\n intersect = first.intersection(second, sort=sort)\n if sort in (None, False):\n tm.assert_index_equal(intersect.sort_values(), second.sort_values())\n else:\n tm.assert_index_equal(intersect, second)\n\n # Corner cases\n inter = first.intersection(first, sort=sort)\n assert inter is first\n\n @pytest.mark.parametrize(\n "index2,keeps_name",\n [\n (Index([3, 4, 5, 6, 7], name="index"), True), # preserve same name\n (Index([3, 4, 5, 6, 7], name="other"), False), # drop diff names\n (Index([3, 4, 5, 6, 7]), False),\n ],\n )\n def test_intersection_name_preservation(self, index2, keeps_name, sort):\n index1 = Index([1, 2, 3, 
4, 5], name="index")\n expected = Index([3, 4, 5])\n result = index1.intersection(index2, sort)\n\n if keeps_name:\n expected.name = "index"\n\n assert result.name == expected.name\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("index", ["string"], indirect=True)\n @pytest.mark.parametrize(\n "first_name,second_name,expected_name",\n [("A", "A", "A"), ("A", "B", None), (None, "B", None)],\n )\n def test_intersection_name_preservation2(\n self, index, first_name, second_name, expected_name, sort\n ):\n first = index[5:20]\n second = index[:10]\n first.name = first_name\n second.name = second_name\n intersect = first.intersection(second, sort=sort)\n assert intersect.name == expected_name\n\n def test_chained_union(self, sort):\n # Chained unions handles names correctly\n i1 = Index([1, 2], name="i1")\n i2 = Index([5, 6], name="i2")\n i3 = Index([3, 4], name="i3")\n union = i1.union(i2.union(i3, sort=sort), sort=sort)\n expected = i1.union(i2, sort=sort).union(i3, sort=sort)\n tm.assert_index_equal(union, expected)\n\n j1 = Index([1, 2], name="j1")\n j2 = Index([], name="j2")\n j3 = Index([], name="j3")\n union = j1.union(j2.union(j3, sort=sort), sort=sort)\n expected = j1.union(j2, sort=sort).union(j3, sort=sort)\n tm.assert_index_equal(union, expected)\n\n @pytest.mark.parametrize("index", ["string"], indirect=True)\n def test_union(self, index, sort):\n first = index[5:20]\n second = index[:10]\n everything = index[:20]\n\n union = first.union(second, sort=sort)\n if sort in (None, False):\n tm.assert_index_equal(union.sort_values(), everything.sort_values())\n else:\n tm.assert_index_equal(union, everything)\n\n @pytest.mark.parametrize("klass", [np.array, Series, list])\n @pytest.mark.parametrize("index", ["string"], indirect=True)\n def test_union_from_iterables(self, index, klass, sort):\n # GH#10149\n first = index[5:20]\n second = index[:10]\n everything = index[:20]\n\n case = klass(second.values)\n result = first.union(case, 
sort=sort)\n if sort in (None, False):\n tm.assert_index_equal(result.sort_values(), everything.sort_values())\n else:\n tm.assert_index_equal(result, everything)\n\n @pytest.mark.parametrize("index", ["string"], indirect=True)\n def test_union_identity(self, index, sort):\n first = index[5:20]\n\n union = first.union(first, sort=sort)\n # i.e. identity is not preserved when sort is True\n assert (union is first) is (not sort)\n\n # This should no longer be the same object, since [] is not consistent,\n # both objects will be recast to dtype('O')\n union = first.union(Index([], dtype=first.dtype), sort=sort)\n assert (union is first) is (not sort)\n\n union = Index([], dtype=first.dtype).union(first, sort=sort)\n assert (union is first) is (not sort)\n\n @pytest.mark.parametrize("index", ["string"], indirect=True)\n @pytest.mark.parametrize("second_name,expected", [(None, None), ("name", "name")])\n def test_difference_name_preservation(self, index, second_name, expected, sort):\n first = index[5:20]\n second = index[:10]\n answer = index[10:20]\n\n first.name = "name"\n second.name = second_name\n result = first.difference(second, sort=sort)\n\n if sort is True:\n tm.assert_index_equal(result, answer)\n else:\n answer.name = second_name\n tm.assert_index_equal(result.sort_values(), answer.sort_values())\n\n if expected is None:\n assert result.name is None\n else:\n assert result.name == expected\n\n def test_difference_empty_arg(self, index, sort):\n first = index.copy()\n first = first[5:20]\n first.name = "name"\n result = first.difference([], sort)\n expected = index[5:20].unique()\n expected.name = "name"\n tm.assert_index_equal(result, expected)\n\n def test_difference_should_not_compare(self):\n # GH 55113\n left = Index([1, 1])\n right = Index([True])\n result = left.difference(right)\n expected = Index([1])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("index", ["string"], indirect=True)\n def test_difference_identity(self, index, 
sort):\n first = index[5:20]\n first.name = "name"\n result = first.difference(first, sort)\n\n assert len(result) == 0\n assert result.name == first.name\n\n @pytest.mark.parametrize("index", ["string"], indirect=True)\n def test_difference_sort(self, index, sort):\n first = index[5:20]\n second = index[:10]\n\n result = first.difference(second, sort)\n expected = index[10:20]\n\n if sort is None:\n expected = expected.sort_values()\n\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("opname", ["difference", "symmetric_difference"])\n def test_difference_incomparable(self, opname):\n a = Index([3, Timestamp("2000"), 1])\n b = Index([2, Timestamp("1999"), 1])\n op = operator.methodcaller(opname, b)\n\n with tm.assert_produces_warning(RuntimeWarning):\n # sort=None, the default\n result = op(a)\n expected = Index([3, Timestamp("2000"), 2, Timestamp("1999")])\n if opname == "difference":\n expected = expected[:2]\n tm.assert_index_equal(result, expected)\n\n # sort=False\n op = operator.methodcaller(opname, b, sort=False)\n result = op(a)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("opname", ["difference", "symmetric_difference"])\n def test_difference_incomparable_true(self, opname):\n a = Index([3, Timestamp("2000"), 1])\n b = Index([2, Timestamp("1999"), 1])\n op = operator.methodcaller(opname, b, sort=True)\n\n msg = "'<' not supported between instances of 'Timestamp' and 'int'"\n with pytest.raises(TypeError, match=msg):\n op(a)\n\n def test_symmetric_difference_mi(self, sort):\n index1 = MultiIndex.from_tuples(zip(["foo", "bar", "baz"], [1, 2, 3]))\n index2 = MultiIndex.from_tuples([("foo", 1), ("bar", 3)])\n result = index1.symmetric_difference(index2, sort=sort)\n expected = MultiIndex.from_tuples([("bar", 2), ("baz", 3), ("bar", 3)])\n if sort is None:\n expected = expected.sort_values()\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "index2,expected",\n [\n (Index([0, 1, np.nan]), 
Index([2.0, 3.0, 0.0])),\n (Index([0, 1]), Index([np.nan, 2.0, 3.0, 0.0])),\n ],\n )\n def test_symmetric_difference_missing(self, index2, expected, sort):\n # GH#13514 change: {nan} - {nan} == {}\n # (GH#6444, sorting of nans, is no longer an issue)\n index1 = Index([1, np.nan, 2, 3])\n\n result = index1.symmetric_difference(index2, sort=sort)\n if sort is None:\n expected = expected.sort_values()\n tm.assert_index_equal(result, expected)\n\n def test_symmetric_difference_non_index(self, sort):\n index1 = Index([1, 2, 3, 4], name="index1")\n index2 = np.array([2, 3, 4, 5])\n expected = Index([1, 5], name="index1")\n result = index1.symmetric_difference(index2, sort=sort)\n if sort in (None, True):\n tm.assert_index_equal(result, expected)\n else:\n tm.assert_index_equal(result.sort_values(), expected)\n assert result.name == "index1"\n\n result = index1.symmetric_difference(index2, result_name="new_name", sort=sort)\n expected.name = "new_name"\n if sort in (None, True):\n tm.assert_index_equal(result, expected)\n else:\n tm.assert_index_equal(result.sort_values(), expected)\n assert result.name == "new_name"\n\n def test_union_ea_dtypes(self, any_numeric_ea_and_arrow_dtype):\n # GH#51365\n idx = Index([1, 2, 3], dtype=any_numeric_ea_and_arrow_dtype)\n idx2 = Index([3, 4, 5], dtype=any_numeric_ea_and_arrow_dtype)\n result = idx.union(idx2)\n expected = Index([1, 2, 3, 4, 5], dtype=any_numeric_ea_and_arrow_dtype)\n tm.assert_index_equal(result, expected)\n\n def test_union_string_array(self, any_string_dtype):\n idx1 = Index(["a"], dtype=any_string_dtype)\n idx2 = Index(["b"], dtype=any_string_dtype)\n result = idx1.union(idx2)\n expected = Index(["a", "b"], dtype=any_string_dtype)\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\test_setops.py | test_setops.py | Python | 32,813 | 0.95 | 0.112159 | 0.098015 | vue-tools | 581 | 2024-10-11T17:15:03.528460 | GPL-3.0 | true | dadfe60a6972109acde4abc4fa491a48 |
"""\nTests involving custom Index subclasses\n"""\nimport numpy as np\n\nfrom pandas import (\n DataFrame,\n Index,\n)\nimport pandas._testing as tm\n\n\nclass CustomIndex(Index):\n def __new__(cls, data, name=None):\n # assert that this index class cannot hold strings\n if any(isinstance(val, str) for val in data):\n raise TypeError("CustomIndex cannot hold strings")\n\n if name is None and hasattr(data, "name"):\n name = data.name\n data = np.array(data, dtype="O")\n\n return cls._simple_new(data, name)\n\n\ndef test_insert_fallback_to_base_index():\n # https://github.com/pandas-dev/pandas/issues/47071\n\n idx = CustomIndex([1, 2, 3])\n result = idx.insert(0, "string")\n expected = Index(["string", 1, 2, 3], dtype=object)\n tm.assert_index_equal(result, expected)\n\n df = DataFrame(\n np.random.default_rng(2).standard_normal((2, 3)),\n columns=idx,\n index=Index([1, 2], name="string"),\n )\n result = df.reset_index()\n tm.assert_index_equal(result.columns, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\test_subclass.py | test_subclass.py | Python | 1,058 | 0.95 | 0.175 | 0.064516 | vue-tools | 967 | 2024-10-16T10:09:35.339905 | GPL-3.0 | true | aae5f6e4b481cee1506ab2b7c05552a4 |
import numpy as np
import pytest

import pandas as pd
from pandas import (
    Index,
    MultiIndex,
    Series,
)
import pandas._testing as tm


class TestIndexConstructor:
    # Tests for the Index constructor, specifically for cases that do
    # not return a subclass

    @pytest.mark.parametrize("value", [1, np.int64(1)])
    def test_constructor_corner(self, value):
        # corner case: a bare scalar is not a valid collection for Index(...)
        msg = (
            r"Index\(\.\.\.\) must be called with a collection of some "
            f"kind, {value} was passed"
        )
        with pytest.raises(TypeError, match=msg):
            Index(value)

    @pytest.mark.parametrize("index_vals", [[("A", 1), "B"], ["B", ("A", 1)]])
    def test_construction_list_mixed_tuples(self, index_vals):
        # see gh-10697: if we are constructing from a mixed list of tuples,
        # make sure that we are independent of the sorting order.
        # A mixed tuple/scalar list must produce a flat Index, never MultiIndex.
        index = Index(index_vals)
        assert isinstance(index, Index)
        assert not isinstance(index, MultiIndex)

    def test_constructor_cast(self):
        # strings that cannot be parsed as floats raise, not silently coerce
        msg = "could not convert string to float"
        with pytest.raises(ValueError, match=msg):
            Index(["a", "b", "c"], dtype=float)

    @pytest.mark.parametrize("tuple_list", [[()], [(), ()]])
    def test_construct_empty_tuples(self, tuple_list):
        # GH #45608: lists of empty tuples still round-trip as a MultiIndex
        result = Index(tuple_list)
        expected = MultiIndex.from_tuples(tuple_list)

        tm.assert_index_equal(result, expected)

    def test_index_string_inference(self):
        # GH#54430: with the future.infer_string option, all-string data
        # infers the string dtype; mixed data stays object
        expected = Index(["a", "b"], dtype=pd.StringDtype(na_value=np.nan))
        with pd.option_context("future.infer_string", True):
            ser = Index(["a", "b"])
        tm.assert_index_equal(ser, expected)

        expected = Index(["a", 1], dtype="object")
        with pd.option_context("future.infer_string", True):
            ser = Index(["a", 1])
        tm.assert_index_equal(ser, expected)

    def test_inference_on_pandas_objects(self):
        # GH#56012: re-wrapping an object-dtype pandas object warns and
        # infers a more specific dtype
        idx = Index([pd.Timestamp("2019-12-31")], dtype=object)
        with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
            result = Index(idx)
        assert result.dtype != np.object_

        ser = Series([pd.Timestamp("2019-12-31")], dtype=object)

        with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):
            result = Index(ser)
        assert result.dtype != np.object_

    def test_constructor_not_read_only(self):
        # GH#57130: constructing from a Series under copy-on-write must not
        # hand back a read-only values buffer
        ser = Series([1, 2], dtype=object)
        with pd.option_context("mode.copy_on_write", True):
            idx = Index(ser)
            assert idx._values.flags.writeable
import numpy as np\nimport pytest\n\nfrom pandas._config import using_string_dtype\nimport pandas._config.config as cf\n\nfrom pandas import Index\nimport pandas._testing as tm\n\n\nclass TestIndexRendering:\n def test_repr_is_valid_construction_code(self):\n # for the case of Index, where the repr is traditional rather than\n # stylized\n idx = Index(["a", "b"])\n res = eval(repr(idx))\n tm.assert_index_equal(res, idx)\n\n @pytest.mark.xfail(using_string_dtype(), reason="repr different")\n @pytest.mark.parametrize(\n "index,expected",\n [\n # ASCII\n # short\n (\n Index(["a", "bb", "ccc"]),\n """Index(['a', 'bb', 'ccc'], dtype='object')""",\n ),\n # multiple lines\n (\n Index(["a", "bb", "ccc"] * 10),\n "Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', "\n "'bb', 'ccc', 'a', 'bb', 'ccc',\n"\n " 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', "\n "'bb', 'ccc', 'a', 'bb', 'ccc',\n"\n " 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n"\n " dtype='object')",\n ),\n # truncated\n (\n Index(["a", "bb", "ccc"] * 100),\n "Index(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n"\n " ...\n"\n " 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n"\n " dtype='object', length=300)",\n ),\n # Non-ASCII\n # short\n (\n Index(["あ", "いい", "ううう"]),\n """Index(['あ', 'いい', 'ううう'], dtype='object')""",\n ),\n # multiple lines\n (\n Index(["あ", "いい", "ううう"] * 10),\n (\n "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "\n "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"\n " 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "\n "'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n"\n " 'あ', 'いい', 'ううう', 'あ', 'いい', "\n "'ううう'],\n"\n " dtype='object')"\n ),\n ),\n # truncated\n (\n Index(["あ", "いい", "ううう"] * 100),\n (\n "Index(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', "\n "'あ', 'いい', 'ううう', 'あ',\n"\n " ...\n"\n " 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', "\n "'ううう', 'あ', 'いい', 'ううう'],\n"\n " dtype='object', length=300)"\n ),\n ),\n ],\n )\n def test_string_index_repr(self, index, expected):\n result = repr(index)\n assert result == 
expected\n\n @pytest.mark.xfail(using_string_dtype(), reason="repr different")\n @pytest.mark.parametrize(\n "index,expected",\n [\n # short\n (\n Index(["あ", "いい", "ううう"]),\n ("Index(['あ', 'いい', 'ううう'], dtype='object')"),\n ),\n # multiple lines\n (\n Index(["あ", "いい", "ううう"] * 10),\n (\n "Index(['あ', 'いい', 'ううう', 'あ', 'いい', "\n "'ううう', 'あ', 'いい', 'ううう',\n"\n " 'あ', 'いい', 'ううう', 'あ', 'いい', "\n "'ううう', 'あ', 'いい', 'ううう',\n"\n " 'あ', 'いい', 'ううう', 'あ', 'いい', "\n "'ううう', 'あ', 'いい', 'ううう',\n"\n " 'あ', 'いい', 'ううう'],\n"\n " dtype='object')"\n ""\n ),\n ),\n # truncated\n (\n Index(["あ", "いい", "ううう"] * 100),\n (\n "Index(['あ', 'いい', 'ううう', 'あ', 'いい', "\n "'ううう', 'あ', 'いい', 'ううう',\n"\n " 'あ',\n"\n " ...\n"\n " 'ううう', 'あ', 'いい', 'ううう', 'あ', "\n "'いい', 'ううう', 'あ', 'いい',\n"\n " 'ううう'],\n"\n " dtype='object', length=300)"\n ),\n ),\n ],\n )\n def test_string_index_repr_with_unicode_option(self, index, expected):\n # Enable Unicode option -----------------------------------------\n with cf.option_context("display.unicode.east_asian_width", True):\n result = repr(index)\n assert result == expected\n\n def test_repr_summary(self):\n with cf.option_context("display.max_seq_items", 10):\n result = repr(Index(np.arange(1000)))\n assert len(result) < 200\n assert "..." 
in result\n\n def test_summary_bug(self):\n # GH#3869\n ind = Index(["{other}%s", "~:{range}:0"], name="A")\n result = ind._summary()\n # shouldn't be formatted accidentally.\n assert "~:{range}:0" in result\n assert "{other}%s" in result\n\n def test_index_repr_bool_nan(self):\n # GH32146\n arr = Index([True, False, np.nan], dtype=object)\n msg = "Index.format is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n exp1 = arr.format()\n out1 = ["True", "False", "NaN"]\n assert out1 == exp1\n\n exp2 = repr(arr)\n out2 = "Index([True, False, nan], dtype='object')"\n assert out2 == exp2\n\n def test_format_different_scalar_lengths(self):\n # GH#35439\n idx = Index(["aaaaaaaaa", "b"])\n expected = ["aaaaaaaaa", "b"]\n msg = r"Index\.format is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert idx.format() == expected\n | .venv\Lib\site-packages\pandas\tests\indexes\base_class\test_formats.py | test_formats.py | Python | 6,305 | 0.95 | 0.055215 | 0.118421 | node-utils | 976 | 2023-12-19T02:12:38.903078 | GPL-3.0 | true | 54d79e32db1f30543f94ebf1a24f2948 |
import numpy as np
import pytest

from pandas._libs import index as libindex

import pandas as pd
from pandas import (
    Index,
    NaT,
)
import pandas._testing as tm


class TestGetSliceBounds:
    # get_slice_bound returns the integer position bounding a label slice

    @pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)])
    def test_get_slice_bounds_within(self, side, expected):
        index = Index(list("abcdef"))
        result = index.get_slice_bound("e", side=side)
        assert result == expected

    @pytest.mark.parametrize("side", ["left", "right"])
    @pytest.mark.parametrize(
        "data, bound, expected", [(list("abcdef"), "x", 6), (list("bcdefg"), "a", 0)]
    )
    def test_get_slice_bounds_outside(self, side, expected, data, bound):
        # labels beyond either end clamp to len(index) / 0 for both sides
        index = Index(data)
        result = index.get_slice_bound(bound, side=side)
        assert result == expected

    def test_get_slice_bounds_invalid_side(self):
        # only "left" and "right" are accepted for the side kwarg
        with pytest.raises(ValueError, match="Invalid value for side kwarg"):
            Index([]).get_slice_bound("a", side="middle")


class TestGetIndexerNonUnique:
    def test_get_indexer_non_unique_dtype_mismatch(self):
        # GH#25459: mismatched dtypes yield -1 indexer and report the target
        # position as missing
        indexes, missing = Index(["A", "B"]).get_indexer_non_unique(Index([0]))
        tm.assert_numpy_array_equal(np.array([-1], dtype=np.intp), indexes)
        tm.assert_numpy_array_equal(np.array([0], dtype=np.intp), missing)

    @pytest.mark.parametrize(
        "idx_values,idx_non_unique",
        [
            ([np.nan, 100, 200, 100], [np.nan, 100]),
            ([np.nan, 100.0, 200.0, 100.0], [np.nan, 100.0]),
        ],
    )
    def test_get_indexer_non_unique_int_index(self, idx_values, idx_non_unique):
        # NaN targets match the NaN entry; duplicated values return every
        # matching position
        indexes, missing = Index(idx_values).get_indexer_non_unique(Index([np.nan]))
        tm.assert_numpy_array_equal(np.array([0], dtype=np.intp), indexes)
        tm.assert_numpy_array_equal(np.array([], dtype=np.intp), missing)

        indexes, missing = Index(idx_values).get_indexer_non_unique(
            Index(idx_non_unique)
        )
        tm.assert_numpy_array_equal(np.array([0, 1, 3], dtype=np.intp), indexes)
        tm.assert_numpy_array_equal(np.array([], dtype=np.intp), missing)


class TestGetLoc:
    @pytest.mark.slow  # to_flat_index takes a while
    def test_get_loc_tuple_monotonic_above_size_cutoff(self, monkeypatch):
        # Go through the libindex path for which using
        # _bin_search vs ndarray.searchsorted makes a difference

        with monkeypatch.context():
            # lower the cutoff so a modest index takes the large-index path
            monkeypatch.setattr(libindex, "_SIZE_CUTOFF", 100)
            lev = list("ABCD")
            dti = pd.date_range("2016-01-01", periods=10)

            mi = pd.MultiIndex.from_product([lev, range(5), dti])
            oidx = mi.to_flat_index()

            loc = len(oidx) // 2
            tup = oidx[loc]

            res = oidx.get_loc(tup)
            assert res == loc

    def test_get_loc_nan_object_dtype_nonmonotonic_nonunique(self):
        # case that goes through _maybe_get_bool_indexer
        idx = Index(["foo", np.nan, None, "foo", 1.0, None], dtype=object)

        # we dont raise KeyError on nan
        res = idx.get_loc(np.nan)
        assert res == 1

        # we only match on None, not on np.nan; duplicates give a boolean mask
        res = idx.get_loc(None)
        expected = np.array([False, False, True, False, False, True])
        tm.assert_numpy_array_equal(res, expected)

        # we don't match at all on mismatched NA
        with pytest.raises(KeyError, match="NaT"):
            idx.get_loc(NaT)


def test_getitem_boolean_ea_indexer():
    # GH#45806: a boolean extension-array Series (with pd.NA) can index the
    # index; NA positions are dropped
    ser = pd.Series([True, False, pd.NA], dtype="boolean")
    result = ser.index[ser]
    expected = Index([0])
    tm.assert_index_equal(result, expected)
from pandas import Index
import pandas._testing as tm


def test_pickle_preserves_object_dtype():
    # GH#43188, GH#43155: a pickle round trip must keep object dtype and
    # not re-infer a numeric dtype from the values.
    original = Index([1, 2, 3], dtype=object)

    roundtripped = tm.round_trip_pickle(original)

    assert roundtripped.dtype == object
    tm.assert_index_equal(original, roundtripped)
"""\nTests for ndarray-like method on the base Index class\n"""\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import Index\nimport pandas._testing as tm\n\n\nclass TestReshape:\n def test_repeat(self):\n repeats = 2\n index = Index([1, 2, 3])\n expected = Index([1, 1, 2, 2, 3, 3])\n\n result = index.repeat(repeats)\n tm.assert_index_equal(result, expected)\n\n def test_insert(self):\n # GH 7256\n # validate neg/pos inserts\n result = Index(["b", "c", "d"])\n\n # test 0th element\n tm.assert_index_equal(Index(["a", "b", "c", "d"]), result.insert(0, "a"))\n\n # test Nth element that follows Python list behavior\n tm.assert_index_equal(Index(["b", "c", "e", "d"]), result.insert(-1, "e"))\n\n # test loc +/- neq (0, -1)\n tm.assert_index_equal(result.insert(1, "z"), result.insert(-2, "z"))\n\n # test empty\n null_index = Index([])\n tm.assert_index_equal(Index(["a"], dtype=object), null_index.insert(0, "a"))\n\n def test_insert_missing(self, request, nulls_fixture, using_infer_string):\n if using_infer_string and nulls_fixture is pd.NA:\n request.applymarker(pytest.mark.xfail(reason="TODO(infer_string)"))\n # GH#22295\n # test there is no mangling of NA values\n expected = Index(["a", nulls_fixture, "b", "c"], dtype=object)\n result = Index(list("abc"), dtype=object).insert(\n 1, Index([nulls_fixture], dtype=object)\n )\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "val", [(1, 2), np.datetime64("2019-12-31"), np.timedelta64(1, "D")]\n )\n @pytest.mark.parametrize("loc", [-1, 2])\n def test_insert_datetime_into_object(self, loc, val):\n # GH#44509\n idx = Index(["1", "2", "3"])\n result = idx.insert(loc, val)\n expected = Index(["1", "2", val, "3"])\n tm.assert_index_equal(result, expected)\n assert type(expected[2]) is type(val)\n\n def test_insert_none_into_string_numpy(self, string_dtype_no_object):\n # GH#55365\n index = Index(["a", "b", "c"], dtype=string_dtype_no_object)\n result = index.insert(-1, None)\n 
expected = Index(["a", "b", None, "c"], dtype=string_dtype_no_object)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "pos,expected",\n [\n (0, Index(["b", "c", "d"], name="index")),\n (-1, Index(["a", "b", "c"], name="index")),\n ],\n )\n def test_delete(self, pos, expected):\n index = Index(["a", "b", "c", "d"], name="index")\n result = index.delete(pos)\n tm.assert_index_equal(result, expected)\n assert result.name == expected.name\n\n def test_delete_raises(self):\n index = Index(["a", "b", "c", "d"], name="index")\n msg = "index 5 is out of bounds for axis 0 with size 4"\n with pytest.raises(IndexError, match=msg):\n index.delete(5)\n\n def test_append_multiple(self):\n index = Index(["a", "b", "c", "d", "e", "f"])\n\n foos = [index[:2], index[2:4], index[4:]]\n result = foos[0].append(foos[1:])\n tm.assert_index_equal(result, index)\n\n # empty\n result = index.append([])\n tm.assert_index_equal(result, index)\n | .venv\Lib\site-packages\pandas\tests\indexes\base_class\test_reshape.py | test_reshape.py | Python | 3,318 | 0.95 | 0.134021 | 0.1375 | react-lib | 707 | 2023-10-20T23:38:16.124104 | MIT | true | 4ea7a4deb80549c46ea7c3c5ef3814e7 |
from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n Index,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.core.algorithms import safe_sort\n\n\ndef equal_contents(arr1, arr2) -> bool:\n """\n Checks if the set of unique elements of arr1 and arr2 are equivalent.\n """\n return frozenset(arr1) == frozenset(arr2)\n\n\nclass TestIndexSetOps:\n @pytest.mark.parametrize(\n "method", ["union", "intersection", "difference", "symmetric_difference"]\n )\n def test_setops_sort_validation(self, method):\n idx1 = Index(["a", "b"])\n idx2 = Index(["b", "c"])\n\n with pytest.raises(ValueError, match="The 'sort' keyword only takes"):\n getattr(idx1, method)(idx2, sort=2)\n\n # sort=True is supported as of GH#??\n getattr(idx1, method)(idx2, sort=True)\n\n def test_setops_preserve_object_dtype(self):\n idx = Index([1, 2, 3], dtype=object)\n result = idx.intersection(idx[1:])\n expected = idx[1:]\n tm.assert_index_equal(result, expected)\n\n # if other is not monotonic increasing, intersection goes through\n # a different route\n result = idx.intersection(idx[1:][::-1])\n tm.assert_index_equal(result, expected)\n\n result = idx._union(idx[1:], sort=None)\n expected = idx\n tm.assert_numpy_array_equal(result, expected.values)\n\n result = idx.union(idx[1:], sort=None)\n tm.assert_index_equal(result, expected)\n\n # if other is not monotonic increasing, _union goes through\n # a different route\n result = idx._union(idx[1:][::-1], sort=None)\n tm.assert_numpy_array_equal(result, expected.values)\n\n result = idx.union(idx[1:][::-1], sort=None)\n tm.assert_index_equal(result, expected)\n\n def test_union_base(self):\n index = Index([0, "a", 1, "b", 2, "c"])\n first = index[3:]\n second = index[:5]\n\n result = first.union(second)\n\n expected = Index([0, 1, 2, "a", "b", "c"])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("klass", [np.array, Series, list])\n def 
test_union_different_type_base(self, klass):\n # GH 10149\n index = Index([0, "a", 1, "b", 2, "c"])\n first = index[3:]\n second = index[:5]\n\n result = first.union(klass(second.values))\n\n assert equal_contents(result, index)\n\n def test_union_sort_other_incomparable(self):\n # https://github.com/pandas-dev/pandas/issues/24959\n idx = Index([1, pd.Timestamp("2000")])\n # default (sort=None)\n with tm.assert_produces_warning(RuntimeWarning):\n result = idx.union(idx[:1])\n\n tm.assert_index_equal(result, idx)\n\n # sort=None\n with tm.assert_produces_warning(RuntimeWarning):\n result = idx.union(idx[:1], sort=None)\n tm.assert_index_equal(result, idx)\n\n # sort=False\n result = idx.union(idx[:1], sort=False)\n tm.assert_index_equal(result, idx)\n\n def test_union_sort_other_incomparable_true(self):\n idx = Index([1, pd.Timestamp("2000")])\n with pytest.raises(TypeError, match=".*"):\n idx.union(idx[:1], sort=True)\n\n def test_intersection_equal_sort_true(self):\n idx = Index(["c", "a", "b"])\n sorted_ = Index(["a", "b", "c"])\n tm.assert_index_equal(idx.intersection(idx, sort=True), sorted_)\n\n def test_intersection_base(self, sort):\n # (same results for py2 and py3 but sortedness not tested elsewhere)\n index = Index([0, "a", 1, "b", 2, "c"])\n first = index[:5]\n second = index[:3]\n\n expected = Index([0, 1, "a"]) if sort is None else Index([0, "a", 1])\n result = first.intersection(second, sort=sort)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("klass", [np.array, Series, list])\n def test_intersection_different_type_base(self, klass, sort):\n # GH 10149\n index = Index([0, "a", 1, "b", 2, "c"])\n first = index[:5]\n second = index[:3]\n\n result = first.intersection(klass(second.values), sort=sort)\n assert equal_contents(result, second)\n\n def test_intersection_nosort(self):\n result = Index(["c", "b", "a"]).intersection(["b", "a"])\n expected = Index(["b", "a"])\n tm.assert_index_equal(result, expected)\n\n def 
test_intersection_equal_sort(self):\n idx = Index(["c", "a", "b"])\n tm.assert_index_equal(idx.intersection(idx, sort=False), idx)\n tm.assert_index_equal(idx.intersection(idx, sort=None), idx)\n\n def test_intersection_str_dates(self, sort):\n dt_dates = [datetime(2012, 2, 9), datetime(2012, 2, 22)]\n\n i1 = Index(dt_dates, dtype=object)\n i2 = Index(["aa"], dtype=object)\n result = i2.intersection(i1, sort=sort)\n\n assert len(result) == 0\n\n @pytest.mark.parametrize(\n "index2,expected_arr",\n [(Index(["B", "D"]), ["B"]), (Index(["B", "D", "A"]), ["A", "B"])],\n )\n def test_intersection_non_monotonic_non_unique(self, index2, expected_arr, sort):\n # non-monotonic non-unique\n index1 = Index(["A", "B", "A", "C"])\n expected = Index(expected_arr)\n result = index1.intersection(index2, sort=sort)\n if sort is None:\n expected = expected.sort_values()\n tm.assert_index_equal(result, expected)\n\n def test_difference_base(self, sort):\n # (same results for py2 and py3 but sortedness not tested elsewhere)\n index = Index([0, "a", 1, "b", 2, "c"])\n first = index[:4]\n second = index[3:]\n\n result = first.difference(second, sort)\n expected = Index([0, "a", 1])\n if sort is None:\n expected = Index(safe_sort(expected))\n tm.assert_index_equal(result, expected)\n\n def test_symmetric_difference(self):\n # (same results for py2 and py3 but sortedness not tested elsewhere)\n index = Index([0, "a", 1, "b", 2, "c"])\n first = index[:4]\n second = index[3:]\n\n result = first.symmetric_difference(second)\n expected = Index([0, 1, 2, "a", "c"])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "method,expected,sort",\n [\n (\n "intersection",\n np.array(\n [(1, "A"), (2, "A"), (1, "B"), (2, "B")],\n dtype=[("num", int), ("let", "S1")],\n ),\n False,\n ),\n (\n "intersection",\n np.array(\n [(1, "A"), (1, "B"), (2, "A"), (2, "B")],\n dtype=[("num", int), ("let", "S1")],\n ),\n None,\n ),\n (\n "union",\n np.array(\n [(1, "A"), (1, "B"), (1, "C"), (2, 
"A"), (2, "B"), (2, "C")],\n dtype=[("num", int), ("let", "S1")],\n ),\n None,\n ),\n ],\n )\n def test_tuple_union_bug(self, method, expected, sort):\n index1 = Index(\n np.array(\n [(1, "A"), (2, "A"), (1, "B"), (2, "B")],\n dtype=[("num", int), ("let", "S1")],\n )\n )\n index2 = Index(\n np.array(\n [(1, "A"), (2, "A"), (1, "B"), (2, "B"), (1, "C"), (2, "C")],\n dtype=[("num", int), ("let", "S1")],\n )\n )\n\n result = getattr(index1, method)(index2, sort=sort)\n assert result.ndim == 1\n\n expected = Index(expected)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("first_list", [["b", "a"], []])\n @pytest.mark.parametrize("second_list", [["a", "b"], []])\n @pytest.mark.parametrize(\n "first_name, second_name, expected_name",\n [("A", "B", None), (None, "B", None), ("A", None, None)],\n )\n def test_union_name_preservation(\n self, first_list, second_list, first_name, second_name, expected_name, sort\n ):\n expected_dtype = object if not first_list or not second_list else "str"\n first = Index(first_list, name=first_name)\n second = Index(second_list, name=second_name)\n union = first.union(second, sort=sort)\n\n vals = set(first_list).union(second_list)\n\n if sort is None and len(first_list) > 0 and len(second_list) > 0:\n expected = Index(sorted(vals), name=expected_name)\n tm.assert_index_equal(union, expected)\n else:\n expected = Index(vals, name=expected_name, dtype=expected_dtype)\n tm.assert_index_equal(union.sort_values(), expected.sort_values())\n\n @pytest.mark.parametrize(\n "diff_type, expected",\n [["difference", [1, "B"]], ["symmetric_difference", [1, 2, "B", "C"]]],\n )\n def test_difference_object_type(self, diff_type, expected):\n # GH 13432\n idx1 = Index([0, 1, "A", "B"])\n idx2 = Index([0, 2, "A", "C"])\n result = getattr(idx1, diff_type)(idx2)\n expected = Index(expected)\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\base_class\test_setops.py | test_setops.py | Python | 
9,178 | 0.95 | 0.116105 | 0.072727 | awesome-app | 191 | 2024-11-07T06:08:02.024306 | GPL-3.0 | true | 146e52ec785e703b7f360ef34bbe16c7 |
import numpy as np\n\nfrom pandas import Index\nimport pandas._testing as tm\n\n\nclass TestWhere:\n def test_where_intlike_str_doesnt_cast_ints(self):\n idx = Index(range(3))\n mask = np.array([True, False, True])\n res = idx.where(mask, "2")\n expected = Index([0, "2", 2])\n tm.assert_index_equal(res, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\base_class\test_where.py | test_where.py | Python | 341 | 0.85 | 0.153846 | 0 | python-kit | 120 | 2023-11-14T18:29:55.361139 | MIT | true | 636e8c2f5cb7ca4bb3a61cb142a87dc1 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\base_class\__pycache__\test_constructors.cpython-313.pyc | test_constructors.cpython-313.pyc | Other | 5,378 | 0.8 | 0 | 0 | awesome-app | 562 | 2025-02-27T21:19:24.388498 | MIT | true | d096b23c45f94081035fb2e915275905 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\base_class\__pycache__\test_formats.cpython-313.pyc | test_formats.cpython-313.pyc | Other | 6,771 | 0.8 | 0 | 0 | vue-tools | 448 | 2024-03-18T11:52:11.552292 | GPL-3.0 | true | 138bd217261c6a2416b817b9ce6d931a |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\base_class\__pycache__\test_indexing.cpython-313.pyc | test_indexing.cpython-313.pyc | Other | 7,365 | 0.8 | 0.014706 | 0.015385 | react-lib | 98 | 2025-03-30T21:48:43.314667 | BSD-3-Clause | true | 772a5ec36a8deadc4878d007ce508aa0 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\base_class\__pycache__\test_pickle.cpython-313.pyc | test_pickle.cpython-313.pyc | Other | 743 | 0.7 | 0 | 0 | python-kit | 796 | 2024-05-24T01:10:01.515076 | BSD-3-Clause | true | 595770a4a36a6b8f8c1a14a5a3d741bc |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\base_class\__pycache__\test_reshape.cpython-313.pyc | test_reshape.cpython-313.pyc | Other | 5,857 | 0.8 | 0.057692 | 0 | awesome-app | 118 | 2023-09-12T06:35:00.193677 | BSD-3-Clause | true | 2147ebdf9dde5b6a568737342d3d1924 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\base_class\__pycache__\test_setops.cpython-313.pyc | test_setops.cpython-313.pyc | Other | 13,612 | 0.8 | 0.00813 | 0.008264 | node-utils | 143 | 2024-08-05T11:45:23.626835 | BSD-3-Clause | true | 658e8dd415504b706ddafe68db1191b7 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\base_class\__pycache__\test_where.cpython-313.pyc | test_where.cpython-313.pyc | Other | 1,100 | 0.7 | 0 | 0 | vue-tools | 7 | 2023-11-29T10:45:36.688928 | MIT | true | 1e2cd4fd5a0eccc278276ac34ecf1a50 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\base_class\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 206 | 0.7 | 0 | 0 | awesome-app | 499 | 2025-03-04T05:49:30.520281 | BSD-3-Clause | true | 989695db606949ed863795994d9cfa58 |
import pytest\n\nfrom pandas import (\n CategoricalIndex,\n Index,\n)\nimport pandas._testing as tm\n\n\nclass TestAppend:\n @pytest.fixture\n def ci(self):\n categories = list("cab")\n return CategoricalIndex(list("aabbca"), categories=categories, ordered=False)\n\n def test_append(self, ci):\n # append cats with the same categories\n result = ci[:3].append(ci[3:])\n tm.assert_index_equal(result, ci, exact=True)\n\n foos = [ci[:1], ci[1:3], ci[3:]]\n result = foos[0].append(foos[1:])\n tm.assert_index_equal(result, ci, exact=True)\n\n def test_append_empty(self, ci):\n # empty\n result = ci.append([])\n tm.assert_index_equal(result, ci, exact=True)\n\n def test_append_mismatched_categories(self, ci):\n # appending with different categories or reordered is not ok\n msg = "all inputs must be Index"\n with pytest.raises(TypeError, match=msg):\n ci.append(ci.values.set_categories(list("abcd")))\n with pytest.raises(TypeError, match=msg):\n ci.append(ci.values.reorder_categories(list("abc")))\n\n def test_append_category_objects(self, ci):\n # with objects\n result = ci.append(Index(["c", "a"]))\n expected = CategoricalIndex(list("aabbcaca"), categories=ci.categories)\n tm.assert_index_equal(result, expected, exact=True)\n\n def test_append_non_categories(self, ci):\n # invalid objects -> cast to object via concat_compat\n result = ci.append(Index(["a", "d"]))\n expected = Index(["a", "a", "b", "b", "c", "a", "a", "d"])\n tm.assert_index_equal(result, expected, exact=True)\n\n def test_append_object(self, ci):\n # GH#14298 - if base object is not categorical -> coerce to object\n result = Index(["c", "a"]).append(ci)\n expected = Index(list("caaabbca"))\n tm.assert_index_equal(result, expected, exact=True)\n\n def test_append_to_another(self):\n # hits Index._concat\n fst = Index(["a", "b"])\n snd = CategoricalIndex(["d", "e"])\n result = fst.append(snd)\n expected = Index(["a", "b", "d", "e"])\n tm.assert_index_equal(result, expected)\n | 
.venv\Lib\site-packages\pandas\tests\indexes\categorical\test_append.py | test_append.py | Python | 2,191 | 0.95 | 0.16129 | 0.137255 | react-lib | 18 | 2025-06-22T13:11:40.807888 | GPL-3.0 | true | 81e2312386832b6d8dbaa5b9e42d380c |
from datetime import date\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n Categorical,\n CategoricalDtype,\n CategoricalIndex,\n Index,\n IntervalIndex,\n)\nimport pandas._testing as tm\n\n\nclass TestAstype:\n def test_astype(self):\n ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)\n\n result = ci.astype(object)\n tm.assert_index_equal(result, Index(np.array(ci), dtype=object))\n\n # this IS equal, but not the same class\n assert result.equals(ci)\n assert isinstance(result, Index)\n assert not isinstance(result, CategoricalIndex)\n\n # interval\n ii = IntervalIndex.from_arrays(left=[-0.001, 2.0], right=[2, 4], closed="right")\n\n ci = CategoricalIndex(\n Categorical.from_codes([0, 1, -1], categories=ii, ordered=True)\n )\n\n result = ci.astype("interval")\n expected = ii.take([0, 1, -1], allow_fill=True, fill_value=np.nan)\n tm.assert_index_equal(result, expected)\n\n result = IntervalIndex(result.values)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("name", [None, "foo"])\n @pytest.mark.parametrize("dtype_ordered", [True, False])\n @pytest.mark.parametrize("index_ordered", [True, False])\n def test_astype_category(self, name, dtype_ordered, index_ordered):\n # GH#18630\n index = CategoricalIndex(\n list("aabbca"), categories=list("cab"), ordered=index_ordered\n )\n if name:\n index = index.rename(name)\n\n # standard categories\n dtype = CategoricalDtype(ordered=dtype_ordered)\n result = index.astype(dtype)\n expected = CategoricalIndex(\n index.tolist(),\n name=name,\n categories=index.categories,\n ordered=dtype_ordered,\n )\n tm.assert_index_equal(result, expected)\n\n # non-standard categories\n dtype = CategoricalDtype(index.unique().tolist()[:-1], dtype_ordered)\n result = index.astype(dtype)\n expected = CategoricalIndex(index.tolist(), name=name, dtype=dtype)\n tm.assert_index_equal(result, expected)\n\n if dtype_ordered is False:\n # dtype='category' can't specify ordered, so only test 
once\n result = index.astype("category")\n expected = index\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("box", [True, False])\n def test_categorical_date_roundtrip(self, box):\n # astype to categorical and back should preserve date objects\n v = date.today()\n\n obj = Index([v, v])\n assert obj.dtype == object\n if box:\n obj = obj.array\n\n cat = obj.astype("category")\n\n rtrip = cat.astype(object)\n assert rtrip.dtype == object\n assert type(rtrip[0]) is date\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\test_astype.py | test_astype.py | Python | 2,860 | 0.95 | 0.088889 | 0.097222 | vue-tools | 720 | 2024-01-06T09:45:49.820341 | Apache-2.0 | true | a67fb9e328753954008c80d30e61c41d |
import numpy as np\nimport pytest\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas._libs import index as libindex\nfrom pandas._libs.arrays import NDArrayBacked\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n CategoricalDtype,\n)\nimport pandas._testing as tm\nfrom pandas.core.indexes.api import (\n CategoricalIndex,\n Index,\n)\n\n\nclass TestCategoricalIndex:\n @pytest.fixture\n def simple_index(self) -> CategoricalIndex:\n return CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)\n\n def test_can_hold_identifiers(self):\n idx = CategoricalIndex(list("aabbca"), categories=None, ordered=False)\n key = idx[0]\n assert idx._can_hold_identifiers_and_holds_name(key) is True\n\n def test_insert(self, simple_index):\n ci = simple_index\n categories = ci.categories\n\n # test 0th element\n result = ci.insert(0, "a")\n expected = CategoricalIndex(list("aaabbca"), categories=categories)\n tm.assert_index_equal(result, expected, exact=True)\n\n # test Nth element that follows Python list behavior\n result = ci.insert(-1, "a")\n expected = CategoricalIndex(list("aabbcaa"), categories=categories)\n tm.assert_index_equal(result, expected, exact=True)\n\n # test empty\n result = CategoricalIndex([], categories=categories).insert(0, "a")\n expected = CategoricalIndex(["a"], categories=categories)\n tm.assert_index_equal(result, expected, exact=True)\n\n # invalid -> cast to object\n expected = ci.astype(object).insert(0, "d")\n result = ci.insert(0, "d").astype(object)\n tm.assert_index_equal(result, expected, exact=True)\n\n # GH 18295 (test missing)\n expected = CategoricalIndex(["a", np.nan, "a", "b", "c", "b"])\n for na in (np.nan, pd.NaT, None):\n result = CategoricalIndex(list("aabcb")).insert(1, na)\n tm.assert_index_equal(result, expected)\n\n def test_insert_na_mismatched_dtype(self):\n ci = CategoricalIndex([0, 1, 1])\n result = ci.insert(0, pd.NaT)\n expected = Index([pd.NaT, 0, 1, 1], dtype=object)\n 
tm.assert_index_equal(result, expected)\n\n def test_delete(self, simple_index):\n ci = simple_index\n categories = ci.categories\n\n result = ci.delete(0)\n expected = CategoricalIndex(list("abbca"), categories=categories)\n tm.assert_index_equal(result, expected, exact=True)\n\n result = ci.delete(-1)\n expected = CategoricalIndex(list("aabbc"), categories=categories)\n tm.assert_index_equal(result, expected, exact=True)\n\n with tm.external_error_raised((IndexError, ValueError)):\n # Either depending on NumPy version\n ci.delete(10)\n\n @pytest.mark.parametrize(\n "data, non_lexsorted_data",\n [[[1, 2, 3], [9, 0, 1, 2, 3]], [list("abc"), list("fabcd")]],\n )\n def test_is_monotonic(self, data, non_lexsorted_data):\n c = CategoricalIndex(data)\n assert c.is_monotonic_increasing is True\n assert c.is_monotonic_decreasing is False\n\n c = CategoricalIndex(data, ordered=True)\n assert c.is_monotonic_increasing is True\n assert c.is_monotonic_decreasing is False\n\n c = CategoricalIndex(data, categories=reversed(data))\n assert c.is_monotonic_increasing is False\n assert c.is_monotonic_decreasing is True\n\n c = CategoricalIndex(data, categories=reversed(data), ordered=True)\n assert c.is_monotonic_increasing is False\n assert c.is_monotonic_decreasing is True\n\n # test when data is neither monotonic increasing nor decreasing\n reordered_data = [data[0], data[2], data[1]]\n c = CategoricalIndex(reordered_data, categories=reversed(data))\n assert c.is_monotonic_increasing is False\n assert c.is_monotonic_decreasing is False\n\n # non lexsorted categories\n categories = non_lexsorted_data\n\n c = CategoricalIndex(categories[:2], categories=categories)\n assert c.is_monotonic_increasing is True\n assert c.is_monotonic_decreasing is False\n\n c = CategoricalIndex(categories[1:3], categories=categories)\n assert c.is_monotonic_increasing is True\n assert c.is_monotonic_decreasing is False\n\n def test_has_duplicates(self):\n idx = CategoricalIndex([0, 0, 0], 
name="foo")\n assert idx.is_unique is False\n assert idx.has_duplicates is True\n\n idx = CategoricalIndex([0, 1], categories=[2, 3], name="foo")\n assert idx.is_unique is False\n assert idx.has_duplicates is True\n\n idx = CategoricalIndex([0, 1, 2, 3], categories=[1, 2, 3], name="foo")\n assert idx.is_unique is True\n assert idx.has_duplicates is False\n\n @pytest.mark.parametrize(\n "data, categories, expected",\n [\n (\n [1, 1, 1],\n [1, 2, 3],\n {\n "first": np.array([False, True, True]),\n "last": np.array([True, True, False]),\n False: np.array([True, True, True]),\n },\n ),\n (\n [1, 1, 1],\n list("abc"),\n {\n "first": np.array([False, True, True]),\n "last": np.array([True, True, False]),\n False: np.array([True, True, True]),\n },\n ),\n (\n [2, "a", "b"],\n list("abc"),\n {\n "first": np.zeros(shape=(3), dtype=np.bool_),\n "last": np.zeros(shape=(3), dtype=np.bool_),\n False: np.zeros(shape=(3), dtype=np.bool_),\n },\n ),\n (\n list("abb"),\n list("abc"),\n {\n "first": np.array([False, False, True]),\n "last": np.array([False, True, False]),\n False: np.array([False, True, True]),\n },\n ),\n ],\n )\n def test_drop_duplicates(self, data, categories, expected):\n idx = CategoricalIndex(data, categories=categories, name="foo")\n for keep, e in expected.items():\n tm.assert_numpy_array_equal(idx.duplicated(keep=keep), e)\n e = idx[~e]\n result = idx.drop_duplicates(keep=keep)\n tm.assert_index_equal(result, e)\n\n @pytest.mark.parametrize(\n "data, categories, expected_data",\n [\n ([1, 1, 1], [1, 2, 3], [1]),\n ([1, 1, 1], list("abc"), [np.nan]),\n ([1, 2, "a"], [1, 2, 3], [1, 2, np.nan]),\n ([2, "a", "b"], list("abc"), [np.nan, "a", "b"]),\n ],\n )\n def test_unique(self, data, categories, expected_data, ordered):\n dtype = CategoricalDtype(categories, ordered=ordered)\n\n idx = CategoricalIndex(data, dtype=dtype)\n expected = CategoricalIndex(expected_data, dtype=dtype)\n tm.assert_index_equal(idx.unique(), expected)\n\n 
@pytest.mark.xfail(using_string_dtype(), reason="repr doesn't roundtrip")\n def test_repr_roundtrip(self):\n ci = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True)\n str(ci)\n tm.assert_index_equal(eval(repr(ci)), ci, exact=True)\n\n # formatting\n str(ci)\n\n # long format\n # this is not reprable\n ci = CategoricalIndex(np.random.default_rng(2).integers(0, 5, size=100))\n str(ci)\n\n def test_isin(self):\n ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"])\n tm.assert_numpy_array_equal(\n ci.isin(["c"]), np.array([False, False, False, True, False, False])\n )\n tm.assert_numpy_array_equal(\n ci.isin(["c", "a", "b"]), np.array([True] * 5 + [False])\n )\n tm.assert_numpy_array_equal(\n ci.isin(["c", "a", "b", np.nan]), np.array([True] * 6)\n )\n\n # mismatched categorical -> coerced to ndarray so doesn't matter\n result = ci.isin(ci.set_categories(list("abcdefghi")))\n expected = np.array([True] * 6)\n tm.assert_numpy_array_equal(result, expected)\n\n result = ci.isin(ci.set_categories(list("defghi")))\n expected = np.array([False] * 5 + [True])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_isin_overlapping_intervals(self):\n # GH 34974\n idx = pd.IntervalIndex([pd.Interval(0, 2), pd.Interval(0, 1)])\n result = CategoricalIndex(idx).isin(idx)\n expected = np.array([True, True])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_identical(self):\n ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True)\n ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True)\n assert ci1.identical(ci1)\n assert ci1.identical(ci1.copy())\n assert not ci1.identical(ci2)\n\n def test_ensure_copied_data(self):\n # gh-12309: Check the "copy" argument of each\n # Index.__new__ is honored.\n #\n # Must be tested separately from other indexes because\n # self.values is not an ndarray.\n index = CategoricalIndex(list("ab") * 5)\n\n result = CategoricalIndex(index.values, copy=True)\n 
tm.assert_index_equal(index, result)\n assert not np.shares_memory(result._data._codes, index._data._codes)\n\n result = CategoricalIndex(index.values, copy=False)\n assert result._data._codes is index._data._codes\n\n\nclass TestCategoricalIndex2:\n def test_view_i8(self):\n # GH#25464\n ci = CategoricalIndex(list("ab") * 50)\n msg = "When changing to a larger dtype, its size must be a divisor"\n with pytest.raises(ValueError, match=msg):\n ci.view("i8")\n with pytest.raises(ValueError, match=msg):\n ci._data.view("i8")\n\n ci = ci[:-4] # length divisible by 8\n\n res = ci.view("i8")\n expected = ci._data.codes.view("i8")\n tm.assert_numpy_array_equal(res, expected)\n\n cat = ci._data\n tm.assert_numpy_array_equal(cat.view("i8"), expected)\n\n @pytest.mark.parametrize(\n "dtype, engine_type",\n [\n (np.int8, libindex.Int8Engine),\n (np.int16, libindex.Int16Engine),\n (np.int32, libindex.Int32Engine),\n (np.int64, libindex.Int64Engine),\n ],\n )\n def test_engine_type(self, dtype, engine_type):\n if dtype != np.int64:\n # num. 
of uniques required to push CategoricalIndex.codes to a\n # dtype (128 categories required for .codes dtype to be int16 etc.)\n num_uniques = {np.int8: 1, np.int16: 128, np.int32: 32768}[dtype]\n ci = CategoricalIndex(range(num_uniques))\n else:\n # having 2**32 - 2**31 categories would be very memory-intensive,\n # so we cheat a bit with the dtype\n ci = CategoricalIndex(range(32768)) # == 2**16 - 2**(16 - 1)\n arr = ci.values._ndarray.astype("int64")\n NDArrayBacked.__init__(ci._data, arr, ci.dtype)\n assert np.issubdtype(ci.codes.dtype, dtype)\n assert isinstance(ci._engine, engine_type)\n\n @pytest.mark.parametrize(\n "func,op_name",\n [\n (lambda idx: idx - idx, "__sub__"),\n (lambda idx: idx + idx, "__add__"),\n (lambda idx: idx - ["a", "b"], "__sub__"),\n (lambda idx: idx + ["a", "b"], "__add__"),\n (lambda idx: ["a", "b"] - idx, "__rsub__"),\n (lambda idx: ["a", "b"] + idx, "__radd__"),\n ],\n )\n def test_disallow_addsub_ops(self, func, op_name):\n # GH 10039\n # set ops (+/-) raise TypeError\n idx = Index(Categorical(["a", "b"]))\n cat_or_list = "'(Categorical|list)' and '(Categorical|list)'"\n msg = "|".join(\n [\n f"cannot perform {op_name} with this index type: CategoricalIndex",\n "can only concatenate list",\n rf"unsupported operand type\(s\) for [\+-]: {cat_or_list}",\n ]\n )\n with pytest.raises(TypeError, match=msg):\n func(idx)\n\n def test_method_delegation(self):\n ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"))\n result = ci.set_categories(list("cab"))\n tm.assert_index_equal(\n result, CategoricalIndex(list("aabbca"), categories=list("cab"))\n )\n\n ci = CategoricalIndex(list("aabbca"), categories=list("cab"))\n result = ci.rename_categories(list("efg"))\n tm.assert_index_equal(\n result, CategoricalIndex(list("ffggef"), categories=list("efg"))\n )\n\n # GH18862 (let rename_categories take callables)\n result = ci.rename_categories(lambda x: x.upper())\n tm.assert_index_equal(\n result, CategoricalIndex(list("AABBCA"), 
categories=list("CAB"))\n )\n\n ci = CategoricalIndex(list("aabbca"), categories=list("cab"))\n result = ci.add_categories(["d"])\n tm.assert_index_equal(\n result, CategoricalIndex(list("aabbca"), categories=list("cabd"))\n )\n\n ci = CategoricalIndex(list("aabbca"), categories=list("cab"))\n result = ci.remove_categories(["c"])\n tm.assert_index_equal(\n result,\n CategoricalIndex(list("aabb") + [np.nan] + ["a"], categories=list("ab")),\n )\n\n ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"))\n result = ci.as_unordered()\n tm.assert_index_equal(result, ci)\n\n ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"))\n result = ci.as_ordered()\n tm.assert_index_equal(\n result,\n CategoricalIndex(list("aabbca"), categories=list("cabdef"), ordered=True),\n )\n\n # invalid\n msg = "cannot use inplace with CategoricalIndex"\n with pytest.raises(ValueError, match=msg):\n ci.set_categories(list("cab"), inplace=True)\n\n def test_remove_maintains_order(self):\n ci = CategoricalIndex(list("abcdda"), categories=list("abcd"))\n result = ci.reorder_categories(["d", "c", "b", "a"], ordered=True)\n tm.assert_index_equal(\n result,\n CategoricalIndex(list("abcdda"), categories=list("dcba"), ordered=True),\n )\n result = result.remove_categories(["c"])\n tm.assert_index_equal(\n result,\n CategoricalIndex(\n ["a", "b", np.nan, "d", "d", "a"], categories=list("dba"), ordered=True\n ),\n )\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\test_category.py | test_category.py | Python | 14,667 | 0.95 | 0.06599 | 0.080357 | node-utils | 479 | 2025-03-11T00:32:25.825864 | GPL-3.0 | true | ec5f1ec20a65769d211359ef6db8c056 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n Categorical,\n CategoricalDtype,\n CategoricalIndex,\n Index,\n)\nimport pandas._testing as tm\n\n\nclass TestCategoricalIndexConstructors:\n def test_construction_disallows_scalar(self):\n msg = "must be called with a collection of some kind"\n with pytest.raises(TypeError, match=msg):\n CategoricalIndex(data=1, categories=list("abcd"), ordered=False)\n with pytest.raises(TypeError, match=msg):\n CategoricalIndex(categories=list("abcd"), ordered=False)\n\n def test_construction(self):\n ci = CategoricalIndex(list("aabbca"), categories=list("abcd"), ordered=False)\n categories = ci.categories\n\n result = Index(ci)\n tm.assert_index_equal(result, ci, exact=True)\n assert not result.ordered\n\n result = Index(ci.values)\n tm.assert_index_equal(result, ci, exact=True)\n assert not result.ordered\n\n # empty\n result = CategoricalIndex([], categories=categories)\n tm.assert_index_equal(result.categories, Index(categories))\n tm.assert_numpy_array_equal(result.codes, np.array([], dtype="int8"))\n assert not result.ordered\n\n # passing categories\n result = CategoricalIndex(list("aabbca"), categories=categories)\n tm.assert_index_equal(result.categories, Index(categories))\n tm.assert_numpy_array_equal(\n result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8")\n )\n\n c = Categorical(list("aabbca"))\n result = CategoricalIndex(c)\n tm.assert_index_equal(result.categories, Index(list("abc")))\n tm.assert_numpy_array_equal(\n result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8")\n )\n assert not result.ordered\n\n result = CategoricalIndex(c, categories=categories)\n tm.assert_index_equal(result.categories, Index(categories))\n tm.assert_numpy_array_equal(\n result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8")\n )\n assert not result.ordered\n\n ci = CategoricalIndex(c, categories=list("abcd"))\n result = CategoricalIndex(ci)\n tm.assert_index_equal(result.categories, Index(categories))\n 
tm.assert_numpy_array_equal(\n result.codes, np.array([0, 0, 1, 1, 2, 0], dtype="int8")\n )\n assert not result.ordered\n\n result = CategoricalIndex(ci, categories=list("ab"))\n tm.assert_index_equal(result.categories, Index(list("ab")))\n tm.assert_numpy_array_equal(\n result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8")\n )\n assert not result.ordered\n\n result = CategoricalIndex(ci, categories=list("ab"), ordered=True)\n tm.assert_index_equal(result.categories, Index(list("ab")))\n tm.assert_numpy_array_equal(\n result.codes, np.array([0, 0, 1, 1, -1, 0], dtype="int8")\n )\n assert result.ordered\n\n result = CategoricalIndex(ci, categories=list("ab"), ordered=True)\n expected = CategoricalIndex(\n ci, categories=list("ab"), ordered=True, dtype="category"\n )\n tm.assert_index_equal(result, expected, exact=True)\n\n # turn me to an Index\n result = Index(np.array(ci))\n assert isinstance(result, Index)\n assert not isinstance(result, CategoricalIndex)\n\n def test_construction_with_dtype(self):\n # specify dtype\n ci = CategoricalIndex(list("aabbca"), categories=list("abc"), ordered=False)\n\n result = Index(np.array(ci), dtype="category")\n tm.assert_index_equal(result, ci, exact=True)\n\n result = Index(np.array(ci).tolist(), dtype="category")\n tm.assert_index_equal(result, ci, exact=True)\n\n # these are generally only equal when the categories are reordered\n ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)\n\n result = Index(np.array(ci), dtype="category").reorder_categories(ci.categories)\n tm.assert_index_equal(result, ci, exact=True)\n\n # make sure indexes are handled\n idx = Index(range(3))\n expected = CategoricalIndex([0, 1, 2], categories=idx, ordered=True)\n result = CategoricalIndex(idx, categories=idx, ordered=True)\n tm.assert_index_equal(result, expected, exact=True)\n\n def test_construction_empty_with_bool_categories(self):\n # see GH#22702\n cat = CategoricalIndex([], categories=[True, False])\n categories 
= sorted(cat.categories.tolist())\n assert categories == [False, True]\n\n def test_construction_with_categorical_dtype(self):\n # construction with CategoricalDtype\n # GH#18109\n data, cats, ordered = "a a b b".split(), "c b a".split(), True\n dtype = CategoricalDtype(categories=cats, ordered=ordered)\n\n result = CategoricalIndex(data, dtype=dtype)\n expected = CategoricalIndex(data, categories=cats, ordered=ordered)\n tm.assert_index_equal(result, expected, exact=True)\n\n # GH#19032\n result = Index(data, dtype=dtype)\n tm.assert_index_equal(result, expected, exact=True)\n\n # error when combining categories/ordered and dtype kwargs\n msg = "Cannot specify `categories` or `ordered` together with `dtype`."\n with pytest.raises(ValueError, match=msg):\n CategoricalIndex(data, categories=cats, dtype=dtype)\n\n with pytest.raises(ValueError, match=msg):\n CategoricalIndex(data, ordered=ordered, dtype=dtype)\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\test_constructors.py | test_constructors.py | Python | 5,536 | 0.95 | 0.042254 | 0.095652 | node-utils | 131 | 2024-02-28T01:12:53.880187 | GPL-3.0 | true | 6611f7057cefd9ad272ea1256d7b1e45 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n Categorical,\n CategoricalIndex,\n Index,\n MultiIndex,\n)\n\n\nclass TestEquals:\n def test_equals_categorical(self):\n ci1 = CategoricalIndex(["a", "b"], categories=["a", "b"], ordered=True)\n ci2 = CategoricalIndex(["a", "b"], categories=["a", "b", "c"], ordered=True)\n\n assert ci1.equals(ci1)\n assert not ci1.equals(ci2)\n assert ci1.equals(ci1.astype(object))\n assert ci1.astype(object).equals(ci1)\n\n assert (ci1 == ci1).all()\n assert not (ci1 != ci1).all()\n assert not (ci1 > ci1).all()\n assert not (ci1 < ci1).all()\n assert (ci1 <= ci1).all()\n assert (ci1 >= ci1).all()\n\n assert not (ci1 == 1).all()\n assert (ci1 == Index(["a", "b"])).all()\n assert (ci1 == ci1.values).all()\n\n # invalid comparisons\n with pytest.raises(ValueError, match="Lengths must match"):\n ci1 == Index(["a", "b", "c"])\n\n msg = "Categoricals can only be compared if 'categories' are the same"\n with pytest.raises(TypeError, match=msg):\n ci1 == ci2\n with pytest.raises(TypeError, match=msg):\n ci1 == Categorical(ci1.values, ordered=False)\n with pytest.raises(TypeError, match=msg):\n ci1 == Categorical(ci1.values, categories=list("abc"))\n\n # tests\n # make sure that we are testing for category inclusion properly\n ci = CategoricalIndex(list("aabca"), categories=["c", "a", "b"])\n assert not ci.equals(list("aabca"))\n # Same categories, but different order\n # Unordered\n assert ci.equals(CategoricalIndex(list("aabca")))\n # Ordered\n assert not ci.equals(CategoricalIndex(list("aabca"), ordered=True))\n assert ci.equals(ci.copy())\n\n ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"])\n assert not ci.equals(list("aabca"))\n assert not ci.equals(CategoricalIndex(list("aabca")))\n assert ci.equals(ci.copy())\n\n ci = CategoricalIndex(list("aabca") + [np.nan], categories=["c", "a", "b"])\n assert not ci.equals(list("aabca") + [np.nan])\n assert ci.equals(CategoricalIndex(list("aabca") + 
[np.nan]))\n assert not ci.equals(CategoricalIndex(list("aabca") + [np.nan], ordered=True))\n assert ci.equals(ci.copy())\n\n def test_equals_categorical_unordered(self):\n # https://github.com/pandas-dev/pandas/issues/16603\n a = CategoricalIndex(["A"], categories=["A", "B"])\n b = CategoricalIndex(["A"], categories=["B", "A"])\n c = CategoricalIndex(["C"], categories=["B", "A"])\n assert a.equals(b)\n assert not a.equals(c)\n assert not b.equals(c)\n\n def test_equals_non_category(self):\n # GH#37667 Case where other contains a value not among ci's\n # categories ("D") and also contains np.nan\n ci = CategoricalIndex(["A", "B", np.nan, np.nan])\n other = Index(["A", "B", "D", np.nan])\n\n assert not ci.equals(other)\n\n def test_equals_multiindex(self):\n # dont raise NotImplementedError when calling is_dtype_compat\n\n mi = MultiIndex.from_arrays([["A", "B", "C", "D"], range(4)])\n ci = mi.to_flat_index().astype("category")\n\n assert not ci.equals(mi)\n\n def test_equals_string_dtype(self, any_string_dtype):\n # GH#55364\n idx = CategoricalIndex(list("abc"), name="B")\n other = Index(["a", "b", "c"], name="B", dtype=any_string_dtype)\n assert idx.equals(other)\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\test_equals.py | test_equals.py | Python | 3,569 | 0.95 | 0.083333 | 0.141026 | python-kit | 401 | 2025-03-15T18:29:18.738637 | BSD-3-Clause | true | 832336041c7895b9fde260ec15cd9440 |
import numpy as np\nimport pytest\n\nfrom pandas import CategoricalIndex\nimport pandas._testing as tm\n\n\nclass TestFillNA:\n def test_fillna_categorical(self):\n # GH#11343\n idx = CategoricalIndex([1.0, np.nan, 3.0, 1.0], name="x")\n # fill by value in categories\n exp = CategoricalIndex([1.0, 1.0, 3.0, 1.0], name="x")\n tm.assert_index_equal(idx.fillna(1.0), exp)\n\n cat = idx._data\n\n # fill by value not in categories raises TypeError on EA, casts on CI\n msg = "Cannot setitem on a Categorical with a new category"\n with pytest.raises(TypeError, match=msg):\n cat.fillna(2.0)\n\n result = idx.fillna(2.0)\n expected = idx.astype(object).fillna(2.0)\n tm.assert_index_equal(result, expected)\n\n def test_fillna_copies_with_no_nas(self):\n # Nothing to fill, should still get a copy for the Categorical method,\n # but OK to get a view on CategoricalIndex method\n ci = CategoricalIndex([0, 1, 1])\n result = ci.fillna(0)\n assert result is not ci\n assert tm.shares_memory(result, ci)\n\n # But at the EA level we always get a copy.\n cat = ci._data\n result = cat.fillna(0)\n assert result._ndarray is not cat._ndarray\n assert result._ndarray.base is None\n assert not tm.shares_memory(result, cat)\n\n def test_fillna_validates_with_no_nas(self):\n # We validate the fill value even if fillna is a no-op\n ci = CategoricalIndex([2, 3, 3])\n cat = ci._data\n\n msg = "Cannot setitem on a Categorical with a new category"\n res = ci.fillna(False)\n # nothing to fill, so we dont cast\n tm.assert_index_equal(res, ci)\n\n # Same check directly on the Categorical\n with pytest.raises(TypeError, match=msg):\n cat.fillna(False)\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\test_fillna.py | test_fillna.py | Python | 1,850 | 0.95 | 0.111111 | 0.209302 | awesome-app | 601 | 2024-09-08T14:32:26.615478 | BSD-3-Clause | true | f4253bcd25c66d4108375afc56b68f3b |
"""\nTests for CategoricalIndex.__repr__ and related methods.\n"""\nimport pytest\n\nfrom pandas._config import using_string_dtype\nimport pandas._config.config as cf\n\nfrom pandas import CategoricalIndex\nimport pandas._testing as tm\n\n\nclass TestCategoricalIndexRepr:\n def test_format_different_scalar_lengths(self):\n # GH#35439\n idx = CategoricalIndex(["aaaaaaaaa", "b"])\n expected = ["aaaaaaaaa", "b"]\n msg = r"CategoricalIndex\.format is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert idx.format() == expected\n\n @pytest.mark.xfail(using_string_dtype(), reason="repr different")\n def test_string_categorical_index_repr(self):\n # short\n idx = CategoricalIndex(["a", "bb", "ccc"])\n expected = """CategoricalIndex(['a', 'bb', 'ccc'], categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa: E501\n assert repr(idx) == expected\n\n # multiple lines\n idx = CategoricalIndex(["a", "bb", "ccc"] * 10)\n expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb',\n 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n categories=['a', 'bb', 'ccc'], ordered=False, dtype='category')""" # noqa: E501\n\n assert repr(idx) == expected\n\n # truncated\n idx = CategoricalIndex(["a", "bb", "ccc"] * 100)\n expected = """CategoricalIndex(['a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a',\n ...\n 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc', 'a', 'bb', 'ccc'],\n categories=['a', 'bb', 'ccc'], ordered=False, dtype='category', length=300)""" # noqa: E501\n\n assert repr(idx) == expected\n\n # larger categories\n idx = CategoricalIndex(list("abcdefghijklmmo"))\n expected = """CategoricalIndex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l',\n 'm', 'm', 'o'],\n categories=['a', 'b', 'c', 'd', ..., 'k', 'l', 'm', 'o'], ordered=False, dtype='category')""" # noqa: E501\n\n assert repr(idx) == expected\n\n # short\n 
idx = CategoricalIndex(["あ", "いい", "ううう"])\n expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501\n assert repr(idx) == expected\n\n # multiple lines\n idx = CategoricalIndex(["あ", "いい", "ううう"] * 10)\n expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',\n 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501\n\n assert repr(idx) == expected\n\n # truncated\n idx = CategoricalIndex(["あ", "いい", "ううう"] * 100)\n expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ',\n ...\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa: E501\n\n assert repr(idx) == expected\n\n # larger categories\n idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ"))\n expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し',\n 'す', 'せ', 'そ'],\n categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa: E501\n\n assert repr(idx) == expected\n\n # Enable Unicode option -----------------------------------------\n with cf.option_context("display.unicode.east_asian_width", True):\n # short\n idx = CategoricalIndex(["あ", "いい", "ううう"])\n expected = """CategoricalIndex(['あ', 'いい', 'ううう'], categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category')""" # noqa: E501\n assert repr(idx) == expected\n\n # multiple lines\n idx = CategoricalIndex(["あ", "いい", "ううう"] * 10)\n expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう'],\n categories=['あ', 'いい', 'ううう'], ordered=False, 
dtype='category')""" # noqa: E501\n\n assert repr(idx) == expected\n\n # truncated\n idx = CategoricalIndex(["あ", "いい", "ううう"] * 100)\n expected = """CategoricalIndex(['あ', 'いい', 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい',\n 'ううう', 'あ',\n ...\n 'ううう', 'あ', 'いい', 'ううう', 'あ', 'いい', 'ううう',\n 'あ', 'いい', 'ううう'],\n categories=['あ', 'いい', 'ううう'], ordered=False, dtype='category', length=300)""" # noqa: E501\n\n assert repr(idx) == expected\n\n # larger categories\n idx = CategoricalIndex(list("あいうえおかきくけこさしすせそ"))\n expected = """CategoricalIndex(['あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ',\n 'さ', 'し', 'す', 'せ', 'そ'],\n categories=['あ', 'い', 'う', 'え', ..., 'し', 'す', 'せ', 'そ'], ordered=False, dtype='category')""" # noqa: E501\n\n assert repr(idx) == expected\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\test_formats.py | test_formats.py | Python | 6,281 | 0.95 | 0.033333 | 0.147368 | python-kit | 329 | 2024-02-18T20:09:01.250840 | MIT | true | aae53995cae698896e04912decd6e3bb |
import numpy as np\nimport pytest\n\nfrom pandas.errors import InvalidIndexError\n\nimport pandas as pd\nfrom pandas import (\n CategoricalIndex,\n Index,\n IntervalIndex,\n Timestamp,\n)\nimport pandas._testing as tm\n\n\nclass TestTake:\n def test_take_fill_value(self):\n # GH 12631\n\n # numeric category\n idx = CategoricalIndex([1, 2, 3], name="xxx")\n result = idx.take(np.array([1, 0, -1]))\n expected = CategoricalIndex([2, 1, 3], name="xxx")\n tm.assert_index_equal(result, expected)\n tm.assert_categorical_equal(result.values, expected.values)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = CategoricalIndex([2, 1, np.nan], categories=[1, 2, 3], name="xxx")\n tm.assert_index_equal(result, expected)\n tm.assert_categorical_equal(result.values, expected.values)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)\n expected = CategoricalIndex([2, 1, 3], name="xxx")\n tm.assert_index_equal(result, expected)\n tm.assert_categorical_equal(result.values, expected.values)\n\n # object category\n idx = CategoricalIndex(\n list("CBA"), categories=list("ABC"), ordered=True, name="xxx"\n )\n result = idx.take(np.array([1, 0, -1]))\n expected = CategoricalIndex(\n list("BCA"), categories=list("ABC"), ordered=True, name="xxx"\n )\n tm.assert_index_equal(result, expected)\n tm.assert_categorical_equal(result.values, expected.values)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = CategoricalIndex(\n ["B", "C", np.nan], categories=list("ABC"), ordered=True, name="xxx"\n )\n tm.assert_index_equal(result, expected)\n tm.assert_categorical_equal(result.values, expected.values)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)\n expected = CategoricalIndex(\n list("BCA"), categories=list("ABC"), ordered=True, name="xxx"\n )\n tm.assert_index_equal(result, expected)\n 
tm.assert_categorical_equal(result.values, expected.values)\n\n msg = (\n "When allow_fill=True and fill_value is not None, "\n "all indices must be >= -1"\n )\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n msg = "index -5 is out of bounds for (axis 0 with )?size 3"\n with pytest.raises(IndexError, match=msg):\n idx.take(np.array([1, -5]))\n\n def test_take_fill_value_datetime(self):\n # datetime category\n idx = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx")\n idx = CategoricalIndex(idx)\n result = idx.take(np.array([1, 0, -1]))\n expected = pd.DatetimeIndex(\n ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"\n )\n expected = CategoricalIndex(expected)\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = pd.DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx")\n exp_cats = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"])\n expected = CategoricalIndex(expected, categories=exp_cats)\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)\n expected = pd.DatetimeIndex(\n ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"\n )\n expected = CategoricalIndex(expected)\n tm.assert_index_equal(result, expected)\n\n msg = (\n "When allow_fill=True and fill_value is not None, "\n "all indices must be >= -1"\n )\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n msg = "index -5 is out of bounds for (axis 0 with )?size 3"\n with pytest.raises(IndexError, match=msg):\n idx.take(np.array([1, -5]))\n\n def test_take_invalid_kwargs(self):\n idx = CategoricalIndex([1, 2, 3], 
name="foo")\n indices = [1, 0, -1]\n\n msg = r"take\(\) got an unexpected keyword argument 'foo'"\n with pytest.raises(TypeError, match=msg):\n idx.take(indices, foo=2)\n\n msg = "the 'out' parameter is not supported"\n with pytest.raises(ValueError, match=msg):\n idx.take(indices, out=indices)\n\n msg = "the 'mode' parameter is not supported"\n with pytest.raises(ValueError, match=msg):\n idx.take(indices, mode="clip")\n\n\nclass TestGetLoc:\n def test_get_loc(self):\n # GH 12531\n cidx1 = CategoricalIndex(list("abcde"), categories=list("edabc"))\n idx1 = Index(list("abcde"))\n assert cidx1.get_loc("a") == idx1.get_loc("a")\n assert cidx1.get_loc("e") == idx1.get_loc("e")\n\n for i in [cidx1, idx1]:\n with pytest.raises(KeyError, match="'NOT-EXIST'"):\n i.get_loc("NOT-EXIST")\n\n # non-unique\n cidx2 = CategoricalIndex(list("aacded"), categories=list("edabc"))\n idx2 = Index(list("aacded"))\n\n # results in bool array\n res = cidx2.get_loc("d")\n tm.assert_numpy_array_equal(res, idx2.get_loc("d"))\n tm.assert_numpy_array_equal(\n res, np.array([False, False, False, True, False, True])\n )\n # unique element results in scalar\n res = cidx2.get_loc("e")\n assert res == idx2.get_loc("e")\n assert res == 4\n\n for i in [cidx2, idx2]:\n with pytest.raises(KeyError, match="'NOT-EXIST'"):\n i.get_loc("NOT-EXIST")\n\n # non-unique, sliceable\n cidx3 = CategoricalIndex(list("aabbb"), categories=list("abc"))\n idx3 = Index(list("aabbb"))\n\n # results in slice\n res = cidx3.get_loc("a")\n assert res == idx3.get_loc("a")\n assert res == slice(0, 2, None)\n\n res = cidx3.get_loc("b")\n assert res == idx3.get_loc("b")\n assert res == slice(2, 5, None)\n\n for i in [cidx3, idx3]:\n with pytest.raises(KeyError, match="'c'"):\n i.get_loc("c")\n\n def test_get_loc_unique(self):\n cidx = CategoricalIndex(list("abc"))\n result = cidx.get_loc("b")\n assert result == 1\n\n def test_get_loc_monotonic_nonunique(self):\n cidx = CategoricalIndex(list("abbc"))\n result = 
cidx.get_loc("b")\n expected = slice(1, 3, None)\n assert result == expected\n\n def test_get_loc_nonmonotonic_nonunique(self):\n cidx = CategoricalIndex(list("abcb"))\n result = cidx.get_loc("b")\n expected = np.array([False, True, False, True], dtype=bool)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_get_loc_nan(self):\n # GH#41933\n ci = CategoricalIndex(["A", "B", np.nan])\n res = ci.get_loc(np.nan)\n\n assert res == 2\n\n\nclass TestGetIndexer:\n def test_get_indexer_base(self):\n # Determined by cat ordering.\n idx = CategoricalIndex(list("cab"), categories=list("cab"))\n expected = np.arange(len(idx), dtype=np.intp)\n\n actual = idx.get_indexer(idx)\n tm.assert_numpy_array_equal(expected, actual)\n\n with pytest.raises(ValueError, match="Invalid fill method"):\n idx.get_indexer(idx, method="invalid")\n\n def test_get_indexer_requires_unique(self):\n ci = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)\n oidx = Index(np.array(ci))\n\n msg = "Reindexing only valid with uniquely valued Index objects"\n\n for n in [1, 2, 5, len(ci)]:\n finder = oidx[np.random.default_rng(2).integers(0, len(ci), size=n)]\n\n with pytest.raises(InvalidIndexError, match=msg):\n ci.get_indexer(finder)\n\n # see gh-17323\n #\n # Even when indexer is equal to the\n # members in the index, we should\n # respect duplicates instead of taking\n # the fast-track path.\n for finder in [list("aabbca"), list("aababca")]:\n with pytest.raises(InvalidIndexError, match=msg):\n ci.get_indexer(finder)\n\n def test_get_indexer_non_unique(self):\n idx1 = CategoricalIndex(list("aabcde"), categories=list("edabc"))\n idx2 = CategoricalIndex(list("abf"))\n\n for indexer in [idx2, list("abf"), Index(list("abf"))]:\n msg = "Reindexing only valid with uniquely valued Index objects"\n with pytest.raises(InvalidIndexError, match=msg):\n idx1.get_indexer(indexer)\n\n r1, _ = idx1.get_indexer_non_unique(indexer)\n expected = np.array([0, 1, 2, -1], dtype=np.intp)\n 
tm.assert_almost_equal(r1, expected)\n\n def test_get_indexer_method(self):\n idx1 = CategoricalIndex(list("aabcde"), categories=list("edabc"))\n idx2 = CategoricalIndex(list("abf"))\n\n msg = "method pad not yet implemented for CategoricalIndex"\n with pytest.raises(NotImplementedError, match=msg):\n idx2.get_indexer(idx1, method="pad")\n msg = "method backfill not yet implemented for CategoricalIndex"\n with pytest.raises(NotImplementedError, match=msg):\n idx2.get_indexer(idx1, method="backfill")\n\n msg = "method nearest not yet implemented for CategoricalIndex"\n with pytest.raises(NotImplementedError, match=msg):\n idx2.get_indexer(idx1, method="nearest")\n\n def test_get_indexer_array(self):\n arr = np.array(\n [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")],\n dtype=object,\n )\n cats = [Timestamp("1999-12-31 00:00:00"), Timestamp("2000-12-31 00:00:00")]\n ci = CategoricalIndex(cats, categories=cats, ordered=False, dtype="category")\n result = ci.get_indexer(arr)\n expected = np.array([0, 1], dtype="intp")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_get_indexer_same_categories_same_order(self):\n ci = CategoricalIndex(["a", "b"], categories=["a", "b"])\n\n result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["a", "b"]))\n expected = np.array([1, 1], dtype="intp")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_get_indexer_same_categories_different_order(self):\n # https://github.com/pandas-dev/pandas/issues/19551\n ci = CategoricalIndex(["a", "b"], categories=["a", "b"])\n\n result = ci.get_indexer(CategoricalIndex(["b", "b"], categories=["b", "a"]))\n expected = np.array([1, 1], dtype="intp")\n tm.assert_numpy_array_equal(result, expected)\n\n def test_get_indexer_nans_in_index_and_target(self):\n # GH 45361\n ci = CategoricalIndex([1, 2, np.nan, 3])\n other1 = [2, 3, 4, np.nan]\n res1 = ci.get_indexer(other1)\n expected1 = np.array([1, 3, -1, 2], dtype=np.intp)\n 
tm.assert_numpy_array_equal(res1, expected1)\n other2 = [1, 4, 2, 3]\n res2 = ci.get_indexer(other2)\n expected2 = np.array([0, -1, 1, 3], dtype=np.intp)\n tm.assert_numpy_array_equal(res2, expected2)\n\n\nclass TestWhere:\n def test_where(self, listlike_box):\n klass = listlike_box\n\n i = CategoricalIndex(list("aabbca"), categories=list("cab"), ordered=False)\n cond = [True] * len(i)\n expected = i\n result = i.where(klass(cond))\n tm.assert_index_equal(result, expected)\n\n cond = [False] + [True] * (len(i) - 1)\n expected = CategoricalIndex([np.nan] + i[1:].tolist(), categories=i.categories)\n result = i.where(klass(cond))\n tm.assert_index_equal(result, expected)\n\n def test_where_non_categories(self):\n ci = CategoricalIndex(["a", "b", "c", "d"])\n mask = np.array([True, False, True, False])\n\n result = ci.where(mask, 2)\n expected = Index(["a", 2, "c", 2], dtype=object)\n tm.assert_index_equal(result, expected)\n\n msg = "Cannot setitem on a Categorical with a new category"\n with pytest.raises(TypeError, match=msg):\n # Test the Categorical method directly\n ci._data._where(mask, 2)\n\n\nclass TestContains:\n def test_contains(self):\n ci = CategoricalIndex(list("aabbca"), categories=list("cabdef"), ordered=False)\n\n assert "a" in ci\n assert "z" not in ci\n assert "e" not in ci\n assert np.nan not in ci\n\n # assert codes NOT in index\n assert 0 not in ci\n assert 1 not in ci\n\n def test_contains_nan(self):\n ci = CategoricalIndex(list("aabbca") + [np.nan], categories=list("cabdef"))\n assert np.nan in ci\n\n @pytest.mark.parametrize("unwrap", [True, False])\n def test_contains_na_dtype(self, unwrap):\n dti = pd.date_range("2016-01-01", periods=100).insert(0, pd.NaT)\n pi = dti.to_period("D")\n tdi = dti - dti[-1]\n ci = CategoricalIndex(dti)\n\n obj = ci\n if unwrap:\n obj = ci._data\n\n assert np.nan in obj\n assert None in obj\n assert pd.NaT in obj\n assert np.datetime64("NaT") in obj\n assert np.timedelta64("NaT") not in obj\n\n obj2 = 
CategoricalIndex(tdi)\n if unwrap:\n obj2 = obj2._data\n\n assert np.nan in obj2\n assert None in obj2\n assert pd.NaT in obj2\n assert np.datetime64("NaT") not in obj2\n assert np.timedelta64("NaT") in obj2\n\n obj3 = CategoricalIndex(pi)\n if unwrap:\n obj3 = obj3._data\n\n assert np.nan in obj3\n assert None in obj3\n assert pd.NaT in obj3\n assert np.datetime64("NaT") not in obj3\n assert np.timedelta64("NaT") not in obj3\n\n @pytest.mark.parametrize(\n "item, expected",\n [\n (pd.Interval(0, 1), True),\n (1.5, True),\n (pd.Interval(0.5, 1.5), False),\n ("a", False),\n (Timestamp(1), False),\n (pd.Timedelta(1), False),\n ],\n ids=str,\n )\n def test_contains_interval(self, item, expected):\n # GH 23705\n ci = CategoricalIndex(IntervalIndex.from_breaks(range(3)))\n result = item in ci\n assert result is expected\n\n def test_contains_list(self):\n # GH#21729\n idx = CategoricalIndex([1, 2, 3])\n\n assert "a" not in idx\n\n with pytest.raises(TypeError, match="unhashable type"):\n ["a"] in idx\n\n with pytest.raises(TypeError, match="unhashable type"):\n ["a", "b"] in idx\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\test_indexing.py | test_indexing.py | Python | 14,978 | 0.95 | 0.1 | 0.088496 | node-utils | 880 | 2025-06-21T07:58:42.262890 | MIT | true | dc4a09ac70d8ed076991d76c022865ff |
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n CategoricalIndex,\n Index,\n Series,\n)\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize(\n "data, categories",\n [\n (list("abcbca"), list("cab")),\n (pd.interval_range(0, 3).repeat(3), pd.interval_range(0, 3)),\n ],\n ids=["string", "interval"],\n)\ndef test_map_str(data, categories, ordered):\n # GH 31202 - override base class since we want to maintain categorical/ordered\n index = CategoricalIndex(data, categories=categories, ordered=ordered)\n result = index.map(str)\n expected = CategoricalIndex(\n map(str, data), categories=map(str, categories), ordered=ordered\n )\n tm.assert_index_equal(result, expected)\n\n\ndef test_map():\n ci = CategoricalIndex(list("ABABC"), categories=list("CBA"), ordered=True)\n result = ci.map(lambda x: x.lower())\n exp = CategoricalIndex(list("ababc"), categories=list("cba"), ordered=True)\n tm.assert_index_equal(result, exp)\n\n ci = CategoricalIndex(\n list("ABABC"), categories=list("BAC"), ordered=False, name="XXX"\n )\n result = ci.map(lambda x: x.lower())\n exp = CategoricalIndex(\n list("ababc"), categories=list("bac"), ordered=False, name="XXX"\n )\n tm.assert_index_equal(result, exp)\n\n # GH 12766: Return an index not an array\n tm.assert_index_equal(\n ci.map(lambda x: 1), Index(np.array([1] * 5, dtype=np.int64), name="XXX")\n )\n\n # change categories dtype\n ci = CategoricalIndex(list("ABABC"), categories=list("BAC"), ordered=False)\n\n def f(x):\n return {"A": 10, "B": 20, "C": 30}.get(x)\n\n result = ci.map(f)\n exp = CategoricalIndex([10, 20, 10, 20, 30], categories=[20, 10, 30], ordered=False)\n tm.assert_index_equal(result, exp)\n\n result = ci.map(Series([10, 20, 30], index=["A", "B", "C"]))\n tm.assert_index_equal(result, exp)\n\n result = ci.map({"A": 10, "B": 20, "C": 30})\n tm.assert_index_equal(result, exp)\n\n\ndef test_map_with_categorical_series():\n # GH 12756\n a = Index([1, 2, 3, 4])\n b = Series(["even", "odd", 
"even", "odd"], dtype="category")\n c = Series(["even", "odd", "even", "odd"])\n\n exp = CategoricalIndex(["odd", "even", "odd", np.nan])\n tm.assert_index_equal(a.map(b), exp)\n exp = Index(["odd", "even", "odd", np.nan])\n tm.assert_index_equal(a.map(c), exp)\n\n\n@pytest.mark.parametrize(\n ("data", "f", "expected"),\n (\n ([1, 1, np.nan], pd.isna, CategoricalIndex([False, False, np.nan])),\n ([1, 2, np.nan], pd.isna, Index([False, False, np.nan])),\n ([1, 1, np.nan], {1: False}, CategoricalIndex([False, False, np.nan])),\n ([1, 2, np.nan], {1: False, 2: False}, Index([False, False, np.nan])),\n (\n [1, 1, np.nan],\n Series([False, False]),\n CategoricalIndex([False, False, np.nan]),\n ),\n (\n [1, 2, np.nan],\n Series([False, False, False]),\n Index([False, False, np.nan]),\n ),\n ),\n)\ndef test_map_with_nan_ignore(data, f, expected): # GH 24241\n values = CategoricalIndex(data)\n result = values.map(f, na_action="ignore")\n tm.assert_index_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n ("data", "f", "expected"),\n (\n ([1, 1, np.nan], pd.isna, Index([False, False, True])),\n ([1, 2, np.nan], pd.isna, Index([False, False, True])),\n ([1, 1, np.nan], {1: False}, CategoricalIndex([False, False, np.nan])),\n ([1, 2, np.nan], {1: False, 2: False}, Index([False, False, np.nan])),\n (\n [1, 1, np.nan],\n Series([False, False]),\n CategoricalIndex([False, False, np.nan]),\n ),\n (\n [1, 2, np.nan],\n Series([False, False, False]),\n Index([False, False, np.nan]),\n ),\n ),\n)\ndef test_map_with_nan_none(data, f, expected): # GH 24241\n values = CategoricalIndex(data)\n result = values.map(f, na_action=None)\n tm.assert_index_equal(result, expected)\n\n\ndef test_map_with_dict_or_series():\n orig_values = ["a", "B", 1, "a"]\n new_values = ["one", 2, 3.0, "one"]\n cur_index = CategoricalIndex(orig_values, name="XXX")\n expected = CategoricalIndex(new_values, name="XXX", categories=[3.0, 2, "one"])\n\n mapper = Series(new_values[:-1], index=orig_values[:-1])\n 
result = cur_index.map(mapper)\n # Order of categories in result can be different\n tm.assert_index_equal(result, expected)\n\n mapper = dict(zip(orig_values[:-1], new_values[:-1]))\n result = cur_index.map(mapper)\n # Order of categories in result can be different\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\test_map.py | test_map.py | Python | 4,664 | 0.95 | 0.055556 | 0.049587 | vue-tools | 74 | 2024-06-08T06:50:54.789866 | GPL-3.0 | true | 397f0ab7b1bce8bb629fbe4981bd4492 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n Categorical,\n CategoricalIndex,\n Index,\n Interval,\n)\nimport pandas._testing as tm\n\n\nclass TestReindex:\n def test_reindex_list_non_unique(self):\n # GH#11586\n msg = "cannot reindex on an axis with duplicate labels"\n ci = CategoricalIndex(["a", "b", "c", "a"])\n with pytest.raises(ValueError, match=msg):\n ci.reindex(["a", "c"])\n\n def test_reindex_categorical_non_unique(self):\n msg = "cannot reindex on an axis with duplicate labels"\n ci = CategoricalIndex(["a", "b", "c", "a"])\n with pytest.raises(ValueError, match=msg):\n ci.reindex(Categorical(["a", "c"]))\n\n def test_reindex_list_non_unique_unused_category(self):\n msg = "cannot reindex on an axis with duplicate labels"\n ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])\n with pytest.raises(ValueError, match=msg):\n ci.reindex(["a", "c"])\n\n def test_reindex_categorical_non_unique_unused_category(self):\n msg = "cannot reindex on an axis with duplicate labels"\n ci = CategoricalIndex(["a", "b", "c", "a"], categories=["a", "b", "c", "d"])\n with pytest.raises(ValueError, match=msg):\n ci.reindex(Categorical(["a", "c"]))\n\n def test_reindex_duplicate_target(self):\n # See GH25459\n cat = CategoricalIndex(["a", "b", "c"], categories=["a", "b", "c", "d"])\n res, indexer = cat.reindex(["a", "c", "c"])\n exp = Index(["a", "c", "c"])\n tm.assert_index_equal(res, exp, exact=True)\n tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))\n\n res, indexer = cat.reindex(\n CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])\n )\n exp = CategoricalIndex(["a", "c", "c"], categories=["a", "b", "c", "d"])\n tm.assert_index_equal(res, exp, exact=True)\n tm.assert_numpy_array_equal(indexer, np.array([0, 2, 2], dtype=np.intp))\n\n def test_reindex_empty_index(self):\n # See GH16770\n c = CategoricalIndex([])\n res, indexer = c.reindex(["a", "b"])\n tm.assert_index_equal(res, Index(["a", "b"]), 
exact=True)\n tm.assert_numpy_array_equal(indexer, np.array([-1, -1], dtype=np.intp))\n\n def test_reindex_categorical_added_category(self):\n # GH 42424\n ci = CategoricalIndex(\n [Interval(0, 1, closed="right"), Interval(1, 2, closed="right")],\n ordered=True,\n )\n ci_add = CategoricalIndex(\n [\n Interval(0, 1, closed="right"),\n Interval(1, 2, closed="right"),\n Interval(2, 3, closed="right"),\n Interval(3, 4, closed="right"),\n ],\n ordered=True,\n )\n result, _ = ci.reindex(ci_add)\n expected = ci_add\n tm.assert_index_equal(expected, result)\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\test_reindex.py | test_reindex.py | Python | 2,938 | 0.95 | 0.102564 | 0.058824 | react-lib | 7 | 2023-10-11T21:37:36.763386 | BSD-3-Clause | true | 1ef62907e8d8b0d343149f6830f54d3e |
import numpy as np\nimport pytest\n\nfrom pandas import (\n CategoricalIndex,\n Index,\n)\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize("na_value", [None, np.nan])\ndef test_difference_with_na(na_value):\n # GH 57318\n ci = CategoricalIndex(["a", "b", "c", None])\n other = Index(["c", na_value])\n result = ci.difference(other)\n expected = CategoricalIndex(["a", "b"], categories=["a", "b", "c"])\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\test_setops.py | test_setops.py | Python | 462 | 0.95 | 0.055556 | 0.066667 | awesome-app | 275 | 2024-10-02T10:32:14.678916 | Apache-2.0 | true | aa7a13ed1e8d023687197c176d376fbf |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\__pycache__\test_append.cpython-313.pyc | test_append.cpython-313.pyc | Other | 4,222 | 0.8 | 0 | 0 | awesome-app | 893 | 2024-06-19T19:58:59.360917 | Apache-2.0 | true | 0f63730b1babf9da69480c0a8ac166f4 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\__pycache__\test_astype.cpython-313.pyc | test_astype.cpython-313.pyc | Other | 4,422 | 0.8 | 0 | 0 | node-utils | 593 | 2024-11-11T00:47:59.539768 | BSD-3-Clause | true | 0e5e65e161a9ae64f49a0175f002a7f8 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\__pycache__\test_category.cpython-313.pyc | test_category.cpython-313.pyc | Other | 21,691 | 0.8 | 0.004545 | 0 | vue-tools | 877 | 2024-09-10T02:30:54.790492 | Apache-2.0 | true | 2620d3e65834ce1112b637a0cd83774b |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\__pycache__\test_constructors.cpython-313.pyc | test_constructors.cpython-313.pyc | Other | 7,997 | 0.8 | 0 | 0 | react-lib | 344 | 2023-09-05T17:13:27.313492 | Apache-2.0 | true | 89a257b3609a635763e3a85633ec4c95 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\__pycache__\test_equals.cpython-313.pyc | test_equals.cpython-313.pyc | Other | 6,524 | 0.8 | 0.018182 | 0.039216 | vue-tools | 165 | 2023-08-22T19:49:15.457741 | MIT | true | 2e6c7f69a7dfbb057e77c31b12f730e7 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\__pycache__\test_fillna.cpython-313.pyc | test_fillna.cpython-313.pyc | Other | 3,023 | 0.8 | 0 | 0 | python-kit | 216 | 2024-09-25T18:16:05.188830 | Apache-2.0 | true | 5be677888733f7f4c47b0556fc11a93a |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\__pycache__\test_formats.cpython-313.pyc | test_formats.cpython-313.pyc | Other | 6,738 | 0.8 | 0.015625 | 0 | vue-tools | 25 | 2025-04-22T19:05:37.529945 | BSD-3-Clause | true | 5d3a66d3a0734f69aca07258b2242357 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\__pycache__\test_indexing.cpython-313.pyc | test_indexing.cpython-313.pyc | Other | 24,814 | 0.95 | 0.01487 | 0 | node-utils | 543 | 2024-03-13T02:44:14.916263 | MIT | true | f87803148f5c79fbedc1bb4f65af759f |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\__pycache__\test_map.cpython-313.pyc | test_map.cpython-313.pyc | Other | 7,269 | 0.8 | 0 | 0.036585 | python-kit | 664 | 2025-01-20T09:19:25.833543 | BSD-3-Clause | true | f23a827c7a6641d389082e78c6ef7d81 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\__pycache__\test_reindex.cpython-313.pyc | test_reindex.cpython-313.pyc | Other | 4,908 | 0.8 | 0 | 0 | vue-tools | 745 | 2025-03-21T05:27:31.765596 | BSD-3-Clause | true | 52364ac6bba353af40f793f893548ba6 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\__pycache__\test_setops.cpython-313.pyc | test_setops.cpython-313.pyc | Other | 1,009 | 0.7 | 0 | 0 | vue-tools | 406 | 2024-12-27T00:16:24.137572 | Apache-2.0 | true | 984f314f553e2981425abbd636f6343c |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\categorical\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 207 | 0.7 | 0 | 0 | react-lib | 47 | 2024-10-31T03:26:20.151908 | BSD-3-Clause | true | 30f8294bf6eb7ac4e4e8f01f2b400a8f |
import numpy as np\nimport pytest\n\nfrom pandas import (\n PeriodIndex,\n Series,\n date_range,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\n\n\nclass DropDuplicates:\n def test_drop_duplicates_metadata(self, idx):\n # GH#10115\n result = idx.drop_duplicates()\n tm.assert_index_equal(idx, result)\n assert idx.freq == result.freq\n\n idx_dup = idx.append(idx)\n result = idx_dup.drop_duplicates()\n\n expected = idx\n if not isinstance(idx, PeriodIndex):\n # freq is reset except for PeriodIndex\n assert idx_dup.freq is None\n assert result.freq is None\n expected = idx._with_freq(None)\n else:\n assert result.freq == expected.freq\n\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "keep, expected, index",\n [\n (\n "first",\n np.concatenate(([False] * 10, [True] * 5)),\n np.arange(0, 10, dtype=np.int64),\n ),\n (\n "last",\n np.concatenate(([True] * 5, [False] * 10)),\n np.arange(5, 15, dtype=np.int64),\n ),\n (\n False,\n np.concatenate(([True] * 5, [False] * 5, [True] * 5)),\n np.arange(5, 10, dtype=np.int64),\n ),\n ],\n )\n def test_drop_duplicates(self, keep, expected, index, idx):\n # to check Index/Series compat\n idx = idx.append(idx[:5])\n\n tm.assert_numpy_array_equal(idx.duplicated(keep=keep), expected)\n expected = idx[~expected]\n\n result = idx.drop_duplicates(keep=keep)\n tm.assert_index_equal(result, expected)\n\n result = Series(idx).drop_duplicates(keep=keep)\n expected = Series(expected, index=index)\n tm.assert_series_equal(result, expected)\n\n\nclass TestDropDuplicatesPeriodIndex(DropDuplicates):\n @pytest.fixture(params=["D", "3D", "h", "2h", "min", "2min", "s", "3s"])\n def freq(self, request):\n return request.param\n\n @pytest.fixture\n def idx(self, freq):\n return period_range("2011-01-01", periods=10, freq=freq, name="idx")\n\n\nclass TestDropDuplicatesDatetimeIndex(DropDuplicates):\n @pytest.fixture\n def idx(self, freq_sample):\n return date_range("2011-01-01", freq=freq_sample, 
periods=10, name="idx")\n\n\nclass TestDropDuplicatesTimedeltaIndex(DropDuplicates):\n @pytest.fixture\n def idx(self, freq_sample):\n return timedelta_range("1 day", periods=10, freq=freq_sample, name="idx")\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\test_drop_duplicates.py | test_drop_duplicates.py | Python | 2,600 | 0.95 | 0.134831 | 0.041667 | react-lib | 605 | 2025-01-25T18:41:20.304244 | MIT | true | 0d6e75e240697044f47cae5f5c9ca08b |
"""\nTests shared for DatetimeIndex/TimedeltaIndex/PeriodIndex\n"""\nfrom datetime import (\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n CategoricalIndex,\n DatetimeIndex,\n Index,\n PeriodIndex,\n TimedeltaIndex,\n date_range,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\n\n\nclass EqualsTests:\n def test_not_equals_numeric(self, index):\n assert not index.equals(Index(index.asi8))\n assert not index.equals(Index(index.asi8.astype("u8")))\n assert not index.equals(Index(index.asi8).astype("f8"))\n\n def test_equals(self, index):\n assert index.equals(index)\n assert index.equals(index.astype(object))\n assert index.equals(CategoricalIndex(index))\n assert index.equals(CategoricalIndex(index.astype(object)))\n\n def test_not_equals_non_arraylike(self, index):\n assert not index.equals(list(index))\n\n def test_not_equals_strings(self, index):\n other = Index([str(x) for x in index], dtype=object)\n assert not index.equals(other)\n assert not index.equals(CategoricalIndex(other))\n\n def test_not_equals_misc_strs(self, index):\n other = Index(list("abc"))\n assert not index.equals(other)\n\n\nclass TestPeriodIndexEquals(EqualsTests):\n @pytest.fixture\n def index(self):\n return period_range("2013-01-01", periods=5, freq="D")\n\n # TODO: de-duplicate with other test_equals2 methods\n @pytest.mark.parametrize("freq", ["D", "M"])\n def test_equals2(self, freq):\n # GH#13107\n idx = PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq=freq)\n assert idx.equals(idx)\n assert idx.equals(idx.copy())\n assert idx.equals(idx.astype(object))\n assert idx.astype(object).equals(idx)\n assert idx.astype(object).equals(idx.astype(object))\n assert not idx.equals(list(idx))\n assert not idx.equals(pd.Series(idx))\n\n idx2 = PeriodIndex(["2011-01-01", "2011-01-02", "NaT"], freq="h")\n assert not idx.equals(idx2)\n assert not idx.equals(idx2.copy())\n assert not 
idx.equals(idx2.astype(object))\n assert not idx.astype(object).equals(idx2)\n assert not idx.equals(list(idx2))\n assert not idx.equals(pd.Series(idx2))\n\n # same internal, different tz\n idx3 = PeriodIndex._simple_new(\n idx._values._simple_new(idx._values.asi8, dtype=pd.PeriodDtype("h"))\n )\n tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)\n assert not idx.equals(idx3)\n assert not idx.equals(idx3.copy())\n assert not idx.equals(idx3.astype(object))\n assert not idx.astype(object).equals(idx3)\n assert not idx.equals(list(idx3))\n assert not idx.equals(pd.Series(idx3))\n\n\nclass TestDatetimeIndexEquals(EqualsTests):\n @pytest.fixture\n def index(self):\n return date_range("2013-01-01", periods=5)\n\n def test_equals2(self):\n # GH#13107\n idx = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"])\n assert idx.equals(idx)\n assert idx.equals(idx.copy())\n assert idx.equals(idx.astype(object))\n assert idx.astype(object).equals(idx)\n assert idx.astype(object).equals(idx.astype(object))\n assert not idx.equals(list(idx))\n assert not idx.equals(pd.Series(idx))\n\n idx2 = DatetimeIndex(["2011-01-01", "2011-01-02", "NaT"], tz="US/Pacific")\n assert not idx.equals(idx2)\n assert not idx.equals(idx2.copy())\n assert not idx.equals(idx2.astype(object))\n assert not idx.astype(object).equals(idx2)\n assert not idx.equals(list(idx2))\n assert not idx.equals(pd.Series(idx2))\n\n # same internal, different tz\n idx3 = DatetimeIndex(idx.asi8, tz="US/Pacific")\n tm.assert_numpy_array_equal(idx.asi8, idx3.asi8)\n assert not idx.equals(idx3)\n assert not idx.equals(idx3.copy())\n assert not idx.equals(idx3.astype(object))\n assert not idx.astype(object).equals(idx3)\n assert not idx.equals(list(idx3))\n assert not idx.equals(pd.Series(idx3))\n\n # check that we do not raise when comparing with OutOfBounds objects\n oob = Index([datetime(2500, 1, 1)] * 3, dtype=object)\n assert not idx.equals(oob)\n assert not idx2.equals(oob)\n assert not idx3.equals(oob)\n\n # check that we 
do not raise when comparing with OutOfBounds dt64\n oob2 = oob.map(np.datetime64)\n assert not idx.equals(oob2)\n assert not idx2.equals(oob2)\n assert not idx3.equals(oob2)\n\n @pytest.mark.parametrize("freq", ["B", "C"])\n def test_not_equals_bday(self, freq):\n rng = date_range("2009-01-01", "2010-01-01", freq=freq)\n assert not rng.equals(list(rng))\n\n\nclass TestTimedeltaIndexEquals(EqualsTests):\n @pytest.fixture\n def index(self):\n return timedelta_range("1 day", periods=10)\n\n def test_equals2(self):\n # GH#13107\n idx = TimedeltaIndex(["1 days", "2 days", "NaT"])\n assert idx.equals(idx)\n assert idx.equals(idx.copy())\n assert idx.equals(idx.astype(object))\n assert idx.astype(object).equals(idx)\n assert idx.astype(object).equals(idx.astype(object))\n assert not idx.equals(list(idx))\n assert not idx.equals(pd.Series(idx))\n\n idx2 = TimedeltaIndex(["2 days", "1 days", "NaT"])\n assert not idx.equals(idx2)\n assert not idx.equals(idx2.copy())\n assert not idx.equals(idx2.astype(object))\n assert not idx.astype(object).equals(idx2)\n assert not idx.astype(object).equals(idx2.astype(object))\n assert not idx.equals(list(idx2))\n assert not idx.equals(pd.Series(idx2))\n\n # Check that we dont raise OverflowError on comparisons outside the\n # implementation range GH#28532\n oob = Index([timedelta(days=10**6)] * 3, dtype=object)\n assert not idx.equals(oob)\n assert not idx2.equals(oob)\n\n oob2 = Index([np.timedelta64(x) for x in oob], dtype=object)\n assert (oob == oob2).all()\n assert not idx.equals(oob2)\n assert not idx2.equals(oob2)\n\n oob3 = oob.map(np.timedelta64)\n assert (oob3 == oob).all()\n assert not idx.equals(oob3)\n assert not idx2.equals(oob3)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\test_equals.py | test_equals.py | Python | 6,348 | 0.95 | 0.104972 | 0.065359 | react-lib | 599 | 2025-07-01T07:44:10.098352 | BSD-3-Clause | true | 373960eb476cfe4e5f7c82b5ce8de861 |
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DatetimeIndex,\n Index,\n)\nimport pandas._testing as tm\n\ndtlike_dtypes = [\n np.dtype("timedelta64[ns]"),\n np.dtype("datetime64[ns]"),\n pd.DatetimeTZDtype("ns", "Asia/Tokyo"),\n pd.PeriodDtype("ns"),\n]\n\n\n@pytest.mark.parametrize("ldtype", dtlike_dtypes)\n@pytest.mark.parametrize("rdtype", dtlike_dtypes)\ndef test_get_indexer_non_unique_wrong_dtype(ldtype, rdtype):\n vals = np.tile(3600 * 10**9 * np.arange(3, dtype=np.int64), 2)\n\n def construct(dtype):\n if dtype is dtlike_dtypes[-1]:\n # PeriodArray will try to cast ints to strings\n return DatetimeIndex(vals).astype(dtype)\n return Index(vals, dtype=dtype)\n\n left = construct(ldtype)\n right = construct(rdtype)\n\n result = left.get_indexer_non_unique(right)\n\n if ldtype is rdtype:\n ex1 = np.array([0, 3, 1, 4, 2, 5] * 2, dtype=np.intp)\n ex2 = np.array([], dtype=np.intp)\n tm.assert_numpy_array_equal(result[0], ex1)\n tm.assert_numpy_array_equal(result[1], ex2)\n\n else:\n no_matches = np.array([-1] * 6, dtype=np.intp)\n missing = np.arange(6, dtype=np.intp)\n tm.assert_numpy_array_equal(result[0], no_matches)\n tm.assert_numpy_array_equal(result[1], missing)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\test_indexing.py | test_indexing.py | Python | 1,310 | 0.95 | 0.111111 | 0.027778 | awesome-app | 756 | 2025-03-25T22:58:02.344914 | MIT | true | 4d15fc89612f0315ac334d2d6240cf27 |
from pandas import (\n Index,\n NaT,\n date_range,\n)\n\n\ndef test_is_monotonic_with_nat():\n # GH#31437\n # PeriodIndex.is_monotonic_increasing should behave analogously to DatetimeIndex,\n # in particular never be monotonic when we have NaT\n dti = date_range("2016-01-01", periods=3)\n pi = dti.to_period("D")\n tdi = Index(dti.view("timedelta64[ns]"))\n\n for obj in [pi, pi._engine, dti, dti._engine, tdi, tdi._engine]:\n if isinstance(obj, Index):\n # i.e. not Engines\n assert obj.is_monotonic_increasing\n assert obj.is_monotonic_increasing\n assert not obj.is_monotonic_decreasing\n assert obj.is_unique\n\n dti1 = dti.insert(0, NaT)\n pi1 = dti1.to_period("D")\n tdi1 = Index(dti1.view("timedelta64[ns]"))\n\n for obj in [pi1, pi1._engine, dti1, dti1._engine, tdi1, tdi1._engine]:\n if isinstance(obj, Index):\n # i.e. not Engines\n assert not obj.is_monotonic_increasing\n assert not obj.is_monotonic_increasing\n assert not obj.is_monotonic_decreasing\n assert obj.is_unique\n\n dti2 = dti.insert(3, NaT)\n pi2 = dti2.to_period("h")\n tdi2 = Index(dti2.view("timedelta64[ns]"))\n\n for obj in [pi2, pi2._engine, dti2, dti2._engine, tdi2, tdi2._engine]:\n if isinstance(obj, Index):\n # i.e. not Engines\n assert not obj.is_monotonic_increasing\n assert not obj.is_monotonic_increasing\n assert not obj.is_monotonic_decreasing\n assert obj.is_unique\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\test_is_monotonic.py | test_is_monotonic.py | Python | 1,522 | 0.95 | 0.152174 | 0.153846 | vue-tools | 318 | 2024-11-29T07:30:37.946314 | MIT | true | 1ebbb48965767b0691ba87b13c4fa430 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n DatetimeIndex,\n NaT,\n PeriodIndex,\n TimedeltaIndex,\n)\nimport pandas._testing as tm\n\n\nclass NATests:\n def test_nat(self, index_without_na):\n empty_index = index_without_na[:0]\n\n index_with_na = index_without_na.copy(deep=True)\n index_with_na._data[1] = NaT\n\n assert empty_index._na_value is NaT\n assert index_with_na._na_value is NaT\n assert index_without_na._na_value is NaT\n\n idx = index_without_na\n assert idx._can_hold_na\n\n tm.assert_numpy_array_equal(idx._isnan, np.array([False, False]))\n assert idx.hasnans is False\n\n idx = index_with_na\n assert idx._can_hold_na\n\n tm.assert_numpy_array_equal(idx._isnan, np.array([False, True]))\n assert idx.hasnans is True\n\n\nclass TestDatetimeIndexNA(NATests):\n @pytest.fixture\n def index_without_na(self, tz_naive_fixture):\n tz = tz_naive_fixture\n return DatetimeIndex(["2011-01-01", "2011-01-02"], tz=tz)\n\n\nclass TestTimedeltaIndexNA(NATests):\n @pytest.fixture\n def index_without_na(self):\n return TimedeltaIndex(["1 days", "2 days"])\n\n\nclass TestPeriodIndexNA(NATests):\n @pytest.fixture\n def index_without_na(self):\n return PeriodIndex(["2011-01-01", "2011-01-02"], freq="D")\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\test_nat.py | test_nat.py | Python | 1,335 | 0.85 | 0.150943 | 0 | react-lib | 223 | 2024-04-25T22:15:57.500814 | GPL-3.0 | true | c11acec95c5f5ad0c4bd3dff46251742 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n DatetimeIndex,\n Index,\n NaT,\n PeriodIndex,\n TimedeltaIndex,\n timedelta_range,\n)\nimport pandas._testing as tm\n\n\ndef check_freq_ascending(ordered, orig, ascending):\n """\n Check the expected freq on a PeriodIndex/DatetimeIndex/TimedeltaIndex\n when the original index is generated (or generate-able) with\n period_range/date_range/timedelta_range.\n """\n if isinstance(ordered, PeriodIndex):\n assert ordered.freq == orig.freq\n elif isinstance(ordered, (DatetimeIndex, TimedeltaIndex)):\n if ascending:\n assert ordered.freq.n == orig.freq.n\n else:\n assert ordered.freq.n == -1 * orig.freq.n\n\n\ndef check_freq_nonmonotonic(ordered, orig):\n """\n Check the expected freq on a PeriodIndex/DatetimeIndex/TimedeltaIndex\n when the original index is _not_ generated (or generate-able) with\n period_range/date_range//timedelta_range.\n """\n if isinstance(ordered, PeriodIndex):\n assert ordered.freq == orig.freq\n elif isinstance(ordered, (DatetimeIndex, TimedeltaIndex)):\n assert ordered.freq is None\n\n\nclass TestSortValues:\n @pytest.fixture(params=[DatetimeIndex, TimedeltaIndex, PeriodIndex])\n def non_monotonic_idx(self, request):\n if request.param is DatetimeIndex:\n return DatetimeIndex(["2000-01-04", "2000-01-01", "2000-01-02"])\n elif request.param is PeriodIndex:\n dti = DatetimeIndex(["2000-01-04", "2000-01-01", "2000-01-02"])\n return dti.to_period("D")\n else:\n return TimedeltaIndex(\n ["1 day 00:00:05", "1 day 00:00:01", "1 day 00:00:02"]\n )\n\n def test_argmin_argmax(self, non_monotonic_idx):\n assert non_monotonic_idx.argmin() == 1\n assert non_monotonic_idx.argmax() == 0\n\n def test_sort_values(self, non_monotonic_idx):\n idx = non_monotonic_idx\n ordered = idx.sort_values()\n assert ordered.is_monotonic_increasing\n ordered = idx.sort_values(ascending=False)\n assert ordered[::-1].is_monotonic_increasing\n\n ordered, dexer = idx.sort_values(return_indexer=True)\n assert 
ordered.is_monotonic_increasing\n tm.assert_numpy_array_equal(dexer, np.array([1, 2, 0], dtype=np.intp))\n\n ordered, dexer = idx.sort_values(return_indexer=True, ascending=False)\n assert ordered[::-1].is_monotonic_increasing\n tm.assert_numpy_array_equal(dexer, np.array([0, 2, 1], dtype=np.intp))\n\n def check_sort_values_with_freq(self, idx):\n ordered = idx.sort_values()\n tm.assert_index_equal(ordered, idx)\n check_freq_ascending(ordered, idx, True)\n\n ordered = idx.sort_values(ascending=False)\n expected = idx[::-1]\n tm.assert_index_equal(ordered, expected)\n check_freq_ascending(ordered, idx, False)\n\n ordered, indexer = idx.sort_values(return_indexer=True)\n tm.assert_index_equal(ordered, idx)\n tm.assert_numpy_array_equal(indexer, np.array([0, 1, 2], dtype=np.intp))\n check_freq_ascending(ordered, idx, True)\n\n ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)\n expected = idx[::-1]\n tm.assert_index_equal(ordered, expected)\n tm.assert_numpy_array_equal(indexer, np.array([2, 1, 0], dtype=np.intp))\n check_freq_ascending(ordered, idx, False)\n\n @pytest.mark.parametrize("freq", ["D", "h"])\n def test_sort_values_with_freq_timedeltaindex(self, freq):\n # GH#10295\n idx = timedelta_range(start=f"1{freq}", periods=3, freq=freq).rename("idx")\n\n self.check_sort_values_with_freq(idx)\n\n @pytest.mark.parametrize(\n "idx",\n [\n DatetimeIndex(\n ["2011-01-01", "2011-01-02", "2011-01-03"], freq="D", name="idx"\n ),\n DatetimeIndex(\n ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],\n freq="h",\n name="tzidx",\n tz="Asia/Tokyo",\n ),\n ],\n )\n def test_sort_values_with_freq_datetimeindex(self, idx):\n self.check_sort_values_with_freq(idx)\n\n @pytest.mark.parametrize("freq", ["D", "2D", "4D"])\n def test_sort_values_with_freq_periodindex(self, freq):\n # here with_freq refers to being period_range-like\n idx = PeriodIndex(\n ["2011-01-01", "2011-01-02", "2011-01-03"], freq=freq, name="idx"\n )\n 
self.check_sort_values_with_freq(idx)\n\n @pytest.mark.parametrize(\n "idx",\n [\n PeriodIndex(["2011", "2012", "2013"], name="pidx", freq="Y"),\n Index([2011, 2012, 2013], name="idx"), # for compatibility check\n ],\n )\n def test_sort_values_with_freq_periodindex2(self, idx):\n # here with_freq indicates this is period_range-like\n self.check_sort_values_with_freq(idx)\n\n def check_sort_values_without_freq(self, idx, expected):\n ordered = idx.sort_values(na_position="first")\n tm.assert_index_equal(ordered, expected)\n check_freq_nonmonotonic(ordered, idx)\n\n if not idx.isna().any():\n ordered = idx.sort_values()\n tm.assert_index_equal(ordered, expected)\n check_freq_nonmonotonic(ordered, idx)\n\n ordered = idx.sort_values(ascending=False)\n tm.assert_index_equal(ordered, expected[::-1])\n check_freq_nonmonotonic(ordered, idx)\n\n ordered, indexer = idx.sort_values(return_indexer=True, na_position="first")\n tm.assert_index_equal(ordered, expected)\n\n exp = np.array([0, 4, 3, 1, 2], dtype=np.intp)\n tm.assert_numpy_array_equal(indexer, exp)\n check_freq_nonmonotonic(ordered, idx)\n\n if not idx.isna().any():\n ordered, indexer = idx.sort_values(return_indexer=True)\n tm.assert_index_equal(ordered, expected)\n\n exp = np.array([0, 4, 3, 1, 2], dtype=np.intp)\n tm.assert_numpy_array_equal(indexer, exp)\n check_freq_nonmonotonic(ordered, idx)\n\n ordered, indexer = idx.sort_values(return_indexer=True, ascending=False)\n tm.assert_index_equal(ordered, expected[::-1])\n\n exp = np.array([2, 1, 3, 0, 4], dtype=np.intp)\n tm.assert_numpy_array_equal(indexer, exp)\n check_freq_nonmonotonic(ordered, idx)\n\n def test_sort_values_without_freq_timedeltaindex(self):\n # GH#10295\n\n idx = TimedeltaIndex(\n ["1 hour", "3 hour", "5 hour", "2 hour ", "1 hour"], name="idx1"\n )\n expected = TimedeltaIndex(\n ["1 hour", "1 hour", "2 hour", "3 hour", "5 hour"], name="idx1"\n )\n self.check_sort_values_without_freq(idx, expected)\n\n @pytest.mark.parametrize(\n 
"index_dates,expected_dates",\n [\n (\n ["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],\n ["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],\n ),\n (\n ["2011-01-01", "2011-01-03", "2011-01-05", "2011-01-02", "2011-01-01"],\n ["2011-01-01", "2011-01-01", "2011-01-02", "2011-01-03", "2011-01-05"],\n ),\n (\n [NaT, "2011-01-03", "2011-01-05", "2011-01-02", NaT],\n [NaT, NaT, "2011-01-02", "2011-01-03", "2011-01-05"],\n ),\n ],\n )\n def test_sort_values_without_freq_datetimeindex(\n self, index_dates, expected_dates, tz_naive_fixture\n ):\n tz = tz_naive_fixture\n\n # without freq\n idx = DatetimeIndex(index_dates, tz=tz, name="idx")\n expected = DatetimeIndex(expected_dates, tz=tz, name="idx")\n\n self.check_sort_values_without_freq(idx, expected)\n\n @pytest.mark.parametrize(\n "idx,expected",\n [\n (\n PeriodIndex(\n [\n "2011-01-01",\n "2011-01-03",\n "2011-01-05",\n "2011-01-02",\n "2011-01-01",\n ],\n freq="D",\n name="idx1",\n ),\n PeriodIndex(\n [\n "2011-01-01",\n "2011-01-01",\n "2011-01-02",\n "2011-01-03",\n "2011-01-05",\n ],\n freq="D",\n name="idx1",\n ),\n ),\n (\n PeriodIndex(\n [\n "2011-01-01",\n "2011-01-03",\n "2011-01-05",\n "2011-01-02",\n "2011-01-01",\n ],\n freq="D",\n name="idx2",\n ),\n PeriodIndex(\n [\n "2011-01-01",\n "2011-01-01",\n "2011-01-02",\n "2011-01-03",\n "2011-01-05",\n ],\n freq="D",\n name="idx2",\n ),\n ),\n (\n PeriodIndex(\n [NaT, "2011-01-03", "2011-01-05", "2011-01-02", NaT],\n freq="D",\n name="idx3",\n ),\n PeriodIndex(\n [NaT, NaT, "2011-01-02", "2011-01-03", "2011-01-05"],\n freq="D",\n name="idx3",\n ),\n ),\n (\n PeriodIndex(\n ["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="Y"\n ),\n PeriodIndex(\n ["2011", "2011", "2012", "2013", "2015"], name="pidx", freq="Y"\n ),\n ),\n (\n # For compatibility check\n Index([2011, 2013, 2015, 2012, 2011], name="idx"),\n Index([2011, 2011, 2012, 2013, 2015], name="idx"),\n ),\n ],\n )\n def 
test_sort_values_without_freq_periodindex(self, idx, expected):\n # here without_freq means not generateable by period_range\n self.check_sort_values_without_freq(idx, expected)\n\n def test_sort_values_without_freq_periodindex_nat(self):\n # doesn't quite fit into check_sort_values_without_freq\n idx = PeriodIndex(["2011", "2013", "NaT", "2011"], name="pidx", freq="D")\n expected = PeriodIndex(["NaT", "2011", "2011", "2013"], name="pidx", freq="D")\n\n ordered = idx.sort_values(na_position="first")\n tm.assert_index_equal(ordered, expected)\n check_freq_nonmonotonic(ordered, idx)\n\n ordered = idx.sort_values(ascending=False)\n tm.assert_index_equal(ordered, expected[::-1])\n check_freq_nonmonotonic(ordered, idx)\n\n\ndef test_order_stability_compat():\n # GH#35922. sort_values is stable both for normal and datetime-like Index\n pidx = PeriodIndex(["2011", "2013", "2015", "2012", "2011"], name="pidx", freq="Y")\n iidx = Index([2011, 2013, 2015, 2012, 2011], name="idx")\n ordered1, indexer1 = pidx.sort_values(return_indexer=True, ascending=False)\n ordered2, indexer2 = iidx.sort_values(return_indexer=True, ascending=False)\n tm.assert_numpy_array_equal(indexer1, indexer2)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\test_sort_values.py | test_sort_values.py | Python | 11,463 | 0.95 | 0.079365 | 0.032727 | awesome-app | 546 | 2025-03-15T13:19:11.302650 | GPL-3.0 | true | 743104fd19917917a47310fcfa7fdc39 |
import numpy as np\n\nfrom pandas import (\n DatetimeIndex,\n NaT,\n PeriodIndex,\n Series,\n TimedeltaIndex,\n date_range,\n period_range,\n timedelta_range,\n)\nimport pandas._testing as tm\n\n\nclass TestValueCounts:\n # GH#7735\n\n def test_value_counts_unique_datetimeindex(self, tz_naive_fixture):\n tz = tz_naive_fixture\n orig = date_range("2011-01-01 09:00", freq="h", periods=10, tz=tz)\n self._check_value_counts_with_repeats(orig)\n\n def test_value_counts_unique_timedeltaindex(self):\n orig = timedelta_range("1 days 09:00:00", freq="h", periods=10)\n self._check_value_counts_with_repeats(orig)\n\n def test_value_counts_unique_periodindex(self):\n orig = period_range("2011-01-01 09:00", freq="h", periods=10)\n self._check_value_counts_with_repeats(orig)\n\n def _check_value_counts_with_repeats(self, orig):\n # create repeated values, 'n'th element is repeated by n+1 times\n idx = type(orig)(\n np.repeat(orig._values, range(1, len(orig) + 1)), dtype=orig.dtype\n )\n\n exp_idx = orig[::-1]\n if not isinstance(exp_idx, PeriodIndex):\n exp_idx = exp_idx._with_freq(None)\n expected = Series(range(10, 0, -1), index=exp_idx, dtype="int64", name="count")\n\n for obj in [idx, Series(idx)]:\n tm.assert_series_equal(obj.value_counts(), expected)\n\n tm.assert_index_equal(idx.unique(), orig)\n\n def test_value_counts_unique_datetimeindex2(self, tz_naive_fixture):\n tz = tz_naive_fixture\n idx = DatetimeIndex(\n [\n "2013-01-01 09:00",\n "2013-01-01 09:00",\n "2013-01-01 09:00",\n "2013-01-01 08:00",\n "2013-01-01 08:00",\n NaT,\n ],\n tz=tz,\n )\n self._check_value_counts_dropna(idx)\n\n def test_value_counts_unique_timedeltaindex2(self):\n idx = TimedeltaIndex(\n [\n "1 days 09:00:00",\n "1 days 09:00:00",\n "1 days 09:00:00",\n "1 days 08:00:00",\n "1 days 08:00:00",\n NaT,\n ]\n )\n self._check_value_counts_dropna(idx)\n\n def test_value_counts_unique_periodindex2(self):\n idx = PeriodIndex(\n [\n "2013-01-01 09:00",\n "2013-01-01 09:00",\n "2013-01-01 09:00",\n 
"2013-01-01 08:00",\n "2013-01-01 08:00",\n NaT,\n ],\n freq="h",\n )\n self._check_value_counts_dropna(idx)\n\n def _check_value_counts_dropna(self, idx):\n exp_idx = idx[[2, 3]]\n expected = Series([3, 2], index=exp_idx, name="count")\n\n for obj in [idx, Series(idx)]:\n tm.assert_series_equal(obj.value_counts(), expected)\n\n exp_idx = idx[[2, 3, -1]]\n expected = Series([3, 2, 1], index=exp_idx, name="count")\n\n for obj in [idx, Series(idx)]:\n tm.assert_series_equal(obj.value_counts(dropna=False), expected)\n\n tm.assert_index_equal(idx.unique(), exp_idx)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\test_value_counts.py | test_value_counts.py | Python | 3,150 | 0.95 | 0.126214 | 0.023529 | react-lib | 683 | 2024-05-28T16:56:24.300580 | BSD-3-Clause | true | bd8dc1700eb6adfc379fb87619d4a5ce |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\__pycache__\test_drop_duplicates.cpython-313.pyc | test_drop_duplicates.cpython-313.pyc | Other | 4,717 | 0.8 | 0 | 0 | vue-tools | 66 | 2023-12-16T15:05:23.178553 | MIT | true | e9b86da302b2cd85a6beb619f6f46028 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\__pycache__\test_equals.cpython-313.pyc | test_equals.cpython-313.pyc | Other | 13,744 | 0.8 | 0.027027 | 0 | vue-tools | 602 | 2025-04-28T16:29:50.233856 | GPL-3.0 | true | a8b0b725edcb0346a624b174d361cf8c |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\__pycache__\test_indexing.cpython-313.pyc | test_indexing.cpython-313.pyc | Other | 2,588 | 0.8 | 0 | 0 | node-utils | 113 | 2023-07-18T14:56:31.561699 | BSD-3-Clause | true | 05e4dc363d2508bb60a956e2f46f2d6f |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\__pycache__\test_is_monotonic.cpython-313.pyc | test_is_monotonic.cpython-313.pyc | Other | 2,310 | 0.8 | 0 | 0 | awesome-app | 642 | 2025-03-31T11:09:25.954596 | MIT | true | 306753b6eb39a65631d168e6c3832e35 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\__pycache__\test_nat.cpython-313.pyc | test_nat.cpython-313.pyc | Other | 3,058 | 0.8 | 0 | 0 | react-lib | 97 | 2023-12-13T07:43:46.151089 | BSD-3-Clause | true | fc2b8d7237054b560b3ca3d7df690950 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\__pycache__\test_sort_values.cpython-313.pyc | test_sort_values.cpython-313.pyc | Other | 12,525 | 0.8 | 0 | 0 | vue-tools | 856 | 2023-09-04T08:35:50.847721 | BSD-3-Clause | true | 1aeb2b66f97a4ce777360236235e06bb |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\__pycache__\test_value_counts.cpython-313.pyc | test_value_counts.cpython-313.pyc | Other | 4,735 | 0.8 | 0 | 0 | python-kit | 206 | 2025-01-11T08:35:56.529180 | BSD-3-Clause | true | b6e4a203ec8b1f5ad038a8dbdfefda41 |
\n\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimelike_\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 209 | 0.7 | 0 | 0 | vue-tools | 540 | 2024-06-24T00:25:58.821141 | GPL-3.0 | true | 56d9b3830977f320b65f190a8b6e9ce0 |
# Arithmetic tests specific to DatetimeIndex are generally about `freq`\n# rentention or inference. Other arithmetic tests belong in\n# tests/arithmetic/test_datetime64.py\nimport pytest\n\nfrom pandas import (\n Timedelta,\n TimedeltaIndex,\n Timestamp,\n date_range,\n timedelta_range,\n)\nimport pandas._testing as tm\n\n\nclass TestDatetimeIndexArithmetic:\n def test_add_timedelta_preserves_freq(self):\n # GH#37295 should hold for any DTI with freq=None or Tick freq\n tz = "Canada/Eastern"\n dti = date_range(\n start=Timestamp("2019-03-26 00:00:00-0400", tz=tz),\n end=Timestamp("2020-10-17 00:00:00-0400", tz=tz),\n freq="D",\n )\n result = dti + Timedelta(days=1)\n assert result.freq == dti.freq\n\n def test_sub_datetime_preserves_freq(self, tz_naive_fixture):\n # GH#48818\n dti = date_range("2016-01-01", periods=12, tz=tz_naive_fixture)\n\n res = dti - dti[0]\n expected = timedelta_range("0 Days", "11 Days")\n tm.assert_index_equal(res, expected)\n assert res.freq == expected.freq\n\n @pytest.mark.xfail(\n reason="The inherited freq is incorrect bc dti.freq is incorrect "\n "https://github.com/pandas-dev/pandas/pull/48818/files#r982793461"\n )\n def test_sub_datetime_preserves_freq_across_dst(self):\n # GH#48818\n ts = Timestamp("2016-03-11", tz="US/Pacific")\n dti = date_range(ts, periods=4)\n\n res = dti - dti[0]\n expected = TimedeltaIndex(\n [\n Timedelta(days=0),\n Timedelta(days=1),\n Timedelta(days=2),\n Timedelta(days=2, hours=23),\n ]\n )\n tm.assert_index_equal(res, expected)\n assert res.freq == expected.freq\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_arithmetic.py | test_arithmetic.py | Python | 1,796 | 0.95 | 0.089286 | 0.122449 | vue-tools | 756 | 2024-04-24T11:05:26.962257 | BSD-3-Clause | true | 281d775c44f2f32a3c213a23dbc8e724 |
from __future__ import annotations\n\nfrom datetime import (\n datetime,\n timedelta,\n timezone,\n)\nfrom functools import partial\nfrom operator import attrgetter\n\nimport dateutil\nimport dateutil.tz\nfrom dateutil.tz import gettz\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs import (\n OutOfBoundsDatetime,\n astype_overflowsafe,\n timezones,\n)\n\nimport pandas as pd\nfrom pandas import (\n DatetimeIndex,\n Index,\n Timestamp,\n date_range,\n offsets,\n to_datetime,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import period_array\n\n\nclass TestDatetimeIndex:\n def test_closed_deprecated(self):\n # GH#52628\n msg = "The 'closed' keyword"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n DatetimeIndex([], closed=True)\n\n def test_normalize_deprecated(self):\n # GH#52628\n msg = "The 'normalize' keyword"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n DatetimeIndex([], normalize=True)\n\n def test_from_dt64_unsupported_unit(self):\n # GH#49292\n val = np.datetime64(1, "D")\n result = DatetimeIndex([val], tz="US/Pacific")\n\n expected = DatetimeIndex([val.astype("M8[s]")], tz="US/Pacific")\n tm.assert_index_equal(result, expected)\n\n def test_explicit_tz_none(self):\n # GH#48659\n dti = date_range("2016-01-01", periods=10, tz="UTC")\n\n msg = "Passed data is timezone-aware, incompatible with 'tz=None'"\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(dti, tz=None)\n\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(np.array(dti), tz=None)\n\n msg = "Cannot pass both a timezone-aware dtype and tz=None"\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex([], dtype="M8[ns, UTC]", tz=None)\n\n def test_freq_validation_with_nat(self):\n # GH#11587 make sure we get a useful error message when generate_range\n # raises\n msg = (\n "Inferred frequency None from passed values does not conform "\n "to passed frequency D"\n )\n with pytest.raises(ValueError, 
match=msg):\n DatetimeIndex([pd.NaT, Timestamp("2011-01-01")], freq="D")\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex([pd.NaT, Timestamp("2011-01-01")._value], freq="D")\n\n # TODO: better place for tests shared by DTI/TDI?\n @pytest.mark.parametrize(\n "index",\n [\n date_range("2016-01-01", periods=5, tz="US/Pacific"),\n pd.timedelta_range("1 Day", periods=5),\n ],\n )\n def test_shallow_copy_inherits_array_freq(self, index):\n # If we pass a DTA/TDA to shallow_copy and dont specify a freq,\n # we should inherit the array's freq, not our own.\n array = index._data\n\n arr = array[[0, 3, 2, 4, 1]]\n assert arr.freq is None\n\n result = index._shallow_copy(arr)\n assert result.freq is None\n\n def test_categorical_preserves_tz(self):\n # GH#18664 retain tz when going DTI-->Categorical-->DTI\n dti = DatetimeIndex(\n [pd.NaT, "2015-01-01", "1999-04-06 15:14:13", "2015-01-01"], tz="US/Eastern"\n )\n\n for dtobj in [dti, dti._data]:\n # works for DatetimeIndex or DatetimeArray\n\n ci = pd.CategoricalIndex(dtobj)\n carr = pd.Categorical(dtobj)\n cser = pd.Series(ci)\n\n for obj in [ci, carr, cser]:\n result = DatetimeIndex(obj)\n tm.assert_index_equal(result, dti)\n\n def test_dti_with_period_data_raises(self):\n # GH#23675\n data = pd.PeriodIndex(["2016Q1", "2016Q2"], freq="Q")\n\n with pytest.raises(TypeError, match="PeriodDtype data is invalid"):\n DatetimeIndex(data)\n\n with pytest.raises(TypeError, match="PeriodDtype data is invalid"):\n to_datetime(data)\n\n with pytest.raises(TypeError, match="PeriodDtype data is invalid"):\n DatetimeIndex(period_array(data))\n\n with pytest.raises(TypeError, match="PeriodDtype data is invalid"):\n to_datetime(period_array(data))\n\n def test_dti_with_timedelta64_data_raises(self):\n # GH#23675 deprecated, enforrced in GH#29794\n data = np.array([0], dtype="m8[ns]")\n msg = r"timedelta64\[ns\] cannot be converted to datetime64"\n with pytest.raises(TypeError, match=msg):\n DatetimeIndex(data)\n\n with 
pytest.raises(TypeError, match=msg):\n to_datetime(data)\n\n with pytest.raises(TypeError, match=msg):\n DatetimeIndex(pd.TimedeltaIndex(data))\n\n with pytest.raises(TypeError, match=msg):\n to_datetime(pd.TimedeltaIndex(data))\n\n def test_constructor_from_sparse_array(self):\n # https://github.com/pandas-dev/pandas/issues/35843\n values = [\n Timestamp("2012-05-01T01:00:00.000000"),\n Timestamp("2016-05-01T01:00:00.000000"),\n ]\n arr = pd.arrays.SparseArray(values)\n result = Index(arr)\n assert type(result) is Index\n assert result.dtype == arr.dtype\n\n def test_construction_caching(self):\n df = pd.DataFrame(\n {\n "dt": date_range("20130101", periods=3),\n "dttz": date_range("20130101", periods=3, tz="US/Eastern"),\n "dt_with_null": [\n Timestamp("20130101"),\n pd.NaT,\n Timestamp("20130103"),\n ],\n "dtns": date_range("20130101", periods=3, freq="ns"),\n }\n )\n assert df.dttz.dtype.tz.zone == "US/Eastern"\n\n @pytest.mark.parametrize(\n "kwargs",\n [{"tz": "dtype.tz"}, {"dtype": "dtype"}, {"dtype": "dtype", "tz": "dtype.tz"}],\n )\n def test_construction_with_alt(self, kwargs, tz_aware_fixture):\n tz = tz_aware_fixture\n i = date_range("20130101", periods=5, freq="h", tz=tz)\n kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}\n result = DatetimeIndex(i, **kwargs)\n tm.assert_index_equal(i, result)\n\n @pytest.mark.parametrize(\n "kwargs",\n [{"tz": "dtype.tz"}, {"dtype": "dtype"}, {"dtype": "dtype", "tz": "dtype.tz"}],\n )\n def test_construction_with_alt_tz_localize(self, kwargs, tz_aware_fixture):\n tz = tz_aware_fixture\n i = date_range("20130101", periods=5, freq="h", tz=tz)\n i = i._with_freq(None)\n kwargs = {key: attrgetter(val)(i) for key, val in kwargs.items()}\n\n if "tz" in kwargs:\n result = DatetimeIndex(i.asi8, tz="UTC").tz_convert(kwargs["tz"])\n\n expected = DatetimeIndex(i, **kwargs)\n tm.assert_index_equal(result, expected)\n\n # localize into the provided tz\n i2 = DatetimeIndex(i.tz_localize(None).asi8, tz="UTC")\n 
expected = i.tz_localize(None).tz_localize("UTC")\n tm.assert_index_equal(i2, expected)\n\n # incompat tz/dtype\n msg = "cannot supply both a tz and a dtype with a tz"\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(i.tz_localize(None).asi8, dtype=i.dtype, tz="US/Pacific")\n\n def test_construction_index_with_mixed_timezones(self):\n # gh-11488: no tz results in DatetimeIndex\n result = Index([Timestamp("2011-01-01"), Timestamp("2011-01-02")], name="idx")\n exp = DatetimeIndex(\n [Timestamp("2011-01-01"), Timestamp("2011-01-02")], name="idx"\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is None\n\n # same tz results in DatetimeIndex\n result = Index(\n [\n Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),\n Timestamp("2011-01-02 10:00", tz="Asia/Tokyo"),\n ],\n name="idx",\n )\n exp = DatetimeIndex(\n [Timestamp("2011-01-01 10:00"), Timestamp("2011-01-02 10:00")],\n tz="Asia/Tokyo",\n name="idx",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n # same tz results in DatetimeIndex (DST)\n result = Index(\n [\n Timestamp("2011-01-01 10:00", tz="US/Eastern"),\n Timestamp("2011-08-01 10:00", tz="US/Eastern"),\n ],\n name="idx",\n )\n exp = DatetimeIndex(\n [Timestamp("2011-01-01 10:00"), Timestamp("2011-08-01 10:00")],\n tz="US/Eastern",\n name="idx",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n # Different tz results in Index(dtype=object)\n result = Index(\n [\n Timestamp("2011-01-01 10:00"),\n Timestamp("2011-01-02 10:00", tz="US/Eastern"),\n ],\n name="idx",\n )\n exp = Index(\n [\n Timestamp("2011-01-01 10:00"),\n Timestamp("2011-01-02 10:00", tz="US/Eastern"),\n ],\n dtype="object",\n name="idx",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert not 
isinstance(result, DatetimeIndex)\n\n result = Index(\n [\n Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),\n Timestamp("2011-01-02 10:00", tz="US/Eastern"),\n ],\n name="idx",\n )\n exp = Index(\n [\n Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),\n Timestamp("2011-01-02 10:00", tz="US/Eastern"),\n ],\n dtype="object",\n name="idx",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert not isinstance(result, DatetimeIndex)\n\n msg = "DatetimeIndex has mixed timezones"\n msg_depr = "parsing datetimes with mixed time zones will raise an error"\n with pytest.raises(TypeError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=msg_depr):\n DatetimeIndex(["2013-11-02 22:00-05:00", "2013-11-03 22:00-06:00"])\n\n # length = 1\n result = Index([Timestamp("2011-01-01")], name="idx")\n exp = DatetimeIndex([Timestamp("2011-01-01")], name="idx")\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is None\n\n # length = 1 with tz\n result = Index([Timestamp("2011-01-01 10:00", tz="Asia/Tokyo")], name="idx")\n exp = DatetimeIndex(\n [Timestamp("2011-01-01 10:00")], tz="Asia/Tokyo", name="idx"\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n def test_construction_index_with_mixed_timezones_with_NaT(self):\n # see gh-11488\n result = Index(\n [pd.NaT, Timestamp("2011-01-01"), pd.NaT, Timestamp("2011-01-02")],\n name="idx",\n )\n exp = DatetimeIndex(\n [pd.NaT, Timestamp("2011-01-01"), pd.NaT, Timestamp("2011-01-02")],\n name="idx",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is None\n\n # Same tz results in DatetimeIndex\n result = Index(\n [\n pd.NaT,\n Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),\n pd.NaT,\n Timestamp("2011-01-02 10:00", tz="Asia/Tokyo"),\n ],\n name="idx",\n )\n exp = DatetimeIndex(\n [\n 
pd.NaT,\n Timestamp("2011-01-01 10:00"),\n pd.NaT,\n Timestamp("2011-01-02 10:00"),\n ],\n tz="Asia/Tokyo",\n name="idx",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n # same tz results in DatetimeIndex (DST)\n result = Index(\n [\n Timestamp("2011-01-01 10:00", tz="US/Eastern"),\n pd.NaT,\n Timestamp("2011-08-01 10:00", tz="US/Eastern"),\n ],\n name="idx",\n )\n exp = DatetimeIndex(\n [Timestamp("2011-01-01 10:00"), pd.NaT, Timestamp("2011-08-01 10:00")],\n tz="US/Eastern",\n name="idx",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is not None\n assert result.tz == exp.tz\n\n # different tz results in Index(dtype=object)\n result = Index(\n [\n pd.NaT,\n Timestamp("2011-01-01 10:00"),\n pd.NaT,\n Timestamp("2011-01-02 10:00", tz="US/Eastern"),\n ],\n name="idx",\n )\n exp = Index(\n [\n pd.NaT,\n Timestamp("2011-01-01 10:00"),\n pd.NaT,\n Timestamp("2011-01-02 10:00", tz="US/Eastern"),\n ],\n dtype="object",\n name="idx",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert not isinstance(result, DatetimeIndex)\n\n result = Index(\n [\n pd.NaT,\n Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),\n pd.NaT,\n Timestamp("2011-01-02 10:00", tz="US/Eastern"),\n ],\n name="idx",\n )\n exp = Index(\n [\n pd.NaT,\n Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),\n pd.NaT,\n Timestamp("2011-01-02 10:00", tz="US/Eastern"),\n ],\n dtype="object",\n name="idx",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert not isinstance(result, DatetimeIndex)\n\n # all NaT\n result = Index([pd.NaT, pd.NaT], name="idx")\n exp = DatetimeIndex([pd.NaT, pd.NaT], name="idx")\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is None\n\n def test_construction_dti_with_mixed_timezones(self):\n # GH 11488 (not changed, added explicit 
tests)\n\n # no tz results in DatetimeIndex\n result = DatetimeIndex(\n [Timestamp("2011-01-01"), Timestamp("2011-01-02")], name="idx"\n )\n exp = DatetimeIndex(\n [Timestamp("2011-01-01"), Timestamp("2011-01-02")], name="idx"\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n\n # same tz results in DatetimeIndex\n result = DatetimeIndex(\n [\n Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),\n Timestamp("2011-01-02 10:00", tz="Asia/Tokyo"),\n ],\n name="idx",\n )\n exp = DatetimeIndex(\n [Timestamp("2011-01-01 10:00"), Timestamp("2011-01-02 10:00")],\n tz="Asia/Tokyo",\n name="idx",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n\n # same tz results in DatetimeIndex (DST)\n result = DatetimeIndex(\n [\n Timestamp("2011-01-01 10:00", tz="US/Eastern"),\n Timestamp("2011-08-01 10:00", tz="US/Eastern"),\n ],\n name="idx",\n )\n exp = DatetimeIndex(\n [Timestamp("2011-01-01 10:00"), Timestamp("2011-08-01 10:00")],\n tz="US/Eastern",\n name="idx",\n )\n tm.assert_index_equal(result, exp, exact=True)\n assert isinstance(result, DatetimeIndex)\n\n # tz mismatch affecting to tz-aware raises TypeError/ValueError\n\n msg = "cannot be converted to datetime64"\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(\n [\n Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),\n Timestamp("2011-01-02 10:00", tz="US/Eastern"),\n ],\n name="idx",\n )\n\n # pre-2.0 this raised bc of awareness mismatch. 
in 2.0 with a tz#\n # specified we behave as if this was called pointwise, so\n # the naive Timestamp is treated as a wall time.\n dti = DatetimeIndex(\n [\n Timestamp("2011-01-01 10:00"),\n Timestamp("2011-01-02 10:00", tz="US/Eastern"),\n ],\n tz="Asia/Tokyo",\n name="idx",\n )\n expected = DatetimeIndex(\n [\n Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),\n Timestamp("2011-01-02 10:00", tz="US/Eastern").tz_convert("Asia/Tokyo"),\n ],\n tz="Asia/Tokyo",\n name="idx",\n )\n tm.assert_index_equal(dti, expected)\n\n # pre-2.0 mixed-tz scalars raised even if a tz/dtype was specified.\n # as of 2.0 we successfully return the requested tz/dtype\n dti = DatetimeIndex(\n [\n Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),\n Timestamp("2011-01-02 10:00", tz="US/Eastern"),\n ],\n tz="US/Eastern",\n name="idx",\n )\n expected = DatetimeIndex(\n [\n Timestamp("2011-01-01 10:00", tz="Asia/Tokyo").tz_convert("US/Eastern"),\n Timestamp("2011-01-02 10:00", tz="US/Eastern"),\n ],\n tz="US/Eastern",\n name="idx",\n )\n tm.assert_index_equal(dti, expected)\n\n # same thing but pass dtype instead of tz\n dti = DatetimeIndex(\n [\n Timestamp("2011-01-01 10:00", tz="Asia/Tokyo"),\n Timestamp("2011-01-02 10:00", tz="US/Eastern"),\n ],\n dtype="M8[ns, US/Eastern]",\n name="idx",\n )\n tm.assert_index_equal(dti, expected)\n\n def test_construction_base_constructor(self):\n arr = [Timestamp("2011-01-01"), pd.NaT, Timestamp("2011-01-03")]\n tm.assert_index_equal(Index(arr), DatetimeIndex(arr))\n tm.assert_index_equal(Index(np.array(arr)), DatetimeIndex(np.array(arr)))\n\n arr = [np.nan, pd.NaT, Timestamp("2011-01-03")]\n tm.assert_index_equal(Index(arr), DatetimeIndex(arr))\n tm.assert_index_equal(Index(np.array(arr)), DatetimeIndex(np.array(arr)))\n\n def test_construction_outofbounds(self):\n # GH 13663\n dates = [\n datetime(3000, 1, 1),\n datetime(4000, 1, 1),\n datetime(5000, 1, 1),\n datetime(6000, 1, 1),\n ]\n exp = Index(dates, dtype=object)\n # coerces to object\n 
tm.assert_index_equal(Index(dates), exp)\n\n msg = "^Out of bounds nanosecond timestamp: 3000-01-01 00:00:00, at position 0$"\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n # can't create DatetimeIndex\n DatetimeIndex(dates)\n\n @pytest.mark.parametrize("data", [["1400-01-01"], [datetime(1400, 1, 1)]])\n def test_dti_date_out_of_range(self, data):\n # GH#1475\n msg = (\n "^Out of bounds nanosecond timestamp: "\n "1400-01-01( 00:00:00)?, at position 0$"\n )\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n DatetimeIndex(data)\n\n def test_construction_with_ndarray(self):\n # GH 5152\n dates = [datetime(2013, 10, 7), datetime(2013, 10, 8), datetime(2013, 10, 9)]\n data = DatetimeIndex(dates, freq=offsets.BDay()).values\n result = DatetimeIndex(data, freq=offsets.BDay())\n expected = DatetimeIndex(["2013-10-07", "2013-10-08", "2013-10-09"], freq="B")\n tm.assert_index_equal(result, expected)\n\n def test_integer_values_and_tz_interpreted_as_utc(self):\n # GH-24559\n val = np.datetime64("2000-01-01 00:00:00", "ns")\n values = np.array([val.view("i8")])\n\n result = DatetimeIndex(values).tz_localize("US/Central")\n\n expected = DatetimeIndex(["2000-01-01T00:00:00"], dtype="M8[ns, US/Central]")\n tm.assert_index_equal(result, expected)\n\n # but UTC is *not* deprecated.\n with tm.assert_produces_warning(None):\n result = DatetimeIndex(values, tz="UTC")\n expected = DatetimeIndex(["2000-01-01T00:00:00"], dtype="M8[ns, UTC]")\n tm.assert_index_equal(result, expected)\n\n def test_constructor_coverage(self):\n msg = r"DatetimeIndex\(\.\.\.\) must be called with a collection"\n with pytest.raises(TypeError, match=msg):\n DatetimeIndex("1/1/2000")\n\n # generator expression\n gen = (datetime(2000, 1, 1) + timedelta(i) for i in range(10))\n result = DatetimeIndex(gen)\n expected = DatetimeIndex(\n [datetime(2000, 1, 1) + timedelta(i) for i in range(10)]\n )\n tm.assert_index_equal(result, expected)\n\n # NumPy string array\n strings = np.array(["2000-01-01", 
"2000-01-02", "2000-01-03"])\n result = DatetimeIndex(strings)\n expected = DatetimeIndex(strings.astype("O"))\n tm.assert_index_equal(result, expected)\n\n from_ints = DatetimeIndex(expected.asi8)\n tm.assert_index_equal(from_ints, expected)\n\n # string with NaT\n strings = np.array(["2000-01-01", "2000-01-02", "NaT"])\n result = DatetimeIndex(strings)\n expected = DatetimeIndex(strings.astype("O"))\n tm.assert_index_equal(result, expected)\n\n from_ints = DatetimeIndex(expected.asi8)\n tm.assert_index_equal(from_ints, expected)\n\n # non-conforming\n msg = (\n "Inferred frequency None from passed values does not conform "\n "to passed frequency D"\n )\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-04"], freq="D")\n\n @pytest.mark.parametrize("freq", ["YS", "W-SUN"])\n def test_constructor_datetime64_tzformat(self, freq):\n # see GH#6572: ISO 8601 format results in stdlib timezone object\n idx = date_range(\n "2013-01-01T00:00:00-05:00", "2016-01-01T23:59:59-05:00", freq=freq\n )\n expected = date_range(\n "2013-01-01T00:00:00",\n "2016-01-01T23:59:59",\n freq=freq,\n tz=timezone(timedelta(minutes=-300)),\n )\n tm.assert_index_equal(idx, expected)\n # Unable to use `US/Eastern` because of DST\n expected_i8 = date_range(\n "2013-01-01T00:00:00", "2016-01-01T23:59:59", freq=freq, tz="America/Lima"\n )\n tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)\n\n idx = date_range(\n "2013-01-01T00:00:00+09:00", "2016-01-01T23:59:59+09:00", freq=freq\n )\n expected = date_range(\n "2013-01-01T00:00:00",\n "2016-01-01T23:59:59",\n freq=freq,\n tz=timezone(timedelta(minutes=540)),\n )\n tm.assert_index_equal(idx, expected)\n expected_i8 = date_range(\n "2013-01-01T00:00:00", "2016-01-01T23:59:59", freq=freq, tz="Asia/Tokyo"\n )\n tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)\n\n # Non ISO 8601 format results in dateutil.tz.tzoffset\n idx = date_range("2013/1/1 0:00:00-5:00", "2016/1/1 23:59:59-5:00", 
freq=freq)\n expected = date_range(\n "2013-01-01T00:00:00",\n "2016-01-01T23:59:59",\n freq=freq,\n tz=timezone(timedelta(minutes=-300)),\n )\n tm.assert_index_equal(idx, expected)\n # Unable to use `US/Eastern` because of DST\n expected_i8 = date_range(\n "2013-01-01T00:00:00", "2016-01-01T23:59:59", freq=freq, tz="America/Lima"\n )\n tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)\n\n idx = date_range("2013/1/1 0:00:00+9:00", "2016/1/1 23:59:59+09:00", freq=freq)\n expected = date_range(\n "2013-01-01T00:00:00",\n "2016-01-01T23:59:59",\n freq=freq,\n tz=timezone(timedelta(minutes=540)),\n )\n tm.assert_index_equal(idx, expected)\n expected_i8 = date_range(\n "2013-01-01T00:00:00", "2016-01-01T23:59:59", freq=freq, tz="Asia/Tokyo"\n )\n tm.assert_numpy_array_equal(idx.asi8, expected_i8.asi8)\n\n def test_constructor_dtype(self):\n # passing a dtype with a tz should localize\n idx = DatetimeIndex(\n ["2013-01-01", "2013-01-02"], dtype="datetime64[ns, US/Eastern]"\n )\n expected = (\n DatetimeIndex(["2013-01-01", "2013-01-02"])\n .as_unit("ns")\n .tz_localize("US/Eastern")\n )\n tm.assert_index_equal(idx, expected)\n\n idx = DatetimeIndex(["2013-01-01", "2013-01-02"], tz="US/Eastern").as_unit("ns")\n tm.assert_index_equal(idx, expected)\n\n def test_constructor_dtype_tz_mismatch_raises(self):\n # if we already have a tz and its not the same, then raise\n idx = DatetimeIndex(\n ["2013-01-01", "2013-01-02"], dtype="datetime64[ns, US/Eastern]"\n )\n\n msg = (\n "cannot supply both a tz and a timezone-naive dtype "\n r"\(i\.e\. 
datetime64\[ns\]\)"\n )\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(idx, dtype="datetime64[ns]")\n\n # this is effectively trying to convert tz's\n msg = "data is already tz-aware US/Eastern, unable to set specified tz: CET"\n with pytest.raises(TypeError, match=msg):\n DatetimeIndex(idx, dtype="datetime64[ns, CET]")\n msg = "cannot supply both a tz and a dtype with a tz"\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(idx, tz="CET", dtype="datetime64[ns, US/Eastern]")\n\n result = DatetimeIndex(idx, dtype="datetime64[ns, US/Eastern]")\n tm.assert_index_equal(idx, result)\n\n @pytest.mark.parametrize("dtype", [object, np.int32, np.int64])\n def test_constructor_invalid_dtype_raises(self, dtype):\n # GH 23986\n msg = "Unexpected value for 'dtype'"\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex([1, 2], dtype=dtype)\n\n def test_000constructor_resolution(self):\n # 2252\n t1 = Timestamp((1352934390 * 1000000000) + 1000000 + 1000 + 1)\n idx = DatetimeIndex([t1])\n\n assert idx.nanosecond[0] == t1.nanosecond\n\n def test_disallow_setting_tz(self):\n # GH 3746\n dti = DatetimeIndex(["2010"], tz="UTC")\n msg = "Cannot directly set timezone"\n with pytest.raises(AttributeError, match=msg):\n dti.tz = pytz.timezone("US/Pacific")\n\n @pytest.mark.parametrize(\n "tz",\n [\n None,\n "America/Los_Angeles",\n pytz.timezone("America/Los_Angeles"),\n Timestamp("2000", tz="America/Los_Angeles").tz,\n ],\n )\n def test_constructor_start_end_with_tz(self, tz):\n # GH 18595\n start = Timestamp("2013-01-01 06:00:00", tz="America/Los_Angeles")\n end = Timestamp("2013-01-02 06:00:00", tz="America/Los_Angeles")\n result = date_range(freq="D", start=start, end=end, tz=tz)\n expected = DatetimeIndex(\n ["2013-01-01 06:00:00", "2013-01-02 06:00:00"],\n dtype="M8[ns, America/Los_Angeles]",\n freq="D",\n )\n tm.assert_index_equal(result, expected)\n # Especially assert that the timezone is consistent for pytz\n assert 
pytz.timezone("America/Los_Angeles") is result.tz\n\n @pytest.mark.parametrize("tz", ["US/Pacific", "US/Eastern", "Asia/Tokyo"])\n def test_constructor_with_non_normalized_pytz(self, tz):\n # GH 18595\n non_norm_tz = Timestamp("2010", tz=tz).tz\n result = DatetimeIndex(["2010"], tz=non_norm_tz)\n assert pytz.timezone(tz) is result.tz\n\n def test_constructor_timestamp_near_dst(self):\n # GH 20854\n ts = [\n Timestamp("2016-10-30 03:00:00+0300", tz="Europe/Helsinki"),\n Timestamp("2016-10-30 03:00:00+0200", tz="Europe/Helsinki"),\n ]\n result = DatetimeIndex(ts)\n expected = DatetimeIndex([ts[0].to_pydatetime(), ts[1].to_pydatetime()])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("klass", [Index, DatetimeIndex])\n @pytest.mark.parametrize("box", [np.array, partial(np.array, dtype=object), list])\n @pytest.mark.parametrize(\n "tz, dtype",\n [("US/Pacific", "datetime64[ns, US/Pacific]"), (None, "datetime64[ns]")],\n )\n def test_constructor_with_int_tz(self, klass, box, tz, dtype):\n # GH 20997, 20964\n ts = Timestamp("2018-01-01", tz=tz).as_unit("ns")\n result = klass(box([ts._value]), dtype=dtype)\n expected = klass([ts])\n assert result == expected\n\n def test_construction_int_rountrip(self, tz_naive_fixture):\n # GH 12619, GH#24559\n tz = tz_naive_fixture\n\n result = 1293858000000000000\n expected = DatetimeIndex([result], tz=tz).asi8[0]\n assert result == expected\n\n def test_construction_from_replaced_timestamps_with_dst(self):\n # GH 18785\n index = date_range(\n Timestamp(2000, 12, 31),\n Timestamp(2005, 12, 31),\n freq="YE-DEC",\n tz="Australia/Melbourne",\n )\n result = DatetimeIndex([x.replace(month=6, day=1) for x in index])\n expected = DatetimeIndex(\n [\n "2000-06-01 00:00:00",\n "2001-06-01 00:00:00",\n "2002-06-01 00:00:00",\n "2003-06-01 00:00:00",\n "2004-06-01 00:00:00",\n "2005-06-01 00:00:00",\n ],\n tz="Australia/Melbourne",\n )\n tm.assert_index_equal(result, expected)\n\n def 
test_construction_with_tz_and_tz_aware_dti(self):\n # GH 23579\n dti = date_range("2016-01-01", periods=3, tz="US/Central")\n msg = "data is already tz-aware US/Central, unable to set specified tz"\n with pytest.raises(TypeError, match=msg):\n DatetimeIndex(dti, tz="Asia/Tokyo")\n\n def test_construction_with_nat_and_tzlocal(self):\n tz = dateutil.tz.tzlocal()\n result = DatetimeIndex(["2018", "NaT"], tz=tz)\n expected = DatetimeIndex([Timestamp("2018", tz=tz), pd.NaT])\n tm.assert_index_equal(result, expected)\n\n def test_constructor_with_ambiguous_keyword_arg(self):\n # GH 35297\n\n expected = DatetimeIndex(\n ["2020-11-01 01:00:00", "2020-11-02 01:00:00"],\n dtype="datetime64[ns, America/New_York]",\n freq="D",\n ambiguous=False,\n )\n\n # ambiguous keyword in start\n timezone = "America/New_York"\n start = Timestamp(year=2020, month=11, day=1, hour=1).tz_localize(\n timezone, ambiguous=False\n )\n result = date_range(start=start, periods=2, ambiguous=False)\n tm.assert_index_equal(result, expected)\n\n # ambiguous keyword in end\n timezone = "America/New_York"\n end = Timestamp(year=2020, month=11, day=2, hour=1).tz_localize(\n timezone, ambiguous=False\n )\n result = date_range(end=end, periods=2, ambiguous=False)\n tm.assert_index_equal(result, expected)\n\n def test_constructor_with_nonexistent_keyword_arg(self, warsaw):\n # GH 35297\n timezone = warsaw\n\n # nonexistent keyword in start\n start = Timestamp("2015-03-29 02:30:00").tz_localize(\n timezone, nonexistent="shift_forward"\n )\n result = date_range(start=start, periods=2, freq="h")\n expected = DatetimeIndex(\n [\n Timestamp("2015-03-29 03:00:00+02:00", tz=timezone),\n Timestamp("2015-03-29 04:00:00+02:00", tz=timezone),\n ]\n )\n\n tm.assert_index_equal(result, expected)\n\n # nonexistent keyword in end\n end = start\n result = date_range(end=end, periods=2, freq="h")\n expected = DatetimeIndex(\n [\n Timestamp("2015-03-29 01:00:00+01:00", tz=timezone),\n Timestamp("2015-03-29 03:00:00+02:00", 
tz=timezone),\n ]\n )\n\n tm.assert_index_equal(result, expected)\n\n def test_constructor_no_precision_raises(self):\n # GH-24753, GH-24739\n\n msg = "with no precision is not allowed"\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(["2000"], dtype="datetime64")\n\n msg = "The 'datetime64' dtype has no unit. Please pass in"\n with pytest.raises(ValueError, match=msg):\n Index(["2000"], dtype="datetime64")\n\n def test_constructor_wrong_precision_raises(self):\n dti = DatetimeIndex(["2000"], dtype="datetime64[us]")\n assert dti.dtype == "M8[us]"\n assert dti[0] == Timestamp(2000, 1, 1)\n\n def test_index_constructor_with_numpy_object_array_and_timestamp_tz_with_nan(self):\n # GH 27011\n result = Index(np.array([Timestamp("2019", tz="UTC"), np.nan], dtype=object))\n expected = DatetimeIndex([Timestamp("2019", tz="UTC"), pd.NaT])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])\n def test_dti_from_tzaware_datetime(self, tz):\n d = [datetime(2012, 8, 19, tzinfo=tz)]\n\n index = DatetimeIndex(d)\n assert timezones.tz_compare(index.tz, tz)\n\n @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])\n def test_dti_tz_constructors(self, tzstr):\n """Test different DatetimeIndex constructions with timezone\n Follow-up of GH#4229\n """\n arr = ["11/10/2005 08:00:00", "11/10/2005 09:00:00"]\n\n idx1 = to_datetime(arr).tz_localize(tzstr)\n idx2 = date_range(start="2005-11-10 08:00:00", freq="h", periods=2, tz=tzstr)\n idx2 = idx2._with_freq(None) # the others all have freq=None\n idx3 = DatetimeIndex(arr, tz=tzstr)\n idx4 = DatetimeIndex(np.array(arr), tz=tzstr)\n\n for other in [idx2, idx3, idx4]:\n tm.assert_index_equal(idx1, other)\n\n def test_dti_construction_idempotent(self, unit):\n rng = date_range(\n "03/12/2012 00:00", periods=10, freq="W-FRI", tz="US/Eastern", unit=unit\n )\n rng2 = DatetimeIndex(data=rng, tz="US/Eastern")\n tm.assert_index_equal(rng, 
rng2)\n\n @pytest.mark.parametrize("prefix", ["", "dateutil/"])\n def test_dti_constructor_static_tzinfo(self, prefix):\n # it works!\n index = DatetimeIndex([datetime(2012, 1, 1)], tz=prefix + "EST")\n index.hour\n index[0]\n\n @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])\n def test_dti_convert_datetime_list(self, tzstr):\n dr = date_range("2012-06-02", periods=10, tz=tzstr, name="foo")\n dr2 = DatetimeIndex(list(dr), name="foo", freq="D")\n tm.assert_index_equal(dr, dr2)\n\n @pytest.mark.parametrize(\n "tz",\n [\n pytz.timezone("US/Eastern"),\n gettz("US/Eastern"),\n ],\n )\n @pytest.mark.parametrize("use_str", [True, False])\n @pytest.mark.parametrize("box_cls", [Timestamp, DatetimeIndex])\n def test_dti_ambiguous_matches_timestamp(self, tz, use_str, box_cls, request):\n # GH#47471 check that we get the same raising behavior in the DTI\n # constructor and Timestamp constructor\n dtstr = "2013-11-03 01:59:59.999999"\n item = dtstr\n if not use_str:\n item = Timestamp(dtstr).to_pydatetime()\n if box_cls is not Timestamp:\n item = [item]\n\n if not use_str and isinstance(tz, dateutil.tz.tzfile):\n # FIXME: The Timestamp constructor here behaves differently than all\n # the other cases bc with dateutil/zoneinfo tzinfos we implicitly\n # get fold=0. 
Having this raise is not important, but having the\n # behavior be consistent across cases is.\n mark = pytest.mark.xfail(reason="We implicitly get fold=0.")\n request.applymarker(mark)\n\n with pytest.raises(pytz.AmbiguousTimeError, match=dtstr):\n box_cls(item, tz=tz)\n\n @pytest.mark.parametrize("tz", [None, "UTC", "US/Pacific"])\n def test_dti_constructor_with_non_nano_dtype(self, tz):\n # GH#55756, GH#54620\n ts = Timestamp("2999-01-01")\n dtype = "M8[us]"\n if tz is not None:\n dtype = f"M8[us, {tz}]"\n vals = [ts, "2999-01-02 03:04:05.678910", 2500]\n result = DatetimeIndex(vals, dtype=dtype)\n # The 2500 is interpreted as microseconds, consistent with what\n # we would get if we created DatetimeIndexes from vals[:2] and vals[2:]\n # and concated the results.\n pointwise = [\n vals[0].tz_localize(tz),\n Timestamp(vals[1], tz=tz),\n to_datetime(vals[2], unit="us", utc=True).tz_convert(tz),\n ]\n exp_vals = [x.as_unit("us").asm8 for x in pointwise]\n exp_arr = np.array(exp_vals, dtype="M8[us]")\n expected = DatetimeIndex(exp_arr, dtype="M8[us]")\n if tz is not None:\n expected = expected.tz_localize("UTC").tz_convert(tz)\n tm.assert_index_equal(result, expected)\n\n result2 = DatetimeIndex(np.array(vals, dtype=object), dtype=dtype)\n tm.assert_index_equal(result2, expected)\n\n def test_dti_constructor_with_non_nano_now_today(self):\n # GH#55756\n now = Timestamp.now()\n today = Timestamp.today()\n result = DatetimeIndex(["now", "today"], dtype="M8[s]")\n assert result.dtype == "M8[s]"\n\n # result may not exactly match [now, today] so we'll test it up to a tolerance.\n # (it *may* match exactly due to rounding)\n tolerance = pd.Timedelta(microseconds=1)\n\n diff0 = result[0] - now.as_unit("s")\n assert diff0 >= pd.Timedelta(0)\n assert diff0 < tolerance\n\n diff1 = result[1] - today.as_unit("s")\n assert diff1 >= pd.Timedelta(0)\n assert diff1 < tolerance\n\n def test_dti_constructor_object_float_matches_float_dtype(self):\n # GH#55780\n arr = np.array([0, 
np.nan], dtype=np.float64)\n arr2 = arr.astype(object)\n\n dti1 = DatetimeIndex(arr, tz="CET")\n dti2 = DatetimeIndex(arr2, tz="CET")\n tm.assert_index_equal(dti1, dti2)\n\n @pytest.mark.parametrize("dtype", ["M8[us]", "M8[us, US/Pacific]"])\n def test_dti_constructor_with_dtype_object_int_matches_int_dtype(self, dtype):\n # Going through the object path should match the non-object path\n\n vals1 = np.arange(5, dtype="i8") * 1000\n vals1[0] = pd.NaT.value\n\n vals2 = vals1.astype(np.float64)\n vals2[0] = np.nan\n\n vals3 = vals1.astype(object)\n # change lib.infer_dtype(vals3) from "integer" so we go through\n # array_to_datetime in _sequence_to_dt64\n vals3[0] = pd.NaT\n\n vals4 = vals2.astype(object)\n\n res1 = DatetimeIndex(vals1, dtype=dtype)\n res2 = DatetimeIndex(vals2, dtype=dtype)\n res3 = DatetimeIndex(vals3, dtype=dtype)\n res4 = DatetimeIndex(vals4, dtype=dtype)\n\n expected = DatetimeIndex(vals1.view("M8[us]"))\n if res1.tz is not None:\n expected = expected.tz_localize("UTC").tz_convert(res1.tz)\n tm.assert_index_equal(res1, expected)\n tm.assert_index_equal(res2, expected)\n tm.assert_index_equal(res3, expected)\n tm.assert_index_equal(res4, expected)\n\n\nclass TestTimeSeries:\n def test_dti_constructor_preserve_dti_freq(self):\n rng = date_range("1/1/2000", "1/2/2000", freq="5min")\n\n rng2 = DatetimeIndex(rng)\n assert rng.freq == rng2.freq\n\n def test_explicit_none_freq(self):\n # Explicitly passing freq=None is respected\n rng = date_range("1/1/2000", "1/2/2000", freq="5min")\n\n result = DatetimeIndex(rng, freq=None)\n assert result.freq is None\n\n result = DatetimeIndex(rng._data, freq=None)\n assert result.freq is None\n\n def test_dti_constructor_small_int(self, any_int_numpy_dtype):\n # see gh-13721\n exp = DatetimeIndex(\n [\n "1970-01-01 00:00:00.00000000",\n "1970-01-01 00:00:00.00000001",\n "1970-01-01 00:00:00.00000002",\n ]\n )\n\n arr = np.array([0, 10, 20], dtype=any_int_numpy_dtype)\n tm.assert_index_equal(DatetimeIndex(arr), 
exp)\n\n def test_ctor_str_intraday(self):\n rng = DatetimeIndex(["1-1-2000 00:00:01"])\n assert rng[0].second == 1\n\n def test_index_cast_datetime64_other_units(self):\n arr = np.arange(0, 100, 10, dtype=np.int64).view("M8[D]")\n idx = Index(arr)\n\n assert (idx.values == astype_overflowsafe(arr, dtype=np.dtype("M8[ns]"))).all()\n\n def test_constructor_int64_nocopy(self):\n # GH#1624\n arr = np.arange(1000, dtype=np.int64)\n index = DatetimeIndex(arr)\n\n arr[50:100] = -1\n assert (index.asi8[50:100] == -1).all()\n\n arr = np.arange(1000, dtype=np.int64)\n index = DatetimeIndex(arr, copy=True)\n\n arr[50:100] = -1\n assert (index.asi8[50:100] != -1).all()\n\n @pytest.mark.parametrize(\n "freq",\n ["ME", "QE", "YE", "D", "B", "bh", "min", "s", "ms", "us", "h", "ns", "C"],\n )\n def test_from_freq_recreate_from_data(self, freq):\n org = date_range(start="2001/02/01 09:00", freq=freq, periods=1)\n idx = DatetimeIndex(org, freq=freq)\n tm.assert_index_equal(idx, org)\n\n org = date_range(\n start="2001/02/01 09:00", freq=freq, tz="US/Pacific", periods=1\n )\n idx = DatetimeIndex(org, freq=freq, tz="US/Pacific")\n tm.assert_index_equal(idx, org)\n\n def test_datetimeindex_constructor_misc(self):\n arr = ["1/1/2005", "1/2/2005", "Jn 3, 2005", "2005-01-04"]\n msg = r"(\(')?Unknown datetime string format(:', 'Jn 3, 2005'\))?"\n with pytest.raises(ValueError, match=msg):\n DatetimeIndex(arr)\n\n arr = ["1/1/2005", "1/2/2005", "1/3/2005", "2005-01-04"]\n idx1 = DatetimeIndex(arr)\n\n arr = [datetime(2005, 1, 1), "1/2/2005", "1/3/2005", "2005-01-04"]\n idx2 = DatetimeIndex(arr)\n\n arr = [Timestamp(datetime(2005, 1, 1)), "1/2/2005", "1/3/2005", "2005-01-04"]\n idx3 = DatetimeIndex(arr)\n\n arr = np.array(["1/1/2005", "1/2/2005", "1/3/2005", "2005-01-04"], dtype="O")\n idx4 = DatetimeIndex(arr)\n\n idx5 = DatetimeIndex(["12/05/2007", "25/01/2008"], dayfirst=True)\n idx6 = DatetimeIndex(\n ["2007/05/12", "2008/01/25"], dayfirst=False, yearfirst=True\n )\n 
tm.assert_index_equal(idx5, idx6)\n\n for other in [idx2, idx3, idx4]:\n assert (idx1.values == other.values).all()\n\n def test_dti_constructor_object_dtype_dayfirst_yearfirst_with_tz(self):\n # GH#55813\n val = "5/10/16"\n\n dfirst = Timestamp(2016, 10, 5, tz="US/Pacific")\n yfirst = Timestamp(2005, 10, 16, tz="US/Pacific")\n\n result1 = DatetimeIndex([val], tz="US/Pacific", dayfirst=True)\n expected1 = DatetimeIndex([dfirst])\n tm.assert_index_equal(result1, expected1)\n\n result2 = DatetimeIndex([val], tz="US/Pacific", yearfirst=True)\n expected2 = DatetimeIndex([yfirst])\n tm.assert_index_equal(result2, expected2)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_constructors.py | test_constructors.py | Python | 43,922 | 0.95 | 0.072259 | 0.09372 | python-kit | 240 | 2025-07-08T16:05:56.280023 | BSD-3-Clause | true | e3baf21874f9ebc349c19bd20566f951 |
import datetime as dt\nfrom datetime import date\nimport re\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat.numpy import np_long\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n Timestamp,\n date_range,\n offsets,\n)\nimport pandas._testing as tm\n\n\nclass TestDatetimeIndex:\n def test_is_(self):\n dti = date_range(start="1/1/2005", end="12/1/2005", freq="ME")\n assert dti.is_(dti)\n assert dti.is_(dti.view())\n assert not dti.is_(dti.copy())\n\n def test_time_overflow_for_32bit_machines(self):\n # GH8943. On some machines NumPy defaults to np.int32 (for example,\n # 32-bit Linux machines). In the function _generate_regular_range\n # found in tseries/index.py, `periods` gets multiplied by `strides`\n # (which has value 1e9) and since the max value for np.int32 is ~2e9,\n # and since those machines won't promote np.int32 to np.int64, we get\n # overflow.\n periods = np_long(1000)\n\n idx1 = date_range(start="2000", periods=periods, freq="s")\n assert len(idx1) == periods\n\n idx2 = date_range(end="2000", periods=periods, freq="s")\n assert len(idx2) == periods\n\n def test_nat(self):\n assert DatetimeIndex([np.nan])[0] is pd.NaT\n\n def test_week_of_month_frequency(self):\n # GH 5348: "ValueError: Could not evaluate WOM-1SUN" shouldn't raise\n d1 = date(2002, 9, 1)\n d2 = date(2013, 10, 27)\n d3 = date(2012, 9, 30)\n idx1 = DatetimeIndex([d1, d2])\n idx2 = DatetimeIndex([d3])\n result_append = idx1.append(idx2)\n expected = DatetimeIndex([d1, d2, d3])\n tm.assert_index_equal(result_append, expected)\n result_union = idx1.union(idx2)\n expected = DatetimeIndex([d1, d3, d2])\n tm.assert_index_equal(result_union, expected)\n\n def test_append_nondatetimeindex(self):\n rng = date_range("1/1/2000", periods=10)\n idx = Index(["a", "b", "c", "d"])\n\n result = rng.append(idx)\n assert isinstance(result[0], Timestamp)\n\n def test_misc_coverage(self):\n rng = date_range("1/1/2000", periods=5)\n result = rng.groupby(rng.day)\n 
assert isinstance(next(iter(result.values()))[0], Timestamp)\n\n # TODO: belongs in frame groupby tests?\n def test_groupby_function_tuple_1677(self):\n df = DataFrame(\n np.random.default_rng(2).random(100),\n index=date_range("1/1/2000", periods=100),\n )\n monthly_group = df.groupby(lambda x: (x.year, x.month))\n\n result = monthly_group.mean()\n assert isinstance(result.index[0], tuple)\n\n def assert_index_parameters(self, index):\n assert index.freq == "40960ns"\n assert index.inferred_freq == "40960ns"\n\n def test_ns_index(self):\n nsamples = 400\n ns = int(1e9 / 24414)\n dtstart = np.datetime64("2012-09-20T00:00:00")\n\n dt = dtstart + np.arange(nsamples) * np.timedelta64(ns, "ns")\n freq = ns * offsets.Nano()\n index = DatetimeIndex(dt, freq=freq, name="time")\n self.assert_index_parameters(index)\n\n new_index = date_range(start=index[0], end=index[-1], freq=index.freq)\n self.assert_index_parameters(new_index)\n\n def test_asarray_tz_naive(self):\n # This shouldn't produce a warning.\n idx = date_range("2000", periods=2)\n # M8[ns] by default\n result = np.asarray(idx)\n\n expected = np.array(["2000-01-01", "2000-01-02"], dtype="M8[ns]")\n tm.assert_numpy_array_equal(result, expected)\n\n # optionally, object\n result = np.asarray(idx, dtype=object)\n\n expected = np.array([Timestamp("2000-01-01"), Timestamp("2000-01-02")])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_asarray_tz_aware(self):\n tz = "US/Central"\n idx = date_range("2000", periods=2, tz=tz)\n expected = np.array(["2000-01-01T06", "2000-01-02T06"], dtype="M8[ns]")\n result = np.asarray(idx, dtype="datetime64[ns]")\n\n tm.assert_numpy_array_equal(result, expected)\n\n # Old behavior with no warning\n result = np.asarray(idx, dtype="M8[ns]")\n\n tm.assert_numpy_array_equal(result, expected)\n\n # Future behavior with no warning\n expected = np.array(\n [Timestamp("2000-01-01", tz=tz), Timestamp("2000-01-02", tz=tz)]\n )\n result = np.asarray(idx, dtype=object)\n\n 
tm.assert_numpy_array_equal(result, expected)\n\n def test_CBH_deprecated(self):\n msg = "'CBH' is deprecated and will be removed in a future version."\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = date_range(\n dt.datetime(2022, 12, 11), dt.datetime(2022, 12, 13), freq="CBH"\n )\n result = DatetimeIndex(\n [\n "2022-12-12 09:00:00",\n "2022-12-12 10:00:00",\n "2022-12-12 11:00:00",\n "2022-12-12 12:00:00",\n "2022-12-12 13:00:00",\n "2022-12-12 14:00:00",\n "2022-12-12 15:00:00",\n "2022-12-12 16:00:00",\n ],\n dtype="datetime64[ns]",\n freq="cbh",\n )\n\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "freq_depr, expected_values, expected_freq",\n [\n (\n "AS-AUG",\n ["2021-08-01", "2022-08-01", "2023-08-01"],\n "YS-AUG",\n ),\n (\n "1BAS-MAY",\n ["2021-05-03", "2022-05-02", "2023-05-01"],\n "1BYS-MAY",\n ),\n ],\n )\n def test_AS_BAS_deprecated(self, freq_depr, expected_values, expected_freq):\n # GH#55479\n freq_msg = re.split("[0-9]*", freq_depr, maxsplit=1)[1]\n msg = f"'{freq_msg}' is deprecated and will be removed in a future version."\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = date_range(\n dt.datetime(2020, 12, 1), dt.datetime(2023, 12, 1), freq=freq_depr\n )\n result = DatetimeIndex(\n expected_values,\n dtype="datetime64[ns]",\n freq=expected_freq,\n )\n\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "freq, expected_values, freq_depr",\n [\n ("2BYE-MAR", ["2016-03-31"], "2BA-MAR"),\n ("2BYE-JUN", ["2016-06-30"], "2BY-JUN"),\n ("2BME", ["2016-02-29", "2016-04-29", "2016-06-30"], "2BM"),\n ("2BQE", ["2016-03-31"], "2BQ"),\n ("1BQE-MAR", ["2016-03-31", "2016-06-30"], "1BQ-MAR"),\n ],\n )\n def test_BM_BQ_BY_deprecated(self, freq, expected_values, freq_depr):\n # GH#52064\n msg = f"'{freq_depr[1:]}' is deprecated and will be removed "\n f"in a future version, please use '{freq[1:]}' instead."\n\n with tm.assert_produces_warning(FutureWarning, 
match=msg):\n expected = date_range(start="2016-02-21", end="2016-08-21", freq=freq_depr)\n result = DatetimeIndex(\n data=expected_values,\n dtype="datetime64[ns]",\n freq=freq,\n )\n\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_datetime.py | test_datetime.py | Python | 7,305 | 0.95 | 0.083333 | 0.08427 | vue-tools | 865 | 2024-02-21T12:32:39.042244 | MIT | true | 50d39d08a967af9f647a78195a16a66f |
"""\ntest date_range, bdate_range construction from the convenience range functions\n"""\n\nfrom datetime import (\n datetime,\n time,\n timedelta,\n)\nimport re\n\nimport numpy as np\nimport pytest\nimport pytz\nfrom pytz import timezone\n\nfrom pandas._libs.tslibs import timezones\nfrom pandas._libs.tslibs.offsets import (\n BDay,\n CDay,\n DateOffset,\n MonthEnd,\n prefix_mapping,\n)\nfrom pandas.errors import OutOfBoundsDatetime\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Series,\n Timedelta,\n Timestamp,\n bdate_range,\n date_range,\n offsets,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays.datetimes import _generate_range as generate_range\nfrom pandas.tests.indexes.datetimes.test_timezones import (\n FixedOffset,\n fixed_off_no_name,\n)\n\nfrom pandas.tseries.holiday import USFederalHolidayCalendar\n\nSTART, END = datetime(2009, 1, 1), datetime(2010, 1, 1)\n\n\ndef _get_expected_range(\n begin_to_match,\n end_to_match,\n both_range,\n inclusive_endpoints,\n):\n """Helper to get expected range from a both inclusive range"""\n left_match = begin_to_match == both_range[0]\n right_match = end_to_match == both_range[-1]\n\n if inclusive_endpoints == "left" and right_match:\n expected_range = both_range[:-1]\n elif inclusive_endpoints == "right" and left_match:\n expected_range = both_range[1:]\n elif inclusive_endpoints == "neither" and left_match and right_match:\n expected_range = both_range[1:-1]\n elif inclusive_endpoints == "neither" and right_match:\n expected_range = both_range[:-1]\n elif inclusive_endpoints == "neither" and left_match:\n expected_range = both_range[1:]\n elif inclusive_endpoints == "both":\n expected_range = both_range[:]\n else:\n expected_range = both_range[:]\n\n return expected_range\n\n\nclass TestTimestampEquivDateRange:\n # Older tests in TestTimeSeries constructed their `stamp` objects\n # using `date_range` instead of the `Timestamp` 
constructor.\n # TestTimestampEquivDateRange checks that these are equivalent in the\n # pertinent cases.\n\n def test_date_range_timestamp_equiv(self):\n rng = date_range("20090415", "20090519", tz="US/Eastern")\n stamp = rng[0]\n\n ts = Timestamp("20090415", tz="US/Eastern")\n assert ts == stamp\n\n def test_date_range_timestamp_equiv_dateutil(self):\n rng = date_range("20090415", "20090519", tz="dateutil/US/Eastern")\n stamp = rng[0]\n\n ts = Timestamp("20090415", tz="dateutil/US/Eastern")\n assert ts == stamp\n\n def test_date_range_timestamp_equiv_explicit_pytz(self):\n rng = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))\n stamp = rng[0]\n\n ts = Timestamp("20090415", tz=pytz.timezone("US/Eastern"))\n assert ts == stamp\n\n @td.skip_if_windows\n def test_date_range_timestamp_equiv_explicit_dateutil(self):\n from pandas._libs.tslibs.timezones import dateutil_gettz as gettz\n\n rng = date_range("20090415", "20090519", tz=gettz("US/Eastern"))\n stamp = rng[0]\n\n ts = Timestamp("20090415", tz=gettz("US/Eastern"))\n assert ts == stamp\n\n def test_date_range_timestamp_equiv_from_datetime_instance(self):\n datetime_instance = datetime(2014, 3, 4)\n # build a timestamp with a frequency, since then it supports\n # addition/subtraction of integers\n timestamp_instance = date_range(datetime_instance, periods=1, freq="D")[0]\n\n ts = Timestamp(datetime_instance)\n assert ts == timestamp_instance\n\n def test_date_range_timestamp_equiv_preserve_frequency(self):\n timestamp_instance = date_range("2014-03-05", periods=1, freq="D")[0]\n ts = Timestamp("2014-03-05")\n\n assert timestamp_instance == ts\n\n\nclass TestDateRanges:\n def test_date_range_name(self):\n idx = date_range(start="2000-01-01", periods=1, freq="YE", name="TEST")\n assert idx.name == "TEST"\n\n def test_date_range_invalid_periods(self):\n msg = "periods must be a number, got foo"\n with pytest.raises(TypeError, match=msg):\n date_range(start="1/1/2000", periods="foo", freq="D")\n\n 
def test_date_range_fractional_period(self):\n msg = "Non-integer 'periods' in pd.date_range, pd.timedelta_range"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n rng = date_range("1/1/2000", periods=10.5)\n exp = date_range("1/1/2000", periods=10)\n tm.assert_index_equal(rng, exp)\n\n @pytest.mark.parametrize(\n "freq,freq_depr",\n [\n ("2ME", "2M"),\n ("2SME", "2SM"),\n ("2BQE", "2BQ"),\n ("2BYE", "2BY"),\n ],\n )\n def test_date_range_frequency_M_SM_BQ_BY_deprecated(self, freq, freq_depr):\n # GH#52064\n depr_msg = f"'{freq_depr[1:]}' is deprecated and will be removed "\n f"in a future version, please use '{freq[1:]}' instead."\n\n expected = date_range("1/1/2000", periods=4, freq=freq)\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n result = date_range("1/1/2000", periods=4, freq=freq_depr)\n tm.assert_index_equal(result, expected)\n\n def test_date_range_tuple_freq_raises(self):\n # GH#34703\n edate = datetime(2000, 1, 1)\n with pytest.raises(TypeError, match="pass as a string instead"):\n date_range(end=edate, freq=("D", 5), periods=20)\n\n @pytest.mark.parametrize("freq", ["ns", "us", "ms", "min", "s", "h", "D"])\n def test_date_range_edges(self, freq):\n # GH#13672\n td = Timedelta(f"1{freq}")\n ts = Timestamp("1970-01-01")\n\n idx = date_range(\n start=ts + td,\n end=ts + 4 * td,\n freq=freq,\n )\n exp = DatetimeIndex(\n [ts + n * td for n in range(1, 5)],\n dtype="M8[ns]",\n freq=freq,\n )\n tm.assert_index_equal(idx, exp)\n\n # start after end\n idx = date_range(\n start=ts + 4 * td,\n end=ts + td,\n freq=freq,\n )\n exp = DatetimeIndex([], dtype="M8[ns]", freq=freq)\n tm.assert_index_equal(idx, exp)\n\n # start matches end\n idx = date_range(\n start=ts + td,\n end=ts + td,\n freq=freq,\n )\n exp = DatetimeIndex([ts + td], dtype="M8[ns]", freq=freq)\n tm.assert_index_equal(idx, exp)\n\n def test_date_range_near_implementation_bound(self):\n # GH#???\n freq = Timedelta(1)\n\n with pytest.raises(OutOfBoundsDatetime, 
match="Cannot generate range with"):\n date_range(end=Timestamp.min, periods=2, freq=freq)\n\n def test_date_range_nat(self):\n # GH#11587\n msg = "Neither `start` nor `end` can be NaT"\n with pytest.raises(ValueError, match=msg):\n date_range(start="2016-01-01", end=pd.NaT, freq="D")\n with pytest.raises(ValueError, match=msg):\n date_range(start=pd.NaT, end="2016-01-01", freq="D")\n\n def test_date_range_multiplication_overflow(self):\n # GH#24255\n # check that overflows in calculating `addend = periods * stride`\n # are caught\n with tm.assert_produces_warning(None):\n # we should _not_ be seeing a overflow RuntimeWarning\n dti = date_range(start="1677-09-22", periods=213503, freq="D")\n\n assert dti[0] == Timestamp("1677-09-22")\n assert len(dti) == 213503\n\n msg = "Cannot generate range with"\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n date_range("1969-05-04", periods=200000000, freq="30000D")\n\n def test_date_range_unsigned_overflow_handling(self):\n # GH#24255\n # case where `addend = periods * stride` overflows int64 bounds\n # but not uint64 bounds\n dti = date_range(start="1677-09-22", end="2262-04-11", freq="D")\n\n dti2 = date_range(start=dti[0], periods=len(dti), freq="D")\n assert dti2.equals(dti)\n\n dti3 = date_range(end=dti[-1], periods=len(dti), freq="D")\n assert dti3.equals(dti)\n\n def test_date_range_int64_overflow_non_recoverable(self):\n # GH#24255\n # case with start later than 1970-01-01, overflow int64 but not uint64\n msg = "Cannot generate range with"\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n date_range(start="1970-02-01", periods=106752 * 24, freq="h")\n\n # case with end before 1970-01-01, overflow int64 but not uint64\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n date_range(end="1969-11-14", periods=106752 * 24, freq="h")\n\n @pytest.mark.slow\n @pytest.mark.parametrize(\n "s_ts, e_ts", [("2262-02-23", "1969-11-14"), ("1970-02-01", "1677-10-22")]\n )\n def 
test_date_range_int64_overflow_stride_endpoint_different_signs(\n self, s_ts, e_ts\n ):\n # cases where stride * periods overflow int64 and stride/endpoint\n # have different signs\n start = Timestamp(s_ts)\n end = Timestamp(e_ts)\n\n expected = date_range(start=start, end=end, freq="-1h")\n assert expected[0] == start\n assert expected[-1] == end\n\n dti = date_range(end=end, periods=len(expected), freq="-1h")\n tm.assert_index_equal(dti, expected)\n\n def test_date_range_out_of_bounds(self):\n # GH#14187\n msg = "Cannot generate range"\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n date_range("2016-01-01", periods=100000, freq="D")\n with pytest.raises(OutOfBoundsDatetime, match=msg):\n date_range(end="1763-10-12", periods=100000, freq="D")\n\n def test_date_range_gen_error(self):\n rng = date_range("1/1/2000 00:00", "1/1/2000 00:18", freq="5min")\n assert len(rng) == 4\n\n def test_date_range_normalize(self):\n snap = datetime.today()\n n = 50\n\n rng = date_range(snap, periods=n, normalize=False, freq="2D")\n\n offset = timedelta(2)\n expected = DatetimeIndex(\n [snap + i * offset for i in range(n)], dtype="M8[ns]", freq=offset\n )\n\n tm.assert_index_equal(rng, expected)\n\n rng = date_range("1/1/2000 08:15", periods=n, normalize=False, freq="B")\n the_time = time(8, 15)\n for val in rng:\n assert val.time() == the_time\n\n def test_date_range_ambiguous_arguments(self):\n # #2538\n start = datetime(2011, 1, 1, 5, 3, 40)\n end = datetime(2011, 1, 1, 8, 9, 40)\n\n msg = (\n "Of the four parameters: start, end, periods, and "\n "freq, exactly three must be specified"\n )\n with pytest.raises(ValueError, match=msg):\n date_range(start, end, periods=10, freq="s")\n\n def test_date_range_convenience_periods(self, unit):\n # GH 20808\n result = date_range("2018-04-24", "2018-04-27", periods=3, unit=unit)\n expected = DatetimeIndex(\n ["2018-04-24 00:00:00", "2018-04-25 12:00:00", "2018-04-27 00:00:00"],\n dtype=f"M8[{unit}]",\n freq=None,\n )\n\n 
tm.assert_index_equal(result, expected)\n\n # Test if spacing remains linear if tz changes to dst in range\n result = date_range(\n "2018-04-01 01:00:00",\n "2018-04-01 04:00:00",\n tz="Australia/Sydney",\n periods=3,\n unit=unit,\n )\n expected = DatetimeIndex(\n [\n Timestamp("2018-04-01 01:00:00+1100", tz="Australia/Sydney"),\n Timestamp("2018-04-01 02:00:00+1000", tz="Australia/Sydney"),\n Timestamp("2018-04-01 04:00:00+1000", tz="Australia/Sydney"),\n ]\n ).as_unit(unit)\n tm.assert_index_equal(result, expected)\n\n def test_date_range_index_comparison(self):\n rng = date_range("2011-01-01", periods=3, tz="US/Eastern")\n df = Series(rng).to_frame()\n arr = np.array([rng.to_list()]).T\n arr2 = np.array([rng]).T\n\n with pytest.raises(ValueError, match="Unable to coerce to Series"):\n rng == df\n\n with pytest.raises(ValueError, match="Unable to coerce to Series"):\n df == rng\n\n expected = DataFrame([True, True, True])\n\n results = df == arr2\n tm.assert_frame_equal(results, expected)\n\n expected = Series([True, True, True], name=0)\n\n results = df[0] == arr2[:, 0]\n tm.assert_series_equal(results, expected)\n\n expected = np.array(\n [[True, False, False], [False, True, False], [False, False, True]]\n )\n results = rng == arr\n tm.assert_numpy_array_equal(results, expected)\n\n @pytest.mark.parametrize(\n "start,end,result_tz",\n [\n ["20180101", "20180103", "US/Eastern"],\n [datetime(2018, 1, 1), datetime(2018, 1, 3), "US/Eastern"],\n [Timestamp("20180101"), Timestamp("20180103"), "US/Eastern"],\n [\n Timestamp("20180101", tz="US/Eastern"),\n Timestamp("20180103", tz="US/Eastern"),\n "US/Eastern",\n ],\n [\n Timestamp("20180101", tz="US/Eastern"),\n Timestamp("20180103", tz="US/Eastern"),\n None,\n ],\n ],\n )\n def test_date_range_linspacing_tz(self, start, end, result_tz):\n # GH 20983\n result = date_range(start, end, periods=3, tz=result_tz)\n expected = date_range("20180101", periods=3, freq="D", tz="US/Eastern")\n tm.assert_index_equal(result, 
expected)\n\n def test_date_range_timedelta(self):\n start = "2020-01-01"\n end = "2020-01-11"\n rng1 = date_range(start, end, freq="3D")\n rng2 = date_range(start, end, freq=timedelta(days=3))\n tm.assert_index_equal(rng1, rng2)\n\n def test_range_misspecified(self):\n # GH #1095\n msg = (\n "Of the four parameters: start, end, periods, and "\n "freq, exactly three must be specified"\n )\n\n with pytest.raises(ValueError, match=msg):\n date_range(start="1/1/2000")\n\n with pytest.raises(ValueError, match=msg):\n date_range(end="1/1/2000")\n\n with pytest.raises(ValueError, match=msg):\n date_range(periods=10)\n\n with pytest.raises(ValueError, match=msg):\n date_range(start="1/1/2000", freq="h")\n\n with pytest.raises(ValueError, match=msg):\n date_range(end="1/1/2000", freq="h")\n\n with pytest.raises(ValueError, match=msg):\n date_range(periods=10, freq="h")\n\n with pytest.raises(ValueError, match=msg):\n date_range()\n\n def test_compat_replace(self):\n # https://github.com/statsmodels/statsmodels/issues/3349\n # replace should take ints/longs for compat\n result = date_range(Timestamp("1960-04-01 00:00:00"), periods=76, freq="QS-JAN")\n assert len(result) == 76\n\n def test_catch_infinite_loop(self):\n offset = offsets.DateOffset(minute=5)\n # blow up, don't loop forever\n msg = "Offset <DateOffset: minute=5> did not increment date"\n with pytest.raises(ValueError, match=msg):\n date_range(datetime(2011, 11, 11), datetime(2011, 11, 12), freq=offset)\n\n def test_construct_over_dst(self, unit):\n # GH 20854\n pre_dst = Timestamp("2010-11-07 01:00:00").tz_localize(\n "US/Pacific", ambiguous=True\n )\n pst_dst = Timestamp("2010-11-07 01:00:00").tz_localize(\n "US/Pacific", ambiguous=False\n )\n expect_data = [\n Timestamp("2010-11-07 00:00:00", tz="US/Pacific"),\n pre_dst,\n pst_dst,\n ]\n expected = DatetimeIndex(expect_data, freq="h").as_unit(unit)\n result = date_range(\n start="2010-11-7", periods=3, freq="h", tz="US/Pacific", unit=unit\n )\n 
tm.assert_index_equal(result, expected)\n\n def test_construct_with_different_start_end_string_format(self, unit):\n # GH 12064\n result = date_range(\n "2013-01-01 00:00:00+09:00",\n "2013/01/01 02:00:00+09:00",\n freq="h",\n unit=unit,\n )\n expected = DatetimeIndex(\n [\n Timestamp("2013-01-01 00:00:00+09:00"),\n Timestamp("2013-01-01 01:00:00+09:00"),\n Timestamp("2013-01-01 02:00:00+09:00"),\n ],\n freq="h",\n ).as_unit(unit)\n tm.assert_index_equal(result, expected)\n\n def test_error_with_zero_monthends(self):\n msg = r"Offset <0 \* MonthEnds> did not increment date"\n with pytest.raises(ValueError, match=msg):\n date_range("1/1/2000", "1/1/2001", freq=MonthEnd(0))\n\n def test_range_bug(self, unit):\n # GH #770\n offset = DateOffset(months=3)\n result = date_range("2011-1-1", "2012-1-31", freq=offset, unit=unit)\n\n start = datetime(2011, 1, 1)\n expected = DatetimeIndex(\n [start + i * offset for i in range(5)], dtype=f"M8[{unit}]", freq=offset\n )\n tm.assert_index_equal(result, expected)\n\n def test_range_tz_pytz(self):\n # see gh-2906\n tz = timezone("US/Eastern")\n start = tz.localize(datetime(2011, 1, 1))\n end = tz.localize(datetime(2011, 1, 3))\n\n dr = date_range(start=start, periods=3)\n assert dr.tz.zone == tz.zone\n assert dr[0] == start\n assert dr[2] == end\n\n dr = date_range(end=end, periods=3)\n assert dr.tz.zone == tz.zone\n assert dr[0] == start\n assert dr[2] == end\n\n dr = date_range(start=start, end=end)\n assert dr.tz.zone == tz.zone\n assert dr[0] == start\n assert dr[2] == end\n\n @pytest.mark.parametrize(\n "start, end",\n [\n [\n Timestamp(datetime(2014, 3, 6), tz="US/Eastern"),\n Timestamp(datetime(2014, 3, 12), tz="US/Eastern"),\n ],\n [\n Timestamp(datetime(2013, 11, 1), tz="US/Eastern"),\n Timestamp(datetime(2013, 11, 6), tz="US/Eastern"),\n ],\n ],\n )\n def test_range_tz_dst_straddle_pytz(self, start, end):\n dr = date_range(start, end, freq="D")\n assert dr[0] == start\n assert dr[-1] == end\n assert np.all(dr.hour == 
0)\n\n dr = date_range(start, end, freq="D", tz="US/Eastern")\n assert dr[0] == start\n assert dr[-1] == end\n assert np.all(dr.hour == 0)\n\n dr = date_range(\n start.replace(tzinfo=None),\n end.replace(tzinfo=None),\n freq="D",\n tz="US/Eastern",\n )\n assert dr[0] == start\n assert dr[-1] == end\n assert np.all(dr.hour == 0)\n\n def test_range_tz_dateutil(self):\n # see gh-2906\n\n # Use maybe_get_tz to fix filename in tz under dateutil.\n from pandas._libs.tslibs.timezones import maybe_get_tz\n\n tz = lambda x: maybe_get_tz("dateutil/" + x)\n\n start = datetime(2011, 1, 1, tzinfo=tz("US/Eastern"))\n end = datetime(2011, 1, 3, tzinfo=tz("US/Eastern"))\n\n dr = date_range(start=start, periods=3)\n assert dr.tz == tz("US/Eastern")\n assert dr[0] == start\n assert dr[2] == end\n\n dr = date_range(end=end, periods=3)\n assert dr.tz == tz("US/Eastern")\n assert dr[0] == start\n assert dr[2] == end\n\n dr = date_range(start=start, end=end)\n assert dr.tz == tz("US/Eastern")\n assert dr[0] == start\n assert dr[2] == end\n\n @pytest.mark.parametrize("freq", ["1D", "3D", "2ME", "7W", "3h", "YE"])\n @pytest.mark.parametrize("tz", [None, "US/Eastern"])\n def test_range_closed(self, freq, tz, inclusive_endpoints_fixture):\n # GH#12409, GH#12684\n\n begin = Timestamp("2011/1/1", tz=tz)\n end = Timestamp("2014/1/1", tz=tz)\n\n result_range = date_range(\n begin, end, inclusive=inclusive_endpoints_fixture, freq=freq\n )\n both_range = date_range(begin, end, inclusive="both", freq=freq)\n expected_range = _get_expected_range(\n begin, end, both_range, inclusive_endpoints_fixture\n )\n\n tm.assert_index_equal(expected_range, result_range)\n\n @pytest.mark.parametrize("freq", ["1D", "3D", "2ME", "7W", "3h", "YE"])\n def test_range_with_tz_closed_with_tz_aware_start_end(\n self, freq, inclusive_endpoints_fixture\n ):\n begin = Timestamp("2011/1/1")\n end = Timestamp("2014/1/1")\n begintz = Timestamp("2011/1/1", tz="US/Eastern")\n endtz = Timestamp("2014/1/1", tz="US/Eastern")\n\n 
result_range = date_range(\n begin,\n end,\n inclusive=inclusive_endpoints_fixture,\n freq=freq,\n tz="US/Eastern",\n )\n both_range = date_range(\n begin, end, inclusive="both", freq=freq, tz="US/Eastern"\n )\n expected_range = _get_expected_range(\n begintz,\n endtz,\n both_range,\n inclusive_endpoints_fixture,\n )\n\n tm.assert_index_equal(expected_range, result_range)\n\n def test_range_closed_boundary(self, inclusive_endpoints_fixture):\n # GH#11804\n right_boundary = date_range(\n "2015-09-12",\n "2015-12-01",\n freq="QS-MAR",\n inclusive=inclusive_endpoints_fixture,\n )\n left_boundary = date_range(\n "2015-09-01",\n "2015-09-12",\n freq="QS-MAR",\n inclusive=inclusive_endpoints_fixture,\n )\n both_boundary = date_range(\n "2015-09-01",\n "2015-12-01",\n freq="QS-MAR",\n inclusive=inclusive_endpoints_fixture,\n )\n neither_boundary = date_range(\n "2015-09-11",\n "2015-09-12",\n freq="QS-MAR",\n inclusive=inclusive_endpoints_fixture,\n )\n\n expected_right = both_boundary\n expected_left = both_boundary\n expected_both = both_boundary\n\n if inclusive_endpoints_fixture == "right":\n expected_left = both_boundary[1:]\n elif inclusive_endpoints_fixture == "left":\n expected_right = both_boundary[:-1]\n elif inclusive_endpoints_fixture == "both":\n expected_right = both_boundary[1:]\n expected_left = both_boundary[:-1]\n\n expected_neither = both_boundary[1:-1]\n\n tm.assert_index_equal(right_boundary, expected_right)\n tm.assert_index_equal(left_boundary, expected_left)\n tm.assert_index_equal(both_boundary, expected_both)\n tm.assert_index_equal(neither_boundary, expected_neither)\n\n def test_date_range_years_only(self, tz_naive_fixture):\n tz = tz_naive_fixture\n # GH#6961\n rng1 = date_range("2014", "2015", freq="ME", tz=tz)\n expected1 = date_range("2014-01-31", "2014-12-31", freq="ME", tz=tz)\n tm.assert_index_equal(rng1, expected1)\n\n rng2 = date_range("2014", "2015", freq="MS", tz=tz)\n expected2 = date_range("2014-01-01", "2015-01-01", freq="MS", 
tz=tz)\n tm.assert_index_equal(rng2, expected2)\n\n rng3 = date_range("2014", "2020", freq="YE", tz=tz)\n expected3 = date_range("2014-12-31", "2019-12-31", freq="YE", tz=tz)\n tm.assert_index_equal(rng3, expected3)\n\n rng4 = date_range("2014", "2020", freq="YS", tz=tz)\n expected4 = date_range("2014-01-01", "2020-01-01", freq="YS", tz=tz)\n tm.assert_index_equal(rng4, expected4)\n\n def test_freq_divides_end_in_nanos(self):\n # GH 10885\n result_1 = date_range("2005-01-12 10:00", "2005-01-12 16:00", freq="345min")\n result_2 = date_range("2005-01-13 10:00", "2005-01-13 16:00", freq="345min")\n expected_1 = DatetimeIndex(\n ["2005-01-12 10:00:00", "2005-01-12 15:45:00"],\n dtype="datetime64[ns]",\n freq="345min",\n tz=None,\n )\n expected_2 = DatetimeIndex(\n ["2005-01-13 10:00:00", "2005-01-13 15:45:00"],\n dtype="datetime64[ns]",\n freq="345min",\n tz=None,\n )\n tm.assert_index_equal(result_1, expected_1)\n tm.assert_index_equal(result_2, expected_2)\n\n def test_cached_range_bug(self):\n rng = date_range("2010-09-01 05:00:00", periods=50, freq=DateOffset(hours=6))\n assert len(rng) == 50\n assert rng[0] == datetime(2010, 9, 1, 5)\n\n def test_timezone_comparison_bug(self):\n # smoke test\n start = Timestamp("20130220 10:00", tz="US/Eastern")\n result = date_range(start, periods=2, tz="US/Eastern")\n assert len(result) == 2\n\n def test_timezone_comparison_assert(self):\n start = Timestamp("20130220 10:00", tz="US/Eastern")\n msg = "Inferred time zone not equal to passed time zone"\n with pytest.raises(AssertionError, match=msg):\n date_range(start, periods=2, tz="Europe/Berlin")\n\n def test_negative_non_tick_frequency_descending_dates(self, tz_aware_fixture):\n # GH 23270\n tz = tz_aware_fixture\n result = date_range(start="2011-06-01", end="2011-01-01", freq="-1MS", tz=tz)\n expected = date_range(end="2011-06-01", start="2011-01-01", freq="1MS", tz=tz)[\n ::-1\n ]\n tm.assert_index_equal(result, expected)\n\n def test_range_where_start_equal_end(self, 
inclusive_endpoints_fixture):\n # GH 43394\n start = "2021-09-02"\n end = "2021-09-02"\n result = date_range(\n start=start, end=end, freq="D", inclusive=inclusive_endpoints_fixture\n )\n\n both_range = date_range(start=start, end=end, freq="D", inclusive="both")\n if inclusive_endpoints_fixture == "neither":\n expected = both_range[1:-1]\n elif inclusive_endpoints_fixture in ("left", "right", "both"):\n expected = both_range[:]\n\n tm.assert_index_equal(result, expected)\n\n def test_freq_dateoffset_with_relateivedelta_nanos(self):\n # GH 46877\n freq = DateOffset(hours=10, days=57, nanoseconds=3)\n result = date_range(end="1970-01-01 00:00:00", periods=10, freq=freq, name="a")\n expected = DatetimeIndex(\n [\n "1968-08-02T05:59:59.999999973",\n "1968-09-28T15:59:59.999999976",\n "1968-11-25T01:59:59.999999979",\n "1969-01-21T11:59:59.999999982",\n "1969-03-19T21:59:59.999999985",\n "1969-05-16T07:59:59.999999988",\n "1969-07-12T17:59:59.999999991",\n "1969-09-08T03:59:59.999999994",\n "1969-11-04T13:59:59.999999997",\n "1970-01-01T00:00:00.000000000",\n ],\n name="a",\n )\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "freq,freq_depr",\n [\n ("h", "H"),\n ("2min", "2T"),\n ("1s", "1S"),\n ("2ms", "2L"),\n ("1us", "1U"),\n ("2ns", "2N"),\n ],\n )\n def test_frequencies_H_T_S_L_U_N_deprecated(self, freq, freq_depr):\n # GH#52536\n freq_msg = re.split("[0-9]*", freq, maxsplit=1)[1]\n freq_depr_msg = re.split("[0-9]*", freq_depr, maxsplit=1)[1]\n msg = (\n f"'{freq_depr_msg}' is deprecated and will be removed in a future version, "\n )\n f"please use '{freq_msg}' instead"\n\n expected = date_range("1/1/2000", periods=2, freq=freq)\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = date_range("1/1/2000", periods=2, freq=freq_depr)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "freq,freq_depr",\n [\n ("200YE", "200A"),\n ("YE", "Y"),\n ("2YE-MAY", "2A-MAY"),\n ("YE-MAY", "Y-MAY"),\n ],\n )\n 
def test_frequencies_A_deprecated_Y_renamed(self, freq, freq_depr):\n # GH#9586, GH#54275\n freq_msg = re.split("[0-9]*", freq, maxsplit=1)[1]\n freq_depr_msg = re.split("[0-9]*", freq_depr, maxsplit=1)[1]\n msg = f"'{freq_depr_msg}' is deprecated and will be removed "\n f"in a future version, please use '{freq_msg}' instead."\n\n expected = date_range("1/1/2000", periods=2, freq=freq)\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = date_range("1/1/2000", periods=2, freq=freq_depr)\n tm.assert_index_equal(result, expected)\n\n def test_to_offset_with_lowercase_deprecated_freq(self) -> None:\n # https://github.com/pandas-dev/pandas/issues/56847\n msg = (\n "'m' is deprecated and will be removed in a future version, please use "\n "'ME' instead."\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = date_range("2010-01-01", periods=2, freq="m")\n expected = DatetimeIndex(["2010-01-31", "2010-02-28"], freq="ME")\n tm.assert_index_equal(result, expected)\n\n def test_date_range_bday(self):\n sdate = datetime(1999, 12, 25)\n idx = date_range(start=sdate, freq="1B", periods=20)\n assert len(idx) == 20\n assert idx[0] == sdate + 0 * offsets.BDay()\n assert idx.freq == "B"\n\n\nclass TestDateRangeTZ:\n """Tests for date_range with timezones"""\n\n def test_hongkong_tz_convert(self):\n # GH#1673 smoke test\n dr = date_range("2012-01-01", "2012-01-10", freq="D", tz="Hongkong")\n\n # it works!\n dr.hour\n\n @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])\n def test_date_range_span_dst_transition(self, tzstr):\n # GH#1778\n\n # Standard -> Daylight Savings Time\n dr = date_range("03/06/2012 00:00", periods=200, freq="W-FRI", tz="US/Eastern")\n\n assert (dr.hour == 0).all()\n\n dr = date_range("2012-11-02", periods=10, tz=tzstr)\n result = dr.hour\n expected = pd.Index([0] * 10, dtype="int32")\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("tzstr", ["US/Eastern", 
"dateutil/US/Eastern"])\n def test_date_range_timezone_str_argument(self, tzstr):\n tz = timezones.maybe_get_tz(tzstr)\n result = date_range("1/1/2000", periods=10, tz=tzstr)\n expected = date_range("1/1/2000", periods=10, tz=tz)\n\n tm.assert_index_equal(result, expected)\n\n def test_date_range_with_fixed_tz(self):\n off = FixedOffset(420, "+07:00")\n start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)\n end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)\n rng = date_range(start=start, end=end)\n assert off == rng.tz\n\n rng2 = date_range(start, periods=len(rng), tz=off)\n tm.assert_index_equal(rng, rng2)\n\n rng3 = date_range("3/11/2012 05:00:00+07:00", "6/11/2012 05:00:00+07:00")\n assert (rng.values == rng3.values).all()\n\n def test_date_range_with_fixedoffset_noname(self):\n off = fixed_off_no_name\n start = datetime(2012, 3, 11, 5, 0, 0, tzinfo=off)\n end = datetime(2012, 6, 11, 5, 0, 0, tzinfo=off)\n rng = date_range(start=start, end=end)\n assert off == rng.tz\n\n idx = pd.Index([start, end])\n assert off == idx.tz\n\n @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])\n def test_date_range_with_tz(self, tzstr):\n stamp = Timestamp("3/11/2012 05:00", tz=tzstr)\n assert stamp.hour == 5\n\n rng = date_range("3/11/2012 04:00", periods=10, freq="h", tz=tzstr)\n\n assert stamp == rng[1]\n\n @pytest.mark.parametrize("tz", ["Europe/London", "dateutil/Europe/London"])\n def test_date_range_ambiguous_endpoint(self, tz):\n # construction with an ambiguous end-point\n # GH#11626\n\n with pytest.raises(pytz.AmbiguousTimeError, match="Cannot infer dst time"):\n date_range(\n "2013-10-26 23:00", "2013-10-27 01:00", tz="Europe/London", freq="h"\n )\n\n times = date_range(\n "2013-10-26 23:00", "2013-10-27 01:00", freq="h", tz=tz, ambiguous="infer"\n )\n assert times[0] == Timestamp("2013-10-26 23:00", tz=tz)\n assert times[-1] == Timestamp("2013-10-27 01:00:00+0000", tz=tz)\n\n @pytest.mark.parametrize(\n "tz, option, expected",\n [\n ["US/Pacific", 
"shift_forward", "2019-03-10 03:00"],\n ["dateutil/US/Pacific", "shift_forward", "2019-03-10 03:00"],\n ["US/Pacific", "shift_backward", "2019-03-10 01:00"],\n ["dateutil/US/Pacific", "shift_backward", "2019-03-10 01:00"],\n ["US/Pacific", timedelta(hours=1), "2019-03-10 03:00"],\n ],\n )\n def test_date_range_nonexistent_endpoint(self, tz, option, expected):\n # construction with an nonexistent end-point\n\n with pytest.raises(pytz.NonExistentTimeError, match="2019-03-10 02:00:00"):\n date_range(\n "2019-03-10 00:00", "2019-03-10 02:00", tz="US/Pacific", freq="h"\n )\n\n times = date_range(\n "2019-03-10 00:00", "2019-03-10 02:00", freq="h", tz=tz, nonexistent=option\n )\n assert times[-1] == Timestamp(expected, tz=tz)\n\n\nclass TestGenRangeGeneration:\n @pytest.mark.parametrize(\n "freqstr,offset",\n [\n ("B", BDay()),\n ("C", CDay()),\n ],\n )\n def test_generate(self, freqstr, offset):\n rng1 = list(generate_range(START, END, periods=None, offset=offset, unit="ns"))\n rng2 = list(generate_range(START, END, periods=None, offset=freqstr, unit="ns"))\n assert rng1 == rng2\n\n def test_1(self):\n rng = list(\n generate_range(\n start=datetime(2009, 3, 25),\n end=None,\n periods=2,\n offset=BDay(),\n unit="ns",\n )\n )\n expected = [datetime(2009, 3, 25), datetime(2009, 3, 26)]\n assert rng == expected\n\n def test_2(self):\n rng = list(\n generate_range(\n start=datetime(2008, 1, 1),\n end=datetime(2008, 1, 3),\n periods=None,\n offset=BDay(),\n unit="ns",\n )\n )\n expected = [datetime(2008, 1, 1), datetime(2008, 1, 2), datetime(2008, 1, 3)]\n assert rng == expected\n\n def test_3(self):\n rng = list(\n generate_range(\n start=datetime(2008, 1, 5),\n end=datetime(2008, 1, 6),\n periods=None,\n offset=BDay(),\n unit="ns",\n )\n )\n expected = []\n assert rng == expected\n\n def test_precision_finer_than_offset(self):\n # GH#9907\n result1 = date_range(\n start="2015-04-15 00:00:03", end="2016-04-22 00:00:00", freq="QE"\n )\n result2 = date_range(\n 
start="2015-04-15 00:00:03", end="2015-06-22 00:00:04", freq="W"\n )\n expected1_list = [\n "2015-06-30 00:00:03",\n "2015-09-30 00:00:03",\n "2015-12-31 00:00:03",\n "2016-03-31 00:00:03",\n ]\n expected2_list = [\n "2015-04-19 00:00:03",\n "2015-04-26 00:00:03",\n "2015-05-03 00:00:03",\n "2015-05-10 00:00:03",\n "2015-05-17 00:00:03",\n "2015-05-24 00:00:03",\n "2015-05-31 00:00:03",\n "2015-06-07 00:00:03",\n "2015-06-14 00:00:03",\n "2015-06-21 00:00:03",\n ]\n expected1 = DatetimeIndex(\n expected1_list, dtype="datetime64[ns]", freq="QE-DEC", tz=None\n )\n expected2 = DatetimeIndex(\n expected2_list, dtype="datetime64[ns]", freq="W-SUN", tz=None\n )\n tm.assert_index_equal(result1, expected1)\n tm.assert_index_equal(result2, expected2)\n\n dt1, dt2 = "2017-01-01", "2017-01-01"\n tz1, tz2 = "US/Eastern", "Europe/London"\n\n @pytest.mark.parametrize(\n "start,end",\n [\n (Timestamp(dt1, tz=tz1), Timestamp(dt2)),\n (Timestamp(dt1), Timestamp(dt2, tz=tz2)),\n (Timestamp(dt1, tz=tz1), Timestamp(dt2, tz=tz2)),\n (Timestamp(dt1, tz=tz2), Timestamp(dt2, tz=tz1)),\n ],\n )\n def test_mismatching_tz_raises_err(self, start, end):\n # issue 18488\n msg = "Start and end cannot both be tz-aware with different timezones"\n with pytest.raises(TypeError, match=msg):\n date_range(start, end)\n with pytest.raises(TypeError, match=msg):\n date_range(start, end, freq=BDay())\n\n\nclass TestBusinessDateRange:\n def test_constructor(self):\n bdate_range(START, END, freq=BDay())\n bdate_range(START, periods=20, freq=BDay())\n bdate_range(end=START, periods=20, freq=BDay())\n\n msg = "periods must be a number, got B"\n with pytest.raises(TypeError, match=msg):\n date_range("2011-1-1", "2012-1-1", "B")\n\n with pytest.raises(TypeError, match=msg):\n bdate_range("2011-1-1", "2012-1-1", "B")\n\n msg = "freq must be specified for bdate_range; use date_range instead"\n with pytest.raises(TypeError, match=msg):\n bdate_range(START, END, periods=10, freq=None)\n\n def test_misc(self):\n end 
= datetime(2009, 5, 13)\n dr = bdate_range(end=end, periods=20)\n firstDate = end - 19 * BDay()\n\n assert len(dr) == 20\n assert dr[0] == firstDate\n assert dr[-1] == end\n\n def test_date_parse_failure(self):\n badly_formed_date = "2007/100/1"\n\n msg = "Unknown datetime string format, unable to parse: 2007/100/1"\n with pytest.raises(ValueError, match=msg):\n Timestamp(badly_formed_date)\n\n with pytest.raises(ValueError, match=msg):\n bdate_range(start=badly_formed_date, periods=10)\n\n with pytest.raises(ValueError, match=msg):\n bdate_range(end=badly_formed_date, periods=10)\n\n with pytest.raises(ValueError, match=msg):\n bdate_range(badly_formed_date, badly_formed_date)\n\n def test_daterange_bug_456(self):\n # GH #456\n rng1 = bdate_range("12/5/2011", "12/5/2011")\n rng2 = bdate_range("12/2/2011", "12/5/2011")\n assert rng2._data.freq == BDay()\n\n result = rng1.union(rng2)\n assert isinstance(result, DatetimeIndex)\n\n @pytest.mark.parametrize("inclusive", ["left", "right", "neither", "both"])\n def test_bdays_and_open_boundaries(self, inclusive):\n # GH 6673\n start = "2018-07-21" # Saturday\n end = "2018-07-29" # Sunday\n result = date_range(start, end, freq="B", inclusive=inclusive)\n\n bday_start = "2018-07-23" # Monday\n bday_end = "2018-07-27" # Friday\n expected = date_range(bday_start, bday_end, freq="D")\n tm.assert_index_equal(result, expected)\n # Note: we do _not_ expect the freqs to match here\n\n def test_bday_near_overflow(self):\n # GH#24252 avoid doing unnecessary addition that _would_ overflow\n start = Timestamp.max.floor("D").to_pydatetime()\n rng = date_range(start, end=None, periods=1, freq="B")\n expected = DatetimeIndex([start], freq="B").as_unit("ns")\n tm.assert_index_equal(rng, expected)\n\n def test_bday_overflow_error(self):\n # GH#24252 check that we get OutOfBoundsDatetime and not OverflowError\n msg = "Out of bounds nanosecond timestamp"\n start = Timestamp.max.floor("D").to_pydatetime()\n with 
pytest.raises(OutOfBoundsDatetime, match=msg):\n date_range(start, periods=2, freq="B")\n\n\nclass TestCustomDateRange:\n def test_constructor(self):\n bdate_range(START, END, freq=CDay())\n bdate_range(START, periods=20, freq=CDay())\n bdate_range(end=START, periods=20, freq=CDay())\n\n msg = "periods must be a number, got C"\n with pytest.raises(TypeError, match=msg):\n date_range("2011-1-1", "2012-1-1", "C")\n\n with pytest.raises(TypeError, match=msg):\n bdate_range("2011-1-1", "2012-1-1", "C")\n\n def test_misc(self):\n end = datetime(2009, 5, 13)\n dr = bdate_range(end=end, periods=20, freq="C")\n firstDate = end - 19 * CDay()\n\n assert len(dr) == 20\n assert dr[0] == firstDate\n assert dr[-1] == end\n\n def test_daterange_bug_456(self):\n # GH #456\n rng1 = bdate_range("12/5/2011", "12/5/2011", freq="C")\n rng2 = bdate_range("12/2/2011", "12/5/2011", freq="C")\n assert rng2._data.freq == CDay()\n\n result = rng1.union(rng2)\n assert isinstance(result, DatetimeIndex)\n\n def test_cdaterange(self, unit):\n result = bdate_range("2013-05-01", periods=3, freq="C", unit=unit)\n expected = DatetimeIndex(\n ["2013-05-01", "2013-05-02", "2013-05-03"], dtype=f"M8[{unit}]", freq="C"\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n def test_cdaterange_weekmask(self, unit):\n result = bdate_range(\n "2013-05-01", periods=3, freq="C", weekmask="Sun Mon Tue Wed Thu", unit=unit\n )\n expected = DatetimeIndex(\n ["2013-05-01", "2013-05-02", "2013-05-05"],\n dtype=f"M8[{unit}]",\n freq=result.freq,\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n # raise with non-custom freq\n msg = (\n "a custom frequency string is required when holidays or "\n "weekmask are passed, got frequency B"\n )\n with pytest.raises(ValueError, match=msg):\n bdate_range("2013-05-01", periods=3, weekmask="Sun Mon Tue Wed Thu")\n\n def test_cdaterange_holidays(self, unit):\n result = bdate_range(\n "2013-05-01", periods=3, 
freq="C", holidays=["2013-05-01"], unit=unit\n )\n expected = DatetimeIndex(\n ["2013-05-02", "2013-05-03", "2013-05-06"],\n dtype=f"M8[{unit}]",\n freq=result.freq,\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n # raise with non-custom freq\n msg = (\n "a custom frequency string is required when holidays or "\n "weekmask are passed, got frequency B"\n )\n with pytest.raises(ValueError, match=msg):\n bdate_range("2013-05-01", periods=3, holidays=["2013-05-01"])\n\n def test_cdaterange_weekmask_and_holidays(self, unit):\n result = bdate_range(\n "2013-05-01",\n periods=3,\n freq="C",\n weekmask="Sun Mon Tue Wed Thu",\n holidays=["2013-05-01"],\n unit=unit,\n )\n expected = DatetimeIndex(\n ["2013-05-02", "2013-05-05", "2013-05-06"],\n dtype=f"M8[{unit}]",\n freq=result.freq,\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n def test_cdaterange_holidays_weekmask_requires_freqstr(self):\n # raise with non-custom freq\n msg = (\n "a custom frequency string is required when holidays or "\n "weekmask are passed, got frequency B"\n )\n with pytest.raises(ValueError, match=msg):\n bdate_range(\n "2013-05-01",\n periods=3,\n weekmask="Sun Mon Tue Wed Thu",\n holidays=["2013-05-01"],\n )\n\n @pytest.mark.parametrize(\n "freq", [freq for freq in prefix_mapping if freq.startswith("C")]\n )\n def test_all_custom_freq(self, freq):\n # should not raise\n bdate_range(\n START, END, freq=freq, weekmask="Mon Wed Fri", holidays=["2009-03-14"]\n )\n\n bad_freq = freq + "FOO"\n msg = f"invalid custom frequency string: {bad_freq}"\n with pytest.raises(ValueError, match=msg):\n bdate_range(START, END, freq=bad_freq)\n\n @pytest.mark.parametrize(\n "start_end",\n [\n ("2018-01-01T00:00:01.000Z", "2018-01-03T00:00:01.000Z"),\n ("2018-01-01T00:00:00.010Z", "2018-01-03T00:00:00.010Z"),\n ("2001-01-01T00:00:00.010Z", "2001-01-03T00:00:00.010Z"),\n ],\n )\n def test_range_with_millisecond_resolution(self, 
start_end):\n # https://github.com/pandas-dev/pandas/issues/24110\n start, end = start_end\n result = date_range(start=start, end=end, periods=2, inclusive="left")\n expected = DatetimeIndex([start], dtype="M8[ns, UTC]")\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "start,period,expected",\n [\n ("2022-07-23 00:00:00+02:00", 1, ["2022-07-25 00:00:00+02:00"]),\n ("2022-07-22 00:00:00+02:00", 1, ["2022-07-22 00:00:00+02:00"]),\n (\n "2022-07-22 00:00:00+02:00",\n 2,\n ["2022-07-22 00:00:00+02:00", "2022-07-25 00:00:00+02:00"],\n ),\n ],\n )\n def test_range_with_timezone_and_custombusinessday(self, start, period, expected):\n # GH49441\n result = date_range(start=start, periods=period, freq="C")\n expected = DatetimeIndex(expected).as_unit("ns")\n tm.assert_index_equal(result, expected)\n\n\nclass TestDateRangeNonNano:\n def test_date_range_reso_validation(self):\n msg = "'unit' must be one of 's', 'ms', 'us', 'ns'"\n with pytest.raises(ValueError, match=msg):\n date_range("2016-01-01", "2016-03-04", periods=3, unit="h")\n\n def test_date_range_freq_higher_than_reso(self):\n # freq being higher-resolution than reso is a problem\n msg = "Use a lower freq or a higher unit instead"\n with pytest.raises(ValueError, match=msg):\n # # TODO give a more useful or informative message?\n date_range("2016-01-01", "2016-01-02", freq="ns", unit="ms")\n\n def test_date_range_freq_matches_reso(self):\n # GH#49106 matching reso is OK\n dti = date_range("2016-01-01", "2016-01-01 00:00:01", freq="ms", unit="ms")\n rng = np.arange(1_451_606_400_000, 1_451_606_401_001, dtype=np.int64)\n expected = DatetimeIndex(rng.view("M8[ms]"), freq="ms")\n tm.assert_index_equal(dti, expected)\n\n dti = date_range("2016-01-01", "2016-01-01 00:00:01", freq="us", unit="us")\n rng = np.arange(1_451_606_400_000_000, 1_451_606_401_000_001, dtype=np.int64)\n expected = DatetimeIndex(rng.view("M8[us]"), freq="us")\n tm.assert_index_equal(dti, expected)\n\n dti = 
date_range("2016-01-01", "2016-01-01 00:00:00.001", freq="ns", unit="ns")\n rng = np.arange(\n 1_451_606_400_000_000_000, 1_451_606_400_001_000_001, dtype=np.int64\n )\n expected = DatetimeIndex(rng.view("M8[ns]"), freq="ns")\n tm.assert_index_equal(dti, expected)\n\n def test_date_range_freq_lower_than_endpoints(self):\n start = Timestamp("2022-10-19 11:50:44.719781")\n end = Timestamp("2022-10-19 11:50:47.066458")\n\n # start and end cannot be cast to "s" unit without lossy rounding,\n # so we do not allow this in date_range\n with pytest.raises(ValueError, match="Cannot losslessly convert units"):\n date_range(start, end, periods=3, unit="s")\n\n # but we can losslessly cast to "us"\n dti = date_range(start, end, periods=2, unit="us")\n rng = np.array(\n [start.as_unit("us")._value, end.as_unit("us")._value], dtype=np.int64\n )\n expected = DatetimeIndex(rng.view("M8[us]"))\n tm.assert_index_equal(dti, expected)\n\n def test_date_range_non_nano(self):\n start = np.datetime64("1066-10-14") # Battle of Hastings\n end = np.datetime64("2305-07-13") # Jean-Luc Picard's birthday\n\n dti = date_range(start, end, freq="D", unit="s")\n assert dti.freq == "D"\n assert dti.dtype == "M8[s]"\n\n exp = np.arange(\n start.astype("M8[s]").view("i8"),\n (end + 1).astype("M8[s]").view("i8"),\n 24 * 3600,\n ).view("M8[s]")\n\n tm.assert_numpy_array_equal(dti.to_numpy(), exp)\n\n\nclass TestDateRangeNonTickFreq:\n # Tests revolving around less-common (non-Tick) `freq` keywords.\n\n def test_date_range_custom_business_month_begin(self, unit):\n hcal = USFederalHolidayCalendar()\n freq = offsets.CBMonthBegin(calendar=hcal)\n dti = date_range(start="20120101", end="20130101", freq=freq, unit=unit)\n assert all(freq.is_on_offset(x) for x in dti)\n\n expected = DatetimeIndex(\n [\n "2012-01-03",\n "2012-02-01",\n "2012-03-01",\n "2012-04-02",\n "2012-05-01",\n "2012-06-01",\n "2012-07-02",\n "2012-08-01",\n "2012-09-04",\n "2012-10-01",\n "2012-11-01",\n "2012-12-03",\n ],\n 
dtype=f"M8[{unit}]",\n freq=freq,\n )\n tm.assert_index_equal(dti, expected)\n\n def test_date_range_custom_business_month_end(self, unit):\n hcal = USFederalHolidayCalendar()\n freq = offsets.CBMonthEnd(calendar=hcal)\n dti = date_range(start="20120101", end="20130101", freq=freq, unit=unit)\n assert all(freq.is_on_offset(x) for x in dti)\n\n expected = DatetimeIndex(\n [\n "2012-01-31",\n "2012-02-29",\n "2012-03-30",\n "2012-04-30",\n "2012-05-31",\n "2012-06-29",\n "2012-07-31",\n "2012-08-31",\n "2012-09-28",\n "2012-10-31",\n "2012-11-30",\n "2012-12-31",\n ],\n dtype=f"M8[{unit}]",\n freq=freq,\n )\n tm.assert_index_equal(dti, expected)\n\n def test_date_range_with_custom_holidays(self, unit):\n # GH#30593\n freq = offsets.CustomBusinessHour(start="15:00", holidays=["2020-11-26"])\n result = date_range(start="2020-11-25 15:00", periods=4, freq=freq, unit=unit)\n expected = DatetimeIndex(\n [\n "2020-11-25 15:00:00",\n "2020-11-25 16:00:00",\n "2020-11-27 15:00:00",\n "2020-11-27 16:00:00",\n ],\n dtype=f"M8[{unit}]",\n freq=freq,\n )\n tm.assert_index_equal(result, expected)\n\n def test_date_range_businesshour(self, unit):\n idx = DatetimeIndex(\n [\n "2014-07-04 09:00",\n "2014-07-04 10:00",\n "2014-07-04 11:00",\n "2014-07-04 12:00",\n "2014-07-04 13:00",\n "2014-07-04 14:00",\n "2014-07-04 15:00",\n "2014-07-04 16:00",\n ],\n dtype=f"M8[{unit}]",\n freq="bh",\n )\n rng = date_range("2014-07-04 09:00", "2014-07-04 16:00", freq="bh", unit=unit)\n tm.assert_index_equal(idx, rng)\n\n idx = DatetimeIndex(\n ["2014-07-04 16:00", "2014-07-07 09:00"], dtype=f"M8[{unit}]", freq="bh"\n )\n rng = date_range("2014-07-04 16:00", "2014-07-07 09:00", freq="bh", unit=unit)\n tm.assert_index_equal(idx, rng)\n\n idx = DatetimeIndex(\n [\n "2014-07-04 09:00",\n "2014-07-04 10:00",\n "2014-07-04 11:00",\n "2014-07-04 12:00",\n "2014-07-04 13:00",\n "2014-07-04 14:00",\n "2014-07-04 15:00",\n "2014-07-04 16:00",\n "2014-07-07 09:00",\n "2014-07-07 10:00",\n "2014-07-07 
11:00",\n "2014-07-07 12:00",\n "2014-07-07 13:00",\n "2014-07-07 14:00",\n "2014-07-07 15:00",\n "2014-07-07 16:00",\n "2014-07-08 09:00",\n "2014-07-08 10:00",\n "2014-07-08 11:00",\n "2014-07-08 12:00",\n "2014-07-08 13:00",\n "2014-07-08 14:00",\n "2014-07-08 15:00",\n "2014-07-08 16:00",\n ],\n dtype=f"M8[{unit}]",\n freq="bh",\n )\n rng = date_range("2014-07-04 09:00", "2014-07-08 16:00", freq="bh", unit=unit)\n tm.assert_index_equal(idx, rng)\n\n def test_date_range_business_hour2(self, unit):\n idx1 = date_range(\n start="2014-07-04 15:00", end="2014-07-08 10:00", freq="bh", unit=unit\n )\n idx2 = date_range(start="2014-07-04 15:00", periods=12, freq="bh", unit=unit)\n idx3 = date_range(end="2014-07-08 10:00", periods=12, freq="bh", unit=unit)\n expected = DatetimeIndex(\n [\n "2014-07-04 15:00",\n "2014-07-04 16:00",\n "2014-07-07 09:00",\n "2014-07-07 10:00",\n "2014-07-07 11:00",\n "2014-07-07 12:00",\n "2014-07-07 13:00",\n "2014-07-07 14:00",\n "2014-07-07 15:00",\n "2014-07-07 16:00",\n "2014-07-08 09:00",\n "2014-07-08 10:00",\n ],\n dtype=f"M8[{unit}]",\n freq="bh",\n )\n tm.assert_index_equal(idx1, expected)\n tm.assert_index_equal(idx2, expected)\n tm.assert_index_equal(idx3, expected)\n\n idx4 = date_range(\n start="2014-07-04 15:45", end="2014-07-08 10:45", freq="bh", unit=unit\n )\n idx5 = date_range(start="2014-07-04 15:45", periods=12, freq="bh", unit=unit)\n idx6 = date_range(end="2014-07-08 10:45", periods=12, freq="bh", unit=unit)\n\n expected2 = expected + Timedelta(minutes=45).as_unit(unit)\n expected2.freq = "bh"\n tm.assert_index_equal(idx4, expected2)\n tm.assert_index_equal(idx5, expected2)\n tm.assert_index_equal(idx6, expected2)\n\n def test_date_range_business_hour_short(self, unit):\n # GH#49835\n idx4 = date_range(start="2014-07-01 10:00", freq="bh", periods=1, unit=unit)\n expected4 = DatetimeIndex(["2014-07-01 10:00"], dtype=f"M8[{unit}]", freq="bh")\n tm.assert_index_equal(idx4, expected4)\n\n def 
test_date_range_year_start(self, unit):\n # see GH#9313\n rng = date_range("1/1/2013", "7/1/2017", freq="YS", unit=unit)\n exp = DatetimeIndex(\n ["2013-01-01", "2014-01-01", "2015-01-01", "2016-01-01", "2017-01-01"],\n dtype=f"M8[{unit}]",\n freq="YS",\n )\n tm.assert_index_equal(rng, exp)\n\n def test_date_range_year_end(self, unit):\n # see GH#9313\n rng = date_range("1/1/2013", "7/1/2017", freq="YE", unit=unit)\n exp = DatetimeIndex(\n ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-31"],\n dtype=f"M8[{unit}]",\n freq="YE",\n )\n tm.assert_index_equal(rng, exp)\n\n def test_date_range_negative_freq_year_end(self, unit):\n # GH#11018\n rng = date_range("2011-12-31", freq="-2YE", periods=3, unit=unit)\n exp = DatetimeIndex(\n ["2011-12-31", "2009-12-31", "2007-12-31"], dtype=f"M8[{unit}]", freq="-2YE"\n )\n tm.assert_index_equal(rng, exp)\n assert rng.freq == "-2YE"\n\n def test_date_range_business_year_end_year(self, unit):\n # see GH#9313\n rng = date_range("1/1/2013", "7/1/2017", freq="BYE", unit=unit)\n exp = DatetimeIndex(\n ["2013-12-31", "2014-12-31", "2015-12-31", "2016-12-30"],\n dtype=f"M8[{unit}]",\n freq="BYE",\n )\n tm.assert_index_equal(rng, exp)\n\n def test_date_range_bms(self, unit):\n # GH#1645\n result = date_range("1/1/2000", periods=10, freq="BMS", unit=unit)\n\n expected = DatetimeIndex(\n [\n "2000-01-03",\n "2000-02-01",\n "2000-03-01",\n "2000-04-03",\n "2000-05-01",\n "2000-06-01",\n "2000-07-03",\n "2000-08-01",\n "2000-09-01",\n "2000-10-02",\n ],\n dtype=f"M8[{unit}]",\n freq="BMS",\n )\n tm.assert_index_equal(result, expected)\n\n def test_date_range_semi_month_begin(self, unit):\n dates = [\n datetime(2007, 12, 15),\n datetime(2008, 1, 1),\n datetime(2008, 1, 15),\n datetime(2008, 2, 1),\n datetime(2008, 2, 15),\n datetime(2008, 3, 1),\n datetime(2008, 3, 15),\n datetime(2008, 4, 1),\n datetime(2008, 4, 15),\n datetime(2008, 5, 1),\n datetime(2008, 5, 15),\n datetime(2008, 6, 1),\n datetime(2008, 6, 15),\n datetime(2008, 7, 
1),\n datetime(2008, 7, 15),\n datetime(2008, 8, 1),\n datetime(2008, 8, 15),\n datetime(2008, 9, 1),\n datetime(2008, 9, 15),\n datetime(2008, 10, 1),\n datetime(2008, 10, 15),\n datetime(2008, 11, 1),\n datetime(2008, 11, 15),\n datetime(2008, 12, 1),\n datetime(2008, 12, 15),\n ]\n # ensure generating a range with DatetimeIndex gives same result\n result = date_range(start=dates[0], end=dates[-1], freq="SMS", unit=unit)\n exp = DatetimeIndex(dates, dtype=f"M8[{unit}]", freq="SMS")\n tm.assert_index_equal(result, exp)\n\n def test_date_range_semi_month_end(self, unit):\n dates = [\n datetime(2007, 12, 31),\n datetime(2008, 1, 15),\n datetime(2008, 1, 31),\n datetime(2008, 2, 15),\n datetime(2008, 2, 29),\n datetime(2008, 3, 15),\n datetime(2008, 3, 31),\n datetime(2008, 4, 15),\n datetime(2008, 4, 30),\n datetime(2008, 5, 15),\n datetime(2008, 5, 31),\n datetime(2008, 6, 15),\n datetime(2008, 6, 30),\n datetime(2008, 7, 15),\n datetime(2008, 7, 31),\n datetime(2008, 8, 15),\n datetime(2008, 8, 31),\n datetime(2008, 9, 15),\n datetime(2008, 9, 30),\n datetime(2008, 10, 15),\n datetime(2008, 10, 31),\n datetime(2008, 11, 15),\n datetime(2008, 11, 30),\n datetime(2008, 12, 15),\n datetime(2008, 12, 31),\n ]\n # ensure generating a range with DatetimeIndex gives same result\n result = date_range(start=dates[0], end=dates[-1], freq="SME", unit=unit)\n exp = DatetimeIndex(dates, dtype=f"M8[{unit}]", freq="SME")\n tm.assert_index_equal(result, exp)\n\n def test_date_range_week_of_month(self, unit):\n # GH#20517\n # Note the start here is not on_offset for this freq\n result = date_range(start="20110101", periods=1, freq="WOM-1MON", unit=unit)\n expected = DatetimeIndex(["2011-01-03"], dtype=f"M8[{unit}]", freq="WOM-1MON")\n tm.assert_index_equal(result, expected)\n\n result2 = date_range(start="20110101", periods=2, freq="WOM-1MON", unit=unit)\n expected2 = DatetimeIndex(\n ["2011-01-03", "2011-02-07"], dtype=f"M8[{unit}]", freq="WOM-1MON"\n )\n 
tm.assert_index_equal(result2, expected2)\n\n def test_date_range_week_of_month2(self, unit):\n # GH#5115, GH#5348\n result = date_range("2013-1-1", periods=4, freq="WOM-1SAT", unit=unit)\n expected = DatetimeIndex(\n ["2013-01-05", "2013-02-02", "2013-03-02", "2013-04-06"],\n dtype=f"M8[{unit}]",\n freq="WOM-1SAT",\n )\n tm.assert_index_equal(result, expected)\n\n def test_date_range_negative_freq_month_end(self, unit):\n # GH#11018\n rng = date_range("2011-01-31", freq="-2ME", periods=3, unit=unit)\n exp = DatetimeIndex(\n ["2011-01-31", "2010-11-30", "2010-09-30"], dtype=f"M8[{unit}]", freq="-2ME"\n )\n tm.assert_index_equal(rng, exp)\n assert rng.freq == "-2ME"\n\n def test_date_range_fy5253(self, unit):\n freq = offsets.FY5253(startingMonth=1, weekday=3, variation="nearest")\n dti = date_range(\n start="2013-01-01",\n periods=2,\n freq=freq,\n unit=unit,\n )\n expected = DatetimeIndex(\n ["2013-01-31", "2014-01-30"], dtype=f"M8[{unit}]", freq=freq\n )\n\n tm.assert_index_equal(dti, expected)\n\n @pytest.mark.parametrize(\n "freqstr,offset",\n [\n ("QS", offsets.QuarterBegin(startingMonth=1)),\n ("BQE", offsets.BQuarterEnd(startingMonth=12)),\n ("W-SUN", offsets.Week(weekday=6)),\n ],\n )\n def test_date_range_freqstr_matches_offset(self, freqstr, offset):\n sdate = datetime(1999, 12, 25)\n edate = datetime(2000, 1, 1)\n\n idx1 = date_range(start=sdate, end=edate, freq=freqstr)\n idx2 = date_range(start=sdate, end=edate, freq=offset)\n assert len(idx1) == len(idx2)\n assert idx1.freq == idx2.freq\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_date_range.py | test_date_range.py | Python | 61,363 | 0.75 | 0.0767 | 0.062288 | react-lib | 519 | 2024-02-10T06:43:42.227323 | GPL-3.0 | true | bfc03c538a4943f47b4a042f86143087 |
from datetime import datetime\n\nimport dateutil.tz\nimport numpy as np\nimport pytest\nimport pytz\n\nimport pandas as pd\nfrom pandas import (\n DatetimeIndex,\n NaT,\n Series,\n)\nimport pandas._testing as tm\n\n\n@pytest.fixture(params=["s", "ms", "us", "ns"])\ndef unit(request):\n return request.param\n\n\ndef test_get_values_for_csv():\n index = pd.date_range(freq="1D", periods=3, start="2017-01-01")\n\n # First, with no arguments.\n expected = np.array(["2017-01-01", "2017-01-02", "2017-01-03"], dtype=object)\n\n result = index._get_values_for_csv()\n tm.assert_numpy_array_equal(result, expected)\n\n # No NaN values, so na_rep has no effect\n result = index._get_values_for_csv(na_rep="pandas")\n tm.assert_numpy_array_equal(result, expected)\n\n # Make sure date formatting works\n expected = np.array(["01-2017-01", "01-2017-02", "01-2017-03"], dtype=object)\n\n result = index._get_values_for_csv(date_format="%m-%Y-%d")\n tm.assert_numpy_array_equal(result, expected)\n\n # NULL object handling should work\n index = DatetimeIndex(["2017-01-01", NaT, "2017-01-03"])\n expected = np.array(["2017-01-01", "NaT", "2017-01-03"], dtype=object)\n\n result = index._get_values_for_csv(na_rep="NaT")\n tm.assert_numpy_array_equal(result, expected)\n\n expected = np.array(["2017-01-01", "pandas", "2017-01-03"], dtype=object)\n\n result = index._get_values_for_csv(na_rep="pandas")\n tm.assert_numpy_array_equal(result, expected)\n\n result = index._get_values_for_csv(na_rep="NaT", date_format="%Y-%m-%d %H:%M:%S.%f")\n expected = np.array(\n ["2017-01-01 00:00:00.000000", "NaT", "2017-01-03 00:00:00.000000"],\n dtype=object,\n )\n tm.assert_numpy_array_equal(result, expected)\n\n # invalid format\n result = index._get_values_for_csv(na_rep="NaT", date_format="foo")\n expected = np.array(["foo", "NaT", "foo"], dtype=object)\n tm.assert_numpy_array_equal(result, expected)\n\n\nclass TestDatetimeIndexRendering:\n @pytest.mark.parametrize("tzstr", ["US/Eastern", 
"dateutil/US/Eastern"])\n def test_dti_with_timezone_repr(self, tzstr):\n rng = pd.date_range("4/13/2010", "5/6/2010")\n\n rng_eastern = rng.tz_localize(tzstr)\n\n rng_repr = repr(rng_eastern)\n assert "2010-04-13 00:00:00" in rng_repr\n\n def test_dti_repr_dates(self):\n text = str(pd.to_datetime([datetime(2013, 1, 1), datetime(2014, 1, 1)]))\n assert "['2013-01-01'," in text\n assert ", '2014-01-01']" in text\n\n def test_dti_repr_mixed(self):\n text = str(\n pd.to_datetime(\n [datetime(2013, 1, 1), datetime(2014, 1, 1, 12), datetime(2014, 1, 1)]\n )\n )\n assert "'2013-01-01 00:00:00'," in text\n assert "'2014-01-01 00:00:00']" in text\n\n def test_dti_repr_short(self):\n dr = pd.date_range(start="1/1/2012", periods=1)\n repr(dr)\n\n dr = pd.date_range(start="1/1/2012", periods=2)\n repr(dr)\n\n dr = pd.date_range(start="1/1/2012", periods=3)\n repr(dr)\n\n @pytest.mark.parametrize(\n "dates, freq, expected_repr",\n [\n (\n ["2012-01-01 00:00:00"],\n "60min",\n (\n "DatetimeIndex(['2012-01-01 00:00:00'], "\n "dtype='datetime64[ns]', freq='60min')"\n ),\n ),\n (\n ["2012-01-01 00:00:00", "2012-01-01 01:00:00"],\n "60min",\n "DatetimeIndex(['2012-01-01 00:00:00', '2012-01-01 01:00:00'], "\n "dtype='datetime64[ns]', freq='60min')",\n ),\n (\n ["2012-01-01"],\n "24h",\n "DatetimeIndex(['2012-01-01'], dtype='datetime64[ns]', freq='24h')",\n ),\n ],\n )\n def test_dti_repr_time_midnight(self, dates, freq, expected_repr, unit):\n # GH53634\n dti = DatetimeIndex(dates, freq).as_unit(unit)\n actual_repr = repr(dti)\n assert actual_repr == expected_repr.replace("[ns]", f"[{unit}]")\n\n def test_dti_representation(self, unit):\n idxs = []\n idxs.append(DatetimeIndex([], freq="D"))\n idxs.append(DatetimeIndex(["2011-01-01"], freq="D"))\n idxs.append(DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D"))\n idxs.append(DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D"))\n idxs.append(\n DatetimeIndex(\n ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 
11:00"],\n freq="h",\n tz="Asia/Tokyo",\n )\n )\n idxs.append(\n DatetimeIndex(\n ["2011-01-01 09:00", "2011-01-01 10:00", NaT], tz="US/Eastern"\n )\n )\n idxs.append(\n DatetimeIndex(["2011-01-01 09:00", "2011-01-01 10:00", NaT], tz="UTC")\n )\n\n exp = []\n exp.append("DatetimeIndex([], dtype='datetime64[ns]', freq='D')")\n exp.append("DatetimeIndex(['2011-01-01'], dtype='datetime64[ns]', freq='D')")\n exp.append(\n "DatetimeIndex(['2011-01-01', '2011-01-02'], "\n "dtype='datetime64[ns]', freq='D')"\n )\n exp.append(\n "DatetimeIndex(['2011-01-01', '2011-01-02', '2011-01-03'], "\n "dtype='datetime64[ns]', freq='D')"\n )\n exp.append(\n "DatetimeIndex(['2011-01-01 09:00:00+09:00', "\n "'2011-01-01 10:00:00+09:00', '2011-01-01 11:00:00+09:00']"\n ", dtype='datetime64[ns, Asia/Tokyo]', freq='h')"\n )\n exp.append(\n "DatetimeIndex(['2011-01-01 09:00:00-05:00', "\n "'2011-01-01 10:00:00-05:00', 'NaT'], "\n "dtype='datetime64[ns, US/Eastern]', freq=None)"\n )\n exp.append(\n "DatetimeIndex(['2011-01-01 09:00:00+00:00', "\n "'2011-01-01 10:00:00+00:00', 'NaT'], "\n "dtype='datetime64[ns, UTC]', freq=None)"\n ""\n )\n\n with pd.option_context("display.width", 300):\n for index, expected in zip(idxs, exp):\n index = index.as_unit(unit)\n expected = expected.replace("[ns", f"[{unit}")\n result = repr(index)\n assert result == expected\n result = str(index)\n assert result == expected\n\n # TODO: this is a Series.__repr__ test\n def test_dti_representation_to_series(self, unit):\n idx1 = DatetimeIndex([], freq="D")\n idx2 = DatetimeIndex(["2011-01-01"], freq="D")\n idx3 = DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")\n idx4 = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D")\n idx5 = DatetimeIndex(\n ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],\n freq="h",\n tz="Asia/Tokyo",\n )\n idx6 = DatetimeIndex(\n ["2011-01-01 09:00", "2011-01-01 10:00", NaT], tz="US/Eastern"\n )\n idx7 = DatetimeIndex(["2011-01-01 09:00", "2011-01-02 
10:15"])\n\n exp1 = """Series([], dtype: datetime64[ns])"""\n\n exp2 = "0 2011-01-01\ndtype: datetime64[ns]"\n\n exp3 = "0 2011-01-01\n1 2011-01-02\ndtype: datetime64[ns]"\n\n exp4 = (\n "0 2011-01-01\n"\n "1 2011-01-02\n"\n "2 2011-01-03\n"\n "dtype: datetime64[ns]"\n )\n\n exp5 = (\n "0 2011-01-01 09:00:00+09:00\n"\n "1 2011-01-01 10:00:00+09:00\n"\n "2 2011-01-01 11:00:00+09:00\n"\n "dtype: datetime64[ns, Asia/Tokyo]"\n )\n\n exp6 = (\n "0 2011-01-01 09:00:00-05:00\n"\n "1 2011-01-01 10:00:00-05:00\n"\n "2 NaT\n"\n "dtype: datetime64[ns, US/Eastern]"\n )\n\n exp7 = (\n "0 2011-01-01 09:00:00\n"\n "1 2011-01-02 10:15:00\n"\n "dtype: datetime64[ns]"\n )\n\n with pd.option_context("display.width", 300):\n for idx, expected in zip(\n [idx1, idx2, idx3, idx4, idx5, idx6, idx7],\n [exp1, exp2, exp3, exp4, exp5, exp6, exp7],\n ):\n ser = Series(idx.as_unit(unit))\n result = repr(ser)\n assert result == expected.replace("[ns", f"[{unit}")\n\n def test_dti_summary(self):\n # GH#9116\n idx1 = DatetimeIndex([], freq="D")\n idx2 = DatetimeIndex(["2011-01-01"], freq="D")\n idx3 = DatetimeIndex(["2011-01-01", "2011-01-02"], freq="D")\n idx4 = DatetimeIndex(["2011-01-01", "2011-01-02", "2011-01-03"], freq="D")\n idx5 = DatetimeIndex(\n ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"],\n freq="h",\n tz="Asia/Tokyo",\n )\n idx6 = DatetimeIndex(\n ["2011-01-01 09:00", "2011-01-01 10:00", NaT], tz="US/Eastern"\n )\n\n exp1 = "DatetimeIndex: 0 entries\nFreq: D"\n\n exp2 = "DatetimeIndex: 1 entries, 2011-01-01 to 2011-01-01\nFreq: D"\n\n exp3 = "DatetimeIndex: 2 entries, 2011-01-01 to 2011-01-02\nFreq: D"\n\n exp4 = "DatetimeIndex: 3 entries, 2011-01-01 to 2011-01-03\nFreq: D"\n\n exp5 = (\n "DatetimeIndex: 3 entries, 2011-01-01 09:00:00+09:00 "\n "to 2011-01-01 11:00:00+09:00\n"\n "Freq: h"\n )\n\n exp6 = """DatetimeIndex: 3 entries, 2011-01-01 09:00:00-05:00 to NaT"""\n\n for idx, expected in zip(\n [idx1, idx2, idx3, idx4, idx5, idx6], [exp1, exp2, exp3, exp4, exp5, 
exp6]\n ):\n result = idx._summary()\n assert result == expected\n\n @pytest.mark.parametrize("tz", [None, pytz.utc, dateutil.tz.tzutc()])\n @pytest.mark.parametrize("freq", ["B", "C"])\n def test_dti_business_repr_etc_smoke(self, tz, freq):\n # only really care that it works\n dti = pd.bdate_range(\n datetime(2009, 1, 1), datetime(2010, 1, 1), tz=tz, freq=freq\n )\n repr(dti)\n dti._summary()\n dti[2:2]._summary()\n\n\nclass TestFormat:\n def test_format(self):\n # GH#35439\n idx = pd.date_range("20130101", periods=5)\n expected = [f"{x:%Y-%m-%d}" for x in idx]\n msg = r"DatetimeIndex\.format is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n assert idx.format() == expected\n\n def test_format_with_name_time_info(self):\n # bug I fixed 12/20/2011\n dates = pd.date_range("2011-01-01 04:00:00", periods=10, name="something")\n\n msg = "DatetimeIndex.format is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n formatted = dates.format(name=True)\n assert formatted[0] == "something"\n\n def test_format_datetime_with_time(self):\n dti = DatetimeIndex([datetime(2012, 2, 7), datetime(2012, 2, 7, 23)])\n\n msg = "DatetimeIndex.format is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = dti.format()\n expected = ["2012-02-07 00:00:00", "2012-02-07 23:00:00"]\n assert len(result) == 2\n assert result == expected\n\n def test_format_datetime(self):\n msg = "DatetimeIndex.format is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n formatted = pd.to_datetime([datetime(2003, 1, 1, 12), NaT]).format()\n assert formatted[0] == "2003-01-01 12:00:00"\n assert formatted[1] == "NaT"\n\n def test_format_date(self):\n msg = "DatetimeIndex.format is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n formatted = pd.to_datetime([datetime(2003, 1, 1), NaT]).format()\n assert formatted[0] == "2003-01-01"\n assert formatted[1] == "NaT"\n\n def 
test_format_date_tz(self):\n dti = pd.to_datetime([datetime(2013, 1, 1)], utc=True)\n msg = "DatetimeIndex.format is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n formatted = dti.format()\n assert formatted[0] == "2013-01-01 00:00:00+00:00"\n\n dti = pd.to_datetime([datetime(2013, 1, 1), NaT], utc=True)\n with tm.assert_produces_warning(FutureWarning, match=msg):\n formatted = dti.format()\n assert formatted[0] == "2013-01-01 00:00:00+00:00"\n\n def test_format_date_explicit_date_format(self):\n dti = pd.to_datetime([datetime(2003, 2, 1), NaT])\n msg = "DatetimeIndex.format is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n formatted = dti.format(date_format="%m-%d-%Y", na_rep="UT")\n assert formatted[0] == "02-01-2003"\n assert formatted[1] == "UT"\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_formats.py | test_formats.py | Python | 12,738 | 0.95 | 0.067416 | 0.037037 | node-utils | 491 | 2024-04-06T03:42:49.130368 | Apache-2.0 | true | 13eda318ef80bc7b54902d002b4b1ab9 |
import pytest\n\nfrom pandas import (\n DatetimeIndex,\n date_range,\n)\n\nfrom pandas.tseries.offsets import (\n BDay,\n DateOffset,\n Day,\n Hour,\n)\n\n\nclass TestFreq:\n def test_freq_setter_errors(self):\n # GH#20678\n idx = DatetimeIndex(["20180101", "20180103", "20180105"])\n\n # setting with an incompatible freq\n msg = (\n "Inferred frequency 2D from passed values does not conform to "\n "passed frequency 5D"\n )\n with pytest.raises(ValueError, match=msg):\n idx._data.freq = "5D"\n\n # setting with non-freq string\n with pytest.raises(ValueError, match="Invalid frequency"):\n idx._data.freq = "foo"\n\n @pytest.mark.parametrize("values", [["20180101", "20180103", "20180105"], []])\n @pytest.mark.parametrize("freq", ["2D", Day(2), "2B", BDay(2), "48h", Hour(48)])\n @pytest.mark.parametrize("tz", [None, "US/Eastern"])\n def test_freq_setter(self, values, freq, tz):\n # GH#20678\n idx = DatetimeIndex(values, tz=tz)\n\n # can set to an offset, converting from string if necessary\n idx._data.freq = freq\n assert idx.freq == freq\n assert isinstance(idx.freq, DateOffset)\n\n # can reset to None\n idx._data.freq = None\n assert idx.freq is None\n\n def test_freq_view_safe(self):\n # Setting the freq for one DatetimeIndex shouldn't alter the freq\n # for another that views the same data\n\n dti = date_range("2016-01-01", periods=5)\n dta = dti._data\n\n dti2 = DatetimeIndex(dta)._with_freq(None)\n assert dti2.freq is None\n\n # Original was not altered\n assert dti.freq == "D"\n assert dta.freq == "D"\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_freq_attr.py | test_freq_attr.py | Python | 1,732 | 0.95 | 0.114754 | 0.1875 | vue-tools | 642 | 2024-08-01T19:16:13.182320 | MIT | true | 56ab9e03bdb223fc90a06028a72298c1 |
from datetime import (\n date,\n datetime,\n time,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import index as libindex\nfrom pandas.compat.numpy import np_long\n\nimport pandas as pd\nfrom pandas import (\n DatetimeIndex,\n Index,\n Timestamp,\n bdate_range,\n date_range,\n notna,\n)\nimport pandas._testing as tm\n\nfrom pandas.tseries.frequencies import to_offset\n\nSTART, END = datetime(2009, 1, 1), datetime(2010, 1, 1)\n\n\nclass TestGetItem:\n def test_getitem_slice_keeps_name(self):\n # GH4226\n st = Timestamp("2013-07-01 00:00:00", tz="America/Los_Angeles")\n et = Timestamp("2013-07-02 00:00:00", tz="America/Los_Angeles")\n dr = date_range(st, et, freq="h", name="timebucket")\n assert dr[1:].name == dr.name\n\n @pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])\n def test_getitem(self, tz):\n idx = date_range("2011-01-01", "2011-01-31", freq="D", tz=tz, name="idx")\n\n result = idx[0]\n assert result == Timestamp("2011-01-01", tz=idx.tz)\n\n result = idx[0:5]\n expected = date_range(\n "2011-01-01", "2011-01-05", freq="D", tz=idx.tz, name="idx"\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx[0:10:2]\n expected = date_range(\n "2011-01-01", "2011-01-09", freq="2D", tz=idx.tz, name="idx"\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx[-20:-5:3]\n expected = date_range(\n "2011-01-12", "2011-01-24", freq="3D", tz=idx.tz, name="idx"\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx[4::-1]\n expected = DatetimeIndex(\n ["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],\n dtype=idx.dtype,\n freq="-1D",\n name="idx",\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n @pytest.mark.parametrize("freq", ["B", "C"])\n def test_dti_business_getitem(self, freq):\n rng = bdate_range(START, END, freq=freq)\n smaller = rng[:5]\n exp 
= DatetimeIndex(rng.view(np.ndarray)[:5], freq=freq)\n tm.assert_index_equal(smaller, exp)\n assert smaller.freq == exp.freq\n assert smaller.freq == rng.freq\n\n sliced = rng[::5]\n assert sliced.freq == to_offset(freq) * 5\n\n fancy_indexed = rng[[4, 3, 2, 1, 0]]\n assert len(fancy_indexed) == 5\n assert isinstance(fancy_indexed, DatetimeIndex)\n assert fancy_indexed.freq is None\n\n # 32-bit vs. 64-bit platforms\n assert rng[4] == rng[np_long(4)]\n\n @pytest.mark.parametrize("freq", ["B", "C"])\n def test_dti_business_getitem_matplotlib_hackaround(self, freq):\n rng = bdate_range(START, END, freq=freq)\n with pytest.raises(ValueError, match="Multi-dimensional indexing"):\n # GH#30588 multi-dimensional indexing deprecated\n rng[:, None]\n\n def test_getitem_int_list(self):\n dti = date_range(start="1/1/2005", end="12/1/2005", freq="ME")\n dti2 = dti[[1, 3, 5]]\n\n v1 = dti2[0]\n v2 = dti2[1]\n v3 = dti2[2]\n\n assert v1 == Timestamp("2/28/2005")\n assert v2 == Timestamp("4/30/2005")\n assert v3 == Timestamp("6/30/2005")\n\n # getitem with non-slice drops freq\n assert dti2.freq is None\n\n\nclass TestWhere:\n def test_where_doesnt_retain_freq(self):\n dti = date_range("20130101", periods=3, freq="D", name="idx")\n cond = [True, True, False]\n expected = DatetimeIndex([dti[0], dti[1], dti[0]], freq=None, name="idx")\n\n result = dti.where(cond, dti[::-1])\n tm.assert_index_equal(result, expected)\n\n def test_where_other(self):\n # other is ndarray or Index\n i = date_range("20130101", periods=3, tz="US/Eastern")\n\n for arr in [np.nan, pd.NaT]:\n result = i.where(notna(i), other=arr)\n expected = i\n tm.assert_index_equal(result, expected)\n\n i2 = i.copy()\n i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())\n result = i.where(notna(i2), i2)\n tm.assert_index_equal(result, i2)\n\n i2 = i.copy()\n i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())\n result = i.where(notna(i2), i2._values)\n tm.assert_index_equal(result, i2)\n\n def test_where_invalid_dtypes(self):\n dti = 
date_range("20130101", periods=3, tz="US/Eastern")\n\n tail = dti[2:].tolist()\n i2 = Index([pd.NaT, pd.NaT] + tail)\n\n mask = notna(i2)\n\n # passing tz-naive ndarray to tzaware DTI\n result = dti.where(mask, i2.values)\n expected = Index([pd.NaT.asm8, pd.NaT.asm8] + tail, dtype=object)\n tm.assert_index_equal(result, expected)\n\n # passing tz-aware DTI to tznaive DTI\n naive = dti.tz_localize(None)\n result = naive.where(mask, i2)\n expected = Index([i2[0], i2[1]] + naive[2:].tolist(), dtype=object)\n tm.assert_index_equal(result, expected)\n\n pi = i2.tz_localize(None).to_period("D")\n result = dti.where(mask, pi)\n expected = Index([pi[0], pi[1]] + tail, dtype=object)\n tm.assert_index_equal(result, expected)\n\n tda = i2.asi8.view("timedelta64[ns]")\n result = dti.where(mask, tda)\n expected = Index([tda[0], tda[1]] + tail, dtype=object)\n assert isinstance(expected[0], np.timedelta64)\n tm.assert_index_equal(result, expected)\n\n result = dti.where(mask, i2.asi8)\n expected = Index([pd.NaT._value, pd.NaT._value] + tail, dtype=object)\n assert isinstance(expected[0], int)\n tm.assert_index_equal(result, expected)\n\n # non-matching scalar\n td = pd.Timedelta(days=4)\n result = dti.where(mask, td)\n expected = Index([td, td] + tail, dtype=object)\n assert expected[0] is td\n tm.assert_index_equal(result, expected)\n\n def test_where_mismatched_nat(self, tz_aware_fixture):\n tz = tz_aware_fixture\n dti = date_range("2013-01-01", periods=3, tz=tz)\n cond = np.array([True, False, True])\n\n tdnat = np.timedelta64("NaT", "ns")\n expected = Index([dti[0], tdnat, dti[2]], dtype=object)\n assert expected[1] is tdnat\n\n result = dti.where(cond, tdnat)\n tm.assert_index_equal(result, expected)\n\n def test_where_tz(self):\n i = date_range("20130101", periods=3, tz="US/Eastern")\n result = i.where(notna(i))\n expected = i\n tm.assert_index_equal(result, expected)\n\n i2 = i.copy()\n i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())\n result = i.where(notna(i2))\n expected 
= i2\n tm.assert_index_equal(result, expected)\n\n\nclass TestTake:\n @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])\n def test_dti_take_dont_lose_meta(self, tzstr):\n rng = date_range("1/1/2000", periods=20, tz=tzstr)\n\n result = rng.take(range(5))\n assert result.tz == rng.tz\n assert result.freq == rng.freq\n\n def test_take_nan_first_datetime(self):\n index = DatetimeIndex([pd.NaT, Timestamp("20130101"), Timestamp("20130102")])\n result = index.take([-1, 0, 1])\n expected = DatetimeIndex([index[-1], index[0], index[1]])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])\n def test_take(self, tz):\n # GH#10295\n idx = date_range("2011-01-01", "2011-01-31", freq="D", name="idx", tz=tz)\n\n result = idx.take([0])\n assert result == Timestamp("2011-01-01", tz=idx.tz)\n\n result = idx.take([0, 1, 2])\n expected = date_range(\n "2011-01-01", "2011-01-03", freq="D", tz=idx.tz, name="idx"\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx.take([0, 2, 4])\n expected = date_range(\n "2011-01-01", "2011-01-05", freq="2D", tz=idx.tz, name="idx"\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx.take([7, 4, 1])\n expected = date_range(\n "2011-01-08", "2011-01-02", freq="-3D", tz=idx.tz, name="idx"\n )\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n result = idx.take([3, 2, 5])\n expected = DatetimeIndex(\n ["2011-01-04", "2011-01-03", "2011-01-06"],\n dtype=idx.dtype,\n freq=None,\n name="idx",\n )\n tm.assert_index_equal(result, expected)\n assert result.freq is None\n\n result = idx.take([-3, 2, 5])\n expected = DatetimeIndex(\n ["2011-01-29", "2011-01-03", "2011-01-06"],\n dtype=idx.dtype,\n freq=None,\n name="idx",\n )\n tm.assert_index_equal(result, expected)\n assert result.freq is None\n\n def test_take_invalid_kwargs(self):\n idx = date_range("2011-01-01", 
"2011-01-31", freq="D", name="idx")\n indices = [1, 6, 5, 9, 10, 13, 15, 3]\n\n msg = r"take\(\) got an unexpected keyword argument 'foo'"\n with pytest.raises(TypeError, match=msg):\n idx.take(indices, foo=2)\n\n msg = "the 'out' parameter is not supported"\n with pytest.raises(ValueError, match=msg):\n idx.take(indices, out=indices)\n\n msg = "the 'mode' parameter is not supported"\n with pytest.raises(ValueError, match=msg):\n idx.take(indices, mode="clip")\n\n # TODO: This method came from test_datetime; de-dup with version above\n @pytest.mark.parametrize("tz", [None, "US/Eastern", "Asia/Tokyo"])\n def test_take2(self, tz):\n dates = [\n datetime(2010, 1, 1, 14),\n datetime(2010, 1, 1, 15),\n datetime(2010, 1, 1, 17),\n datetime(2010, 1, 1, 21),\n ]\n\n idx = date_range(\n start="2010-01-01 09:00",\n end="2010-02-01 09:00",\n freq="h",\n tz=tz,\n name="idx",\n )\n expected = DatetimeIndex(dates, freq=None, name="idx", dtype=idx.dtype)\n\n taken1 = idx.take([5, 6, 8, 12])\n taken2 = idx[[5, 6, 8, 12]]\n\n for taken in [taken1, taken2]:\n tm.assert_index_equal(taken, expected)\n assert isinstance(taken, DatetimeIndex)\n assert taken.freq is None\n assert taken.tz == expected.tz\n assert taken.name == expected.name\n\n def test_take_fill_value(self):\n # GH#12631\n idx = DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx")\n result = idx.take(np.array([1, 0, -1]))\n expected = DatetimeIndex(["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx")\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx")\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)\n expected = DatetimeIndex(["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx")\n tm.assert_index_equal(result, expected)\n\n msg = (\n "When allow_fill=True and 
fill_value is not None, "\n "all indices must be >= -1"\n )\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n msg = "out of bounds"\n with pytest.raises(IndexError, match=msg):\n idx.take(np.array([1, -5]))\n\n def test_take_fill_value_with_timezone(self):\n idx = DatetimeIndex(\n ["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", tz="US/Eastern"\n )\n result = idx.take(np.array([1, 0, -1]))\n expected = DatetimeIndex(\n ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", tz="US/Eastern"\n )\n tm.assert_index_equal(result, expected)\n\n # fill_value\n result = idx.take(np.array([1, 0, -1]), fill_value=True)\n expected = DatetimeIndex(\n ["2011-02-01", "2011-01-01", "NaT"], name="xxx", tz="US/Eastern"\n )\n tm.assert_index_equal(result, expected)\n\n # allow_fill=False\n result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)\n expected = DatetimeIndex(\n ["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", tz="US/Eastern"\n )\n tm.assert_index_equal(result, expected)\n\n msg = (\n "When allow_fill=True and fill_value is not None, "\n "all indices must be >= -1"\n )\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -2]), fill_value=True)\n with pytest.raises(ValueError, match=msg):\n idx.take(np.array([1, 0, -5]), fill_value=True)\n\n msg = "out of bounds"\n with pytest.raises(IndexError, match=msg):\n idx.take(np.array([1, -5]))\n\n\nclass TestGetLoc:\n def test_get_loc_key_unit_mismatch(self):\n idx = date_range("2000-01-01", periods=3)\n key = idx[1].as_unit("ms")\n loc = idx.get_loc(key)\n assert loc == 1\n assert key in idx\n\n def test_get_loc_key_unit_mismatch_not_castable(self):\n dta = date_range("2000-01-01", periods=3)._data.astype("M8[s]")\n dti = DatetimeIndex(dta)\n key = dta[0].as_unit("ns") + pd.Timedelta(1)\n\n with pytest.raises(\n KeyError, 
match=r"Timestamp\('2000-01-01 00:00:00.000000001'\)"\n ):\n dti.get_loc(key)\n\n assert key not in dti\n\n def test_get_loc_time_obj(self):\n # time indexing\n idx = date_range("2000-01-01", periods=24, freq="h")\n\n result = idx.get_loc(time(12))\n expected = np.array([12])\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n result = idx.get_loc(time(12, 30))\n expected = np.array([])\n tm.assert_numpy_array_equal(result, expected, check_dtype=False)\n\n @pytest.mark.parametrize("offset", [-10, 10])\n def test_get_loc_time_obj2(self, monkeypatch, offset):\n # GH#8667\n size_cutoff = 50\n n = size_cutoff + offset\n key = time(15, 11, 30)\n start = key.hour * 3600 + key.minute * 60 + key.second\n step = 24 * 3600\n\n with monkeypatch.context():\n monkeypatch.setattr(libindex, "_SIZE_CUTOFF", size_cutoff)\n idx = date_range("2014-11-26", periods=n, freq="s")\n ts = pd.Series(np.random.default_rng(2).standard_normal(n), index=idx)\n locs = np.arange(start, n, step, dtype=np.intp)\n\n result = ts.index.get_loc(key)\n tm.assert_numpy_array_equal(result, locs)\n tm.assert_series_equal(ts[key], ts.iloc[locs])\n\n left, right = ts.copy(), ts.copy()\n left[key] *= -10\n right.iloc[locs] *= -10\n tm.assert_series_equal(left, right)\n\n def test_get_loc_time_nat(self):\n # GH#35114\n # Case where key's total microseconds happens to match iNaT % 1e6 // 1000\n tic = time(minute=12, second=43, microsecond=145224)\n dti = DatetimeIndex([pd.NaT])\n\n loc = dti.get_loc(tic)\n expected = np.array([], dtype=np.intp)\n tm.assert_numpy_array_equal(loc, expected)\n\n def test_get_loc_nat(self):\n # GH#20464\n index = DatetimeIndex(["1/3/2000", "NaT"])\n assert index.get_loc(pd.NaT) == 1\n\n assert index.get_loc(None) == 1\n\n assert index.get_loc(np.nan) == 1\n\n assert index.get_loc(pd.NA) == 1\n\n assert index.get_loc(np.datetime64("NaT")) == 1\n\n with pytest.raises(KeyError, match="NaT"):\n index.get_loc(np.timedelta64("NaT"))\n\n @pytest.mark.parametrize("key", 
[pd.Timedelta(0), pd.Timedelta(1), timedelta(0)])\n def test_get_loc_timedelta_invalid_key(self, key):\n # GH#20464\n dti = date_range("1970-01-01", periods=10)\n msg = "Cannot index DatetimeIndex with [Tt]imedelta"\n with pytest.raises(TypeError, match=msg):\n dti.get_loc(key)\n\n def test_get_loc_reasonable_key_error(self):\n # GH#1062\n index = DatetimeIndex(["1/3/2000"])\n with pytest.raises(KeyError, match="2000"):\n index.get_loc("1/1/2000")\n\n def test_get_loc_year_str(self):\n rng = date_range("1/1/2000", "1/1/2010")\n\n result = rng.get_loc("2009")\n expected = slice(3288, 3653)\n assert result == expected\n\n\nclass TestContains:\n def test_dti_contains_with_duplicates(self):\n d = datetime(2011, 12, 5, 20, 30)\n ix = DatetimeIndex([d, d])\n assert d in ix\n\n @pytest.mark.parametrize(\n "vals",\n [\n [0, 1, 0],\n [0, 0, -1],\n [0, -1, -1],\n ["2015", "2015", "2016"],\n ["2015", "2015", "2014"],\n ],\n )\n def test_contains_nonunique(self, vals):\n # GH#9512\n idx = DatetimeIndex(vals)\n assert idx[0] in idx\n\n\nclass TestGetIndexer:\n def test_get_indexer_date_objs(self):\n rng = date_range("1/1/2000", periods=20)\n\n result = rng.get_indexer(rng.map(lambda x: x.date()))\n expected = rng.get_indexer(rng)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_get_indexer(self):\n idx = date_range("2000-01-01", periods=3)\n exp = np.array([0, 1, 2], dtype=np.intp)\n tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)\n\n target = idx[0] + pd.to_timedelta(["-1 hour", "12 hours", "1 day 1 hour"])\n tm.assert_numpy_array_equal(\n idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp)\n )\n tm.assert_numpy_array_equal(\n idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp)\n )\n tm.assert_numpy_array_equal(\n idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp)\n )\n tm.assert_numpy_array_equal(\n idx.get_indexer(target, "nearest", tolerance=pd.Timedelta("1 hour")),\n np.array([0, -1, 1], 
dtype=np.intp),\n )\n tol_raw = [\n pd.Timedelta("1 hour"),\n pd.Timedelta("1 hour"),\n pd.Timedelta("1 hour").to_timedelta64(),\n ]\n tm.assert_numpy_array_equal(\n idx.get_indexer(\n target, "nearest", tolerance=[np.timedelta64(x) for x in tol_raw]\n ),\n np.array([0, -1, 1], dtype=np.intp),\n )\n tol_bad = [\n pd.Timedelta("2 hour").to_timedelta64(),\n pd.Timedelta("1 hour").to_timedelta64(),\n "foo",\n ]\n msg = "Could not convert 'foo' to NumPy timedelta"\n with pytest.raises(ValueError, match=msg):\n idx.get_indexer(target, "nearest", tolerance=tol_bad)\n with pytest.raises(ValueError, match="abbreviation w/o a number"):\n idx.get_indexer(idx[[0]], method="nearest", tolerance="foo")\n\n @pytest.mark.parametrize(\n "target",\n [\n [date(2020, 1, 1), Timestamp("2020-01-02")],\n [Timestamp("2020-01-01"), date(2020, 1, 2)],\n ],\n )\n def test_get_indexer_mixed_dtypes(self, target):\n # https://github.com/pandas-dev/pandas/issues/33741\n values = DatetimeIndex([Timestamp("2020-01-01"), Timestamp("2020-01-02")])\n result = values.get_indexer(target)\n expected = np.array([0, 1], dtype=np.intp)\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n "target, positions",\n [\n ([date(9999, 1, 1), Timestamp("2020-01-01")], [-1, 0]),\n ([Timestamp("2020-01-01"), date(9999, 1, 1)], [0, -1]),\n ([date(9999, 1, 1), date(9999, 1, 1)], [-1, -1]),\n ],\n )\n def test_get_indexer_out_of_bounds_date(self, target, positions):\n values = DatetimeIndex([Timestamp("2020-01-01"), Timestamp("2020-01-02")])\n\n result = values.get_indexer(target)\n expected = np.array(positions, dtype=np.intp)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_get_indexer_pad_requires_monotonicity(self):\n rng = date_range("1/1/2000", "3/1/2000", freq="B")\n\n # neither monotonic increasing or decreasing\n rng2 = rng[[1, 0, 2]]\n\n msg = "index must be monotonic increasing or decreasing"\n with pytest.raises(ValueError, match=msg):\n rng2.get_indexer(rng, 
method="pad")\n\n\nclass TestMaybeCastSliceBound:\n def test_maybe_cast_slice_bounds_empty(self):\n # GH#14354\n empty_idx = date_range(freq="1h", periods=0, end="2015")\n\n right = empty_idx._maybe_cast_slice_bound("2015-01-02", "right")\n exp = Timestamp("2015-01-02 23:59:59.999999999")\n assert right == exp\n\n left = empty_idx._maybe_cast_slice_bound("2015-01-02", "left")\n exp = Timestamp("2015-01-02 00:00:00")\n assert left == exp\n\n def test_maybe_cast_slice_duplicate_monotonic(self):\n # https://github.com/pandas-dev/pandas/issues/16515\n idx = DatetimeIndex(["2017", "2017"])\n result = idx._maybe_cast_slice_bound("2017-01-01", "left")\n expected = Timestamp("2017-01-01")\n assert result == expected\n\n\nclass TestGetSliceBounds:\n @pytest.mark.parametrize("box", [date, datetime, Timestamp])\n @pytest.mark.parametrize("side, expected", [("left", 4), ("right", 5)])\n def test_get_slice_bounds_datetime_within(\n self, box, side, expected, tz_aware_fixture\n ):\n # GH 35690\n tz = tz_aware_fixture\n index = bdate_range("2000-01-03", "2000-02-11").tz_localize(tz)\n key = box(year=2000, month=1, day=7)\n\n if tz is not None:\n with pytest.raises(TypeError, match="Cannot compare tz-naive"):\n # GH#36148 we require tzawareness-compat as of 2.0\n index.get_slice_bound(key, side=side)\n else:\n result = index.get_slice_bound(key, side=side)\n assert result == expected\n\n @pytest.mark.parametrize("box", [datetime, Timestamp])\n @pytest.mark.parametrize("side", ["left", "right"])\n @pytest.mark.parametrize("year, expected", [(1999, 0), (2020, 30)])\n def test_get_slice_bounds_datetime_outside(\n self, box, side, year, expected, tz_aware_fixture\n ):\n # GH 35690\n tz = tz_aware_fixture\n index = bdate_range("2000-01-03", "2000-02-11").tz_localize(tz)\n key = box(year=year, month=1, day=7)\n\n if tz is not None:\n with pytest.raises(TypeError, match="Cannot compare tz-naive"):\n # GH#36148 we require tzawareness-compat as of 2.0\n index.get_slice_bound(key, 
side=side)\n else:\n result = index.get_slice_bound(key, side=side)\n assert result == expected\n\n @pytest.mark.parametrize("box", [datetime, Timestamp])\n def test_slice_datetime_locs(self, box, tz_aware_fixture):\n # GH 34077\n tz = tz_aware_fixture\n index = DatetimeIndex(["2010-01-01", "2010-01-03"]).tz_localize(tz)\n key = box(2010, 1, 1)\n\n if tz is not None:\n with pytest.raises(TypeError, match="Cannot compare tz-naive"):\n # GH#36148 we require tzawareness-compat as of 2.0\n index.slice_locs(key, box(2010, 1, 2))\n else:\n result = index.slice_locs(key, box(2010, 1, 2))\n expected = (0, 1)\n assert result == expected\n\n\nclass TestIndexerBetweenTime:\n def test_indexer_between_time(self):\n # GH#11818\n rng = date_range("1/1/2000", "1/5/2000", freq="5min")\n msg = r"Cannot convert arg \[datetime\.datetime\(2010, 1, 2, 1, 0\)\] to a time"\n with pytest.raises(ValueError, match=msg):\n rng.indexer_between_time(datetime(2010, 1, 2, 1), datetime(2010, 1, 2, 5))\n\n @pytest.mark.parametrize("unit", ["us", "ms", "s"])\n def test_indexer_between_time_non_nano(self, unit):\n # For simple cases like this, the non-nano indexer_between_time\n # should match the nano result\n\n rng = date_range("1/1/2000", "1/5/2000", freq="5min")\n arr_nano = rng._data._ndarray\n\n arr = arr_nano.astype(f"M8[{unit}]")\n\n dta = type(rng._data)._simple_new(arr, dtype=arr.dtype)\n dti = DatetimeIndex(dta)\n assert dti.dtype == arr.dtype\n\n tic = time(1, 25)\n toc = time(2, 29)\n\n result = dti.indexer_between_time(tic, toc)\n expected = rng.indexer_between_time(tic, toc)\n tm.assert_numpy_array_equal(result, expected)\n\n # case with non-zero micros in arguments\n tic = time(1, 25, 0, 45678)\n toc = time(2, 29, 0, 1234)\n\n result = dti.indexer_between_time(tic, toc)\n expected = rng.indexer_between_time(tic, toc)\n tm.assert_numpy_array_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_indexing.py | test_indexing.py | Python | 25,241 | 0.95 | 
0.076709 | 0.063248 | awesome-app | 83 | 2023-09-18T01:22:38.569279 | MIT | true | 07bde351d2f6e66409d9adaae739ba31 |
import dateutil.tz\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DatetimeIndex,\n date_range,\n to_datetime,\n)\nfrom pandas.core.arrays import datetimes\n\n\nclass TestDatetimeIndexIteration:\n @pytest.mark.parametrize(\n "tz", [None, "UTC", "US/Central", dateutil.tz.tzoffset(None, -28800)]\n )\n def test_iteration_preserves_nanoseconds(self, tz):\n # GH#19603\n index = DatetimeIndex(\n ["2018-02-08 15:00:00.168456358", "2018-02-08 15:00:00.168456359"], tz=tz\n )\n for i, ts in enumerate(index):\n assert ts == index[i] # pylint: disable=unnecessary-list-index-lookup\n\n def test_iter_readonly(self):\n # GH#28055 ints_to_pydatetime with readonly array\n arr = np.array([np.datetime64("2012-02-15T12:00:00.000000000")])\n arr.setflags(write=False)\n dti = to_datetime(arr)\n list(dti)\n\n def test_iteration_preserves_tz(self):\n # see GH#8890\n index = date_range("2012-01-01", periods=3, freq="h", tz="US/Eastern")\n\n for i, ts in enumerate(index):\n result = ts\n expected = index[i] # pylint: disable=unnecessary-list-index-lookup\n assert result == expected\n\n def test_iteration_preserves_tz2(self):\n index = date_range(\n "2012-01-01", periods=3, freq="h", tz=dateutil.tz.tzoffset(None, -28800)\n )\n\n for i, ts in enumerate(index):\n result = ts\n expected = index[i] # pylint: disable=unnecessary-list-index-lookup\n assert result._repr_base == expected._repr_base\n assert result == expected\n\n def test_iteration_preserves_tz3(self):\n # GH#9100\n index = DatetimeIndex(\n ["2014-12-01 03:32:39.987000-08:00", "2014-12-01 04:12:34.987000-08:00"]\n )\n for i, ts in enumerate(index):\n result = ts\n expected = index[i] # pylint: disable=unnecessary-list-index-lookup\n assert result._repr_base == expected._repr_base\n assert result == expected\n\n @pytest.mark.parametrize("offset", [-5, -1, 0, 1])\n def test_iteration_over_chunksize(self, offset, monkeypatch):\n # GH#21012\n chunksize = 5\n index = date_range(\n "2000-01-01 00:00:00", periods=chunksize - 
offset, freq="min"\n )\n num = 0\n with monkeypatch.context() as m:\n m.setattr(datetimes, "_ITER_CHUNKSIZE", chunksize)\n for stamp in index:\n assert index[num] == stamp\n num += 1\n assert num == len(index)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_iter.py | test_iter.py | Python | 2,590 | 0.95 | 0.157895 | 0.075758 | python-kit | 144 | 2024-10-20T12:37:46.948973 | Apache-2.0 | true | 425f1cb3fbc3c2e47ac8a325f00f44fc |
from datetime import (\n datetime,\n timezone,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n Timestamp,\n date_range,\n period_range,\n to_datetime,\n)\nimport pandas._testing as tm\n\nfrom pandas.tseries.offsets import (\n BDay,\n BMonthEnd,\n)\n\n\nclass TestJoin:\n def test_does_not_convert_mixed_integer(self):\n df = DataFrame(np.ones((3, 2)), columns=date_range("2020-01-01", periods=2))\n cols = df.columns.join(df.index, how="outer")\n joined = cols.join(df.columns)\n assert cols.dtype == np.dtype("O")\n assert cols.dtype == joined.dtype\n tm.assert_numpy_array_equal(cols.values, joined.values)\n\n def test_join_self(self, join_type):\n index = date_range("1/1/2000", periods=10)\n joined = index.join(index, how=join_type)\n assert index is joined\n\n def test_join_with_period_index(self, join_type):\n df = DataFrame(\n np.ones((10, 2)),\n index=date_range("2020-01-01", periods=10),\n columns=period_range("2020-01-01", periods=2),\n )\n s = df.iloc[:5, 0]\n\n expected = df.columns.astype("O").join(s.index, how=join_type)\n result = df.columns.join(s.index, how=join_type)\n tm.assert_index_equal(expected, result)\n\n def test_join_object_index(self):\n rng = date_range("1/1/2000", periods=10)\n idx = Index(["a", "b", "c", "d"])\n\n result = rng.join(idx, how="outer")\n assert isinstance(result[0], Timestamp)\n\n def test_join_utc_convert(self, join_type):\n rng = date_range("1/1/2011", periods=100, freq="h", tz="utc")\n\n left = rng.tz_convert("US/Eastern")\n right = rng.tz_convert("Europe/Berlin")\n\n result = left.join(left[:-5], how=join_type)\n assert isinstance(result, DatetimeIndex)\n assert result.tz == left.tz\n\n result = left.join(right[:-5], how=join_type)\n assert isinstance(result, DatetimeIndex)\n assert result.tz is timezone.utc\n\n def test_datetimeindex_union_join_empty(self, sort):\n dti = date_range(start="1/1/2001", end="2/1/2001", freq="D")\n empty = Index([])\n\n result = 
dti.union(empty, sort=sort)\n expected = dti.astype("O")\n tm.assert_index_equal(result, expected)\n\n result = dti.join(empty)\n assert isinstance(result, DatetimeIndex)\n tm.assert_index_equal(result, dti)\n\n def test_join_nonunique(self):\n idx1 = to_datetime(["2012-11-06 16:00:11.477563", "2012-11-06 16:00:11.477563"])\n idx2 = to_datetime(["2012-11-06 15:11:09.006507", "2012-11-06 15:11:09.006507"])\n rs = idx1.join(idx2, how="outer")\n assert rs.is_monotonic_increasing\n\n @pytest.mark.parametrize("freq", ["B", "C"])\n def test_outer_join(self, freq):\n # should just behave as union\n start, end = datetime(2009, 1, 1), datetime(2010, 1, 1)\n rng = date_range(start=start, end=end, freq=freq)\n\n # overlapping\n left = rng[:10]\n right = rng[5:10]\n\n the_join = left.join(right, how="outer")\n assert isinstance(the_join, DatetimeIndex)\n\n # non-overlapping, gap in middle\n left = rng[:5]\n right = rng[10:]\n\n the_join = left.join(right, how="outer")\n assert isinstance(the_join, DatetimeIndex)\n assert the_join.freq is None\n\n # non-overlapping, no gap\n left = rng[:5]\n right = rng[5:10]\n\n the_join = left.join(right, how="outer")\n assert isinstance(the_join, DatetimeIndex)\n\n # overlapping, but different offset\n other = date_range(start, end, freq=BMonthEnd())\n\n the_join = rng.join(other, how="outer")\n assert isinstance(the_join, DatetimeIndex)\n assert the_join.freq is None\n\n def test_naive_aware_conflicts(self):\n start, end = datetime(2009, 1, 1), datetime(2010, 1, 1)\n naive = date_range(start, end, freq=BDay(), tz=None)\n aware = date_range(start, end, freq=BDay(), tz="Asia/Hong_Kong")\n\n msg = "tz-naive.*tz-aware"\n with pytest.raises(TypeError, match=msg):\n naive.join(aware)\n\n with pytest.raises(TypeError, match=msg):\n aware.join(naive)\n\n @pytest.mark.parametrize("tz", [None, "US/Pacific"])\n def test_join_preserves_freq(self, tz):\n # GH#32157\n dti = date_range("2016-01-01", periods=10, tz=tz)\n result = dti[:5].join(dti[5:], 
how="outer")\n assert result.freq == dti.freq\n tm.assert_index_equal(result, dti)\n\n result = dti[:5].join(dti[6:], how="outer")\n assert result.freq is None\n expected = dti.delete(5)\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_join.py | test_join.py | Python | 4,742 | 0.95 | 0.073826 | 0.051282 | vue-tools | 110 | 2024-12-21T21:28:30.219975 | Apache-2.0 | true | 525d4add542fd944afcb6517e190c250 |
import numpy as np\n\nfrom pandas import date_range\nimport pandas._testing as tm\n\n\nclass TestSplit:\n def test_split_non_utc(self):\n # GH#14042\n indices = date_range("2016-01-01 00:00:00+0200", freq="s", periods=10)\n result = np.split(indices, indices_or_sections=[])[0]\n expected = indices._with_freq(None)\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_npfuncs.py | test_npfuncs.py | Python | 384 | 0.95 | 0.153846 | 0.1 | vue-tools | 887 | 2024-11-08T23:20:02.294772 | Apache-2.0 | true | a58b144b456d135a14d56b6a4f42e178 |
from datetime import datetime\n\nimport pytest\n\nfrom pandas import (\n DatetimeIndex,\n Index,\n bdate_range,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestDatetimeIndexOps:\n def test_infer_freq(self, freq_sample):\n # GH 11018\n idx = date_range("2011-01-01 09:00:00", freq=freq_sample, periods=10)\n result = DatetimeIndex(idx.asi8, freq="infer")\n tm.assert_index_equal(idx, result)\n assert result.freq == freq_sample\n\n\n@pytest.mark.parametrize("freq", ["B", "C"])\nclass TestBusinessDatetimeIndex:\n @pytest.fixture\n def rng(self, freq):\n START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)\n return bdate_range(START, END, freq=freq)\n\n def test_comparison(self, rng):\n d = rng[10]\n\n comp = rng > d\n assert comp[11]\n assert not comp[9]\n\n def test_copy(self, rng):\n cp = rng.copy()\n tm.assert_index_equal(cp, rng)\n\n def test_identical(self, rng):\n t1 = rng.copy()\n t2 = rng.copy()\n assert t1.identical(t2)\n\n # name\n t1 = t1.rename("foo")\n assert t1.equals(t2)\n assert not t1.identical(t2)\n t2 = t2.rename("foo")\n assert t1.identical(t2)\n\n # freq\n t2v = Index(t2.values)\n assert t1.equals(t2v)\n assert not t1.identical(t2v)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_ops.py | test_ops.py | Python | 1,340 | 0.95 | 0.125 | 0.068182 | awesome-app | 963 | 2024-03-11T15:19:18.018680 | Apache-2.0 | true | 3b90f3267339f081ac4a9f24e4cb2579 |
""" test partial slicing on Series/Frame """\n\nfrom datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n MultiIndex,\n Series,\n Timedelta,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestSlicing:\n def test_string_index_series_name_converted(self):\n # GH#1644\n df = DataFrame(\n np.random.default_rng(2).standard_normal((10, 4)),\n index=date_range("1/1/2000", periods=10),\n )\n\n result = df.loc["1/3/2000"]\n assert result.name == df.index[2]\n\n result = df.T["1/3/2000"]\n assert result.name == df.index[2]\n\n def test_stringified_slice_with_tz(self):\n # GH#2658\n start = "2013-01-07"\n idx = date_range(start=start, freq="1d", periods=10, tz="US/Eastern")\n df = DataFrame(np.arange(10), index=idx)\n df["2013-01-14 23:44:34.437768-05:00":] # no exception here\n\n def test_return_type_doesnt_depend_on_monotonicity(self):\n # GH#24892 we get Series back regardless of whether our DTI is monotonic\n dti = date_range(start="2015-5-13 23:59:00", freq="min", periods=3)\n ser = Series(range(3), index=dti)\n\n # non-monotonic index\n ser2 = Series(range(3), index=[dti[1], dti[0], dti[2]])\n\n # key with resolution strictly lower than "min"\n key = "2015-5-14 00"\n\n # monotonic increasing index\n result = ser.loc[key]\n expected = ser.iloc[1:]\n tm.assert_series_equal(result, expected)\n\n # monotonic decreasing index\n result = ser.iloc[::-1].loc[key]\n expected = ser.iloc[::-1][:-1]\n tm.assert_series_equal(result, expected)\n\n # non-monotonic index\n result2 = ser2.loc[key]\n expected2 = ser2.iloc[::2]\n tm.assert_series_equal(result2, expected2)\n\n def test_return_type_doesnt_depend_on_monotonicity_higher_reso(self):\n # GH#24892 we get Series back regardless of whether our DTI is monotonic\n dti = date_range(start="2015-5-13 23:59:00", freq="min", periods=3)\n ser = Series(range(3), index=dti)\n\n # non-monotonic index\n ser2 = Series(range(3), index=[dti[1], 
dti[0], dti[2]])\n\n # key with resolution strictly *higher) than "min"\n key = "2015-5-14 00:00:00"\n\n # monotonic increasing index\n result = ser.loc[key]\n assert result == 1\n\n # monotonic decreasing index\n result = ser.iloc[::-1].loc[key]\n assert result == 1\n\n # non-monotonic index\n result2 = ser2.loc[key]\n assert result2 == 0\n\n def test_monotone_DTI_indexing_bug(self):\n # GH 19362\n # Testing accessing the first element in a monotonic descending\n # partial string indexing.\n\n df = DataFrame(list(range(5)))\n date_list = [\n "2018-01-02",\n "2017-02-10",\n "2016-03-10",\n "2015-03-15",\n "2014-03-16",\n ]\n date_index = DatetimeIndex(date_list)\n df["date"] = date_index\n expected = DataFrame({0: list(range(5)), "date": date_index})\n tm.assert_frame_equal(df, expected)\n\n # We get a slice because df.index's resolution is hourly and we\n # are slicing with a daily-resolution string. If both were daily,\n # we would get a single item back\n dti = date_range("20170101 01:00:00", periods=3)\n df = DataFrame({"A": [1, 2, 3]}, index=dti[::-1])\n\n expected = DataFrame({"A": 1}, index=dti[-1:][::-1])\n result = df.loc["2017-01-03"]\n tm.assert_frame_equal(result, expected)\n\n result2 = df.iloc[::-1].loc["2017-01-03"]\n expected2 = expected.iloc[::-1]\n tm.assert_frame_equal(result2, expected2)\n\n def test_slice_year(self):\n dti = date_range(freq="B", start=datetime(2005, 1, 1), periods=500)\n\n s = Series(np.arange(len(dti)), index=dti)\n result = s["2005"]\n expected = s[s.index.year == 2005]\n tm.assert_series_equal(result, expected)\n\n df = DataFrame(np.random.default_rng(2).random((len(dti), 5)), index=dti)\n result = df.loc["2005"]\n expected = df[df.index.year == 2005]\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "partial_dtime",\n [\n "2019",\n "2019Q4",\n "Dec 2019",\n "2019-12-31",\n "2019-12-31 23",\n "2019-12-31 23:59",\n ],\n )\n def test_slice_end_of_period_resolution(self, partial_dtime):\n # GH#31064\n 
dti = date_range("2019-12-31 23:59:55.999999999", periods=10, freq="s")\n\n ser = Series(range(10), index=dti)\n result = ser[partial_dtime]\n expected = ser.iloc[:5]\n tm.assert_series_equal(result, expected)\n\n def test_slice_quarter(self):\n dti = date_range(freq="D", start=datetime(2000, 6, 1), periods=500)\n\n s = Series(np.arange(len(dti)), index=dti)\n assert len(s["2001Q1"]) == 90\n\n df = DataFrame(np.random.default_rng(2).random((len(dti), 5)), index=dti)\n assert len(df.loc["1Q01"]) == 90\n\n def test_slice_month(self):\n dti = date_range(freq="D", start=datetime(2005, 1, 1), periods=500)\n s = Series(np.arange(len(dti)), index=dti)\n assert len(s["2005-11"]) == 30\n\n df = DataFrame(np.random.default_rng(2).random((len(dti), 5)), index=dti)\n assert len(df.loc["2005-11"]) == 30\n\n tm.assert_series_equal(s["2005-11"], s["11-2005"])\n\n def test_partial_slice(self):\n rng = date_range(freq="D", start=datetime(2005, 1, 1), periods=500)\n s = Series(np.arange(len(rng)), index=rng)\n\n result = s["2005-05":"2006-02"]\n expected = s["20050501":"20060228"]\n tm.assert_series_equal(result, expected)\n\n result = s["2005-05":]\n expected = s["20050501":]\n tm.assert_series_equal(result, expected)\n\n result = s[:"2006-02"]\n expected = s[:"20060228"]\n tm.assert_series_equal(result, expected)\n\n result = s["2005-1-1"]\n assert result == s.iloc[0]\n\n with pytest.raises(KeyError, match=r"^'2004-12-31'$"):\n s["2004-12-31"]\n\n def test_partial_slice_daily(self):\n rng = date_range(freq="h", start=datetime(2005, 1, 31), periods=500)\n s = Series(np.arange(len(rng)), index=rng)\n\n result = s["2005-1-31"]\n tm.assert_series_equal(result, s.iloc[:24])\n\n with pytest.raises(KeyError, match=r"^'2004-12-31 00'$"):\n s["2004-12-31 00"]\n\n def test_partial_slice_hourly(self):\n rng = date_range(freq="min", start=datetime(2005, 1, 1, 20, 0, 0), periods=500)\n s = Series(np.arange(len(rng)), index=rng)\n\n result = s["2005-1-1"]\n tm.assert_series_equal(result, 
s.iloc[: 60 * 4])\n\n result = s["2005-1-1 20"]\n tm.assert_series_equal(result, s.iloc[:60])\n\n assert s["2005-1-1 20:00"] == s.iloc[0]\n with pytest.raises(KeyError, match=r"^'2004-12-31 00:15'$"):\n s["2004-12-31 00:15"]\n\n def test_partial_slice_minutely(self):\n rng = date_range(freq="s", start=datetime(2005, 1, 1, 23, 59, 0), periods=500)\n s = Series(np.arange(len(rng)), index=rng)\n\n result = s["2005-1-1 23:59"]\n tm.assert_series_equal(result, s.iloc[:60])\n\n result = s["2005-1-1"]\n tm.assert_series_equal(result, s.iloc[:60])\n\n assert s[Timestamp("2005-1-1 23:59:00")] == s.iloc[0]\n with pytest.raises(KeyError, match=r"^'2004-12-31 00:00:00'$"):\n s["2004-12-31 00:00:00"]\n\n def test_partial_slice_second_precision(self):\n rng = date_range(\n start=datetime(2005, 1, 1, 0, 0, 59, microsecond=999990),\n periods=20,\n freq="us",\n )\n s = Series(np.arange(20), rng)\n\n tm.assert_series_equal(s["2005-1-1 00:00"], s.iloc[:10])\n tm.assert_series_equal(s["2005-1-1 00:00:59"], s.iloc[:10])\n\n tm.assert_series_equal(s["2005-1-1 00:01"], s.iloc[10:])\n tm.assert_series_equal(s["2005-1-1 00:01:00"], s.iloc[10:])\n\n assert s[Timestamp("2005-1-1 00:00:59.999990")] == s.iloc[0]\n with pytest.raises(KeyError, match="2005-1-1 00:00:00"):\n s["2005-1-1 00:00:00"]\n\n def test_partial_slicing_dataframe(self):\n # GH14856\n # Test various combinations of string slicing resolution vs.\n # index resolution\n # - If string resolution is less precise than index resolution,\n # string is considered a slice\n # - If string resolution is equal to or more precise than index\n # resolution, string is considered an exact match\n formats = [\n "%Y",\n "%Y-%m",\n "%Y-%m-%d",\n "%Y-%m-%d %H",\n "%Y-%m-%d %H:%M",\n "%Y-%m-%d %H:%M:%S",\n ]\n resolutions = ["year", "month", "day", "hour", "minute", "second"]\n for rnum, resolution in enumerate(resolutions[2:], 2):\n # we check only 'day', 'hour', 'minute' and 'second'\n unit = Timedelta("1 " + resolution)\n middate = 
datetime(2012, 1, 1, 0, 0, 0)\n index = DatetimeIndex([middate - unit, middate, middate + unit])\n values = [1, 2, 3]\n df = DataFrame({"a": values}, index, dtype=np.int64)\n assert df.index.resolution == resolution\n\n # Timestamp with the same resolution as index\n # Should be exact match for Series (return scalar)\n # and raise KeyError for Frame\n for timestamp, expected in zip(index, values):\n ts_string = timestamp.strftime(formats[rnum])\n # make ts_string as precise as index\n result = df["a"][ts_string]\n assert isinstance(result, np.int64)\n assert result == expected\n msg = rf"^'{ts_string}'$"\n with pytest.raises(KeyError, match=msg):\n df[ts_string]\n\n # Timestamp with resolution less precise than index\n for fmt in formats[:rnum]:\n for element, theslice in [[0, slice(None, 1)], [1, slice(1, None)]]:\n ts_string = index[element].strftime(fmt)\n\n # Series should return slice\n result = df["a"][ts_string]\n expected = df["a"][theslice]\n tm.assert_series_equal(result, expected)\n\n # pre-2.0 df[ts_string] was overloaded to interpret this\n # as slicing along index\n with pytest.raises(KeyError, match=ts_string):\n df[ts_string]\n\n # Timestamp with resolution more precise than index\n # Compatible with existing key\n # Should return scalar for Series\n # and raise KeyError for Frame\n for fmt in formats[rnum + 1 :]:\n ts_string = index[1].strftime(fmt)\n result = df["a"][ts_string]\n assert isinstance(result, np.int64)\n assert result == 2\n msg = rf"^'{ts_string}'$"\n with pytest.raises(KeyError, match=msg):\n df[ts_string]\n\n # Not compatible with existing key\n # Should raise KeyError\n for fmt, res in list(zip(formats, resolutions))[rnum + 1 :]:\n ts = index[1] + Timedelta("1 " + res)\n ts_string = ts.strftime(fmt)\n msg = rf"^'{ts_string}'$"\n with pytest.raises(KeyError, match=msg):\n df["a"][ts_string]\n with pytest.raises(KeyError, match=msg):\n df[ts_string]\n\n def test_partial_slicing_with_multiindex(self):\n # GH 4758\n # partial string 
indexing with a multi-index buggy\n df = DataFrame(\n {\n "ACCOUNT": ["ACCT1", "ACCT1", "ACCT1", "ACCT2"],\n "TICKER": ["ABC", "MNP", "XYZ", "XYZ"],\n "val": [1, 2, 3, 4],\n },\n index=date_range("2013-06-19 09:30:00", periods=4, freq="5min"),\n )\n df_multi = df.set_index(["ACCOUNT", "TICKER"], append=True)\n\n expected = DataFrame(\n [[1]], index=Index(["ABC"], name="TICKER"), columns=["val"]\n )\n result = df_multi.loc[("2013-06-19 09:30:00", "ACCT1")]\n tm.assert_frame_equal(result, expected)\n\n expected = df_multi.loc[\n (Timestamp("2013-06-19 09:30:00", tz=None), "ACCT1", "ABC")\n ]\n result = df_multi.loc[("2013-06-19 09:30:00", "ACCT1", "ABC")]\n tm.assert_series_equal(result, expected)\n\n # partial string indexing on first level, scalar indexing on the other two\n result = df_multi.loc[("2013-06-19", "ACCT1", "ABC")]\n expected = df_multi.iloc[:1].droplevel([1, 2])\n tm.assert_frame_equal(result, expected)\n\n def test_partial_slicing_with_multiindex_series(self):\n # GH 4294\n # partial slice on a series mi\n ser = Series(\n range(250),\n index=MultiIndex.from_product(\n [date_range("2000-1-1", periods=50), range(5)]\n ),\n )\n\n s2 = ser[:-1].copy()\n expected = s2["2000-1-4"]\n result = s2[Timestamp("2000-1-4")]\n tm.assert_series_equal(result, expected)\n\n result = ser[Timestamp("2000-1-4")]\n expected = ser["2000-1-4"]\n tm.assert_series_equal(result, expected)\n\n df2 = DataFrame(ser)\n expected = df2.xs("2000-1-4")\n result = df2.loc[Timestamp("2000-1-4")]\n tm.assert_frame_equal(result, expected)\n\n def test_partial_slice_requires_monotonicity(self):\n # Disallowed since 2.0 (GH 37819)\n ser = Series(np.arange(10), date_range("2014-01-01", periods=10))\n\n nonmonotonic = ser.iloc[[3, 5, 4]]\n timestamp = Timestamp("2014-01-10")\n with pytest.raises(\n KeyError, match="Value based partial slicing on non-monotonic"\n ):\n nonmonotonic["2014-01-10":]\n\n with pytest.raises(KeyError, match=r"Timestamp\('2014-01-10 00:00:00'\)"):\n 
nonmonotonic[timestamp:]\n\n with pytest.raises(\n KeyError, match="Value based partial slicing on non-monotonic"\n ):\n nonmonotonic.loc["2014-01-10":]\n\n with pytest.raises(KeyError, match=r"Timestamp\('2014-01-10 00:00:00'\)"):\n nonmonotonic.loc[timestamp:]\n\n def test_loc_datetime_length_one(self):\n # GH16071\n df = DataFrame(\n columns=["1"],\n index=date_range("2016-10-01T00:00:00", "2016-10-01T23:59:59"),\n )\n result = df.loc[datetime(2016, 10, 1) :]\n tm.assert_frame_equal(result, df)\n\n result = df.loc["2016-10-01T00:00:00":]\n tm.assert_frame_equal(result, df)\n\n @pytest.mark.parametrize(\n "start",\n [\n "2018-12-02 21:50:00+00:00",\n Timestamp("2018-12-02 21:50:00+00:00"),\n Timestamp("2018-12-02 21:50:00+00:00").to_pydatetime(),\n ],\n )\n @pytest.mark.parametrize(\n "end",\n [\n "2018-12-02 21:52:00+00:00",\n Timestamp("2018-12-02 21:52:00+00:00"),\n Timestamp("2018-12-02 21:52:00+00:00").to_pydatetime(),\n ],\n )\n def test_getitem_with_datestring_with_UTC_offset(self, start, end):\n # GH 24076\n idx = date_range(\n start="2018-12-02 14:50:00-07:00",\n end="2018-12-02 14:50:00-07:00",\n freq="1min",\n )\n df = DataFrame(1, index=idx, columns=["A"])\n result = df[start:end]\n expected = df.iloc[0:3, :]\n tm.assert_frame_equal(result, expected)\n\n # GH 16785\n start = str(start)\n end = str(end)\n with pytest.raises(ValueError, match="Both dates must"):\n df[start : end[:-4] + "1:00"]\n\n with pytest.raises(ValueError, match="The index must be timezone"):\n df = df.tz_localize(None)\n df[start:end]\n\n def test_slice_reduce_to_series(self):\n # GH 27516\n df = DataFrame(\n {"A": range(24)}, index=date_range("2000", periods=24, freq="ME")\n )\n expected = Series(\n range(12), index=date_range("2000", periods=12, freq="ME"), name="A"\n )\n result = df.loc["2000", "A"]\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_partial_slicing.py | test_partial_slicing.py | Python | 16,495 | 0.95 | 
0.06867 | 0.138381 | vue-tools | 748 | 2024-08-27T09:30:47.893466 | BSD-3-Clause | true | fbc3a90ba44e51f51236ce0ea354e699 |
import pytest\n\nfrom pandas import (\n NaT,\n date_range,\n to_datetime,\n)\nimport pandas._testing as tm\n\n\nclass TestPickle:\n def test_pickle(self):\n # GH#4606\n idx = to_datetime(["2013-01-01", NaT, "2014-01-06"])\n idx_p = tm.round_trip_pickle(idx)\n assert idx_p[0] == idx[0]\n assert idx_p[1] is NaT\n assert idx_p[2] == idx[2]\n\n def test_pickle_dont_infer_freq(self):\n # GH#11002\n # don't infer freq\n idx = date_range("1750-1-1", "2050-1-1", freq="7D")\n idx_p = tm.round_trip_pickle(idx)\n tm.assert_index_equal(idx, idx_p)\n\n def test_pickle_after_set_freq(self):\n dti = date_range("20130101", periods=3, tz="US/Eastern", name="foo")\n dti = dti._with_freq(None)\n\n res = tm.round_trip_pickle(dti)\n tm.assert_index_equal(res, dti)\n\n def test_roundtrip_pickle_with_tz(self):\n # GH#8367\n # round-trip of timezone\n index = date_range("20130101", periods=3, tz="US/Eastern", name="foo")\n unpickled = tm.round_trip_pickle(index)\n tm.assert_index_equal(index, unpickled)\n\n @pytest.mark.parametrize("freq", ["B", "C"])\n def test_pickle_unpickle(self, freq):\n rng = date_range("2009-01-01", "2010-01-01", freq=freq)\n unpickled = tm.round_trip_pickle(rng)\n assert unpickled.freq == freq\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_pickle.py | test_pickle.py | Python | 1,358 | 0.95 | 0.133333 | 0.135135 | react-lib | 299 | 2024-12-18T09:55:14.269922 | MIT | true | df2ebefa5f4ca19aca5d65d284318b44 |
from datetime import timedelta\n\nimport numpy as np\n\nfrom pandas import (\n DatetimeIndex,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestDatetimeIndexReindex:\n def test_reindex_preserves_tz_if_target_is_empty_list_or_array(self):\n # GH#7774\n index = date_range("2013-01-01", periods=3, tz="US/Eastern")\n assert str(index.reindex([])[0].tz) == "US/Eastern"\n assert str(index.reindex(np.array([]))[0].tz) == "US/Eastern"\n\n def test_reindex_with_same_tz_nearest(self):\n # GH#32740\n rng_a = date_range("2010-01-01", "2010-01-02", periods=24, tz="utc")\n rng_b = date_range("2010-01-01", "2010-01-02", periods=23, tz="utc")\n result1, result2 = rng_a.reindex(\n rng_b, method="nearest", tolerance=timedelta(seconds=20)\n )\n expected_list1 = [\n "2010-01-01 00:00:00",\n "2010-01-01 01:05:27.272727272",\n "2010-01-01 02:10:54.545454545",\n "2010-01-01 03:16:21.818181818",\n "2010-01-01 04:21:49.090909090",\n "2010-01-01 05:27:16.363636363",\n "2010-01-01 06:32:43.636363636",\n "2010-01-01 07:38:10.909090909",\n "2010-01-01 08:43:38.181818181",\n "2010-01-01 09:49:05.454545454",\n "2010-01-01 10:54:32.727272727",\n "2010-01-01 12:00:00",\n "2010-01-01 13:05:27.272727272",\n "2010-01-01 14:10:54.545454545",\n "2010-01-01 15:16:21.818181818",\n "2010-01-01 16:21:49.090909090",\n "2010-01-01 17:27:16.363636363",\n "2010-01-01 18:32:43.636363636",\n "2010-01-01 19:38:10.909090909",\n "2010-01-01 20:43:38.181818181",\n "2010-01-01 21:49:05.454545454",\n "2010-01-01 22:54:32.727272727",\n "2010-01-02 00:00:00",\n ]\n expected1 = DatetimeIndex(\n expected_list1, dtype="datetime64[ns, UTC]", freq=None\n )\n expected2 = np.array([0] + [-1] * 21 + [23], dtype=np.dtype("intp"))\n tm.assert_index_equal(result1, expected1)\n tm.assert_numpy_array_equal(result2, expected2)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_reindex.py | test_reindex.py | Python | 2,145 | 0.95 | 0.053571 | 0.039216 | awesome-app | 233 | 2023-12-05T03:26:26.687998 | GPL-3.0 
| true | efd0fae388cabbe7b1fe17cb95c2ebc7 |
"""\nTests for DatetimeIndex methods behaving like their Timestamp counterparts\n"""\n\nimport calendar\nfrom datetime import (\n date,\n datetime,\n time,\n)\nimport locale\nimport unicodedata\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import timezones\n\nfrom pandas import (\n DatetimeIndex,\n Index,\n NaT,\n Timestamp,\n date_range,\n offsets,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import DatetimeArray\n\n\nclass TestDatetimeIndexOps:\n def test_dti_no_millisecond_field(self):\n msg = "type object 'DatetimeIndex' has no attribute 'millisecond'"\n with pytest.raises(AttributeError, match=msg):\n DatetimeIndex.millisecond\n\n msg = "'DatetimeIndex' object has no attribute 'millisecond'"\n with pytest.raises(AttributeError, match=msg):\n DatetimeIndex([]).millisecond\n\n def test_dti_time(self):\n rng = date_range("1/1/2000", freq="12min", periods=10)\n result = Index(rng).time\n expected = [t.time() for t in rng]\n assert (result == expected).all()\n\n def test_dti_date(self):\n rng = date_range("1/1/2000", freq="12h", periods=10)\n result = Index(rng).date\n expected = [t.date() for t in rng]\n assert (result == expected).all()\n\n @pytest.mark.parametrize(\n "dtype",\n [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],\n )\n def test_dti_date2(self, dtype):\n # Regression test for GH#21230\n expected = np.array([date(2018, 6, 4), NaT])\n\n index = DatetimeIndex(["2018-06-04 10:00:00", NaT], dtype=dtype)\n result = index.date\n\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n "dtype",\n [None, "datetime64[ns, CET]", "datetime64[ns, EST]", "datetime64[ns, UTC]"],\n )\n def test_dti_time2(self, dtype):\n # Regression test for GH#21267\n expected = np.array([time(10, 20, 30), NaT])\n\n index = DatetimeIndex(["2018-06-04 10:20:30", NaT], dtype=dtype)\n result = index.time\n\n tm.assert_numpy_array_equal(result, expected)\n\n def test_dti_timetz(self, tz_naive_fixture):\n 
# GH#21358\n tz = timezones.maybe_get_tz(tz_naive_fixture)\n\n expected = np.array([time(10, 20, 30, tzinfo=tz), NaT])\n\n index = DatetimeIndex(["2018-06-04 10:20:30", NaT], tz=tz)\n result = index.timetz\n\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n "field",\n [\n "dayofweek",\n "day_of_week",\n "dayofyear",\n "day_of_year",\n "quarter",\n "days_in_month",\n "is_month_start",\n "is_month_end",\n "is_quarter_start",\n "is_quarter_end",\n "is_year_start",\n "is_year_end",\n ],\n )\n def test_dti_timestamp_fields(self, field):\n # extra fields from DatetimeIndex like quarter and week\n idx = date_range("2020-01-01", periods=10)\n expected = getattr(idx, field)[-1]\n\n result = getattr(Timestamp(idx[-1]), field)\n assert result == expected\n\n def test_dti_nanosecond(self):\n dti = DatetimeIndex(np.arange(10))\n expected = Index(np.arange(10, dtype=np.int32))\n\n tm.assert_index_equal(dti.nanosecond, expected)\n\n @pytest.mark.parametrize("prefix", ["", "dateutil/"])\n def test_dti_hour_tzaware(self, prefix):\n strdates = ["1/1/2012", "3/1/2012", "4/1/2012"]\n rng = DatetimeIndex(strdates, tz=prefix + "US/Eastern")\n assert (rng.hour == 0).all()\n\n # a more unusual time zone, GH#1946\n dr = date_range(\n "2011-10-02 00:00", freq="h", periods=10, tz=prefix + "America/Atikokan"\n )\n\n expected = Index(np.arange(10, dtype=np.int32))\n tm.assert_index_equal(dr.hour, expected)\n\n # GH#12806\n # error: Unsupported operand types for + ("List[None]" and "List[str]")\n @pytest.mark.parametrize(\n "time_locale", [None] + tm.get_locales() # type: ignore[operator]\n )\n def test_day_name_month_name(self, time_locale):\n # Test Monday -> Sunday and January -> December, in that sequence\n if time_locale is None:\n # If the time_locale is None, day-name and month_name should\n # return the english attributes\n expected_days = [\n "Monday",\n "Tuesday",\n "Wednesday",\n "Thursday",\n "Friday",\n "Saturday",\n "Sunday",\n ]\n expected_months = 
[\n "January",\n "February",\n "March",\n "April",\n "May",\n "June",\n "July",\n "August",\n "September",\n "October",\n "November",\n "December",\n ]\n else:\n with tm.set_locale(time_locale, locale.LC_TIME):\n expected_days = calendar.day_name[:]\n expected_months = calendar.month_name[1:]\n\n # GH#11128\n dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365)\n english_days = [\n "Monday",\n "Tuesday",\n "Wednesday",\n "Thursday",\n "Friday",\n "Saturday",\n "Sunday",\n ]\n for day, name, eng_name in zip(range(4, 11), expected_days, english_days):\n name = name.capitalize()\n assert dti.day_name(locale=time_locale)[day] == name\n assert dti.day_name(locale=None)[day] == eng_name\n ts = Timestamp(datetime(2016, 4, day))\n assert ts.day_name(locale=time_locale) == name\n dti = dti.append(DatetimeIndex([NaT]))\n assert np.isnan(dti.day_name(locale=time_locale)[-1])\n ts = Timestamp(NaT)\n assert np.isnan(ts.day_name(locale=time_locale))\n\n # GH#12805\n dti = date_range(freq="ME", start="2012", end="2013")\n result = dti.month_name(locale=time_locale)\n expected = Index([month.capitalize() for month in expected_months])\n\n # work around different normalization schemes GH#22342\n result = result.str.normalize("NFD")\n expected = expected.str.normalize("NFD")\n\n tm.assert_index_equal(result, expected)\n\n for item, expected in zip(dti, expected_months):\n result = item.month_name(locale=time_locale)\n expected = expected.capitalize()\n\n result = unicodedata.normalize("NFD", result)\n expected = unicodedata.normalize("NFD", result)\n\n assert result == expected\n dti = dti.append(DatetimeIndex([NaT]))\n assert np.isnan(dti.month_name(locale=time_locale)[-1])\n\n def test_dti_week(self):\n # GH#6538: Check that DatetimeIndex and its TimeStamp elements\n # return the same weekofyear accessor close to new year w/ tz\n dates = ["2013/12/29", "2013/12/30", "2013/12/31"]\n dates = DatetimeIndex(dates, tz="Europe/Brussels")\n expected = [52, 1, 1]\n assert 
dates.isocalendar().week.tolist() == expected\n assert [d.weekofyear for d in dates] == expected\n\n @pytest.mark.parametrize("tz", [None, "US/Eastern"])\n def test_dti_fields(self, tz):\n # GH#13303\n dti = date_range(freq="D", start=datetime(1998, 1, 1), periods=365, tz=tz)\n assert dti.year[0] == 1998\n assert dti.month[0] == 1\n assert dti.day[0] == 1\n assert dti.hour[0] == 0\n assert dti.minute[0] == 0\n assert dti.second[0] == 0\n assert dti.microsecond[0] == 0\n assert dti.dayofweek[0] == 3\n\n assert dti.dayofyear[0] == 1\n assert dti.dayofyear[120] == 121\n\n assert dti.isocalendar().week.iloc[0] == 1\n assert dti.isocalendar().week.iloc[120] == 18\n\n assert dti.quarter[0] == 1\n assert dti.quarter[120] == 2\n\n assert dti.days_in_month[0] == 31\n assert dti.days_in_month[90] == 30\n\n assert dti.is_month_start[0]\n assert not dti.is_month_start[1]\n assert dti.is_month_start[31]\n assert dti.is_quarter_start[0]\n assert dti.is_quarter_start[90]\n assert dti.is_year_start[0]\n assert not dti.is_year_start[364]\n assert not dti.is_month_end[0]\n assert dti.is_month_end[30]\n assert not dti.is_month_end[31]\n assert dti.is_month_end[364]\n assert not dti.is_quarter_end[0]\n assert not dti.is_quarter_end[30]\n assert dti.is_quarter_end[89]\n assert dti.is_quarter_end[364]\n assert not dti.is_year_end[0]\n assert dti.is_year_end[364]\n\n assert len(dti.year) == 365\n assert len(dti.month) == 365\n assert len(dti.day) == 365\n assert len(dti.hour) == 365\n assert len(dti.minute) == 365\n assert len(dti.second) == 365\n assert len(dti.microsecond) == 365\n assert len(dti.dayofweek) == 365\n assert len(dti.dayofyear) == 365\n assert len(dti.isocalendar()) == 365\n assert len(dti.quarter) == 365\n assert len(dti.is_month_start) == 365\n assert len(dti.is_month_end) == 365\n assert len(dti.is_quarter_start) == 365\n assert len(dti.is_quarter_end) == 365\n assert len(dti.is_year_start) == 365\n assert len(dti.is_year_end) == 365\n\n dti.name = "name"\n\n # non 
boolean accessors -> return Index\n for accessor in DatetimeArray._field_ops:\n res = getattr(dti, accessor)\n assert len(res) == 365\n assert isinstance(res, Index)\n assert res.name == "name"\n\n # boolean accessors -> return array\n for accessor in DatetimeArray._bool_ops:\n res = getattr(dti, accessor)\n assert len(res) == 365\n assert isinstance(res, np.ndarray)\n\n # test boolean indexing\n res = dti[dti.is_quarter_start]\n exp = dti[[0, 90, 181, 273]]\n tm.assert_index_equal(res, exp)\n res = dti[dti.is_leap_year]\n exp = DatetimeIndex([], freq="D", tz=dti.tz, name="name").as_unit("ns")\n tm.assert_index_equal(res, exp)\n\n def test_dti_is_year_quarter_start(self):\n dti = date_range(freq="BQE-FEB", start=datetime(1998, 1, 1), periods=4)\n\n assert sum(dti.is_quarter_start) == 0\n assert sum(dti.is_quarter_end) == 4\n assert sum(dti.is_year_start) == 0\n assert sum(dti.is_year_end) == 1\n\n def test_dti_is_month_start(self):\n dti = DatetimeIndex(["2000-01-01", "2000-01-02", "2000-01-03"])\n\n assert dti.is_month_start[0] == 1\n\n def test_dti_is_month_start_custom(self):\n # Ensure is_start/end accessors throw ValueError for CustomBusinessDay,\n bday_egypt = offsets.CustomBusinessDay(weekmask="Sun Mon Tue Wed Thu")\n dti = date_range(datetime(2013, 4, 30), periods=5, freq=bday_egypt)\n msg = "Custom business days is not supported by is_month_start"\n with pytest.raises(ValueError, match=msg):\n dti.is_month_start\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_scalar_compat.py | test_scalar_compat.py | Python | 11,156 | 0.95 | 0.091185 | 0.071942 | python-kit | 413 | 2023-09-12T07:12:27.143614 | GPL-3.0 | true | b8ca345cbea3c1e9ab3b5b65e4ae5621 |
from datetime import (\n datetime,\n timezone,\n)\n\nimport numpy as np\nimport pytest\nimport pytz\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n Series,\n Timestamp,\n bdate_range,\n date_range,\n)\nimport pandas._testing as tm\n\nfrom pandas.tseries.offsets import (\n BMonthEnd,\n Minute,\n MonthEnd,\n)\n\nSTART, END = datetime(2009, 1, 1), datetime(2010, 1, 1)\n\n\nclass TestDatetimeIndexSetOps:\n tz = [\n None,\n "UTC",\n "Asia/Tokyo",\n "US/Eastern",\n "dateutil/Asia/Singapore",\n "dateutil/US/Pacific",\n ]\n\n # TODO: moved from test_datetimelike; dedup with version below\n def test_union2(self, sort):\n everything = date_range("2020-01-01", periods=10)\n first = everything[:5]\n second = everything[5:]\n union = first.union(second, sort=sort)\n tm.assert_index_equal(union, everything)\n\n @pytest.mark.parametrize("box", [np.array, Series, list])\n def test_union3(self, sort, box):\n everything = date_range("2020-01-01", periods=10)\n first = everything[:5]\n second = everything[5:]\n\n # GH 10149 support listlike inputs other than Index objects\n expected = first.union(second, sort=sort)\n case = box(second.values)\n result = first.union(case, sort=sort)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("tz", tz)\n def test_union(self, tz, sort):\n rng1 = date_range("1/1/2000", freq="D", periods=5, tz=tz)\n other1 = date_range("1/6/2000", freq="D", periods=5, tz=tz)\n expected1 = date_range("1/1/2000", freq="D", periods=10, tz=tz)\n expected1_notsorted = DatetimeIndex(list(other1) + list(rng1))\n\n rng2 = date_range("1/1/2000", freq="D", periods=5, tz=tz)\n other2 = date_range("1/4/2000", freq="D", periods=5, tz=tz)\n expected2 = date_range("1/1/2000", freq="D", periods=8, tz=tz)\n expected2_notsorted = DatetimeIndex(list(other2) + list(rng2[:3]))\n\n rng3 = date_range("1/1/2000", freq="D", periods=5, tz=tz)\n other3 = DatetimeIndex([], 
tz=tz).as_unit("ns")\n expected3 = date_range("1/1/2000", freq="D", periods=5, tz=tz)\n expected3_notsorted = rng3\n\n for rng, other, exp, exp_notsorted in [\n (rng1, other1, expected1, expected1_notsorted),\n (rng2, other2, expected2, expected2_notsorted),\n (rng3, other3, expected3, expected3_notsorted),\n ]:\n result_union = rng.union(other, sort=sort)\n tm.assert_index_equal(result_union, exp)\n\n result_union = other.union(rng, sort=sort)\n if sort is None:\n tm.assert_index_equal(result_union, exp)\n else:\n tm.assert_index_equal(result_union, exp_notsorted)\n\n def test_union_coverage(self, sort):\n idx = DatetimeIndex(["2000-01-03", "2000-01-01", "2000-01-02"])\n ordered = DatetimeIndex(idx.sort_values(), freq="infer")\n result = ordered.union(idx, sort=sort)\n tm.assert_index_equal(result, ordered)\n\n result = ordered[:0].union(ordered, sort=sort)\n tm.assert_index_equal(result, ordered)\n assert result.freq == ordered.freq\n\n def test_union_bug_1730(self, sort):\n rng_a = date_range("1/1/2012", periods=4, freq="3h")\n rng_b = date_range("1/1/2012", periods=4, freq="4h")\n\n result = rng_a.union(rng_b, sort=sort)\n exp = list(rng_a) + list(rng_b[1:])\n if sort is None:\n exp = DatetimeIndex(sorted(exp))\n else:\n exp = DatetimeIndex(exp)\n tm.assert_index_equal(result, exp)\n\n def test_union_bug_1745(self, sort):\n left = DatetimeIndex(["2012-05-11 15:19:49.695000"])\n right = DatetimeIndex(\n [\n "2012-05-29 13:04:21.322000",\n "2012-05-11 15:27:24.873000",\n "2012-05-11 15:31:05.350000",\n ]\n )\n\n result = left.union(right, sort=sort)\n exp = DatetimeIndex(\n [\n "2012-05-11 15:19:49.695000",\n "2012-05-29 13:04:21.322000",\n "2012-05-11 15:27:24.873000",\n "2012-05-11 15:31:05.350000",\n ]\n )\n if sort is None:\n exp = exp.sort_values()\n tm.assert_index_equal(result, exp)\n\n def test_union_bug_4564(self, sort):\n from pandas import DateOffset\n\n left = date_range("2013-01-01", "2013-02-01")\n right = left + DateOffset(minutes=15)\n\n result = 
left.union(right, sort=sort)\n exp = list(left) + list(right)\n if sort is None:\n exp = DatetimeIndex(sorted(exp))\n else:\n exp = DatetimeIndex(exp)\n tm.assert_index_equal(result, exp)\n\n def test_union_freq_both_none(self, sort):\n # GH11086\n expected = bdate_range("20150101", periods=10)\n expected._data.freq = None\n\n result = expected.union(expected, sort=sort)\n tm.assert_index_equal(result, expected)\n assert result.freq is None\n\n def test_union_freq_infer(self):\n # When taking the union of two DatetimeIndexes, we infer\n # a freq even if the arguments don't have freq. This matches\n # TimedeltaIndex behavior.\n dti = date_range("2016-01-01", periods=5)\n left = dti[[0, 1, 3, 4]]\n right = dti[[2, 3, 1]]\n\n assert left.freq is None\n assert right.freq is None\n\n result = left.union(right)\n tm.assert_index_equal(result, dti)\n assert result.freq == "D"\n\n def test_union_dataframe_index(self):\n rng1 = date_range("1/1/1999", "1/1/2012", freq="MS")\n s1 = Series(np.random.default_rng(2).standard_normal(len(rng1)), rng1)\n\n rng2 = date_range("1/1/1980", "12/1/2001", freq="MS")\n s2 = Series(np.random.default_rng(2).standard_normal(len(rng2)), rng2)\n df = DataFrame({"s1": s1, "s2": s2})\n\n exp = date_range("1/1/1980", "1/1/2012", freq="MS")\n tm.assert_index_equal(df.index, exp)\n\n def test_union_with_DatetimeIndex(self, sort):\n i1 = Index(np.arange(0, 20, 2, dtype=np.int64))\n i2 = date_range(start="2012-01-03 00:00:00", periods=10, freq="D")\n # Works\n i1.union(i2, sort=sort)\n # Fails with "AttributeError: can't set attribute"\n i2.union(i1, sort=sort)\n\n def test_union_same_timezone_different_units(self):\n # GH 55238\n idx1 = date_range("2000-01-01", periods=3, tz="UTC").as_unit("ms")\n idx2 = date_range("2000-01-01", periods=3, tz="UTC").as_unit("us")\n result = idx1.union(idx2)\n expected = date_range("2000-01-01", periods=3, tz="UTC").as_unit("us")\n tm.assert_index_equal(result, expected)\n\n # TODO: moved from test_datetimelike; 
de-duplicate with version below\n def test_intersection2(self):\n first = date_range("2020-01-01", periods=10)\n second = first[5:]\n intersect = first.intersection(second)\n tm.assert_index_equal(intersect, second)\n\n # GH 10149\n cases = [klass(second.values) for klass in [np.array, Series, list]]\n for case in cases:\n result = first.intersection(case)\n tm.assert_index_equal(result, second)\n\n third = Index(["a", "b", "c"])\n result = first.intersection(third)\n expected = Index([], dtype=object)\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize(\n "tz", [None, "Asia/Tokyo", "US/Eastern", "dateutil/US/Pacific"]\n )\n def test_intersection(self, tz, sort):\n # GH 4690 (with tz)\n base = date_range("6/1/2000", "6/30/2000", freq="D", name="idx")\n\n # if target has the same name, it is preserved\n rng2 = date_range("5/15/2000", "6/20/2000", freq="D", name="idx")\n expected2 = date_range("6/1/2000", "6/20/2000", freq="D", name="idx")\n\n # if target name is different, it will be reset\n rng3 = date_range("5/15/2000", "6/20/2000", freq="D", name="other")\n expected3 = date_range("6/1/2000", "6/20/2000", freq="D", name=None)\n\n rng4 = date_range("7/1/2000", "7/31/2000", freq="D", name="idx")\n expected4 = DatetimeIndex([], freq="D", name="idx", dtype="M8[ns]")\n\n for rng, expected in [\n (rng2, expected2),\n (rng3, expected3),\n (rng4, expected4),\n ]:\n result = base.intersection(rng)\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n # non-monotonic\n base = DatetimeIndex(\n ["2011-01-05", "2011-01-04", "2011-01-02", "2011-01-03"], tz=tz, name="idx"\n ).as_unit("ns")\n\n rng2 = DatetimeIndex(\n ["2011-01-04", "2011-01-02", "2011-02-02", "2011-02-03"], tz=tz, name="idx"\n ).as_unit("ns")\n expected2 = DatetimeIndex(\n ["2011-01-04", "2011-01-02"], tz=tz, name="idx"\n ).as_unit("ns")\n\n rng3 = DatetimeIndex(\n ["2011-01-04", "2011-01-02", "2011-02-02", "2011-02-03"],\n tz=tz,\n name="other",\n 
).as_unit("ns")\n expected3 = DatetimeIndex(\n ["2011-01-04", "2011-01-02"], tz=tz, name=None\n ).as_unit("ns")\n\n # GH 7880\n rng4 = date_range("7/1/2000", "7/31/2000", freq="D", tz=tz, name="idx")\n expected4 = DatetimeIndex([], tz=tz, name="idx").as_unit("ns")\n assert expected4.freq is None\n\n for rng, expected in [\n (rng2, expected2),\n (rng3, expected3),\n (rng4, expected4),\n ]:\n result = base.intersection(rng, sort=sort)\n if sort is None:\n expected = expected.sort_values()\n tm.assert_index_equal(result, expected)\n assert result.freq == expected.freq\n\n # parametrize over both anchored and non-anchored freqs, as they\n # have different code paths\n @pytest.mark.parametrize("freq", ["min", "B"])\n def test_intersection_empty(self, tz_aware_fixture, freq):\n # empty same freq GH2129\n tz = tz_aware_fixture\n rng = date_range("6/1/2000", "6/15/2000", freq=freq, tz=tz)\n result = rng[0:0].intersection(rng)\n assert len(result) == 0\n assert result.freq == rng.freq\n\n result = rng.intersection(rng[0:0])\n assert len(result) == 0\n assert result.freq == rng.freq\n\n # no overlap GH#33604\n check_freq = freq != "min" # We don't preserve freq on non-anchored offsets\n result = rng[:3].intersection(rng[-3:])\n tm.assert_index_equal(result, rng[:0])\n if check_freq:\n # We don't preserve freq on non-anchored offsets\n assert result.freq == rng.freq\n\n # swapped left and right\n result = rng[-3:].intersection(rng[:3])\n tm.assert_index_equal(result, rng[:0])\n if check_freq:\n # We don't preserve freq on non-anchored offsets\n assert result.freq == rng.freq\n\n def test_intersection_bug_1708(self):\n from pandas import DateOffset\n\n index_1 = date_range("1/1/2012", periods=4, freq="12h")\n index_2 = index_1 + DateOffset(hours=1)\n\n result = index_1.intersection(index_2)\n assert len(result) == 0\n\n @pytest.mark.parametrize("tz", tz)\n def test_difference(self, tz, sort):\n rng_dates = ["1/2/2000", "1/3/2000", "1/1/2000", "1/4/2000", "1/5/2000"]\n\n rng1 = 
DatetimeIndex(rng_dates, tz=tz)\n other1 = date_range("1/6/2000", freq="D", periods=5, tz=tz)\n expected1 = DatetimeIndex(rng_dates, tz=tz)\n\n rng2 = DatetimeIndex(rng_dates, tz=tz)\n other2 = date_range("1/4/2000", freq="D", periods=5, tz=tz)\n expected2 = DatetimeIndex(rng_dates[:3], tz=tz)\n\n rng3 = DatetimeIndex(rng_dates, tz=tz)\n other3 = DatetimeIndex([], tz=tz)\n expected3 = DatetimeIndex(rng_dates, tz=tz)\n\n for rng, other, expected in [\n (rng1, other1, expected1),\n (rng2, other2, expected2),\n (rng3, other3, expected3),\n ]:\n result_diff = rng.difference(other, sort)\n if sort is None and len(other):\n # We dont sort (yet?) when empty GH#24959\n expected = expected.sort_values()\n tm.assert_index_equal(result_diff, expected)\n\n def test_difference_freq(self, sort):\n # GH14323: difference of DatetimeIndex should not preserve frequency\n\n index = date_range("20160920", "20160925", freq="D")\n other = date_range("20160921", "20160924", freq="D")\n expected = DatetimeIndex(["20160920", "20160925"], dtype="M8[ns]", freq=None)\n idx_diff = index.difference(other, sort)\n tm.assert_index_equal(idx_diff, expected)\n tm.assert_attr_equal("freq", idx_diff, expected)\n\n # preserve frequency when the difference is a contiguous\n # subset of the original range\n other = date_range("20160922", "20160925", freq="D")\n idx_diff = index.difference(other, sort)\n expected = DatetimeIndex(["20160920", "20160921"], dtype="M8[ns]", freq="D")\n tm.assert_index_equal(idx_diff, expected)\n tm.assert_attr_equal("freq", idx_diff, expected)\n\n def test_datetimeindex_diff(self, sort):\n dti1 = date_range(freq="QE-JAN", start=datetime(1997, 12, 31), periods=100)\n dti2 = date_range(freq="QE-JAN", start=datetime(1997, 12, 31), periods=98)\n assert len(dti1.difference(dti2, sort)) == 2\n\n @pytest.mark.parametrize("tz", [None, "Asia/Tokyo", "US/Eastern"])\n def test_setops_preserve_freq(self, tz):\n rng = date_range("1/1/2000", "1/1/2002", name="idx", tz=tz)\n\n result = 
rng[:50].union(rng[50:100])\n assert result.name == rng.name\n assert result.freq == rng.freq\n assert result.tz == rng.tz\n\n result = rng[:50].union(rng[30:100])\n assert result.name == rng.name\n assert result.freq == rng.freq\n assert result.tz == rng.tz\n\n result = rng[:50].union(rng[60:100])\n assert result.name == rng.name\n assert result.freq is None\n assert result.tz == rng.tz\n\n result = rng[:50].intersection(rng[25:75])\n assert result.name == rng.name\n assert result.freqstr == "D"\n assert result.tz == rng.tz\n\n nofreq = DatetimeIndex(list(rng[25:75]), name="other")\n result = rng[:50].union(nofreq)\n assert result.name is None\n assert result.freq == rng.freq\n assert result.tz == rng.tz\n\n result = rng[:50].intersection(nofreq)\n assert result.name is None\n assert result.freq == rng.freq\n assert result.tz == rng.tz\n\n def test_intersection_non_tick_no_fastpath(self):\n # GH#42104\n dti = DatetimeIndex(\n [\n "2018-12-31",\n "2019-03-31",\n "2019-06-30",\n "2019-09-30",\n "2019-12-31",\n "2020-03-31",\n ],\n freq="QE-DEC",\n )\n result = dti[::2].intersection(dti[1::2])\n expected = dti[:0]\n tm.assert_index_equal(result, expected)\n\n def test_dti_intersection(self):\n rng = date_range("1/1/2011", periods=100, freq="h", tz="utc")\n\n left = rng[10:90][::-1]\n right = rng[20:80][::-1]\n\n assert left.tz == rng.tz\n result = left.intersection(right)\n assert result.tz == left.tz\n\n # Note: not difference, as there is no symmetry requirement there\n @pytest.mark.parametrize("setop", ["union", "intersection", "symmetric_difference"])\n def test_dti_setop_aware(self, setop):\n # non-overlapping\n # GH#39328 as of 2.0 we cast these to UTC instead of object\n rng = date_range("2012-11-15 00:00:00", periods=6, freq="h", tz="US/Central")\n\n rng2 = date_range("2012-11-15 12:00:00", periods=6, freq="h", tz="US/Eastern")\n\n result = getattr(rng, setop)(rng2)\n\n left = rng.tz_convert("UTC")\n right = rng2.tz_convert("UTC")\n expected = getattr(left, 
setop)(right)\n tm.assert_index_equal(result, expected)\n assert result.tz == left.tz\n if len(result):\n assert result[0].tz is timezone.utc\n assert result[-1].tz is timezone.utc\n\n def test_dti_union_mixed(self):\n # GH#21671\n rng = DatetimeIndex([Timestamp("2011-01-01"), pd.NaT])\n rng2 = DatetimeIndex(["2012-01-01", "2012-01-02"], tz="Asia/Tokyo")\n result = rng.union(rng2)\n expected = Index(\n [\n Timestamp("2011-01-01"),\n pd.NaT,\n Timestamp("2012-01-01", tz="Asia/Tokyo"),\n Timestamp("2012-01-02", tz="Asia/Tokyo"),\n ],\n dtype=object,\n )\n tm.assert_index_equal(result, expected)\n\n\nclass TestBusinessDatetimeIndex:\n def test_union(self, sort):\n rng = bdate_range(START, END)\n # overlapping\n left = rng[:10]\n right = rng[5:10]\n\n the_union = left.union(right, sort=sort)\n assert isinstance(the_union, DatetimeIndex)\n\n # non-overlapping, gap in middle\n left = rng[:5]\n right = rng[10:]\n\n the_union = left.union(right, sort=sort)\n assert isinstance(the_union, Index)\n\n # non-overlapping, no gap\n left = rng[:5]\n right = rng[5:10]\n\n the_union = left.union(right, sort=sort)\n assert isinstance(the_union, DatetimeIndex)\n\n # order does not matter\n if sort is None:\n tm.assert_index_equal(right.union(left, sort=sort), the_union)\n else:\n expected = DatetimeIndex(list(right) + list(left))\n tm.assert_index_equal(right.union(left, sort=sort), expected)\n\n # overlapping, but different offset\n rng = date_range(START, END, freq=BMonthEnd())\n\n the_union = rng.union(rng, sort=sort)\n assert isinstance(the_union, DatetimeIndex)\n\n def test_union_not_cacheable(self, sort):\n rng = date_range("1/1/2000", periods=50, freq=Minute())\n rng1 = rng[10:]\n rng2 = rng[:25]\n the_union = rng1.union(rng2, sort=sort)\n if sort is None:\n tm.assert_index_equal(the_union, rng)\n else:\n expected = DatetimeIndex(list(rng[10:]) + list(rng[:10]))\n tm.assert_index_equal(the_union, expected)\n\n rng1 = rng[10:]\n rng2 = rng[15:35]\n the_union = rng1.union(rng2, 
sort=sort)\n expected = rng[10:]\n tm.assert_index_equal(the_union, expected)\n\n def test_intersection(self):\n rng = date_range("1/1/2000", periods=50, freq=Minute())\n rng1 = rng[10:]\n rng2 = rng[:25]\n the_int = rng1.intersection(rng2)\n expected = rng[10:25]\n tm.assert_index_equal(the_int, expected)\n assert isinstance(the_int, DatetimeIndex)\n assert the_int.freq == rng.freq\n\n the_int = rng1.intersection(rng2)\n tm.assert_index_equal(the_int, expected)\n\n # non-overlapping\n the_int = rng[:10].intersection(rng[10:])\n expected = DatetimeIndex([]).as_unit("ns")\n tm.assert_index_equal(the_int, expected)\n\n def test_intersection_bug(self):\n # GH #771\n a = bdate_range("11/30/2011", "12/31/2011")\n b = bdate_range("12/10/2011", "12/20/2011")\n result = a.intersection(b)\n tm.assert_index_equal(result, b)\n assert result.freq == b.freq\n\n def test_intersection_list(self):\n # GH#35876\n # values is not an Index -> no name -> retain "a"\n values = [Timestamp("2020-01-01"), Timestamp("2020-02-01")]\n idx = DatetimeIndex(values, name="a")\n res = idx.intersection(values)\n tm.assert_index_equal(res, idx)\n\n def test_month_range_union_tz_pytz(self, sort):\n tz = pytz.timezone("US/Eastern")\n\n early_start = datetime(2011, 1, 1)\n early_end = datetime(2011, 3, 1)\n\n late_start = datetime(2011, 3, 1)\n late_end = datetime(2011, 5, 1)\n\n early_dr = date_range(start=early_start, end=early_end, tz=tz, freq=MonthEnd())\n late_dr = date_range(start=late_start, end=late_end, tz=tz, freq=MonthEnd())\n\n early_dr.union(late_dr, sort=sort)\n\n @td.skip_if_windows\n def test_month_range_union_tz_dateutil(self, sort):\n from pandas._libs.tslibs.timezones import dateutil_gettz\n\n tz = dateutil_gettz("US/Eastern")\n\n early_start = datetime(2011, 1, 1)\n early_end = datetime(2011, 3, 1)\n\n late_start = datetime(2011, 3, 1)\n late_end = datetime(2011, 5, 1)\n\n early_dr = date_range(start=early_start, end=early_end, tz=tz, freq=MonthEnd())\n late_dr = 
date_range(start=late_start, end=late_end, tz=tz, freq=MonthEnd())\n\n early_dr.union(late_dr, sort=sort)\n\n @pytest.mark.parametrize("sort", [False, None])\n def test_intersection_duplicates(self, sort):\n # GH#38196\n idx1 = Index(\n [\n Timestamp("2019-12-13"),\n Timestamp("2019-12-12"),\n Timestamp("2019-12-12"),\n ]\n )\n result = idx1.intersection(idx1, sort=sort)\n expected = Index([Timestamp("2019-12-13"), Timestamp("2019-12-12")])\n tm.assert_index_equal(result, expected)\n\n\nclass TestCustomDatetimeIndex:\n def test_union(self, sort):\n # overlapping\n rng = bdate_range(START, END, freq="C")\n left = rng[:10]\n right = rng[5:10]\n\n the_union = left.union(right, sort=sort)\n assert isinstance(the_union, DatetimeIndex)\n\n # non-overlapping, gap in middle\n left = rng[:5]\n right = rng[10:]\n\n the_union = left.union(right, sort)\n assert isinstance(the_union, Index)\n\n # non-overlapping, no gap\n left = rng[:5]\n right = rng[5:10]\n\n the_union = left.union(right, sort=sort)\n assert isinstance(the_union, DatetimeIndex)\n\n # order does not matter\n if sort is None:\n tm.assert_index_equal(right.union(left, sort=sort), the_union)\n\n # overlapping, but different offset\n rng = date_range(START, END, freq=BMonthEnd())\n\n the_union = rng.union(rng, sort=sort)\n assert isinstance(the_union, DatetimeIndex)\n\n def test_intersection_bug(self):\n # GH #771\n a = bdate_range("11/30/2011", "12/31/2011", freq="C")\n b = bdate_range("12/10/2011", "12/20/2011", freq="C")\n result = a.intersection(b)\n tm.assert_index_equal(result, b)\n assert result.freq == b.freq\n\n @pytest.mark.parametrize(\n "tz", [None, "UTC", "Europe/Berlin", pytz.FixedOffset(-60)]\n )\n def test_intersection_dst_transition(self, tz):\n # GH 46702: Europe/Berlin has DST transition\n idx1 = date_range("2020-03-27", periods=5, freq="D", tz=tz)\n idx2 = date_range("2020-03-30", periods=5, freq="D", tz=tz)\n result = idx1.intersection(idx2)\n expected = date_range("2020-03-30", periods=2, 
freq="D", tz=tz)\n tm.assert_index_equal(result, expected)\n\n # GH#45863 same problem for union\n index1 = date_range("2021-10-28", periods=3, freq="D", tz="Europe/London")\n index2 = date_range("2021-10-30", periods=4, freq="D", tz="Europe/London")\n result = index1.union(index2)\n expected = date_range("2021-10-28", periods=6, freq="D", tz="Europe/London")\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_setops.py | test_setops.py | Python | 23,574 | 0.95 | 0.09009 | 0.091743 | react-lib | 321 | 2024-03-24T17:36:23.472621 | MIT | true | 887830f0f3a828cfbbd57501baf94d9c |
"""\nTests for DatetimeIndex timezone-related methods\n"""\nfrom datetime import (\n datetime,\n timedelta,\n timezone,\n tzinfo,\n)\n\nfrom dateutil.tz import gettz\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs import (\n conversion,\n timezones,\n)\n\nimport pandas as pd\nfrom pandas import (\n DatetimeIndex,\n Timestamp,\n bdate_range,\n date_range,\n isna,\n to_datetime,\n)\nimport pandas._testing as tm\n\n\nclass FixedOffset(tzinfo):\n """Fixed offset in minutes east from UTC."""\n\n def __init__(self, offset, name) -> None:\n self.__offset = timedelta(minutes=offset)\n self.__name = name\n\n def utcoffset(self, dt):\n return self.__offset\n\n def tzname(self, dt):\n return self.__name\n\n def dst(self, dt):\n return timedelta(0)\n\n\nfixed_off_no_name = FixedOffset(-330, None)\n\n\nclass TestDatetimeIndexTimezones:\n # -------------------------------------------------------------\n # Unsorted\n\n def test_dti_drop_dont_lose_tz(self):\n # GH#2621\n ind = date_range("2012-12-01", periods=10, tz="utc")\n ind = ind.drop(ind[-1])\n\n assert ind.tz is not None\n\n def test_dti_tz_conversion_freq(self, tz_naive_fixture):\n # GH25241\n t3 = DatetimeIndex(["2019-01-01 10:00"], freq="h")\n assert t3.tz_localize(tz=tz_naive_fixture).freq == t3.freq\n t4 = DatetimeIndex(["2019-01-02 12:00"], tz="UTC", freq="min")\n assert t4.tz_convert(tz="UTC").freq == t4.freq\n\n def test_drop_dst_boundary(self):\n # see gh-18031\n tz = "Europe/Brussels"\n freq = "15min"\n\n start = Timestamp("201710290100", tz=tz)\n end = Timestamp("201710290300", tz=tz)\n index = date_range(start=start, end=end, freq=freq)\n\n expected = DatetimeIndex(\n [\n "201710290115",\n "201710290130",\n "201710290145",\n "201710290200",\n "201710290215",\n "201710290230",\n "201710290245",\n "201710290200",\n "201710290215",\n "201710290230",\n "201710290245",\n "201710290300",\n ],\n dtype="M8[ns, Europe/Brussels]",\n freq=freq,\n ambiguous=[\n True,\n True,\n True,\n True,\n 
True,\n True,\n True,\n False,\n False,\n False,\n False,\n False,\n ],\n )\n result = index.drop(index[0])\n tm.assert_index_equal(result, expected)\n\n def test_date_range_localize(self, unit):\n rng = date_range(\n "3/11/2012 03:00", periods=15, freq="h", tz="US/Eastern", unit=unit\n )\n rng2 = DatetimeIndex(\n ["3/11/2012 03:00", "3/11/2012 04:00"], dtype=f"M8[{unit}, US/Eastern]"\n )\n rng3 = date_range("3/11/2012 03:00", periods=15, freq="h", unit=unit)\n rng3 = rng3.tz_localize("US/Eastern")\n\n tm.assert_index_equal(rng._with_freq(None), rng3)\n\n # DST transition time\n val = rng[0]\n exp = Timestamp("3/11/2012 03:00", tz="US/Eastern")\n\n assert val.hour == 3\n assert exp.hour == 3\n assert val == exp # same UTC value\n tm.assert_index_equal(rng[:2], rng2)\n\n def test_date_range_localize2(self, unit):\n # Right before the DST transition\n rng = date_range(\n "3/11/2012 00:00", periods=2, freq="h", tz="US/Eastern", unit=unit\n )\n rng2 = DatetimeIndex(\n ["3/11/2012 00:00", "3/11/2012 01:00"],\n dtype=f"M8[{unit}, US/Eastern]",\n freq="h",\n )\n tm.assert_index_equal(rng, rng2)\n exp = Timestamp("3/11/2012 00:00", tz="US/Eastern")\n assert exp.hour == 0\n assert rng[0] == exp\n exp = Timestamp("3/11/2012 01:00", tz="US/Eastern")\n assert exp.hour == 1\n assert rng[1] == exp\n\n rng = date_range(\n "3/11/2012 00:00", periods=10, freq="h", tz="US/Eastern", unit=unit\n )\n assert rng[2].hour == 3\n\n def test_timestamp_equality_different_timezones(self):\n utc_range = date_range("1/1/2000", periods=20, tz="UTC")\n eastern_range = utc_range.tz_convert("US/Eastern")\n berlin_range = utc_range.tz_convert("Europe/Berlin")\n\n for a, b, c in zip(utc_range, eastern_range, berlin_range):\n assert a == b\n assert b == c\n assert a == c\n\n assert (utc_range == eastern_range).all()\n assert (utc_range == berlin_range).all()\n assert (berlin_range == eastern_range).all()\n\n def test_dti_equals_with_tz(self):\n left = date_range("1/1/2011", periods=100, freq="h", 
tz="utc")\n right = date_range("1/1/2011", periods=100, freq="h", tz="US/Eastern")\n\n assert not left.equals(right)\n\n @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])\n def test_dti_tz_nat(self, tzstr):\n idx = DatetimeIndex([Timestamp("2013-1-1", tz=tzstr), pd.NaT])\n\n assert isna(idx[1])\n assert idx[0].tzinfo is not None\n\n @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])\n def test_utc_box_timestamp_and_localize(self, tzstr):\n tz = timezones.maybe_get_tz(tzstr)\n\n rng = date_range("3/11/2012", "3/12/2012", freq="h", tz="utc")\n rng_eastern = rng.tz_convert(tzstr)\n\n expected = rng[-1].astimezone(tz)\n\n stamp = rng_eastern[-1]\n assert stamp == expected\n assert stamp.tzinfo == expected.tzinfo\n\n # right tzinfo\n rng = date_range("3/13/2012", "3/14/2012", freq="h", tz="utc")\n rng_eastern = rng.tz_convert(tzstr)\n # test not valid for dateutil timezones.\n # assert 'EDT' in repr(rng_eastern[0].tzinfo)\n assert "EDT" in repr(rng_eastern[0].tzinfo) or "tzfile" in repr(\n rng_eastern[0].tzinfo\n )\n\n @pytest.mark.parametrize("tz", [pytz.timezone("US/Central"), gettz("US/Central")])\n def test_with_tz(self, tz):\n # just want it to work\n start = datetime(2011, 3, 12, tzinfo=pytz.utc)\n dr = bdate_range(start, periods=50, freq=pd.offsets.Hour())\n assert dr.tz is pytz.utc\n\n # DateRange with naive datetimes\n dr = bdate_range("1/1/2005", "1/1/2009", tz=pytz.utc)\n dr = bdate_range("1/1/2005", "1/1/2009", tz=tz)\n\n # normalized\n central = dr.tz_convert(tz)\n assert central.tz is tz\n naive = central[0].to_pydatetime().replace(tzinfo=None)\n comp = conversion.localize_pydatetime(naive, tz).tzinfo\n assert central[0].tz is comp\n\n # compare vs a localized tz\n naive = dr[0].to_pydatetime().replace(tzinfo=None)\n comp = conversion.localize_pydatetime(naive, tz).tzinfo\n assert central[0].tz is comp\n\n # datetimes with tzinfo set\n dr = bdate_range(\n datetime(2005, 1, 1, tzinfo=pytz.utc), datetime(2009, 1, 
1, tzinfo=pytz.utc)\n )\n msg = "Start and end cannot both be tz-aware with different timezones"\n with pytest.raises(Exception, match=msg):\n bdate_range(datetime(2005, 1, 1, tzinfo=pytz.utc), "1/1/2009", tz=tz)\n\n @pytest.mark.parametrize("tz", [pytz.timezone("US/Eastern"), gettz("US/Eastern")])\n def test_dti_convert_tz_aware_datetime_datetime(self, tz):\n # GH#1581\n dates = [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)]\n\n dates_aware = [conversion.localize_pydatetime(x, tz) for x in dates]\n result = DatetimeIndex(dates_aware).as_unit("ns")\n assert timezones.tz_compare(result.tz, tz)\n\n converted = to_datetime(dates_aware, utc=True).as_unit("ns")\n ex_vals = np.array([Timestamp(x).as_unit("ns")._value for x in dates_aware])\n tm.assert_numpy_array_equal(converted.asi8, ex_vals)\n assert converted.tz is timezone.utc\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\test_timezones.py | test_timezones.py | Python | 8,046 | 0.95 | 0.087649 | 0.07767 | python-kit | 261 | 2023-10-26T00:17:51.849062 | Apache-2.0 | true | d3e92a7bf06304168e74979d6860418a |
from datetime import timedelta\n\nfrom pandas import (\n Index,\n Timestamp,\n date_range,\n isna,\n)\n\n\nclass TestAsOf:\n def test_asof_partial(self):\n index = date_range("2010-01-01", periods=2, freq="ME")\n expected = Timestamp("2010-02-28")\n result = index.asof("2010-02")\n assert result == expected\n assert not isinstance(result, Index)\n\n def test_asof(self):\n index = date_range("2020-01-01", periods=10)\n\n dt = index[0]\n assert index.asof(dt) == dt\n assert isna(index.asof(dt - timedelta(1)))\n\n dt = index[-1]\n assert index.asof(dt + timedelta(1)) == dt\n\n dt = index[0].to_pydatetime()\n assert isinstance(index.asof(dt), Timestamp)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\methods\test_asof.py | test_asof.py | Python | 738 | 0.85 | 0.1 | 0 | node-utils | 395 | 2023-08-03T22:07:06.947148 | GPL-3.0 | true | c1efc9eed6bf93ba24dd53319aaa5789 |
"""Tests for DatetimeIndex.astype: conversions to object/int/str/category,
the disallowed tz-aware <-> tz-naive casts, and period/datetime64 round-trips."""
from datetime import datetime

import dateutil
import numpy as np
import pytest
import pytz

import pandas as pd
from pandas import (
    DatetimeIndex,
    Index,
    NaT,
    PeriodIndex,
    Timestamp,
    date_range,
)
import pandas._testing as tm


class TestDatetimeIndex:
    """astype behavior on DatetimeIndex (GH 13149, GH 13209 and friends)."""

    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
    def test_dti_astype_asobject_around_dst_transition(self, tzstr):
        # GH#1345

        # dates around a dst transition
        rng = date_range("2/13/2010", "5/6/2010", tz=tzstr)

        objs = rng.astype(object)
        for i, x in enumerate(objs):
            exval = rng[i]
            assert x == exval
            assert x.tzinfo == exval.tzinfo

        # repeated to confirm the conversion is stable across calls
        objs = rng.astype(object)
        for i, x in enumerate(objs):
            exval = rng[i]
            assert x == exval
            assert x.tzinfo == exval.tzinfo

    def test_astype(self):
        # GH 13149, GH 13209
        idx = DatetimeIndex(
            ["2016-05-16", "NaT", NaT, np.nan], dtype="M8[ns]", name="idx"
        )

        result = idx.astype(object)
        expected = Index(
            [Timestamp("2016-05-16")] + [NaT] * 3, dtype=object, name="idx"
        )
        tm.assert_index_equal(result, expected)

        # NaT maps to the int64 sentinel (iNaT == np.iinfo("int64").min)
        result = idx.astype(np.int64)
        expected = Index(
            [1463356800000000000] + [-9223372036854775808] * 3,
            dtype=np.int64,
            name="idx",
        )
        tm.assert_index_equal(result, expected)

    def test_astype2(self):
        # "i8" cast matches the asi8 view of the underlying data
        rng = date_range("1/1/2000", periods=10, name="idx")
        result = rng.astype("i8")
        tm.assert_index_equal(result, Index(rng.asi8, name="idx"))
        tm.assert_numpy_array_equal(result.values, rng.asi8)

    def test_astype_uint(self):
        # unsigned casts are rejected with a hint to use int64
        arr = date_range("2000", periods=2, name="idx")

        with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):
            arr.astype("uint64")
        with pytest.raises(TypeError, match=r"Do obj.astype\('int64'\)"):
            arr.astype("uint32")

    def test_astype_with_tz(self):
        # with tz
        rng = date_range("1/1/2000", periods=10, tz="US/Eastern")
        msg = "Cannot use .astype to convert from timezone-aware"
        with pytest.raises(TypeError, match=msg):
            # deprecated
            rng.astype("datetime64[ns]")
        with pytest.raises(TypeError, match=msg):
            # check DatetimeArray while we're here deprecated
            rng._data.astype("datetime64[ns]")

    def test_astype_tzaware_to_tzaware(self):
        # GH 18951: tz-aware to tz-aware
        idx = date_range("20170101", periods=4, tz="US/Pacific")
        result = idx.astype("datetime64[ns, US/Eastern]")
        expected = date_range("20170101 03:00:00", periods=4, tz="US/Eastern")
        tm.assert_index_equal(result, expected)
        assert result.freq == expected.freq

    def test_astype_tznaive_to_tzaware(self):
        # GH 18951: tz-naive to tz-aware
        idx = date_range("20170101", periods=4)
        idx = idx._with_freq(None)  # tz_localize does not preserve freq
        msg = "Cannot use .astype to convert from timezone-naive"
        with pytest.raises(TypeError, match=msg):
            # dt64->dt64tz deprecated
            idx.astype("datetime64[ns, US/Eastern]")
        with pytest.raises(TypeError, match=msg):
            # dt64->dt64tz deprecated
            idx._data.astype("datetime64[ns, US/Eastern]")

    def test_astype_str_nat(self, using_infer_string):
        # GH 13149, GH 13209
        # verify that we are returning NaT as a string (and not unicode)
        # `using_infer_string` is a pytest fixture supplied outside this file

        idx = DatetimeIndex(["2016-05-16", "NaT", NaT, np.nan])
        result = idx.astype(str)
        if using_infer_string:
            expected = Index(["2016-05-16", None, None, None], dtype="str")
        else:
            expected = Index(["2016-05-16", "NaT", "NaT", "NaT"], dtype=object)
        tm.assert_index_equal(result, expected)

    def test_astype_str(self):
        # test astype string - #10442
        dti = date_range("2012-01-01", periods=4, name="test_name")
        result = dti.astype(str)
        expected = Index(
            ["2012-01-01", "2012-01-02", "2012-01-03", "2012-01-04"],
            name="test_name",
            dtype="str",
        )
        tm.assert_index_equal(result, expected)

    def test_astype_str_tz_and_name(self):
        # test astype string with tz and name
        dti = date_range("2012-01-01", periods=3, name="test_name", tz="US/Eastern")
        result = dti.astype(str)
        expected = Index(
            [
                "2012-01-01 00:00:00-05:00",
                "2012-01-02 00:00:00-05:00",
                "2012-01-03 00:00:00-05:00",
            ],
            name="test_name",
            dtype="str",
        )
        tm.assert_index_equal(result, expected)

    def test_astype_str_freq_and_name(self):
        # test astype string with freqH and name
        dti = date_range("1/1/2011", periods=3, freq="h", name="test_name")
        result = dti.astype(str)
        expected = Index(
            ["2011-01-01 00:00:00", "2011-01-01 01:00:00", "2011-01-01 02:00:00"],
            name="test_name",
            dtype="str",
        )
        tm.assert_index_equal(result, expected)

    def test_astype_str_freq_and_tz(self):
        # test astype string with freqH and timezone
        dti = date_range(
            "3/6/2012 00:00", periods=2, freq="h", tz="Europe/London", name="test_name"
        )
        result = dti.astype(str)
        expected = Index(
            ["2012-03-06 00:00:00+00:00", "2012-03-06 01:00:00+00:00"],
            dtype="str",
            name="test_name",
        )
        tm.assert_index_equal(result, expected)

    def test_astype_datetime64(self):
        # GH 13149, GH 13209
        idx = DatetimeIndex(
            ["2016-05-16", "NaT", NaT, np.nan], dtype="M8[ns]", name="idx"
        )

        # same-dtype astype copies by default ...
        result = idx.astype("datetime64[ns]")
        tm.assert_index_equal(result, idx)
        assert result is not idx

        # ... and returns the same object with copy=False
        result = idx.astype("datetime64[ns]", copy=False)
        tm.assert_index_equal(result, idx)
        assert result is idx

        idx_tz = DatetimeIndex(["2016-05-16", "NaT", NaT, np.nan], tz="EST", name="idx")
        msg = "Cannot use .astype to convert from timezone-aware"
        with pytest.raises(TypeError, match=msg):
            # dt64tz->dt64 deprecated
            result = idx_tz.astype("datetime64[ns]")

    def test_astype_object(self):
        rng = date_range("1/1/2000", periods=20)

        casted = rng.astype("O")
        exp_values = list(rng)

        tm.assert_index_equal(casted, Index(exp_values, dtype=np.object_))
        assert casted.tolist() == exp_values

    @pytest.mark.parametrize("tz", [None, "Asia/Tokyo"])
    def test_astype_object_tz(self, tz):
        idx = date_range(start="2013-01-01", periods=4, freq="ME", name="idx", tz=tz)
        expected_list = [
            Timestamp("2013-01-31", tz=tz),
            Timestamp("2013-02-28", tz=tz),
            Timestamp("2013-03-31", tz=tz),
            Timestamp("2013-04-30", tz=tz),
        ]
        expected = Index(expected_list, dtype=object, name="idx")
        result = idx.astype(object)
        tm.assert_index_equal(result, expected)
        assert idx.tolist() == expected_list

    def test_astype_object_with_nat(self):
        idx = DatetimeIndex(
            [datetime(2013, 1, 1), datetime(2013, 1, 2), NaT, datetime(2013, 1, 4)],
            name="idx",
        )
        expected_list = [
            Timestamp("2013-01-01"),
            Timestamp("2013-01-02"),
            NaT,
            Timestamp("2013-01-04"),
        ]
        expected = Index(expected_list, dtype=object, name="idx")
        result = idx.astype(object)
        tm.assert_index_equal(result, expected)
        assert idx.tolist() == expected_list

    @pytest.mark.parametrize(
        "dtype",
        [float, "timedelta64", "timedelta64[ns]", "datetime64", "datetime64[D]"],
    )
    def test_astype_raises(self, dtype):
        # GH 13149, GH 13209
        idx = DatetimeIndex(["2016-05-16", "NaT", NaT, np.nan])
        msg = "Cannot cast DatetimeIndex to dtype"
        if dtype == "datetime64":
            msg = "Casting to unit-less dtype 'datetime64' is not supported"
        with pytest.raises(TypeError, match=msg):
            idx.astype(dtype)

    def test_index_convert_to_datetime_array(self):
        # to_pydatetime returns an ndarray of stdlib datetimes with tz kept
        def _check_rng(rng):
            converted = rng.to_pydatetime()
            assert isinstance(converted, np.ndarray)
            for x, stamp in zip(converted, rng):
                assert isinstance(x, datetime)
                assert x == stamp.to_pydatetime()
                assert x.tzinfo == stamp.tzinfo

        rng = date_range("20090415", "20090519")
        rng_eastern = date_range("20090415", "20090519", tz="US/Eastern")
        rng_utc = date_range("20090415", "20090519", tz="utc")

        _check_rng(rng)
        _check_rng(rng_eastern)
        _check_rng(rng_utc)

    def test_index_convert_to_datetime_array_explicit_pytz(self):
        # same check with explicit pytz tzinfo objects
        def _check_rng(rng):
            converted = rng.to_pydatetime()
            assert isinstance(converted, np.ndarray)
            for x, stamp in zip(converted, rng):
                assert isinstance(x, datetime)
                assert x == stamp.to_pydatetime()
                assert x.tzinfo == stamp.tzinfo

        rng = date_range("20090415", "20090519")
        rng_eastern = date_range("20090415", "20090519", tz=pytz.timezone("US/Eastern"))
        rng_utc = date_range("20090415", "20090519", tz=pytz.utc)

        _check_rng(rng)
        _check_rng(rng_eastern)
        _check_rng(rng_utc)

    def test_index_convert_to_datetime_array_dateutil(self):
        # same check with dateutil timezones
        def _check_rng(rng):
            converted = rng.to_pydatetime()
            assert isinstance(converted, np.ndarray)
            for x, stamp in zip(converted, rng):
                assert isinstance(x, datetime)
                assert x == stamp.to_pydatetime()
                assert x.tzinfo == stamp.tzinfo

        rng = date_range("20090415", "20090519")
        rng_eastern = date_range("20090415", "20090519", tz="dateutil/US/Eastern")
        rng_utc = date_range("20090415", "20090519", tz=dateutil.tz.tzutc())

        _check_rng(rng)
        _check_rng(rng_eastern)
        _check_rng(rng_utc)

    @pytest.mark.parametrize(
        "tz, dtype",
        [["US/Pacific", "datetime64[ns, US/Pacific]"], [None, "datetime64[ns]"]],
    )
    def test_integer_index_astype_datetime(self, tz, dtype):
        # GH 20997, 20964, 24559
        val = [Timestamp("2018-01-01", tz=tz).as_unit("ns")._value]
        result = Index(val, name="idx").astype(dtype)
        expected = DatetimeIndex(["2018-01-01"], tz=tz, name="idx").as_unit("ns")
        tm.assert_index_equal(result, expected)

    def test_dti_astype_period(self):
        idx = DatetimeIndex([NaT, "2011-01-01", "2011-02-01"], name="idx")

        res = idx.astype("period[M]")
        exp = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="M", name="idx")
        tm.assert_index_equal(res, exp)

        res = idx.astype("period[3M]")
        exp = PeriodIndex(["NaT", "2011-01", "2011-02"], freq="3M", name="idx")
        tm.assert_index_equal(res, exp)


class TestAstype:
    """astype to category/bool, checked on both the Index and its array."""

    @pytest.mark.parametrize("tz", [None, "US/Central"])
    def test_astype_category(self, tz):
        obj = date_range("2000", periods=2, tz=tz, name="idx")
        result = obj.astype("category")
        dti = DatetimeIndex(["2000-01-01", "2000-01-02"], tz=tz).as_unit("ns")
        expected = pd.CategoricalIndex(
            dti,
            name="idx",
        )
        tm.assert_index_equal(result, expected)

        result = obj._data.astype("category")
        expected = expected.values
        tm.assert_categorical_equal(result, expected)

    @pytest.mark.parametrize("tz", [None, "US/Central"])
    def test_astype_array_fallback(self, tz):
        obj = date_range("2000", periods=2, tz=tz, name="idx")
        result = obj.astype(bool)
        expected = Index(np.array([True, True]), name="idx")
        tm.assert_index_equal(result, expected)

        result = obj._data.astype(bool)
        expected = np.array([True, True])
        tm.assert_numpy_array_equal(result, expected)
"""Tests for DatetimeIndex.delete: freq is preserved when deleting from
either end (or a contiguous end slice) and reset to None otherwise."""
import pytest

from pandas import (
    DatetimeIndex,
    Series,
    date_range,
)
import pandas._testing as tm


class TestDelete:
    def test_delete(self, unit):
        # `unit` is a pytest fixture supplied outside this file
        idx = date_range(
            start="2000-01-01", periods=5, freq="ME", name="idx", unit=unit
        )

        # preserve freq
        expected_0 = date_range(
            start="2000-02-01", periods=4, freq="ME", name="idx", unit=unit
        )
        expected_4 = date_range(
            start="2000-01-01", periods=4, freq="ME", name="idx", unit=unit
        )

        # reset freq to None
        expected_1 = DatetimeIndex(
            ["2000-01-31", "2000-03-31", "2000-04-30", "2000-05-31"],
            freq=None,
            name="idx",
        ).as_unit(unit)

        # position deleted -> expected result
        cases = {
            0: expected_0,
            -5: expected_0,
            -1: expected_4,
            4: expected_4,
            1: expected_1,
        }
        for n, expected in cases.items():
            result = idx.delete(n)
            tm.assert_index_equal(result, expected)
            assert result.name == expected.name
            assert result.freq == expected.freq

        with pytest.raises((IndexError, ValueError), match="out of bounds"):
            # either depending on numpy version
            idx.delete(5)

    @pytest.mark.parametrize("tz", [None, "Asia/Tokyo", "US/Pacific"])
    def test_delete2(self, tz):
        # deleting from either end of a tz-aware index keeps freq and tz
        idx = date_range(
            start="2000-01-01 09:00", periods=10, freq="h", name="idx", tz=tz
        )

        expected = date_range(
            start="2000-01-01 10:00", periods=9, freq="h", name="idx", tz=tz
        )
        result = idx.delete(0)
        tm.assert_index_equal(result, expected)
        assert result.name == expected.name
        assert result.freqstr == "h"
        assert result.tz == expected.tz

        expected = date_range(
            start="2000-01-01 09:00", periods=9, freq="h", name="idx", tz=tz
        )
        result = idx.delete(-1)
        tm.assert_index_equal(result, expected)
        assert result.name == expected.name
        assert result.freqstr == "h"
        assert result.tz == expected.tz

    def test_delete_slice(self, unit):
        idx = date_range(
            start="2000-01-01", periods=10, freq="D", name="idx", unit=unit
        )

        # preserve freq
        expected_0_2 = date_range(
            start="2000-01-04", periods=7, freq="D", name="idx", unit=unit
        )
        expected_7_9 = date_range(
            start="2000-01-01", periods=7, freq="D", name="idx", unit=unit
        )

        # reset freq to None
        expected_3_5 = DatetimeIndex(
            [
                "2000-01-01",
                "2000-01-02",
                "2000-01-03",
                "2000-01-07",
                "2000-01-08",
                "2000-01-09",
                "2000-01-10",
            ],
            freq=None,
            name="idx",
        ).as_unit(unit)

        # deleting by a tuple of positions and by the equivalent slice
        # must agree
        cases = {
            (0, 1, 2): expected_0_2,
            (7, 8, 9): expected_7_9,
            (3, 4, 5): expected_3_5,
        }
        for n, expected in cases.items():
            result = idx.delete(n)
            tm.assert_index_equal(result, expected)
            assert result.name == expected.name
            assert result.freq == expected.freq

            result = idx.delete(slice(n[0], n[-1] + 1))
            tm.assert_index_equal(result, expected)
            assert result.name == expected.name
            assert result.freq == expected.freq

    # TODO: belongs in Series.drop tests?
    @pytest.mark.parametrize("tz", [None, "Asia/Tokyo", "US/Pacific"])
    def test_delete_slice2(self, tz, unit):
        dti = date_range(
            "2000-01-01 09:00", periods=10, freq="h", name="idx", tz=tz, unit=unit
        )
        ts = Series(
            1,
            index=dti,
        )
        # preserve freq
        result = ts.drop(ts.index[:5]).index
        expected = dti[5:]
        tm.assert_index_equal(result, expected)
        assert result.name == expected.name
        assert result.freq == expected.freq
        assert result.tz == expected.tz

        # reset freq to None
        result = ts.drop(ts.index[[1, 3, 5, 7, 9]]).index
        expected = dti[::2]._with_freq(None)
        tm.assert_index_equal(result, expected)
        assert result.name == expected.name
        assert result.freq == expected.freq
        assert result.tz == expected.tz
"""Tests for DatetimeIndex.factorize: codes/uniques correctness, tz and
freq preservation, and the non-nanosecond-unit path (GH#38120, GH#51978)."""
import numpy as np
import pytest

from pandas import (
    DatetimeIndex,
    Index,
    date_range,
    factorize,
)
import pandas._testing as tm


class TestDatetimeIndexFactorize:
    def test_factorize(self):
        idx1 = DatetimeIndex(
            ["2014-01", "2014-01", "2014-02", "2014-02", "2014-03", "2014-03"]
        )

        exp_arr = np.array([0, 0, 1, 1, 2, 2], dtype=np.intp)
        exp_idx = DatetimeIndex(["2014-01", "2014-02", "2014-03"])

        arr, idx = idx1.factorize()
        tm.assert_numpy_array_equal(arr, exp_arr)
        tm.assert_index_equal(idx, exp_idx)
        assert idx.freq == exp_idx.freq

        # already-sorted input: sort=True gives the same result
        arr, idx = idx1.factorize(sort=True)
        tm.assert_numpy_array_equal(arr, exp_arr)
        tm.assert_index_equal(idx, exp_idx)
        assert idx.freq == exp_idx.freq

        # tz must be preserved
        idx1 = idx1.tz_localize("Asia/Tokyo")
        exp_idx = exp_idx.tz_localize("Asia/Tokyo")

        arr, idx = idx1.factorize()
        tm.assert_numpy_array_equal(arr, exp_arr)
        tm.assert_index_equal(idx, exp_idx)
        assert idx.freq == exp_idx.freq

        idx2 = DatetimeIndex(
            ["2014-03", "2014-03", "2014-02", "2014-01", "2014-03", "2014-01"]
        )

        # unsorted input, sort=True: uniques come back sorted
        exp_arr = np.array([2, 2, 1, 0, 2, 0], dtype=np.intp)
        exp_idx = DatetimeIndex(["2014-01", "2014-02", "2014-03"])
        arr, idx = idx2.factorize(sort=True)
        tm.assert_numpy_array_equal(arr, exp_arr)
        tm.assert_index_equal(idx, exp_idx)
        assert idx.freq == exp_idx.freq

        # unsorted input, default: uniques in order of first appearance
        exp_arr = np.array([0, 0, 1, 2, 0, 2], dtype=np.intp)
        exp_idx = DatetimeIndex(["2014-03", "2014-02", "2014-01"])
        arr, idx = idx2.factorize()
        tm.assert_numpy_array_equal(arr, exp_arr)
        tm.assert_index_equal(idx, exp_idx)
        assert idx.freq == exp_idx.freq

    def test_factorize_preserves_freq(self):
        # GH#38120 freq should be preserved
        idx3 = date_range("2000-01", periods=4, freq="ME", tz="Asia/Tokyo")
        exp_arr = np.array([0, 1, 2, 3], dtype=np.intp)

        arr, idx = idx3.factorize()
        tm.assert_numpy_array_equal(arr, exp_arr)
        tm.assert_index_equal(idx, idx3)
        assert idx.freq == idx3.freq

        # the top-level pandas.factorize behaves the same
        arr, idx = factorize(idx3)
        tm.assert_numpy_array_equal(arr, exp_arr)
        tm.assert_index_equal(idx, idx3)
        assert idx.freq == idx3.freq

    def test_factorize_tz(self, tz_naive_fixture, index_or_series):
        # `tz_naive_fixture`/`index_or_series` are pytest fixtures supplied
        # outside this file
        tz = tz_naive_fixture
        # GH#13750
        base = date_range("2016-11-05", freq="h", periods=100, tz=tz)
        idx = base.repeat(5)

        exp_arr = np.arange(100, dtype=np.intp).repeat(5)

        obj = index_or_series(idx)

        arr, res = obj.factorize()
        tm.assert_numpy_array_equal(arr, exp_arr)
        expected = base._with_freq(None)
        tm.assert_index_equal(res, expected)
        assert res.freq == expected.freq

    def test_factorize_dst(self, index_or_series):
        # GH#13750
        # covers both the fall-back and spring-forward DST transitions
        idx = date_range("2016-11-06", freq="h", periods=12, tz="US/Eastern")
        obj = index_or_series(idx)

        arr, res = obj.factorize()
        tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
        tm.assert_index_equal(res, idx)
        if index_or_series is Index:
            assert res.freq == idx.freq

        idx = date_range("2016-06-13", freq="h", periods=12, tz="US/Eastern")
        obj = index_or_series(idx)

        arr, res = obj.factorize()
        tm.assert_numpy_array_equal(arr, np.arange(12, dtype=np.intp))
        tm.assert_index_equal(res, idx)
        if index_or_series is Index:
            assert res.freq == idx.freq

    @pytest.mark.parametrize("sort", [True, False])
    def test_factorize_no_freq_non_nano(self, tz_naive_fixture, sort):
        # GH#51978 case that does not go through the fastpath based on
        # non-None freq
        tz = tz_naive_fixture
        idx = date_range("2016-11-06", freq="h", periods=5, tz=tz)[[0, 4, 1, 3, 2]]
        exp_codes, exp_uniques = idx.factorize(sort=sort)

        res_codes, res_uniques = idx.as_unit("s").factorize(sort=sort)

        tm.assert_numpy_array_equal(res_codes, exp_codes)
        tm.assert_index_equal(res_uniques, exp_uniques.as_unit("s"))

        res_codes, res_uniques = idx.as_unit("s").to_series().factorize(sort=sort)
        tm.assert_numpy_array_equal(res_codes, exp_codes)
        tm.assert_index_equal(res_uniques, exp_uniques.as_unit("s"))
.venv\Lib\site-packages\pandas\tests\indexes\datetimes\methods\test_factorize.py | test_factorize.py | Python | 4,468 | 0.95 | 0.064 | 0.060606 | node-utils | 729 | 2024-07-14T14:48:45.045630 | Apache-2.0 | true | 811b75e32e29e15197f5dd3b0e8c1d89 |
"""Tests for DatetimeIndex.fillna (GH 11343): filling with a matching-tz
Timestamp keeps the datetime dtype; a mismatched-tz or non-datetime fill
value falls back to an object-dtype Index."""
import pytest

import pandas as pd
import pandas._testing as tm


class TestDatetimeIndexFillNA:
    @pytest.mark.parametrize("tz", ["US/Eastern", "Asia/Tokyo"])
    def test_fillna_datetime64(self, tz):
        # GH 11343
        idx = pd.DatetimeIndex(["2011-01-01 09:00", pd.NaT, "2011-01-01 11:00"])

        # matching (naive) fill value keeps datetime64 dtype
        exp = pd.DatetimeIndex(
            ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"]
        )
        tm.assert_index_equal(idx.fillna(pd.Timestamp("2011-01-01 10:00")), exp)

        # tz mismatch
        exp = pd.Index(
            [
                pd.Timestamp("2011-01-01 09:00"),
                pd.Timestamp("2011-01-01 10:00", tz=tz),
                pd.Timestamp("2011-01-01 11:00"),
            ],
            dtype=object,
        )
        tm.assert_index_equal(idx.fillna(pd.Timestamp("2011-01-01 10:00", tz=tz)), exp)

        # object
        exp = pd.Index(
            [pd.Timestamp("2011-01-01 09:00"), "x", pd.Timestamp("2011-01-01 11:00")],
            dtype=object,
        )
        tm.assert_index_equal(idx.fillna("x"), exp)

        # same three cases, starting from a tz-aware index
        idx = pd.DatetimeIndex(["2011-01-01 09:00", pd.NaT, "2011-01-01 11:00"], tz=tz)

        exp = pd.DatetimeIndex(
            ["2011-01-01 09:00", "2011-01-01 10:00", "2011-01-01 11:00"], tz=tz
        )
        tm.assert_index_equal(idx.fillna(pd.Timestamp("2011-01-01 10:00", tz=tz)), exp)

        exp = pd.Index(
            [
                pd.Timestamp("2011-01-01 09:00", tz=tz),
                pd.Timestamp("2011-01-01 10:00"),
                pd.Timestamp("2011-01-01 11:00", tz=tz),
            ],
            dtype=object,
        )
        tm.assert_index_equal(idx.fillna(pd.Timestamp("2011-01-01 10:00")), exp)

        # object
        exp = pd.Index(
            [
                pd.Timestamp("2011-01-01 09:00", tz=tz),
                "x",
                pd.Timestamp("2011-01-01 11:00", tz=tz),
            ],
            dtype=object,
        )
        tm.assert_index_equal(idx.fillna("x"), exp)
"""Tests for DatetimeIndex.insert: NaT/NA handling, freq preservation or
reset, and coercion to object dtype on tz/type mismatches."""
from datetime import datetime

import numpy as np
import pytest
import pytz

from pandas import (
    NA,
    DatetimeIndex,
    Index,
    NaT,
    Timestamp,
    date_range,
)
import pandas._testing as tm


class TestInsert:
    @pytest.mark.parametrize("null", [None, np.nan, np.datetime64("NaT"), NaT, NA])
    @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"])
    def test_insert_nat(self, tz, null):
        # GH#16537, GH#18295 (test missing)

        idx = DatetimeIndex(["2017-01-01"], tz=tz)
        expected = DatetimeIndex(["NaT", "2017-01-01"], tz=tz)
        if tz is not None and isinstance(null, np.datetime64):
            # a naive np.datetime64 NaT does not coerce into a tz-aware index
            expected = Index([null, idx[0]], dtype=object)

        res = idx.insert(0, null)
        tm.assert_index_equal(res, expected)

    @pytest.mark.parametrize("tz", [None, "UTC", "US/Eastern"])
    def test_insert_invalid_na(self, tz):
        # a timedelta64 NaT is the wrong kind of NA -> object dtype
        idx = DatetimeIndex(["2017-01-01"], tz=tz)

        item = np.timedelta64("NaT")
        result = idx.insert(0, item)
        expected = Index([item] + list(idx), dtype=object)
        tm.assert_index_equal(result, expected)

    def test_insert_empty_preserves_freq(self, tz_naive_fixture):
        # GH#33573
        # `tz_naive_fixture` is a pytest fixture supplied outside this file
        tz = tz_naive_fixture
        dti = DatetimeIndex([], tz=tz, freq="D")
        item = Timestamp("2017-04-05").tz_localize(tz)

        result = dti.insert(0, item)
        assert result.freq == dti.freq

        # But not when we insert an item that doesn't conform to freq
        dti = DatetimeIndex([], tz=tz, freq="W-THU")
        result = dti.insert(0, item)
        assert result.freq is None

    def test_insert(self, unit):
        idx = DatetimeIndex(
            ["2000-01-04", "2000-01-01", "2000-01-02"], name="idx"
        ).as_unit(unit)

        result = idx.insert(2, datetime(2000, 1, 5))
        exp = DatetimeIndex(
            ["2000-01-04", "2000-01-01", "2000-01-05", "2000-01-02"], name="idx"
        ).as_unit(unit)
        tm.assert_index_equal(result, exp)

        # insertion of non-datetime should coerce to object index
        result = idx.insert(1, "inserted")
        expected = Index(
            [
                datetime(2000, 1, 4),
                "inserted",
                datetime(2000, 1, 1),
                datetime(2000, 1, 2),
            ],
            name="idx",
        )
        assert not isinstance(result, DatetimeIndex)
        tm.assert_index_equal(result, expected)
        assert result.name == expected.name

    def test_insert2(self, unit):
        idx = date_range("1/1/2000", periods=3, freq="ME", name="idx", unit=unit)

        # preserve freq
        expected_0 = DatetimeIndex(
            ["1999-12-31", "2000-01-31", "2000-02-29", "2000-03-31"],
            name="idx",
            freq="ME",
        ).as_unit(unit)
        expected_3 = DatetimeIndex(
            ["2000-01-31", "2000-02-29", "2000-03-31", "2000-04-30"],
            name="idx",
            freq="ME",
        ).as_unit(unit)

        # reset freq to None
        expected_1_nofreq = DatetimeIndex(
            ["2000-01-31", "2000-01-31", "2000-02-29", "2000-03-31"],
            name="idx",
            freq=None,
        ).as_unit(unit)
        expected_3_nofreq = DatetimeIndex(
            ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"],
            name="idx",
            freq=None,
        ).as_unit(unit)

        # (position, inserted value, expected result)
        cases = [
            (0, datetime(1999, 12, 31), expected_0),
            (-3, datetime(1999, 12, 31), expected_0),
            (3, datetime(2000, 4, 30), expected_3),
            (1, datetime(2000, 1, 31), expected_1_nofreq),
            (3, datetime(2000, 1, 2), expected_3_nofreq),
        ]

        for n, d, expected in cases:
            result = idx.insert(n, d)
            tm.assert_index_equal(result, expected)
            assert result.name == expected.name
            assert result.freq == expected.freq

    def test_insert3(self, unit):
        idx = date_range("1/1/2000", periods=3, freq="ME", name="idx", unit=unit)

        # reset freq to None
        result = idx.insert(3, datetime(2000, 1, 2))
        expected = DatetimeIndex(
            ["2000-01-31", "2000-02-29", "2000-03-31", "2000-01-02"],
            name="idx",
            freq=None,
        ).as_unit(unit)
        tm.assert_index_equal(result, expected)
        assert result.name == expected.name
        assert result.freq is None

    def test_insert4(self, unit):
        for tz in ["US/Pacific", "Asia/Singapore"]:
            idx = date_range(
                "1/1/2000 09:00", periods=6, freq="h", tz=tz, name="idx", unit=unit
            )
            # preserve freq
            expected = date_range(
                "1/1/2000 09:00", periods=7, freq="h", tz=tz, name="idx", unit=unit
            )
            for d in [
                Timestamp("2000-01-01 15:00", tz=tz),
                pytz.timezone(tz).localize(datetime(2000, 1, 1, 15)),
            ]:
                result = idx.insert(6, d)
                tm.assert_index_equal(result, expected)
                assert result.name == expected.name
                assert result.freq == expected.freq
                assert result.tz == expected.tz

            expected = DatetimeIndex(
                [
                    "2000-01-01 09:00",
                    "2000-01-01 10:00",
                    "2000-01-01 11:00",
                    "2000-01-01 12:00",
                    "2000-01-01 13:00",
                    "2000-01-01 14:00",
                    "2000-01-01 10:00",
                ],
                name="idx",
                tz=tz,
                freq=None,
            ).as_unit(unit)
            # reset freq to None
            for d in [
                Timestamp("2000-01-01 10:00", tz=tz),
                pytz.timezone(tz).localize(datetime(2000, 1, 1, 10)),
            ]:
                result = idx.insert(6, d)
                tm.assert_index_equal(result, expected)
                assert result.name == expected.name
                assert result.tz == expected.tz
                assert result.freq is None

    # TODO: also changes DataFrame.__setitem__ with expansion
    def test_insert_mismatched_tzawareness(self):
        # see GH#7299
        idx = date_range("1/1/2000", periods=3, freq="D", tz="Asia/Tokyo", name="idx")

        # mismatched tz-awareness
        item = Timestamp("2000-01-04")
        result = idx.insert(3, item)
        expected = Index(
            list(idx[:3]) + [item] + list(idx[3:]), dtype=object, name="idx"
        )
        tm.assert_index_equal(result, expected)

        # mismatched tz-awareness
        item = datetime(2000, 1, 4)
        result = idx.insert(3, item)
        expected = Index(
            list(idx[:3]) + [item] + list(idx[3:]), dtype=object, name="idx"
        )
        tm.assert_index_equal(result, expected)

    # TODO: also changes DataFrame.__setitem__ with expansion
    def test_insert_mismatched_tz(self):
        # see GH#7299
        # pre-2.0 with mismatched tzs we would cast to object
        idx = date_range("1/1/2000", periods=3, freq="D", tz="Asia/Tokyo", name="idx")

        # mismatched tz -> cast to object (could reasonably cast to same tz or UTC)
        item = Timestamp("2000-01-04", tz="US/Eastern")
        result = idx.insert(3, item)
        expected = Index(
            list(idx[:3]) + [item.tz_convert(idx.tz)] + list(idx[3:]),
            name="idx",
        )
        assert expected.dtype == idx.dtype
        tm.assert_index_equal(result, expected)

        item = datetime(2000, 1, 4, tzinfo=pytz.timezone("US/Eastern"))
        result = idx.insert(3, item)
        expected = Index(
            list(idx[:3]) + [item.astimezone(idx.tzinfo)] + list(idx[3:]),
            name="idx",
        )
        assert expected.dtype == idx.dtype
        tm.assert_index_equal(result, expected)

    @pytest.mark.parametrize(
        "item", [0, np.int64(0), np.float64(0), np.array(0), np.timedelta64(456)]
    )
    def test_insert_mismatched_types_raises(self, tz_aware_fixture, item):
        # GH#33703 dont cast these to dt64
        tz = tz_aware_fixture
        dti = date_range("2019-11-04", periods=9, freq="-1D", name=9, tz=tz)

        result = dti.insert(1, item)

        if isinstance(item, np.ndarray):
            # 0-dim arrays are unboxed to their scalar on insert
            assert item.item() == 0
            expected = Index([dti[0], 0] + list(dti[1:]), dtype=object, name=9)
        else:
            expected = Index([dti[0], item] + list(dti[1:]), dtype=object, name=9)

        tm.assert_index_equal(result, expected)

    def test_insert_castable_str(self, tz_aware_fixture):
        # GH#33703
        # a parseable datetime string is localized and kept in dt64tz dtype
        tz = tz_aware_fixture
        dti = date_range("2019-11-04", periods=3, freq="-1D", name=9, tz=tz)

        value = "2019-11-05"
        result = dti.insert(0, value)

        ts = Timestamp(value).tz_localize(tz)
        expected = DatetimeIndex([ts] + list(dti), dtype=dti.dtype, name=9)
        tm.assert_index_equal(result, expected)

    def test_insert_non_castable_str(self, tz_aware_fixture):
        # GH#33703
        # a non-datetime string forces fallback to object dtype
        tz = tz_aware_fixture
        dti = date_range("2019-11-04", periods=3, freq="-1D", name=9, tz=tz)

        value = "foo"
        result = dti.insert(0, value)

        expected = Index(["foo"] + list(dti), dtype=object, name=9)
        tm.assert_index_equal(result, expected)
from pandas import (
    DataFrame,
    DatetimeIndex,
    date_range,
)
import pandas._testing as tm


def test_isocalendar_returns_correct_values_close_to_new_year_with_tz():
    """GH#6538: DatetimeIndex.isocalendar must roll over the ISO year
    correctly for a tz-aware index spanning the new year."""
    idx = DatetimeIndex(
        ["2013/12/29", "2013/12/30", "2013/12/31"], tz="Europe/Brussels"
    )
    expected = DataFrame(
        [[2013, 52, 7], [2014, 1, 1], [2014, 1, 2]],
        columns=["year", "week", "day"],
        index=idx,
        dtype="UInt32",
    )
    tm.assert_frame_equal(idx.isocalendar(), expected)


def test_dti_timestamp_isocalendar_fields():
    """A Timestamp element's isocalendar() agrees with the corresponding
    row of the parent DatetimeIndex.isocalendar()."""
    dti = date_range("2020-01-01", periods=10)
    last_row = tuple(dti.isocalendar().iloc[-1].to_list())
    assert dti[-1].isocalendar() == last_row
import pytest\n\nfrom pandas import (\n DatetimeIndex,\n Index,\n MultiIndex,\n Period,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestMap:\n def test_map(self):\n rng = date_range("1/1/2000", periods=10)\n\n f = lambda x: x.strftime("%Y%m%d")\n result = rng.map(f)\n exp = Index([f(x) for x in rng])\n tm.assert_index_equal(result, exp)\n\n def test_map_fallthrough(self, capsys):\n # GH#22067, check we don't get warnings about silently ignored errors\n dti = date_range("2017-01-01", "2018-01-01", freq="B")\n\n dti.map(lambda x: Period(year=x.year, month=x.month, freq="M"))\n\n captured = capsys.readouterr()\n assert captured.err == ""\n\n def test_map_bug_1677(self):\n index = DatetimeIndex(["2012-04-25 09:30:00.393000"])\n f = index.asof\n\n result = index.map(f)\n expected = Index([f(index[0])])\n tm.assert_index_equal(result, expected)\n\n @pytest.mark.parametrize("name", [None, "name"])\n def test_index_map(self, name):\n # see GH#20990\n count = 6\n index = date_range("2018-01-01", periods=count, freq="ME", name=name).map(\n lambda x: (x.year, x.month)\n )\n exp_index = MultiIndex.from_product(((2018,), range(1, 7)), names=[name, name])\n tm.assert_index_equal(index, exp_index)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\methods\test_map.py | test_map.py | Python | 1,358 | 0.95 | 0.12766 | 0.054054 | node-utils | 612 | 2024-03-26T07:13:55.872056 | GPL-3.0 | true | 6f52cab5f0717123cfcd76c84f6ebc7e |
from dateutil.tz import tzlocal
import numpy as np
import pytest

import pandas.util._test_decorators as td

from pandas import (
    DatetimeIndex,
    NaT,
    Timestamp,
    date_range,
)
import pandas._testing as tm


class TestNormalize:
    """Tests for DatetimeIndex.normalize and the is_normalized property."""

    def test_normalize(self):
        # normalize() floors each element to midnight; is_normalized is True
        # on the result and False on the original intraday index.
        rng = date_range("1/1/2000 9:30", periods=10, freq="D")
        normalized = rng.normalize()
        tm.assert_index_equal(
            normalized, date_range("1/1/2000", periods=10, freq="D")
        )

        # Also exercise raw i8-backed datetime64 data (no freq attached).
        raw = np.array([1380585623454345752, 1380585612343234312]).astype(
            "datetime64[ns]"
        )
        floored = DatetimeIndex(raw).normalize()
        at_midnight = np.array(
            [1380585600000000000, 1380585600000000000]
        ).astype("datetime64[ns]")
        tm.assert_index_equal(floored, DatetimeIndex(at_midnight))

        assert normalized.is_normalized
        assert not rng.is_normalized

    def test_normalize_nat(self):
        # NaT entries pass through normalization untouched.
        dti = DatetimeIndex([NaT, Timestamp("2018-01-01 01:00:00")])
        tm.assert_index_equal(
            dti.normalize(), DatetimeIndex([NaT, Timestamp("2018-01-01")])
        )

    def test_normalize_tz(self):
        # tz-aware indexes normalize in wall time; for US/Eastern and tzlocal
        # the freq is dropped, while UTC keeps it.
        rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="US/Eastern")
        result = rng.normalize()  # does not preserve freq
        expected = date_range("1/1/2000", periods=10, freq="D", tz="US/Eastern")
        tm.assert_index_equal(result, expected._with_freq(None))
        assert result.is_normalized
        assert not rng.is_normalized

        rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz="UTC")
        result = rng.normalize()
        expected = date_range("1/1/2000", periods=10, freq="D", tz="UTC")
        tm.assert_index_equal(result, expected)
        assert result.is_normalized
        assert not rng.is_normalized

        rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
        result = rng.normalize()  # does not preserve freq
        expected = date_range("1/1/2000", periods=10, freq="D", tz=tzlocal())
        tm.assert_index_equal(result, expected._with_freq(None))
        assert result.is_normalized
        assert not rng.is_normalized

    @td.skip_if_windows
    @pytest.mark.parametrize(
        "timezone",
        [
            "US/Pacific",
            "US/Eastern",
            "UTC",
            "Asia/Kolkata",
            "Asia/Shanghai",
            "Australia/Canberra",
        ],
    )
    def test_normalize_tz_local(self, timezone):
        # GH#13459
        with tm.set_timezone(timezone):
            rng = date_range("1/1/2000 9:30", periods=10, freq="D", tz=tzlocal())
            result = rng.normalize()
            expected = date_range(
                "1/1/2000", periods=10, freq="D", tz=tzlocal()
            )._with_freq(None)
            tm.assert_index_equal(result, expected)
            assert result.is_normalized
            assert not rng.is_normalized
import numpy as np\nimport pytest\n\nfrom pandas import (\n DatetimeIndex,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\n\n\nclass TestRepeat:\n def test_repeat_range(self, tz_naive_fixture):\n rng = date_range("1/1/2000", "1/1/2001")\n\n result = rng.repeat(5)\n assert result.freq is None\n assert len(result) == 5 * len(rng)\n\n def test_repeat_range2(self, tz_naive_fixture, unit):\n tz = tz_naive_fixture\n index = date_range("2001-01-01", periods=2, freq="D", tz=tz, unit=unit)\n exp = DatetimeIndex(\n ["2001-01-01", "2001-01-01", "2001-01-02", "2001-01-02"], tz=tz\n ).as_unit(unit)\n for res in [index.repeat(2), np.repeat(index, 2)]:\n tm.assert_index_equal(res, exp)\n assert res.freq is None\n\n def test_repeat_range3(self, tz_naive_fixture, unit):\n tz = tz_naive_fixture\n index = date_range("2001-01-01", periods=2, freq="2D", tz=tz, unit=unit)\n exp = DatetimeIndex(\n ["2001-01-01", "2001-01-01", "2001-01-03", "2001-01-03"], tz=tz\n ).as_unit(unit)\n for res in [index.repeat(2), np.repeat(index, 2)]:\n tm.assert_index_equal(res, exp)\n assert res.freq is None\n\n def test_repeat_range4(self, tz_naive_fixture, unit):\n tz = tz_naive_fixture\n index = DatetimeIndex(["2001-01-01", "NaT", "2003-01-01"], tz=tz).as_unit(unit)\n exp = DatetimeIndex(\n [\n "2001-01-01",\n "2001-01-01",\n "2001-01-01",\n "NaT",\n "NaT",\n "NaT",\n "2003-01-01",\n "2003-01-01",\n "2003-01-01",\n ],\n tz=tz,\n ).as_unit(unit)\n for res in [index.repeat(3), np.repeat(index, 3)]:\n tm.assert_index_equal(res, exp)\n assert res.freq is None\n\n def test_repeat(self, tz_naive_fixture, unit):\n tz = tz_naive_fixture\n reps = 2\n msg = "the 'axis' parameter is not supported"\n\n rng = date_range(start="2016-01-01", periods=2, freq="30Min", tz=tz, unit=unit)\n\n expected_rng = DatetimeIndex(\n [\n Timestamp("2016-01-01 00:00:00", tz=tz),\n Timestamp("2016-01-01 00:00:00", tz=tz),\n Timestamp("2016-01-01 00:30:00", tz=tz),\n Timestamp("2016-01-01 00:30:00", tz=tz),\n ]\n 
).as_unit(unit)\n\n res = rng.repeat(reps)\n tm.assert_index_equal(res, expected_rng)\n assert res.freq is None\n\n tm.assert_index_equal(np.repeat(rng, reps), expected_rng)\n with pytest.raises(ValueError, match=msg):\n np.repeat(rng, reps, axis=1)\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\methods\test_repeat.py | test_repeat.py | Python | 2,740 | 0.85 | 0.108434 | 0 | python-kit | 753 | 2023-09-09T22:10:17.856514 | GPL-3.0 | true | ceafdbeab9bc76f90302823fcf519257 |
from dateutil.tz import tzlocal\nimport pytest\n\nfrom pandas.compat import IS64\n\nfrom pandas import date_range\n\n\n@pytest.mark.parametrize(\n "freq,expected",\n [\n ("YE", "day"),\n ("QE", "day"),\n ("ME", "day"),\n ("D", "day"),\n ("h", "hour"),\n ("min", "minute"),\n ("s", "second"),\n ("ms", "millisecond"),\n ("us", "microsecond"),\n ],\n)\ndef test_dti_resolution(request, tz_naive_fixture, freq, expected):\n tz = tz_naive_fixture\n if freq == "YE" and not IS64 and isinstance(tz, tzlocal):\n request.applymarker(\n pytest.mark.xfail(reason="OverflowError inside tzlocal past 2038")\n )\n\n idx = date_range(start="2013-04-01", periods=30, freq=freq, tz=tz)\n assert idx.resolution == expected\n | .venv\Lib\site-packages\pandas\tests\indexes\datetimes\methods\test_resolution.py | test_resolution.py | Python | 785 | 0.85 | 0.064516 | 0 | react-lib | 8 | 2024-01-23T18:56:23.256761 | BSD-3-Clause | true | 45a10e401a97af4199bf59ed447f501a |
import pytest

from pandas._libs.tslibs import to_offset
from pandas._libs.tslibs.offsets import INVALID_FREQ_ERR_MSG

from pandas import (
    DatetimeIndex,
    Timestamp,
    date_range,
)
import pandas._testing as tm


class TestDatetimeIndexRound:
    """Tests for DatetimeIndex.round/floor/ceil across units and timezones."""

    def test_round_daily(self):
        # Rounding to "D" snaps to midnight, both naive and tz-aware.
        dti = date_range("20130101 09:10:11", periods=5)
        result = dti.round("D")
        expected = date_range("20130101", periods=5)
        tm.assert_index_equal(result, expected)

        dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
        result = dti.round("D")
        expected = date_range("20130101", periods=5).tz_localize("US/Eastern")
        tm.assert_index_equal(result, expected)

        # Rounding to a resolution finer than the data is a no-op.
        result = dti.round("s")
        tm.assert_index_equal(result, dti)

    @pytest.mark.parametrize(
        "freq, error_msg",
        [
            ("YE", "<YearEnd: month=12> is a non-fixed frequency"),
            ("ME", "<MonthEnd> is a non-fixed frequency"),
            ("foobar", "Invalid frequency: foobar"),
        ],
    )
    def test_round_invalid(self, freq, error_msg):
        # Non-fixed (month/year) or unknown frequencies must raise ValueError.
        dti = date_range("20130101 09:10:11", periods=5)
        dti = dti.tz_localize("UTC").tz_convert("US/Eastern")
        with pytest.raises(ValueError, match=error_msg):
            dti.round(freq)

    def test_round(self, tz_naive_fixture, unit):
        # Index-level and scalar (Timestamp) round must agree, and both must
        # reject invalid / non-fixed frequencies with the same messages.
        tz = tz_naive_fixture
        rng = date_range(start="2016-01-01", periods=5, freq="30Min", tz=tz, unit=unit)
        elt = rng[1]

        expected_rng = DatetimeIndex(
            [
                Timestamp("2016-01-01 00:00:00", tz=tz),
                Timestamp("2016-01-01 00:00:00", tz=tz),
                Timestamp("2016-01-01 01:00:00", tz=tz),
                Timestamp("2016-01-01 02:00:00", tz=tz),
                Timestamp("2016-01-01 02:00:00", tz=tz),
            ]
        ).as_unit(unit)
        expected_elt = expected_rng[1]

        result = rng.round(freq="h")
        tm.assert_index_equal(result, expected_rng)
        assert elt.round(freq="h") == expected_elt

        msg = INVALID_FREQ_ERR_MSG
        with pytest.raises(ValueError, match=msg):
            rng.round(freq="foo")
        with pytest.raises(ValueError, match=msg):
            elt.round(freq="foo")

        msg = "<MonthEnd> is a non-fixed frequency"
        with pytest.raises(ValueError, match=msg):
            rng.round(freq="ME")
        with pytest.raises(ValueError, match=msg):
            elt.round(freq="ME")

    def test_round2(self, tz_naive_fixture):
        tz = tz_naive_fixture
        # GH#14440 & GH#15578
        # .0015s rounds up to 2ms; rounding at or below the stored resolution
        # ("us", "ns") leaves the index unchanged.
        index = DatetimeIndex(["2016-10-17 12:00:00.0015"], tz=tz).as_unit("ns")
        result = index.round("ms")
        expected = DatetimeIndex(["2016-10-17 12:00:00.002000"], tz=tz).as_unit("ns")
        tm.assert_index_equal(result, expected)

        for freq in ["us", "ns"]:
            tm.assert_index_equal(index, index.round(freq))

    def test_round3(self, tz_naive_fixture):
        # .00149s is below the half-way point, so it rounds down to 1ms.
        tz = tz_naive_fixture
        index = DatetimeIndex(["2016-10-17 12:00:00.00149"], tz=tz).as_unit("ns")
        result = index.round("ms")
        expected = DatetimeIndex(["2016-10-17 12:00:00.001000"], tz=tz).as_unit("ns")
        tm.assert_index_equal(result, expected)

    def test_round4(self, tz_naive_fixture):
        # Rounding to a 10ns multiple; ...031 is below half of 10ns past
        # ...030, so it rounds down.
        index = DatetimeIndex(["2016-10-17 12:00:00.001501031"], dtype="M8[ns]")
        result = index.round("10ns")
        expected = DatetimeIndex(["2016-10-17 12:00:00.001501030"], dtype="M8[ns]")
        tm.assert_index_equal(result, expected)

        # Rounding to an unusual multiple must not emit any warning.
        ts = "2016-10-17 12:00:00.001501031"
        dti = DatetimeIndex([ts], dtype="M8[ns]")
        with tm.assert_produces_warning(False):
            dti.round("1010ns")

    def test_no_rounding_occurs(self, tz_naive_fixture):
        # GH 21262
        # Values already aligned to the rounding frequency stay untouched.
        tz = tz_naive_fixture
        rng = date_range(start="2016-01-01", periods=5, freq="2Min", tz=tz)

        expected_rng = DatetimeIndex(
            [
                Timestamp("2016-01-01 00:00:00", tz=tz),
                Timestamp("2016-01-01 00:02:00", tz=tz),
                Timestamp("2016-01-01 00:04:00", tz=tz),
                Timestamp("2016-01-01 00:06:00", tz=tz),
                Timestamp("2016-01-01 00:08:00", tz=tz),
            ]
        ).as_unit("ns")

        result = rng.round(freq="2min")
        tm.assert_index_equal(result, expected_rng)

    @pytest.mark.parametrize(
        "test_input, rounder, freq, expected",
        [
            (["2117-01-01 00:00:45"], "floor", "15s", ["2117-01-01 00:00:45"]),
            (["2117-01-01 00:00:45"], "ceil", "15s", ["2117-01-01 00:00:45"]),
            (
                ["2117-01-01 00:00:45.000000012"],
                "floor",
                "10ns",
                ["2117-01-01 00:00:45.000000010"],
            ),
            (
                ["1823-01-01 00:00:01.000000012"],
                "ceil",
                "10ns",
                ["1823-01-01 00:00:01.000000020"],
            ),
            (["1823-01-01 00:00:01"], "floor", "1s", ["1823-01-01 00:00:01"]),
            (["1823-01-01 00:00:01"], "ceil", "1s", ["1823-01-01 00:00:01"]),
            (["2018-01-01 00:15:00"], "ceil", "15min", ["2018-01-01 00:15:00"]),
            (["2018-01-01 00:15:00"], "floor", "15min", ["2018-01-01 00:15:00"]),
            (["1823-01-01 03:00:00"], "ceil", "3h", ["1823-01-01 03:00:00"]),
            (["1823-01-01 03:00:00"], "floor", "3h", ["1823-01-01 03:00:00"]),
            (
                ("NaT", "1823-01-01 00:00:01"),
                "floor",
                "1s",
                ("NaT", "1823-01-01 00:00:01"),
            ),
            (
                ("NaT", "1823-01-01 00:00:01"),
                "ceil",
                "1s",
                ("NaT", "1823-01-01 00:00:01"),
            ),
        ],
    )
    def test_ceil_floor_edge(self, test_input, rounder, freq, expected):
        # Edge cases for floor/ceil: exact multiples, far-past/future dates,
        # 10ns resolution, and NaT pass-through.
        dt = DatetimeIndex(list(test_input))
        func = getattr(dt, rounder)
        result = func(freq)
        expected = DatetimeIndex(list(expected))
        assert expected.equals(result)

    @pytest.mark.parametrize(
        "start, index_freq, periods",
        [("2018-01-01", "12h", 25), ("2018-01-01 0:0:0.124999", "1ns", 1000)],
    )
    @pytest.mark.parametrize(
        "round_freq",
        [
            "2ns",
            "3ns",
            "4ns",
            "5ns",
            "6ns",
            "7ns",
            "250ns",
            "500ns",
            "750ns",
            "1us",
            "19us",
            "250us",
            "500us",
            "750us",
            "1s",
            "2s",
            "3s",
            "12h",
            "1D",
        ],
    )
    def test_round_int64(self, start, index_freq, periods, round_freq):
        # Property-style check on the underlying int64 (nanosecond) values:
        # floor/ceil/round results must be exact multiples of the target
        # frequency, lie within one unit of the input, and round uses
        # half-to-even tie-breaking.
        dt = date_range(start=start, freq=index_freq, periods=periods)
        unit = to_offset(round_freq).nanos

        # test floor
        result = dt.floor(round_freq)
        diff = dt.asi8 - result.asi8
        mod = result.asi8 % unit
        assert (mod == 0).all(), f"floor not a {round_freq} multiple"
        assert (0 <= diff).all() and (diff < unit).all(), "floor error"

        # test ceil
        result = dt.ceil(round_freq)
        diff = result.asi8 - dt.asi8
        mod = result.asi8 % unit
        assert (mod == 0).all(), f"ceil not a {round_freq} multiple"
        assert (0 <= diff).all() and (diff < unit).all(), "ceil error"

        # test round
        result = dt.round(round_freq)
        diff = abs(result.asi8 - dt.asi8)
        mod = result.asi8 % unit
        assert (mod == 0).all(), f"round not a {round_freq} multiple"
        assert (diff <= unit // 2).all(), "round error"
        if unit % 2 == 0:
            # Exact half-way points must round to the even multiple.
            assert (
                result.asi8[diff == unit // 2] % 2 == 0
            ).all(), "round half to even error"
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.