content stringlengths 1 103k ⌀ | path stringlengths 8 216 | filename stringlengths 2 179 | language stringclasses 15
values | size_bytes int64 2 189k | quality_score float64 0.5 0.95 | complexity float64 0 1 | documentation_ratio float64 0 1 | repository stringclasses 5
values | stars int64 0 1k | created_date stringdate 2023-07-10 19:21:08 2025-07-09 19:11:45 | license stringclasses 4
values | is_test bool 2
classes | file_hash stringlengths 32 32 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
\n\n | .venv\Lib\site-packages\pandas\tests\config\__pycache__\test_config.cpython-313.pyc | test_config.cpython-313.pyc | Other | 27,344 | 0.95 | 0.01199 | 0 | awesome-app | 295 | 2025-07-09T09:58:19.090384 | Apache-2.0 | true | a373a2671b8a00ed93e30fc86808fecc |
\n\n | .venv\Lib\site-packages\pandas\tests\config\__pycache__\test_localization.cpython-313.pyc | test_localization.cpython-313.pyc | Other | 6,546 | 0.8 | 0.016393 | 0.037736 | vue-tools | 582 | 2024-06-07T21:46:37.233148 | MIT | true | e3f365d0d3f90a1f4bd1e90e047a0114 |
\n\n | .venv\Lib\site-packages\pandas\tests\config\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 194 | 0.7 | 0 | 0 | vue-tools | 893 | 2024-09-20T02:59:24.450541 | GPL-3.0 | true | 70da11dd1a46be2c753bdf8545cada5b |
from pandas import Index\nimport pandas._testing as tm\nfrom pandas.core.construction import extract_array\n\n\ndef test_extract_array_rangeindex():\n ri = Index(range(5))\n\n expected = ri._values\n res = extract_array(ri, extract_numpy=True, extract_range=True)\n tm.assert_numpy_array_equal(res, expected)\n res = extract_array(ri, extract_numpy=False, extract_range=True)\n tm.assert_numpy_array_equal(res, expected)\n\n res = extract_array(ri, extract_numpy=True, extract_range=False)\n tm.assert_index_equal(res, ri)\n res = extract_array(ri, extract_numpy=False, extract_range=False)\n tm.assert_index_equal(res, ri)\n | .venv\Lib\site-packages\pandas\tests\construction\test_extract_array.py | test_extract_array.py | Python | 637 | 0.85 | 0.055556 | 0 | awesome-app | 642 | 2023-12-25T02:54:37.196892 | GPL-3.0 | true | 895417e489b96374d4081a7a74b5865d |
\n\n | .venv\Lib\site-packages\pandas\tests\construction\__pycache__\test_extract_array.cpython-313.pyc | test_extract_array.cpython-313.pyc | Other | 1,073 | 0.8 | 0 | 0 | awesome-app | 985 | 2024-08-06T19:49:26.644080 | GPL-3.0 | true | ead2c6baf78cb859cd00062c2ab1d8fd |
\n\n | .venv\Lib\site-packages\pandas\tests\construction\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 200 | 0.7 | 0 | 0 | node-utils | 258 | 2025-04-28T04:16:29.733757 | MIT | true | 762467aebab21c6bf3f2f915cc48b156 |
import numpy as np\nimport pytest\n\nfrom pandas.compat.numpy import np_version_gt2\n\nfrom pandas import (\n DataFrame,\n Series,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.tests.copy_view.util import get_array\n\n# -----------------------------------------------------------------------------\n# Copy/view behaviour for accessing underlying array of Series/DataFrame\n\n\n@pytest.mark.parametrize(\n "method",\n [\n lambda ser: ser.values,\n lambda ser: np.asarray(ser),\n lambda ser: np.array(ser, copy=False),\n ],\n ids=["values", "asarray", "array"],\n)\ndef test_series_values(using_copy_on_write, method):\n ser = Series([1, 2, 3], name="name")\n ser_orig = ser.copy()\n\n arr = method(ser)\n\n if using_copy_on_write:\n # .values still gives a view but is read-only\n assert np.shares_memory(arr, get_array(ser, "name"))\n assert arr.flags.writeable is False\n\n # mutating series through arr therefore doesn't work\n with pytest.raises(ValueError, match="read-only"):\n arr[0] = 0\n tm.assert_series_equal(ser, ser_orig)\n\n # mutating the series itself still works\n ser.iloc[0] = 0\n assert ser.values[0] == 0\n else:\n assert arr.flags.writeable is True\n arr[0] = 0\n assert ser.iloc[0] == 0\n\n\n@pytest.mark.parametrize(\n "method",\n [\n lambda df: df.values,\n lambda df: np.asarray(df),\n lambda ser: np.array(ser, copy=False),\n ],\n ids=["values", "asarray", "array"],\n)\ndef test_dataframe_values(using_copy_on_write, using_array_manager, method):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n df_orig = df.copy()\n\n arr = method(df)\n\n if using_copy_on_write:\n # .values still gives a view but is read-only\n assert np.shares_memory(arr, get_array(df, "a"))\n assert arr.flags.writeable is False\n\n # mutating series through arr therefore doesn't work\n with pytest.raises(ValueError, match="read-only"):\n arr[0, 0] = 0\n tm.assert_frame_equal(df, df_orig)\n\n # mutating the series itself still works\n df.iloc[0, 0] = 0\n assert df.values[0, 0] 
== 0\n else:\n assert arr.flags.writeable is True\n arr[0, 0] = 0\n if not using_array_manager:\n assert df.iloc[0, 0] == 0\n else:\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_series_to_numpy(using_copy_on_write):\n ser = Series([1, 2, 3], name="name")\n ser_orig = ser.copy()\n\n # default: copy=False, no dtype or NAs\n arr = ser.to_numpy()\n if using_copy_on_write:\n # to_numpy still gives a view but is read-only\n assert np.shares_memory(arr, get_array(ser, "name"))\n assert arr.flags.writeable is False\n\n # mutating series through arr therefore doesn't work\n with pytest.raises(ValueError, match="read-only"):\n arr[0] = 0\n tm.assert_series_equal(ser, ser_orig)\n\n # mutating the series itself still works\n ser.iloc[0] = 0\n assert ser.values[0] == 0\n else:\n assert arr.flags.writeable is True\n arr[0] = 0\n assert ser.iloc[0] == 0\n\n # specify copy=True gives a writeable array\n ser = Series([1, 2, 3], name="name")\n arr = ser.to_numpy(copy=True)\n assert not np.shares_memory(arr, get_array(ser, "name"))\n assert arr.flags.writeable is True\n\n # specifying a dtype that already causes a copy also gives a writeable array\n ser = Series([1, 2, 3], name="name")\n arr = ser.to_numpy(dtype="float64")\n assert not np.shares_memory(arr, get_array(ser, "name"))\n assert arr.flags.writeable is True\n\n\n@pytest.mark.parametrize("order", ["F", "C"])\ndef test_ravel_read_only(using_copy_on_write, order):\n ser = Series([1, 2, 3])\n with tm.assert_produces_warning(FutureWarning, match="is deprecated"):\n arr = ser.ravel(order=order)\n if using_copy_on_write:\n assert arr.flags.writeable is False\n assert np.shares_memory(get_array(ser), arr)\n\n\ndef test_series_array_ea_dtypes(using_copy_on_write):\n ser = Series([1, 2, 3], dtype="Int64")\n arr = np.asarray(ser, dtype="int64")\n assert np.shares_memory(arr, get_array(ser))\n if using_copy_on_write:\n assert arr.flags.writeable is False\n else:\n assert arr.flags.writeable is True\n\n arr = np.asarray(ser)\n 
assert np.shares_memory(arr, get_array(ser))\n if using_copy_on_write:\n assert arr.flags.writeable is False\n else:\n assert arr.flags.writeable is True\n\n\ndef test_dataframe_array_ea_dtypes(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3]}, dtype="Int64")\n arr = np.asarray(df, dtype="int64")\n assert np.shares_memory(arr, get_array(df, "a"))\n if using_copy_on_write:\n assert arr.flags.writeable is False\n else:\n assert arr.flags.writeable is True\n\n arr = np.asarray(df)\n assert np.shares_memory(arr, get_array(df, "a"))\n if using_copy_on_write:\n assert arr.flags.writeable is False\n else:\n assert arr.flags.writeable is True\n\n\ndef test_dataframe_array_string_dtype(using_copy_on_write, using_array_manager):\n df = DataFrame({"a": ["a", "b"]}, dtype="string")\n arr = np.asarray(df)\n if not using_array_manager:\n assert np.shares_memory(arr, get_array(df, "a"))\n if using_copy_on_write:\n assert arr.flags.writeable is False\n else:\n assert arr.flags.writeable is True\n\n\ndef test_dataframe_multiple_numpy_dtypes():\n df = DataFrame({"a": [1, 2, 3], "b": 1.5})\n arr = np.asarray(df)\n assert not np.shares_memory(arr, get_array(df, "a"))\n assert arr.flags.writeable is True\n\n if np_version_gt2:\n # copy=False semantics are only supported in NumPy>=2.\n\n msg = "Starting with NumPy 2.0, the behavior of the 'copy' keyword has changed"\n with pytest.raises(FutureWarning, match=msg):\n arr = np.array(df, copy=False)\n\n arr = np.array(df, copy=True)\n assert arr.flags.writeable is True\n\n\ndef test_dataframe_single_block_copy_true():\n # the copy=False/None cases are tested above in test_dataframe_values\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n arr = np.array(df, copy=True)\n assert not np.shares_memory(arr, get_array(df, "a"))\n assert arr.flags.writeable is True\n\n\ndef test_values_is_ea(using_copy_on_write):\n df = DataFrame({"a": date_range("2012-01-01", periods=3)})\n arr = np.asarray(df)\n if using_copy_on_write:\n assert 
arr.flags.writeable is False\n else:\n assert arr.flags.writeable is True\n\n\ndef test_empty_dataframe():\n df = DataFrame()\n arr = np.asarray(df)\n assert arr.flags.writeable is True\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_array.py | test_array.py | Python | 6,696 | 0.95 | 0.114679 | 0.091429 | vue-tools | 247 | 2025-01-14T09:25:48.174029 | GPL-3.0 | true | dc8ad3fa4dd9a833116a7adb9fa481fc |
import pickle\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import HAS_PYARROW\nfrom pandas.compat.pyarrow import pa_version_under12p0\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Series,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.tests.copy_view.util import get_array\n\n\ndef test_astype_single_dtype(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": 1.5})\n df_orig = df.copy()\n df2 = df.astype("float64")\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n # mutating df2 triggers a copy-on-write for that column/block\n df2.iloc[0, 2] = 5.5\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n tm.assert_frame_equal(df, df_orig)\n\n # mutating parent also doesn't update result\n df2 = df.astype("float64")\n df.iloc[0, 2] = 5.5\n tm.assert_frame_equal(df2, df_orig.astype("float64"))\n\n\n@pytest.mark.parametrize("dtype", ["int64", "Int64"])\n@pytest.mark.parametrize("new_dtype", ["int64", "Int64", "int64[pyarrow]"])\ndef test_astype_avoids_copy(using_copy_on_write, dtype, new_dtype):\n if new_dtype == "int64[pyarrow]":\n pytest.importorskip("pyarrow")\n df = DataFrame({"a": [1, 2, 3]}, dtype=dtype)\n df_orig = df.copy()\n df2 = df.astype(new_dtype)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n # mutating df2 triggers a copy-on-write for that column/block\n df2.iloc[0, 0] = 10\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, 
df_orig)\n\n # mutating parent also doesn't update result\n df2 = df.astype(new_dtype)\n df.iloc[0, 0] = 100\n tm.assert_frame_equal(df2, df_orig.astype(new_dtype))\n\n\n@pytest.mark.parametrize("dtype", ["float64", "int32", "Int32", "int32[pyarrow]"])\ndef test_astype_different_target_dtype(using_copy_on_write, dtype):\n if dtype == "int32[pyarrow]":\n pytest.importorskip("pyarrow")\n df = DataFrame({"a": [1, 2, 3]})\n df_orig = df.copy()\n df2 = df.astype(dtype)\n\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n if using_copy_on_write:\n assert df2._mgr._has_no_reference(0)\n\n df2.iloc[0, 0] = 5\n tm.assert_frame_equal(df, df_orig)\n\n # mutating parent also doesn't update result\n df2 = df.astype(dtype)\n df.iloc[0, 0] = 100\n tm.assert_frame_equal(df2, df_orig.astype(dtype))\n\n\n@td.skip_array_manager_invalid_test\ndef test_astype_numpy_to_ea():\n ser = Series([1, 2, 3])\n with pd.option_context("mode.copy_on_write", True):\n result = ser.astype("Int64")\n assert np.shares_memory(get_array(ser), get_array(result))\n\n\n@pytest.mark.parametrize(\n "dtype, new_dtype", [("object", "string"), ("string", "object")]\n)\ndef test_astype_string_and_object(using_copy_on_write, dtype, new_dtype):\n df = DataFrame({"a": ["a", "b", "c"]}, dtype=dtype)\n df_orig = df.copy()\n df2 = df.astype(new_dtype)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n df2.iloc[0, 0] = "x"\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize(\n "dtype, new_dtype", [("object", "string"), ("string", "object")]\n)\ndef test_astype_string_and_object_update_original(\n using_copy_on_write, dtype, new_dtype\n):\n df = DataFrame({"a": ["a", "b", "c"]}, dtype=dtype)\n df2 = df.astype(new_dtype)\n df_orig = df2.copy()\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not 
np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n df.iloc[0, 0] = "x"\n tm.assert_frame_equal(df2, df_orig)\n\n\ndef test_astype_str_copy_on_pickle_roundrip():\n # TODO(infer_string) this test can be removed after 3.0 (once str is the default)\n # https://github.com/pandas-dev/pandas/issues/54654\n # ensure_string_array may alter array inplace\n base = Series(np.array([(1, 2), None, 1], dtype="object"))\n base_copy = pickle.loads(pickle.dumps(base))\n base_copy.astype(str)\n tm.assert_series_equal(base, base_copy)\n\n\ndef test_astype_string_copy_on_pickle_roundrip(any_string_dtype):\n # https://github.com/pandas-dev/pandas/issues/54654\n # ensure_string_array may alter array inplace\n base = Series(np.array([(1, 2), None, 1], dtype="object"))\n base_copy = pickle.loads(pickle.dumps(base))\n base_copy.astype(any_string_dtype)\n tm.assert_series_equal(base, base_copy)\n\n\ndef test_astype_string_read_only_on_pickle_roundrip(any_string_dtype):\n # https://github.com/pandas-dev/pandas/issues/54654\n # ensure_string_array may alter read-only array inplace\n base = Series(np.array([(1, 2), None, 1], dtype="object"))\n base_copy = pickle.loads(pickle.dumps(base))\n base_copy._values.flags.writeable = False\n base_copy.astype(any_string_dtype)\n tm.assert_series_equal(base, base_copy)\n\n\ndef test_astype_dict_dtypes(using_copy_on_write):\n df = DataFrame(\n {"a": [1, 2, 3], "b": [4, 5, 6], "c": Series([1.5, 1.5, 1.5], dtype="float64")}\n )\n df_orig = df.copy()\n df2 = df.astype({"a": "float64", "c": "float64"})\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, 
"a"))\n\n # mutating df2 triggers a copy-on-write for that column/block\n df2.iloc[0, 2] = 5.5\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n\n df2.iloc[0, 1] = 10\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_astype_different_datetime_resos(using_copy_on_write):\n df = DataFrame({"a": date_range("2019-12-31", periods=2, freq="D")})\n result = df.astype("datetime64[ms]")\n\n assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))\n if using_copy_on_write:\n assert result._mgr._has_no_reference(0)\n\n\ndef test_astype_different_timezones(using_copy_on_write):\n df = DataFrame(\n {"a": date_range("2019-12-31", periods=5, freq="D", tz="US/Pacific")}\n )\n result = df.astype("datetime64[ns, Europe/Berlin]")\n if using_copy_on_write:\n assert not result._mgr._has_no_reference(0)\n assert np.shares_memory(get_array(df, "a"), get_array(result, "a"))\n\n\ndef test_astype_different_timezones_different_reso(using_copy_on_write):\n df = DataFrame(\n {"a": date_range("2019-12-31", periods=5, freq="D", tz="US/Pacific")}\n )\n result = df.astype("datetime64[ms, Europe/Berlin]")\n if using_copy_on_write:\n assert result._mgr._has_no_reference(0)\n assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))\n\n\ndef test_astype_arrow_timestamp(using_copy_on_write):\n pytest.importorskip("pyarrow")\n df = DataFrame(\n {\n "a": [\n Timestamp("2020-01-01 01:01:01.000001"),\n Timestamp("2020-01-01 01:01:01.000001"),\n ]\n },\n dtype="M8[ns]",\n )\n result = df.astype("timestamp[ns][pyarrow]")\n if using_copy_on_write:\n assert not result._mgr._has_no_reference(0)\n if pa_version_under12p0:\n assert not np.shares_memory(\n get_array(df, "a"), get_array(result, "a")._pa_array\n )\n else:\n assert np.shares_memory(\n get_array(df, "a"), get_array(result, "a")._pa_array\n )\n\n\ndef 
test_convert_dtypes_infer_objects(using_copy_on_write):\n ser = Series(["a", "b", "c"])\n ser_orig = ser.copy()\n result = ser.convert_dtypes(\n convert_integer=False,\n convert_boolean=False,\n convert_floating=False,\n convert_string=False,\n )\n\n if using_copy_on_write:\n assert tm.shares_memory(get_array(ser), get_array(result))\n else:\n assert not np.shares_memory(get_array(ser), get_array(result))\n\n result.iloc[0] = "x"\n tm.assert_series_equal(ser, ser_orig)\n\n\ndef test_convert_dtypes(using_copy_on_write, using_infer_string):\n df = DataFrame({"a": ["a", "b"], "b": [1, 2], "c": [1.5, 2.5], "d": [True, False]})\n df_orig = df.copy()\n df2 = df.convert_dtypes()\n\n if using_copy_on_write:\n if using_infer_string and HAS_PYARROW:\n # TODO the default nullable string dtype still uses python storage\n # this should be changed to pyarrow if installed\n assert not tm.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert tm.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n assert tm.shares_memory(get_array(df2, "d"), get_array(df, "d"))\n assert tm.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n assert tm.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n assert not np.shares_memory(get_array(df2, "d"), get_array(df, "d"))\n\n df2.iloc[0, 0] = "x"\n df2.iloc[0, 1] = 10\n tm.assert_frame_equal(df, df_orig)\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_astype.py | test_astype.py | Python | 10,185 | 0.95 | 0.139373 | 0.064935 | react-lib | 575 | 2024-06-11T23:56:56.196777 | MIT | true | 5614a7e5d7c90a20c60a2c4c8b2fc942 |
import numpy as np\nimport pytest\n\nfrom pandas.compat import PY311\nfrom pandas.errors import (\n ChainedAssignmentError,\n SettingWithCopyWarning,\n)\n\nfrom pandas import (\n DataFrame,\n option_context,\n)\nimport pandas._testing as tm\n\n\ndef test_methods_iloc_warn(using_copy_on_write):\n if not using_copy_on_write:\n df = DataFrame({"a": [1, 2, 3], "b": 1})\n with tm.assert_cow_warning(match="A value"):\n df.iloc[:, 0].replace(1, 5, inplace=True)\n\n with tm.assert_cow_warning(match="A value"):\n df.iloc[:, 0].fillna(1, inplace=True)\n\n with tm.assert_cow_warning(match="A value"):\n df.iloc[:, 0].interpolate(inplace=True)\n\n with tm.assert_cow_warning(match="A value"):\n df.iloc[:, 0].ffill(inplace=True)\n\n with tm.assert_cow_warning(match="A value"):\n df.iloc[:, 0].bfill(inplace=True)\n\n\n@pytest.mark.parametrize(\n "func, args",\n [\n ("replace", (4, 5)),\n ("fillna", (1,)),\n ("interpolate", ()),\n ("bfill", ()),\n ("ffill", ()),\n ],\n)\ndef test_methods_iloc_getitem_item_cache(\n func, args, using_copy_on_write, warn_copy_on_write\n):\n # ensure we don't incorrectly raise chained assignment warning because\n # of the item cache / iloc not setting the item cache\n df_orig = DataFrame({"a": [1, 2, 3], "b": 1})\n\n df = df_orig.copy()\n ser = df.iloc[:, 0]\n getattr(ser, func)(*args, inplace=True)\n\n # parent that holds item_cache is dead, so don't increase ref count\n df = df_orig.copy()\n ser = df.copy()["a"]\n getattr(ser, func)(*args, inplace=True)\n\n df = df_orig.copy()\n df["a"] # populate the item_cache\n ser = df.iloc[:, 0] # iloc creates a new object\n getattr(ser, func)(*args, inplace=True)\n\n df = df_orig.copy()\n df["a"] # populate the item_cache\n ser = df["a"]\n getattr(ser, func)(*args, inplace=True)\n\n df = df_orig.copy()\n df["a"] # populate the item_cache\n # TODO(CoW-warn) because of the usage of *args, this doesn't warn on Py3.11+\n if using_copy_on_write:\n with tm.raises_chained_assignment_error(not PY311):\n 
getattr(df["a"], func)(*args, inplace=True)\n else:\n with tm.assert_cow_warning(not PY311, match="A value"):\n getattr(df["a"], func)(*args, inplace=True)\n\n df = df_orig.copy()\n ser = df["a"] # populate the item_cache and keep ref\n if using_copy_on_write:\n with tm.raises_chained_assignment_error(not PY311):\n getattr(df["a"], func)(*args, inplace=True)\n else:\n # ideally also warns on the default mode, but the ser' _cacher\n # messes up the refcount + even in warning mode this doesn't trigger\n # the warning of Py3.1+ (see above)\n with tm.assert_cow_warning(warn_copy_on_write and not PY311, match="A value"):\n getattr(df["a"], func)(*args, inplace=True)\n\n\ndef test_methods_iloc_getitem_item_cache_fillna(\n using_copy_on_write, warn_copy_on_write\n):\n # ensure we don't incorrectly raise chained assignment warning because\n # of the item cache / iloc not setting the item cache\n df_orig = DataFrame({"a": [1, 2, 3], "b": 1})\n\n df = df_orig.copy()\n ser = df.iloc[:, 0]\n ser.fillna(1, inplace=True)\n\n # parent that holds item_cache is dead, so don't increase ref count\n df = df_orig.copy()\n ser = df.copy()["a"]\n ser.fillna(1, inplace=True)\n\n df = df_orig.copy()\n df["a"] # populate the item_cache\n ser = df.iloc[:, 0] # iloc creates a new object\n ser.fillna(1, inplace=True)\n\n df = df_orig.copy()\n df["a"] # populate the item_cache\n ser = df["a"]\n ser.fillna(1, inplace=True)\n\n df = df_orig.copy()\n df["a"] # populate the item_cache\n if using_copy_on_write:\n with tm.raises_chained_assignment_error():\n df["a"].fillna(1, inplace=True)\n else:\n with tm.assert_cow_warning(match="A value"):\n df["a"].fillna(1, inplace=True)\n\n df = df_orig.copy()\n ser = df["a"] # populate the item_cache and keep ref\n if using_copy_on_write:\n with tm.raises_chained_assignment_error():\n df["a"].fillna(1, inplace=True)\n else:\n # TODO(CoW-warn) ideally also warns on the default mode, but the ser' _cacher\n # messes up the refcount\n with 
tm.assert_cow_warning(warn_copy_on_write, match="A value"):\n df["a"].fillna(1, inplace=True)\n\n\n# TODO(CoW-warn) expand the cases\n@pytest.mark.parametrize(\n "indexer", [0, [0, 1], slice(0, 2), np.array([True, False, True])]\n)\ndef test_series_setitem(indexer, using_copy_on_write, warn_copy_on_write):\n # ensure we only get a single warning for those typical cases of chained\n # assignment\n df = DataFrame({"a": [1, 2, 3], "b": 1})\n\n # using custom check instead of tm.assert_produces_warning because that doesn't\n # fail if multiple warnings are raised\n with pytest.warns() as record:\n df["a"][indexer] = 0\n assert len(record) == 1\n if using_copy_on_write:\n assert record[0].category == ChainedAssignmentError\n else:\n assert record[0].category == FutureWarning\n assert "ChainedAssignmentError" in record[0].message.args[0]\n\n\n@pytest.mark.filterwarnings("ignore::pandas.errors.SettingWithCopyWarning")\n@pytest.mark.parametrize(\n "indexer", ["a", ["a", "b"], slice(0, 2), np.array([True, False, True])]\n)\ndef test_frame_setitem(indexer, using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3, 4, 5], "b": 1})\n\n extra_warnings = () if using_copy_on_write else (SettingWithCopyWarning,)\n\n with option_context("chained_assignment", "warn"):\n with tm.raises_chained_assignment_error(extra_warnings=extra_warnings):\n df[0:3][indexer] = 10\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_chained_assignment_deprecation.py | test_chained_assignment_deprecation.py | Python | 5,750 | 0.95 | 0.08046 | 0.118881 | vue-tools | 529 | 2024-05-25T04:39:47.229594 | BSD-3-Clause | true | 34a0050611a46690e38c54659e5891b6 |
import numpy as np\n\nfrom pandas import (\n DataFrame,\n option_context,\n)\nimport pandas._testing as tm\nfrom pandas.tests.copy_view.util import get_array\n\n\ndef test_clip_inplace_reference(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1.5, 2, 3]})\n df_copy = df.copy()\n arr_a = get_array(df, "a")\n view = df[:]\n if warn_copy_on_write:\n with tm.assert_cow_warning():\n df.clip(lower=2, inplace=True)\n else:\n df.clip(lower=2, inplace=True)\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df, "a"), arr_a)\n assert df._mgr._has_no_reference(0)\n assert view._mgr._has_no_reference(0)\n tm.assert_frame_equal(df_copy, view)\n else:\n assert np.shares_memory(get_array(df, "a"), arr_a)\n\n\ndef test_clip_inplace_reference_no_op(using_copy_on_write):\n df = DataFrame({"a": [1.5, 2, 3]})\n df_copy = df.copy()\n arr_a = get_array(df, "a")\n view = df[:]\n df.clip(lower=0, inplace=True)\n\n assert np.shares_memory(get_array(df, "a"), arr_a)\n\n if using_copy_on_write:\n assert not df._mgr._has_no_reference(0)\n assert not view._mgr._has_no_reference(0)\n tm.assert_frame_equal(df_copy, view)\n\n\ndef test_clip_inplace(using_copy_on_write):\n df = DataFrame({"a": [1.5, 2, 3]})\n arr_a = get_array(df, "a")\n df.clip(lower=2, inplace=True)\n\n assert np.shares_memory(get_array(df, "a"), arr_a)\n\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n\n\ndef test_clip(using_copy_on_write):\n df = DataFrame({"a": [1.5, 2, 3]})\n df_orig = df.copy()\n df2 = df.clip(lower=2)\n\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n tm.assert_frame_equal(df_orig, df)\n\n\ndef test_clip_no_op(using_copy_on_write):\n df = DataFrame({"a": [1.5, 2, 3]})\n df2 = df.clip(lower=0)\n\n if using_copy_on_write:\n assert not df._mgr._has_no_reference(0)\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not 
np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n\ndef test_clip_chained_inplace(using_copy_on_write):\n df = DataFrame({"a": [1, 4, 2], "b": 1})\n df_orig = df.copy()\n if using_copy_on_write:\n with tm.raises_chained_assignment_error():\n df["a"].clip(1, 2, inplace=True)\n tm.assert_frame_equal(df, df_orig)\n\n with tm.raises_chained_assignment_error():\n df[["a"]].clip(1, 2, inplace=True)\n tm.assert_frame_equal(df, df_orig)\n else:\n with tm.assert_produces_warning(FutureWarning, match="inplace method"):\n df["a"].clip(1, 2, inplace=True)\n\n with tm.assert_produces_warning(None):\n with option_context("mode.chained_assignment", None):\n df[["a"]].clip(1, 2, inplace=True)\n\n with tm.assert_produces_warning(None):\n with option_context("mode.chained_assignment", None):\n df[df["a"] > 1].clip(1, 2, inplace=True)\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_clip.py | test_clip.py | Python | 3,077 | 0.85 | 0.128713 | 0 | node-utils | 442 | 2025-05-10T05:59:59.098152 | MIT | true | 1d1a7e4a630cd61b31487d4761ffd5ad |
import numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n Period,\n PeriodIndex,\n Series,\n Timedelta,\n TimedeltaIndex,\n Timestamp,\n)\nimport pandas._testing as tm\nfrom pandas.tests.copy_view.util import get_array\n\n# -----------------------------------------------------------------------------\n# Copy/view behaviour for Series / DataFrame constructors\n\n\n@pytest.mark.parametrize("dtype", [None, "int64"])\ndef test_series_from_series(dtype, using_copy_on_write, warn_copy_on_write):\n # Case: constructing a Series from another Series object follows CoW rules:\n # a new object is returned and thus mutations are not propagated\n ser = Series([1, 2, 3], name="name")\n\n # default is copy=False -> new Series is a shallow copy / view of original\n result = Series(ser, dtype=dtype)\n\n # the shallow copy still shares memory\n assert np.shares_memory(get_array(ser), get_array(result))\n\n if using_copy_on_write:\n assert result._mgr.blocks[0].refs.has_reference()\n\n if using_copy_on_write:\n # mutating new series copy doesn't mutate original\n result.iloc[0] = 0\n assert ser.iloc[0] == 1\n # mutating triggered a copy-on-write -> no longer shares memory\n assert not np.shares_memory(get_array(ser), get_array(result))\n else:\n # mutating shallow copy does mutate original\n with tm.assert_cow_warning(warn_copy_on_write):\n result.iloc[0] = 0\n assert ser.iloc[0] == 0\n # and still shares memory\n assert np.shares_memory(get_array(ser), get_array(result))\n\n # the same when modifying the parent\n result = Series(ser, dtype=dtype)\n\n if using_copy_on_write:\n # mutating original doesn't mutate new series\n ser.iloc[0] = 0\n assert result.iloc[0] == 1\n else:\n # mutating original does mutate shallow copy\n with tm.assert_cow_warning(warn_copy_on_write):\n ser.iloc[0] = 0\n assert result.iloc[0] == 0\n\n\ndef test_series_from_series_with_reindex(using_copy_on_write, warn_copy_on_write):\n # Case: 
constructing a Series from another Series with specifying an index\n # that potentially requires a reindex of the values\n ser = Series([1, 2, 3], name="name")\n\n # passing an index that doesn't actually require a reindex of the values\n # -> without CoW we get an actual mutating view\n for index in [\n ser.index,\n ser.index.copy(),\n list(ser.index),\n ser.index.rename("idx"),\n ]:\n result = Series(ser, index=index)\n assert np.shares_memory(ser.values, result.values)\n with tm.assert_cow_warning(warn_copy_on_write):\n result.iloc[0] = 0\n if using_copy_on_write:\n assert ser.iloc[0] == 1\n else:\n assert ser.iloc[0] == 0\n\n # ensure that if an actual reindex is needed, we don't have any refs\n # (mutating the result wouldn't trigger CoW)\n result = Series(ser, index=[0, 1, 2, 3])\n assert not np.shares_memory(ser.values, result.values)\n if using_copy_on_write:\n assert not result._mgr.blocks[0].refs.has_reference()\n\n\n@pytest.mark.parametrize("fastpath", [False, True])\n@pytest.mark.parametrize("dtype", [None, "int64"])\n@pytest.mark.parametrize("idx", [None, pd.RangeIndex(start=0, stop=3, step=1)])\n@pytest.mark.parametrize(\n "arr", [np.array([1, 2, 3], dtype="int64"), pd.array([1, 2, 3], dtype="Int64")]\n)\ndef test_series_from_array(using_copy_on_write, idx, dtype, fastpath, arr):\n if idx is None or dtype is not None:\n fastpath = False\n msg = "The 'fastpath' keyword in pd.Series is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n ser = Series(arr, dtype=dtype, index=idx, fastpath=fastpath)\n ser_orig = ser.copy()\n data = getattr(arr, "_data", arr)\n if using_copy_on_write:\n assert not np.shares_memory(get_array(ser), data)\n else:\n assert np.shares_memory(get_array(ser), data)\n\n arr[0] = 100\n if using_copy_on_write:\n tm.assert_series_equal(ser, ser_orig)\n else:\n expected = Series([100, 2, 3], dtype=dtype if dtype is not None else arr.dtype)\n tm.assert_series_equal(ser, 
expected)\n\n\n@pytest.mark.parametrize("copy", [True, False, None])\ndef test_series_from_array_different_dtype(using_copy_on_write, copy):\n arr = np.array([1, 2, 3], dtype="int64")\n ser = Series(arr, dtype="int32", copy=copy)\n assert not np.shares_memory(get_array(ser), arr)\n\n\n@pytest.mark.parametrize(\n "idx",\n [\n Index([1, 2]),\n DatetimeIndex([Timestamp("2019-12-31"), Timestamp("2020-12-31")]),\n PeriodIndex([Period("2019-12-31"), Period("2020-12-31")]),\n TimedeltaIndex([Timedelta("1 days"), Timedelta("2 days")]),\n ],\n)\ndef test_series_from_index(using_copy_on_write, idx):\n ser = Series(idx)\n expected = idx.copy(deep=True)\n if using_copy_on_write:\n assert np.shares_memory(get_array(ser), get_array(idx))\n assert not ser._mgr._has_no_reference(0)\n else:\n assert not np.shares_memory(get_array(ser), get_array(idx))\n ser.iloc[0] = ser.iloc[1]\n tm.assert_index_equal(idx, expected)\n\n\ndef test_series_from_index_different_dtypes(using_copy_on_write):\n idx = Index([1, 2, 3], dtype="int64")\n ser = Series(idx, dtype="int32")\n assert not np.shares_memory(get_array(ser), get_array(idx))\n if using_copy_on_write:\n assert ser._mgr._has_no_reference(0)\n\n\n@pytest.mark.filterwarnings("ignore:Setting a value on a view:FutureWarning")\n@pytest.mark.parametrize("fastpath", [False, True])\n@pytest.mark.parametrize("dtype", [None, "int64"])\n@pytest.mark.parametrize("idx", [None, pd.RangeIndex(start=0, stop=3, step=1)])\ndef test_series_from_block_manager(using_copy_on_write, idx, dtype, fastpath):\n ser = Series([1, 2, 3], dtype="int64")\n ser_orig = ser.copy()\n msg = "The 'fastpath' keyword in pd.Series is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n ser2 = Series(ser._mgr, dtype=dtype, fastpath=fastpath, index=idx)\n assert np.shares_memory(get_array(ser), get_array(ser2))\n if using_copy_on_write:\n assert not ser2._mgr._has_no_reference(0)\n\n ser2.iloc[0] = 100\n if using_copy_on_write:\n 
tm.assert_series_equal(ser, ser_orig)\n else:\n expected = Series([100, 2, 3])\n tm.assert_series_equal(ser, expected)\n\n\ndef test_series_from_block_manager_different_dtype(using_copy_on_write):\n ser = Series([1, 2, 3], dtype="int64")\n msg = "Passing a SingleBlockManager to Series"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n ser2 = Series(ser._mgr, dtype="int32")\n assert not np.shares_memory(get_array(ser), get_array(ser2))\n if using_copy_on_write:\n assert ser2._mgr._has_no_reference(0)\n\n\n@pytest.mark.parametrize("use_mgr", [True, False])\n@pytest.mark.parametrize("columns", [None, ["a"]])\ndef test_dataframe_constructor_mgr_or_df(\n using_copy_on_write, warn_copy_on_write, columns, use_mgr\n):\n df = DataFrame({"a": [1, 2, 3]})\n df_orig = df.copy()\n\n if use_mgr:\n data = df._mgr\n warn = DeprecationWarning\n else:\n data = df\n warn = None\n msg = "Passing a BlockManager to DataFrame"\n with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):\n new_df = DataFrame(data)\n\n assert np.shares_memory(get_array(df, "a"), get_array(new_df, "a"))\n with tm.assert_cow_warning(warn_copy_on_write and not use_mgr):\n new_df.iloc[0] = 100\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df, "a"), get_array(new_df, "a"))\n tm.assert_frame_equal(df, df_orig)\n else:\n assert np.shares_memory(get_array(df, "a"), get_array(new_df, "a"))\n tm.assert_frame_equal(df, new_df)\n\n\n@pytest.mark.parametrize("dtype", [None, "int64", "Int64"])\n@pytest.mark.parametrize("index", [None, [0, 1, 2]])\n@pytest.mark.parametrize("columns", [None, ["a", "b"], ["a", "b", "c"]])\ndef test_dataframe_from_dict_of_series(\n request, using_copy_on_write, warn_copy_on_write, columns, index, dtype\n):\n # Case: constructing a DataFrame from Series objects with copy=False\n # has to do a lazy following CoW rules\n # (the default for DataFrame(dict) is still to copy to ensure consolidation)\n s1 = Series([1, 2, 3])\n s2 = Series([4, 
5, 6])\n s1_orig = s1.copy()\n expected = DataFrame(\n {"a": [1, 2, 3], "b": [4, 5, 6]}, index=index, columns=columns, dtype=dtype\n )\n\n result = DataFrame(\n {"a": s1, "b": s2}, index=index, columns=columns, dtype=dtype, copy=False\n )\n\n # the shallow copy still shares memory\n assert np.shares_memory(get_array(result, "a"), get_array(s1))\n\n # mutating the new dataframe doesn't mutate original\n with tm.assert_cow_warning(warn_copy_on_write):\n result.iloc[0, 0] = 10\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(s1))\n tm.assert_series_equal(s1, s1_orig)\n else:\n assert s1.iloc[0] == 10\n\n # the same when modifying the parent series\n s1 = Series([1, 2, 3])\n s2 = Series([4, 5, 6])\n result = DataFrame(\n {"a": s1, "b": s2}, index=index, columns=columns, dtype=dtype, copy=False\n )\n with tm.assert_cow_warning(warn_copy_on_write):\n s1.iloc[0] = 10\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(s1))\n tm.assert_frame_equal(result, expected)\n else:\n assert result.iloc[0, 0] == 10\n\n\n@pytest.mark.parametrize("dtype", [None, "int64"])\ndef test_dataframe_from_dict_of_series_with_reindex(dtype):\n # Case: constructing a DataFrame from Series objects with copy=False\n # and passing an index that requires an actual (no-view) reindex -> need\n # to ensure the result doesn't have refs set up to unnecessarily trigger\n # a copy on write\n s1 = Series([1, 2, 3])\n s2 = Series([4, 5, 6])\n df = DataFrame({"a": s1, "b": s2}, index=[1, 2, 3], dtype=dtype, copy=False)\n\n # df should own its memory, so mutating shouldn't trigger a copy\n arr_before = get_array(df, "a")\n assert not np.shares_memory(arr_before, get_array(s1))\n df.iloc[0, 0] = 100\n arr_after = get_array(df, "a")\n assert np.shares_memory(arr_before, arr_after)\n\n\n@pytest.mark.parametrize("cons", [Series, Index])\n@pytest.mark.parametrize(\n "data, dtype", [([1, 2], None), ([1, 2], "int64"), (["a", "b"], 
object)]\n)\ndef test_dataframe_from_series_or_index(\n using_copy_on_write, warn_copy_on_write, data, dtype, cons\n):\n obj = cons(data, dtype=dtype)\n obj_orig = obj.copy()\n df = DataFrame(obj, dtype=dtype)\n assert np.shares_memory(get_array(obj), get_array(df, 0))\n if using_copy_on_write:\n assert not df._mgr._has_no_reference(0)\n\n with tm.assert_cow_warning(warn_copy_on_write):\n df.iloc[0, 0] = data[-1]\n if using_copy_on_write:\n tm.assert_equal(obj, obj_orig)\n\n\n@pytest.mark.parametrize("cons", [Series, Index])\ndef test_dataframe_from_series_or_index_different_dtype(using_copy_on_write, cons):\n obj = cons([1, 2], dtype="int64")\n df = DataFrame(obj, dtype="int32")\n assert not np.shares_memory(get_array(obj), get_array(df, 0))\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n\n\ndef test_dataframe_from_series_infer_datetime(using_copy_on_write):\n ser = Series([Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype=object)\n with tm.assert_produces_warning(FutureWarning, match="Dtype inference"):\n df = DataFrame(ser)\n assert not np.shares_memory(get_array(ser), get_array(df, 0))\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n\n\n@pytest.mark.parametrize("index", [None, [0, 1, 2]])\ndef test_dataframe_from_dict_of_series_with_dtype(index):\n # Variant of above, but now passing a dtype that causes a copy\n # -> need to ensure the result doesn't have refs set up to unnecessarily\n # trigger a copy on write\n s1 = Series([1.0, 2.0, 3.0])\n s2 = Series([4, 5, 6])\n df = DataFrame({"a": s1, "b": s2}, index=index, dtype="int64", copy=False)\n\n # df should own its memory, so mutating shouldn't trigger a copy\n arr_before = get_array(df, "a")\n assert not np.shares_memory(arr_before, get_array(s1))\n df.iloc[0, 0] = 100\n arr_after = get_array(df, "a")\n assert np.shares_memory(arr_before, arr_after)\n\n\n@pytest.mark.parametrize("copy", [False, None, True])\ndef test_frame_from_numpy_array(using_copy_on_write, copy, 
using_array_manager):\n arr = np.array([[1, 2], [3, 4]])\n df = DataFrame(arr, copy=copy)\n\n if (\n using_copy_on_write\n and copy is not False\n or copy is True\n or (using_array_manager and copy is None)\n ):\n assert not np.shares_memory(get_array(df, 0), arr)\n else:\n assert np.shares_memory(get_array(df, 0), arr)\n\n\ndef test_dataframe_from_records_with_dataframe(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2, 3]})\n df_orig = df.copy()\n with tm.assert_produces_warning(FutureWarning):\n df2 = DataFrame.from_records(df)\n if using_copy_on_write:\n assert not df._mgr._has_no_reference(0)\n assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n with tm.assert_cow_warning(warn_copy_on_write):\n df2.iloc[0, 0] = 100\n if using_copy_on_write:\n tm.assert_frame_equal(df, df_orig)\n else:\n tm.assert_frame_equal(df, df2)\n\n\ndef test_frame_from_dict_of_index(using_copy_on_write):\n idx = Index([1, 2, 3])\n expected = idx.copy(deep=True)\n df = DataFrame({"a": idx}, copy=False)\n assert np.shares_memory(get_array(df, "a"), idx._values)\n if using_copy_on_write:\n assert not df._mgr._has_no_reference(0)\n\n df.iloc[0, 0] = 100\n tm.assert_index_equal(idx, expected)\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_constructors.py | test_constructors.py | Python | 13,952 | 0.95 | 0.125654 | 0.10559 | react-lib | 1,000 | 2023-09-26T10:07:13.891545 | BSD-3-Clause | true | 5df16ba28c9765a6f97f66cf330ac107 |
import numpy as np\nimport pytest\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\nfrom pandas.tests.copy_view.util import get_array\n\n\ndef test_assigning_to_same_variable_removes_references(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3]})\n df = df.reset_index()\n if using_copy_on_write:\n assert df._mgr._has_no_reference(1)\n arr = get_array(df, "a")\n df.iloc[0, 1] = 100 # Write into a\n\n assert np.shares_memory(arr, get_array(df, "a"))\n\n\ndef test_setitem_dont_track_unnecessary_references(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})\n\n df["b"] = 100\n arr = get_array(df, "a")\n # We split the block in setitem, if we are not careful the new blocks will\n # reference each other triggering a copy\n df.iloc[0, 0] = 100\n assert np.shares_memory(arr, get_array(df, "a"))\n\n\ndef test_setitem_with_view_copies(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})\n view = df[:]\n expected = df.copy()\n\n df["b"] = 100\n arr = get_array(df, "a")\n with tm.assert_cow_warning(warn_copy_on_write):\n df.iloc[0, 0] = 100 # Check that we correctly track reference\n if using_copy_on_write:\n assert not np.shares_memory(arr, get_array(df, "a"))\n tm.assert_frame_equal(view, expected)\n\n\ndef test_setitem_with_view_invalidated_does_not_copy(\n using_copy_on_write, warn_copy_on_write, request\n):\n df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 1})\n view = df[:]\n\n df["b"] = 100\n arr = get_array(df, "a")\n view = None # noqa: F841\n # TODO(CoW-warn) false positive? -> block gets split because of `df["b"] = 100`\n # which introduces additional refs, even when those of `view` go out of scopes\n with tm.assert_cow_warning(warn_copy_on_write):\n df.iloc[0, 0] = 100\n if using_copy_on_write:\n # Setitem split the block. Since the old block shared data with view\n # all the new blocks are referencing view and each other. 
When view\n # goes out of scope, they don't share data with any other block,\n # so we should not trigger a copy\n mark = pytest.mark.xfail(\n reason="blk.delete does not track references correctly"\n )\n request.applymarker(mark)\n assert np.shares_memory(arr, get_array(df, "a"))\n\n\ndef test_out_of_scope(using_copy_on_write):\n def func():\n df = DataFrame({"a": [1, 2], "b": 1.5, "c": 1})\n # create some subset\n result = df[["a", "b"]]\n return result\n\n result = func()\n if using_copy_on_write:\n assert not result._mgr.blocks[0].refs.has_reference()\n assert not result._mgr.blocks[1].refs.has_reference()\n\n\ndef test_delete(using_copy_on_write):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((4, 3)), columns=["a", "b", "c"]\n )\n del df["b"]\n if using_copy_on_write:\n assert not df._mgr.blocks[0].refs.has_reference()\n assert not df._mgr.blocks[1].refs.has_reference()\n\n df = df[["a"]]\n if using_copy_on_write:\n assert not df._mgr.blocks[0].refs.has_reference()\n\n\ndef test_delete_reference(using_copy_on_write):\n df = DataFrame(\n np.random.default_rng(2).standard_normal((4, 3)), columns=["a", "b", "c"]\n )\n x = df[:]\n del df["b"]\n if using_copy_on_write:\n assert df._mgr.blocks[0].refs.has_reference()\n assert df._mgr.blocks[1].refs.has_reference()\n assert x._mgr.blocks[0].refs.has_reference()\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_core_functionalities.py | test_core_functionalities.py | Python | 3,506 | 0.95 | 0.150943 | 0.105882 | vue-tools | 511 | 2024-12-19T17:35:47.468909 | BSD-3-Clause | true | 57a9772b01ee9c3f15e419774c1cdb54 |
import numpy as np\nimport pytest\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas.compat import HAS_PYARROW\n\nfrom pandas import (\n DataFrame,\n Index,\n Series,\n concat,\n merge,\n)\nimport pandas._testing as tm\nfrom pandas.tests.copy_view.util import get_array\n\n\ndef test_concat_frames(using_copy_on_write):\n df = DataFrame({"b": ["a"] * 3}, dtype=object)\n df2 = DataFrame({"a": ["a"] * 3}, dtype=object)\n df_orig = df.copy()\n result = concat([df, df2], axis=1)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "b"), get_array(df, "b"))\n assert np.shares_memory(get_array(result, "a"), get_array(df2, "a"))\n else:\n assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))\n assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a"))\n\n result.iloc[0, 0] = "d"\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))\n assert np.shares_memory(get_array(result, "a"), get_array(df2, "a"))\n\n result.iloc[0, 1] = "d"\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_concat_frames_updating_input(using_copy_on_write):\n df = DataFrame({"b": ["a"] * 3}, dtype=object)\n df2 = DataFrame({"a": ["a"] * 3}, dtype=object)\n result = concat([df, df2], axis=1)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "b"), get_array(df, "b"))\n assert np.shares_memory(get_array(result, "a"), get_array(df2, "a"))\n else:\n assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))\n assert not np.shares_memory(get_array(result, "a"), get_array(df2, "a"))\n\n expected = result.copy()\n df.iloc[0, 0] = "d"\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "b"), get_array(df, "b"))\n assert np.shares_memory(get_array(result, "a"), get_array(df2, "a"))\n\n df2.iloc[0, 0] = "d"\n if using_copy_on_write:\n assert not 
np.shares_memory(get_array(result, "a"), get_array(df2, "a"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_concat_series(using_copy_on_write):\n ser = Series([1, 2], name="a")\n ser2 = Series([3, 4], name="b")\n ser_orig = ser.copy()\n ser2_orig = ser2.copy()\n result = concat([ser, ser2], axis=1)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), ser.values)\n assert np.shares_memory(get_array(result, "b"), ser2.values)\n else:\n assert not np.shares_memory(get_array(result, "a"), ser.values)\n assert not np.shares_memory(get_array(result, "b"), ser2.values)\n\n result.iloc[0, 0] = 100\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), ser.values)\n assert np.shares_memory(get_array(result, "b"), ser2.values)\n\n result.iloc[0, 1] = 1000\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "b"), ser2.values)\n tm.assert_series_equal(ser, ser_orig)\n tm.assert_series_equal(ser2, ser2_orig)\n\n\ndef test_concat_frames_chained(using_copy_on_write):\n df1 = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})\n df2 = DataFrame({"c": [4, 5, 6]})\n df3 = DataFrame({"d": [4, 5, 6]})\n result = concat([concat([df1, df2], axis=1), df3], axis=1)\n expected = result.copy()\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert np.shares_memory(get_array(result, "c"), get_array(df2, "c"))\n assert np.shares_memory(get_array(result, "d"), get_array(df3, "d"))\n else:\n assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert not np.shares_memory(get_array(result, "c"), get_array(df2, "c"))\n assert not np.shares_memory(get_array(result, "d"), get_array(df3, "d"))\n\n df1.iloc[0, 0] = 100\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_concat_series_chained(using_copy_on_write):\n ser1 = Series([1, 2, 3], 
name="a")\n ser2 = Series([4, 5, 6], name="c")\n ser3 = Series([4, 5, 6], name="d")\n result = concat([concat([ser1, ser2], axis=1), ser3], axis=1)\n expected = result.copy()\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(ser1, "a"))\n assert np.shares_memory(get_array(result, "c"), get_array(ser2, "c"))\n assert np.shares_memory(get_array(result, "d"), get_array(ser3, "d"))\n else:\n assert not np.shares_memory(get_array(result, "a"), get_array(ser1, "a"))\n assert not np.shares_memory(get_array(result, "c"), get_array(ser2, "c"))\n assert not np.shares_memory(get_array(result, "d"), get_array(ser3, "d"))\n\n ser1.iloc[0] = 100\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(ser1, "a"))\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_concat_series_updating_input(using_copy_on_write):\n ser = Series([1, 2], name="a")\n ser2 = Series([3, 4], name="b")\n expected = DataFrame({"a": [1, 2], "b": [3, 4]})\n result = concat([ser, ser2], axis=1)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(ser, "a"))\n assert np.shares_memory(get_array(result, "b"), get_array(ser2, "b"))\n else:\n assert not np.shares_memory(get_array(result, "a"), get_array(ser, "a"))\n assert not np.shares_memory(get_array(result, "b"), get_array(ser2, "b"))\n\n ser.iloc[0] = 100\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(ser, "a"))\n assert np.shares_memory(get_array(result, "b"), get_array(ser2, "b"))\n tm.assert_frame_equal(result, expected)\n\n ser2.iloc[0] = 1000\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "b"), get_array(ser2, "b"))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_concat_mixed_series_frame(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "c": 1})\n ser = Series([4, 5, 6], name="d")\n result = concat([df, ser], axis=1)\n expected = result.copy()\n\n if 
using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n assert np.shares_memory(get_array(result, "c"), get_array(df, "c"))\n assert np.shares_memory(get_array(result, "d"), get_array(ser, "d"))\n else:\n assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n assert not np.shares_memory(get_array(result, "c"), get_array(df, "c"))\n assert not np.shares_memory(get_array(result, "d"), get_array(ser, "d"))\n\n ser.iloc[0] = 100\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "d"), get_array(ser, "d"))\n\n df.iloc[0, 0] = 100\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("copy", [True, None, False])\ndef test_concat_copy_keyword(using_copy_on_write, copy):\n df = DataFrame({"a": [1, 2]})\n df2 = DataFrame({"b": [1.5, 2.5]})\n\n result = concat([df, df2], axis=1, copy=copy)\n\n if using_copy_on_write or copy is False:\n assert np.shares_memory(get_array(df, "a"), get_array(result, "a"))\n assert np.shares_memory(get_array(df2, "b"), get_array(result, "b"))\n else:\n assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))\n assert not np.shares_memory(get_array(df2, "b"), get_array(result, "b"))\n\n\n@pytest.mark.parametrize(\n "func",\n [\n lambda df1, df2, **kwargs: df1.merge(df2, **kwargs),\n lambda df1, df2, **kwargs: merge(df1, df2, **kwargs),\n ],\n)\ndef test_merge_on_key(using_copy_on_write, func):\n df1 = DataFrame({"key": Series(["a", "b", "c"], dtype=object), "a": [1, 2, 3]})\n df2 = DataFrame({"key": Series(["a", "b", "c"], dtype=object), "b": [4, 5, 6]})\n df1_orig = df1.copy()\n df2_orig = df2.copy()\n\n result = func(df1, df2, on="key")\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n assert 
np.shares_memory(get_array(result, "key"), get_array(df1, "key"))\n assert not np.shares_memory(get_array(result, "key"), get_array(df2, "key"))\n else:\n assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n\n result.iloc[0, 1] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n\n result.iloc[0, 2] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n tm.assert_frame_equal(df1, df1_orig)\n tm.assert_frame_equal(df2, df2_orig)\n\n\ndef test_merge_on_index(using_copy_on_write):\n df1 = DataFrame({"a": [1, 2, 3]})\n df2 = DataFrame({"b": [4, 5, 6]})\n df1_orig = df1.copy()\n df2_orig = df2.copy()\n\n result = merge(df1, df2, left_index=True, right_index=True)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n else:\n assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n\n result.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n\n result.iloc[0, 1] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n tm.assert_frame_equal(df1, df1_orig)\n tm.assert_frame_equal(df2, df2_orig)\n\n\n@pytest.mark.parametrize(\n "func, how",\n [\n (lambda df1, df2, **kwargs: merge(df2, df1, on="key", **kwargs), "right"),\n (lambda df1, df2, **kwargs: merge(df1, df2, on="key", **kwargs), "left"),\n ],\n)\ndef test_merge_on_key_enlarging_one(using_copy_on_write, func, how):\n df1 = DataFrame({"key": Series(["a", "b", 
"c"], dtype=object), "a": [1, 2, 3]})\n df2 = DataFrame({"key": Series(["a", "b"], dtype=object), "b": [4, 5]})\n df1_orig = df1.copy()\n df2_orig = df2.copy()\n\n result = func(df1, df2, how=how)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n assert df2._mgr._has_no_reference(1)\n assert df2._mgr._has_no_reference(0)\n assert np.shares_memory(get_array(result, "key"), get_array(df1, "key")) is (\n how == "left"\n )\n assert not np.shares_memory(get_array(result, "key"), get_array(df2, "key"))\n else:\n assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n\n if how == "left":\n result.iloc[0, 1] = 0\n else:\n result.iloc[0, 2] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n tm.assert_frame_equal(df1, df1_orig)\n tm.assert_frame_equal(df2, df2_orig)\n\n\n@pytest.mark.parametrize("copy", [True, None, False])\ndef test_merge_copy_keyword(using_copy_on_write, copy):\n df = DataFrame({"a": [1, 2]})\n df2 = DataFrame({"b": [3, 4.5]})\n\n result = df.merge(df2, copy=copy, left_index=True, right_index=True)\n\n if using_copy_on_write or copy is False:\n assert np.shares_memory(get_array(df, "a"), get_array(result, "a"))\n assert np.shares_memory(get_array(df2, "b"), get_array(result, "b"))\n else:\n assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))\n assert not np.shares_memory(get_array(df2, "b"), get_array(result, "b"))\n\n\n@pytest.mark.xfail(\n using_string_dtype() and HAS_PYARROW,\n reason="TODO(infer_string); result.index infers str dtype while both "\n "df1 and df2 index are object.",\n)\ndef test_join_on_key(using_copy_on_write):\n df_index = Index(["a", "b", "c"], name="key", dtype=object)\n\n df1 = DataFrame({"a": [1, 2, 3]}, index=df_index.copy(deep=True))\n df2 
= DataFrame({"b": [4, 5, 6]}, index=df_index.copy(deep=True))\n\n df1_orig = df1.copy()\n df2_orig = df2.copy()\n\n result = df1.join(df2, on="key")\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n assert np.shares_memory(get_array(result.index), get_array(df1.index))\n assert not np.shares_memory(get_array(result.index), get_array(df2.index))\n else:\n assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n\n result.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n\n result.iloc[0, 1] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "b"), get_array(df2, "b"))\n\n tm.assert_frame_equal(df1, df1_orig)\n tm.assert_frame_equal(df2, df2_orig)\n\n\ndef test_join_multiple_dataframes_on_key(using_copy_on_write):\n df_index = Index(["a", "b", "c"], name="key", dtype=object)\n\n df1 = DataFrame({"a": [1, 2, 3]}, index=df_index.copy(deep=True))\n dfs_list = [\n DataFrame({"b": [4, 5, 6]}, index=df_index.copy(deep=True)),\n DataFrame({"c": [7, 8, 9]}, index=df_index.copy(deep=True)),\n ]\n\n df1_orig = df1.copy()\n dfs_list_orig = [df.copy() for df in dfs_list]\n\n result = df1.join(dfs_list)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b"))\n assert np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c"))\n assert np.shares_memory(get_array(result.index), get_array(df1.index))\n assert not np.shares_memory(\n get_array(result.index), get_array(dfs_list[0].index)\n )\n assert not np.shares_memory(\n get_array(result.index), get_array(dfs_list[1].index)\n )\n 
else:\n assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert not np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b"))\n assert not np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c"))\n\n result.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(df1, "a"))\n assert np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b"))\n assert np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c"))\n\n result.iloc[0, 1] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "b"), get_array(dfs_list[0], "b"))\n assert np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c"))\n\n result.iloc[0, 2] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "c"), get_array(dfs_list[1], "c"))\n\n tm.assert_frame_equal(df1, df1_orig)\n for df, df_orig in zip(dfs_list, dfs_list_orig):\n tm.assert_frame_equal(df, df_orig)\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_functions.py | test_functions.py | Python | 15,939 | 0.85 | 0.133333 | 0 | vue-tools | 190 | 2024-07-31T06:23:48.132259 | Apache-2.0 | true | 9f9341b1f4c12b8cc84b424bad7345a3 |
import numpy as np\nimport pytest\n\nfrom pandas.errors import SettingWithCopyWarning\n\nfrom pandas.core.dtypes.common import is_float_dtype\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.tests.copy_view.util import get_array\n\n\n@pytest.fixture(params=["numpy", "nullable"])\ndef backend(request):\n if request.param == "numpy":\n\n def make_dataframe(*args, **kwargs):\n return DataFrame(*args, **kwargs)\n\n def make_series(*args, **kwargs):\n return Series(*args, **kwargs)\n\n elif request.param == "nullable":\n\n def make_dataframe(*args, **kwargs):\n df = DataFrame(*args, **kwargs)\n df_nullable = df.convert_dtypes()\n # convert_dtypes will try to cast float to int if there is no loss in\n # precision -> undo that change\n for col in df.columns:\n if is_float_dtype(df[col].dtype) and not is_float_dtype(\n df_nullable[col].dtype\n ):\n df_nullable[col] = df_nullable[col].astype("Float64")\n # copy final result to ensure we start with a fully self-owning DataFrame\n return df_nullable.copy()\n\n def make_series(*args, **kwargs):\n ser = Series(*args, **kwargs)\n return ser.convert_dtypes().copy()\n\n return request.param, make_dataframe, make_series\n\n\n# -----------------------------------------------------------------------------\n# Indexing operations taking subset + modifying the subset/parent\n\n\ndef test_subset_column_selection(backend, using_copy_on_write):\n # Case: taking a subset of the columns of a DataFrame\n # + afterwards modifying the subset\n _, DataFrame, _ = backend\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n\n subset = df[["a", "c"]]\n\n if using_copy_on_write:\n # the subset shares memory ...\n assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))\n # ... 
but uses CoW when being modified\n subset.iloc[0, 0] = 0\n else:\n assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))\n # INFO this no longer raise warning since pandas 1.4\n # with pd.option_context("chained_assignment", "warn"):\n # with tm.assert_produces_warning(SettingWithCopyWarning):\n subset.iloc[0, 0] = 0\n\n assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))\n\n expected = DataFrame({"a": [0, 2, 3], "c": [0.1, 0.2, 0.3]})\n tm.assert_frame_equal(subset, expected)\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_subset_column_selection_modify_parent(backend, using_copy_on_write):\n # Case: taking a subset of the columns of a DataFrame\n # + afterwards modifying the parent\n _, DataFrame, _ = backend\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n\n subset = df[["a", "c"]]\n\n if using_copy_on_write:\n # the subset shares memory ...\n assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))\n # ... 
but parent uses CoW parent when it is modified\n df.iloc[0, 0] = 0\n\n assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))\n if using_copy_on_write:\n # different column/block still shares memory\n assert np.shares_memory(get_array(subset, "c"), get_array(df, "c"))\n\n expected = DataFrame({"a": [1, 2, 3], "c": [0.1, 0.2, 0.3]})\n tm.assert_frame_equal(subset, expected)\n\n\ndef test_subset_row_slice(backend, using_copy_on_write, warn_copy_on_write):\n # Case: taking a subset of the rows of a DataFrame using a slice\n # + afterwards modifying the subset\n _, DataFrame, _ = backend\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n\n subset = df[1:3]\n subset._mgr._verify_integrity()\n\n assert np.shares_memory(get_array(subset, "a"), get_array(df, "a"))\n\n if using_copy_on_write:\n subset.iloc[0, 0] = 0\n assert not np.shares_memory(get_array(subset, "a"), get_array(df, "a"))\n\n else:\n # INFO this no longer raise warning since pandas 1.4\n # with pd.option_context("chained_assignment", "warn"):\n # with tm.assert_produces_warning(SettingWithCopyWarning):\n with tm.assert_cow_warning(warn_copy_on_write):\n subset.iloc[0, 0] = 0\n\n subset._mgr._verify_integrity()\n\n expected = DataFrame({"a": [0, 3], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3))\n tm.assert_frame_equal(subset, expected)\n if using_copy_on_write:\n # original parent dataframe is not modified (CoW)\n tm.assert_frame_equal(df, df_orig)\n else:\n # original parent dataframe is actually updated\n df_orig.iloc[1, 0] = 0\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize(\n "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]\n)\ndef test_subset_column_slice(\n backend, using_copy_on_write, warn_copy_on_write, using_array_manager, dtype\n):\n # Case: taking a subset of the columns of a DataFrame using a slice\n # + afterwards modifying the subset\n dtype_backend, DataFrame, _ = backend\n single_block = 
(\n dtype == "int64" and dtype_backend == "numpy"\n ) and not using_array_manager\n df = DataFrame(\n {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}\n )\n df_orig = df.copy()\n\n subset = df.iloc[:, 1:]\n subset._mgr._verify_integrity()\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(subset, "b"), get_array(df, "b"))\n\n subset.iloc[0, 0] = 0\n assert not np.shares_memory(get_array(subset, "b"), get_array(df, "b"))\n elif warn_copy_on_write:\n with tm.assert_cow_warning(single_block):\n subset.iloc[0, 0] = 0\n else:\n # we only get a warning in case of a single block\n warn = SettingWithCopyWarning if single_block else None\n with pd.option_context("chained_assignment", "warn"):\n with tm.assert_produces_warning(warn):\n subset.iloc[0, 0] = 0\n\n expected = DataFrame({"b": [0, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)})\n tm.assert_frame_equal(subset, expected)\n # original parent dataframe is not modified (also not for BlockManager case,\n # except for single block)\n if not using_copy_on_write and (using_array_manager or single_block):\n df_orig.iloc[0, 1] = 0\n tm.assert_frame_equal(df, df_orig)\n else:\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize(\n "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]\n)\n@pytest.mark.parametrize(\n "row_indexer",\n [slice(1, 2), np.array([False, True, True]), np.array([1, 2])],\n ids=["slice", "mask", "array"],\n)\n@pytest.mark.parametrize(\n "column_indexer",\n [slice("b", "c"), np.array([False, True, True]), ["b", "c"]],\n ids=["slice", "mask", "array"],\n)\ndef test_subset_loc_rows_columns(\n backend,\n dtype,\n row_indexer,\n column_indexer,\n using_array_manager,\n using_copy_on_write,\n warn_copy_on_write,\n):\n # Case: taking a subset of the rows+columns of a DataFrame using .loc\n # + afterwards modifying the subset\n # Generic test for several combinations of row/column indexers, not all\n # of those could actually return a view / need CoW 
(so this test is not\n # checking memory sharing, only ensuring subsequent mutation doesn't\n # affect the parent dataframe)\n dtype_backend, DataFrame, _ = backend\n df = DataFrame(\n {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}\n )\n df_orig = df.copy()\n\n subset = df.loc[row_indexer, column_indexer]\n\n # a few corner cases _do_ actually modify the parent (with both row and column\n # slice, and in case of ArrayManager or BlockManager with single block)\n mutate_parent = (\n isinstance(row_indexer, slice)\n and isinstance(column_indexer, slice)\n and (\n using_array_manager\n or (\n dtype == "int64"\n and dtype_backend == "numpy"\n and not using_copy_on_write\n )\n )\n )\n\n # modifying the subset never modifies the parent\n with tm.assert_cow_warning(warn_copy_on_write and mutate_parent):\n subset.iloc[0, 0] = 0\n\n expected = DataFrame(\n {"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3)\n )\n tm.assert_frame_equal(subset, expected)\n if mutate_parent:\n df_orig.iloc[1, 1] = 0\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize(\n "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]\n)\n@pytest.mark.parametrize(\n "row_indexer",\n [slice(1, 3), np.array([False, True, True]), np.array([1, 2])],\n ids=["slice", "mask", "array"],\n)\n@pytest.mark.parametrize(\n "column_indexer",\n [slice(1, 3), np.array([False, True, True]), [1, 2]],\n ids=["slice", "mask", "array"],\n)\ndef test_subset_iloc_rows_columns(\n backend,\n dtype,\n row_indexer,\n column_indexer,\n using_array_manager,\n using_copy_on_write,\n warn_copy_on_write,\n):\n # Case: taking a subset of the rows+columns of a DataFrame using .iloc\n # + afterwards modifying the subset\n # Generic test for several combinations of row/column indexers, not all\n # of those could actually return a view / need CoW (so this test is not\n # checking memory sharing, only ensuring subsequent mutation doesn't\n # affect the parent dataframe)\n 
dtype_backend, DataFrame, _ = backend\n df = DataFrame(\n {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}\n )\n df_orig = df.copy()\n\n subset = df.iloc[row_indexer, column_indexer]\n\n # a few corner cases _do_ actually modify the parent (with both row and column\n # slice, and in case of ArrayManager or BlockManager with single block)\n mutate_parent = (\n isinstance(row_indexer, slice)\n and isinstance(column_indexer, slice)\n and (\n using_array_manager\n or (\n dtype == "int64"\n and dtype_backend == "numpy"\n and not using_copy_on_write\n )\n )\n )\n\n # modifying the subset never modifies the parent\n with tm.assert_cow_warning(warn_copy_on_write and mutate_parent):\n subset.iloc[0, 0] = 0\n\n expected = DataFrame(\n {"b": [0, 6], "c": np.array([8, 9], dtype=dtype)}, index=range(1, 3)\n )\n tm.assert_frame_equal(subset, expected)\n if mutate_parent:\n df_orig.iloc[1, 1] = 0\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize(\n "indexer",\n [slice(0, 2), np.array([True, True, False]), np.array([0, 1])],\n ids=["slice", "mask", "array"],\n)\ndef test_subset_set_with_row_indexer(\n backend, indexer_si, indexer, using_copy_on_write, warn_copy_on_write\n):\n # Case: setting values with a row indexer on a viewing subset\n # subset[indexer] = value and subset.iloc[indexer] = value\n _, DataFrame, _ = backend\n df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]})\n df_orig = df.copy()\n subset = df[1:4]\n\n if (\n indexer_si is tm.setitem\n and isinstance(indexer, np.ndarray)\n and indexer.dtype == "int"\n ):\n pytest.skip("setitem with labels selects on columns")\n\n if using_copy_on_write:\n indexer_si(subset)[indexer] = 0\n elif warn_copy_on_write:\n with tm.assert_cow_warning():\n indexer_si(subset)[indexer] = 0\n else:\n # INFO iloc no longer raises warning since pandas 1.4\n warn = SettingWithCopyWarning if indexer_si is tm.setitem else None\n with pd.option_context("chained_assignment", 
"warn"):\n with tm.assert_produces_warning(warn):\n indexer_si(subset)[indexer] = 0\n\n expected = DataFrame(\n {"a": [0, 0, 4], "b": [0, 0, 7], "c": [0.0, 0.0, 0.4]}, index=range(1, 4)\n )\n tm.assert_frame_equal(subset, expected)\n if using_copy_on_write:\n # original parent dataframe is not modified (CoW)\n tm.assert_frame_equal(df, df_orig)\n else:\n # original parent dataframe is actually updated\n df_orig[1:3] = 0\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_subset_set_with_mask(backend, using_copy_on_write, warn_copy_on_write):\n # Case: setting values with a mask on a viewing subset: subset[mask] = value\n _, DataFrame, _ = backend\n df = DataFrame({"a": [1, 2, 3, 4], "b": [4, 5, 6, 7], "c": [0.1, 0.2, 0.3, 0.4]})\n df_orig = df.copy()\n subset = df[1:4]\n\n mask = subset > 3\n\n if using_copy_on_write:\n subset[mask] = 0\n elif warn_copy_on_write:\n with tm.assert_cow_warning():\n subset[mask] = 0\n else:\n with pd.option_context("chained_assignment", "warn"):\n with tm.assert_produces_warning(SettingWithCopyWarning):\n subset[mask] = 0\n\n expected = DataFrame(\n {"a": [2, 3, 0], "b": [0, 0, 0], "c": [0.20, 0.3, 0.4]}, index=range(1, 4)\n )\n tm.assert_frame_equal(subset, expected)\n if using_copy_on_write:\n # original parent dataframe is not modified (CoW)\n tm.assert_frame_equal(df, df_orig)\n else:\n # original parent dataframe is actually updated\n df_orig.loc[3, "a"] = 0\n df_orig.loc[1:3, "b"] = 0\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_subset_set_column(backend, using_copy_on_write, warn_copy_on_write):\n # Case: setting a single column on a viewing subset -> subset[col] = value\n dtype_backend, DataFrame, _ = backend\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n subset = df[1:3]\n\n if dtype_backend == "numpy":\n arr = np.array([10, 11], dtype="int64")\n else:\n arr = pd.array([10, 11], dtype="Int64")\n\n if using_copy_on_write or warn_copy_on_write:\n subset["a"] = arr\n else:\n 
with pd.option_context("chained_assignment", "warn"):\n with tm.assert_produces_warning(SettingWithCopyWarning):\n subset["a"] = arr\n\n subset._mgr._verify_integrity()\n expected = DataFrame(\n {"a": [10, 11], "b": [5, 6], "c": [0.2, 0.3]}, index=range(1, 3)\n )\n tm.assert_frame_equal(subset, expected)\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize(\n "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]\n)\ndef test_subset_set_column_with_loc(\n backend, using_copy_on_write, warn_copy_on_write, using_array_manager, dtype\n):\n # Case: setting a single column with loc on a viewing subset\n # -> subset.loc[:, col] = value\n _, DataFrame, _ = backend\n df = DataFrame(\n {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}\n )\n df_orig = df.copy()\n subset = df[1:3]\n\n if using_copy_on_write:\n subset.loc[:, "a"] = np.array([10, 11], dtype="int64")\n elif warn_copy_on_write:\n with tm.assert_cow_warning():\n subset.loc[:, "a"] = np.array([10, 11], dtype="int64")\n else:\n with pd.option_context("chained_assignment", "warn"):\n with tm.assert_produces_warning(\n None,\n raise_on_extra_warnings=not using_array_manager,\n ):\n subset.loc[:, "a"] = np.array([10, 11], dtype="int64")\n\n subset._mgr._verify_integrity()\n expected = DataFrame(\n {"a": [10, 11], "b": [5, 6], "c": np.array([8, 9], dtype=dtype)},\n index=range(1, 3),\n )\n tm.assert_frame_equal(subset, expected)\n if using_copy_on_write:\n # original parent dataframe is not modified (CoW)\n tm.assert_frame_equal(df, df_orig)\n else:\n # original parent dataframe is actually updated\n df_orig.loc[1:3, "a"] = np.array([10, 11], dtype="int64")\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_subset_set_column_with_loc2(\n backend, using_copy_on_write, warn_copy_on_write, using_array_manager\n):\n # Case: setting a single column with loc on a viewing subset\n # -> subset.loc[:, col] = value\n # separate test for case of DataFrame of a single column -> takes 
a separate\n # code path\n _, DataFrame, _ = backend\n df = DataFrame({"a": [1, 2, 3]})\n df_orig = df.copy()\n subset = df[1:3]\n\n if using_copy_on_write:\n subset.loc[:, "a"] = 0\n elif warn_copy_on_write:\n with tm.assert_cow_warning():\n subset.loc[:, "a"] = 0\n else:\n with pd.option_context("chained_assignment", "warn"):\n with tm.assert_produces_warning(\n None,\n raise_on_extra_warnings=not using_array_manager,\n ):\n subset.loc[:, "a"] = 0\n\n subset._mgr._verify_integrity()\n expected = DataFrame({"a": [0, 0]}, index=range(1, 3))\n tm.assert_frame_equal(subset, expected)\n if using_copy_on_write:\n # original parent dataframe is not modified (CoW)\n tm.assert_frame_equal(df, df_orig)\n else:\n # original parent dataframe is actually updated\n df_orig.loc[1:3, "a"] = 0\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize(\n "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]\n)\ndef test_subset_set_columns(backend, using_copy_on_write, warn_copy_on_write, dtype):\n # Case: setting multiple columns on a viewing subset\n # -> subset[[col1, col2]] = value\n dtype_backend, DataFrame, _ = backend\n df = DataFrame(\n {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}\n )\n df_orig = df.copy()\n subset = df[1:3]\n\n if using_copy_on_write or warn_copy_on_write:\n subset[["a", "c"]] = 0\n else:\n with pd.option_context("chained_assignment", "warn"):\n with tm.assert_produces_warning(SettingWithCopyWarning):\n subset[["a", "c"]] = 0\n\n subset._mgr._verify_integrity()\n if using_copy_on_write:\n # first and third column should certainly have no references anymore\n assert all(subset._mgr._has_no_reference(i) for i in [0, 2])\n expected = DataFrame({"a": [0, 0], "b": [5, 6], "c": [0, 0]}, index=range(1, 3))\n if dtype_backend == "nullable":\n # there is not yet a global option, so overriding a column by setting a scalar\n # defaults to numpy dtype even if original column was nullable\n expected["a"] = 
expected["a"].astype("int64")\n expected["c"] = expected["c"].astype("int64")\n\n tm.assert_frame_equal(subset, expected)\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize(\n "indexer",\n [slice("a", "b"), np.array([True, True, False]), ["a", "b"]],\n ids=["slice", "mask", "array"],\n)\ndef test_subset_set_with_column_indexer(\n backend, indexer, using_copy_on_write, warn_copy_on_write\n):\n # Case: setting multiple columns with a column indexer on a viewing subset\n # -> subset.loc[:, [col1, col2]] = value\n _, DataFrame, _ = backend\n df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "c": [4, 5, 6]})\n df_orig = df.copy()\n subset = df[1:3]\n\n if using_copy_on_write:\n subset.loc[:, indexer] = 0\n elif warn_copy_on_write:\n with tm.assert_cow_warning():\n subset.loc[:, indexer] = 0\n else:\n with pd.option_context("chained_assignment", "warn"):\n # As of 2.0, this setitem attempts (successfully) to set values\n # inplace, so the assignment is not chained.\n subset.loc[:, indexer] = 0\n\n subset._mgr._verify_integrity()\n expected = DataFrame({"a": [0, 0], "b": [0.0, 0.0], "c": [5, 6]}, index=range(1, 3))\n tm.assert_frame_equal(subset, expected)\n if using_copy_on_write:\n tm.assert_frame_equal(df, df_orig)\n else:\n # pre-2.0, in the mixed case with BlockManager, only column "a"\n # would be mutated in the parent frame. 
this changed with the\n # enforcement of GH#45333\n df_orig.loc[1:2, ["a", "b"]] = 0\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize(\n "method",\n [\n lambda df: df[["a", "b"]][0:2],\n lambda df: df[0:2][["a", "b"]],\n lambda df: df[["a", "b"]].iloc[0:2],\n lambda df: df[["a", "b"]].loc[0:1],\n lambda df: df[0:2].iloc[:, 0:2],\n lambda df: df[0:2].loc[:, "a":"b"], # type: ignore[misc]\n ],\n ids=[\n "row-getitem-slice",\n "column-getitem",\n "row-iloc-slice",\n "row-loc-slice",\n "column-iloc-slice",\n "column-loc-slice",\n ],\n)\n@pytest.mark.parametrize(\n "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]\n)\ndef test_subset_chained_getitem(\n request,\n backend,\n method,\n dtype,\n using_copy_on_write,\n using_array_manager,\n warn_copy_on_write,\n):\n # Case: creating a subset using multiple, chained getitem calls using views\n # still needs to guarantee proper CoW behaviour\n _, DataFrame, _ = backend\n df = DataFrame(\n {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}\n )\n df_orig = df.copy()\n\n # when not using CoW, it depends on whether we have a single block or not\n # and whether we are slicing the columns -> in that case we have a view\n test_callspec = request.node.callspec.id\n if not using_array_manager:\n subset_is_view = test_callspec in (\n "numpy-single-block-column-iloc-slice",\n "numpy-single-block-column-loc-slice",\n )\n else:\n # with ArrayManager, it doesn't matter whether we have\n # single vs mixed block or numpy vs nullable dtypes\n subset_is_view = test_callspec.endswith(\n ("column-iloc-slice", "column-loc-slice")\n )\n\n # modify subset -> don't modify parent\n subset = method(df)\n\n with tm.assert_cow_warning(warn_copy_on_write and subset_is_view):\n subset.iloc[0, 0] = 0\n if using_copy_on_write or (not subset_is_view):\n tm.assert_frame_equal(df, df_orig)\n else:\n assert df.iloc[0, 0] == 0\n\n # modify parent -> don't modify subset\n subset = method(df)\n with 
tm.assert_cow_warning(warn_copy_on_write and subset_is_view):\n df.iloc[0, 0] = 0\n expected = DataFrame({"a": [1, 2], "b": [4, 5]})\n if using_copy_on_write or not subset_is_view:\n tm.assert_frame_equal(subset, expected)\n else:\n assert subset.iloc[0, 0] == 0\n\n\n@pytest.mark.parametrize(\n "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]\n)\ndef test_subset_chained_getitem_column(\n backend, dtype, using_copy_on_write, warn_copy_on_write\n):\n # Case: creating a subset using multiple, chained getitem calls using views\n # still needs to guarantee proper CoW behaviour\n dtype_backend, DataFrame, Series = backend\n df = DataFrame(\n {"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}\n )\n df_orig = df.copy()\n\n # modify subset -> don't modify parent\n subset = df[:]["a"][0:2]\n df._clear_item_cache()\n with tm.assert_cow_warning(warn_copy_on_write):\n subset.iloc[0] = 0\n if using_copy_on_write:\n tm.assert_frame_equal(df, df_orig)\n else:\n assert df.iloc[0, 0] == 0\n\n # modify parent -> don't modify subset\n subset = df[:]["a"][0:2]\n df._clear_item_cache()\n with tm.assert_cow_warning(warn_copy_on_write):\n df.iloc[0, 0] = 0\n expected = Series([1, 2], name="a")\n if using_copy_on_write:\n tm.assert_series_equal(subset, expected)\n else:\n assert subset.iloc[0] == 0\n\n\n@pytest.mark.parametrize(\n "method",\n [\n lambda s: s["a":"c"]["a":"b"], # type: ignore[misc]\n lambda s: s.iloc[0:3].iloc[0:2],\n lambda s: s.loc["a":"c"].loc["a":"b"], # type: ignore[misc]\n lambda s: s.loc["a":"c"] # type: ignore[misc]\n .iloc[0:3]\n .iloc[0:2]\n .loc["a":"b"] # type: ignore[misc]\n .iloc[0:1],\n ],\n ids=["getitem", "iloc", "loc", "long-chain"],\n)\ndef test_subset_chained_getitem_series(\n backend, method, using_copy_on_write, warn_copy_on_write\n):\n # Case: creating a subset using multiple, chained getitem calls using views\n # still needs to guarantee proper CoW behaviour\n _, _, Series = backend\n s = Series([1, 2, 3], 
index=["a", "b", "c"])\n s_orig = s.copy()\n\n # modify subset -> don't modify parent\n subset = method(s)\n with tm.assert_cow_warning(warn_copy_on_write):\n subset.iloc[0] = 0\n if using_copy_on_write:\n tm.assert_series_equal(s, s_orig)\n else:\n assert s.iloc[0] == 0\n\n # modify parent -> don't modify subset\n subset = s.iloc[0:3].iloc[0:2]\n with tm.assert_cow_warning(warn_copy_on_write):\n s.iloc[0] = 0\n expected = Series([1, 2], index=["a", "b"])\n if using_copy_on_write:\n tm.assert_series_equal(subset, expected)\n else:\n assert subset.iloc[0] == 0\n\n\ndef test_subset_chained_single_block_row(\n using_copy_on_write, using_array_manager, warn_copy_on_write\n):\n # not parametrizing this for dtype backend, since this explicitly tests single block\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})\n df_orig = df.copy()\n\n # modify subset -> don't modify parent\n subset = df[:].iloc[0].iloc[0:2]\n with tm.assert_cow_warning(warn_copy_on_write):\n subset.iloc[0] = 0\n if using_copy_on_write or using_array_manager:\n tm.assert_frame_equal(df, df_orig)\n else:\n assert df.iloc[0, 0] == 0\n\n # modify parent -> don't modify subset\n subset = df[:].iloc[0].iloc[0:2]\n with tm.assert_cow_warning(warn_copy_on_write):\n df.iloc[0, 0] = 0\n expected = Series([1, 4], index=["a", "b"], name=0)\n if using_copy_on_write or using_array_manager:\n tm.assert_series_equal(subset, expected)\n else:\n assert subset.iloc[0] == 0\n\n\n@pytest.mark.parametrize(\n "method",\n [\n lambda df: df[:],\n lambda df: df.loc[:, :],\n lambda df: df.loc[:],\n lambda df: df.iloc[:, :],\n lambda df: df.iloc[:],\n ],\n ids=["getitem", "loc", "loc-rows", "iloc", "iloc-rows"],\n)\ndef test_null_slice(backend, method, using_copy_on_write, warn_copy_on_write):\n # Case: also all variants of indexing with a null slice (:) should return\n # new objects to ensure we correctly use CoW for the results\n dtype_backend, DataFrame, _ = backend\n df = DataFrame({"a": [1, 2, 3], "b": [4, 
5, 6], "c": [7, 8, 9]})\n df_orig = df.copy()\n\n df2 = method(df)\n\n # we always return new objects (shallow copy), regardless of CoW or not\n assert df2 is not df\n\n # and those trigger CoW when mutated\n with tm.assert_cow_warning(warn_copy_on_write):\n df2.iloc[0, 0] = 0\n if using_copy_on_write:\n tm.assert_frame_equal(df, df_orig)\n else:\n assert df.iloc[0, 0] == 0\n\n\n@pytest.mark.parametrize(\n "method",\n [\n lambda s: s[:],\n lambda s: s.loc[:],\n lambda s: s.iloc[:],\n ],\n ids=["getitem", "loc", "iloc"],\n)\ndef test_null_slice_series(backend, method, using_copy_on_write, warn_copy_on_write):\n _, _, Series = backend\n s = Series([1, 2, 3], index=["a", "b", "c"])\n s_orig = s.copy()\n\n s2 = method(s)\n\n # we always return new objects, regardless of CoW or not\n assert s2 is not s\n\n # and those trigger CoW when mutated\n with tm.assert_cow_warning(warn_copy_on_write):\n s2.iloc[0] = 0\n if using_copy_on_write:\n tm.assert_series_equal(s, s_orig)\n else:\n assert s.iloc[0] == 0\n\n\n# TODO add more tests modifying the parent\n\n\n# -----------------------------------------------------------------------------\n# Series -- Indexing operations taking subset + modifying the subset/parent\n\n\ndef test_series_getitem_slice(backend, using_copy_on_write, warn_copy_on_write):\n # Case: taking a slice of a Series + afterwards modifying the subset\n _, _, Series = backend\n s = Series([1, 2, 3], index=["a", "b", "c"])\n s_orig = s.copy()\n\n subset = s[:]\n assert np.shares_memory(get_array(subset), get_array(s))\n\n with tm.assert_cow_warning(warn_copy_on_write):\n subset.iloc[0] = 0\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(subset), get_array(s))\n\n expected = Series([0, 2, 3], index=["a", "b", "c"])\n tm.assert_series_equal(subset, expected)\n\n if using_copy_on_write:\n # original parent series is not modified (CoW)\n tm.assert_series_equal(s, s_orig)\n else:\n # original parent series is actually updated\n assert s.iloc[0] == 
0\n\n\ndef test_series_getitem_ellipsis(using_copy_on_write, warn_copy_on_write):\n # Case: taking a view of a Series using Ellipsis + afterwards modifying the subset\n s = Series([1, 2, 3])\n s_orig = s.copy()\n\n subset = s[...]\n assert np.shares_memory(get_array(subset), get_array(s))\n\n with tm.assert_cow_warning(warn_copy_on_write):\n subset.iloc[0] = 0\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(subset), get_array(s))\n\n expected = Series([0, 2, 3])\n tm.assert_series_equal(subset, expected)\n\n if using_copy_on_write:\n # original parent series is not modified (CoW)\n tm.assert_series_equal(s, s_orig)\n else:\n # original parent series is actually updated\n assert s.iloc[0] == 0\n\n\n@pytest.mark.parametrize(\n "indexer",\n [slice(0, 2), np.array([True, True, False]), np.array([0, 1])],\n ids=["slice", "mask", "array"],\n)\ndef test_series_subset_set_with_indexer(\n backend, indexer_si, indexer, using_copy_on_write, warn_copy_on_write\n):\n # Case: setting values in a viewing Series with an indexer\n _, _, Series = backend\n s = Series([1, 2, 3], index=["a", "b", "c"])\n s_orig = s.copy()\n subset = s[:]\n\n warn = None\n msg = "Series.__setitem__ treating keys as positions is deprecated"\n if (\n indexer_si is tm.setitem\n and isinstance(indexer, np.ndarray)\n and indexer.dtype.kind == "i"\n ):\n warn = FutureWarning\n if warn_copy_on_write:\n with tm.assert_cow_warning(raise_on_extra_warnings=warn is not None):\n indexer_si(subset)[indexer] = 0\n else:\n with tm.assert_produces_warning(warn, match=msg):\n indexer_si(subset)[indexer] = 0\n expected = Series([0, 0, 3], index=["a", "b", "c"])\n tm.assert_series_equal(subset, expected)\n\n if using_copy_on_write:\n tm.assert_series_equal(s, s_orig)\n else:\n tm.assert_series_equal(s, expected)\n\n\n# -----------------------------------------------------------------------------\n# del operator\n\n\ndef test_del_frame(backend, using_copy_on_write, warn_copy_on_write):\n # Case: 
deleting a column with `del` on a viewing child dataframe should\n # not modify parent + update the references\n dtype_backend, DataFrame, _ = backend\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n df2 = df[:]\n\n assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n\n del df2["b"]\n\n assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n tm.assert_frame_equal(df, df_orig)\n tm.assert_frame_equal(df2, df_orig[["a", "c"]])\n df2._mgr._verify_integrity()\n\n with tm.assert_cow_warning(warn_copy_on_write and dtype_backend == "numpy"):\n df.loc[0, "b"] = 200\n assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n df_orig = df.copy()\n\n with tm.assert_cow_warning(warn_copy_on_write):\n df2.loc[0, "a"] = 100\n if using_copy_on_write:\n # modifying child after deleting a column still doesn't update parent\n tm.assert_frame_equal(df, df_orig)\n else:\n assert df.loc[0, "a"] == 100\n\n\ndef test_del_series(backend):\n _, _, Series = backend\n s = Series([1, 2, 3], index=["a", "b", "c"])\n s_orig = s.copy()\n s2 = s[:]\n\n assert np.shares_memory(get_array(s), get_array(s2))\n\n del s2["a"]\n\n assert not np.shares_memory(get_array(s), get_array(s2))\n tm.assert_series_equal(s, s_orig)\n tm.assert_series_equal(s2, s_orig[["b", "c"]])\n\n # modifying s2 doesn't need copy on write (due to `del`, s2 is backed by new array)\n values = s2.values\n s2.loc["b"] = 100\n assert values[0] == 100\n\n\n# -----------------------------------------------------------------------------\n# Accessing column as Series\n\n\ndef test_column_as_series(\n backend, using_copy_on_write, warn_copy_on_write, using_array_manager\n):\n # Case: selecting a single column now also uses Copy-on-Write\n dtype_backend, DataFrame, Series = backend\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n\n s = df["a"]\n\n assert np.shares_memory(get_array(s, "a"), get_array(df, 
"a"))\n\n if using_copy_on_write or using_array_manager:\n s[0] = 0\n else:\n if warn_copy_on_write:\n with tm.assert_cow_warning():\n s[0] = 0\n else:\n warn = SettingWithCopyWarning if dtype_backend == "numpy" else None\n with pd.option_context("chained_assignment", "warn"):\n with tm.assert_produces_warning(warn):\n s[0] = 0\n\n expected = Series([0, 2, 3], name="a")\n tm.assert_series_equal(s, expected)\n if using_copy_on_write:\n # assert not np.shares_memory(s.values, get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n # ensure cached series on getitem is not the changed series\n tm.assert_series_equal(df["a"], df_orig["a"])\n else:\n df_orig.iloc[0, 0] = 0\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_column_as_series_set_with_upcast(\n backend, using_copy_on_write, using_array_manager, warn_copy_on_write\n):\n # Case: selecting a single column now also uses Copy-on-Write -> when\n # setting a value causes an upcast, we don't need to update the parent\n # DataFrame through the cache mechanism\n dtype_backend, DataFrame, Series = backend\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n\n s = df["a"]\n if dtype_backend == "nullable":\n with tm.assert_cow_warning(warn_copy_on_write):\n with pytest.raises(TypeError, match="Invalid value"):\n s[0] = "foo"\n expected = Series([1, 2, 3], name="a")\n elif using_copy_on_write or warn_copy_on_write or using_array_manager:\n # TODO(CoW-warn) assert the FutureWarning for CoW is also raised\n with tm.assert_produces_warning(FutureWarning, match="incompatible dtype"):\n s[0] = "foo"\n expected = Series(["foo", 2, 3], dtype=object, name="a")\n else:\n with pd.option_context("chained_assignment", "warn"):\n msg = "|".join(\n [\n "A value is trying to be set on a copy of a slice from a DataFrame",\n "Setting an item of incompatible dtype is deprecated",\n ]\n )\n with tm.assert_produces_warning(\n (SettingWithCopyWarning, FutureWarning), match=msg\n ):\n s[0] = 
"foo"\n expected = Series(["foo", 2, 3], dtype=object, name="a")\n\n tm.assert_series_equal(s, expected)\n if using_copy_on_write:\n tm.assert_frame_equal(df, df_orig)\n # ensure cached series on getitem is not the changed series\n tm.assert_series_equal(df["a"], df_orig["a"])\n else:\n df_orig["a"] = expected\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize(\n "method",\n [\n lambda df: df["a"],\n lambda df: df.loc[:, "a"],\n lambda df: df.iloc[:, 0],\n ],\n ids=["getitem", "loc", "iloc"],\n)\ndef test_column_as_series_no_item_cache(\n request,\n backend,\n method,\n using_copy_on_write,\n warn_copy_on_write,\n using_array_manager,\n):\n # Case: selecting a single column (which now also uses Copy-on-Write to protect\n # the view) should always give a new object (i.e. not make use of a cache)\n dtype_backend, DataFrame, _ = backend\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n\n s1 = method(df)\n s2 = method(df)\n\n is_iloc = "iloc" in request.node.name\n if using_copy_on_write or warn_copy_on_write or is_iloc:\n assert s1 is not s2\n else:\n assert s1 is s2\n\n if using_copy_on_write or using_array_manager:\n s1.iloc[0] = 0\n elif warn_copy_on_write:\n with tm.assert_cow_warning():\n s1.iloc[0] = 0\n else:\n warn = SettingWithCopyWarning if dtype_backend == "numpy" else None\n with pd.option_context("chained_assignment", "warn"):\n with tm.assert_produces_warning(warn):\n s1.iloc[0] = 0\n\n if using_copy_on_write:\n tm.assert_series_equal(s2, df_orig["a"])\n tm.assert_frame_equal(df, df_orig)\n else:\n assert s2.iloc[0] == 0\n\n\n# TODO add tests for other indexing methods on the Series\n\n\ndef test_dataframe_add_column_from_series(backend, using_copy_on_write):\n # Case: adding a new column to a DataFrame from an existing column/series\n # -> delays copy under CoW\n _, DataFrame, Series = backend\n df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})\n\n s = Series([10, 11, 12])\n df["new"] = 
s\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "new"), get_array(s))\n else:\n assert not np.shares_memory(get_array(df, "new"), get_array(s))\n\n # editing series -> doesn't modify column in frame\n s[0] = 0\n expected = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3], "new": [10, 11, 12]})\n tm.assert_frame_equal(df, expected)\n\n\n@pytest.mark.parametrize("val", [100, "a"])\n@pytest.mark.parametrize(\n "indexer_func, indexer",\n [\n (tm.loc, (0, "a")),\n (tm.iloc, (0, 0)),\n (tm.loc, ([0], "a")),\n (tm.iloc, ([0], 0)),\n (tm.loc, (slice(None), "a")),\n (tm.iloc, (slice(None), 0)),\n ],\n)\n@pytest.mark.parametrize(\n "col", [[0.1, 0.2, 0.3], [7, 8, 9]], ids=["mixed-block", "single-block"]\n)\ndef test_set_value_copy_only_necessary_column(\n using_copy_on_write, warn_copy_on_write, indexer_func, indexer, val, col\n):\n # When setting inplace, only copy column that is modified instead of the whole\n # block (by splitting the block)\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": col})\n df_orig = df.copy()\n view = df[:]\n\n if val == "a" and not warn_copy_on_write:\n with tm.assert_produces_warning(\n FutureWarning, match="Setting an item of incompatible dtype is deprecated"\n ):\n indexer_func(df)[indexer] = val\n if val == "a" and warn_copy_on_write:\n with tm.assert_produces_warning(\n FutureWarning, match="incompatible dtype|Setting a value on a view"\n ):\n indexer_func(df)[indexer] = val\n else:\n with tm.assert_cow_warning(warn_copy_on_write and val == 100):\n indexer_func(df)[indexer] = val\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "b"), get_array(view, "b"))\n assert not np.shares_memory(get_array(df, "a"), get_array(view, "a"))\n tm.assert_frame_equal(view, df_orig)\n else:\n assert np.shares_memory(get_array(df, "c"), get_array(view, "c"))\n if val == "a":\n assert not np.shares_memory(get_array(df, "a"), get_array(view, "a"))\n else:\n assert np.shares_memory(get_array(df, "a"), get_array(view, 
"a"))\n\n\ndef test_series_midx_slice(using_copy_on_write, warn_copy_on_write):\n ser = Series([1, 2, 3], index=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 5]]))\n ser_orig = ser.copy()\n result = ser[1]\n assert np.shares_memory(get_array(ser), get_array(result))\n with tm.assert_cow_warning(warn_copy_on_write):\n result.iloc[0] = 100\n if using_copy_on_write:\n tm.assert_series_equal(ser, ser_orig)\n else:\n expected = Series(\n [100, 2, 3], index=pd.MultiIndex.from_arrays([[1, 1, 2], [3, 4, 5]])\n )\n tm.assert_series_equal(ser, expected)\n\n\ndef test_getitem_midx_slice(\n using_copy_on_write, warn_copy_on_write, using_array_manager\n):\n df = DataFrame({("a", "x"): [1, 2], ("a", "y"): 1, ("b", "x"): 2})\n df_orig = df.copy()\n new_df = df[("a",)]\n\n if using_copy_on_write:\n assert not new_df._mgr._has_no_reference(0)\n\n if not using_array_manager:\n assert np.shares_memory(get_array(df, ("a", "x")), get_array(new_df, "x"))\n if using_copy_on_write:\n new_df.iloc[0, 0] = 100\n tm.assert_frame_equal(df_orig, df)\n else:\n if warn_copy_on_write:\n with tm.assert_cow_warning():\n new_df.iloc[0, 0] = 100\n else:\n with pd.option_context("chained_assignment", "warn"):\n with tm.assert_produces_warning(SettingWithCopyWarning):\n new_df.iloc[0, 0] = 100\n assert df.iloc[0, 0] == 100\n\n\ndef test_series_midx_tuples_slice(using_copy_on_write, warn_copy_on_write):\n ser = Series(\n [1, 2, 3],\n index=pd.MultiIndex.from_tuples([((1, 2), 3), ((1, 2), 4), ((2, 3), 4)]),\n )\n result = ser[(1, 2)]\n assert np.shares_memory(get_array(ser), get_array(result))\n with tm.assert_cow_warning(warn_copy_on_write):\n result.iloc[0] = 100\n if using_copy_on_write:\n expected = Series(\n [1, 2, 3],\n index=pd.MultiIndex.from_tuples([((1, 2), 3), ((1, 2), 4), ((2, 3), 4)]),\n )\n tm.assert_series_equal(ser, expected)\n\n\ndef test_midx_read_only_bool_indexer():\n # GH#56635\n def mklbl(prefix, n):\n return [f"{prefix}{i}" for i in range(n)]\n\n idx = pd.MultiIndex.from_product(\n 
[mklbl("A", 4), mklbl("B", 2), mklbl("C", 4), mklbl("D", 2)]\n )\n cols = pd.MultiIndex.from_tuples(\n [("a", "foo"), ("a", "bar"), ("b", "foo"), ("b", "bah")], names=["lvl0", "lvl1"]\n )\n df = DataFrame(1, index=idx, columns=cols).sort_index().sort_index(axis=1)\n\n mask = df[("a", "foo")] == 1\n expected_mask = mask.copy()\n result = df.loc[pd.IndexSlice[mask, :, ["C1", "C3"]], :]\n expected = df.loc[pd.IndexSlice[:, :, ["C1", "C3"]], :]\n tm.assert_frame_equal(result, expected)\n tm.assert_series_equal(mask, expected_mask)\n\n\ndef test_loc_enlarging_with_dataframe(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3]})\n rhs = DataFrame({"b": [1, 2, 3], "c": [4, 5, 6]})\n rhs_orig = rhs.copy()\n df.loc[:, ["b", "c"]] = rhs\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "b"), get_array(rhs, "b"))\n assert np.shares_memory(get_array(df, "c"), get_array(rhs, "c"))\n assert not df._mgr._has_no_reference(1)\n else:\n assert not np.shares_memory(get_array(df, "b"), get_array(rhs, "b"))\n\n df.iloc[0, 1] = 100\n tm.assert_frame_equal(rhs, rhs_orig)\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_indexing.py | test_indexing.py | Python | 42,980 | 0.95 | 0.098736 | 0.128372 | react-lib | 3 | 2024-05-28T00:56:25.715968 | BSD-3-Clause | true | cd293f4494287feba6eb03a27c932e19 |
import numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.tests.copy_view.util import get_array\n\n\n@td.skip_array_manager_invalid_test\ndef test_consolidate(using_copy_on_write):\n # create unconsolidated DataFrame\n df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})\n df["c"] = [4, 5, 6]\n\n # take a viewing subset\n subset = df[:]\n\n # each block of subset references a block of df\n assert all(blk.refs.has_reference() for blk in subset._mgr.blocks)\n\n # consolidate the two int64 blocks\n subset._consolidate_inplace()\n\n # the float64 block still references the parent one because it still a view\n assert subset._mgr.blocks[0].refs.has_reference()\n # equivalent of assert np.shares_memory(df["b"].values, subset["b"].values)\n # but avoids caching df["b"]\n assert np.shares_memory(get_array(df, "b"), get_array(subset, "b"))\n\n # the new consolidated int64 block does not reference another\n assert not subset._mgr.blocks[1].refs.has_reference()\n\n # the parent dataframe now also only is linked for the float column\n assert not df._mgr.blocks[0].refs.has_reference()\n assert df._mgr.blocks[1].refs.has_reference()\n assert not df._mgr.blocks[2].refs.has_reference()\n\n # and modifying subset still doesn't modify parent\n if using_copy_on_write:\n subset.iloc[0, 1] = 0.0\n assert not df._mgr.blocks[1].refs.has_reference()\n assert df.loc[0, "b"] == 0.1\n\n\n@pytest.mark.single_cpu\n@td.skip_array_manager_invalid_test\ndef test_switch_options():\n # ensure we can switch the value of the option within one session\n # (assuming data is constructed after switching)\n\n # using the option_context to ensure we set back to global option value\n # after running the test\n with pd.option_context("mode.copy_on_write", False):\n df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})\n subset = df[:]\n subset.iloc[0, 0] = 0\n # df updated 
with CoW disabled\n assert df.iloc[0, 0] == 0\n\n pd.options.mode.copy_on_write = True\n df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})\n subset = df[:]\n subset.iloc[0, 0] = 0\n # df not updated with CoW enabled\n assert df.iloc[0, 0] == 1\n\n pd.options.mode.copy_on_write = False\n df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})\n subset = df[:]\n subset.iloc[0, 0] = 0\n # df updated with CoW disabled\n assert df.iloc[0, 0] == 0\n\n\n@td.skip_array_manager_invalid_test\n@pytest.mark.parametrize("dtype", [np.intp, np.int8])\n@pytest.mark.parametrize(\n "locs, arr",\n [\n ([0], np.array([-1, -2, -3])),\n ([1], np.array([-1, -2, -3])),\n ([5], np.array([-1, -2, -3])),\n ([0, 1], np.array([[-1, -2, -3], [-4, -5, -6]]).T),\n ([0, 2], np.array([[-1, -2, -3], [-4, -5, -6]]).T),\n ([0, 1, 2], np.array([[-1, -2, -3], [-4, -5, -6], [-4, -5, -6]]).T),\n ([1, 2], np.array([[-1, -2, -3], [-4, -5, -6]]).T),\n ([1, 3], np.array([[-1, -2, -3], [-4, -5, -6]]).T),\n ([1, 3], np.array([[-1, -2, -3], [-4, -5, -6]]).T),\n ],\n)\ndef test_iset_splits_blocks_inplace(using_copy_on_write, locs, arr, dtype):\n # Nothing currently calls iset with\n # more than 1 loc with inplace=True (only happens with inplace=False)\n # but ensure that it works\n df = DataFrame(\n {\n "a": [1, 2, 3],\n "b": [4, 5, 6],\n "c": [7, 8, 9],\n "d": [10, 11, 12],\n "e": [13, 14, 15],\n "f": Series(["a", "b", "c"], dtype=object),\n },\n )\n arr = arr.astype(dtype)\n df_orig = df.copy()\n df2 = df.copy(deep=None) # Trigger a CoW (if enabled, otherwise makes copy)\n df2._mgr.iset(locs, arr, inplace=True)\n\n tm.assert_frame_equal(df, df_orig)\n\n if using_copy_on_write:\n for i, col in enumerate(df.columns):\n if i not in locs:\n assert np.shares_memory(get_array(df, col), get_array(df2, col))\n else:\n for col in df.columns:\n assert not np.shares_memory(get_array(df, col), get_array(df2, col))\n\n\ndef test_exponential_backoff():\n # GH#55518\n df = DataFrame({"a": [1, 2, 3]})\n for i in 
range(490):\n df.copy(deep=False)\n\n assert len(df._mgr.blocks[0].refs.referenced_blocks) == 491\n\n df = DataFrame({"a": [1, 2, 3]})\n dfs = [df.copy(deep=False) for i in range(510)]\n\n for i in range(20):\n df.copy(deep=False)\n assert len(df._mgr.blocks[0].refs.referenced_blocks) == 531\n assert df._mgr.blocks[0].refs.clear_counter == 1000\n\n for i in range(500):\n df.copy(deep=False)\n\n # Don't reduce since we still have over 500 objects alive\n assert df._mgr.blocks[0].refs.clear_counter == 1000\n\n dfs = dfs[:300]\n for i in range(500):\n df.copy(deep=False)\n\n # Reduce since there are less than 500 objects alive\n assert df._mgr.blocks[0].refs.clear_counter == 500\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_internals.py | test_internals.py | Python | 5,063 | 0.95 | 0.116883 | 0.184 | react-lib | 606 | 2024-09-14T08:47:18.018312 | BSD-3-Clause | true | 946badd3f6e17c4d946f6929a3da1ed3 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n NA,\n ArrowDtype,\n DataFrame,\n Interval,\n NaT,\n Series,\n Timestamp,\n interval_range,\n option_context,\n)\nimport pandas._testing as tm\nfrom pandas.tests.copy_view.util import get_array\n\n\n@pytest.mark.parametrize("method", ["pad", "nearest", "linear"])\ndef test_interpolate_no_op(using_copy_on_write, method):\n df = DataFrame({"a": [1, 2]})\n df_orig = df.copy()\n\n warn = None\n if method == "pad":\n warn = FutureWarning\n msg = "DataFrame.interpolate with method=pad is deprecated"\n with tm.assert_produces_warning(warn, match=msg):\n result = df.interpolate(method=method)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n\n result.iloc[0, 0] = 100\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize("func", ["ffill", "bfill"])\ndef test_interp_fill_functions(using_copy_on_write, func):\n # Check that these takes the same code paths as interpolate\n df = DataFrame({"a": [1, 2]})\n df_orig = df.copy()\n\n result = getattr(df, func)()\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n\n result.iloc[0, 0] = 100\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize("func", ["ffill", "bfill"])\n@pytest.mark.parametrize(\n "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]\n)\ndef test_interpolate_triggers_copy(using_copy_on_write, vals, func):\n df = DataFrame({"a": vals})\n result = getattr(df, func)()\n\n assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n if 
using_copy_on_write:\n # Check that we don't have references when triggering a copy\n assert result._mgr._has_no_reference(0)\n\n\n@pytest.mark.parametrize(\n "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]\n)\ndef test_interpolate_inplace_no_reference_no_copy(using_copy_on_write, vals):\n df = DataFrame({"a": vals})\n arr = get_array(df, "a")\n df.interpolate(method="linear", inplace=True)\n\n assert np.shares_memory(arr, get_array(df, "a"))\n if using_copy_on_write:\n # Check that we don't have references when triggering a copy\n assert df._mgr._has_no_reference(0)\n\n\n@pytest.mark.parametrize(\n "vals", [[1, np.nan, 2], [Timestamp("2019-12-31"), NaT, Timestamp("2020-12-31")]]\n)\ndef test_interpolate_inplace_with_refs(using_copy_on_write, vals, warn_copy_on_write):\n df = DataFrame({"a": [1, np.nan, 2]})\n df_orig = df.copy()\n arr = get_array(df, "a")\n view = df[:]\n with tm.assert_cow_warning(warn_copy_on_write):\n df.interpolate(method="linear", inplace=True)\n\n if using_copy_on_write:\n # Check that copy was triggered in interpolate and that we don't\n # have any references left\n assert not np.shares_memory(arr, get_array(df, "a"))\n tm.assert_frame_equal(df_orig, view)\n assert df._mgr._has_no_reference(0)\n assert view._mgr._has_no_reference(0)\n else:\n assert np.shares_memory(arr, get_array(df, "a"))\n\n\n@pytest.mark.parametrize("func", ["ffill", "bfill"])\n@pytest.mark.parametrize("dtype", ["float64", "Float64"])\ndef test_interp_fill_functions_inplace(\n using_copy_on_write, func, warn_copy_on_write, dtype\n):\n # Check that these takes the same code paths as interpolate\n df = DataFrame({"a": [1, np.nan, 2]}, dtype=dtype)\n df_orig = df.copy()\n arr = get_array(df, "a")\n view = df[:]\n\n with tm.assert_cow_warning(warn_copy_on_write and dtype == "float64"):\n getattr(df, func)(inplace=True)\n\n if using_copy_on_write:\n # Check that copy was triggered in interpolate and that we don't\n # have any references 
left\n assert not np.shares_memory(arr, get_array(df, "a"))\n tm.assert_frame_equal(df_orig, view)\n assert df._mgr._has_no_reference(0)\n assert view._mgr._has_no_reference(0)\n else:\n assert np.shares_memory(arr, get_array(df, "a")) is (dtype == "float64")\n\n\ndef test_interpolate_cannot_with_object_dtype(using_copy_on_write):\n df = DataFrame({"a": ["a", np.nan, "c"], "b": 1})\n df["a"] = df["a"].astype(object)\n df_orig = df.copy()\n\n msg = "DataFrame.interpolate with object dtype"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = df.interpolate(method="linear")\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n\n result.iloc[0, 0] = Timestamp("2021-12-31")\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_interpolate_object_convert_no_op(using_copy_on_write, using_infer_string):\n df = DataFrame({"a": ["a", "b", "c"], "b": 1})\n df["a"] = df["a"].astype(object)\n arr_a = get_array(df, "a")\n msg = "DataFrame.interpolate with method=pad is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.interpolate(method="pad", inplace=True)\n\n # Now CoW makes a copy, it should not!\n if using_copy_on_write and not using_infer_string:\n assert df._mgr._has_no_reference(0)\n assert np.shares_memory(arr_a, get_array(df, "a"))\n\n\ndef test_interpolate_object_convert_copies(using_copy_on_write):\n df = DataFrame({"a": Series([1, 2], dtype=object), "b": 1})\n arr_a = get_array(df, "a")\n msg = "DataFrame.interpolate with method=pad is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.interpolate(method="pad", inplace=True)\n\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n assert not np.shares_memory(arr_a, get_array(df, "a"))\n\n\ndef 
test_interpolate_downcast(using_copy_on_write):\n df = DataFrame({"a": [1, np.nan, 2.5], "b": 1})\n arr_a = get_array(df, "a")\n msg = "DataFrame.interpolate with method=pad is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.interpolate(method="pad", inplace=True, downcast="infer")\n\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n assert np.shares_memory(arr_a, get_array(df, "a"))\n\n\ndef test_interpolate_downcast_reference_triggers_copy(using_copy_on_write):\n df = DataFrame({"a": [1, np.nan, 2.5], "b": 1})\n df_orig = df.copy()\n arr_a = get_array(df, "a")\n view = df[:]\n msg = "DataFrame.interpolate with method=pad is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.interpolate(method="pad", inplace=True, downcast="infer")\n\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n assert not np.shares_memory(arr_a, get_array(df, "a"))\n tm.assert_frame_equal(df_orig, view)\n else:\n tm.assert_frame_equal(df, view)\n\n\ndef test_fillna(using_copy_on_write):\n df = DataFrame({"a": [1.5, np.nan], "b": 1})\n df_orig = df.copy()\n\n df2 = df.fillna(5.5)\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n else:\n assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n\n df2.iloc[0, 1] = 100\n tm.assert_frame_equal(df_orig, df)\n\n\ndef test_fillna_dict(using_copy_on_write):\n df = DataFrame({"a": [1.5, np.nan], "b": 1})\n df_orig = df.copy()\n\n df2 = df.fillna({"a": 100.5})\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n else:\n assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n\n df2.iloc[0, 1] = 100\n tm.assert_frame_equal(df_orig, df)\n\n\n@pytest.mark.parametrize("downcast", [None, False])\ndef test_fillna_inplace(using_copy_on_write, downcast):\n df = DataFrame({"a": [1.5, np.nan], 
"b": 1})\n arr_a = get_array(df, "a")\n arr_b = get_array(df, "b")\n\n msg = "The 'downcast' keyword in fillna is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.fillna(5.5, inplace=True, downcast=downcast)\n assert np.shares_memory(get_array(df, "a"), arr_a)\n assert np.shares_memory(get_array(df, "b"), arr_b)\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n assert df._mgr._has_no_reference(1)\n\n\ndef test_fillna_inplace_reference(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1.5, np.nan], "b": 1})\n df_orig = df.copy()\n arr_a = get_array(df, "a")\n arr_b = get_array(df, "b")\n view = df[:]\n\n with tm.assert_cow_warning(warn_copy_on_write):\n df.fillna(5.5, inplace=True)\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df, "a"), arr_a)\n assert np.shares_memory(get_array(df, "b"), arr_b)\n assert view._mgr._has_no_reference(0)\n assert df._mgr._has_no_reference(0)\n tm.assert_frame_equal(view, df_orig)\n else:\n assert np.shares_memory(get_array(df, "a"), arr_a)\n assert np.shares_memory(get_array(df, "b"), arr_b)\n expected = DataFrame({"a": [1.5, 5.5], "b": 1})\n tm.assert_frame_equal(df, expected)\n\n\ndef test_fillna_interval_inplace_reference(using_copy_on_write, warn_copy_on_write):\n # Set dtype explicitly to avoid implicit cast when setting nan\n ser = Series(\n interval_range(start=0, end=5), name="a", dtype="interval[float64, right]"\n )\n ser.iloc[1] = np.nan\n\n ser_orig = ser.copy()\n view = ser[:]\n with tm.assert_cow_warning(warn_copy_on_write):\n ser.fillna(value=Interval(left=0, right=5), inplace=True)\n\n if using_copy_on_write:\n assert not np.shares_memory(\n get_array(ser, "a").left.values, get_array(view, "a").left.values\n )\n tm.assert_series_equal(view, ser_orig)\n else:\n assert np.shares_memory(\n get_array(ser, "a").left.values, get_array(view, "a").left.values\n )\n\n\ndef test_fillna_series_empty_arg(using_copy_on_write):\n ser = Series([1, np.nan, 
2])\n ser_orig = ser.copy()\n result = ser.fillna({})\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(ser), get_array(result))\n else:\n assert not np.shares_memory(get_array(ser), get_array(result))\n\n ser.iloc[0] = 100.5\n tm.assert_series_equal(ser_orig, result)\n\n\ndef test_fillna_series_empty_arg_inplace(using_copy_on_write):\n ser = Series([1, np.nan, 2])\n arr = get_array(ser)\n ser.fillna({}, inplace=True)\n\n assert np.shares_memory(get_array(ser), arr)\n if using_copy_on_write:\n assert ser._mgr._has_no_reference(0)\n\n\ndef test_fillna_ea_noop_shares_memory(\n using_copy_on_write, any_numeric_ea_and_arrow_dtype\n):\n df = DataFrame({"a": [1, NA, 3], "b": 1}, dtype=any_numeric_ea_and_arrow_dtype)\n df_orig = df.copy()\n df2 = df.fillna(100)\n\n assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n assert not df2._mgr._has_no_reference(1)\n elif isinstance(df.dtypes.iloc[0], ArrowDtype):\n # arrow is immutable, so no-ops do not need to copy underlying array\n assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n else:\n assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n\n tm.assert_frame_equal(df_orig, df)\n\n df2.iloc[0, 1] = 100\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n assert df2._mgr._has_no_reference(1)\n assert df._mgr._has_no_reference(1)\n tm.assert_frame_equal(df_orig, df)\n\n\ndef test_fillna_inplace_ea_noop_shares_memory(\n using_copy_on_write, warn_copy_on_write, any_numeric_ea_and_arrow_dtype\n):\n df = DataFrame({"a": [1, NA, 3], "b": 1}, dtype=any_numeric_ea_and_arrow_dtype)\n df_orig = df.copy()\n view = df[:]\n with tm.assert_cow_warning(warn_copy_on_write):\n df.fillna(100, inplace=True)\n\n if isinstance(df["a"].dtype, ArrowDtype) or using_copy_on_write:\n assert not np.shares_memory(get_array(df, "a"), get_array(view, 
"a"))\n else:\n # MaskedArray can actually respect inplace=True\n assert np.shares_memory(get_array(df, "a"), get_array(view, "a"))\n\n assert np.shares_memory(get_array(df, "b"), get_array(view, "b"))\n if using_copy_on_write:\n assert not df._mgr._has_no_reference(1)\n assert not view._mgr._has_no_reference(1)\n\n with tm.assert_cow_warning(\n warn_copy_on_write and "pyarrow" not in any_numeric_ea_and_arrow_dtype\n ):\n df.iloc[0, 1] = 100\n if isinstance(df["a"].dtype, ArrowDtype) or using_copy_on_write:\n tm.assert_frame_equal(df_orig, view)\n else:\n # we actually have a view\n tm.assert_frame_equal(df, view)\n\n\ndef test_fillna_chained_assignment(using_copy_on_write):\n df = DataFrame({"a": [1, np.nan, 2], "b": 1})\n df_orig = df.copy()\n if using_copy_on_write:\n with tm.raises_chained_assignment_error():\n df["a"].fillna(100, inplace=True)\n tm.assert_frame_equal(df, df_orig)\n\n with tm.raises_chained_assignment_error():\n df[["a"]].fillna(100, inplace=True)\n tm.assert_frame_equal(df, df_orig)\n else:\n with tm.assert_produces_warning(None):\n with option_context("mode.chained_assignment", None):\n df[["a"]].fillna(100, inplace=True)\n\n with tm.assert_produces_warning(None):\n with option_context("mode.chained_assignment", None):\n df[df.a > 5].fillna(100, inplace=True)\n\n with tm.assert_produces_warning(FutureWarning, match="inplace method"):\n df["a"].fillna(100, inplace=True)\n\n\n@pytest.mark.parametrize("func", ["interpolate", "ffill", "bfill"])\ndef test_interpolate_chained_assignment(using_copy_on_write, func):\n df = DataFrame({"a": [1, np.nan, 2], "b": 1})\n df_orig = df.copy()\n if using_copy_on_write:\n with tm.raises_chained_assignment_error():\n getattr(df["a"], func)(inplace=True)\n tm.assert_frame_equal(df, df_orig)\n\n with tm.raises_chained_assignment_error():\n getattr(df[["a"]], func)(inplace=True)\n tm.assert_frame_equal(df, df_orig)\n else:\n with tm.assert_produces_warning(FutureWarning, match="inplace method"):\n getattr(df["a"], 
func)(inplace=True)\n\n with tm.assert_produces_warning(None):\n with option_context("mode.chained_assignment", None):\n getattr(df[["a"]], func)(inplace=True)\n\n with tm.assert_produces_warning(None):\n with option_context("mode.chained_assignment", None):\n getattr(df[df["a"] > 1], func)(inplace=True)\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_interp_fillna.py | test_interp_fillna.py | Python | 15,299 | 0.95 | 0.117783 | 0.037901 | node-utils | 102 | 2023-08-22T00:14:51.362894 | MIT | true | b4f55b21da27ad4ef48358cf4bc2bc53 |
import numpy as np\nimport pytest\n\nfrom pandas.compat import HAS_PYARROW\nfrom pandas.errors import SettingWithCopyWarning\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n Period,\n Series,\n Timestamp,\n date_range,\n option_context,\n period_range,\n)\nimport pandas._testing as tm\nfrom pandas.tests.copy_view.util import get_array\n\n\ndef test_copy(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_copy = df.copy()\n\n # the deep copy by defaults takes a shallow copy of the Index\n assert df_copy.index is not df.index\n assert df_copy.columns is not df.columns\n assert df_copy.index.is_(df.index)\n assert df_copy.columns.is_(df.columns)\n\n # the deep copy doesn't share memory\n assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))\n if using_copy_on_write:\n assert not df_copy._mgr.blocks[0].refs.has_reference()\n assert not df_copy._mgr.blocks[1].refs.has_reference()\n\n # mutating copy doesn't mutate original\n df_copy.iloc[0, 0] = 0\n assert df.iloc[0, 0] == 1\n\n\ndef test_copy_shallow(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_copy = df.copy(deep=False)\n\n # the shallow copy also makes a shallow copy of the index\n if using_copy_on_write:\n assert df_copy.index is not df.index\n assert df_copy.columns is not df.columns\n assert df_copy.index.is_(df.index)\n assert df_copy.columns.is_(df.columns)\n else:\n assert df_copy.index is df.index\n assert df_copy.columns is df.columns\n\n # the shallow copy still shares memory\n assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))\n if using_copy_on_write:\n assert df_copy._mgr.blocks[0].refs.has_reference()\n assert df_copy._mgr.blocks[1].refs.has_reference()\n\n if using_copy_on_write:\n # mutating shallow copy doesn't mutate original\n df_copy.iloc[0, 0] = 0\n assert df.iloc[0, 0] == 1\n # mutating triggered a 
copy-on-write -> no longer shares memory\n assert not np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))\n # but still shares memory for the other columns/blocks\n assert np.shares_memory(get_array(df_copy, "c"), get_array(df, "c"))\n else:\n # mutating shallow copy does mutate original\n with tm.assert_cow_warning(warn_copy_on_write):\n df_copy.iloc[0, 0] = 0\n assert df.iloc[0, 0] == 0\n # and still shares memory\n assert np.shares_memory(get_array(df_copy, "a"), get_array(df, "a"))\n\n\n@pytest.mark.parametrize("copy", [True, None, False])\n@pytest.mark.parametrize(\n "method",\n [\n lambda df, copy: df.rename(columns=str.lower, copy=copy),\n lambda df, copy: df.reindex(columns=["a", "c"], copy=copy),\n lambda df, copy: df.reindex_like(df, copy=copy),\n lambda df, copy: df.align(df, copy=copy)[0],\n lambda df, copy: df.set_axis(["a", "b", "c"], axis="index", copy=copy),\n lambda df, copy: df.rename_axis(index="test", copy=copy),\n lambda df, copy: df.rename_axis(columns="test", copy=copy),\n lambda df, copy: df.astype({"b": "int64"}, copy=copy),\n # lambda df, copy: df.swaplevel(0, 0, copy=copy),\n lambda df, copy: df.swapaxes(0, 0, copy=copy),\n lambda df, copy: df.truncate(0, 5, copy=copy),\n lambda df, copy: df.infer_objects(copy=copy),\n lambda df, copy: df.to_timestamp(copy=copy),\n lambda df, copy: df.to_period(freq="D", copy=copy),\n lambda df, copy: df.tz_localize("US/Central", copy=copy),\n lambda df, copy: df.tz_convert("US/Central", copy=copy),\n lambda df, copy: df.set_flags(allows_duplicate_labels=False, copy=copy),\n ],\n ids=[\n "rename",\n "reindex",\n "reindex_like",\n "align",\n "set_axis",\n "rename_axis0",\n "rename_axis1",\n "astype",\n # "swaplevel", # only series\n "swapaxes",\n "truncate",\n "infer_objects",\n "to_timestamp",\n "to_period",\n "tz_localize",\n "tz_convert",\n "set_flags",\n ],\n)\ndef test_methods_copy_keyword(\n request, method, copy, using_copy_on_write, using_array_manager\n):\n index = None\n if 
"to_timestamp" in request.node.callspec.id:\n index = period_range("2012-01-01", freq="D", periods=3)\n elif "to_period" in request.node.callspec.id:\n index = date_range("2012-01-01", freq="D", periods=3)\n elif "tz_localize" in request.node.callspec.id:\n index = date_range("2012-01-01", freq="D", periods=3)\n elif "tz_convert" in request.node.callspec.id:\n index = date_range("2012-01-01", freq="D", periods=3, tz="Europe/Brussels")\n\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}, index=index)\n\n if "swapaxes" in request.node.callspec.id:\n msg = "'DataFrame.swapaxes' is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df2 = method(df, copy=copy)\n else:\n df2 = method(df, copy=copy)\n\n share_memory = using_copy_on_write or copy is False\n\n if request.node.callspec.id.startswith("reindex-"):\n # TODO copy=False without CoW still returns a copy in this case\n if not using_copy_on_write and not using_array_manager and copy is False:\n share_memory = False\n\n if share_memory:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n\n@pytest.mark.parametrize("copy", [True, None, False])\n@pytest.mark.parametrize(\n "method",\n [\n lambda ser, copy: ser.rename(index={0: 100}, copy=copy),\n lambda ser, copy: ser.rename(None, copy=copy),\n lambda ser, copy: ser.reindex(index=ser.index, copy=copy),\n lambda ser, copy: ser.reindex_like(ser, copy=copy),\n lambda ser, copy: ser.align(ser, copy=copy)[0],\n lambda ser, copy: ser.set_axis(["a", "b", "c"], axis="index", copy=copy),\n lambda ser, copy: ser.rename_axis(index="test", copy=copy),\n lambda ser, copy: ser.astype("int64", copy=copy),\n lambda ser, copy: ser.swaplevel(0, 1, copy=copy),\n lambda ser, copy: ser.swapaxes(0, 0, copy=copy),\n lambda ser, copy: ser.truncate(0, 5, copy=copy),\n lambda ser, copy: ser.infer_objects(copy=copy),\n lambda ser, copy: 
ser.to_timestamp(copy=copy),\n lambda ser, copy: ser.to_period(freq="D", copy=copy),\n lambda ser, copy: ser.tz_localize("US/Central", copy=copy),\n lambda ser, copy: ser.tz_convert("US/Central", copy=copy),\n lambda ser, copy: ser.set_flags(allows_duplicate_labels=False, copy=copy),\n ],\n ids=[\n "rename (dict)",\n "rename",\n "reindex",\n "reindex_like",\n "align",\n "set_axis",\n "rename_axis0",\n "astype",\n "swaplevel",\n "swapaxes",\n "truncate",\n "infer_objects",\n "to_timestamp",\n "to_period",\n "tz_localize",\n "tz_convert",\n "set_flags",\n ],\n)\ndef test_methods_series_copy_keyword(request, method, copy, using_copy_on_write):\n index = None\n if "to_timestamp" in request.node.callspec.id:\n index = period_range("2012-01-01", freq="D", periods=3)\n elif "to_period" in request.node.callspec.id:\n index = date_range("2012-01-01", freq="D", periods=3)\n elif "tz_localize" in request.node.callspec.id:\n index = date_range("2012-01-01", freq="D", periods=3)\n elif "tz_convert" in request.node.callspec.id:\n index = date_range("2012-01-01", freq="D", periods=3, tz="Europe/Brussels")\n elif "swaplevel" in request.node.callspec.id:\n index = MultiIndex.from_arrays([[1, 2, 3], [4, 5, 6]])\n\n ser = Series([1, 2, 3], index=index)\n\n if "swapaxes" in request.node.callspec.id:\n msg = "'Series.swapaxes' is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n ser2 = method(ser, copy=copy)\n else:\n ser2 = method(ser, copy=copy)\n\n share_memory = using_copy_on_write or copy is False\n\n if share_memory:\n assert np.shares_memory(get_array(ser2), get_array(ser))\n else:\n assert not np.shares_memory(get_array(ser2), get_array(ser))\n\n\n@pytest.mark.parametrize("copy", [True, None, False])\ndef test_transpose_copy_keyword(using_copy_on_write, copy, using_array_manager):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n result = df.transpose(copy=copy)\n share_memory = using_copy_on_write or copy is False or copy is None\n share_memory = 
share_memory and not using_array_manager\n\n if share_memory:\n assert np.shares_memory(get_array(df, "a"), get_array(result, 0))\n else:\n assert not np.shares_memory(get_array(df, "a"), get_array(result, 0))\n\n\n# -----------------------------------------------------------------------------\n# DataFrame methods returning new DataFrame using shallow copy\n\n\ndef test_reset_index(using_copy_on_write):\n # Case: resetting the index (i.e. adding a new column) + mutating the\n # resulting dataframe\n df = DataFrame(\n {"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]}, index=[10, 11, 12]\n )\n df_orig = df.copy()\n df2 = df.reset_index()\n df2._mgr._verify_integrity()\n\n if using_copy_on_write:\n # still shares memory (df2 is a shallow copy)\n assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n # mutating df2 triggers a copy-on-write for that column / block\n df2.iloc[0, 2] = 0\n assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize("index", [pd.RangeIndex(0, 2), Index([1, 2])])\ndef test_reset_index_series_drop(using_copy_on_write, index):\n ser = Series([1, 2], index=index)\n ser_orig = ser.copy()\n ser2 = ser.reset_index(drop=True)\n if using_copy_on_write:\n assert np.shares_memory(get_array(ser), get_array(ser2))\n assert not ser._mgr._has_no_reference(0)\n else:\n assert not np.shares_memory(get_array(ser), get_array(ser2))\n\n ser2.iloc[0] = 100\n tm.assert_series_equal(ser, ser_orig)\n\n\ndef test_groupby_column_index_in_references():\n df = DataFrame(\n {"A": ["a", "b", "c", "d"], "B": [1, 2, 3, 4], "C": ["a", "a", "b", "b"]}\n )\n df = df.set_index("A")\n key = df["C"]\n result = df.groupby(key, observed=True).sum()\n expected = df.groupby("C", observed=True).sum()\n tm.assert_frame_equal(result, 
expected)\n\n\ndef test_rename_columns(using_copy_on_write):\n # Case: renaming columns returns a new dataframe\n # + afterwards modifying the result\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n df2 = df.rename(columns=str.upper)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "A"), get_array(df, "a"))\n df2.iloc[0, 0] = 0\n assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "C"), get_array(df, "c"))\n expected = DataFrame({"A": [0, 2, 3], "B": [4, 5, 6], "C": [0.1, 0.2, 0.3]})\n tm.assert_frame_equal(df2, expected)\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_rename_columns_modify_parent(using_copy_on_write):\n # Case: renaming columns returns a new dataframe\n # + afterwards modifying the original (parent) dataframe\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df2 = df.rename(columns=str.upper)\n df2_orig = df2.copy()\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "A"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))\n df.iloc[0, 0] = 0\n assert not np.shares_memory(get_array(df2, "A"), get_array(df, "a"))\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "C"), get_array(df, "c"))\n expected = DataFrame({"a": [0, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n tm.assert_frame_equal(df, expected)\n tm.assert_frame_equal(df2, df2_orig)\n\n\ndef test_pipe(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": 1.5})\n df_orig = df.copy()\n\n def testfunc(df):\n return df\n\n df2 = df.pipe(testfunc)\n\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n # mutating df2 triggers a copy-on-write for that column\n df2.iloc[0, 0] = 0\n if using_copy_on_write:\n tm.assert_frame_equal(df, df_orig)\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n 
else:\n expected = DataFrame({"a": [0, 2, 3], "b": 1.5})\n tm.assert_frame_equal(df, expected)\n\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n\n\ndef test_pipe_modify_df(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": 1.5})\n df_orig = df.copy()\n\n def testfunc(df):\n df.iloc[0, 0] = 100\n return df\n\n df2 = df.pipe(testfunc)\n\n assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n\n if using_copy_on_write:\n tm.assert_frame_equal(df, df_orig)\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n expected = DataFrame({"a": [100, 2, 3], "b": 1.5})\n tm.assert_frame_equal(df, expected)\n\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n\n\ndef test_reindex_columns(using_copy_on_write):\n # Case: reindexing the column returns a new dataframe\n # + afterwards modifying the result\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n df2 = df.reindex(columns=["a", "c"])\n\n if using_copy_on_write:\n # still shares memory (df2 is a shallow copy)\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n # mutating df2 triggers a copy-on-write for that column\n df2.iloc[0, 0] = 0\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize(\n "index",\n [\n lambda idx: idx,\n lambda idx: idx.view(),\n lambda idx: idx.copy(),\n lambda idx: list(idx),\n ],\n ids=["identical", "view", "copy", "values"],\n)\ndef test_reindex_rows(index, using_copy_on_write):\n # Case: reindexing the rows with an index that matches the current index\n # can use a 
shallow copy\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n df2 = df.reindex(index=index(df.index))\n\n if using_copy_on_write:\n # still shares memory (df2 is a shallow copy)\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n # mutating df2 triggers a copy-on-write for that column\n df2.iloc[0, 0] = 0\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_drop_on_column(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n df2 = df.drop(columns="a")\n df2._mgr._verify_integrity()\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n else:\n assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n df2.iloc[0, 0] = 0\n assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_select_dtypes(using_copy_on_write):\n # Case: selecting columns using `select_dtypes()` returns a new dataframe\n # + afterwards modifying the result\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n df2 = df.select_dtypes("int64")\n df2._mgr._verify_integrity()\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n # mutating df2 triggers a copy-on-write for that column/block\n df2.iloc[0, 0] = 0\n if 
using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize(\n "filter_kwargs", [{"items": ["a"]}, {"like": "a"}, {"regex": "a"}]\n)\ndef test_filter(using_copy_on_write, filter_kwargs):\n # Case: selecting columns using `filter()` returns a new dataframe\n # + afterwards modifying the result\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n df2 = df.filter(**filter_kwargs)\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n # mutating df2 triggers a copy-on-write for that column/block\n if using_copy_on_write:\n df2.iloc[0, 0] = 0\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_shift_no_op(using_copy_on_write):\n df = DataFrame(\n [[1, 2], [3, 4], [5, 6]],\n index=date_range("2020-01-01", "2020-01-03"),\n columns=["a", "b"],\n )\n df_orig = df.copy()\n df2 = df.shift(periods=0)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n df.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n tm.assert_frame_equal(df2, df_orig)\n\n\ndef test_shift_index(using_copy_on_write):\n df = DataFrame(\n [[1, 2], [3, 4], [5, 6]],\n index=date_range("2020-01-01", "2020-01-03"),\n columns=["a", "b"],\n )\n df2 = df.shift(periods=1, axis=0)\n\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n\ndef test_shift_rows_freq(using_copy_on_write):\n df = DataFrame(\n [[1, 2], [3, 4], [5, 6]],\n index=date_range("2020-01-01", "2020-01-03"),\n columns=["a", "b"],\n )\n df_orig = 
df.copy()\n df_orig.index = date_range("2020-01-02", "2020-01-04")\n df2 = df.shift(periods=1, freq="1D")\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n df.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n tm.assert_frame_equal(df2, df_orig)\n\n\ndef test_shift_columns(using_copy_on_write, warn_copy_on_write):\n df = DataFrame(\n [[1, 2], [3, 4], [5, 6]], columns=date_range("2020-01-01", "2020-01-02")\n )\n df2 = df.shift(periods=1, axis=1)\n\n assert np.shares_memory(get_array(df2, "2020-01-02"), get_array(df, "2020-01-01"))\n with tm.assert_cow_warning(warn_copy_on_write):\n df.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(\n get_array(df2, "2020-01-02"), get_array(df, "2020-01-01")\n )\n expected = DataFrame(\n [[np.nan, 1], [np.nan, 3], [np.nan, 5]],\n columns=date_range("2020-01-01", "2020-01-02"),\n )\n tm.assert_frame_equal(df2, expected)\n\n\ndef test_pop(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n view_original = df[:]\n result = df.pop("a")\n\n assert np.shares_memory(result.values, get_array(view_original, "a"))\n assert np.shares_memory(get_array(df, "b"), get_array(view_original, "b"))\n\n if using_copy_on_write:\n result.iloc[0] = 0\n assert not np.shares_memory(result.values, get_array(view_original, "a"))\n with tm.assert_cow_warning(warn_copy_on_write):\n df.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df, "b"), get_array(view_original, "b"))\n tm.assert_frame_equal(view_original, df_orig)\n else:\n expected = DataFrame({"a": [1, 2, 3], "b": [0, 5, 6], "c": [0.1, 0.2, 0.3]})\n tm.assert_frame_equal(view_original, expected)\n\n\n@pytest.mark.parametrize(\n "func",\n [\n lambda x, y: x.align(y),\n lambda 
x, y: x.align(y.a, axis=0),\n lambda x, y: x.align(y.a.iloc[slice(0, 1)], axis=1),\n ],\n)\ndef test_align_frame(using_copy_on_write, func):\n df = DataFrame({"a": [1, 2, 3], "b": "a"})\n df_orig = df.copy()\n df_changed = df[["b", "a"]].copy()\n df2, _ = func(df, df_changed)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n df2.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_align_series(using_copy_on_write):\n ser = Series([1, 2])\n ser_orig = ser.copy()\n ser_other = ser.copy()\n ser2, ser_other_result = ser.align(ser_other)\n\n if using_copy_on_write:\n assert np.shares_memory(ser2.values, ser.values)\n assert np.shares_memory(ser_other_result.values, ser_other.values)\n else:\n assert not np.shares_memory(ser2.values, ser.values)\n assert not np.shares_memory(ser_other_result.values, ser_other.values)\n\n ser2.iloc[0] = 0\n ser_other_result.iloc[0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(ser2.values, ser.values)\n assert not np.shares_memory(ser_other_result.values, ser_other.values)\n tm.assert_series_equal(ser, ser_orig)\n tm.assert_series_equal(ser_other, ser_orig)\n\n\ndef test_align_copy_false(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n df_orig = df.copy()\n df2, df3 = df.align(df, copy=False)\n\n assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n\n if using_copy_on_write:\n df2.loc[0, "a"] = 0\n tm.assert_frame_equal(df, df_orig) # Original is unchanged\n\n df3.loc[0, "a"] = 0\n tm.assert_frame_equal(df, df_orig) # Original is unchanged\n\n\ndef test_align_with_series_copy_false(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n ser = Series([1, 2, 3], 
name="x")\n ser_orig = ser.copy()\n df_orig = df.copy()\n df2, ser2 = df.align(ser, copy=False, axis=0)\n\n assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n assert np.shares_memory(get_array(ser, "x"), get_array(ser2, "x"))\n\n if using_copy_on_write:\n df2.loc[0, "a"] = 0\n tm.assert_frame_equal(df, df_orig) # Original is unchanged\n\n ser2.loc[0] = 0\n tm.assert_series_equal(ser, ser_orig) # Original is unchanged\n\n\ndef test_to_frame(using_copy_on_write, warn_copy_on_write):\n # Case: converting a Series to a DataFrame with to_frame\n ser = Series([1, 2, 3])\n ser_orig = ser.copy()\n\n df = ser[:].to_frame()\n\n # currently this always returns a "view"\n assert np.shares_memory(ser.values, get_array(df, 0))\n\n with tm.assert_cow_warning(warn_copy_on_write):\n df.iloc[0, 0] = 0\n\n if using_copy_on_write:\n # mutating df triggers a copy-on-write for that column\n assert not np.shares_memory(ser.values, get_array(df, 0))\n tm.assert_series_equal(ser, ser_orig)\n else:\n # but currently select_dtypes() actually returns a view -> mutates parent\n expected = ser_orig.copy()\n expected.iloc[0] = 0\n tm.assert_series_equal(ser, expected)\n\n # modify original series -> don't modify dataframe\n df = ser[:].to_frame()\n with tm.assert_cow_warning(warn_copy_on_write):\n ser.iloc[0] = 0\n\n if using_copy_on_write:\n tm.assert_frame_equal(df, ser_orig.to_frame())\n else:\n expected = ser_orig.copy().to_frame()\n expected.iloc[0, 0] = 0\n tm.assert_frame_equal(df, expected)\n\n\n@pytest.mark.parametrize("ax", ["index", "columns"])\ndef test_swapaxes_noop(using_copy_on_write, ax):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n df_orig = df.copy()\n msg = "'DataFrame.swapaxes' is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df2 = df.swapaxes(ax, ax)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n 
assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n # mutating df2 triggers a copy-on-write for that column/block\n df2.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_swapaxes_single_block(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["x", "y", "z"])\n df_orig = df.copy()\n msg = "'DataFrame.swapaxes' is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df2 = df.swapaxes("index", "columns")\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "x"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "x"), get_array(df, "a"))\n\n # mutating df2 triggers a copy-on-write for that column/block\n df2.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "x"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_swapaxes_read_only_array():\n df = DataFrame({"a": [1, 2], "b": 3})\n msg = "'DataFrame.swapaxes' is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df = df.swapaxes(axis1="index", axis2="columns")\n df.iloc[0, 0] = 100\n expected = DataFrame({0: [100, 3], 1: [2, 3]}, index=["a", "b"])\n tm.assert_frame_equal(df, expected)\n\n\n@pytest.mark.parametrize(\n "method, idx",\n [\n (lambda df: df.copy(deep=False).copy(deep=False), 0),\n (lambda df: df.reset_index().reset_index(), 2),\n (lambda df: df.rename(columns=str.upper).rename(columns=str.lower), 0),\n (lambda df: df.copy(deep=False).select_dtypes(include="number"), 0),\n ],\n ids=["shallow-copy", "reset_index", "rename", "select_dtypes"],\n)\ndef test_chained_methods(request, method, idx, using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n\n # when not using CoW, only the copy() variant actually gives a view\n df2_is_view 
= not using_copy_on_write and request.node.callspec.id == "shallow-copy"\n\n # modify df2 -> don't modify df\n df2 = method(df)\n with tm.assert_cow_warning(warn_copy_on_write and df2_is_view):\n df2.iloc[0, idx] = 0\n if not df2_is_view:\n tm.assert_frame_equal(df, df_orig)\n\n # modify df -> don't modify df2\n df2 = method(df)\n with tm.assert_cow_warning(warn_copy_on_write and df2_is_view):\n df.iloc[0, 0] = 0\n if not df2_is_view:\n tm.assert_frame_equal(df2.iloc[:, idx:], df_orig)\n\n\n@pytest.mark.parametrize("obj", [Series([1, 2], name="a"), DataFrame({"a": [1, 2]})])\ndef test_to_timestamp(using_copy_on_write, obj):\n obj.index = Index([Period("2012-1-1", freq="D"), Period("2012-1-2", freq="D")])\n\n obj_orig = obj.copy()\n obj2 = obj.to_timestamp()\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))\n else:\n assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))\n\n # mutating obj2 triggers a copy-on-write for that column / block\n obj2.iloc[0] = 0\n assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))\n tm.assert_equal(obj, obj_orig)\n\n\n@pytest.mark.parametrize("obj", [Series([1, 2], name="a"), DataFrame({"a": [1, 2]})])\ndef test_to_period(using_copy_on_write, obj):\n obj.index = Index([Timestamp("2019-12-31"), Timestamp("2020-12-31")])\n\n obj_orig = obj.copy()\n obj2 = obj.to_period(freq="Y")\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))\n else:\n assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))\n\n # mutating obj2 triggers a copy-on-write for that column / block\n obj2.iloc[0] = 0\n assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))\n tm.assert_equal(obj, obj_orig)\n\n\ndef test_set_index(using_copy_on_write):\n # GH 49473\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n df2 = df.set_index("a")\n\n if using_copy_on_write:\n assert 
np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n else:\n assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n\n # mutating df2 triggers a copy-on-write for that column / block\n df2.iloc[0, 1] = 0\n assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_set_index_mutating_parent_does_not_mutate_index():\n df = DataFrame({"a": [1, 2, 3], "b": 1})\n result = df.set_index("a")\n expected = result.copy()\n\n df.iloc[0, 0] = 100\n tm.assert_frame_equal(result, expected)\n\n\ndef test_add_prefix(using_copy_on_write):\n # GH 49473\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n df2 = df.add_prefix("CoW_")\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "CoW_a"), get_array(df, "a"))\n df2.iloc[0, 0] = 0\n\n assert not np.shares_memory(get_array(df2, "CoW_a"), get_array(df, "a"))\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "CoW_c"), get_array(df, "c"))\n expected = DataFrame(\n {"CoW_a": [0, 2, 3], "CoW_b": [4, 5, 6], "CoW_c": [0.1, 0.2, 0.3]}\n )\n tm.assert_frame_equal(df2, expected)\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_add_suffix(using_copy_on_write):\n # GH 49473\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n df2 = df.add_suffix("_CoW")\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a_CoW"), get_array(df, "a"))\n df2.iloc[0, 0] = 0\n assert not np.shares_memory(get_array(df2, "a_CoW"), get_array(df, "a"))\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "c_CoW"), get_array(df, "c"))\n expected = DataFrame(\n {"a_CoW": [0, 2, 3], "b_CoW": [4, 5, 6], "c_CoW": [0.1, 0.2, 0.3]}\n )\n tm.assert_frame_equal(df2, expected)\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize("axis, val", [(0, 5.5), (1, np.nan)])\ndef test_dropna(using_copy_on_write, axis, val):\n df = 
DataFrame({"a": [1, 2, 3], "b": [4, val, 6], "c": "d"})\n df_orig = df.copy()\n df2 = df.dropna(axis=axis)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n df2.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize("val", [5, 5.5])\ndef test_dropna_series(using_copy_on_write, val):\n ser = Series([1, val, 4])\n ser_orig = ser.copy()\n ser2 = ser.dropna()\n\n if using_copy_on_write:\n assert np.shares_memory(ser2.values, ser.values)\n else:\n assert not np.shares_memory(ser2.values, ser.values)\n\n ser2.iloc[0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(ser2.values, ser.values)\n tm.assert_series_equal(ser, ser_orig)\n\n\n@pytest.mark.parametrize(\n "method",\n [\n lambda df: df.head(),\n lambda df: df.head(2),\n lambda df: df.tail(),\n lambda df: df.tail(3),\n ],\n)\ndef test_head_tail(method, using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n df2 = method(df)\n df2._mgr._verify_integrity()\n\n if using_copy_on_write:\n # We are explicitly deviating for CoW here to make an eager copy (avoids\n # tracking references for very cheap ops)\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n\n # modify df2 to trigger CoW for that block\n with tm.assert_cow_warning(warn_copy_on_write):\n df2.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n # without CoW enabled, head and tail return views. 
Mutating df2 also mutates df.\n assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n with tm.assert_cow_warning(warn_copy_on_write):\n df2.iloc[0, 0] = 1\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_infer_objects(using_copy_on_write, using_infer_string):\n df = DataFrame(\n {"a": [1, 2], "b": Series(["x", "y"], dtype=object), "c": 1, "d": "x"}\n )\n df_orig = df.copy()\n df2 = df.infer_objects()\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n if using_infer_string:\n assert not tm.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n else:\n assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n\n df2.iloc[0, 0] = 0\n df2.iloc[0, 1] = "d"\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_infer_objects_no_reference(using_copy_on_write, using_infer_string):\n df = DataFrame(\n {\n "a": [1, 2],\n "b": Series(["x", "y"], dtype=object),\n "c": 1,\n "d": Series(\n [Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype="object"\n ),\n "e": Series(["z", "w"], dtype=object),\n }\n )\n df = df.infer_objects()\n\n arr_a = get_array(df, "a")\n arr_b = get_array(df, "b")\n arr_d = get_array(df, "d")\n\n df.iloc[0, 0] = 0\n df.iloc[0, 1] = "d"\n df.iloc[0, 3] = Timestamp("2018-12-31")\n if using_copy_on_write:\n assert np.shares_memory(arr_a, get_array(df, "a"))\n if using_infer_string:\n # note that the underlying memory of arr_b has been copied anyway\n # because of the assignment, but the EA is updated inplace so still\n # appears the share memory\n assert tm.shares_memory(arr_b, get_array(df, "b"))\n else:\n # TODO(CoW): Block splitting causes references here\n assert not 
np.shares_memory(arr_b, get_array(df, "b"))\n assert np.shares_memory(arr_d, get_array(df, "d"))\n\n\ndef test_infer_objects_reference(using_copy_on_write, using_infer_string):\n df = DataFrame(\n {\n "a": [1, 2],\n "b": Series(["x", "y"], dtype=object),\n "c": 1,\n "d": Series(\n [Timestamp("2019-12-31"), Timestamp("2020-12-31")], dtype="object"\n ),\n }\n )\n view = df[:] # noqa: F841\n df = df.infer_objects()\n\n arr_a = get_array(df, "a")\n arr_b = get_array(df, "b")\n arr_d = get_array(df, "d")\n\n df.iloc[0, 0] = 0\n df.iloc[0, 1] = "d"\n df.iloc[0, 3] = Timestamp("2018-12-31")\n if using_copy_on_write:\n assert not np.shares_memory(arr_a, get_array(df, "a"))\n if not using_infer_string or HAS_PYARROW:\n assert not np.shares_memory(arr_b, get_array(df, "b"))\n assert np.shares_memory(arr_d, get_array(df, "d"))\n\n\n@pytest.mark.parametrize(\n "kwargs",\n [\n {"before": "a", "after": "b", "axis": 1},\n {"before": 0, "after": 1, "axis": 0},\n ],\n)\ndef test_truncate(using_copy_on_write, kwargs):\n df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 2})\n df_orig = df.copy()\n df2 = df.truncate(**kwargs)\n df2._mgr._verify_integrity()\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n df2.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize("method", ["assign", "drop_duplicates"])\ndef test_assign_drop_duplicates(using_copy_on_write, method):\n df = DataFrame({"a": [1, 2, 3]})\n df_orig = df.copy()\n df2 = getattr(df, method)()\n df2._mgr._verify_integrity()\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n df2.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not 
np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize("obj", [Series([1, 2]), DataFrame({"a": [1, 2]})])\ndef test_take(using_copy_on_write, obj):\n # Check that no copy is made when we take all rows in original order\n obj_orig = obj.copy()\n obj2 = obj.take([0, 1])\n\n if using_copy_on_write:\n assert np.shares_memory(obj2.values, obj.values)\n else:\n assert not np.shares_memory(obj2.values, obj.values)\n\n obj2.iloc[0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(obj2.values, obj.values)\n tm.assert_equal(obj, obj_orig)\n\n\n@pytest.mark.parametrize("obj", [Series([1, 2]), DataFrame({"a": [1, 2]})])\ndef test_between_time(using_copy_on_write, obj):\n obj.index = date_range("2018-04-09", periods=2, freq="1D20min")\n obj_orig = obj.copy()\n obj2 = obj.between_time("0:00", "1:00")\n\n if using_copy_on_write:\n assert np.shares_memory(obj2.values, obj.values)\n else:\n assert not np.shares_memory(obj2.values, obj.values)\n\n obj2.iloc[0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(obj2.values, obj.values)\n tm.assert_equal(obj, obj_orig)\n\n\ndef test_reindex_like(using_copy_on_write):\n df = DataFrame({"a": [1, 2], "b": "a"})\n other = DataFrame({"b": "a", "a": [1, 2]})\n\n df_orig = df.copy()\n df2 = df.reindex_like(other)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n df2.iloc[0, 1] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_sort_index(using_copy_on_write):\n # GH 49473\n ser = Series([1, 2, 3])\n ser_orig = ser.copy()\n ser2 = ser.sort_index()\n\n if using_copy_on_write:\n assert np.shares_memory(ser.values, ser2.values)\n else:\n assert not np.shares_memory(ser.values, ser2.values)\n\n # mutating ser triggers a copy-on-write 
for the column / block\n ser2.iloc[0] = 0\n assert not np.shares_memory(ser2.values, ser.values)\n tm.assert_series_equal(ser, ser_orig)\n\n\n@pytest.mark.parametrize(\n "obj, kwargs",\n [(Series([1, 2, 3], name="a"), {}), (DataFrame({"a": [1, 2, 3]}), {"by": "a"})],\n)\ndef test_sort_values(using_copy_on_write, obj, kwargs):\n obj_orig = obj.copy()\n obj2 = obj.sort_values(**kwargs)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))\n else:\n assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))\n\n # mutating df triggers a copy-on-write for the column / block\n obj2.iloc[0] = 0\n assert not np.shares_memory(get_array(obj2, "a"), get_array(obj, "a"))\n tm.assert_equal(obj, obj_orig)\n\n\n@pytest.mark.parametrize(\n "obj, kwargs",\n [(Series([1, 2, 3], name="a"), {}), (DataFrame({"a": [1, 2, 3]}), {"by": "a"})],\n)\ndef test_sort_values_inplace(using_copy_on_write, obj, kwargs, warn_copy_on_write):\n obj_orig = obj.copy()\n view = obj[:]\n obj.sort_values(inplace=True, **kwargs)\n\n assert np.shares_memory(get_array(obj, "a"), get_array(view, "a"))\n\n # mutating obj triggers a copy-on-write for the column / block\n with tm.assert_cow_warning(warn_copy_on_write):\n obj.iloc[0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(obj, "a"), get_array(view, "a"))\n tm.assert_equal(view, obj_orig)\n else:\n assert np.shares_memory(get_array(obj, "a"), get_array(view, "a"))\n\n\n@pytest.mark.parametrize("decimals", [-1, 0, 1])\ndef test_round(using_copy_on_write, warn_copy_on_write, decimals):\n df = DataFrame({"a": [1, 2], "b": "c"})\n df_orig = df.copy()\n df2 = df.round(decimals=decimals)\n\n if using_copy_on_write:\n assert tm.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n # TODO: Make inplace by using out parameter of ndarray.round?\n if decimals >= 0:\n # Ensure lazy copy if no-op\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not 
np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n df2.iloc[0, 1] = "d"\n df2.iloc[0, 0] = 4\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_reorder_levels(using_copy_on_write):\n index = MultiIndex.from_tuples(\n [(1, 1), (1, 2), (2, 1), (2, 2)], names=["one", "two"]\n )\n df = DataFrame({"a": [1, 2, 3, 4]}, index=index)\n df_orig = df.copy()\n df2 = df.reorder_levels(order=["two", "one"])\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n df2.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_series_reorder_levels(using_copy_on_write):\n index = MultiIndex.from_tuples(\n [(1, 1), (1, 2), (2, 1), (2, 2)], names=["one", "two"]\n )\n ser = Series([1, 2, 3, 4], index=index)\n ser_orig = ser.copy()\n ser2 = ser.reorder_levels(order=["two", "one"])\n\n if using_copy_on_write:\n assert np.shares_memory(ser2.values, ser.values)\n else:\n assert not np.shares_memory(ser2.values, ser.values)\n\n ser2.iloc[0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(ser2.values, ser.values)\n tm.assert_series_equal(ser, ser_orig)\n\n\n@pytest.mark.parametrize("obj", [Series([1, 2, 3]), DataFrame({"a": [1, 2, 3]})])\ndef test_swaplevel(using_copy_on_write, obj):\n index = MultiIndex.from_tuples([(1, 1), (1, 2), (2, 1)], names=["one", "two"])\n obj.index = index\n obj_orig = obj.copy()\n obj2 = obj.swaplevel()\n\n if using_copy_on_write:\n assert np.shares_memory(obj2.values, obj.values)\n else:\n assert not 
np.shares_memory(obj2.values, obj.values)\n\n obj2.iloc[0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(obj2.values, obj.values)\n tm.assert_equal(obj, obj_orig)\n\n\ndef test_frame_set_axis(using_copy_on_write):\n # GH 49473\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n df2 = df.set_axis(["a", "b", "c"], axis="index")\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n # mutating df2 triggers a copy-on-write for that column / block\n df2.iloc[0, 0] = 0\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_series_set_axis(using_copy_on_write):\n # GH 49473\n ser = Series([1, 2, 3])\n ser_orig = ser.copy()\n ser2 = ser.set_axis(["a", "b", "c"], axis="index")\n\n if using_copy_on_write:\n assert np.shares_memory(ser, ser2)\n else:\n assert not np.shares_memory(ser, ser2)\n\n # mutating ser triggers a copy-on-write for the column / block\n ser2.iloc[0] = 0\n assert not np.shares_memory(ser2, ser)\n tm.assert_series_equal(ser, ser_orig)\n\n\ndef test_set_flags(using_copy_on_write, warn_copy_on_write):\n ser = Series([1, 2, 3])\n ser_orig = ser.copy()\n ser2 = ser.set_flags(allows_duplicate_labels=False)\n\n assert np.shares_memory(ser, ser2)\n\n # mutating ser triggers a copy-on-write for the column / block\n with tm.assert_cow_warning(warn_copy_on_write):\n ser2.iloc[0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(ser2, ser)\n tm.assert_series_equal(ser, ser_orig)\n else:\n assert np.shares_memory(ser2, ser)\n expected = Series([0, 2, 3])\n tm.assert_series_equal(ser, expected)\n\n\n@pytest.mark.parametrize("kwargs", [{"mapper": "test"}, {"index": "test"}])\ndef test_rename_axis(using_copy_on_write, kwargs):\n df = DataFrame({"a": [1, 2, 3, 4]}, index=Index([1, 2, 3, 4], name="a"))\n df_orig = 
df.copy()\n df2 = df.rename_axis(**kwargs)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n df2.iloc[0, 0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize(\n "func, tz", [("tz_convert", "Europe/Berlin"), ("tz_localize", None)]\n)\ndef test_tz_convert_localize(using_copy_on_write, func, tz):\n # GH 49473\n ser = Series(\n [1, 2], index=date_range(start="2014-08-01 09:00", freq="h", periods=2, tz=tz)\n )\n ser_orig = ser.copy()\n ser2 = getattr(ser, func)("US/Central")\n\n if using_copy_on_write:\n assert np.shares_memory(ser.values, ser2.values)\n else:\n assert not np.shares_memory(ser.values, ser2.values)\n\n # mutating ser triggers a copy-on-write for the column / block\n ser2.iloc[0] = 0\n assert not np.shares_memory(ser2.values, ser.values)\n tm.assert_series_equal(ser, ser_orig)\n\n\ndef test_droplevel(using_copy_on_write):\n # GH 49473\n index = MultiIndex.from_tuples([(1, 1), (1, 2), (2, 1)], names=["one", "two"])\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]}, index=index)\n df_orig = df.copy()\n df2 = df.droplevel(0)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "c"), get_array(df, "c"))\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n # mutating df2 triggers a copy-on-write for that column / block\n df2.iloc[0, 0] = 0\n\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "b"), get_array(df, "b"))\n\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_squeeze(using_copy_on_write, warn_copy_on_write):\n df = 
DataFrame({"a": [1, 2, 3]})\n df_orig = df.copy()\n series = df.squeeze()\n\n # Should share memory regardless of CoW since squeeze is just an iloc\n assert np.shares_memory(series.values, get_array(df, "a"))\n\n # mutating squeezed df triggers a copy-on-write for that column/block\n with tm.assert_cow_warning(warn_copy_on_write):\n series.iloc[0] = 0\n if using_copy_on_write:\n assert not np.shares_memory(series.values, get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n else:\n # Without CoW the original will be modified\n assert np.shares_memory(series.values, get_array(df, "a"))\n assert df.loc[0, "a"] == 0\n\n\ndef test_items(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})\n df_orig = df.copy()\n\n # Test this twice, since the second time, the item cache will be\n # triggered, and we want to make sure it still works then.\n for i in range(2):\n for name, ser in df.items():\n assert np.shares_memory(get_array(ser, name), get_array(df, name))\n\n # mutating df triggers a copy-on-write for that column / block\n with tm.assert_cow_warning(warn_copy_on_write):\n ser.iloc[0] = 0\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(ser, name), get_array(df, name))\n tm.assert_frame_equal(df, df_orig)\n else:\n # Original frame will be modified\n assert df.loc[0, name] == 0\n\n\n@pytest.mark.parametrize("dtype", ["int64", "Int64"])\ndef test_putmask(using_copy_on_write, dtype, warn_copy_on_write):\n df = DataFrame({"a": [1, 2], "b": 1, "c": 2}, dtype=dtype)\n view = df[:]\n df_orig = df.copy()\n with tm.assert_cow_warning(warn_copy_on_write):\n df[df == df] = 5\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(view, "a"), get_array(df, "a"))\n tm.assert_frame_equal(view, df_orig)\n else:\n # Without CoW the original will be modified\n assert np.shares_memory(get_array(view, "a"), get_array(df, "a"))\n assert view.iloc[0, 0] == 5\n\n\n@pytest.mark.parametrize("dtype", 
["int64", "Int64"])\ndef test_putmask_no_reference(using_copy_on_write, dtype):\n df = DataFrame({"a": [1, 2], "b": 1, "c": 2}, dtype=dtype)\n arr_a = get_array(df, "a")\n df[df == df] = 5\n\n if using_copy_on_write:\n assert np.shares_memory(arr_a, get_array(df, "a"))\n\n\n@pytest.mark.parametrize("dtype", ["float64", "Float64"])\ndef test_putmask_aligns_rhs_no_reference(using_copy_on_write, dtype):\n df = DataFrame({"a": [1.5, 2], "b": 1.5}, dtype=dtype)\n arr_a = get_array(df, "a")\n df[df == df] = DataFrame({"a": [5.5, 5]})\n\n if using_copy_on_write:\n assert np.shares_memory(arr_a, get_array(df, "a"))\n\n\n@pytest.mark.parametrize(\n "val, exp, warn", [(5.5, True, FutureWarning), (5, False, None)]\n)\ndef test_putmask_dont_copy_some_blocks(\n using_copy_on_write, val, exp, warn, warn_copy_on_write\n):\n df = DataFrame({"a": [1, 2], "b": 1, "c": 1.5})\n view = df[:]\n df_orig = df.copy()\n indexer = DataFrame(\n [[True, False, False], [True, False, False]], columns=list("abc")\n )\n if warn_copy_on_write:\n with tm.assert_cow_warning():\n df[indexer] = val\n else:\n with tm.assert_produces_warning(warn, match="incompatible dtype"):\n df[indexer] = val\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(view, "a"), get_array(df, "a"))\n # TODO(CoW): Could split blocks to avoid copying the whole block\n assert np.shares_memory(get_array(view, "b"), get_array(df, "b")) is exp\n assert np.shares_memory(get_array(view, "c"), get_array(df, "c"))\n assert df._mgr._has_no_reference(1) is not exp\n assert not df._mgr._has_no_reference(2)\n tm.assert_frame_equal(view, df_orig)\n elif val == 5:\n # Without CoW the original will be modified, the other case upcasts, e.g. 
copy\n assert np.shares_memory(get_array(view, "a"), get_array(df, "a"))\n assert np.shares_memory(get_array(view, "c"), get_array(df, "c"))\n assert view.iloc[0, 0] == 5\n\n\n@pytest.mark.parametrize("dtype", ["int64", "Int64"])\n@pytest.mark.parametrize(\n "func",\n [\n lambda ser: ser.where(ser > 0, 10),\n lambda ser: ser.mask(ser <= 0, 10),\n ],\n)\ndef test_where_mask_noop(using_copy_on_write, dtype, func):\n ser = Series([1, 2, 3], dtype=dtype)\n ser_orig = ser.copy()\n\n result = func(ser)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(ser), get_array(result))\n else:\n assert not np.shares_memory(get_array(ser), get_array(result))\n\n result.iloc[0] = 10\n if using_copy_on_write:\n assert not np.shares_memory(get_array(ser), get_array(result))\n tm.assert_series_equal(ser, ser_orig)\n\n\n@pytest.mark.parametrize("dtype", ["int64", "Int64"])\n@pytest.mark.parametrize(\n "func",\n [\n lambda ser: ser.where(ser < 0, 10),\n lambda ser: ser.mask(ser >= 0, 10),\n ],\n)\ndef test_where_mask(using_copy_on_write, dtype, func):\n ser = Series([1, 2, 3], dtype=dtype)\n ser_orig = ser.copy()\n\n result = func(ser)\n\n assert not np.shares_memory(get_array(ser), get_array(result))\n tm.assert_series_equal(ser, ser_orig)\n\n\n@pytest.mark.parametrize("dtype, val", [("int64", 10.5), ("Int64", 10)])\n@pytest.mark.parametrize(\n "func",\n [\n lambda df, val: df.where(df < 0, val),\n lambda df, val: df.mask(df >= 0, val),\n ],\n)\ndef test_where_mask_noop_on_single_column(using_copy_on_write, dtype, val, func):\n df = DataFrame({"a": [1, 2, 3], "b": [-4, -5, -6]}, dtype=dtype)\n df_orig = df.copy()\n\n result = func(df, val)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "b"), get_array(result, "b"))\n assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))\n else:\n assert not np.shares_memory(get_array(df, "b"), get_array(result, "b"))\n\n result.iloc[0, 1] = 10\n if using_copy_on_write:\n assert not 
np.shares_memory(get_array(df, "b"), get_array(result, "b"))\n tm.assert_frame_equal(df, df_orig)\n\n\n@pytest.mark.parametrize("func", ["mask", "where"])\ndef test_chained_where_mask(using_copy_on_write, func):\n df = DataFrame({"a": [1, 4, 2], "b": 1})\n df_orig = df.copy()\n if using_copy_on_write:\n with tm.raises_chained_assignment_error():\n getattr(df["a"], func)(df["a"] > 2, 5, inplace=True)\n tm.assert_frame_equal(df, df_orig)\n\n with tm.raises_chained_assignment_error():\n getattr(df[["a"]], func)(df["a"] > 2, 5, inplace=True)\n tm.assert_frame_equal(df, df_orig)\n else:\n with tm.assert_produces_warning(FutureWarning, match="inplace method"):\n getattr(df["a"], func)(df["a"] > 2, 5, inplace=True)\n\n with tm.assert_produces_warning(None):\n with option_context("mode.chained_assignment", None):\n getattr(df[["a"]], func)(df["a"] > 2, 5, inplace=True)\n\n with tm.assert_produces_warning(None):\n with option_context("mode.chained_assignment", None):\n getattr(df[df["a"] > 1], func)(df["a"] > 2, 5, inplace=True)\n\n\ndef test_asfreq_noop(using_copy_on_write):\n df = DataFrame(\n {"a": [0.0, None, 2.0, 3.0]},\n index=date_range("1/1/2000", periods=4, freq="min"),\n )\n df_orig = df.copy()\n df2 = df.asfreq(freq="min")\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n # mutating df2 triggers a copy-on-write for that column / block\n df2.iloc[0, 0] = 0\n\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_iterrows(using_copy_on_write):\n df = DataFrame({"a": 0, "b": 1}, index=[1, 2, 3])\n df_orig = df.copy()\n\n for _, sub in df.iterrows():\n sub.iloc[0] = 100\n if using_copy_on_write:\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_interpolate_creates_copy(using_copy_on_write, warn_copy_on_write):\n # GH#51126\n df = DataFrame({"a": [1.5, np.nan, 3]})\n view = 
df[:]\n expected = df.copy()\n\n with tm.assert_cow_warning(warn_copy_on_write):\n df.ffill(inplace=True)\n with tm.assert_cow_warning(warn_copy_on_write):\n df.iloc[0, 0] = 100.5\n\n if using_copy_on_write:\n tm.assert_frame_equal(view, expected)\n else:\n expected = DataFrame({"a": [100.5, 1.5, 3]})\n tm.assert_frame_equal(view, expected)\n\n\ndef test_isetitem(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [7, 8, 9]})\n df_orig = df.copy()\n df2 = df.copy(deep=None) # Trigger a CoW\n df2.isetitem(1, np.array([-1, -2, -3])) # This is inplace\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "c"), get_array(df2, "c"))\n assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n else:\n assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))\n assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n\n df2.loc[0, "a"] = 0\n tm.assert_frame_equal(df, df_orig) # Original is unchanged\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "c"), get_array(df2, "c"))\n else:\n assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))\n\n\n@pytest.mark.parametrize(\n "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]\n)\ndef test_isetitem_series(using_copy_on_write, dtype):\n df = DataFrame({"a": [1, 2, 3], "b": np.array([4, 5, 6], dtype=dtype)})\n ser = Series([7, 8, 9])\n ser_orig = ser.copy()\n df.isetitem(0, ser)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "a"), get_array(ser))\n assert not df._mgr._has_no_reference(0)\n\n # mutating dataframe doesn't update series\n df.loc[0, "a"] = 0\n tm.assert_series_equal(ser, ser_orig)\n\n # mutating series doesn't update dataframe\n df = DataFrame({"a": [1, 2, 3], "b": np.array([4, 5, 6], dtype=dtype)})\n ser = Series([7, 8, 9])\n df.isetitem(0, ser)\n\n ser.loc[0] = 0\n expected = DataFrame({"a": [7, 8, 9], "b": np.array([4, 5, 6], dtype=dtype)})\n tm.assert_frame_equal(df, 
expected)\n\n\ndef test_isetitem_frame(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": 1, "c": 2})\n rhs = DataFrame({"a": [4, 5, 6], "b": 2})\n df.isetitem([0, 1], rhs)\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "a"), get_array(rhs, "a"))\n assert np.shares_memory(get_array(df, "b"), get_array(rhs, "b"))\n assert not df._mgr._has_no_reference(0)\n else:\n assert not np.shares_memory(get_array(df, "a"), get_array(rhs, "a"))\n assert not np.shares_memory(get_array(df, "b"), get_array(rhs, "b"))\n expected = df.copy()\n rhs.iloc[0, 0] = 100\n rhs.iloc[0, 1] = 100\n tm.assert_frame_equal(df, expected)\n\n\n@pytest.mark.parametrize("key", ["a", ["a"]])\ndef test_get(using_copy_on_write, warn_copy_on_write, key):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n df_orig = df.copy()\n\n result = df.get(key)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n result.iloc[0] = 0\n assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n tm.assert_frame_equal(df, df_orig)\n else:\n # for non-CoW it depends on whether we got a Series or DataFrame if it\n # is a view or copy or triggers a warning or not\n if warn_copy_on_write:\n warn = FutureWarning if isinstance(key, str) else None\n else:\n warn = SettingWithCopyWarning if isinstance(key, list) else None\n with option_context("chained_assignment", "warn"):\n with tm.assert_produces_warning(warn):\n result.iloc[0] = 0\n\n if isinstance(key, list):\n tm.assert_frame_equal(df, df_orig)\n else:\n assert df.iloc[0, 0] == 0\n\n\n@pytest.mark.parametrize("axis, key", [(0, 0), (1, "a")])\n@pytest.mark.parametrize(\n "dtype", ["int64", "float64"], ids=["single-block", "mixed-block"]\n)\ndef test_xs(\n using_copy_on_write, warn_copy_on_write, using_array_manager, axis, key, dtype\n):\n single_block = (dtype == "int64") and not using_array_manager\n is_view = single_block or (using_array_manager and axis == 1)\n df = DataFrame(\n 
{"a": [1, 2, 3], "b": [4, 5, 6], "c": np.array([7, 8, 9], dtype=dtype)}\n )\n df_orig = df.copy()\n\n result = df.xs(key, axis=axis)\n\n if axis == 1 or single_block:\n assert np.shares_memory(get_array(df, "a"), get_array(result))\n elif using_copy_on_write:\n assert result._mgr._has_no_reference(0)\n\n if using_copy_on_write or (is_view and not warn_copy_on_write):\n result.iloc[0] = 0\n elif warn_copy_on_write:\n with tm.assert_cow_warning(single_block or axis == 1):\n result.iloc[0] = 0\n else:\n with option_context("chained_assignment", "warn"):\n with tm.assert_produces_warning(SettingWithCopyWarning):\n result.iloc[0] = 0\n\n if using_copy_on_write or (not single_block and axis == 0):\n tm.assert_frame_equal(df, df_orig)\n else:\n assert df.iloc[0, 0] == 0\n\n\n@pytest.mark.parametrize("axis", [0, 1])\n@pytest.mark.parametrize("key, level", [("l1", 0), (2, 1)])\ndef test_xs_multiindex(\n using_copy_on_write, warn_copy_on_write, using_array_manager, key, level, axis\n):\n arr = np.arange(18).reshape(6, 3)\n index = MultiIndex.from_product([["l1", "l2"], [1, 2, 3]], names=["lev1", "lev2"])\n df = DataFrame(arr, index=index, columns=list("abc"))\n if axis == 1:\n df = df.transpose().copy()\n df_orig = df.copy()\n\n result = df.xs(key, level=level, axis=axis)\n\n if level == 0:\n assert np.shares_memory(\n get_array(df, df.columns[0]), get_array(result, result.columns[0])\n )\n\n if warn_copy_on_write:\n warn = FutureWarning if level == 0 else None\n elif not using_copy_on_write and not using_array_manager:\n warn = SettingWithCopyWarning\n else:\n warn = None\n with option_context("chained_assignment", "warn"):\n with tm.assert_produces_warning(warn):\n result.iloc[0, 0] = 0\n\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_update_frame(using_copy_on_write, warn_copy_on_write):\n df1 = DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 5.0, 6.0]})\n df2 = DataFrame({"b": [100.0]}, index=[1])\n df1_orig = df1.copy()\n view = df1[:]\n\n # TODO(CoW) better warning 
message?\n with tm.assert_cow_warning(warn_copy_on_write):\n df1.update(df2)\n\n expected = DataFrame({"a": [1.0, 2.0, 3.0], "b": [4.0, 100.0, 6.0]})\n tm.assert_frame_equal(df1, expected)\n if using_copy_on_write:\n # df1 is updated, but its view not\n tm.assert_frame_equal(view, df1_orig)\n assert np.shares_memory(get_array(df1, "a"), get_array(view, "a"))\n assert not np.shares_memory(get_array(df1, "b"), get_array(view, "b"))\n else:\n tm.assert_frame_equal(view, expected)\n\n\ndef test_update_series(using_copy_on_write, warn_copy_on_write):\n ser1 = Series([1.0, 2.0, 3.0])\n ser2 = Series([100.0], index=[1])\n ser1_orig = ser1.copy()\n view = ser1[:]\n\n if warn_copy_on_write:\n with tm.assert_cow_warning():\n ser1.update(ser2)\n else:\n ser1.update(ser2)\n\n expected = Series([1.0, 100.0, 3.0])\n tm.assert_series_equal(ser1, expected)\n if using_copy_on_write:\n # ser1 is updated, but its view not\n tm.assert_series_equal(view, ser1_orig)\n else:\n tm.assert_series_equal(view, expected)\n\n\ndef test_update_chained_assignment(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3]})\n ser2 = Series([100.0], index=[1])\n df_orig = df.copy()\n if using_copy_on_write:\n with tm.raises_chained_assignment_error():\n df["a"].update(ser2)\n tm.assert_frame_equal(df, df_orig)\n\n with tm.raises_chained_assignment_error():\n df[["a"]].update(ser2.to_frame())\n tm.assert_frame_equal(df, df_orig)\n else:\n with tm.assert_produces_warning(FutureWarning, match="inplace method"):\n df["a"].update(ser2)\n\n with tm.assert_produces_warning(None):\n with option_context("mode.chained_assignment", None):\n df[["a"]].update(ser2.to_frame())\n\n with tm.assert_produces_warning(None):\n with option_context("mode.chained_assignment", None):\n df[df["a"] > 1].update(ser2.to_frame())\n\n\ndef test_inplace_arithmetic_series(using_copy_on_write):\n ser = Series([1, 2, 3])\n ser_orig = ser.copy()\n data = get_array(ser)\n ser *= 2\n if using_copy_on_write:\n # 
https://github.com/pandas-dev/pandas/pull/55745\n # changed to NOT update inplace because there is no benefit (actual\n # operation already done non-inplace). This was only for the optics\n # of updating the backing array inplace, but we no longer want to make\n # that guarantee\n assert not np.shares_memory(get_array(ser), data)\n tm.assert_numpy_array_equal(data, get_array(ser_orig))\n else:\n assert np.shares_memory(get_array(ser), data)\n tm.assert_numpy_array_equal(data, get_array(ser))\n\n\ndef test_inplace_arithmetic_series_with_reference(\n using_copy_on_write, warn_copy_on_write\n):\n ser = Series([1, 2, 3])\n ser_orig = ser.copy()\n view = ser[:]\n with tm.assert_cow_warning(warn_copy_on_write):\n ser *= 2\n if using_copy_on_write:\n assert not np.shares_memory(get_array(ser), get_array(view))\n tm.assert_series_equal(ser_orig, view)\n else:\n assert np.shares_memory(get_array(ser), get_array(view))\n\n\n@pytest.mark.parametrize("copy", [True, False])\ndef test_transpose(using_copy_on_write, copy, using_array_manager):\n df = DataFrame({"a": [1, 2, 3], "b": 1})\n df_orig = df.copy()\n result = df.transpose(copy=copy)\n\n if not copy and not using_array_manager or using_copy_on_write:\n assert np.shares_memory(get_array(df, "a"), get_array(result, 0))\n else:\n assert not np.shares_memory(get_array(df, "a"), get_array(result, 0))\n\n result.iloc[0, 0] = 100\n if using_copy_on_write:\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_transpose_different_dtypes(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": 1.5})\n df_orig = df.copy()\n result = df.T\n\n assert not np.shares_memory(get_array(df, "a"), get_array(result, 0))\n result.iloc[0, 0] = 100\n if using_copy_on_write:\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_transpose_ea_single_column(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3]}, dtype="Int64")\n result = df.T\n\n assert not np.shares_memory(get_array(df, "a"), get_array(result, 0))\n\n\ndef 
test_transform_frame(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": 1})\n df_orig = df.copy()\n\n def func(ser):\n ser.iloc[0] = 100\n return ser\n\n with tm.assert_cow_warning(warn_copy_on_write):\n df.transform(func)\n if using_copy_on_write:\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_transform_series(using_copy_on_write, warn_copy_on_write):\n ser = Series([1, 2, 3])\n ser_orig = ser.copy()\n\n def func(ser):\n ser.iloc[0] = 100\n return ser\n\n with tm.assert_cow_warning(warn_copy_on_write):\n ser.transform(func)\n if using_copy_on_write:\n tm.assert_series_equal(ser, ser_orig)\n\n\ndef test_count_read_only_array():\n df = DataFrame({"a": [1, 2], "b": 3})\n result = df.count()\n result.iloc[0] = 100\n expected = Series([100, 2], index=["a", "b"])\n tm.assert_series_equal(result, expected)\n\n\ndef test_series_view(using_copy_on_write, warn_copy_on_write):\n ser = Series([1, 2, 3])\n ser_orig = ser.copy()\n\n with tm.assert_produces_warning(FutureWarning, match="is deprecated"):\n ser2 = ser.view()\n assert np.shares_memory(get_array(ser), get_array(ser2))\n if using_copy_on_write:\n assert not ser2._mgr._has_no_reference(0)\n\n with tm.assert_cow_warning(warn_copy_on_write):\n ser2.iloc[0] = 100\n\n if using_copy_on_write:\n tm.assert_series_equal(ser_orig, ser)\n else:\n expected = Series([100, 2, 3])\n tm.assert_series_equal(ser, expected)\n\n\ndef test_insert_series(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3]})\n ser = Series([1, 2, 3])\n ser_orig = ser.copy()\n df.insert(loc=1, value=ser, column="b")\n if using_copy_on_write:\n assert np.shares_memory(get_array(ser), get_array(df, "b"))\n assert not df._mgr._has_no_reference(1)\n else:\n assert not np.shares_memory(get_array(ser), get_array(df, "b"))\n\n df.iloc[0, 1] = 100\n tm.assert_series_equal(ser, ser_orig)\n\n\ndef test_eval(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": 1})\n df_orig = df.copy()\n\n result = df.eval("c = a+b")\n if 
using_copy_on_write:\n assert np.shares_memory(get_array(df, "a"), get_array(result, "a"))\n else:\n assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))\n\n result.iloc[0, 0] = 100\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_eval_inplace(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": 1})\n df_orig = df.copy()\n df_view = df[:]\n\n df.eval("c = a+b", inplace=True)\n assert np.shares_memory(get_array(df, "a"), get_array(df_view, "a"))\n\n with tm.assert_cow_warning(warn_copy_on_write):\n df.iloc[0, 0] = 100\n if using_copy_on_write:\n tm.assert_frame_equal(df_view, df_orig)\n\n\ndef test_apply_modify_row(using_copy_on_write, warn_copy_on_write):\n # Case: applying a function on each row as a Series object, where the\n # function mutates the row object (which needs to trigger CoW if row is a view)\n df = DataFrame({"A": [1, 2], "B": [3, 4]})\n df_orig = df.copy()\n\n def transform(row):\n row["B"] = 100\n return row\n\n with tm.assert_cow_warning(warn_copy_on_write):\n df.apply(transform, axis=1)\n\n if using_copy_on_write:\n tm.assert_frame_equal(df, df_orig)\n else:\n assert df.loc[0, "B"] == 100\n\n # row Series is a copy\n df = DataFrame({"A": [1, 2], "B": ["b", "c"]})\n df_orig = df.copy()\n\n with tm.assert_produces_warning(None):\n df.apply(transform, axis=1)\n\n tm.assert_frame_equal(df, df_orig)\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_methods.py | test_methods.py | Python | 71,834 | 0.75 | 0.139749 | 0.06295 | python-kit | 807 | 2024-09-11T07:48:55.225569 | BSD-3-Clause | true | 895e5bebdbdd318427e3a36c35819d4b |
import numpy as np\nimport pytest\n\nfrom pandas import (\n Categorical,\n DataFrame,\n option_context,\n)\nimport pandas._testing as tm\nfrom pandas.tests.copy_view.util import get_array\n\n\n@pytest.mark.parametrize(\n "replace_kwargs",\n [\n {"to_replace": {"a": 1, "b": 4}, "value": -1},\n # Test CoW splits blocks to avoid copying unchanged columns\n {"to_replace": {"a": 1}, "value": -1},\n {"to_replace": {"b": 4}, "value": -1},\n {"to_replace": {"b": {4: 1}}},\n # TODO: Add these in a further optimization\n # We would need to see which columns got replaced in the mask\n # which could be expensive\n # {"to_replace": {"b": 1}},\n # 1\n ],\n)\ndef test_replace(using_copy_on_write, replace_kwargs):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6], "c": [0.1, 0.2, 0.3]})\n df_orig = df.copy()\n\n df_replaced = df.replace(**replace_kwargs)\n\n if using_copy_on_write:\n if (df_replaced["b"] == df["b"]).all():\n assert np.shares_memory(get_array(df_replaced, "b"), get_array(df, "b"))\n assert tm.shares_memory(get_array(df_replaced, "c"), get_array(df, "c"))\n\n # mutating squeezed df triggers a copy-on-write for that column/block\n df_replaced.loc[0, "c"] = -1\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df_replaced, "c"), get_array(df, "c"))\n\n if "a" in replace_kwargs["to_replace"]:\n arr = get_array(df_replaced, "a")\n df_replaced.loc[0, "a"] = 100\n assert np.shares_memory(get_array(df_replaced, "a"), arr)\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_replace_regex_inplace_refs(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": ["aaa", "bbb"]})\n df_orig = df.copy()\n view = df[:]\n arr = get_array(df, "a")\n with tm.assert_cow_warning(warn_copy_on_write):\n df.replace(to_replace=r"^a.*$", value="new", inplace=True, regex=True)\n if using_copy_on_write:\n assert not tm.shares_memory(arr, get_array(df, "a"))\n assert df._mgr._has_no_reference(0)\n tm.assert_frame_equal(view, df_orig)\n else:\n assert np.shares_memory(arr, 
get_array(df, "a"))\n\n\ndef test_replace_regex_inplace(using_copy_on_write):\n df = DataFrame({"a": ["aaa", "bbb"]})\n arr = get_array(df, "a")\n df.replace(to_replace=r"^a.*$", value="new", inplace=True, regex=True)\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n assert tm.shares_memory(arr, get_array(df, "a"))\n\n df_orig = df.copy()\n df2 = df.replace(to_replace=r"^b.*$", value="new", regex=True)\n tm.assert_frame_equal(df_orig, df)\n assert not tm.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n\ndef test_replace_regex_inplace_no_op(using_copy_on_write):\n df = DataFrame({"a": [1, 2]})\n arr = get_array(df, "a")\n df.replace(to_replace=r"^a.$", value="new", inplace=True, regex=True)\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n assert np.shares_memory(arr, get_array(df, "a"))\n\n df_orig = df.copy()\n df2 = df.replace(to_replace=r"^x.$", value="new", regex=True)\n tm.assert_frame_equal(df_orig, df)\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n\ndef test_replace_mask_all_false_second_block(using_copy_on_write):\n df = DataFrame({"a": [1.5, 2, 3], "b": 100.5, "c": 1, "d": 2})\n df_orig = df.copy()\n\n df2 = df.replace(to_replace=1.5, value=55.5)\n\n if using_copy_on_write:\n # TODO: Block splitting would allow us to avoid copying b\n assert np.shares_memory(get_array(df, "c"), get_array(df2, "c"))\n assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n\n else:\n assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))\n assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n\n df2.loc[0, "c"] = 1\n tm.assert_frame_equal(df, df_orig) # Original is unchanged\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))\n # TODO: This should split and not copy the whole block\n # assert 
np.shares_memory(get_array(df, "d"), get_array(df2, "d"))\n\n\ndef test_replace_coerce_single_column(using_copy_on_write, using_array_manager):\n df = DataFrame({"a": [1.5, 2, 3], "b": 100.5})\n df_orig = df.copy()\n\n df2 = df.replace(to_replace=1.5, value="a")\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n\n elif not using_array_manager:\n assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n\n if using_copy_on_write:\n df2.loc[0, "b"] = 0.5\n tm.assert_frame_equal(df, df_orig) # Original is unchanged\n assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n\n\ndef test_replace_to_replace_wrong_dtype(using_copy_on_write):\n df = DataFrame({"a": [1.5, 2, 3], "b": 100.5})\n df_orig = df.copy()\n\n df2 = df.replace(to_replace="xxx", value=1.5)\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n assert np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n\n else:\n assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n\n df2.loc[0, "b"] = 0.5\n tm.assert_frame_equal(df, df_orig) # Original is unchanged\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df, "b"), get_array(df2, "b"))\n\n\ndef test_replace_list_categorical(using_copy_on_write):\n df = DataFrame({"a": ["a", "b", "c"]}, dtype="category")\n arr = get_array(df, "a")\n msg = (\n r"The behavior of Series\.replace \(and DataFrame.replace\) "\n "with CategoricalDtype"\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.replace(["c"], value="a", inplace=True)\n assert np.shares_memory(arr.codes, get_array(df, "a").codes)\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n\n df_orig = df.copy()\n with 
tm.assert_produces_warning(FutureWarning, match=msg):\n df2 = df.replace(["b"], value="a")\n assert not np.shares_memory(arr.codes, get_array(df2, "a").codes)\n\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_replace_list_inplace_refs_categorical(using_copy_on_write):\n df = DataFrame({"a": ["a", "b", "c"]}, dtype="category")\n view = df[:]\n df_orig = df.copy()\n msg = (\n r"The behavior of Series\.replace \(and DataFrame.replace\) "\n "with CategoricalDtype"\n )\n with tm.assert_produces_warning(FutureWarning, match=msg):\n df.replace(["c"], value="a", inplace=True)\n if using_copy_on_write:\n assert not np.shares_memory(\n get_array(view, "a").codes, get_array(df, "a").codes\n )\n tm.assert_frame_equal(df_orig, view)\n else:\n # This could be inplace\n assert not np.shares_memory(\n get_array(view, "a").codes, get_array(df, "a").codes\n )\n\n\n@pytest.mark.parametrize("to_replace", [1.5, [1.5], []])\ndef test_replace_inplace(using_copy_on_write, to_replace):\n df = DataFrame({"a": [1.5, 2, 3]})\n arr_a = get_array(df, "a")\n df.replace(to_replace=1.5, value=15.5, inplace=True)\n\n assert np.shares_memory(get_array(df, "a"), arr_a)\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n\n\n@pytest.mark.parametrize("to_replace", [1.5, [1.5]])\ndef test_replace_inplace_reference(using_copy_on_write, to_replace, warn_copy_on_write):\n df = DataFrame({"a": [1.5, 2, 3]})\n arr_a = get_array(df, "a")\n view = df[:]\n with tm.assert_cow_warning(warn_copy_on_write):\n df.replace(to_replace=to_replace, value=15.5, inplace=True)\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df, "a"), arr_a)\n assert df._mgr._has_no_reference(0)\n assert view._mgr._has_no_reference(0)\n else:\n assert np.shares_memory(get_array(df, "a"), arr_a)\n\n\n@pytest.mark.parametrize("to_replace", ["a", 100.5])\ndef test_replace_inplace_reference_no_op(using_copy_on_write, to_replace):\n df = DataFrame({"a": [1.5, 2, 3]})\n arr_a = get_array(df, "a")\n view = 
df[:]\n df.replace(to_replace=to_replace, value=15.5, inplace=True)\n\n assert np.shares_memory(get_array(df, "a"), arr_a)\n if using_copy_on_write:\n assert not df._mgr._has_no_reference(0)\n assert not view._mgr._has_no_reference(0)\n\n\n@pytest.mark.parametrize("to_replace", [1, [1]])\n@pytest.mark.parametrize("val", [1, 1.5])\ndef test_replace_categorical_inplace_reference(using_copy_on_write, val, to_replace):\n df = DataFrame({"a": Categorical([1, 2, 3])})\n df_orig = df.copy()\n arr_a = get_array(df, "a")\n view = df[:]\n msg = (\n r"The behavior of Series\.replace \(and DataFrame.replace\) "\n "with CategoricalDtype"\n )\n warn = FutureWarning if val == 1.5 else None\n with tm.assert_produces_warning(warn, match=msg):\n df.replace(to_replace=to_replace, value=val, inplace=True)\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df, "a").codes, arr_a.codes)\n assert df._mgr._has_no_reference(0)\n assert view._mgr._has_no_reference(0)\n tm.assert_frame_equal(view, df_orig)\n else:\n assert np.shares_memory(get_array(df, "a").codes, arr_a.codes)\n\n\n@pytest.mark.parametrize("val", [1, 1.5])\ndef test_replace_categorical_inplace(using_copy_on_write, val):\n df = DataFrame({"a": Categorical([1, 2, 3])})\n arr_a = get_array(df, "a")\n msg = (\n r"The behavior of Series\.replace \(and DataFrame.replace\) "\n "with CategoricalDtype"\n )\n warn = FutureWarning if val == 1.5 else None\n with tm.assert_produces_warning(warn, match=msg):\n df.replace(to_replace=1, value=val, inplace=True)\n\n assert np.shares_memory(get_array(df, "a").codes, arr_a.codes)\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n\n expected = DataFrame({"a": Categorical([val, 2, 3])})\n tm.assert_frame_equal(df, expected)\n\n\n@pytest.mark.parametrize("val", [1, 1.5])\ndef test_replace_categorical(using_copy_on_write, val):\n df = DataFrame({"a": Categorical([1, 2, 3])})\n df_orig = df.copy()\n msg = (\n r"The behavior of Series\.replace \(and 
DataFrame.replace\) "\n "with CategoricalDtype"\n )\n warn = FutureWarning if val == 1.5 else None\n with tm.assert_produces_warning(warn, match=msg):\n df2 = df.replace(to_replace=1, value=val)\n\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n assert df2._mgr._has_no_reference(0)\n assert not np.shares_memory(get_array(df, "a").codes, get_array(df2, "a").codes)\n tm.assert_frame_equal(df, df_orig)\n\n arr_a = get_array(df2, "a").codes\n df2.iloc[0, 0] = 2.0\n assert np.shares_memory(get_array(df2, "a").codes, arr_a)\n\n\n@pytest.mark.parametrize("method", ["where", "mask"])\ndef test_masking_inplace(using_copy_on_write, method, warn_copy_on_write):\n df = DataFrame({"a": [1.5, 2, 3]})\n df_orig = df.copy()\n arr_a = get_array(df, "a")\n view = df[:]\n\n method = getattr(df, method)\n if warn_copy_on_write:\n with tm.assert_cow_warning():\n method(df["a"] > 1.6, -1, inplace=True)\n else:\n method(df["a"] > 1.6, -1, inplace=True)\n\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df, "a"), arr_a)\n assert df._mgr._has_no_reference(0)\n assert view._mgr._has_no_reference(0)\n tm.assert_frame_equal(view, df_orig)\n else:\n assert np.shares_memory(get_array(df, "a"), arr_a)\n\n\ndef test_replace_empty_list(using_copy_on_write):\n df = DataFrame({"a": [1, 2]})\n\n df2 = df.replace([], [])\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n assert not df._mgr._has_no_reference(0)\n else:\n assert not np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n\n arr_a = get_array(df, "a")\n df.replace([], [])\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "a"), arr_a)\n assert not df._mgr._has_no_reference(0)\n assert not df2._mgr._has_no_reference(0)\n\n\n@pytest.mark.parametrize("value", ["d", None])\ndef test_replace_object_list_inplace(using_copy_on_write, value):\n df = DataFrame({"a": ["a", "b", "c"]}, dtype=object)\n arr = get_array(df, "a")\n df.replace(["c"], value, 
inplace=True)\n if using_copy_on_write or value is None:\n assert tm.shares_memory(arr, get_array(df, "a"))\n else:\n # This could be inplace\n assert not np.shares_memory(arr, get_array(df, "a"))\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n\n\ndef test_replace_list_multiple_elements_inplace(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3]})\n arr = get_array(df, "a")\n df.replace([1, 2], 4, inplace=True)\n if using_copy_on_write:\n assert np.shares_memory(arr, get_array(df, "a"))\n assert df._mgr._has_no_reference(0)\n else:\n assert np.shares_memory(arr, get_array(df, "a"))\n\n\ndef test_replace_list_none(using_copy_on_write):\n df = DataFrame({"a": ["a", "b", "c"]})\n\n df_orig = df.copy()\n df2 = df.replace(["b"], value=None)\n tm.assert_frame_equal(df, df_orig)\n\n assert not np.shares_memory(get_array(df, "a"), get_array(df2, "a"))\n\n # replace multiple values that don't actually replace anything with None\n # https://github.com/pandas-dev/pandas/issues/59770\n df3 = df.replace(["d", "e", "f"], value=None)\n tm.assert_frame_equal(df3, df_orig)\n if using_copy_on_write:\n assert tm.shares_memory(get_array(df, "a"), get_array(df3, "a"))\n else:\n assert not tm.shares_memory(get_array(df, "a"), get_array(df3, "a"))\n\n\ndef test_replace_list_none_inplace_refs(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": ["a", "b", "c"]})\n arr = get_array(df, "a")\n df_orig = df.copy()\n view = df[:]\n with tm.assert_cow_warning(warn_copy_on_write):\n df.replace(["a"], value=None, inplace=True)\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n assert not np.shares_memory(arr, get_array(df, "a"))\n tm.assert_frame_equal(df_orig, view)\n else:\n assert np.shares_memory(arr, get_array(df, "a"))\n\n\ndef test_replace_columnwise_no_op_inplace(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})\n view = df[:]\n df_orig = df.copy()\n df.replace({"a": 10}, 100, inplace=True)\n if using_copy_on_write:\n 
assert np.shares_memory(get_array(view, "a"), get_array(df, "a"))\n df.iloc[0, 0] = 100\n tm.assert_frame_equal(view, df_orig)\n\n\ndef test_replace_columnwise_no_op(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})\n df_orig = df.copy()\n df2 = df.replace({"a": 10}, 100)\n if using_copy_on_write:\n assert np.shares_memory(get_array(df2, "a"), get_array(df, "a"))\n df2.iloc[0, 0] = 100\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_replace_chained_assignment(using_copy_on_write):\n df = DataFrame({"a": [1, np.nan, 2], "b": 1})\n df_orig = df.copy()\n if using_copy_on_write:\n with tm.raises_chained_assignment_error():\n df["a"].replace(1, 100, inplace=True)\n tm.assert_frame_equal(df, df_orig)\n\n with tm.raises_chained_assignment_error():\n df[["a"]].replace(1, 100, inplace=True)\n tm.assert_frame_equal(df, df_orig)\n else:\n with tm.assert_produces_warning(None):\n with option_context("mode.chained_assignment", None):\n df[["a"]].replace(1, 100, inplace=True)\n\n with tm.assert_produces_warning(None):\n with option_context("mode.chained_assignment", None):\n df[df.a > 5].replace(1, 100, inplace=True)\n\n with tm.assert_produces_warning(FutureWarning, match="inplace method"):\n df["a"].replace(1, 100, inplace=True)\n\n\ndef test_replace_listlike(using_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})\n df_orig = df.copy()\n\n result = df.replace([200, 201], [11, 11])\n if using_copy_on_write:\n assert np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n else:\n assert not np.shares_memory(get_array(result, "a"), get_array(df, "a"))\n\n result.iloc[0, 0] = 100\n tm.assert_frame_equal(df, df)\n\n result = df.replace([200, 2], [10, 10])\n assert not np.shares_memory(get_array(df, "a"), get_array(result, "a"))\n tm.assert_frame_equal(df, df_orig)\n\n\ndef test_replace_listlike_inplace(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3]})\n arr = get_array(df, "a")\n 
df.replace([200, 2], [10, 11], inplace=True)\n assert np.shares_memory(get_array(df, "a"), arr)\n\n view = df[:]\n df_orig = df.copy()\n with tm.assert_cow_warning(warn_copy_on_write):\n df.replace([200, 3], [10, 11], inplace=True)\n if using_copy_on_write:\n assert not np.shares_memory(get_array(df, "a"), arr)\n tm.assert_frame_equal(view, df_orig)\n else:\n assert np.shares_memory(get_array(df, "a"), arr)\n tm.assert_frame_equal(df, view)\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_replace.py | test_replace.py | Python | 17,540 | 0.95 | 0.134694 | 0.035623 | react-lib | 614 | 2025-01-15T21:25:11.826942 | MIT | true | b1871bbd0130b9bd250dd74a2806acc8 |
import numpy as np\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n RangeIndex,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.tests.copy_view.util import get_array\n\n# -----------------------------------------------------------------------------\n# Copy/view behaviour for the values that are set in a DataFrame\n\n\ndef test_set_column_with_array():\n # Case: setting an array as a new column (df[col] = arr) copies that data\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n arr = np.array([1, 2, 3], dtype="int64")\n\n df["c"] = arr\n\n # the array data is copied\n assert not np.shares_memory(get_array(df, "c"), arr)\n # and thus modifying the array does not modify the DataFrame\n arr[0] = 0\n tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c"))\n\n\ndef test_set_column_with_series(using_copy_on_write):\n # Case: setting a series as a new column (df[col] = s) copies that data\n # (with delayed copy with CoW)\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n ser = Series([1, 2, 3])\n\n df["c"] = ser\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "c"), get_array(ser))\n else:\n # the series data is copied\n assert not np.shares_memory(get_array(df, "c"), get_array(ser))\n\n # and modifying the series does not modify the DataFrame\n ser.iloc[0] = 0\n assert ser.iloc[0] == 0\n tm.assert_series_equal(df["c"], Series([1, 2, 3], name="c"))\n\n\ndef test_set_column_with_index(using_copy_on_write):\n # Case: setting an index as a new column (df[col] = idx) copies that data\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n idx = Index([1, 2, 3])\n\n df["c"] = idx\n\n # the index data is copied\n assert not np.shares_memory(get_array(df, "c"), idx.values)\n\n idx = RangeIndex(1, 4)\n arr = idx.values\n\n df["d"] = idx\n\n assert not np.shares_memory(get_array(df, "d"), arr)\n\n\ndef test_set_columns_with_dataframe(using_copy_on_write):\n # Case: setting a DataFrame as new columns copies that data\n # (with delayed 
copy with CoW)\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n df2 = DataFrame({"c": [7, 8, 9], "d": [10, 11, 12]})\n\n df[["c", "d"]] = df2\n\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "c"), get_array(df2, "c"))\n else:\n # the data is copied\n assert not np.shares_memory(get_array(df, "c"), get_array(df2, "c"))\n\n # and modifying the set DataFrame does not modify the original DataFrame\n df2.iloc[0, 0] = 0\n tm.assert_series_equal(df["c"], Series([7, 8, 9], name="c"))\n\n\ndef test_setitem_series_no_copy(using_copy_on_write):\n # Case: setting a Series as column into a DataFrame can delay copying that data\n df = DataFrame({"a": [1, 2, 3]})\n rhs = Series([4, 5, 6])\n rhs_orig = rhs.copy()\n\n # adding a new column\n df["b"] = rhs\n if using_copy_on_write:\n assert np.shares_memory(get_array(rhs), get_array(df, "b"))\n\n df.iloc[0, 1] = 100\n tm.assert_series_equal(rhs, rhs_orig)\n\n\ndef test_setitem_series_no_copy_single_block(using_copy_on_write):\n # Overwriting an existing column that is a single block\n df = DataFrame({"a": [1, 2, 3], "b": [0.1, 0.2, 0.3]})\n rhs = Series([4, 5, 6])\n rhs_orig = rhs.copy()\n\n df["a"] = rhs\n if using_copy_on_write:\n assert np.shares_memory(get_array(rhs), get_array(df, "a"))\n\n df.iloc[0, 0] = 100\n tm.assert_series_equal(rhs, rhs_orig)\n\n\ndef test_setitem_series_no_copy_split_block(using_copy_on_write):\n # Overwriting an existing column that is part of a larger block\n df = DataFrame({"a": [1, 2, 3], "b": 1})\n rhs = Series([4, 5, 6])\n rhs_orig = rhs.copy()\n\n df["b"] = rhs\n if using_copy_on_write:\n assert np.shares_memory(get_array(rhs), get_array(df, "b"))\n\n df.iloc[0, 1] = 100\n tm.assert_series_equal(rhs, rhs_orig)\n\n\ndef test_setitem_series_column_midx_broadcasting(using_copy_on_write):\n # Setting a Series to multiple columns will repeat the data\n # (currently copying the data eagerly)\n df = DataFrame(\n [[1, 2, 3], [3, 4, 5]],\n columns=MultiIndex.from_arrays([["a", "a", 
"b"], [1, 2, 3]]),\n )\n rhs = Series([10, 11])\n df["a"] = rhs\n assert not np.shares_memory(get_array(rhs), df._get_column_array(0))\n if using_copy_on_write:\n assert df._mgr._has_no_reference(0)\n\n\ndef test_set_column_with_inplace_operator(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n\n # this should not raise any warning\n with tm.assert_produces_warning(None):\n df["a"] += 1\n\n # when it is not in a chain, then it should produce a warning\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]})\n ser = df["a"]\n with tm.assert_cow_warning(warn_copy_on_write):\n ser += 1\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_setitem.py | test_setitem.py | Python | 4,822 | 0.95 | 0.102564 | 0.2 | vue-tools | 813 | 2024-02-29T00:42:24.747781 | MIT | true | eb84b590f1c596daef2512b065589d7d |
import numpy as np\n\nfrom pandas import DataFrame\nfrom pandas.tests.copy_view.util import get_array\n\n\ndef test_get_array_numpy():\n df = DataFrame({"a": [1, 2, 3]})\n assert np.shares_memory(get_array(df, "a"), get_array(df, "a"))\n\n\ndef test_get_array_masked():\n df = DataFrame({"a": [1, 2, 3]}, dtype="Int64")\n assert np.shares_memory(get_array(df, "a"), get_array(df, "a"))\n | .venv\Lib\site-packages\pandas\tests\copy_view\test_util.py | test_util.py | Python | 385 | 0.85 | 0.142857 | 0 | node-utils | 143 | 2024-09-07T14:07:27.389614 | MIT | true | cbdb994ed1aa04cf7fd80b2f34b3e7ab |
from pandas import (\n Categorical,\n Index,\n Series,\n)\nfrom pandas.core.arrays import BaseMaskedArray\n\n\ndef get_array(obj, col=None):\n """\n Helper method to get array for a DataFrame column or a Series.\n\n Equivalent of df[col].values, but without going through normal getitem,\n which triggers tracking references / CoW (and we might be testing that\n this is done by some other operation).\n """\n if isinstance(obj, Index):\n arr = obj._values\n elif isinstance(obj, Series) and (col is None or obj.name == col):\n arr = obj._values\n else:\n assert col is not None\n icol = obj.columns.get_loc(col)\n assert isinstance(icol, int)\n arr = obj._get_column_array(icol)\n if isinstance(arr, BaseMaskedArray):\n return arr._data\n elif isinstance(arr, Categorical):\n return arr\n return getattr(arr, "_ndarray", arr)\n | .venv\Lib\site-packages\pandas\tests\copy_view\util.py | util.py | Python | 899 | 0.85 | 0.133333 | 0 | react-lib | 924 | 2025-03-30T16:45:31.675613 | Apache-2.0 | true | 96bff04be767191802a17b9a1ad23f57 |
import pytest\n\nfrom pandas import (\n DatetimeIndex,\n Series,\n Timestamp,\n date_range,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Setting a value on a view:FutureWarning"\n)\n\n\n@pytest.mark.parametrize(\n "cons",\n [\n lambda x: DatetimeIndex(x),\n lambda x: DatetimeIndex(DatetimeIndex(x)),\n ],\n)\ndef test_datetimeindex(using_copy_on_write, cons):\n dt = date_range("2019-12-31", periods=3, freq="D")\n ser = Series(dt)\n idx = cons(ser)\n expected = idx.copy(deep=True)\n ser.iloc[0] = Timestamp("2020-12-31")\n if using_copy_on_write:\n tm.assert_index_equal(idx, expected)\n\n\ndef test_datetimeindex_tz_convert(using_copy_on_write):\n dt = date_range("2019-12-31", periods=3, freq="D", tz="Europe/Berlin")\n ser = Series(dt)\n idx = DatetimeIndex(ser).tz_convert("US/Eastern")\n expected = idx.copy(deep=True)\n ser.iloc[0] = Timestamp("2020-12-31", tz="Europe/Berlin")\n if using_copy_on_write:\n tm.assert_index_equal(idx, expected)\n\n\ndef test_datetimeindex_tz_localize(using_copy_on_write):\n dt = date_range("2019-12-31", periods=3, freq="D")\n ser = Series(dt)\n idx = DatetimeIndex(ser).tz_localize("Europe/Berlin")\n expected = idx.copy(deep=True)\n ser.iloc[0] = Timestamp("2020-12-31")\n if using_copy_on_write:\n tm.assert_index_equal(idx, expected)\n\n\ndef test_datetimeindex_isocalendar(using_copy_on_write):\n dt = date_range("2019-12-31", periods=3, freq="D")\n ser = Series(dt)\n df = DatetimeIndex(ser).isocalendar()\n expected = df.index.copy(deep=True)\n ser.iloc[0] = Timestamp("2020-12-31")\n if using_copy_on_write:\n tm.assert_index_equal(df.index, expected)\n\n\ndef test_index_values(using_copy_on_write):\n idx = date_range("2019-12-31", periods=3, freq="D")\n result = idx.values\n if using_copy_on_write:\n assert result.flags.writeable is False\n else:\n assert result.flags.writeable is True\n | .venv\Lib\site-packages\pandas\tests\copy_view\index\test_datetimeindex.py | test_datetimeindex.py | Python | 
1,980 | 0.85 | 0.144928 | 0 | python-kit | 471 | 2025-01-21T04:45:54.864153 | Apache-2.0 | true | d2651a825a84c8c081d2c7cefbf3607a |
import numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Index,\n Series,\n)\nimport pandas._testing as tm\nfrom pandas.tests.copy_view.util import get_array\n\n\ndef index_view(index_data=[1, 2]):\n df = DataFrame({"a": index_data, "b": 1.5})\n view = df[:]\n df = df.set_index("a", drop=True)\n idx = df.index\n # df = None\n return idx, view\n\n\ndef test_set_index_update_column(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2], "b": 1})\n df = df.set_index("a", drop=False)\n expected = df.index.copy(deep=True)\n with tm.assert_cow_warning(warn_copy_on_write):\n df.iloc[0, 0] = 100\n if using_copy_on_write:\n tm.assert_index_equal(df.index, expected)\n else:\n tm.assert_index_equal(df.index, Index([100, 2], name="a"))\n\n\ndef test_set_index_drop_update_column(using_copy_on_write):\n df = DataFrame({"a": [1, 2], "b": 1.5})\n view = df[:]\n df = df.set_index("a", drop=True)\n expected = df.index.copy(deep=True)\n view.iloc[0, 0] = 100\n tm.assert_index_equal(df.index, expected)\n\n\ndef test_set_index_series(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2], "b": 1.5})\n ser = Series([10, 11])\n df = df.set_index(ser)\n expected = df.index.copy(deep=True)\n with tm.assert_cow_warning(warn_copy_on_write):\n ser.iloc[0] = 100\n if using_copy_on_write:\n tm.assert_index_equal(df.index, expected)\n else:\n tm.assert_index_equal(df.index, Index([100, 11]))\n\n\ndef test_assign_index_as_series(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2], "b": 1.5})\n ser = Series([10, 11])\n df.index = ser\n expected = df.index.copy(deep=True)\n with tm.assert_cow_warning(warn_copy_on_write):\n ser.iloc[0] = 100\n if using_copy_on_write:\n tm.assert_index_equal(df.index, expected)\n else:\n tm.assert_index_equal(df.index, Index([100, 11]))\n\n\ndef test_assign_index_as_index(using_copy_on_write, warn_copy_on_write):\n df = DataFrame({"a": [1, 2], "b": 1.5})\n ser = Series([10, 11])\n rhs_index = 
Index(ser)\n df.index = rhs_index\n rhs_index = None # overwrite to clear reference\n expected = df.index.copy(deep=True)\n with tm.assert_cow_warning(warn_copy_on_write):\n ser.iloc[0] = 100\n if using_copy_on_write:\n tm.assert_index_equal(df.index, expected)\n else:\n tm.assert_index_equal(df.index, Index([100, 11]))\n\n\ndef test_index_from_series(using_copy_on_write, warn_copy_on_write):\n ser = Series([1, 2])\n idx = Index(ser)\n expected = idx.copy(deep=True)\n with tm.assert_cow_warning(warn_copy_on_write):\n ser.iloc[0] = 100\n if using_copy_on_write:\n tm.assert_index_equal(idx, expected)\n else:\n tm.assert_index_equal(idx, Index([100, 2]))\n\n\ndef test_index_from_series_copy(using_copy_on_write):\n ser = Series([1, 2])\n idx = Index(ser, copy=True) # noqa: F841\n arr = get_array(ser)\n ser.iloc[0] = 100\n assert np.shares_memory(get_array(ser), arr)\n\n\ndef test_index_from_index(using_copy_on_write, warn_copy_on_write):\n ser = Series([1, 2])\n idx = Index(ser)\n idx = Index(idx)\n expected = idx.copy(deep=True)\n with tm.assert_cow_warning(warn_copy_on_write):\n ser.iloc[0] = 100\n if using_copy_on_write:\n tm.assert_index_equal(idx, expected)\n else:\n tm.assert_index_equal(idx, Index([100, 2]))\n\n\n@pytest.mark.parametrize(\n "func",\n [\n lambda x: x._shallow_copy(x._values),\n lambda x: x.view(),\n lambda x: x.take([0, 1]),\n lambda x: x.repeat([1, 1]),\n lambda x: x[slice(0, 2)],\n lambda x: x[[0, 1]],\n lambda x: x._getitem_slice(slice(0, 2)),\n lambda x: x.delete([]),\n lambda x: x.rename("b"),\n lambda x: x.astype("Int64", copy=False),\n ],\n ids=[\n "_shallow_copy",\n "view",\n "take",\n "repeat",\n "getitem_slice",\n "getitem_list",\n "_getitem_slice",\n "delete",\n "rename",\n "astype",\n ],\n)\ndef test_index_ops(using_copy_on_write, func, request):\n idx, view_ = index_view()\n expected = idx.copy(deep=True)\n if "astype" in request.node.callspec.id:\n expected = expected.astype("Int64")\n idx = func(idx)\n view_.iloc[0, 0] = 100\n if 
using_copy_on_write:\n tm.assert_index_equal(idx, expected, check_names=False)\n\n\ndef test_infer_objects(using_copy_on_write):\n idx, view_ = index_view(["a", "b"])\n expected = idx.copy(deep=True)\n idx = idx.infer_objects(copy=False)\n view_.iloc[0, 0] = "aaaa"\n if using_copy_on_write:\n tm.assert_index_equal(idx, expected, check_names=False)\n\n\ndef test_index_to_frame(using_copy_on_write):\n idx = Index([1, 2, 3], name="a")\n expected = idx.copy(deep=True)\n df = idx.to_frame()\n if using_copy_on_write:\n assert np.shares_memory(get_array(df, "a"), idx._values)\n assert not df._mgr._has_no_reference(0)\n else:\n assert not np.shares_memory(get_array(df, "a"), idx._values)\n\n df.iloc[0, 0] = 100\n tm.assert_index_equal(idx, expected)\n\n\ndef test_index_values(using_copy_on_write):\n idx = Index([1, 2, 3])\n result = idx.values\n if using_copy_on_write:\n assert result.flags.writeable is False\n else:\n assert result.flags.writeable is True\n | .venv\Lib\site-packages\pandas\tests\copy_view\index\test_index.py | test_index.py | Python | 5,363 | 0.95 | 0.130435 | 0.00641 | vue-tools | 772 | 2024-01-06T06:40:32.711379 | GPL-3.0 | true | 0b1eddff5aa4b293912062f251705605 |
import pytest\n\nfrom pandas import (\n Period,\n PeriodIndex,\n Series,\n period_range,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Setting a value on a view:FutureWarning"\n)\n\n\n@pytest.mark.parametrize(\n "cons",\n [\n lambda x: PeriodIndex(x),\n lambda x: PeriodIndex(PeriodIndex(x)),\n ],\n)\ndef test_periodindex(using_copy_on_write, cons):\n dt = period_range("2019-12-31", periods=3, freq="D")\n ser = Series(dt)\n idx = cons(ser)\n expected = idx.copy(deep=True)\n ser.iloc[0] = Period("2020-12-31")\n if using_copy_on_write:\n tm.assert_index_equal(idx, expected)\n | .venv\Lib\site-packages\pandas\tests\copy_view\index\test_periodindex.py | test_periodindex.py | Python | 653 | 0.85 | 0.066667 | 0 | awesome-app | 53 | 2024-12-10T18:38:46.929159 | MIT | true | 2b8fd3d1235dc75b9689e0b9a5f94cd6 |
import pytest\n\nfrom pandas import (\n Series,\n Timedelta,\n TimedeltaIndex,\n timedelta_range,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Setting a value on a view:FutureWarning"\n)\n\n\n@pytest.mark.parametrize(\n "cons",\n [\n lambda x: TimedeltaIndex(x),\n lambda x: TimedeltaIndex(TimedeltaIndex(x)),\n ],\n)\ndef test_timedeltaindex(using_copy_on_write, cons):\n dt = timedelta_range("1 day", periods=3)\n ser = Series(dt)\n idx = cons(ser)\n expected = idx.copy(deep=True)\n ser.iloc[0] = Timedelta("5 days")\n if using_copy_on_write:\n tm.assert_index_equal(idx, expected)\n | .venv\Lib\site-packages\pandas\tests\copy_view\index\test_timedeltaindex.py | test_timedeltaindex.py | Python | 661 | 0.85 | 0.066667 | 0 | vue-tools | 155 | 2023-11-04T18:07:50.026142 | Apache-2.0 | true | c6518fffebec0bf51aa95deb3394c9f1 |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\index\__pycache__\test_datetimeindex.cpython-313.pyc | test_datetimeindex.cpython-313.pyc | Other | 3,671 | 0.8 | 0 | 0 | python-kit | 850 | 2025-07-05T14:56:07.194811 | Apache-2.0 | true | 4ee3907d923d1ee02768bff21b7dab8c |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\index\__pycache__\test_index.cpython-313.pyc | test_index.cpython-313.pyc | Other | 10,424 | 0.8 | 0 | 0 | react-lib | 181 | 2025-01-27T16:40:42.823544 | MIT | true | 9f6801d2d225ebcb4b45fc2311b2e9f5 |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\index\__pycache__\test_periodindex.cpython-313.pyc | test_periodindex.cpython-313.pyc | Other | 1,484 | 0.7 | 0 | 0 | awesome-app | 907 | 2025-02-20T13:38:33.197291 | Apache-2.0 | true | 49c8cf146ed7cbccdd2fdc134492ccc0 |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\index\__pycache__\test_timedeltaindex.cpython-313.pyc | test_timedeltaindex.cpython-313.pyc | Other | 1,479 | 0.8 | 0 | 0 | vue-tools | 36 | 2023-09-11T21:59:15.890562 | MIT | true | 51a2549311dc46d27534a199464ff87c |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\index\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 203 | 0.7 | 0 | 0 | react-lib | 67 | 2024-05-02T13:00:49.988681 | Apache-2.0 | true | 1a7f82069b6b814e682190fd74c76cc4 |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\test_array.cpython-313.pyc | test_array.cpython-313.pyc | Other | 11,029 | 0.8 | 0 | 0.088235 | python-kit | 873 | 2024-05-14T03:45:07.141987 | Apache-2.0 | true | 291eb9271a00e452a3dcaf4544009bb5 |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\test_astype.cpython-313.pyc | test_astype.cpython-313.pyc | Other | 16,198 | 0.95 | 0 | 0 | awesome-app | 357 | 2023-09-27T17:19:19.164144 | GPL-3.0 | true | 6b83c579cd25be4680adf787d04b94f0 |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\test_chained_assignment_deprecation.cpython-313.pyc | test_chained_assignment_deprecation.cpython-313.pyc | Other | 8,057 | 0.8 | 0 | 0 | python-kit | 87 | 2024-08-01T16:32:34.976766 | GPL-3.0 | true | 7fe5a1255822894929b108f58e61a49e |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\test_clip.cpython-313.pyc | test_clip.cpython-313.pyc | Other | 6,095 | 0.8 | 0 | 0 | awesome-app | 235 | 2024-09-11T07:23:06.503960 | GPL-3.0 | true | c7158ab40f4cdcc79e21f86f1d9a3292 |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\test_constructors.cpython-313.pyc | test_constructors.cpython-313.pyc | Other | 20,722 | 0.8 | 0 | 0.009346 | python-kit | 84 | 2025-01-03T12:50:06.923606 | GPL-3.0 | true | 65b4b966ed2897519de2d01152b4ca65 |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\test_core_functionalities.cpython-313.pyc | test_core_functionalities.cpython-313.pyc | Other | 5,808 | 0.95 | 0 | 0 | vue-tools | 854 | 2024-07-04T15:09:39.240346 | BSD-3-Clause | true | 69ac592a9a8a5a79e0e7029e62a75a28 |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\test_functions.cpython-313.pyc | test_functions.cpython-313.pyc | Other | 26,993 | 0.95 | 0.003448 | 0 | python-kit | 944 | 2023-12-25T13:44:36.741449 | BSD-3-Clause | true | 25f08fab3d23c7b48a4313195a71a72f |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\test_indexing.cpython-313.pyc | test_indexing.cpython-313.pyc | Other | 54,975 | 0.8 | 0 | 0 | react-lib | 779 | 2025-06-13T02:53:46.285537 | MIT | true | 507ffe4173a877159aa9bab1a54e05ba |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\test_internals.cpython-313.pyc | test_internals.cpython-313.pyc | Other | 8,006 | 0.8 | 0 | 0 | python-kit | 257 | 2024-10-21T17:33:41.690215 | BSD-3-Clause | true | a21f4684eea0df7c25bebeeb42a2bb2d |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\test_interp_fillna.cpython-313.pyc | test_interp_fillna.cpython-313.pyc | Other | 26,578 | 0.95 | 0 | 0.009494 | awesome-app | 564 | 2024-01-18T06:52:16.318554 | MIT | true | f6d3a77c54c3fc7168a424c79c6a6860 |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\test_replace.cpython-313.pyc | test_replace.cpython-313.pyc | Other | 28,977 | 0.8 | 0 | 0.040293 | react-lib | 378 | 2023-09-25T21:55:53.296826 | BSD-3-Clause | true | 5b4f06c8652e6e8aecb4c208beebecfd |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\test_setitem.cpython-313.pyc | test_setitem.cpython-313.pyc | Other | 6,614 | 0.8 | 0 | 0.061538 | awesome-app | 833 | 2023-10-18T17:00:53.313444 | MIT | true | 6e9467eb2cc527476b3a0294c02858e6 |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\test_util.cpython-313.pyc | test_util.cpython-313.pyc | Other | 996 | 0.8 | 0 | 0 | react-lib | 487 | 2025-06-03T20:19:02.984532 | Apache-2.0 | true | 5463acc22fa6339246eb071ec6747a07 |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\util.cpython-313.pyc | util.cpython-313.pyc | Other | 1,464 | 0.8 | 0.0625 | 0 | react-lib | 126 | 2024-12-15T17:11:56.326518 | GPL-3.0 | true | 59b7e79db4373843d86e8454eb14ff25 |
\n\n | .venv\Lib\site-packages\pandas\tests\copy_view\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 197 | 0.7 | 0 | 0 | node-utils | 805 | 2025-05-13T14:17:44.911646 | BSD-3-Clause | true | 29a8441fa859c07df92d18cee49965b4 |
from __future__ import annotations\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import HAS_PYARROW\nimport pandas.util._test_decorators as td\n\nfrom pandas.core.dtypes.astype import astype_array\nimport pandas.core.dtypes.common as com\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n CategoricalDtypeType,\n DatetimeTZDtype,\n ExtensionDtype,\n IntervalDtype,\n PeriodDtype,\n)\nfrom pandas.core.dtypes.missing import isna\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.api.types import pandas_dtype\nfrom pandas.arrays import SparseArray\nfrom pandas.util.version import Version\n\n\n# EA & Actual Dtypes\ndef to_ea_dtypes(dtypes):\n """convert list of string dtypes to EA dtype"""\n return [getattr(pd, dt + "Dtype") for dt in dtypes]\n\n\ndef to_numpy_dtypes(dtypes):\n """convert list of string dtypes to numpy dtype"""\n return [getattr(np, dt) for dt in dtypes if isinstance(dt, str)]\n\n\nclass TestNumpyEADtype:\n # Passing invalid dtype, both as a string or object, must raise TypeError\n # Per issue GH15520\n @pytest.mark.parametrize("box", [pd.Timestamp, "pd.Timestamp", list])\n def test_invalid_dtype_error(self, box):\n with pytest.raises(TypeError, match="not understood"):\n com.pandas_dtype(box)\n\n @pytest.mark.parametrize(\n "dtype",\n [\n object,\n "float64",\n np.object_,\n np.dtype("object"),\n "O",\n np.float64,\n float,\n np.dtype("float64"),\n "object_",\n ],\n )\n def test_pandas_dtype_valid(self, dtype):\n assert com.pandas_dtype(dtype) == dtype\n\n @pytest.mark.parametrize(\n "dtype", ["M8[ns]", "m8[ns]", "object", "float64", "int64"]\n )\n def test_numpy_dtype(self, dtype):\n assert com.pandas_dtype(dtype) == np.dtype(dtype)\n\n def test_numpy_string_dtype(self):\n # do not parse freq-like string as period dtype\n assert com.pandas_dtype("U") == np.dtype("U")\n assert com.pandas_dtype("S") == np.dtype("S")\n\n @pytest.mark.parametrize(\n "dtype",\n [\n "datetime64[ns, US/Eastern]",\n "datetime64[ns, 
Asia/Tokyo]",\n "datetime64[ns, UTC]",\n # GH#33885 check that the M8 alias is understood\n "M8[ns, US/Eastern]",\n "M8[ns, Asia/Tokyo]",\n "M8[ns, UTC]",\n ],\n )\n def test_datetimetz_dtype(self, dtype):\n assert com.pandas_dtype(dtype) == DatetimeTZDtype.construct_from_string(dtype)\n assert com.pandas_dtype(dtype) == dtype\n\n def test_categorical_dtype(self):\n assert com.pandas_dtype("category") == CategoricalDtype()\n\n @pytest.mark.parametrize(\n "dtype",\n [\n "period[D]",\n "period[3M]",\n "period[us]",\n "Period[D]",\n "Period[3M]",\n "Period[us]",\n ],\n )\n def test_period_dtype(self, dtype):\n assert com.pandas_dtype(dtype) is not PeriodDtype(dtype)\n assert com.pandas_dtype(dtype) == PeriodDtype(dtype)\n assert com.pandas_dtype(dtype) == dtype\n\n\ndtypes = {\n "datetime_tz": com.pandas_dtype("datetime64[ns, US/Eastern]"),\n "datetime": com.pandas_dtype("datetime64[ns]"),\n "timedelta": com.pandas_dtype("timedelta64[ns]"),\n "period": PeriodDtype("D"),\n "integer": np.dtype(np.int64),\n "float": np.dtype(np.float64),\n "object": np.dtype(object),\n "category": com.pandas_dtype("category"),\n "string": pd.StringDtype(),\n}\n\n\n@pytest.mark.parametrize("name1,dtype1", list(dtypes.items()), ids=lambda x: str(x))\n@pytest.mark.parametrize("name2,dtype2", list(dtypes.items()), ids=lambda x: str(x))\ndef test_dtype_equal(name1, dtype1, name2, dtype2):\n # match equal to self, but not equal to other\n assert com.is_dtype_equal(dtype1, dtype1)\n if name1 != name2:\n assert not com.is_dtype_equal(dtype1, dtype2)\n\n\n@pytest.mark.parametrize("name,dtype", list(dtypes.items()), ids=lambda x: str(x))\ndef test_pyarrow_string_import_error(name, dtype):\n # GH-44276\n assert not com.is_dtype_equal(dtype, "string[pyarrow]")\n\n\n@pytest.mark.parametrize(\n "dtype1,dtype2",\n [\n (np.int8, np.int64),\n (np.int16, np.int64),\n (np.int32, np.int64),\n (np.float32, np.float64),\n (PeriodDtype("D"), PeriodDtype("2D")), # PeriodType\n (\n 
com.pandas_dtype("datetime64[ns, US/Eastern]"),\n com.pandas_dtype("datetime64[ns, CET]"),\n ), # Datetime\n (None, None), # gh-15941: no exception should be raised.\n ],\n)\ndef test_dtype_equal_strict(dtype1, dtype2):\n assert not com.is_dtype_equal(dtype1, dtype2)\n\n\ndef get_is_dtype_funcs():\n """\n Get all functions in pandas.core.dtypes.common that\n begin with 'is_' and end with 'dtype'\n\n """\n fnames = [f for f in dir(com) if (f.startswith("is_") and f.endswith("dtype"))]\n fnames.remove("is_string_or_object_np_dtype") # fastpath requires np.dtype obj\n return [getattr(com, fname) for fname in fnames]\n\n\n@pytest.mark.filterwarnings(\n "ignore:is_categorical_dtype is deprecated:DeprecationWarning"\n)\n@pytest.mark.parametrize("func", get_is_dtype_funcs(), ids=lambda x: x.__name__)\ndef test_get_dtype_error_catch(func):\n # see gh-15941\n #\n # No exception should be raised.\n\n msg = f"{func.__name__} is deprecated"\n warn = None\n if (\n func is com.is_int64_dtype\n or func is com.is_interval_dtype\n or func is com.is_datetime64tz_dtype\n or func is com.is_categorical_dtype\n or func is com.is_period_dtype\n ):\n warn = DeprecationWarning\n\n with tm.assert_produces_warning(warn, match=msg):\n assert not func(None)\n\n\ndef test_is_object():\n assert com.is_object_dtype(object)\n assert com.is_object_dtype(np.array([], dtype=object))\n\n assert not com.is_object_dtype(int)\n assert not com.is_object_dtype(np.array([], dtype=int))\n assert not com.is_object_dtype([1, 2, 3])\n\n\n@pytest.mark.parametrize(\n "check_scipy", [False, pytest.param(True, marks=td.skip_if_no("scipy"))]\n)\ndef test_is_sparse(check_scipy):\n msg = "is_sparse is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert com.is_sparse(SparseArray([1, 2, 3]))\n\n assert not com.is_sparse(np.array([1, 2, 3]))\n\n if check_scipy:\n import scipy.sparse\n\n assert not com.is_sparse(scipy.sparse.bsr_matrix([1, 2, 3]))\n\n\ndef test_is_scipy_sparse():\n 
sp_sparse = pytest.importorskip("scipy.sparse")\n\n assert com.is_scipy_sparse(sp_sparse.bsr_matrix([1, 2, 3]))\n\n assert not com.is_scipy_sparse(SparseArray([1, 2, 3]))\n\n\ndef test_is_datetime64_dtype():\n assert not com.is_datetime64_dtype(object)\n assert not com.is_datetime64_dtype([1, 2, 3])\n assert not com.is_datetime64_dtype(np.array([], dtype=int))\n\n assert com.is_datetime64_dtype(np.datetime64)\n assert com.is_datetime64_dtype(np.array([], dtype=np.datetime64))\n\n\ndef test_is_datetime64tz_dtype():\n msg = "is_datetime64tz_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert not com.is_datetime64tz_dtype(object)\n assert not com.is_datetime64tz_dtype([1, 2, 3])\n assert not com.is_datetime64tz_dtype(pd.DatetimeIndex([1, 2, 3]))\n assert com.is_datetime64tz_dtype(pd.DatetimeIndex(["2000"], tz="US/Eastern"))\n\n\ndef test_custom_ea_kind_M_not_datetime64tz():\n # GH 34986\n class NotTZDtype(ExtensionDtype):\n @property\n def kind(self) -> str:\n return "M"\n\n not_tz_dtype = NotTZDtype()\n msg = "is_datetime64tz_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert not com.is_datetime64tz_dtype(not_tz_dtype)\n assert not com.needs_i8_conversion(not_tz_dtype)\n\n\ndef test_is_timedelta64_dtype():\n assert not com.is_timedelta64_dtype(object)\n assert not com.is_timedelta64_dtype(None)\n assert not com.is_timedelta64_dtype([1, 2, 3])\n assert not com.is_timedelta64_dtype(np.array([], dtype=np.datetime64))\n assert not com.is_timedelta64_dtype("0 days")\n assert not com.is_timedelta64_dtype("0 days 00:00:00")\n assert not com.is_timedelta64_dtype(["0 days 00:00:00"])\n assert not com.is_timedelta64_dtype("NO DATE")\n\n assert com.is_timedelta64_dtype(np.timedelta64)\n assert com.is_timedelta64_dtype(pd.Series([], dtype="timedelta64[ns]"))\n assert com.is_timedelta64_dtype(pd.to_timedelta(["0 days", "1 days"]))\n\n\ndef test_is_period_dtype():\n msg = "is_period_dtype is 
deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert not com.is_period_dtype(object)\n assert not com.is_period_dtype([1, 2, 3])\n assert not com.is_period_dtype(pd.Period("2017-01-01"))\n\n assert com.is_period_dtype(PeriodDtype(freq="D"))\n assert com.is_period_dtype(pd.PeriodIndex([], freq="Y"))\n\n\ndef test_is_interval_dtype():\n msg = "is_interval_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert not com.is_interval_dtype(object)\n assert not com.is_interval_dtype([1, 2, 3])\n\n assert com.is_interval_dtype(IntervalDtype())\n\n interval = pd.Interval(1, 2, closed="right")\n assert not com.is_interval_dtype(interval)\n assert com.is_interval_dtype(pd.IntervalIndex([interval]))\n\n\ndef test_is_categorical_dtype():\n msg = "is_categorical_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert not com.is_categorical_dtype(object)\n assert not com.is_categorical_dtype([1, 2, 3])\n\n assert com.is_categorical_dtype(CategoricalDtype())\n assert com.is_categorical_dtype(pd.Categorical([1, 2, 3]))\n assert com.is_categorical_dtype(pd.CategoricalIndex([1, 2, 3]))\n\n\n@pytest.mark.parametrize(\n "dtype, expected",\n [\n (int, False),\n (pd.Series([1, 2]), False),\n (str, True),\n (object, True),\n (np.array(["a", "b"]), True),\n (pd.StringDtype(), True),\n (pd.Index([], dtype="O"), True),\n ],\n)\ndef test_is_string_dtype(dtype, expected):\n # GH#54661\n\n result = com.is_string_dtype(dtype)\n assert result is expected\n\n\n@pytest.mark.parametrize(\n "data",\n [[(0, 1), (1, 1)], pd.Categorical([1, 2, 3]), np.array([1, 2], dtype=object)],\n)\ndef test_is_string_dtype_arraylike_with_object_elements_not_strings(data):\n # GH 15585\n assert not com.is_string_dtype(pd.Series(data))\n\n\ndef test_is_string_dtype_nullable(nullable_string_dtype):\n assert com.is_string_dtype(pd.array(["a", "b"], dtype=nullable_string_dtype))\n\n\ninteger_dtypes: list = 
[]\n\n\n@pytest.mark.parametrize(\n "dtype",\n integer_dtypes\n + [pd.Series([1, 2])]\n + tm.ALL_INT_NUMPY_DTYPES\n + to_numpy_dtypes(tm.ALL_INT_NUMPY_DTYPES)\n + tm.ALL_INT_EA_DTYPES\n + to_ea_dtypes(tm.ALL_INT_EA_DTYPES),\n)\ndef test_is_integer_dtype(dtype):\n assert com.is_integer_dtype(dtype)\n\n\n@pytest.mark.parametrize(\n "dtype",\n [\n str,\n float,\n np.datetime64,\n np.timedelta64,\n pd.Index([1, 2.0]),\n np.array(["a", "b"]),\n np.array([], dtype=np.timedelta64),\n ],\n)\ndef test_is_not_integer_dtype(dtype):\n assert not com.is_integer_dtype(dtype)\n\n\nsigned_integer_dtypes: list = []\n\n\n@pytest.mark.parametrize(\n "dtype",\n signed_integer_dtypes\n + [pd.Series([1, 2])]\n + tm.SIGNED_INT_NUMPY_DTYPES\n + to_numpy_dtypes(tm.SIGNED_INT_NUMPY_DTYPES)\n + tm.SIGNED_INT_EA_DTYPES\n + to_ea_dtypes(tm.SIGNED_INT_EA_DTYPES),\n)\ndef test_is_signed_integer_dtype(dtype):\n assert com.is_integer_dtype(dtype)\n\n\n@pytest.mark.parametrize(\n "dtype",\n [\n str,\n float,\n np.datetime64,\n np.timedelta64,\n pd.Index([1, 2.0]),\n np.array(["a", "b"]),\n np.array([], dtype=np.timedelta64),\n ]\n + tm.UNSIGNED_INT_NUMPY_DTYPES\n + to_numpy_dtypes(tm.UNSIGNED_INT_NUMPY_DTYPES)\n + tm.UNSIGNED_INT_EA_DTYPES\n + to_ea_dtypes(tm.UNSIGNED_INT_EA_DTYPES),\n)\ndef test_is_not_signed_integer_dtype(dtype):\n assert not com.is_signed_integer_dtype(dtype)\n\n\nunsigned_integer_dtypes: list = []\n\n\n@pytest.mark.parametrize(\n "dtype",\n unsigned_integer_dtypes\n + [pd.Series([1, 2], dtype=np.uint32)]\n + tm.UNSIGNED_INT_NUMPY_DTYPES\n + to_numpy_dtypes(tm.UNSIGNED_INT_NUMPY_DTYPES)\n + tm.UNSIGNED_INT_EA_DTYPES\n + to_ea_dtypes(tm.UNSIGNED_INT_EA_DTYPES),\n)\ndef test_is_unsigned_integer_dtype(dtype):\n assert com.is_unsigned_integer_dtype(dtype)\n\n\n@pytest.mark.parametrize(\n "dtype",\n [\n str,\n float,\n np.datetime64,\n np.timedelta64,\n pd.Index([1, 2.0]),\n np.array(["a", "b"]),\n np.array([], dtype=np.timedelta64),\n ]\n + tm.SIGNED_INT_NUMPY_DTYPES\n + 
to_numpy_dtypes(tm.SIGNED_INT_NUMPY_DTYPES)\n + tm.SIGNED_INT_EA_DTYPES\n + to_ea_dtypes(tm.SIGNED_INT_EA_DTYPES),\n)\ndef test_is_not_unsigned_integer_dtype(dtype):\n assert not com.is_unsigned_integer_dtype(dtype)\n\n\n@pytest.mark.parametrize(\n "dtype", [np.int64, np.array([1, 2], dtype=np.int64), "Int64", pd.Int64Dtype]\n)\ndef test_is_int64_dtype(dtype):\n msg = "is_int64_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert com.is_int64_dtype(dtype)\n\n\ndef test_type_comparison_with_numeric_ea_dtype(any_numeric_ea_dtype):\n # GH#43038\n assert pandas_dtype(any_numeric_ea_dtype) == any_numeric_ea_dtype\n\n\ndef test_type_comparison_with_real_numpy_dtype(any_real_numpy_dtype):\n # GH#43038\n assert pandas_dtype(any_real_numpy_dtype) == any_real_numpy_dtype\n\n\ndef test_type_comparison_with_signed_int_ea_dtype_and_signed_int_numpy_dtype(\n any_signed_int_ea_dtype, any_signed_int_numpy_dtype\n):\n # GH#43038\n assert not pandas_dtype(any_signed_int_ea_dtype) == any_signed_int_numpy_dtype\n\n\n@pytest.mark.parametrize(\n "dtype",\n [\n str,\n float,\n np.int32,\n np.uint64,\n pd.Index([1, 2.0]),\n np.array(["a", "b"]),\n np.array([1, 2], dtype=np.uint32),\n "int8",\n "Int8",\n pd.Int8Dtype,\n ],\n)\ndef test_is_not_int64_dtype(dtype):\n msg = "is_int64_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert not com.is_int64_dtype(dtype)\n\n\ndef test_is_datetime64_any_dtype():\n assert not com.is_datetime64_any_dtype(int)\n assert not com.is_datetime64_any_dtype(str)\n assert not com.is_datetime64_any_dtype(np.array([1, 2]))\n assert not com.is_datetime64_any_dtype(np.array(["a", "b"]))\n\n assert com.is_datetime64_any_dtype(np.datetime64)\n assert com.is_datetime64_any_dtype(np.array([], dtype=np.datetime64))\n assert com.is_datetime64_any_dtype(DatetimeTZDtype("ns", "US/Eastern"))\n assert com.is_datetime64_any_dtype(\n pd.DatetimeIndex([1, 2, 3], dtype="datetime64[ns]")\n 
)\n\n\ndef test_is_datetime64_ns_dtype():\n assert not com.is_datetime64_ns_dtype(int)\n assert not com.is_datetime64_ns_dtype(str)\n assert not com.is_datetime64_ns_dtype(np.datetime64)\n assert not com.is_datetime64_ns_dtype(np.array([1, 2]))\n assert not com.is_datetime64_ns_dtype(np.array(["a", "b"]))\n assert not com.is_datetime64_ns_dtype(np.array([], dtype=np.datetime64))\n\n # This datetime array has the wrong unit (ps instead of ns)\n assert not com.is_datetime64_ns_dtype(np.array([], dtype="datetime64[ps]"))\n\n assert com.is_datetime64_ns_dtype(DatetimeTZDtype("ns", "US/Eastern"))\n assert com.is_datetime64_ns_dtype(\n pd.DatetimeIndex([1, 2, 3], dtype=np.dtype("datetime64[ns]"))\n )\n\n # non-nano dt64tz\n assert not com.is_datetime64_ns_dtype(DatetimeTZDtype("us", "US/Eastern"))\n\n\ndef test_is_timedelta64_ns_dtype():\n assert not com.is_timedelta64_ns_dtype(np.dtype("m8[ps]"))\n assert not com.is_timedelta64_ns_dtype(np.array([1, 2], dtype=np.timedelta64))\n\n assert com.is_timedelta64_ns_dtype(np.dtype("m8[ns]"))\n assert com.is_timedelta64_ns_dtype(np.array([1, 2], dtype="m8[ns]"))\n\n\ndef test_is_numeric_v_string_like():\n assert not com.is_numeric_v_string_like(np.array([1]), 1)\n assert not com.is_numeric_v_string_like(np.array([1]), np.array([2]))\n assert not com.is_numeric_v_string_like(np.array(["foo"]), np.array(["foo"]))\n\n assert com.is_numeric_v_string_like(np.array([1]), "foo")\n assert com.is_numeric_v_string_like(np.array([1, 2]), np.array(["foo"]))\n assert com.is_numeric_v_string_like(np.array(["foo"]), np.array([1, 2]))\n\n\ndef test_needs_i8_conversion():\n assert not com.needs_i8_conversion(str)\n assert not com.needs_i8_conversion(np.int64)\n assert not com.needs_i8_conversion(pd.Series([1, 2]))\n assert not com.needs_i8_conversion(np.array(["a", "b"]))\n\n assert not com.needs_i8_conversion(np.datetime64)\n assert com.needs_i8_conversion(np.dtype(np.datetime64))\n assert not com.needs_i8_conversion(pd.Series([], 
dtype="timedelta64[ns]"))\n assert com.needs_i8_conversion(pd.Series([], dtype="timedelta64[ns]").dtype)\n assert not com.needs_i8_conversion(pd.DatetimeIndex(["2000"], tz="US/Eastern"))\n assert com.needs_i8_conversion(pd.DatetimeIndex(["2000"], tz="US/Eastern").dtype)\n\n\ndef test_is_numeric_dtype():\n assert not com.is_numeric_dtype(str)\n assert not com.is_numeric_dtype(np.datetime64)\n assert not com.is_numeric_dtype(np.timedelta64)\n assert not com.is_numeric_dtype(np.array(["a", "b"]))\n assert not com.is_numeric_dtype(np.array([], dtype=np.timedelta64))\n\n assert com.is_numeric_dtype(int)\n assert com.is_numeric_dtype(float)\n assert com.is_numeric_dtype(np.uint64)\n assert com.is_numeric_dtype(pd.Series([1, 2]))\n assert com.is_numeric_dtype(pd.Index([1, 2.0]))\n\n class MyNumericDType(ExtensionDtype):\n @property\n def type(self):\n return str\n\n @property\n def name(self):\n raise NotImplementedError\n\n @classmethod\n def construct_array_type(cls):\n raise NotImplementedError\n\n def _is_numeric(self) -> bool:\n return True\n\n assert com.is_numeric_dtype(MyNumericDType())\n\n\ndef test_is_any_real_numeric_dtype():\n assert not com.is_any_real_numeric_dtype(str)\n assert not com.is_any_real_numeric_dtype(bool)\n assert not com.is_any_real_numeric_dtype(complex)\n assert not com.is_any_real_numeric_dtype(object)\n assert not com.is_any_real_numeric_dtype(np.datetime64)\n assert not com.is_any_real_numeric_dtype(np.array(["a", "b", complex(1, 2)]))\n assert not com.is_any_real_numeric_dtype(pd.DataFrame([complex(1, 2), True]))\n\n assert com.is_any_real_numeric_dtype(int)\n assert com.is_any_real_numeric_dtype(float)\n assert com.is_any_real_numeric_dtype(np.array([1, 2.5]))\n\n\ndef test_is_float_dtype():\n assert not com.is_float_dtype(str)\n assert not com.is_float_dtype(int)\n assert not com.is_float_dtype(pd.Series([1, 2]))\n assert not com.is_float_dtype(np.array(["a", "b"]))\n\n assert com.is_float_dtype(float)\n assert 
com.is_float_dtype(pd.Index([1, 2.0]))\n\n\ndef test_is_bool_dtype():\n assert not com.is_bool_dtype(int)\n assert not com.is_bool_dtype(str)\n assert not com.is_bool_dtype(pd.Series([1, 2]))\n assert not com.is_bool_dtype(pd.Series(["a", "b"], dtype="category"))\n assert not com.is_bool_dtype(np.array(["a", "b"]))\n assert not com.is_bool_dtype(pd.Index(["a", "b"]))\n assert not com.is_bool_dtype("Int64")\n\n assert com.is_bool_dtype(bool)\n assert com.is_bool_dtype(np.bool_)\n assert com.is_bool_dtype(pd.Series([True, False], dtype="category"))\n assert com.is_bool_dtype(np.array([True, False]))\n assert com.is_bool_dtype(pd.Index([True, False]))\n\n assert com.is_bool_dtype(pd.BooleanDtype())\n assert com.is_bool_dtype(pd.array([True, False, None], dtype="boolean"))\n assert com.is_bool_dtype("boolean")\n\n\ndef test_is_bool_dtype_numpy_error():\n # GH39010\n assert not com.is_bool_dtype("0 - Name")\n\n\n@pytest.mark.parametrize(\n "check_scipy", [False, pytest.param(True, marks=td.skip_if_no("scipy"))]\n)\ndef test_is_extension_array_dtype(check_scipy):\n assert not com.is_extension_array_dtype([1, 2, 3])\n assert not com.is_extension_array_dtype(np.array([1, 2, 3]))\n assert not com.is_extension_array_dtype(pd.DatetimeIndex([1, 2, 3]))\n\n cat = pd.Categorical([1, 2, 3])\n assert com.is_extension_array_dtype(cat)\n assert com.is_extension_array_dtype(pd.Series(cat))\n assert com.is_extension_array_dtype(SparseArray([1, 2, 3]))\n assert com.is_extension_array_dtype(pd.DatetimeIndex(["2000"], tz="US/Eastern"))\n\n dtype = DatetimeTZDtype("ns", tz="US/Eastern")\n s = pd.Series([], dtype=dtype)\n assert com.is_extension_array_dtype(s)\n\n if check_scipy:\n import scipy.sparse\n\n assert not com.is_extension_array_dtype(scipy.sparse.bsr_matrix([1, 2, 3]))\n\n\ndef test_is_complex_dtype():\n assert not com.is_complex_dtype(int)\n assert not com.is_complex_dtype(str)\n assert not com.is_complex_dtype(pd.Series([1, 2]))\n assert not com.is_complex_dtype(np.array(["a", 
"b"]))\n\n assert com.is_complex_dtype(np.complex128)\n assert com.is_complex_dtype(complex)\n assert com.is_complex_dtype(np.array([1 + 1j, 5]))\n\n\n@pytest.mark.parametrize(\n "input_param,result",\n [\n (int, np.dtype(int)),\n ("int32", np.dtype("int32")),\n (float, np.dtype(float)),\n ("float64", np.dtype("float64")),\n (np.dtype("float64"), np.dtype("float64")),\n (str, np.dtype(str)),\n (pd.Series([1, 2], dtype=np.dtype("int16")), np.dtype("int16")),\n (pd.Series(["a", "b"], dtype=object), np.dtype(object)),\n (pd.Index([1, 2]), np.dtype("int64")),\n (pd.Index(["a", "b"], dtype=object), np.dtype(object)),\n ("category", "category"),\n (pd.Categorical(["a", "b"]).dtype, CategoricalDtype(["a", "b"])),\n (pd.Categorical(["a", "b"]), CategoricalDtype(["a", "b"])),\n (pd.CategoricalIndex(["a", "b"]).dtype, CategoricalDtype(["a", "b"])),\n (pd.CategoricalIndex(["a", "b"]), CategoricalDtype(["a", "b"])),\n (CategoricalDtype(), CategoricalDtype()),\n (pd.DatetimeIndex([1, 2]), np.dtype("=M8[ns]")),\n (pd.DatetimeIndex([1, 2]).dtype, np.dtype("=M8[ns]")),\n ("<M8[ns]", np.dtype("<M8[ns]")),\n ("datetime64[ns, Europe/London]", DatetimeTZDtype("ns", "Europe/London")),\n (PeriodDtype(freq="D"), PeriodDtype(freq="D")),\n ("period[D]", PeriodDtype(freq="D")),\n (IntervalDtype(), IntervalDtype()),\n ],\n)\ndef test_get_dtype(input_param, result):\n assert com._get_dtype(input_param) == result\n\n\n@pytest.mark.parametrize(\n "input_param,expected_error_message",\n [\n (None, "Cannot deduce dtype from null object"),\n (1, "data type not understood"),\n (1.2, "data type not understood"),\n # numpy dev changed from double-quotes to single quotes\n ("random string", "data type [\"']random string[\"'] not understood"),\n (pd.DataFrame([1, 2]), "data type not understood"),\n ],\n)\ndef test_get_dtype_fails(input_param, expected_error_message):\n # python objects\n # 2020-02-02 npdev changed error message\n expected_error_message += f"|Cannot interpret '{input_param}' as a data 
type"\n with pytest.raises(TypeError, match=expected_error_message):\n com._get_dtype(input_param)\n\n\n@pytest.mark.parametrize(\n "input_param,result",\n [\n (int, np.dtype(int).type),\n ("int32", np.int32),\n (float, np.dtype(float).type),\n ("float64", np.float64),\n (np.dtype("float64"), np.float64),\n (str, np.dtype(str).type),\n (pd.Series([1, 2], dtype=np.dtype("int16")), np.int16),\n (pd.Series(["a", "b"], dtype=object), np.object_),\n (pd.Index([1, 2], dtype="int64"), np.int64),\n (pd.Index(["a", "b"], dtype=object), np.object_),\n ("category", CategoricalDtypeType),\n (pd.Categorical(["a", "b"]).dtype, CategoricalDtypeType),\n (pd.Categorical(["a", "b"]), CategoricalDtypeType),\n (pd.CategoricalIndex(["a", "b"]).dtype, CategoricalDtypeType),\n (pd.CategoricalIndex(["a", "b"]), CategoricalDtypeType),\n (pd.DatetimeIndex([1, 2]), np.datetime64),\n (pd.DatetimeIndex([1, 2]).dtype, np.datetime64),\n ("<M8[ns]", np.datetime64),\n (pd.DatetimeIndex(["2000"], tz="Europe/London"), pd.Timestamp),\n (pd.DatetimeIndex(["2000"], tz="Europe/London").dtype, pd.Timestamp),\n ("datetime64[ns, Europe/London]", pd.Timestamp),\n (PeriodDtype(freq="D"), pd.Period),\n ("period[D]", pd.Period),\n (IntervalDtype(), pd.Interval),\n (None, type(None)),\n (1, type(None)),\n (1.2, type(None)),\n (pd.DataFrame([1, 2]), type(None)), # composite dtype\n ],\n)\ndef test__is_dtype_type(input_param, result):\n assert com._is_dtype_type(input_param, lambda tipo: tipo == result)\n\n\ndef test_astype_nansafe_copy_false(any_int_numpy_dtype):\n # GH#34457 use astype, not view\n arr = np.array([1, 2, 3], dtype=any_int_numpy_dtype)\n\n dtype = np.dtype("float64")\n result = astype_array(arr, dtype, copy=False)\n\n expected = np.array([1.0, 2.0, 3.0], dtype=dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n\n@pytest.mark.parametrize("from_type", [np.datetime64, np.timedelta64])\ndef test_astype_object_preserves_datetime_na(from_type):\n arr = np.array([from_type("NaT", "ns")])\n result 
= astype_array(arr, dtype=np.dtype("object"))\n\n assert isna(result)[0]\n\n\ndef test_validate_allhashable():\n assert com.validate_all_hashable(1, "a") is None\n\n with pytest.raises(TypeError, match="All elements must be hashable"):\n com.validate_all_hashable([])\n\n with pytest.raises(TypeError, match="list must be a hashable type"):\n com.validate_all_hashable([], error_name="list")\n\n\ndef test_pandas_dtype_numpy_warning():\n # GH#51523\n if Version(np.__version__) < Version("2.3.0.dev0"):\n ctx = tm.assert_produces_warning(\n DeprecationWarning,\n check_stacklevel=False,\n match=(\n "Converting `np.integer` or `np.signedinteger` to a dtype is deprecated"\n ),\n )\n else:\n ctx = tm.external_error_raised(TypeError)\n\n with ctx:\n pandas_dtype(np.integer)\n\n\ndef test_pandas_dtype_ea_not_instance():\n # GH 31356 GH 54592\n with tm.assert_produces_warning(UserWarning):\n assert pandas_dtype(CategoricalDtype) == CategoricalDtype()\n\n\ndef test_pandas_dtype_string_dtypes(string_storage):\n with pd.option_context("future.infer_string", True):\n # with the default string_storage setting\n result = pandas_dtype("str")\n assert result == pd.StringDtype(\n "pyarrow" if HAS_PYARROW else "python", na_value=np.nan\n )\n\n with pd.option_context("future.infer_string", True):\n # with the default string_storage setting\n result = pandas_dtype(str)\n assert result == pd.StringDtype(\n "pyarrow" if HAS_PYARROW else "python", na_value=np.nan\n )\n\n with pd.option_context("future.infer_string", True):\n with pd.option_context("string_storage", string_storage):\n result = pandas_dtype("str")\n assert result == pd.StringDtype(string_storage, na_value=np.nan)\n\n with pd.option_context("future.infer_string", True):\n with pd.option_context("string_storage", string_storage):\n result = pandas_dtype(str)\n assert result == pd.StringDtype(string_storage, na_value=np.nan)\n\n with pd.option_context("future.infer_string", False):\n with pd.option_context("string_storage", 
string_storage):\n result = pandas_dtype("str")\n assert result == np.dtype("U")\n\n with pd.option_context("string_storage", string_storage):\n result = pandas_dtype("string")\n assert result == pd.StringDtype(string_storage, na_value=pd.NA)\n\n\ndef test_pandas_dtype_string_dtype_alias_with_storage():\n with pytest.raises(TypeError, match="not understood"):\n pandas_dtype("str[python]")\n\n with pytest.raises(TypeError, match="not understood"):\n pandas_dtype("str[pyarrow]")\n\n result = pandas_dtype("string[python]")\n assert result == pd.StringDtype("python", na_value=pd.NA)\n\n if HAS_PYARROW:\n result = pandas_dtype("string[pyarrow]")\n assert result == pd.StringDtype("pyarrow", na_value=pd.NA)\n else:\n with pytest.raises(\n ImportError, match="required for PyArrow backed StringArray"\n ):\n pandas_dtype("string[pyarrow]")\n | .venv\Lib\site-packages\pandas\tests\dtypes\test_common.py | test_common.py | Python | 28,706 | 0.95 | 0.095954 | 0.039359 | vue-tools | 787 | 2023-08-30T00:36:37.343055 | Apache-2.0 | true | 68013d7e39ca1b0bed445ba03ef6b0be |
import pytest\n\nimport pandas.core.dtypes.concat as _concat\n\nimport pandas as pd\nfrom pandas import Series\nimport pandas._testing as tm\n\n\ndef test_concat_mismatched_categoricals_with_empty():\n # concat_compat behavior on series._values should match pd.concat on series\n ser1 = Series(["a", "b", "c"], dtype="category")\n ser2 = Series([], dtype="category")\n\n msg = "The behavior of array concatenation with empty entries is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = _concat.concat_compat([ser1._values, ser2._values])\n with tm.assert_produces_warning(FutureWarning, match=msg):\n expected = pd.concat([ser1, ser2])._values\n tm.assert_categorical_equal(result, expected)\n\n\n@pytest.mark.parametrize("copy", [True, False])\ndef test_concat_single_dataframe_tz_aware(copy):\n # https://github.com/pandas-dev/pandas/issues/25257\n df = pd.DataFrame(\n {"timestamp": [pd.Timestamp("2020-04-08 09:00:00.709949+0000", tz="UTC")]}\n )\n expected = df.copy()\n result = pd.concat([df], copy=copy)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_concat_periodarray_2d():\n pi = pd.period_range("2016-01-01", periods=36, freq="D")\n arr = pi._data.reshape(6, 6)\n\n result = _concat.concat_compat([arr[:2], arr[2:]], axis=0)\n tm.assert_period_array_equal(result, arr)\n\n result = _concat.concat_compat([arr[:, :2], arr[:, 2:]], axis=1)\n tm.assert_period_array_equal(result, arr)\n\n msg = (\n "all the input array dimensions.* for the concatenation axis must match exactly"\n )\n with pytest.raises(ValueError, match=msg):\n _concat.concat_compat([arr[:, :2], arr[:, 2:]], axis=0)\n\n with pytest.raises(ValueError, match=msg):\n _concat.concat_compat([arr[:2], arr[2:]], axis=1)\n | .venv\Lib\site-packages\pandas\tests\dtypes\test_concat.py | test_concat.py | Python | 1,799 | 0.95 | 0.078431 | 0.052632 | awesome-app | 512 | 2024-10-13T13:01:52.556252 | BSD-3-Clause | true | 8a461c5130999de29a8f8869bda6544b |
import re\nimport weakref\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs.dtypes import NpyDatetimeUnit\n\nfrom pandas.core.dtypes.base import _registry as registry\nfrom pandas.core.dtypes.common import (\n is_bool_dtype,\n is_categorical_dtype,\n is_datetime64_any_dtype,\n is_datetime64_dtype,\n is_datetime64_ns_dtype,\n is_datetime64tz_dtype,\n is_dtype_equal,\n is_interval_dtype,\n is_period_dtype,\n is_string_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n IntervalDtype,\n PeriodDtype,\n)\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n CategoricalIndex,\n DatetimeIndex,\n IntervalIndex,\n Series,\n SparseDtype,\n date_range,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays.sparse import SparseArray\n\n\nclass Base:\n def test_hash(self, dtype):\n hash(dtype)\n\n def test_equality_invalid(self, dtype):\n assert not dtype == "foo"\n assert not is_dtype_equal(dtype, np.int64)\n\n def test_numpy_informed(self, dtype):\n # npdev 2020-02-02 changed from "data type not understood" to\n # "Cannot interpret 'foo' as a data type"\n msg = "|".join(\n ["data type not understood", "Cannot interpret '.*' as a data type"]\n )\n with pytest.raises(TypeError, match=msg):\n np.dtype(dtype)\n\n assert not dtype == np.str_\n assert not np.str_ == dtype\n\n def test_pickle(self, dtype):\n # make sure our cache is NOT pickled\n\n # clear the cache\n type(dtype).reset_cache()\n assert not len(dtype._cache_dtypes)\n\n # force back to the cache\n result = tm.round_trip_pickle(dtype)\n if not isinstance(dtype, PeriodDtype):\n # Because PeriodDtype has a cython class as a base class,\n # it has different pickle semantics, and its cache is re-populated\n # on un-pickling.\n assert not len(dtype._cache_dtypes)\n assert result == dtype\n\n\nclass TestCategoricalDtype(Base):\n @pytest.fixture\n def dtype(self):\n """\n Class level fixture of dtype for TestCategoricalDtype\n """\n return 
CategoricalDtype()\n\n def test_hash_vs_equality(self, dtype):\n dtype2 = CategoricalDtype()\n assert dtype == dtype2\n assert dtype2 == dtype\n assert hash(dtype) == hash(dtype2)\n\n def test_equality(self, dtype):\n assert dtype == "category"\n assert is_dtype_equal(dtype, "category")\n assert "category" == dtype\n assert is_dtype_equal("category", dtype)\n\n assert dtype == CategoricalDtype()\n assert is_dtype_equal(dtype, CategoricalDtype())\n assert CategoricalDtype() == dtype\n assert is_dtype_equal(CategoricalDtype(), dtype)\n\n assert dtype != "foo"\n assert not is_dtype_equal(dtype, "foo")\n assert "foo" != dtype\n assert not is_dtype_equal("foo", dtype)\n\n def test_construction_from_string(self, dtype):\n result = CategoricalDtype.construct_from_string("category")\n assert is_dtype_equal(dtype, result)\n msg = "Cannot construct a 'CategoricalDtype' from 'foo'"\n with pytest.raises(TypeError, match=msg):\n CategoricalDtype.construct_from_string("foo")\n\n def test_constructor_invalid(self):\n msg = "Parameter 'categories' must be list-like"\n with pytest.raises(TypeError, match=msg):\n CategoricalDtype("category")\n\n dtype1 = CategoricalDtype(["a", "b"], ordered=True)\n dtype2 = CategoricalDtype(["x", "y"], ordered=False)\n c = Categorical([0, 1], dtype=dtype1)\n\n @pytest.mark.parametrize(\n "values, categories, ordered, dtype, expected",\n [\n [None, None, None, None, CategoricalDtype()],\n [None, ["a", "b"], True, None, dtype1],\n [c, None, None, dtype2, dtype2],\n [c, ["x", "y"], False, None, dtype2],\n ],\n )\n def test_from_values_or_dtype(self, values, categories, ordered, dtype, expected):\n result = CategoricalDtype._from_values_or_dtype(\n values, categories, ordered, dtype\n )\n assert result == expected\n\n @pytest.mark.parametrize(\n "values, categories, ordered, dtype",\n [\n [None, ["a", "b"], True, dtype2],\n [None, ["a", "b"], None, dtype2],\n [None, None, True, dtype2],\n ],\n )\n def test_from_values_or_dtype_raises(self, values, 
categories, ordered, dtype):\n msg = "Cannot specify `categories` or `ordered` together with `dtype`."\n with pytest.raises(ValueError, match=msg):\n CategoricalDtype._from_values_or_dtype(values, categories, ordered, dtype)\n\n def test_from_values_or_dtype_invalid_dtype(self):\n msg = "Cannot not construct CategoricalDtype from <class 'object'>"\n with pytest.raises(ValueError, match=msg):\n CategoricalDtype._from_values_or_dtype(None, None, None, object)\n\n def test_is_dtype(self, dtype):\n assert CategoricalDtype.is_dtype(dtype)\n assert CategoricalDtype.is_dtype("category")\n assert CategoricalDtype.is_dtype(CategoricalDtype())\n assert not CategoricalDtype.is_dtype("foo")\n assert not CategoricalDtype.is_dtype(np.float64)\n\n def test_basic(self, dtype):\n msg = "is_categorical_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert is_categorical_dtype(dtype)\n\n factor = Categorical(["a", "b", "b", "a", "a", "c", "c", "c"])\n\n s = Series(factor, name="A")\n\n # dtypes\n assert is_categorical_dtype(s.dtype)\n assert is_categorical_dtype(s)\n assert not is_categorical_dtype(np.dtype("float64"))\n\n def test_tuple_categories(self):\n categories = [(1, "a"), (2, "b"), (3, "c")]\n result = CategoricalDtype(categories)\n assert all(result.categories == categories)\n\n @pytest.mark.parametrize(\n "categories, expected",\n [\n ([True, False], True),\n ([True, False, None], True),\n ([True, False, "a", "b'"], False),\n ([0, 1], False),\n ],\n )\n def test_is_boolean(self, categories, expected):\n cat = Categorical(categories)\n assert cat.dtype._is_boolean is expected\n assert is_bool_dtype(cat) is expected\n assert is_bool_dtype(cat.dtype) is expected\n\n def test_dtype_specific_categorical_dtype(self):\n expected = "datetime64[ns]"\n dti = DatetimeIndex([], dtype=expected)\n result = str(Categorical(dti).categories.dtype)\n assert result == expected\n\n def test_not_string(self):\n # though CategoricalDtype has object 
kind, it cannot be string\n assert not is_string_dtype(CategoricalDtype())\n\n def test_repr_range_categories(self):\n rng = pd.Index(range(3))\n dtype = CategoricalDtype(categories=rng, ordered=False)\n result = repr(dtype)\n\n expected = (\n "CategoricalDtype(categories=range(0, 3), ordered=False, "\n "categories_dtype=int64)"\n )\n assert result == expected\n\n def test_update_dtype(self):\n # GH 27338\n result = CategoricalDtype(["a"]).update_dtype(Categorical(["b"], ordered=True))\n expected = CategoricalDtype(["b"], ordered=True)\n assert result == expected\n\n def test_repr(self):\n cat = Categorical(pd.Index([1, 2, 3], dtype="int32"))\n result = cat.dtype.__repr__()\n expected = (\n "CategoricalDtype(categories=[1, 2, 3], ordered=False, "\n "categories_dtype=int32)"\n )\n assert result == expected\n\n\nclass TestDatetimeTZDtype(Base):\n @pytest.fixture\n def dtype(self):\n """\n Class level fixture of dtype for TestDatetimeTZDtype\n """\n return DatetimeTZDtype("ns", "US/Eastern")\n\n def test_alias_to_unit_raises(self):\n # 23990\n with pytest.raises(ValueError, match="Passing a dtype alias"):\n DatetimeTZDtype("datetime64[ns, US/Central]")\n\n def test_alias_to_unit_bad_alias_raises(self):\n # 23990\n with pytest.raises(TypeError, match=""):\n DatetimeTZDtype("this is a bad string")\n\n with pytest.raises(TypeError, match=""):\n DatetimeTZDtype("datetime64[ns, US/NotATZ]")\n\n def test_hash_vs_equality(self, dtype):\n # make sure that we satisfy is semantics\n dtype2 = DatetimeTZDtype("ns", "US/Eastern")\n dtype3 = DatetimeTZDtype(dtype2)\n assert dtype == dtype2\n assert dtype2 == dtype\n assert dtype3 == dtype\n assert hash(dtype) == hash(dtype2)\n assert hash(dtype) == hash(dtype3)\n\n dtype4 = DatetimeTZDtype("ns", "US/Central")\n assert dtype2 != dtype4\n assert hash(dtype2) != hash(dtype4)\n\n def test_construction_non_nanosecond(self):\n res = DatetimeTZDtype("ms", "US/Eastern")\n assert res.unit == "ms"\n assert res._creso == 
NpyDatetimeUnit.NPY_FR_ms.value\n assert res.str == "|M8[ms]"\n assert str(res) == "datetime64[ms, US/Eastern]"\n assert res.base == np.dtype("M8[ms]")\n\n def test_day_not_supported(self):\n msg = "DatetimeTZDtype only supports s, ms, us, ns units"\n with pytest.raises(ValueError, match=msg):\n DatetimeTZDtype("D", "US/Eastern")\n\n def test_subclass(self):\n a = DatetimeTZDtype.construct_from_string("datetime64[ns, US/Eastern]")\n b = DatetimeTZDtype.construct_from_string("datetime64[ns, CET]")\n\n assert issubclass(type(a), type(a))\n assert issubclass(type(a), type(b))\n\n def test_compat(self, dtype):\n msg = "is_datetime64tz_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert is_datetime64tz_dtype(dtype)\n assert is_datetime64tz_dtype("datetime64[ns, US/Eastern]")\n assert is_datetime64_any_dtype(dtype)\n assert is_datetime64_any_dtype("datetime64[ns, US/Eastern]")\n assert is_datetime64_ns_dtype(dtype)\n assert is_datetime64_ns_dtype("datetime64[ns, US/Eastern]")\n assert not is_datetime64_dtype(dtype)\n assert not is_datetime64_dtype("datetime64[ns, US/Eastern]")\n\n def test_construction_from_string(self, dtype):\n result = DatetimeTZDtype.construct_from_string("datetime64[ns, US/Eastern]")\n assert is_dtype_equal(dtype, result)\n\n @pytest.mark.parametrize(\n "string",\n [\n "foo",\n "datetime64[ns, notatz]",\n # non-nano unit\n "datetime64[ps, UTC]",\n # dateutil str that returns None from gettz\n "datetime64[ns, dateutil/invalid]",\n ],\n )\n def test_construct_from_string_invalid_raises(self, string):\n msg = f"Cannot construct a 'DatetimeTZDtype' from '{string}'"\n with pytest.raises(TypeError, match=re.escape(msg)):\n DatetimeTZDtype.construct_from_string(string)\n\n def test_construct_from_string_wrong_type_raises(self):\n msg = "'construct_from_string' expects a string, got <class 'list'>"\n with pytest.raises(TypeError, match=msg):\n DatetimeTZDtype.construct_from_string(["datetime64[ns, notatz]"])\n\n 
def test_is_dtype(self, dtype):\n assert not DatetimeTZDtype.is_dtype(None)\n assert DatetimeTZDtype.is_dtype(dtype)\n assert DatetimeTZDtype.is_dtype("datetime64[ns, US/Eastern]")\n assert DatetimeTZDtype.is_dtype("M8[ns, US/Eastern]")\n assert not DatetimeTZDtype.is_dtype("foo")\n assert DatetimeTZDtype.is_dtype(DatetimeTZDtype("ns", "US/Pacific"))\n assert not DatetimeTZDtype.is_dtype(np.float64)\n\n def test_equality(self, dtype):\n assert is_dtype_equal(dtype, "datetime64[ns, US/Eastern]")\n assert is_dtype_equal(dtype, "M8[ns, US/Eastern]")\n assert is_dtype_equal(dtype, DatetimeTZDtype("ns", "US/Eastern"))\n assert not is_dtype_equal(dtype, "foo")\n assert not is_dtype_equal(dtype, DatetimeTZDtype("ns", "CET"))\n assert not is_dtype_equal(\n DatetimeTZDtype("ns", "US/Eastern"), DatetimeTZDtype("ns", "US/Pacific")\n )\n\n # numpy compat\n assert is_dtype_equal(np.dtype("M8[ns]"), "datetime64[ns]")\n\n assert dtype == "M8[ns, US/Eastern]"\n\n def test_basic(self, dtype):\n msg = "is_datetime64tz_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert is_datetime64tz_dtype(dtype)\n\n dr = date_range("20130101", periods=3, tz="US/Eastern")\n s = Series(dr, name="A")\n\n # dtypes\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert is_datetime64tz_dtype(s.dtype)\n assert is_datetime64tz_dtype(s)\n assert not is_datetime64tz_dtype(np.dtype("float64"))\n assert not is_datetime64tz_dtype(1.0)\n\n def test_dst(self):\n dr1 = date_range("2013-01-01", periods=3, tz="US/Eastern")\n s1 = Series(dr1, name="A")\n assert isinstance(s1.dtype, DatetimeTZDtype)\n\n dr2 = date_range("2013-08-01", periods=3, tz="US/Eastern")\n s2 = Series(dr2, name="A")\n assert isinstance(s2.dtype, DatetimeTZDtype)\n assert s1.dtype == s2.dtype\n\n @pytest.mark.parametrize("tz", ["UTC", "US/Eastern"])\n @pytest.mark.parametrize("constructor", ["M8", "datetime64"])\n def test_parser(self, tz, constructor):\n # pr #11245\n dtz_str = 
f"{constructor}[ns, {tz}]"\n result = DatetimeTZDtype.construct_from_string(dtz_str)\n expected = DatetimeTZDtype("ns", tz)\n assert result == expected\n\n def test_empty(self):\n with pytest.raises(TypeError, match="A 'tz' is required."):\n DatetimeTZDtype()\n\n def test_tz_standardize(self):\n # GH 24713\n tz = pytz.timezone("US/Eastern")\n dr = date_range("2013-01-01", periods=3, tz="US/Eastern")\n dtype = DatetimeTZDtype("ns", dr.tz)\n assert dtype.tz == tz\n dtype = DatetimeTZDtype("ns", dr[0].tz)\n assert dtype.tz == tz\n\n\nclass TestPeriodDtype(Base):\n @pytest.fixture\n def dtype(self):\n """\n Class level fixture of dtype for TestPeriodDtype\n """\n return PeriodDtype("D")\n\n def test_hash_vs_equality(self, dtype):\n # make sure that we satisfy is semantics\n dtype2 = PeriodDtype("D")\n dtype3 = PeriodDtype(dtype2)\n assert dtype == dtype2\n assert dtype2 == dtype\n assert dtype3 == dtype\n assert dtype is not dtype2\n assert dtype2 is not dtype\n assert dtype3 is not dtype\n assert hash(dtype) == hash(dtype2)\n assert hash(dtype) == hash(dtype3)\n\n def test_construction(self):\n with pytest.raises(ValueError, match="Invalid frequency: xx"):\n PeriodDtype("xx")\n\n for s in ["period[D]", "Period[D]", "D"]:\n dt = PeriodDtype(s)\n assert dt.freq == pd.tseries.offsets.Day()\n\n for s in ["period[3D]", "Period[3D]", "3D"]:\n dt = PeriodDtype(s)\n assert dt.freq == pd.tseries.offsets.Day(3)\n\n for s in [\n "period[26h]",\n "Period[26h]",\n "26h",\n "period[1D2h]",\n "Period[1D2h]",\n "1D2h",\n ]:\n dt = PeriodDtype(s)\n assert dt.freq == pd.tseries.offsets.Hour(26)\n\n def test_cannot_use_custom_businessday(self):\n # GH#52534\n msg = "C is not supported as period frequency"\n msg1 = "<CustomBusinessDay> is not supported as period frequency"\n msg2 = r"PeriodDtype\[B\] is deprecated"\n with pytest.raises(ValueError, match=msg):\n PeriodDtype("C")\n with pytest.raises(ValueError, match=msg1):\n with tm.assert_produces_warning(FutureWarning, match=msg2):\n 
PeriodDtype(pd.offsets.CustomBusinessDay())\n\n def test_subclass(self):\n a = PeriodDtype("period[D]")\n b = PeriodDtype("period[3D]")\n\n assert issubclass(type(a), type(a))\n assert issubclass(type(a), type(b))\n\n def test_identity(self):\n assert PeriodDtype("period[D]") == PeriodDtype("period[D]")\n assert PeriodDtype("period[D]") is not PeriodDtype("period[D]")\n\n assert PeriodDtype("period[3D]") == PeriodDtype("period[3D]")\n assert PeriodDtype("period[3D]") is not PeriodDtype("period[3D]")\n\n assert PeriodDtype("period[1s1us]") == PeriodDtype("period[1000001us]")\n assert PeriodDtype("period[1s1us]") is not PeriodDtype("period[1000001us]")\n\n def test_compat(self, dtype):\n assert not is_datetime64_ns_dtype(dtype)\n assert not is_datetime64_ns_dtype("period[D]")\n assert not is_datetime64_dtype(dtype)\n assert not is_datetime64_dtype("period[D]")\n\n def test_construction_from_string(self, dtype):\n result = PeriodDtype("period[D]")\n assert is_dtype_equal(dtype, result)\n result = PeriodDtype.construct_from_string("period[D]")\n assert is_dtype_equal(dtype, result)\n\n with pytest.raises(TypeError, match="list"):\n PeriodDtype.construct_from_string([1, 2, 3])\n\n @pytest.mark.parametrize(\n "string",\n [\n "foo",\n "period[foo]",\n "foo[D]",\n "datetime64[ns]",\n "datetime64[ns, US/Eastern]",\n ],\n )\n def test_construct_dtype_from_string_invalid_raises(self, string):\n msg = f"Cannot construct a 'PeriodDtype' from '{string}'"\n with pytest.raises(TypeError, match=re.escape(msg)):\n PeriodDtype.construct_from_string(string)\n\n def test_is_dtype(self, dtype):\n assert PeriodDtype.is_dtype(dtype)\n assert PeriodDtype.is_dtype("period[D]")\n assert PeriodDtype.is_dtype("period[3D]")\n assert PeriodDtype.is_dtype(PeriodDtype("3D"))\n assert PeriodDtype.is_dtype("period[us]")\n assert PeriodDtype.is_dtype("period[s]")\n assert PeriodDtype.is_dtype(PeriodDtype("us"))\n assert PeriodDtype.is_dtype(PeriodDtype("s"))\n\n assert not PeriodDtype.is_dtype("D")\n 
assert not PeriodDtype.is_dtype("3D")\n assert not PeriodDtype.is_dtype("U")\n assert not PeriodDtype.is_dtype("s")\n assert not PeriodDtype.is_dtype("foo")\n assert not PeriodDtype.is_dtype(np.object_)\n assert not PeriodDtype.is_dtype(np.int64)\n assert not PeriodDtype.is_dtype(np.float64)\n\n def test_equality(self, dtype):\n assert is_dtype_equal(dtype, "period[D]")\n assert is_dtype_equal(dtype, PeriodDtype("D"))\n assert is_dtype_equal(dtype, PeriodDtype("D"))\n assert is_dtype_equal(PeriodDtype("D"), PeriodDtype("D"))\n\n assert not is_dtype_equal(dtype, "D")\n assert not is_dtype_equal(PeriodDtype("D"), PeriodDtype("2D"))\n\n def test_basic(self, dtype):\n msg = "is_period_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert is_period_dtype(dtype)\n\n pidx = pd.period_range("2013-01-01 09:00", periods=5, freq="h")\n\n assert is_period_dtype(pidx.dtype)\n assert is_period_dtype(pidx)\n\n s = Series(pidx, name="A")\n\n assert is_period_dtype(s.dtype)\n assert is_period_dtype(s)\n\n assert not is_period_dtype(np.dtype("float64"))\n assert not is_period_dtype(1.0)\n\n def test_freq_argument_required(self):\n # GH#27388\n msg = "missing 1 required positional argument: 'freq'"\n with pytest.raises(TypeError, match=msg):\n PeriodDtype()\n\n msg = "PeriodDtype argument should be string or BaseOffset, got NoneType"\n with pytest.raises(TypeError, match=msg):\n # GH#51790\n PeriodDtype(None)\n\n def test_not_string(self):\n # though PeriodDtype has object kind, it cannot be string\n assert not is_string_dtype(PeriodDtype("D"))\n\n def test_perioddtype_caching_dateoffset_normalize(self):\n # GH 24121\n per_d = PeriodDtype(pd.offsets.YearEnd(normalize=True))\n assert per_d.freq.normalize\n\n per_d2 = PeriodDtype(pd.offsets.YearEnd(normalize=False))\n assert not per_d2.freq.normalize\n\n def test_dont_keep_ref_after_del(self):\n # GH 54184\n dtype = PeriodDtype("D")\n ref = weakref.ref(dtype)\n del dtype\n assert ref() is 
None\n\n\nclass TestIntervalDtype(Base):\n @pytest.fixture\n def dtype(self):\n """\n Class level fixture of dtype for TestIntervalDtype\n """\n return IntervalDtype("int64", "right")\n\n def test_hash_vs_equality(self, dtype):\n # make sure that we satisfy is semantics\n dtype2 = IntervalDtype("int64", "right")\n dtype3 = IntervalDtype(dtype2)\n assert dtype == dtype2\n assert dtype2 == dtype\n assert dtype3 == dtype\n assert dtype is not dtype2\n assert dtype2 is not dtype3\n assert dtype3 is not dtype\n assert hash(dtype) == hash(dtype2)\n assert hash(dtype) == hash(dtype3)\n\n dtype1 = IntervalDtype("interval")\n dtype2 = IntervalDtype(dtype1)\n dtype3 = IntervalDtype("interval")\n assert dtype2 == dtype1\n assert dtype2 == dtype2\n assert dtype2 == dtype3\n assert dtype2 is not dtype1\n assert dtype2 is dtype2\n assert dtype2 is not dtype3\n assert hash(dtype2) == hash(dtype1)\n assert hash(dtype2) == hash(dtype2)\n assert hash(dtype2) == hash(dtype3)\n\n @pytest.mark.parametrize(\n "subtype", ["interval[int64]", "Interval[int64]", "int64", np.dtype("int64")]\n )\n def test_construction(self, subtype):\n i = IntervalDtype(subtype, closed="right")\n assert i.subtype == np.dtype("int64")\n msg = "is_interval_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert is_interval_dtype(i)\n\n @pytest.mark.parametrize(\n "subtype", ["interval[int64]", "Interval[int64]", "int64", np.dtype("int64")]\n )\n def test_construction_allows_closed_none(self, subtype):\n # GH#38394\n dtype = IntervalDtype(subtype)\n\n assert dtype.closed is None\n\n def test_closed_mismatch(self):\n msg = "'closed' keyword does not match value specified in dtype string"\n with pytest.raises(ValueError, match=msg):\n IntervalDtype("interval[int64, left]", "right")\n\n @pytest.mark.parametrize("subtype", [None, "interval", "Interval"])\n def test_construction_generic(self, subtype):\n # generic\n i = IntervalDtype(subtype)\n assert i.subtype is None\n msg = 
"is_interval_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert is_interval_dtype(i)\n\n @pytest.mark.parametrize(\n "subtype",\n [\n CategoricalDtype(list("abc"), False),\n CategoricalDtype(list("wxyz"), True),\n object,\n str,\n "<U10",\n "interval[category]",\n "interval[object]",\n ],\n )\n def test_construction_not_supported(self, subtype):\n # GH 19016\n msg = (\n "category, object, and string subtypes are not supported "\n "for IntervalDtype"\n )\n with pytest.raises(TypeError, match=msg):\n IntervalDtype(subtype)\n\n @pytest.mark.parametrize("subtype", ["xx", "IntervalA", "Interval[foo]"])\n def test_construction_errors(self, subtype):\n msg = "could not construct IntervalDtype"\n with pytest.raises(TypeError, match=msg):\n IntervalDtype(subtype)\n\n def test_closed_must_match(self):\n # GH#37933\n dtype = IntervalDtype(np.float64, "left")\n\n msg = "dtype.closed and 'closed' do not match"\n with pytest.raises(ValueError, match=msg):\n IntervalDtype(dtype, closed="both")\n\n def test_closed_invalid(self):\n with pytest.raises(ValueError, match="closed must be one of"):\n IntervalDtype(np.float64, "foo")\n\n def test_construction_from_string(self, dtype):\n result = IntervalDtype("interval[int64, right]")\n assert is_dtype_equal(dtype, result)\n result = IntervalDtype.construct_from_string("interval[int64, right]")\n assert is_dtype_equal(dtype, result)\n\n @pytest.mark.parametrize("string", [0, 3.14, ("a", "b"), None])\n def test_construction_from_string_errors(self, string):\n # these are invalid entirely\n msg = f"'construct_from_string' expects a string, got {type(string)}"\n\n with pytest.raises(TypeError, match=re.escape(msg)):\n IntervalDtype.construct_from_string(string)\n\n @pytest.mark.parametrize("string", ["foo", "foo[int64]", "IntervalA"])\n def test_construction_from_string_error_subtype(self, string):\n # this is an invalid subtype\n msg = (\n "Incorrectly formatted string passed to constructor. 
"\n r"Valid formats include Interval or Interval\[dtype\] "\n "where dtype is numeric, datetime, or timedelta"\n )\n\n with pytest.raises(TypeError, match=msg):\n IntervalDtype.construct_from_string(string)\n\n def test_subclass(self):\n a = IntervalDtype("interval[int64, right]")\n b = IntervalDtype("interval[int64, right]")\n\n assert issubclass(type(a), type(a))\n assert issubclass(type(a), type(b))\n\n def test_is_dtype(self, dtype):\n assert IntervalDtype.is_dtype(dtype)\n assert IntervalDtype.is_dtype("interval")\n assert IntervalDtype.is_dtype(IntervalDtype("float64"))\n assert IntervalDtype.is_dtype(IntervalDtype("int64"))\n assert IntervalDtype.is_dtype(IntervalDtype(np.int64))\n assert IntervalDtype.is_dtype(IntervalDtype("float64", "left"))\n assert IntervalDtype.is_dtype(IntervalDtype("int64", "right"))\n assert IntervalDtype.is_dtype(IntervalDtype(np.int64, "both"))\n\n assert not IntervalDtype.is_dtype("D")\n assert not IntervalDtype.is_dtype("3D")\n assert not IntervalDtype.is_dtype("us")\n assert not IntervalDtype.is_dtype("S")\n assert not IntervalDtype.is_dtype("foo")\n assert not IntervalDtype.is_dtype("IntervalA")\n assert not IntervalDtype.is_dtype(np.object_)\n assert not IntervalDtype.is_dtype(np.int64)\n assert not IntervalDtype.is_dtype(np.float64)\n\n def test_equality(self, dtype):\n assert is_dtype_equal(dtype, "interval[int64, right]")\n assert is_dtype_equal(dtype, IntervalDtype("int64", "right"))\n assert is_dtype_equal(\n IntervalDtype("int64", "right"), IntervalDtype("int64", "right")\n )\n\n assert not is_dtype_equal(dtype, "interval[int64]")\n assert not is_dtype_equal(dtype, IntervalDtype("int64"))\n assert not is_dtype_equal(\n IntervalDtype("int64", "right"), IntervalDtype("int64")\n )\n\n assert not is_dtype_equal(dtype, "int64")\n assert not is_dtype_equal(\n IntervalDtype("int64", "neither"), IntervalDtype("float64", "right")\n )\n assert not is_dtype_equal(\n IntervalDtype("int64", "both"), IntervalDtype("int64", "left")\n 
)\n\n # invalid subtype comparisons do not raise when directly compared\n dtype1 = IntervalDtype("float64", "left")\n dtype2 = IntervalDtype("datetime64[ns, US/Eastern]", "left")\n assert dtype1 != dtype2\n assert dtype2 != dtype1\n\n @pytest.mark.parametrize(\n "subtype",\n [\n None,\n "interval",\n "Interval",\n "int64",\n "uint64",\n "float64",\n "complex128",\n "datetime64",\n "timedelta64",\n PeriodDtype("Q"),\n ],\n )\n def test_equality_generic(self, subtype):\n # GH 18980\n closed = "right" if subtype is not None else None\n dtype = IntervalDtype(subtype, closed=closed)\n assert is_dtype_equal(dtype, "interval")\n assert is_dtype_equal(dtype, IntervalDtype())\n\n @pytest.mark.parametrize(\n "subtype",\n [\n "int64",\n "uint64",\n "float64",\n "complex128",\n "datetime64",\n "timedelta64",\n PeriodDtype("Q"),\n ],\n )\n def test_name_repr(self, subtype):\n # GH 18980\n closed = "right" if subtype is not None else None\n dtype = IntervalDtype(subtype, closed=closed)\n expected = f"interval[{subtype}, {closed}]"\n assert str(dtype) == expected\n assert dtype.name == "interval"\n\n @pytest.mark.parametrize("subtype", [None, "interval", "Interval"])\n def test_name_repr_generic(self, subtype):\n # GH 18980\n dtype = IntervalDtype(subtype)\n assert str(dtype) == "interval"\n assert dtype.name == "interval"\n\n def test_basic(self, dtype):\n msg = "is_interval_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert is_interval_dtype(dtype)\n\n ii = IntervalIndex.from_breaks(range(3))\n\n assert is_interval_dtype(ii.dtype)\n assert is_interval_dtype(ii)\n\n s = Series(ii, name="A")\n\n assert is_interval_dtype(s.dtype)\n assert is_interval_dtype(s)\n\n def test_basic_dtype(self):\n msg = "is_interval_dtype is deprecated"\n with tm.assert_produces_warning(DeprecationWarning, match=msg):\n assert is_interval_dtype("interval[int64, both]")\n assert is_interval_dtype(IntervalIndex.from_tuples([(0, 1)]))\n assert 
is_interval_dtype(IntervalIndex.from_breaks(np.arange(4)))\n assert is_interval_dtype(\n IntervalIndex.from_breaks(date_range("20130101", periods=3))\n )\n assert not is_interval_dtype("U")\n assert not is_interval_dtype("S")\n assert not is_interval_dtype("foo")\n assert not is_interval_dtype(np.object_)\n assert not is_interval_dtype(np.int64)\n assert not is_interval_dtype(np.float64)\n\n def test_caching(self):\n # GH 54184: Caching not shown to improve performance\n IntervalDtype.reset_cache()\n dtype = IntervalDtype("int64", "right")\n assert len(IntervalDtype._cache_dtypes) == 0\n\n IntervalDtype("interval")\n assert len(IntervalDtype._cache_dtypes) == 0\n\n IntervalDtype.reset_cache()\n tm.round_trip_pickle(dtype)\n assert len(IntervalDtype._cache_dtypes) == 0\n\n def test_not_string(self):\n # GH30568: though IntervalDtype has object kind, it cannot be string\n assert not is_string_dtype(IntervalDtype())\n\n def test_unpickling_without_closed(self):\n # GH#38394\n dtype = IntervalDtype("interval")\n\n assert dtype._closed is None\n\n tm.round_trip_pickle(dtype)\n\n def test_dont_keep_ref_after_del(self):\n # GH 54184\n dtype = IntervalDtype("int64", "right")\n ref = weakref.ref(dtype)\n del dtype\n assert ref() is None\n\n\nclass TestCategoricalDtypeParametrized:\n @pytest.mark.parametrize(\n "categories",\n [\n list("abcd"),\n np.arange(1000),\n ["a", "b", 10, 2, 1.3, True],\n [True, False],\n date_range("2017", periods=4),\n ],\n )\n def test_basic(self, categories, ordered):\n c1 = CategoricalDtype(categories, ordered=ordered)\n tm.assert_index_equal(c1.categories, pd.Index(categories))\n assert c1.ordered is ordered\n\n def test_order_matters(self):\n categories = ["a", "b"]\n c1 = CategoricalDtype(categories, ordered=True)\n c2 = CategoricalDtype(categories, ordered=False)\n c3 = CategoricalDtype(categories, ordered=None)\n assert c1 is not c2\n assert c1 is not c3\n\n @pytest.mark.parametrize("ordered", [False, None])\n def test_unordered_same(self, 
ordered):\n c1 = CategoricalDtype(["a", "b"], ordered=ordered)\n c2 = CategoricalDtype(["b", "a"], ordered=ordered)\n assert hash(c1) == hash(c2)\n\n def test_categories(self):\n result = CategoricalDtype(["a", "b", "c"])\n tm.assert_index_equal(result.categories, pd.Index(["a", "b", "c"]))\n assert result.ordered is False\n\n def test_equal_but_different(self):\n c1 = CategoricalDtype([1, 2, 3])\n c2 = CategoricalDtype([1.0, 2.0, 3.0])\n assert c1 is not c2\n assert c1 != c2\n\n def test_equal_but_different_mixed_dtypes(self):\n c1 = CategoricalDtype([1, 2, "3"])\n c2 = CategoricalDtype(["3", 1, 2])\n assert c1 is not c2\n assert c1 == c2\n\n def test_equal_empty_ordered(self):\n c1 = CategoricalDtype([], ordered=True)\n c2 = CategoricalDtype([], ordered=True)\n assert c1 is not c2\n assert c1 == c2\n\n def test_equal_empty_unordered(self):\n c1 = CategoricalDtype([])\n c2 = CategoricalDtype([])\n assert c1 is not c2\n assert c1 == c2\n\n @pytest.mark.parametrize("v1, v2", [([1, 2, 3], [1, 2, 3]), ([1, 2, 3], [3, 2, 1])])\n def test_order_hashes_different(self, v1, v2):\n c1 = CategoricalDtype(v1, ordered=False)\n c2 = CategoricalDtype(v2, ordered=True)\n c3 = CategoricalDtype(v1, ordered=None)\n assert c1 is not c2\n assert c1 is not c3\n\n def test_nan_invalid(self):\n msg = "Categorical categories cannot be null"\n with pytest.raises(ValueError, match=msg):\n CategoricalDtype([1, 2, np.nan])\n\n def test_non_unique_invalid(self):\n msg = "Categorical categories must be unique"\n with pytest.raises(ValueError, match=msg):\n CategoricalDtype([1, 2, 1])\n\n def test_same_categories_different_order(self):\n c1 = CategoricalDtype(["a", "b"], ordered=True)\n c2 = CategoricalDtype(["b", "a"], ordered=True)\n assert c1 is not c2\n\n @pytest.mark.parametrize("ordered1", [True, False, None])\n @pytest.mark.parametrize("ordered2", [True, False, None])\n def test_categorical_equality(self, ordered1, ordered2):\n # same categories, same order\n # any combination of 
None/False are equal\n # True/True is the only combination with True that are equal\n c1 = CategoricalDtype(list("abc"), ordered1)\n c2 = CategoricalDtype(list("abc"), ordered2)\n result = c1 == c2\n expected = bool(ordered1) is bool(ordered2)\n assert result is expected\n\n # same categories, different order\n # any combination of None/False are equal (order doesn't matter)\n # any combination with True are not equal (different order of cats)\n c1 = CategoricalDtype(list("abc"), ordered1)\n c2 = CategoricalDtype(list("cab"), ordered2)\n result = c1 == c2\n expected = (bool(ordered1) is False) and (bool(ordered2) is False)\n assert result is expected\n\n # different categories\n c2 = CategoricalDtype([1, 2, 3], ordered2)\n assert c1 != c2\n\n # none categories\n c1 = CategoricalDtype(list("abc"), ordered1)\n c2 = CategoricalDtype(None, ordered2)\n c3 = CategoricalDtype(None, ordered1)\n assert c1 != c2\n assert c2 != c1\n assert c2 == c3\n\n def test_categorical_dtype_equality_requires_categories(self):\n # CategoricalDtype with categories=None is *not* equal to\n # any fully-initialized CategoricalDtype\n first = CategoricalDtype(["a", "b"])\n second = CategoricalDtype()\n third = CategoricalDtype(ordered=True)\n\n assert second == second\n assert third == third\n\n assert first != second\n assert second != first\n assert first != third\n assert third != first\n assert second == third\n assert third == second\n\n @pytest.mark.parametrize("categories", [list("abc"), None])\n @pytest.mark.parametrize("other", ["category", "not a category"])\n def test_categorical_equality_strings(self, categories, ordered, other):\n c1 = CategoricalDtype(categories, ordered)\n result = c1 == other\n expected = other == "category"\n assert result is expected\n\n def test_invalid_raises(self):\n with pytest.raises(TypeError, match="ordered"):\n CategoricalDtype(["a", "b"], ordered="foo")\n\n with pytest.raises(TypeError, match="'categories' must be list-like"):\n 
CategoricalDtype("category")\n\n def test_mixed(self):\n a = CategoricalDtype(["a", "b", 1, 2])\n b = CategoricalDtype(["a", "b", "1", "2"])\n assert hash(a) != hash(b)\n\n def test_from_categorical_dtype_identity(self):\n c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)\n # Identity test for no changes\n c2 = CategoricalDtype._from_categorical_dtype(c1)\n assert c2 is c1\n\n def test_from_categorical_dtype_categories(self):\n c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)\n # override categories\n result = CategoricalDtype._from_categorical_dtype(c1, categories=[2, 3])\n assert result == CategoricalDtype([2, 3], ordered=True)\n\n def test_from_categorical_dtype_ordered(self):\n c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)\n # override ordered\n result = CategoricalDtype._from_categorical_dtype(c1, ordered=False)\n assert result == CategoricalDtype([1, 2, 3], ordered=False)\n\n def test_from_categorical_dtype_both(self):\n c1 = Categorical([1, 2], categories=[1, 2, 3], ordered=True)\n # override ordered\n result = CategoricalDtype._from_categorical_dtype(\n c1, categories=[1, 2], ordered=False\n )\n assert result == CategoricalDtype([1, 2], ordered=False)\n\n def test_str_vs_repr(self, ordered, using_infer_string):\n c1 = CategoricalDtype(["a", "b"], ordered=ordered)\n assert str(c1) == "category"\n # Py2 will have unicode prefixes\n dtype = "str" if using_infer_string else "object"\n pat = (\n r"CategoricalDtype\(categories=\[.*\], ordered={ordered}, "\n rf"categories_dtype={dtype}\)"\n )\n assert re.match(pat.format(ordered=ordered), repr(c1))\n\n def test_categorical_categories(self):\n # GH17884\n c1 = CategoricalDtype(Categorical(["a", "b"]))\n tm.assert_index_equal(c1.categories, pd.Index(["a", "b"]))\n c1 = CategoricalDtype(CategoricalIndex(["a", "b"]))\n tm.assert_index_equal(c1.categories, pd.Index(["a", "b"]))\n\n @pytest.mark.parametrize(\n "new_categories", [list("abc"), list("cba"), list("wxyz"), None]\n )\n 
@pytest.mark.parametrize("new_ordered", [True, False, None])\n def test_update_dtype(self, ordered, new_categories, new_ordered):\n original_categories = list("abc")\n dtype = CategoricalDtype(original_categories, ordered)\n new_dtype = CategoricalDtype(new_categories, new_ordered)\n\n result = dtype.update_dtype(new_dtype)\n expected_categories = pd.Index(new_categories or original_categories)\n expected_ordered = new_ordered if new_ordered is not None else dtype.ordered\n\n tm.assert_index_equal(result.categories, expected_categories)\n assert result.ordered is expected_ordered\n\n def test_update_dtype_string(self, ordered):\n dtype = CategoricalDtype(list("abc"), ordered)\n expected_categories = dtype.categories\n expected_ordered = dtype.ordered\n result = dtype.update_dtype("category")\n tm.assert_index_equal(result.categories, expected_categories)\n assert result.ordered is expected_ordered\n\n @pytest.mark.parametrize("bad_dtype", ["foo", object, np.int64, PeriodDtype("Q")])\n def test_update_dtype_errors(self, bad_dtype):\n dtype = CategoricalDtype(list("abc"), False)\n msg = "a CategoricalDtype must be passed to perform an update, "\n with pytest.raises(ValueError, match=msg):\n dtype.update_dtype(bad_dtype)\n\n\n@pytest.mark.parametrize(\n "dtype", [CategoricalDtype, IntervalDtype, DatetimeTZDtype, PeriodDtype]\n)\ndef test_registry(dtype):\n assert dtype in registry.dtypes\n\n\n@pytest.mark.parametrize(\n "dtype, expected",\n [\n ("int64", None),\n ("interval", IntervalDtype()),\n ("interval[int64, neither]", IntervalDtype()),\n ("interval[datetime64[ns], left]", IntervalDtype("datetime64[ns]", "left")),\n ("period[D]", PeriodDtype("D")),\n ("category", CategoricalDtype()),\n ("datetime64[ns, US/Eastern]", DatetimeTZDtype("ns", "US/Eastern")),\n ],\n)\ndef test_registry_find(dtype, expected):\n assert registry.find(dtype) == expected\n\n\n@pytest.mark.parametrize(\n "dtype, expected",\n [\n (str, False),\n (int, False),\n (bool, True),\n (np.bool_, 
True),\n (np.array(["a", "b"]), False),\n (Series([1, 2]), False),\n (np.array([True, False]), True),\n (Series([True, False]), True),\n (SparseArray([True, False]), True),\n (SparseDtype(bool), True),\n ],\n)\ndef test_is_bool_dtype(dtype, expected):\n result = is_bool_dtype(dtype)\n assert result is expected\n\n\ndef test_is_bool_dtype_sparse():\n result = is_bool_dtype(Series(SparseArray([True, False])))\n assert result is True\n\n\n@pytest.mark.parametrize(\n "check",\n [\n is_categorical_dtype,\n is_datetime64tz_dtype,\n is_period_dtype,\n is_datetime64_ns_dtype,\n is_datetime64_dtype,\n is_interval_dtype,\n is_datetime64_any_dtype,\n is_string_dtype,\n is_bool_dtype,\n ],\n)\ndef test_is_dtype_no_warning(check):\n data = pd.DataFrame({"A": [1, 2]})\n\n warn = None\n msg = f"{check.__name__} is deprecated"\n if (\n check is is_categorical_dtype\n or check is is_interval_dtype\n or check is is_datetime64tz_dtype\n or check is is_period_dtype\n ):\n warn = DeprecationWarning\n\n with tm.assert_produces_warning(warn, match=msg):\n check(data)\n\n with tm.assert_produces_warning(warn, match=msg):\n check(data["A"])\n\n\ndef test_period_dtype_compare_to_string():\n # https://github.com/pandas-dev/pandas/issues/37265\n dtype = PeriodDtype(freq="M")\n assert (dtype == "period[M]") is True\n assert (dtype != "period[M]") is False\n\n\ndef test_compare_complex_dtypes():\n # GH 28050\n df = pd.DataFrame(np.arange(5).astype(np.complex128))\n msg = "'<' not supported between instances of 'complex' and 'complex'"\n\n with pytest.raises(TypeError, match=msg):\n df < df.astype(object)\n\n with pytest.raises(TypeError, match=msg):\n df.lt(df.astype(object))\n\n\ndef test_cast_string_to_complex():\n # GH 4895\n expected = pd.DataFrame(["1.0+5j", "1.5-3j"], dtype=complex)\n result = pd.DataFrame(["1.0+5j", "1.5-3j"]).astype(complex)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_categorical_complex():\n result = Categorical([1, 2 + 2j])\n expected = Categorical([1.0 + 
0.0j, 2.0 + 2.0j])\n tm.assert_categorical_equal(result, expected)\n result = Categorical([1, 2, 2 + 2j])\n expected = Categorical([1.0 + 0.0j, 2.0 + 0.0j, 2.0 + 2.0j])\n tm.assert_categorical_equal(result, expected)\n\n\ndef test_multi_column_dtype_assignment():\n # GH #27583\n df = pd.DataFrame({"a": [0.0], "b": 0.0})\n expected = pd.DataFrame({"a": [0], "b": 0})\n\n df[["a", "b"]] = 0\n tm.assert_frame_equal(df, expected)\n\n df["b"] = 0\n tm.assert_frame_equal(df, expected)\n | .venv\Lib\site-packages\pandas\tests\dtypes\test_dtypes.py | test_dtypes.py | Python | 43,844 | 0.95 | 0.114263 | 0.060136 | react-lib | 209 | 2024-06-28T16:54:14.587369 | MIT | true | 544d5fc3b13c3f30dd75b07de9aaec09 |
import re\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes import generic as gt\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\nclass TestABCClasses:\n tuples = [[1, 2, 2], ["red", "blue", "red"]]\n multi_index = pd.MultiIndex.from_arrays(tuples, names=("number", "color"))\n datetime_index = pd.to_datetime(["2000/1/1", "2010/1/1"])\n timedelta_index = pd.to_timedelta(np.arange(5), unit="s")\n period_index = pd.period_range("2000/1/1", "2010/1/1/", freq="M")\n categorical = pd.Categorical([1, 2, 3], categories=[2, 3, 1])\n categorical_df = pd.DataFrame({"values": [1, 2, 3]}, index=categorical)\n df = pd.DataFrame({"names": ["a", "b", "c"]}, index=multi_index)\n sparse_array = pd.arrays.SparseArray(np.random.default_rng(2).standard_normal(10))\n\n datetime_array = pd.core.arrays.DatetimeArray._from_sequence(datetime_index)\n timedelta_array = pd.core.arrays.TimedeltaArray._from_sequence(timedelta_index)\n\n abc_pairs = [\n ("ABCMultiIndex", multi_index),\n ("ABCDatetimeIndex", datetime_index),\n ("ABCRangeIndex", pd.RangeIndex(3)),\n ("ABCTimedeltaIndex", timedelta_index),\n ("ABCIntervalIndex", pd.interval_range(start=0, end=3)),\n (\n "ABCPeriodArray",\n pd.arrays.PeriodArray([2000, 2001, 2002], dtype="period[D]"),\n ),\n ("ABCNumpyExtensionArray", pd.arrays.NumpyExtensionArray(np.array([0, 1, 2]))),\n ("ABCPeriodIndex", period_index),\n ("ABCCategoricalIndex", categorical_df.index),\n ("ABCSeries", pd.Series([1, 2, 3])),\n ("ABCDataFrame", df),\n ("ABCCategorical", categorical),\n ("ABCDatetimeArray", datetime_array),\n ("ABCTimedeltaArray", timedelta_array),\n ]\n\n @pytest.mark.parametrize("abctype1, inst", abc_pairs)\n @pytest.mark.parametrize("abctype2, _", abc_pairs)\n def test_abc_pairs_instance_check(self, abctype1, abctype2, inst, _):\n # GH 38588, 46719\n if abctype1 == abctype2:\n assert isinstance(inst, getattr(gt, abctype2))\n assert not isinstance(type(inst), getattr(gt, abctype2))\n else:\n assert not isinstance(inst, 
getattr(gt, abctype2))\n\n @pytest.mark.parametrize("abctype1, inst", abc_pairs)\n @pytest.mark.parametrize("abctype2, _", abc_pairs)\n def test_abc_pairs_subclass_check(self, abctype1, abctype2, inst, _):\n # GH 38588, 46719\n if abctype1 == abctype2:\n assert issubclass(type(inst), getattr(gt, abctype2))\n\n with pytest.raises(\n TypeError, match=re.escape("issubclass() arg 1 must be a class")\n ):\n issubclass(inst, getattr(gt, abctype2))\n else:\n assert not issubclass(type(inst), getattr(gt, abctype2))\n\n abc_subclasses = {\n "ABCIndex": [\n abctype\n for abctype, _ in abc_pairs\n if "Index" in abctype and abctype != "ABCIndex"\n ],\n "ABCNDFrame": ["ABCSeries", "ABCDataFrame"],\n "ABCExtensionArray": [\n "ABCCategorical",\n "ABCDatetimeArray",\n "ABCPeriodArray",\n "ABCTimedeltaArray",\n ],\n }\n\n @pytest.mark.parametrize("parent, subs", abc_subclasses.items())\n @pytest.mark.parametrize("abctype, inst", abc_pairs)\n def test_abc_hierarchy(self, parent, subs, abctype, inst):\n # GH 38588\n if abctype in subs:\n assert isinstance(inst, getattr(gt, parent))\n else:\n assert not isinstance(inst, getattr(gt, parent))\n\n @pytest.mark.parametrize("abctype", [e for e in gt.__dict__ if e.startswith("ABC")])\n def test_abc_coverage(self, abctype):\n # GH 38588\n assert (\n abctype in (e for e, _ in self.abc_pairs) or abctype in self.abc_subclasses\n )\n\n\ndef test_setattr_warnings():\n # GH7175 - GOTCHA: You can't use dot notation to add a column...\n d = {\n "one": pd.Series([1.0, 2.0, 3.0], index=["a", "b", "c"]),\n "two": pd.Series([1.0, 2.0, 3.0, 4.0], index=["a", "b", "c", "d"]),\n }\n df = pd.DataFrame(d)\n\n with tm.assert_produces_warning(None):\n # successfully add new column\n # this should not raise a warning\n df["three"] = df.two + 1\n assert df.three.sum() > df.two.sum()\n\n with tm.assert_produces_warning(None):\n # successfully modify column in place\n # this should not raise a warning\n df.one += 1\n assert df.one.iloc[0] == 2\n\n with 
tm.assert_produces_warning(None):\n # successfully add an attribute to a series\n # this should not raise a warning\n df.two.not_an_index = [1, 2]\n\n with tm.assert_produces_warning(UserWarning):\n # warn when setting column to nonexistent name\n df.four = df.two + 2\n assert df.four.sum() > df.two.sum()\n | .venv\Lib\site-packages\pandas\tests\dtypes\test_generic.py | test_generic.py | Python | 4,842 | 0.95 | 0.115385 | 0.108108 | awesome-app | 239 | 2024-05-21T13:23:29.329783 | GPL-3.0 | true | 102c2843f6f2548a33e96d669050fb75 |
"""\nThese the test the public routines exposed in types/common.py\nrelated to inference and not otherwise tested in types/test_common.py\n\n"""\nimport collections\nfrom collections import namedtuple\nfrom collections.abc import Iterator\nfrom datetime import (\n date,\n datetime,\n time,\n timedelta,\n)\nfrom decimal import Decimal\nfrom fractions import Fraction\nfrom io import StringIO\nimport itertools\nfrom numbers import Number\nimport re\nimport sys\nfrom typing import (\n Generic,\n TypeVar,\n)\n\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs import (\n lib,\n missing as libmissing,\n ops as libops,\n)\nfrom pandas.compat.numpy import np_version_gt2\n\nfrom pandas.core.dtypes import inference\nfrom pandas.core.dtypes.cast import find_result_type\nfrom pandas.core.dtypes.common import (\n ensure_int32,\n is_bool,\n is_complex,\n is_datetime64_any_dtype,\n is_datetime64_dtype,\n is_datetime64_ns_dtype,\n is_datetime64tz_dtype,\n is_float,\n is_integer,\n is_number,\n is_scalar,\n is_scipy_sparse,\n is_timedelta64_dtype,\n is_timedelta64_ns_dtype,\n)\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n DateOffset,\n DatetimeIndex,\n Index,\n Interval,\n Period,\n PeriodIndex,\n Series,\n Timedelta,\n TimedeltaIndex,\n Timestamp,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n BooleanArray,\n FloatingArray,\n IntegerArray,\n)\n\n\n@pytest.fixture(params=[True, False], ids=str)\ndef coerce(request):\n return request.param\n\n\nclass MockNumpyLikeArray:\n """\n A class which is numpy-like (e.g. Pint's Quantity) but not actually numpy\n\n The key is that it is not actually a numpy array so\n ``util.is_array(mock_numpy_like_array_instance)`` returns ``False``. 
Other\n important properties are that the class defines a :meth:`__iter__` method\n (so that ``isinstance(abc.Iterable)`` returns ``True``) and has a\n :meth:`ndim` property, as pandas special-cases 0-dimensional arrays in some\n cases.\n\n We expect pandas to behave with respect to such duck arrays exactly as\n with real numpy arrays. In particular, a 0-dimensional duck array is *NOT*\n a scalar (`is_scalar(np.array(1)) == False`), but it is not list-like either.\n """\n\n def __init__(self, values) -> None:\n self._values = values\n\n def __iter__(self) -> Iterator:\n iter_values = iter(self._values)\n\n def it_outer():\n yield from iter_values\n\n return it_outer()\n\n def __len__(self) -> int:\n return len(self._values)\n\n def __array__(self, dtype=None, copy=None):\n return np.asarray(self._values, dtype=dtype)\n\n @property\n def ndim(self):\n return self._values.ndim\n\n @property\n def dtype(self):\n return self._values.dtype\n\n @property\n def size(self):\n return self._values.size\n\n @property\n def shape(self):\n return self._values.shape\n\n\n# collect all objects to be tested for list-like-ness; use tuples of objects,\n# whether they are list-like or not (special casing for sets), and their ID\nll_params = [\n ([1], True, "list"),\n ([], True, "list-empty"),\n ((1,), True, "tuple"),\n ((), True, "tuple-empty"),\n ({"a": 1}, True, "dict"),\n ({}, True, "dict-empty"),\n ({"a", 1}, "set", "set"),\n (set(), "set", "set-empty"),\n (frozenset({"a", 1}), "set", "frozenset"),\n (frozenset(), "set", "frozenset-empty"),\n (iter([1, 2]), True, "iterator"),\n (iter([]), True, "iterator-empty"),\n ((x for x in [1, 2]), True, "generator"),\n ((_ for _ in []), True, "generator-empty"),\n (Series([1]), True, "Series"),\n (Series([], dtype=object), True, "Series-empty"),\n # Series.str will still raise a TypeError if iterated\n (Series(["a"]).str, True, "StringMethods"),\n (Series([], dtype="O").str, True, "StringMethods-empty"),\n (Index([1]), True, "Index"),\n 
(Index([]), True, "Index-empty"),\n (DataFrame([[1]]), True, "DataFrame"),\n (DataFrame(), True, "DataFrame-empty"),\n (np.ndarray((2,) * 1), True, "ndarray-1d"),\n (np.array([]), True, "ndarray-1d-empty"),\n (np.ndarray((2,) * 2), True, "ndarray-2d"),\n (np.array([[]]), True, "ndarray-2d-empty"),\n (np.ndarray((2,) * 3), True, "ndarray-3d"),\n (np.array([[[]]]), True, "ndarray-3d-empty"),\n (np.ndarray((2,) * 4), True, "ndarray-4d"),\n (np.array([[[[]]]]), True, "ndarray-4d-empty"),\n (np.array(2), False, "ndarray-0d"),\n (MockNumpyLikeArray(np.ndarray((2,) * 1)), True, "duck-ndarray-1d"),\n (MockNumpyLikeArray(np.array([])), True, "duck-ndarray-1d-empty"),\n (MockNumpyLikeArray(np.ndarray((2,) * 2)), True, "duck-ndarray-2d"),\n (MockNumpyLikeArray(np.array([[]])), True, "duck-ndarray-2d-empty"),\n (MockNumpyLikeArray(np.ndarray((2,) * 3)), True, "duck-ndarray-3d"),\n (MockNumpyLikeArray(np.array([[[]]])), True, "duck-ndarray-3d-empty"),\n (MockNumpyLikeArray(np.ndarray((2,) * 4)), True, "duck-ndarray-4d"),\n (MockNumpyLikeArray(np.array([[[[]]]])), True, "duck-ndarray-4d-empty"),\n (MockNumpyLikeArray(np.array(2)), False, "duck-ndarray-0d"),\n (1, False, "int"),\n (b"123", False, "bytes"),\n (b"", False, "bytes-empty"),\n ("123", False, "string"),\n ("", False, "string-empty"),\n (str, False, "string-type"),\n (object(), False, "object"),\n (np.nan, False, "NaN"),\n (None, False, "None"),\n]\nobjs, expected, ids = zip(*ll_params)\n\n\n@pytest.fixture(params=zip(objs, expected), ids=ids)\ndef maybe_list_like(request):\n return request.param\n\n\ndef test_is_list_like(maybe_list_like):\n obj, expected = maybe_list_like\n expected = True if expected == "set" else expected\n assert inference.is_list_like(obj) == expected\n\n\ndef test_is_list_like_disallow_sets(maybe_list_like):\n obj, expected = maybe_list_like\n expected = False if expected == "set" else expected\n assert inference.is_list_like(obj, allow_sets=False) == expected\n\n\ndef 
test_is_list_like_recursion():\n # GH 33721\n # interpreter would crash with SIGABRT\n def list_like():\n inference.is_list_like([])\n list_like()\n\n rec_limit = sys.getrecursionlimit()\n try:\n # Limit to avoid stack overflow on Windows CI\n sys.setrecursionlimit(100)\n with tm.external_error_raised(RecursionError):\n list_like()\n finally:\n sys.setrecursionlimit(rec_limit)\n\n\ndef test_is_list_like_iter_is_none():\n # GH 43373\n # is_list_like was yielding false positives with __iter__ == None\n class NotListLike:\n def __getitem__(self, item):\n return self\n\n __iter__ = None\n\n assert not inference.is_list_like(NotListLike())\n\n\ndef test_is_list_like_generic():\n # GH 49649\n # is_list_like was yielding false positives for Generic classes in python 3.11\n T = TypeVar("T")\n\n class MyDataFrame(DataFrame, Generic[T]):\n ...\n\n tstc = MyDataFrame[int]\n tst = MyDataFrame[int]({"x": [1, 2, 3]})\n\n assert not inference.is_list_like(tstc)\n assert isinstance(tst, DataFrame)\n assert inference.is_list_like(tst)\n\n\ndef test_is_sequence():\n is_seq = inference.is_sequence\n assert is_seq((1, 2))\n assert is_seq([1, 2])\n assert not is_seq("abcd")\n assert not is_seq(np.int64)\n\n class A:\n def __getitem__(self, item):\n return 1\n\n assert not is_seq(A())\n\n\ndef test_is_array_like():\n assert inference.is_array_like(Series([], dtype=object))\n assert inference.is_array_like(Series([1, 2]))\n assert inference.is_array_like(np.array(["a", "b"]))\n assert inference.is_array_like(Index(["2016-01-01"]))\n assert inference.is_array_like(np.array([2, 3]))\n assert inference.is_array_like(MockNumpyLikeArray(np.array([2, 3])))\n\n class DtypeList(list):\n dtype = "special"\n\n assert inference.is_array_like(DtypeList())\n\n assert not inference.is_array_like([1, 2, 3])\n assert not inference.is_array_like(())\n assert not inference.is_array_like("foo")\n assert not inference.is_array_like(123)\n\n\n@pytest.mark.parametrize(\n "inner",\n [\n [],\n [1],\n (1,),\n 
(1, 2),\n {"a": 1},\n {1, "a"},\n Series([1]),\n Series([], dtype=object),\n Series(["a"]).str,\n (x for x in range(5)),\n ],\n)\n@pytest.mark.parametrize("outer", [list, Series, np.array, tuple])\ndef test_is_nested_list_like_passes(inner, outer):\n result = outer([inner for _ in range(5)])\n assert inference.is_list_like(result)\n\n\n@pytest.mark.parametrize(\n "obj",\n [\n "abc",\n [],\n [1],\n (1,),\n ["a"],\n "a",\n {"a"},\n [1, 2, 3],\n Series([1]),\n DataFrame({"A": [1]}),\n ([1, 2] for _ in range(5)),\n ],\n)\ndef test_is_nested_list_like_fails(obj):\n assert not inference.is_nested_list_like(obj)\n\n\n@pytest.mark.parametrize("ll", [{}, {"A": 1}, Series([1]), collections.defaultdict()])\ndef test_is_dict_like_passes(ll):\n assert inference.is_dict_like(ll)\n\n\n@pytest.mark.parametrize(\n "ll",\n [\n "1",\n 1,\n [1, 2],\n (1, 2),\n range(2),\n Index([1]),\n dict,\n collections.defaultdict,\n Series,\n ],\n)\ndef test_is_dict_like_fails(ll):\n assert not inference.is_dict_like(ll)\n\n\n@pytest.mark.parametrize("has_keys", [True, False])\n@pytest.mark.parametrize("has_getitem", [True, False])\n@pytest.mark.parametrize("has_contains", [True, False])\ndef test_is_dict_like_duck_type(has_keys, has_getitem, has_contains):\n class DictLike:\n def __init__(self, d) -> None:\n self.d = d\n\n if has_keys:\n\n def keys(self):\n return self.d.keys()\n\n if has_getitem:\n\n def __getitem__(self, key):\n return self.d.__getitem__(key)\n\n if has_contains:\n\n def __contains__(self, key) -> bool:\n return self.d.__contains__(key)\n\n d = DictLike({1: 2})\n result = inference.is_dict_like(d)\n expected = has_keys and has_getitem and has_contains\n\n assert result is expected\n\n\ndef test_is_file_like():\n class MockFile:\n pass\n\n is_file = inference.is_file_like\n\n data = StringIO("data")\n assert is_file(data)\n\n # No read / write attributes\n # No iterator attributes\n m = MockFile()\n assert not is_file(m)\n\n MockFile.write = lambda self: 0\n\n # Write attribute 
but not an iterator\n m = MockFile()\n assert not is_file(m)\n\n # gh-16530: Valid iterator just means we have the\n # __iter__ attribute for our purposes.\n MockFile.__iter__ = lambda self: self\n\n # Valid write-only file\n m = MockFile()\n assert is_file(m)\n\n del MockFile.write\n MockFile.read = lambda self: 0\n\n # Valid read-only file\n m = MockFile()\n assert is_file(m)\n\n # Iterator but no read / write attributes\n data = [1, 2, 3]\n assert not is_file(data)\n\n\ntest_tuple = collections.namedtuple("test_tuple", ["a", "b", "c"])\n\n\n@pytest.mark.parametrize("ll", [test_tuple(1, 2, 3)])\ndef test_is_names_tuple_passes(ll):\n assert inference.is_named_tuple(ll)\n\n\n@pytest.mark.parametrize("ll", [(1, 2, 3), "a", Series({"pi": 3.14})])\ndef test_is_names_tuple_fails(ll):\n assert not inference.is_named_tuple(ll)\n\n\ndef test_is_hashable():\n # all new-style classes are hashable by default\n class HashableClass:\n pass\n\n class UnhashableClass1:\n __hash__ = None\n\n class UnhashableClass2:\n def __hash__(self):\n raise TypeError("Not hashable")\n\n hashable = (1, 3.14, np.float64(3.14), "a", (), (1,), HashableClass())\n not_hashable = ([], UnhashableClass1())\n abc_hashable_not_really_hashable = (([],), UnhashableClass2())\n\n for i in hashable:\n assert inference.is_hashable(i)\n for i in not_hashable:\n assert not inference.is_hashable(i)\n for i in abc_hashable_not_really_hashable:\n assert not inference.is_hashable(i)\n\n # numpy.array is no longer collections.abc.Hashable as of\n # https://github.com/numpy/numpy/pull/5326, just test\n # is_hashable()\n assert not inference.is_hashable(np.array([]))\n\n\n@pytest.mark.parametrize("ll", [re.compile("ad")])\ndef test_is_re_passes(ll):\n assert inference.is_re(ll)\n\n\n@pytest.mark.parametrize("ll", ["x", 2, 3, object()])\ndef test_is_re_fails(ll):\n assert not inference.is_re(ll)\n\n\n@pytest.mark.parametrize(\n "ll", [r"a", "x", r"asdf", re.compile("adsf"), r"\u2233\s*", re.compile(r"")]\n)\ndef 
test_is_recompilable_passes(ll):\n assert inference.is_re_compilable(ll)\n\n\n@pytest.mark.parametrize("ll", [1, [], object()])\ndef test_is_recompilable_fails(ll):\n assert not inference.is_re_compilable(ll)\n\n\nclass TestInference:\n @pytest.mark.parametrize(\n "arr",\n [\n np.array(list("abc"), dtype="S1"),\n np.array(list("abc"), dtype="S1").astype(object),\n [b"a", np.nan, b"c"],\n ],\n )\n def test_infer_dtype_bytes(self, arr):\n result = lib.infer_dtype(arr, skipna=True)\n assert result == "bytes"\n\n @pytest.mark.parametrize(\n "value, expected",\n [\n (float("inf"), True),\n (np.inf, True),\n (-np.inf, False),\n (1, False),\n ("a", False),\n ],\n )\n def test_isposinf_scalar(self, value, expected):\n # GH 11352\n result = libmissing.isposinf_scalar(value)\n assert result is expected\n\n @pytest.mark.parametrize(\n "value, expected",\n [\n (float("-inf"), True),\n (-np.inf, True),\n (np.inf, False),\n (1, False),\n ("a", False),\n ],\n )\n def test_isneginf_scalar(self, value, expected):\n result = libmissing.isneginf_scalar(value)\n assert result is expected\n\n @pytest.mark.parametrize(\n "convert_to_masked_nullable, exp",\n [\n (\n True,\n BooleanArray(\n np.array([True, False], dtype="bool"), np.array([False, True])\n ),\n ),\n (False, np.array([True, np.nan], dtype="object")),\n ],\n )\n def test_maybe_convert_nullable_boolean(self, convert_to_masked_nullable, exp):\n # GH 40687\n arr = np.array([True, np.nan], dtype=object)\n result = libops.maybe_convert_bool(\n arr, set(), convert_to_masked_nullable=convert_to_masked_nullable\n )\n if convert_to_masked_nullable:\n tm.assert_extension_array_equal(BooleanArray(*result), exp)\n else:\n result = result[0]\n tm.assert_numpy_array_equal(result, exp)\n\n @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])\n @pytest.mark.parametrize("coerce_numeric", [True, False])\n @pytest.mark.parametrize(\n "infinity", ["inf", "inF", "iNf", "Inf", "iNF", "InF", "INf", "INF"]\n )\n 
@pytest.mark.parametrize("prefix", ["", "-", "+"])\n def test_maybe_convert_numeric_infinities(\n self, coerce_numeric, infinity, prefix, convert_to_masked_nullable\n ):\n # see gh-13274\n result, _ = lib.maybe_convert_numeric(\n np.array([prefix + infinity], dtype=object),\n na_values={"", "NULL", "nan"},\n coerce_numeric=coerce_numeric,\n convert_to_masked_nullable=convert_to_masked_nullable,\n )\n expected = np.array([np.inf if prefix in ["", "+"] else -np.inf])\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])\n def test_maybe_convert_numeric_infinities_raises(self, convert_to_masked_nullable):\n msg = "Unable to parse string"\n with pytest.raises(ValueError, match=msg):\n lib.maybe_convert_numeric(\n np.array(["foo_inf"], dtype=object),\n na_values={"", "NULL", "nan"},\n coerce_numeric=False,\n convert_to_masked_nullable=convert_to_masked_nullable,\n )\n\n @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])\n def test_maybe_convert_numeric_post_floatify_nan(\n self, coerce, convert_to_masked_nullable\n ):\n # see gh-13314\n data = np.array(["1.200", "-999.000", "4.500"], dtype=object)\n expected = np.array([1.2, np.nan, 4.5], dtype=np.float64)\n nan_values = {-999, -999.0}\n\n out = lib.maybe_convert_numeric(\n data,\n nan_values,\n coerce,\n convert_to_masked_nullable=convert_to_masked_nullable,\n )\n if convert_to_masked_nullable:\n expected = FloatingArray(expected, np.isnan(expected))\n tm.assert_extension_array_equal(expected, FloatingArray(*out))\n else:\n out = out[0]\n tm.assert_numpy_array_equal(out, expected)\n\n def test_convert_infs(self):\n arr = np.array(["inf", "inf", "inf"], dtype="O")\n result, _ = lib.maybe_convert_numeric(arr, set(), False)\n assert result.dtype == np.float64\n\n arr = np.array(["-inf", "-inf", "-inf"], dtype="O")\n result, _ = lib.maybe_convert_numeric(arr, set(), False)\n assert result.dtype == np.float64\n\n def 
test_scientific_no_exponent(self):\n # See PR 12215\n arr = np.array(["42E", "2E", "99e", "6e"], dtype="O")\n result, _ = lib.maybe_convert_numeric(arr, set(), False, True)\n assert np.all(np.isnan(result))\n\n def test_convert_non_hashable(self):\n # GH13324\n # make sure that we are handing non-hashables\n arr = np.array([[10.0, 2], 1.0, "apple"], dtype=object)\n result, _ = lib.maybe_convert_numeric(arr, set(), False, True)\n tm.assert_numpy_array_equal(result, np.array([np.nan, 1.0, np.nan]))\n\n def test_convert_numeric_uint64(self):\n arr = np.array([2**63], dtype=object)\n exp = np.array([2**63], dtype=np.uint64)\n tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)\n\n arr = np.array([str(2**63)], dtype=object)\n exp = np.array([2**63], dtype=np.uint64)\n tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)\n\n arr = np.array([np.uint64(2**63)], dtype=object)\n exp = np.array([2**63], dtype=np.uint64)\n tm.assert_numpy_array_equal(lib.maybe_convert_numeric(arr, set())[0], exp)\n\n @pytest.mark.parametrize(\n "arr",\n [\n np.array([2**63, np.nan], dtype=object),\n np.array([str(2**63), np.nan], dtype=object),\n np.array([np.nan, 2**63], dtype=object),\n np.array([np.nan, str(2**63)], dtype=object),\n ],\n )\n def test_convert_numeric_uint64_nan(self, coerce, arr):\n expected = arr.astype(float) if coerce else arr.copy()\n result, _ = lib.maybe_convert_numeric(arr, set(), coerce_numeric=coerce)\n tm.assert_almost_equal(result, expected)\n\n @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])\n def test_convert_numeric_uint64_nan_values(\n self, coerce, convert_to_masked_nullable\n ):\n arr = np.array([2**63, 2**63 + 1], dtype=object)\n na_values = {2**63}\n\n expected = (\n np.array([np.nan, 2**63 + 1], dtype=float) if coerce else arr.copy()\n )\n result = lib.maybe_convert_numeric(\n arr,\n na_values,\n coerce_numeric=coerce,\n convert_to_masked_nullable=convert_to_masked_nullable,\n )\n if 
convert_to_masked_nullable and coerce:\n expected = IntegerArray(\n np.array([0, 2**63 + 1], dtype="u8"),\n np.array([True, False], dtype="bool"),\n )\n result = IntegerArray(*result)\n else:\n result = result[0] # discard mask\n tm.assert_almost_equal(result, expected)\n\n @pytest.mark.parametrize(\n "case",\n [\n np.array([2**63, -1], dtype=object),\n np.array([str(2**63), -1], dtype=object),\n np.array([str(2**63), str(-1)], dtype=object),\n np.array([-1, 2**63], dtype=object),\n np.array([-1, str(2**63)], dtype=object),\n np.array([str(-1), str(2**63)], dtype=object),\n ],\n )\n @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])\n def test_convert_numeric_int64_uint64(\n self, case, coerce, convert_to_masked_nullable\n ):\n expected = case.astype(float) if coerce else case.copy()\n result, _ = lib.maybe_convert_numeric(\n case,\n set(),\n coerce_numeric=coerce,\n convert_to_masked_nullable=convert_to_masked_nullable,\n )\n\n tm.assert_almost_equal(result, expected)\n\n @pytest.mark.parametrize("convert_to_masked_nullable", [True, False])\n def test_convert_numeric_string_uint64(self, convert_to_masked_nullable):\n # GH32394\n result = lib.maybe_convert_numeric(\n np.array(["uint64"], dtype=object),\n set(),\n coerce_numeric=True,\n convert_to_masked_nullable=convert_to_masked_nullable,\n )\n if convert_to_masked_nullable:\n result = FloatingArray(*result)\n else:\n result = result[0]\n assert np.isnan(result)\n\n @pytest.mark.parametrize("value", [-(2**63) - 1, 2**64])\n def test_convert_int_overflow(self, value):\n # see gh-18584\n arr = np.array([value], dtype=object)\n result = lib.maybe_convert_objects(arr)\n tm.assert_numpy_array_equal(arr, result)\n\n @pytest.mark.parametrize("val", [None, np.nan, float("nan")])\n @pytest.mark.parametrize("dtype", ["M8[ns]", "m8[ns]"])\n def test_maybe_convert_objects_nat_inference(self, val, dtype):\n dtype = np.dtype(dtype)\n vals = np.array([pd.NaT, val], dtype=object)\n result = 
lib.maybe_convert_objects(\n vals,\n convert_non_numeric=True,\n dtype_if_all_nat=dtype,\n )\n assert result.dtype == dtype\n assert np.isnat(result).all()\n\n result = lib.maybe_convert_objects(\n vals[::-1],\n convert_non_numeric=True,\n dtype_if_all_nat=dtype,\n )\n assert result.dtype == dtype\n assert np.isnat(result).all()\n\n @pytest.mark.parametrize(\n "value, expected_dtype",\n [\n # see gh-4471\n ([2**63], np.uint64),\n # NumPy bug: can't compare uint64 to int64, as that\n # results in both casting to float64, so we should\n # make sure that this function is robust against it\n ([np.uint64(2**63)], np.uint64),\n ([2, -1], np.int64),\n ([2**63, -1], object),\n # GH#47294\n ([np.uint8(1)], np.uint8),\n ([np.uint16(1)], np.uint16),\n ([np.uint32(1)], np.uint32),\n ([np.uint64(1)], np.uint64),\n ([np.uint8(2), np.uint16(1)], np.uint16),\n ([np.uint32(2), np.uint16(1)], np.uint32),\n ([np.uint32(2), -1], object),\n ([np.uint32(2), 1], np.uint64),\n ([np.uint32(2), np.int32(1)], object),\n ],\n )\n def test_maybe_convert_objects_uint(self, value, expected_dtype):\n arr = np.array(value, dtype=object)\n exp = np.array(value, dtype=expected_dtype)\n tm.assert_numpy_array_equal(lib.maybe_convert_objects(arr), exp)\n\n def test_maybe_convert_objects_datetime(self):\n # GH27438\n arr = np.array(\n [np.datetime64("2000-01-01"), np.timedelta64(1, "s")], dtype=object\n )\n exp = arr.copy()\n out = lib.maybe_convert_objects(arr, convert_non_numeric=True)\n tm.assert_numpy_array_equal(out, exp)\n\n arr = np.array([pd.NaT, np.timedelta64(1, "s")], dtype=object)\n exp = np.array([np.timedelta64("NaT"), np.timedelta64(1, "s")], dtype="m8[ns]")\n out = lib.maybe_convert_objects(arr, convert_non_numeric=True)\n tm.assert_numpy_array_equal(out, exp)\n\n # with convert_non_numeric=True, the nan is a valid NA value for td64\n arr = np.array([np.timedelta64(1, "s"), np.nan], dtype=object)\n exp = exp[::-1]\n out = lib.maybe_convert_objects(arr, convert_non_numeric=True)\n 
        tm.assert_numpy_array_equal(out, exp)

    def test_maybe_convert_objects_dtype_if_all_nat(self):
        arr = np.array([pd.NaT, pd.NaT], dtype=object)
        out = lib.maybe_convert_objects(arr, convert_non_numeric=True)
        # no dtype_if_all_nat passed -> we don't guess
        tm.assert_numpy_array_equal(out, arr)

        # the caller-supplied dtype decides how an all-NaT array is cast
        out = lib.maybe_convert_objects(
            arr,
            convert_non_numeric=True,
            dtype_if_all_nat=np.dtype("timedelta64[ns]"),
        )
        exp = np.array(["NaT", "NaT"], dtype="timedelta64[ns]")
        tm.assert_numpy_array_equal(out, exp)

        out = lib.maybe_convert_objects(
            arr,
            convert_non_numeric=True,
            dtype_if_all_nat=np.dtype("datetime64[ns]"),
        )
        exp = np.array(["NaT", "NaT"], dtype="datetime64[ns]")
        tm.assert_numpy_array_equal(out, exp)

    def test_maybe_convert_objects_dtype_if_all_nat_invalid(self):
        # we accept datetime64[ns], timedelta64[ns], and EADtype
        arr = np.array([pd.NaT, pd.NaT], dtype=object)

        with pytest.raises(ValueError, match="int64"):
            lib.maybe_convert_objects(
                arr,
                convert_non_numeric=True,
                dtype_if_all_nat=np.dtype("int64"),
            )

    @pytest.mark.parametrize("dtype", ["datetime64[ns]", "timedelta64[ns]"])
    def test_maybe_convert_objects_datetime_overflow_safe(self, dtype):
        stamp = datetime(2363, 10, 4)  # Enterprise-D launch date
        if dtype == "timedelta64[ns]":
            stamp = stamp - datetime(1970, 1, 1)
        arr = np.array([stamp], dtype=object)

        out = lib.maybe_convert_objects(arr, convert_non_numeric=True)
        # no OutOfBoundsDatetime/OutOfBoundsTimedeltas
        tm.assert_numpy_array_equal(out, arr)

    def test_maybe_convert_objects_mixed_datetimes(self):
        ts = Timestamp("now")
        vals = [ts, ts.to_pydatetime(), ts.to_datetime64(), pd.NaT, np.nan, None]

        # result must not depend on the order of the mixed datetime-likes
        for data in itertools.permutations(vals):
            data = np.array(list(data), dtype=object)
            expected = DatetimeIndex(data)._data._ndarray
            result = lib.maybe_convert_objects(data, convert_non_numeric=True)
            tm.assert_numpy_array_equal(result, expected)

    def test_maybe_convert_objects_timedelta64_nat(self):
        obj = np.timedelta64("NaT", "ns")
        arr = np.array([obj], dtype=object)
        assert arr[0] is obj

        result = lib.maybe_convert_objects(arr, convert_non_numeric=True)

        expected = np.array([obj], dtype="m8[ns]")
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize(
        "exp",
        [
            IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True])),
            IntegerArray(np.array([2, 0], dtype="int64"), np.array([False, True])),
        ],
    )
    def test_maybe_convert_objects_nullable_integer(self, exp):
        # GH27335
        arr = np.array([2, np.nan], dtype=object)
        result = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)

        tm.assert_extension_array_equal(result, exp)

    @pytest.mark.parametrize(
        "dtype, val", [("int64", 1), ("uint64", np.iinfo(np.int64).max + 1)]
    )
    def test_maybe_convert_objects_nullable_none(self, dtype, val):
        # GH#50043
        arr = np.array([val, None, 3], dtype="object")
        result = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)
        expected = IntegerArray(
            np.array([val, 0, 3], dtype=dtype), np.array([False, True, False])
        )
        tm.assert_extension_array_equal(result, expected)

    @pytest.mark.parametrize(
        "convert_to_masked_nullable, exp",
        [
            (True, IntegerArray(np.array([2, 0], dtype="i8"), np.array([False, True]))),
            (False, np.array([2, np.nan], dtype="float64")),
        ],
    )
    def test_maybe_convert_numeric_nullable_integer(
        self, convert_to_masked_nullable, exp
    ):
        # GH 40687
        arr = np.array([2, np.nan], dtype=object)
        result = lib.maybe_convert_numeric(
            arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
        )
        if convert_to_masked_nullable:
            result = IntegerArray(*result)
            tm.assert_extension_array_equal(result, exp)
        else:
            result = result[0]
            tm.assert_numpy_array_equal(result, exp)

    @pytest.mark.parametrize(
        "convert_to_masked_nullable, exp",
        [
            (
                True,
                FloatingArray(
                    np.array([2.0, 0.0], dtype="float64"), np.array([False, True])
                ),
            ),
            (False, np.array([2.0, np.nan], dtype="float64")),
        ],
    )
    def test_maybe_convert_numeric_floating_array(
        self, convert_to_masked_nullable, exp
    ):
        # GH 40687
        arr = np.array([2.0, np.nan], dtype=object)
        result = lib.maybe_convert_numeric(
            arr, set(), convert_to_masked_nullable=convert_to_masked_nullable
        )
        if convert_to_masked_nullable:
            tm.assert_extension_array_equal(FloatingArray(*result), exp)
        else:
            result = result[0]
            tm.assert_numpy_array_equal(result, exp)

    def test_maybe_convert_objects_bool_nan(self):
        # GH32146
        ind = Index([True, False, np.nan], dtype=object)
        exp = np.array([True, False, np.nan], dtype=object)
        # with safe=1 the object array is returned unchanged (no cast to bool)
        out = lib.maybe_convert_objects(ind.values, safe=1)
        tm.assert_numpy_array_equal(out, exp)

    def test_maybe_convert_objects_nullable_boolean(self):
        # GH50047
        arr = np.array([True, False], dtype=object)
        exp = np.array([True, False])
        out = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)
        tm.assert_numpy_array_equal(out, exp)

        # pd.NaT is not a valid boolean NA here -> stays object
        arr = np.array([True, False, pd.NaT], dtype=object)
        exp = np.array([True, False, pd.NaT], dtype=object)
        out = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)
        tm.assert_numpy_array_equal(out, exp)

    @pytest.mark.parametrize("val", [None, np.nan])
    def test_maybe_convert_objects_nullable_boolean_na(self, val):
        # GH50047
        arr = np.array([True, False, val], dtype=object)
        exp = BooleanArray(
            np.array([True, False, False]), np.array([False, False, True])
        )
        out = lib.maybe_convert_objects(arr, convert_to_nullable_dtype=True)
        tm.assert_extension_array_equal(out, exp)

    @pytest.mark.parametrize(
        "data0",
        [
            True,
            1,
            1.0,
            1.0 + 1.0j,
            np.int8(1),
            np.int16(1),
            np.int32(1),
            np.int64(1),
            np.float16(1),
            np.float32(1),
            np.float64(1),
            np.complex64(1),
            np.complex128(1),
        ],
    )
    @pytest.mark.parametrize(
        "data1",
        [
            True,
            1,
            1.0,
            1.0 + 1.0j,
            np.int8(1),
            np.int16(1),
            np.int32(1),
            np.int64(1),
            np.float16(1),
            np.float32(1),
            np.float64(1),
            np.complex64(1),
            np.complex128(1),
        ],
    )
    def test_maybe_convert_objects_itemsize(self, data0, data1):
        # GH 40908
        data = [data0, data1]
        arr = np.array(data, dtype="object")

        common_kind = np.result_type(type(data0), type(data1)).kind
        # "python" marks plain Python scalars (no .dtype attribute)
        kind0 = "python" if not hasattr(data0, "dtype") else data0.dtype.kind
        kind1 = "python" if not hasattr(data1, "dtype") else data1.dtype.kind
        if kind0 != "python" and kind1 != "python":
            # two numpy scalars: keep the larger itemsize
            kind = common_kind
            itemsize = max(data0.dtype.itemsize, data1.dtype.itemsize)
        elif is_bool(data0) or is_bool(data1):
            kind = "bool" if (is_bool(data0) and is_bool(data1)) else "object"
            itemsize = ""
        elif is_complex(data0) or is_complex(data1):
            kind = common_kind
            itemsize = 16
        else:
            kind = common_kind
            itemsize = 8

        expected = np.array(data, dtype=f"{kind}{itemsize}")
        result = lib.maybe_convert_objects(arr)
        tm.assert_numpy_array_equal(result, expected)

    def test_mixed_dtypes_remain_object_array(self):
        # GH14956
        arr = np.array([datetime(2015, 1, 1, tzinfo=pytz.utc), 1], dtype=object)
        result = lib.maybe_convert_objects(arr, convert_non_numeric=True)
        tm.assert_numpy_array_equal(result, arr)

    @pytest.mark.parametrize(
        "idx",
        [
            pd.IntervalIndex.from_breaks(range(5), closed="both"),
            pd.period_range("2016-01-01", periods=3, freq="D"),
        ],
    )
    def test_maybe_convert_objects_ea(self, idx):
        result = lib.maybe_convert_objects(
            np.array(idx, dtype=object),
            convert_non_numeric=True,
        )
        tm.assert_extension_array_equal(result, idx._data)


class TestTypeInference:
    # Dummy class used for testing with Python objects
    class Dummy:
        pass

    def test_inferred_dtype_fixture(self, any_skipna_inferred_dtype):
        # see pandas/conftest.py
        inferred_dtype, values = any_skipna_inferred_dtype

        # make sure the inferred dtype of the fixture is as requested
        assert inferred_dtype == lib.infer_dtype(values, skipna=True)

    @pytest.mark.parametrize("skipna", [True, False])
    def test_length_zero(self, skipna):
        result = lib.infer_dtype(np.array([], dtype="i4"), skipna=skipna)
        assert result == "integer"

        result = lib.infer_dtype([], skipna=skipna)
        assert result == "empty"

        # GH 18004
        arr = np.array([np.array([], dtype=object), np.array([], dtype=object)])
        result = lib.infer_dtype(arr, skipna=skipna)
        assert result == "empty"

    def test_integers(self):
        arr = np.array([1, 2, 3, np.int64(4), np.int32(5)], dtype="O")
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "integer"

        arr = np.array([1, 2, 3, np.int64(4), np.int32(5), "foo"], dtype="O")
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "mixed-integer"

        arr = np.array([1, 2, 3, 4, 5], dtype="i4")
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "integer"

    @pytest.mark.parametrize(
        "arr, skipna",
        [
            (np.array([1, 2, np.nan, np.nan, 3], dtype="O"), False),
            (np.array([1, 2, np.nan, np.nan, 3], dtype="O"), True),
            (np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype="O"), False),
            (np.array([1, 2, 3, np.int64(4), np.int32(5), np.nan], dtype="O"), True),
        ],
    )
    def test_integer_na(self, arr, skipna):
        # GH 27392
        result = lib.infer_dtype(arr, skipna=skipna)
        expected = "integer" if skipna else "integer-na"
        assert result == expected

    def test_infer_dtype_skipna_default(self):
        # infer_dtype `skipna` default deprecated in GH#24050,
        # changed to True in GH#29876
        arr = np.array([1, 2, 3, np.nan], dtype=object)

        result = lib.infer_dtype(arr)
        assert result == "integer"

    def test_bools(self):
        arr = np.array([True, False, True, True, True], dtype="O")
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "boolean"

        arr = np.array([np.bool_(True), np.bool_(False)], dtype="O")
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "boolean"

        arr = np.array([True, False, True, "foo"], dtype="O")
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "mixed"

        arr = np.array([True, False, True], dtype=bool)
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "boolean"

        # nan counts as NA only when skipna=True
        arr = np.array([True, np.nan, False], dtype="O")
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "boolean"

        result = lib.infer_dtype(arr, skipna=False)
        assert result == "mixed"

    def test_floats(self):
        arr = np.array([1.0, 2.0, 3.0, np.float64(4), np.float32(5)], dtype="O")
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "floating"

        arr = np.array([1, 2, 3, np.float64(4), np.float32(5), "foo"], dtype="O")
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "mixed-integer"

        arr = np.array([1, 2, 3, 4, 5], dtype="f4")
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "floating"

        arr = np.array([1, 2, 3, 4, 5], dtype="f8")
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "floating"

    def test_decimals(self):
        # GH15690
        arr = np.array([Decimal(1), Decimal(2), Decimal(3)])
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "decimal"

        arr = np.array([1.0, 2.0, Decimal(3)])
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "mixed"

        result = lib.infer_dtype(arr[::-1], skipna=True)
        assert result == "mixed"

        # Decimal("NaN") is treated as a decimal value, not as NA
        arr = np.array([Decimal(1), Decimal("NaN"), Decimal(3)])
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "decimal"

        arr = np.array([Decimal(1), np.nan, Decimal(3)], dtype="O")
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "decimal"

    # complex is compatible with nan, so skipna has no effect
    @pytest.mark.parametrize("skipna", [True, False])
    def test_complex(self, skipna):
        # gets cast to complex on array construction
        arr = np.array([1.0, 2.0, 1 + 1j])
        result = lib.infer_dtype(arr, skipna=skipna)
        assert result == "complex"

        arr = np.array([1.0, 2.0, 1 + 1j], dtype="O")
        result = lib.infer_dtype(arr, skipna=skipna)
        assert result == "mixed"

        result = lib.infer_dtype(arr[::-1], skipna=skipna)
        assert result == "mixed"

        # gets cast to complex on array construction
        arr = np.array([1, np.nan, 1 + 1j])
        result = lib.infer_dtype(arr, skipna=skipna)
        assert result == "complex"

        arr = np.array([1.0, np.nan, 1 + 1j], dtype="O")
        result = lib.infer_dtype(arr, skipna=skipna)
        assert result == "mixed"

        # complex with nans stays complex
        arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype="O")
        result = lib.infer_dtype(arr, skipna=skipna)
        assert result == "complex"

        # test smaller complex dtype; will pass through _try_infer_map fastpath
        arr = np.array([1 + 1j, np.nan, 3 + 3j], dtype=np.complex64)
        result = lib.infer_dtype(arr, skipna=skipna)
        assert result == "complex"

    def test_string(self):
        pass

    def test_unicode(self):
        arr = ["a", np.nan, "c"]
        result = lib.infer_dtype(arr, skipna=False)
        # This currently returns "mixed", but it's not clear that's optimal.
        # This could also return "string" or "mixed-string"
        assert result == "mixed"

        # even though we use skipna, we are only skipping those NAs that are
        # considered matching by is_string_array
        arr = ["a", np.nan, "c"]
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "string"

        arr = ["a", pd.NA, "c"]
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "string"

        # pd.NaT is not an accepted string NA -> "mixed" even with skipna
        arr = ["a", pd.NaT, "c"]
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "mixed"

        arr = ["a", "c"]
        result = lib.infer_dtype(arr, skipna=False)
        assert result == "string"

    @pytest.mark.parametrize(
        "dtype, missing, skipna, expected",
        [
            (float, np.nan, False, "floating"),
            (float, np.nan, True, "floating"),
            (object, np.nan, False, "floating"),
            (object, np.nan, True, "empty"),
            (object, None, False, "mixed"),
            (object, None, True, "empty"),
        ],
    )
    @pytest.mark.parametrize("box", [Series, np.array])
    def test_object_empty(self, box, missing, dtype, skipna, expected):
        # GH 23421
        arr = box([missing, missing], dtype=dtype)

        result = lib.infer_dtype(arr, skipna=skipna)
        assert result == expected

    def test_datetime(self):
        dates = [datetime(2012, 1, x) for x in range(1, 20)]
        index = Index(dates)
        assert index.inferred_type == "datetime64"

    def test_infer_dtype_datetime64(self):
        arr = np.array(
            [np.datetime64("2011-01-01"), np.datetime64("2011-01-01")], dtype=object
        )
        assert lib.infer_dtype(arr, skipna=True) == "datetime64"

    @pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
    def test_infer_dtype_datetime64_with_na(self, na_value):
        # starts with nan
        arr = np.array([na_value, np.datetime64("2011-01-02")])
        assert lib.infer_dtype(arr, skipna=True) == "datetime64"

        arr = np.array([na_value, np.datetime64("2011-01-02"), na_value])
        assert lib.infer_dtype(arr, skipna=True) == "datetime64"

    @pytest.mark.parametrize(
        "arr",
        [
            np.array(
                [np.timedelta64("nat"), np.datetime64("2011-01-02")], dtype=object
            ),
            np.array(
                [np.datetime64("2011-01-02"), np.timedelta64("nat")], dtype=object
            ),
            np.array([np.datetime64("2011-01-01"), Timestamp("2011-01-02")]),
            np.array([Timestamp("2011-01-02"), np.datetime64("2011-01-01")]),
            np.array([np.nan, Timestamp("2011-01-02"), 1.1]),
            np.array([np.nan, "2011-01-01", Timestamp("2011-01-02")], dtype=object),
            np.array([np.datetime64("nat"), np.timedelta64(1, "D")], dtype=object),
            np.array([np.timedelta64(1, "D"), np.datetime64("nat")], dtype=object),
        ],
    )
    def test_infer_datetimelike_dtype_mixed(self, arr):
        assert lib.infer_dtype(arr, skipna=False) == "mixed"

    def test_infer_dtype_mixed_integer(self):
        arr = np.array([np.nan, Timestamp("2011-01-02"), 1])
        assert lib.infer_dtype(arr, skipna=True) == "mixed-integer"

    @pytest.mark.parametrize(
        "arr",
        [
            np.array([Timestamp("2011-01-01"), Timestamp("2011-01-02")]),
            np.array([datetime(2011, 1, 1), datetime(2012, 2, 1)]),
            np.array([datetime(2011, 1, 1), Timestamp("2011-01-02")]),
        ],
    )
    def test_infer_dtype_datetime(self, arr):
        assert lib.infer_dtype(arr, skipna=True) == "datetime"

    @pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
    @pytest.mark.parametrize(
        "time_stamp", [Timestamp("2011-01-01"), datetime(2011, 1, 1)]
    )
    def test_infer_dtype_datetime_with_na(self, na_value, time_stamp):
        # starts with nan
        arr = np.array([na_value, time_stamp])
        assert lib.infer_dtype(arr, skipna=True) == "datetime"

        arr = np.array([na_value, time_stamp, na_value])
        assert lib.infer_dtype(arr, skipna=True) == "datetime"

    @pytest.mark.parametrize(
        "arr",
        [
            np.array([Timedelta("1 days"), Timedelta("2 days")]),
            np.array([np.timedelta64(1, "D"), np.timedelta64(2, "D")], dtype=object),
            np.array([timedelta(1), timedelta(2)]),
        ],
    )
    def test_infer_dtype_timedelta(self, arr):
        assert lib.infer_dtype(arr, skipna=True) == "timedelta"

    @pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
    @pytest.mark.parametrize(
        "delta", [Timedelta("1 days"), np.timedelta64(1, "D"), timedelta(1)]
    )
    def test_infer_dtype_timedelta_with_na(self, na_value, delta):
        # starts with nan
        arr = np.array([na_value, delta])
        assert lib.infer_dtype(arr, skipna=True) == "timedelta"

        arr = np.array([na_value, delta, na_value])
        assert lib.infer_dtype(arr, skipna=True) == "timedelta"

    def test_infer_dtype_period(self):
        # GH 13664
        arr = np.array([Period("2011-01", freq="D"), Period("2011-02", freq="D")])
        assert lib.infer_dtype(arr, skipna=True) == "period"

        # non-homogeneous freqs -> mixed
        arr = np.array([Period("2011-01", freq="D"), Period("2011-02", freq="M")])
        assert lib.infer_dtype(arr, skipna=True) == "mixed"

    @pytest.mark.parametrize("klass", [pd.array, Series, Index])
    @pytest.mark.parametrize("skipna", [True, False])
    def test_infer_dtype_period_array(self, klass, skipna):
        # https://github.com/pandas-dev/pandas/issues/23553
        values = klass(
            [
                Period("2011-01-01", freq="D"),
                Period("2011-01-02", freq="D"),
                pd.NaT,
            ]
        )
        assert lib.infer_dtype(values, skipna=skipna) == "period"

        # periods but mixed freq
        values = klass(
            [
                Period("2011-01-01", freq="D"),
                Period("2011-01-02", freq="M"),
                pd.NaT,
            ]
        )
        # with pd.array this becomes NumpyExtensionArray which ends up
        # as "unknown-array"
        exp = "unknown-array" if klass is pd.array else "mixed"
        assert lib.infer_dtype(values, skipna=skipna) == exp

    def test_infer_dtype_period_mixed(self):
        arr = np.array(
            [Period("2011-01", freq="M"), np.datetime64("nat")], dtype=object
        )
        assert lib.infer_dtype(arr, skipna=False) == "mixed"

        arr = np.array(
            [np.datetime64("nat"), Period("2011-01", freq="M")], dtype=object
        )
        assert lib.infer_dtype(arr, skipna=False) == "mixed"

    @pytest.mark.parametrize("na_value", [pd.NaT, np.nan])
    def test_infer_dtype_period_with_na(self, na_value):
        # starts with nan
        arr = np.array([na_value, Period("2011-01", freq="D")])
        assert lib.infer_dtype(arr, skipna=True) == "period"

        arr = np.array([na_value, Period("2011-01", freq="D"), na_value])
        assert lib.infer_dtype(arr, skipna=True) == "period"

    def test_infer_dtype_all_nan_nat_like(self):
        arr = np.array([np.nan, np.nan])
        assert lib.infer_dtype(arr, skipna=True) == "floating"

        # a mix of nan and None gives "empty" with skipna, "mixed" without
        arr = np.array([np.nan, np.nan, None])
        assert lib.infer_dtype(arr, skipna=True) == "empty"
        assert lib.infer_dtype(arr, skipna=False) == "mixed"

        arr = np.array([None, np.nan, np.nan])
        assert lib.infer_dtype(arr, skipna=True) == "empty"
        assert lib.infer_dtype(arr, skipna=False) == "mixed"

        # pd.NaT
        arr = np.array([pd.NaT])
        assert lib.infer_dtype(arr, skipna=False) == "datetime"

        arr = np.array([pd.NaT, np.nan])
        assert lib.infer_dtype(arr, skipna=False) == "datetime"

        arr = np.array([np.nan, pd.NaT])
        assert lib.infer_dtype(arr, skipna=False) == "datetime"

        arr = np.array([np.nan, pd.NaT, np.nan])
        assert lib.infer_dtype(arr, skipna=False) == "datetime"

        arr = np.array([None, pd.NaT, None])
        assert lib.infer_dtype(arr, skipna=False) == "datetime"

        # np.datetime64(nat)
        arr = np.array([np.datetime64("nat")])
        assert lib.infer_dtype(arr, skipna=False) == "datetime64"

        for n in [np.nan, pd.NaT, None]:
            arr = np.array([n, np.datetime64("nat"), n])
            assert lib.infer_dtype(arr, skipna=False) == "datetime64"

            arr = np.array([pd.NaT, n, np.datetime64("nat"), n])
            assert lib.infer_dtype(arr, skipna=False) == "datetime64"

        arr = np.array([np.timedelta64("nat")], dtype=object)
        assert lib.infer_dtype(arr, skipna=False) == "timedelta"

        for n in [np.nan, pd.NaT, None]:
            arr = np.array([n, np.timedelta64("nat"), n])
            assert lib.infer_dtype(arr, skipna=False) == "timedelta"

            arr = np.array([pd.NaT, n, np.timedelta64("nat"), n])
            assert lib.infer_dtype(arr, skipna=False) == "timedelta"

        # datetime / timedelta mixed
        arr = np.array([pd.NaT, np.datetime64("nat"), np.timedelta64("nat"), np.nan])
        assert lib.infer_dtype(arr, skipna=False) == "mixed"

        arr = np.array([np.timedelta64("nat"), np.datetime64("nat")], dtype=object)
        assert lib.infer_dtype(arr, skipna=False) == "mixed"

    def test_is_datetimelike_array_all_nan_nat_like(self):
        arr = np.array([np.nan, pd.NaT, np.datetime64("nat")])
        assert lib.is_datetime_array(arr)
        assert lib.is_datetime64_array(arr)
        assert not lib.is_timedelta_or_timedelta64_array(arr)

        arr = np.array([np.nan, pd.NaT, np.timedelta64("nat")])
        assert not lib.is_datetime_array(arr)
        assert not lib.is_datetime64_array(arr)
        assert lib.is_timedelta_or_timedelta64_array(arr)

        arr = np.array([np.nan, pd.NaT, np.datetime64("nat"), np.timedelta64("nat")])
        assert not lib.is_datetime_array(arr)
        assert not lib.is_datetime64_array(arr)
        assert not lib.is_timedelta_or_timedelta64_array(arr)

        # all-NA input satisfies every datetimelike predicate
        arr = np.array([np.nan, pd.NaT])
        assert lib.is_datetime_array(arr)
        assert lib.is_datetime64_array(arr)
        assert lib.is_timedelta_or_timedelta64_array(arr)

        arr = np.array([np.nan, np.nan], dtype=object)
        assert not lib.is_datetime_array(arr)
        assert not lib.is_datetime64_array(arr)
        assert not lib.is_timedelta_or_timedelta64_array(arr)

        # single-tz check: same tz -> True, differing tz -> False
        assert lib.is_datetime_with_singletz_array(
            np.array(
                [
                    Timestamp("20130101", tz="US/Eastern"),
                    Timestamp("20130102", tz="US/Eastern"),
                ],
                dtype=object,
            )
        )
        assert not lib.is_datetime_with_singletz_array(
            np.array(
                [
                    Timestamp("20130101", tz="US/Eastern"),
                    Timestamp("20130102", tz="CET"),
                ],
                dtype=object,
            )
        )

    @pytest.mark.parametrize(
        "func",
        [
            "is_datetime_array",
            "is_datetime64_array",
            "is_bool_array",
            "is_timedelta_or_timedelta64_array",
            "is_date_array",
            "is_time_array",
            "is_interval_array",
        ],
    )
    def test_other_dtypes_for_array(self, func):
        func = getattr(lib, func)
        arr = np.array(["foo", "bar"])
        assert not func(arr)
        assert not func(arr.reshape(2, 1))

        arr = np.array([1, 2])
        assert not func(arr)
        assert not func(arr.reshape(2, 1))

    def test_date(self):
        dates = [date(2012, 1, day) for day in range(1, 20)]
        index = Index(dates)
        assert index.inferred_type == "date"

        dates = [date(2012, 1, day) for day in range(1, 20)] + [np.nan]
        result = lib.infer_dtype(dates, skipna=False)
        assert result == "mixed"

        result = lib.infer_dtype(dates, skipna=True)
        assert result == "date"

    @pytest.mark.parametrize(
        "values",
        [
            [date(2020, 1, 1), Timestamp("2020-01-01")],
            [Timestamp("2020-01-01"), date(2020, 1, 1)],
            [date(2020, 1, 1), pd.NaT],
            [pd.NaT, date(2020, 1, 1)],
        ],
    )
    @pytest.mark.parametrize("skipna", [True, False])
    def test_infer_dtype_date_order_invariant(self, values, skipna):
        # https://github.com/pandas-dev/pandas/issues/33741
        result = lib.infer_dtype(values, skipna=skipna)
        assert result == "date"

    def test_is_numeric_array(self):
        assert lib.is_float_array(np.array([1, 2.0]))
        assert lib.is_float_array(np.array([1, 2.0, np.nan]))
        assert not lib.is_float_array(np.array([1, 2]))

        assert lib.is_integer_array(np.array([1, 2]))
        assert not lib.is_integer_array(np.array([1, 2.0]))

    def test_is_string_array(self):
        # We should only be accepting pd.NA, np.nan,
        # other floating point nans e.g. float('nan')]
        # when skipna is True.
        assert lib.is_string_array(np.array(["foo", "bar"]))
        assert not lib.is_string_array(
            np.array(["foo", "bar", pd.NA], dtype=object), skipna=False
        )
        assert lib.is_string_array(
            np.array(["foo", "bar", pd.NA], dtype=object), skipna=True
        )
        # we allow NaN/None in the StringArray constructor, so it's allowed here
        assert lib.is_string_array(
            np.array(["foo", "bar", None], dtype=object), skipna=True
        )
        assert lib.is_string_array(
            np.array(["foo", "bar", np.nan], dtype=object), skipna=True
        )
        # But not e.g. datetimelike or Decimal NAs
        assert not lib.is_string_array(
            np.array(["foo", "bar", pd.NaT], dtype=object), skipna=True
        )
        assert not lib.is_string_array(
            np.array(["foo", "bar", np.datetime64("NaT")], dtype=object), skipna=True
        )
        assert not lib.is_string_array(
            np.array(["foo", "bar", Decimal("NaN")], dtype=object), skipna=True
        )

        assert not lib.is_string_array(
            np.array(["foo", "bar", None], dtype=object), skipna=False
        )
        assert not lib.is_string_array(
            np.array(["foo", "bar", np.nan], dtype=object), skipna=False
        )
        assert not lib.is_string_array(np.array([1, 2]))

    @pytest.mark.parametrize(
        "func",
        [
            "is_bool_array",
            "is_date_array",
            "is_datetime_array",
            "is_datetime64_array",
            "is_float_array",
            "is_integer_array",
            "is_interval_array",
            "is_string_array",
            "is_time_array",
            "is_timedelta_or_timedelta64_array",
        ],
    )
    def test_is_dtype_array_empty_obj(self, func):
        # https://github.com/pandas-dev/pandas/pull/60796
        func = getattr(lib, func)

        arr = np.empty((2, 0), dtype=object)
        assert not func(arr)

        arr = np.empty((0, 2), dtype=object)
        assert not func(arr)

    def test_to_object_array_tuples(self):
        r = (5, 6)
        values = [r]
        lib.to_object_array_tuples(values)

        # make sure record array works
        record = namedtuple("record", "x y")
        r = record(5, 6)
        values = [r]
        lib.to_object_array_tuples(values)

    def test_object(self):
        # GH 7431
        # cannot infer more than this as only a single element
        arr = np.array([None], dtype="O")
        result = lib.infer_dtype(arr, skipna=False)
        assert result == "mixed"
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "empty"

    def test_to_object_array_width(self):
        # see gh-13320
        rows = [[1, 2, 3], [4, 5, 6]]

        expected = np.array(rows, dtype=object)
        out = lib.to_object_array(rows)
        tm.assert_numpy_array_equal(out, expected)

        expected = np.array(rows, dtype=object)
        out = lib.to_object_array(rows, min_width=1)
        tm.assert_numpy_array_equal(out, expected)

        # min_width wider than the rows pads the extra columns with None
        expected = np.array(
            [[1, 2, 3, None, None], [4, 5, 6, None, None]], dtype=object
        )
        out = lib.to_object_array(rows, min_width=5)
        tm.assert_numpy_array_equal(out, expected)

    def test_is_period(self):
        # GH#55264
        msg = "is_period is deprecated and will be removed in a future version"
        with tm.assert_produces_warning(FutureWarning, match=msg):
            assert lib.is_period(Period("2011-01", freq="M"))
            assert not lib.is_period(PeriodIndex(["2011-01"], freq="M"))
            assert not lib.is_period(Timestamp("2011-01"))
            assert not lib.is_period(1)
            assert not lib.is_period(np.nan)

    def test_is_interval(self):
        # GH#55264
        msg = "is_interval is deprecated and will be removed in a future version"
        item = Interval(1, 2)
        with tm.assert_produces_warning(FutureWarning, match=msg):
            assert lib.is_interval(item)
            assert not lib.is_interval(pd.IntervalIndex([item]))
            assert not lib.is_interval(pd.IntervalIndex([item])._engine)

    def test_categorical(self):
        # GH 8974
        arr = Categorical(list("abc"))
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "categorical"

        result = lib.infer_dtype(Series(arr), skipna=True)
        assert result == "categorical"

        arr = Categorical(list("abc"), categories=["cegfab"], ordered=True)
        result = lib.infer_dtype(arr, skipna=True)
        assert result == "categorical"

        result = lib.infer_dtype(Series(arr), skipna=True)
        assert result == "categorical"

    @pytest.mark.parametrize("asobject", [True, False])
    def test_interval(self, asobject):
        idx = pd.IntervalIndex.from_breaks(range(5), closed="both")
        if asobject:
            idx = idx.astype(object)

        inferred = lib.infer_dtype(idx, skipna=False)
        assert inferred == "interval"

        inferred = lib.infer_dtype(idx._data, skipna=False)
        assert inferred == "interval"

        inferred = lib.infer_dtype(Series(idx, dtype=idx.dtype), skipna=False)
        assert inferred == "interval"

    @pytest.mark.parametrize("value", [Timestamp(0), Timedelta(0), 0, 0.0])
    def test_interval_mismatched_closed(self, value):
        first = Interval(value, value, closed="left")
        second = Interval(value, value, closed="right")

        # if closed match, we should infer "interval"
        arr = np.array([first, first], dtype=object)
        assert lib.infer_dtype(arr, skipna=False) == "interval"

        # if closed don't match, we should _not_ get "interval"
        arr2 = np.array([first, second], dtype=object)
        assert lib.infer_dtype(arr2, skipna=False) == "mixed"

    def test_interval_mismatched_subtype(self):
        first = Interval(0, 1, closed="left")
        second = Interval(Timestamp(0), Timestamp(1), closed="left")
        third = Interval(Timedelta(0), Timedelta(1), closed="left")

        arr = np.array([first, second])
        assert lib.infer_dtype(arr, skipna=False) == "mixed"

        arr = np.array([second, third])
        assert lib.infer_dtype(arr, skipna=False) == "mixed"

        arr = np.array([first, third])
        assert lib.infer_dtype(arr, skipna=False) == "mixed"

        # float vs int subdtype are compatible
        flt_interval = Interval(1.5, 2.5, closed="left")
        arr = np.array([first, flt_interval], dtype=object)
        assert lib.infer_dtype(arr, skipna=False) == "interval"

    @pytest.mark.parametrize("klass", [pd.array, Series])
    @pytest.mark.parametrize("skipna", [True, False])
    @pytest.mark.parametrize("data", [["a", "b", "c"], ["a", "b", pd.NA]])
    def test_string_dtype(self, data, skipna, klass, nullable_string_dtype):
        # StringArray
        val = klass(data, dtype=nullable_string_dtype)
        inferred = lib.infer_dtype(val, skipna=skipna)
        assert inferred == "string"

    @pytest.mark.parametrize("klass", [pd.array, Series])
    @pytest.mark.parametrize("skipna", [True, False])
    @pytest.mark.parametrize("data", [[True, False, True], [True, False, pd.NA]])
    def test_boolean_dtype(self, data, skipna, klass):
        # BooleanArray
        val = klass(data, dtype="boolean")
        inferred = lib.infer_dtype(val, skipna=skipna)
        assert inferred == "boolean"


class TestNumberScalar:
    def test_is_number(self):
        assert is_number(True)
        assert is_number(1)
        assert is_number(1.1)
        assert is_number(1 + 3j)
        assert is_number(np.int64(1))
        assert is_number(np.float64(1.1))
        assert is_number(np.complex128(1 + 3j))
        assert is_number(np.nan)

        assert not is_number(None)
        assert not is_number("x")
        assert not is_number(datetime(2011, 1, 1))
        assert not is_number(np.datetime64("2011-01-01"))
        assert not is_number(Timestamp("2011-01-01"))
        assert not is_number(Timestamp("2011-01-01", tz="US/Eastern"))
        assert not is_number(timedelta(1000))
        assert not is_number(Timedelta("1 days"))

        # questionable
        assert not is_number(np.bool_(False))
        assert is_number(np.timedelta64(1, "D"))

    def test_is_bool(self):
        assert is_bool(True)
        assert is_bool(False)
        assert is_bool(np.bool_(False))

        assert not is_bool(1)
        assert not is_bool(1.1)
        assert not is_bool(1 + 3j)
        assert not is_bool(np.int64(1))
        assert not is_bool(np.float64(1.1))
        assert not is_bool(np.complex128(1 + 3j))
        assert not is_bool(np.nan)
        assert not is_bool(None)
        assert not is_bool("x")
        assert not is_bool(datetime(2011, 1, 1))
        assert not is_bool(np.datetime64("2011-01-01"))
        assert not is_bool(Timestamp("2011-01-01"))
        assert not is_bool(Timestamp("2011-01-01", tz="US/Eastern"))
        assert not is_bool(timedelta(1000))
        assert not is_bool(np.timedelta64(1, "D"))
        assert not is_bool(Timedelta("1 days"))

    def test_is_integer(self):
        assert is_integer(1)
        assert is_integer(np.int64(1))

        # bools are explicitly not integers here
        assert not is_integer(True)
        assert not is_integer(1.1)
        assert not is_integer(1 + 3j)
        assert not is_integer(False)
        assert not is_integer(np.bool_(False))
        assert not is_integer(np.float64(1.1))
        assert not is_integer(np.complex128(1 + 3j))
        assert not is_integer(np.nan)
        assert not is_integer(None)
        assert not is_integer("x")
        assert not is_integer(datetime(2011, 1, 1))
        assert not is_integer(np.datetime64("2011-01-01"))
        assert not is_integer(Timestamp("2011-01-01"))
        assert not is_integer(Timestamp("2011-01-01", tz="US/Eastern"))
        assert not is_integer(timedelta(1000))
        assert not is_integer(Timedelta("1 days"))
        assert not is_integer(np.timedelta64(1, "D"))

    def test_is_float(self):
        assert is_float(1.1)
        assert is_float(np.float64(1.1))
        assert is_float(np.nan)

        assert not is_float(True)
        assert not is_float(1)
        assert not is_float(1 + 3j)
        assert not is_float(False)
        assert not is_float(np.bool_(False))
        assert not is_float(np.int64(1))
        assert not is_float(np.complex128(1 + 3j))
        assert not is_float(None)
        assert not is_float("x")
        assert not is_float(datetime(2011, 1, 1))
        assert not is_float(np.datetime64("2011-01-01"))
        assert not is_float(Timestamp("2011-01-01"))
        assert not is_float(Timestamp("2011-01-01", tz="US/Eastern"))
        assert not is_float(timedelta(1000))
        assert not is_float(np.timedelta64(1, "D"))
        assert not is_float(Timedelta("1 days"))

    def test_is_datetime_dtypes(self):
        ts = pd.date_range("20130101", periods=3)
        tsa = pd.date_range("20130101", periods=3, tz="US/Eastern")

        msg = "is_datetime64tz_dtype is deprecated"

        assert is_datetime64_dtype("datetime64")
        assert is_datetime64_dtype("datetime64[ns]")
        assert is_datetime64_dtype(ts)
        assert not is_datetime64_dtype(tsa)

        assert not is_datetime64_ns_dtype("datetime64")
        assert is_datetime64_ns_dtype("datetime64[ns]")
        assert is_datetime64_ns_dtype(ts)
        assert is_datetime64_ns_dtype(tsa)

        assert is_datetime64_any_dtype("datetime64")
        assert is_datetime64_any_dtype("datetime64[ns]")
        assert is_datetime64_any_dtype(ts)
        assert is_datetime64_any_dtype(tsa)

        with tm.assert_produces_warning(DeprecationWarning, match=msg):
            assert not is_datetime64tz_dtype("datetime64")
            assert not is_datetime64tz_dtype("datetime64[ns]")
            assert not is_datetime64tz_dtype(ts)
            assert is_datetime64tz_dtype(tsa)

    @pytest.mark.parametrize("tz", ["US/Eastern", "UTC"])
    def test_is_datetime_dtypes_with_tz(self, tz):
        dtype = f"datetime64[ns, {tz}]"
        assert not is_datetime64_dtype(dtype)

        msg = "is_datetime64tz_dtype is deprecated"
        with tm.assert_produces_warning(DeprecationWarning, match=msg):
            assert is_datetime64tz_dtype(dtype)
        assert is_datetime64_ns_dtype(dtype)
        assert is_datetime64_any_dtype(dtype)

    def test_is_timedelta(self):
        assert is_timedelta64_dtype("timedelta64")
        assert is_timedelta64_dtype("timedelta64[ns]")
        assert not is_timedelta64_ns_dtype("timedelta64")
        assert is_timedelta64_ns_dtype("timedelta64[ns]")

        tdi = TimedeltaIndex([1e14, 2e14], dtype="timedelta64[ns]")
        assert is_timedelta64_dtype(tdi)
        assert is_timedelta64_ns_dtype(tdi)
        assert is_timedelta64_ns_dtype(tdi.astype("timedelta64[ns]"))

        assert not is_timedelta64_ns_dtype(Index([], dtype=np.float64))
        assert not is_timedelta64_ns_dtype(Index([], dtype=np.int64))


class TestIsScalar:
    def test_is_scalar_builtin_scalars(self):
        assert is_scalar(None)
        assert is_scalar(True)
        assert is_scalar(False)
        assert is_scalar(Fraction())
        assert is_scalar(0.0)
        assert is_scalar(1)
        assert is_scalar(complex(2))
        assert is_scalar(float("NaN"))
        assert is_scalar(np.nan)
        assert is_scalar("foobar")
        assert is_scalar(b"foobar")
        assert is_scalar(datetime(2014, 1, 1))
        assert is_scalar(date(2014, 1, 1))
        assert is_scalar(time(12, 0))
        assert is_scalar(timedelta(hours=1))
        assert is_scalar(pd.NaT)
        assert is_scalar(pd.NA)

    def test_is_scalar_builtin_nonscalars(self):
        assert not is_scalar({})
        assert not is_scalar([])
        assert not is_scalar([1])
        assert not is_scalar(())
        assert not is_scalar((1,))
        assert not is_scalar(slice(None))
        assert not is_scalar(Ellipsis)

    def test_is_scalar_numpy_array_scalars(self):
        assert is_scalar(np.int64(1))
        assert is_scalar(np.float64(1.0))
        assert is_scalar(np.int32(1))
        assert is_scalar(np.complex64(2))
        assert is_scalar(np.object_("foobar"))
        assert is_scalar(np.str_("foobar"))
        assert is_scalar(np.bytes_(b"foobar"))
        assert is_scalar(np.datetime64("2014-01-01"))
        assert is_scalar(np.timedelta64(1, "h"))

    @pytest.mark.parametrize(
        "zerodim",
        [
            np.array(1),
            np.array("foobar"),
            np.array(np.datetime64("2014-01-01")),
            np.array(np.timedelta64(1, "h")),
            np.array(np.datetime64("NaT")),
        ],
    )
    def test_is_scalar_numpy_zerodim_arrays(self, zerodim):
        # a 0-dim array is not a scalar, but its unboxed item is
        assert not is_scalar(zerodim)
        assert is_scalar(lib.item_from_zerodim(zerodim))

    @pytest.mark.parametrize("arr", [np.array([]), np.array([[]])])
    def test_is_scalar_numpy_arrays(self, arr):
        assert not is_scalar(arr)
        assert not is_scalar(MockNumpyLikeArray(arr))

    def test_is_scalar_pandas_scalars(self):
        assert is_scalar(Timestamp("2014-01-01"))
        assert is_scalar(Timedelta(hours=1))
        assert is_scalar(Period("2014-01-01"))
        assert is_scalar(Interval(left=0, right=1))
        assert is_scalar(DateOffset(days=1))
        assert is_scalar(pd.offsets.Minute(3))

    def test_is_scalar_pandas_containers(self):
        assert not is_scalar(Series(dtype=object))
        assert not is_scalar(Series([1]))
        assert not is_scalar(DataFrame())
        assert not is_scalar(DataFrame([[1]]))
        assert not is_scalar(Index([]))
        assert not is_scalar(Index([1]))
        assert not is_scalar(Categorical([]))
        assert not is_scalar(DatetimeIndex([])._data)
        assert not is_scalar(TimedeltaIndex([])._data)
        assert not is_scalar(DatetimeIndex([])._data.to_period("D"))
        assert not is_scalar(pd.array([1, 2, 3]))

    def test_is_scalar_number(self):
        # Number() is not recognized by PyNumber_Check, so by extension
        # is not recognized by is_scalar, but instances of non-abstract
        # subclasses are.

        class Numeric(Number):
            def __init__(self, value) -> None:
                self.value = value

            def __int__(self) -> int:
                return self.value

        num = Numeric(1)
        assert is_scalar(num)


@pytest.mark.parametrize("unit", ["ms", "us", "ns"])
def test_datetimeindex_from_empty_datetime64_array(unit):
    idx = DatetimeIndex(np.array([], dtype=f"datetime64[{unit}]"))
    assert len(idx) == 0


def test_nan_to_nat_conversions():
    df = DataFrame(
        {"A": np.asarray(range(10), dtype="float64"), "B": Timestamp("20010101")}
    )
    df.iloc[3:6, :] = np.nan
    result = df.loc[4, "B"]
    assert result is pd.NaT

    s = df["B"].copy()
    s[8:9] = np.nan
    assert s[8] is pd.NaT


@pytest.mark.filterwarnings("ignore::PendingDeprecationWarning")
def test_is_scipy_sparse(spmatrix):
    pytest.importorskip("scipy")
    assert is_scipy_sparse(spmatrix([[0, 1]]))
    assert not is_scipy_sparse(np.array([1]))


def test_ensure_int32():
    values = np.arange(10, dtype=np.int32)
    result = ensure_int32(values)
    assert result.dtype == np.int32

    # int64 input is downcast to int32
    values = np.arange(10, dtype=np.int64)
    result = ensure_int32(values)
    assert result.dtype == np.int32


@pytest.mark.parametrize(
    "right,result",
    [
        (0, np.uint8),
        (-1, np.int16),
        (300, np.uint16),
        # For floats, we just upcast directly to float64 instead of trying to
        # find a smaller floating dtype
        (300.0, np.uint16),  # for integer floats, 
we convert them to ints\n (300.1, np.float64),\n (np.int16(300), np.int16 if np_version_gt2 else np.uint16),\n ],\n)\ndef test_find_result_type_uint_int(right, result):\n left_dtype = np.dtype("uint8")\n assert find_result_type(left_dtype, right) == result\n\n\n@pytest.mark.parametrize(\n "right,result",\n [\n (0, np.int8),\n (-1, np.int8),\n (300, np.int16),\n # For floats, we just upcast directly to float64 instead of trying to\n # find a smaller floating dtype\n (300.0, np.int16), # for integer floats, we convert them to ints\n (300.1, np.float64),\n (np.int16(300), np.int16),\n ],\n)\ndef test_find_result_type_int_int(right, result):\n left_dtype = np.dtype("int8")\n assert find_result_type(left_dtype, right) == result\n\n\n@pytest.mark.parametrize(\n "right,result",\n [\n (300.0, np.float64),\n (np.float32(300), np.float32),\n ],\n)\ndef test_find_result_type_floats(right, result):\n left_dtype = np.dtype("float16")\n assert find_result_type(left_dtype, right) == result\n | .venv\Lib\site-packages\pandas\tests\dtypes\test_inference.py | test_inference.py | Python | 71,478 | 0.75 | 0.101834 | 0.062572 | python-kit | 621 | 2024-06-14T14:15:38.326026 | Apache-2.0 | true | 03116bb721b1b756317e67648b5d6926 |
from contextlib import nullcontext\nfrom datetime import datetime\nfrom decimal import Decimal\n\nimport numpy as np\nimport pytest\n\nfrom pandas._config import config as cf\n\nfrom pandas._libs import missing as libmissing\nfrom pandas._libs.tslibs import iNaT\nfrom pandas.compat.numpy import np_version_gte1p25\n\nfrom pandas.core.dtypes.common import (\n is_float,\n is_scalar,\n pandas_dtype,\n)\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n IntervalDtype,\n PeriodDtype,\n)\nfrom pandas.core.dtypes.missing import (\n array_equivalent,\n is_valid_na_for_dtype,\n isna,\n isnull,\n na_value_for_dtype,\n notna,\n notnull,\n)\n\nimport pandas as pd\nfrom pandas import (\n DatetimeIndex,\n Index,\n NaT,\n Series,\n TimedeltaIndex,\n date_range,\n period_range,\n)\nimport pandas._testing as tm\n\nfix_now = pd.Timestamp("2021-01-01")\nfix_utcnow = pd.Timestamp("2021-01-01", tz="UTC")\n\n\n@pytest.mark.parametrize("notna_f", [notna, notnull])\ndef test_notna_notnull(notna_f):\n assert notna_f(1.0)\n assert not notna_f(None)\n assert not notna_f(np.nan)\n\n msg = "use_inf_as_na option is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n with cf.option_context("mode.use_inf_as_na", False):\n assert notna_f(np.inf)\n assert notna_f(-np.inf)\n\n arr = np.array([1.5, np.inf, 3.5, -np.inf])\n result = notna_f(arr)\n assert result.all()\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n with cf.option_context("mode.use_inf_as_na", True):\n assert not notna_f(np.inf)\n assert not notna_f(-np.inf)\n\n arr = np.array([1.5, np.inf, 3.5, -np.inf])\n result = notna_f(arr)\n assert result.sum() == 2\n\n\n@pytest.mark.parametrize("null_func", [notna, notnull, isna, isnull])\n@pytest.mark.parametrize(\n "ser",\n [\n Series(\n [str(i) for i in range(5)],\n index=Index([str(i) for i in range(5)], dtype=object),\n dtype=object,\n ),\n Series(range(5), date_range("2020-01-01", periods=5)),\n Series(range(5), 
period_range("2020-01-01", periods=5)),\n ],\n)\ndef test_null_check_is_series(null_func, ser):\n msg = "use_inf_as_na option is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n with cf.option_context("mode.use_inf_as_na", False):\n assert isinstance(null_func(ser), Series)\n\n\nclass TestIsNA:\n def test_0d_array(self):\n assert isna(np.array(np.nan))\n assert not isna(np.array(0.0))\n assert not isna(np.array(0))\n # test object dtype\n assert isna(np.array(np.nan, dtype=object))\n assert not isna(np.array(0.0, dtype=object))\n assert not isna(np.array(0, dtype=object))\n\n @pytest.mark.parametrize("shape", [(4, 0), (4,)])\n def test_empty_object(self, shape):\n arr = np.empty(shape=shape, dtype=object)\n result = isna(arr)\n expected = np.ones(shape=shape, dtype=bool)\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize("isna_f", [isna, isnull])\n def test_isna_isnull(self, isna_f):\n assert not isna_f(1.0)\n assert isna_f(None)\n assert isna_f(np.nan)\n assert float("nan")\n assert not isna_f(np.inf)\n assert not isna_f(-np.inf)\n\n # type\n assert not isna_f(type(Series(dtype=object)))\n assert not isna_f(type(Series(dtype=np.float64)))\n assert not isna_f(type(pd.DataFrame()))\n\n @pytest.mark.parametrize("isna_f", [isna, isnull])\n @pytest.mark.parametrize(\n "data",\n [\n np.arange(4, dtype=float),\n [0.0, 1.0, 0.0, 1.0],\n Series(list("abcd")),\n date_range("2020-01-01", periods=4),\n ],\n )\n @pytest.mark.parametrize(\n "index",\n [\n date_range("2020-01-01", periods=4),\n range(4),\n period_range("2020-01-01", periods=4),\n ],\n )\n def test_isna_isnull_frame(self, isna_f, data, index):\n # frame\n df = pd.DataFrame(data, index=index)\n result = isna_f(df)\n expected = df.apply(isna_f)\n tm.assert_frame_equal(result, expected)\n\n def test_isna_lists(self):\n result = isna([[False]])\n exp = np.array([[False]])\n tm.assert_numpy_array_equal(result, exp)\n\n result = isna([[1], [2]])\n exp = 
np.array([[False], [False]])\n tm.assert_numpy_array_equal(result, exp)\n\n # list of strings / unicode\n result = isna(["foo", "bar"])\n exp = np.array([False, False])\n tm.assert_numpy_array_equal(result, exp)\n\n result = isna(["foo", "bar"])\n exp = np.array([False, False])\n tm.assert_numpy_array_equal(result, exp)\n\n # GH20675\n result = isna([np.nan, "world"])\n exp = np.array([True, False])\n tm.assert_numpy_array_equal(result, exp)\n\n def test_isna_nat(self):\n result = isna([NaT])\n exp = np.array([True])\n tm.assert_numpy_array_equal(result, exp)\n\n result = isna(np.array([NaT], dtype=object))\n exp = np.array([True])\n tm.assert_numpy_array_equal(result, exp)\n\n def test_isna_numpy_nat(self):\n arr = np.array(\n [\n NaT,\n np.datetime64("NaT"),\n np.timedelta64("NaT"),\n np.datetime64("NaT", "s"),\n ]\n )\n result = isna(arr)\n expected = np.array([True] * 4)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_isna_datetime(self):\n assert not isna(datetime.now())\n assert notna(datetime.now())\n\n idx = date_range("1/1/1990", periods=20)\n exp = np.ones(len(idx), dtype=bool)\n tm.assert_numpy_array_equal(notna(idx), exp)\n\n idx = np.asarray(idx)\n idx[0] = iNaT\n idx = DatetimeIndex(idx)\n mask = isna(idx)\n assert mask[0]\n exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)\n tm.assert_numpy_array_equal(mask, exp)\n\n # GH 9129\n pidx = idx.to_period(freq="M")\n mask = isna(pidx)\n assert mask[0]\n exp = np.array([True] + [False] * (len(idx) - 1), dtype=bool)\n tm.assert_numpy_array_equal(mask, exp)\n\n mask = isna(pidx[1:])\n exp = np.zeros(len(mask), dtype=bool)\n tm.assert_numpy_array_equal(mask, exp)\n\n def test_isna_old_datetimelike(self):\n # isna_old should work for dt64tz, td64, and period, not just tznaive\n dti = date_range("2016-01-01", periods=3)\n dta = dti._data\n dta[-1] = NaT\n expected = np.array([False, False, True], dtype=bool)\n\n objs = [dta, dta.tz_localize("US/Eastern"), dta - dta, 
dta.to_period("D")]\n\n for obj in objs:\n msg = "use_inf_as_na option is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n with cf.option_context("mode.use_inf_as_na", True):\n result = isna(obj)\n\n tm.assert_numpy_array_equal(result, expected)\n\n @pytest.mark.parametrize(\n "value, expected",\n [\n (np.complex128(np.nan), True),\n (np.float64(1), False),\n (np.array([1, 1 + 0j, np.nan, 3]), np.array([False, False, True, False])),\n (\n np.array([1, 1 + 0j, np.nan, 3], dtype=object),\n np.array([False, False, True, False]),\n ),\n (\n np.array([1, 1 + 0j, np.nan, 3]).astype(object),\n np.array([False, False, True, False]),\n ),\n ],\n )\n def test_complex(self, value, expected):\n result = isna(value)\n if is_scalar(result):\n assert result is expected\n else:\n tm.assert_numpy_array_equal(result, expected)\n\n def test_datetime_other_units(self):\n idx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-02"])\n exp = np.array([False, True, False])\n tm.assert_numpy_array_equal(isna(idx), exp)\n tm.assert_numpy_array_equal(notna(idx), ~exp)\n tm.assert_numpy_array_equal(isna(idx.values), exp)\n tm.assert_numpy_array_equal(notna(idx.values), ~exp)\n\n @pytest.mark.parametrize(\n "dtype",\n [\n "datetime64[D]",\n "datetime64[h]",\n "datetime64[m]",\n "datetime64[s]",\n "datetime64[ms]",\n "datetime64[us]",\n "datetime64[ns]",\n ],\n )\n def test_datetime_other_units_astype(self, dtype):\n idx = DatetimeIndex(["2011-01-01", "NaT", "2011-01-02"])\n values = idx.values.astype(dtype)\n\n exp = np.array([False, True, False])\n tm.assert_numpy_array_equal(isna(values), exp)\n tm.assert_numpy_array_equal(notna(values), ~exp)\n\n exp = Series([False, True, False])\n s = Series(values)\n tm.assert_series_equal(isna(s), exp)\n tm.assert_series_equal(notna(s), ~exp)\n s = Series(values, dtype=object)\n tm.assert_series_equal(isna(s), exp)\n tm.assert_series_equal(notna(s), ~exp)\n\n def test_timedelta_other_units(self):\n idx = TimedeltaIndex(["1 days", 
"NaT", "2 days"])\n exp = np.array([False, True, False])\n tm.assert_numpy_array_equal(isna(idx), exp)\n tm.assert_numpy_array_equal(notna(idx), ~exp)\n tm.assert_numpy_array_equal(isna(idx.values), exp)\n tm.assert_numpy_array_equal(notna(idx.values), ~exp)\n\n @pytest.mark.parametrize(\n "dtype",\n [\n "timedelta64[D]",\n "timedelta64[h]",\n "timedelta64[m]",\n "timedelta64[s]",\n "timedelta64[ms]",\n "timedelta64[us]",\n "timedelta64[ns]",\n ],\n )\n def test_timedelta_other_units_dtype(self, dtype):\n idx = TimedeltaIndex(["1 days", "NaT", "2 days"])\n values = idx.values.astype(dtype)\n\n exp = np.array([False, True, False])\n tm.assert_numpy_array_equal(isna(values), exp)\n tm.assert_numpy_array_equal(notna(values), ~exp)\n\n exp = Series([False, True, False])\n s = Series(values)\n tm.assert_series_equal(isna(s), exp)\n tm.assert_series_equal(notna(s), ~exp)\n s = Series(values, dtype=object)\n tm.assert_series_equal(isna(s), exp)\n tm.assert_series_equal(notna(s), ~exp)\n\n def test_period(self):\n idx = pd.PeriodIndex(["2011-01", "NaT", "2012-01"], freq="M")\n exp = np.array([False, True, False])\n tm.assert_numpy_array_equal(isna(idx), exp)\n tm.assert_numpy_array_equal(notna(idx), ~exp)\n\n exp = Series([False, True, False])\n s = Series(idx)\n tm.assert_series_equal(isna(s), exp)\n tm.assert_series_equal(notna(s), ~exp)\n s = Series(idx, dtype=object)\n tm.assert_series_equal(isna(s), exp)\n tm.assert_series_equal(notna(s), ~exp)\n\n def test_decimal(self):\n # scalars GH#23530\n a = Decimal(1.0)\n assert isna(a) is False\n assert notna(a) is True\n\n b = Decimal("NaN")\n assert isna(b) is True\n assert notna(b) is False\n\n # array\n arr = np.array([a, b])\n expected = np.array([False, True])\n result = isna(arr)\n tm.assert_numpy_array_equal(result, expected)\n\n result = notna(arr)\n tm.assert_numpy_array_equal(result, ~expected)\n\n # series\n ser = Series(arr)\n expected = Series(expected)\n result = isna(ser)\n tm.assert_series_equal(result, 
expected)\n\n result = notna(ser)\n tm.assert_series_equal(result, ~expected)\n\n # index\n idx = Index(arr)\n expected = np.array([False, True])\n result = isna(idx)\n tm.assert_numpy_array_equal(result, expected)\n\n result = notna(idx)\n tm.assert_numpy_array_equal(result, ~expected)\n\n\n@pytest.mark.parametrize("dtype_equal", [True, False])\ndef test_array_equivalent(dtype_equal):\n assert array_equivalent(\n np.array([np.nan, np.nan]), np.array([np.nan, np.nan]), dtype_equal=dtype_equal\n )\n assert array_equivalent(\n np.array([np.nan, 1, np.nan]),\n np.array([np.nan, 1, np.nan]),\n dtype_equal=dtype_equal,\n )\n assert array_equivalent(\n np.array([np.nan, None], dtype="object"),\n np.array([np.nan, None], dtype="object"),\n dtype_equal=dtype_equal,\n )\n # Check the handling of nested arrays in array_equivalent_object\n assert array_equivalent(\n np.array([np.array([np.nan, None], dtype="object"), None], dtype="object"),\n np.array([np.array([np.nan, None], dtype="object"), None], dtype="object"),\n dtype_equal=dtype_equal,\n )\n assert array_equivalent(\n np.array([np.nan, 1 + 1j], dtype="complex"),\n np.array([np.nan, 1 + 1j], dtype="complex"),\n dtype_equal=dtype_equal,\n )\n assert not array_equivalent(\n np.array([np.nan, 1 + 1j], dtype="complex"),\n np.array([np.nan, 1 + 2j], dtype="complex"),\n dtype_equal=dtype_equal,\n )\n assert not array_equivalent(\n np.array([np.nan, 1, np.nan]),\n np.array([np.nan, 2, np.nan]),\n dtype_equal=dtype_equal,\n )\n assert not array_equivalent(\n np.array(["a", "b", "c", "d"]), np.array(["e", "e"]), dtype_equal=dtype_equal\n )\n assert array_equivalent(\n Index([0, np.nan]), Index([0, np.nan]), dtype_equal=dtype_equal\n )\n assert not array_equivalent(\n Index([0, np.nan]), Index([1, np.nan]), dtype_equal=dtype_equal\n )\n\n\n@pytest.mark.parametrize("dtype_equal", [True, False])\ndef test_array_equivalent_tdi(dtype_equal):\n assert array_equivalent(\n TimedeltaIndex([0, np.nan]),\n TimedeltaIndex([0, np.nan]),\n 
dtype_equal=dtype_equal,\n )\n assert not array_equivalent(\n TimedeltaIndex([0, np.nan]),\n TimedeltaIndex([1, np.nan]),\n dtype_equal=dtype_equal,\n )\n\n\n@pytest.mark.parametrize("dtype_equal", [True, False])\ndef test_array_equivalent_dti(dtype_equal):\n assert array_equivalent(\n DatetimeIndex([0, np.nan]), DatetimeIndex([0, np.nan]), dtype_equal=dtype_equal\n )\n assert not array_equivalent(\n DatetimeIndex([0, np.nan]), DatetimeIndex([1, np.nan]), dtype_equal=dtype_equal\n )\n\n dti1 = DatetimeIndex([0, np.nan], tz="US/Eastern")\n dti2 = DatetimeIndex([0, np.nan], tz="CET")\n dti3 = DatetimeIndex([1, np.nan], tz="US/Eastern")\n\n assert array_equivalent(\n dti1,\n dti1,\n dtype_equal=dtype_equal,\n )\n assert not array_equivalent(\n dti1,\n dti3,\n dtype_equal=dtype_equal,\n )\n # The rest are not dtype_equal\n assert not array_equivalent(DatetimeIndex([0, np.nan]), dti1)\n assert array_equivalent(\n dti2,\n dti1,\n )\n\n assert not array_equivalent(DatetimeIndex([0, np.nan]), TimedeltaIndex([0, np.nan]))\n\n\n@pytest.mark.parametrize(\n "val", [1, 1.1, 1 + 1j, True, "abc", [1, 2], (1, 2), {1, 2}, {"a": 1}, None]\n)\ndef test_array_equivalent_series(val):\n arr = np.array([1, 2])\n msg = "elementwise comparison failed"\n cm = (\n # stacklevel is chosen to make sense when called from .equals\n tm.assert_produces_warning(FutureWarning, match=msg, check_stacklevel=False)\n if isinstance(val, str) and not np_version_gte1p25\n else nullcontext()\n )\n with cm:\n assert not array_equivalent(Series([arr, arr]), Series([arr, val]))\n\n\ndef test_array_equivalent_array_mismatched_shape():\n # to trigger the motivating bug, the first N elements of the arrays need\n # to match\n first = np.array([1, 2, 3])\n second = np.array([1, 2])\n\n left = Series([first, "a"], dtype=object)\n right = Series([second, "a"], dtype=object)\n assert not array_equivalent(left, right)\n\n\ndef test_array_equivalent_array_mismatched_dtype():\n # same shape, different dtype can still be 
equivalent\n first = np.array([1, 2], dtype=np.float64)\n second = np.array([1, 2])\n\n left = Series([first, "a"], dtype=object)\n right = Series([second, "a"], dtype=object)\n assert array_equivalent(left, right)\n\n\ndef test_array_equivalent_different_dtype_but_equal():\n # Unclear if this is exposed anywhere in the public-facing API\n assert array_equivalent(np.array([1, 2]), np.array([1.0, 2.0]))\n\n\n@pytest.mark.parametrize(\n "lvalue, rvalue",\n [\n # There are 3 variants for each of lvalue and rvalue. We include all\n # three for the tz-naive `now` and exclude the datetim64 variant\n # for utcnow because it drops tzinfo.\n (fix_now, fix_utcnow),\n (fix_now.to_datetime64(), fix_utcnow),\n (fix_now.to_pydatetime(), fix_utcnow),\n (fix_now, fix_utcnow),\n (fix_now.to_datetime64(), fix_utcnow.to_pydatetime()),\n (fix_now.to_pydatetime(), fix_utcnow.to_pydatetime()),\n ],\n)\ndef test_array_equivalent_tzawareness(lvalue, rvalue):\n # we shouldn't raise if comparing tzaware and tznaive datetimes\n left = np.array([lvalue], dtype=object)\n right = np.array([rvalue], dtype=object)\n\n assert not array_equivalent(left, right, strict_nan=True)\n assert not array_equivalent(left, right, strict_nan=False)\n\n\ndef test_array_equivalent_compat():\n # see gh-13388\n m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])\n n = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])\n assert array_equivalent(m, n, strict_nan=True)\n assert array_equivalent(m, n, strict_nan=False)\n\n m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])\n n = np.array([(1, 2), (4, 3)], dtype=[("a", int), ("b", float)])\n assert not array_equivalent(m, n, strict_nan=True)\n assert not array_equivalent(m, n, strict_nan=False)\n\n m = np.array([(1, 2), (3, 4)], dtype=[("a", int), ("b", float)])\n n = np.array([(1, 2), (3, 4)], dtype=[("b", int), ("a", float)])\n assert not array_equivalent(m, n, strict_nan=True)\n assert not array_equivalent(m, n, 
strict_nan=False)\n\n\n@pytest.mark.parametrize("dtype", ["O", "S", "U"])\ndef test_array_equivalent_str(dtype):\n assert array_equivalent(\n np.array(["A", "B"], dtype=dtype), np.array(["A", "B"], dtype=dtype)\n )\n assert not array_equivalent(\n np.array(["A", "B"], dtype=dtype), np.array(["A", "X"], dtype=dtype)\n )\n\n\n@pytest.mark.parametrize("strict_nan", [True, False])\ndef test_array_equivalent_nested(strict_nan):\n # reached in groupby aggregations, make sure we use np.any when checking\n # if the comparison is truthy\n left = np.array([np.array([50, 70, 90]), np.array([20, 30])], dtype=object)\n right = np.array([np.array([50, 70, 90]), np.array([20, 30])], dtype=object)\n\n assert array_equivalent(left, right, strict_nan=strict_nan)\n assert not array_equivalent(left, right[::-1], strict_nan=strict_nan)\n\n left = np.empty(2, dtype=object)\n left[:] = [np.array([50, 70, 90]), np.array([20, 30, 40])]\n right = np.empty(2, dtype=object)\n right[:] = [np.array([50, 70, 90]), np.array([20, 30, 40])]\n assert array_equivalent(left, right, strict_nan=strict_nan)\n assert not array_equivalent(left, right[::-1], strict_nan=strict_nan)\n\n left = np.array([np.array([50, 50, 50]), np.array([40, 40])], dtype=object)\n right = np.array([50, 40])\n assert not array_equivalent(left, right, strict_nan=strict_nan)\n\n\n@pytest.mark.filterwarnings("ignore:elementwise comparison failed:DeprecationWarning")\n@pytest.mark.parametrize("strict_nan", [True, False])\ndef test_array_equivalent_nested2(strict_nan):\n # more than one level of nesting\n left = np.array(\n [\n np.array([np.array([50, 70]), np.array([90])], dtype=object),\n np.array([np.array([20, 30])], dtype=object),\n ],\n dtype=object,\n )\n right = np.array(\n [\n np.array([np.array([50, 70]), np.array([90])], dtype=object),\n np.array([np.array([20, 30])], dtype=object),\n ],\n dtype=object,\n )\n assert array_equivalent(left, right, strict_nan=strict_nan)\n assert not array_equivalent(left, right[::-1], 
strict_nan=strict_nan)\n\n left = np.array([np.array([np.array([50, 50, 50])], dtype=object)], dtype=object)\n right = np.array([50])\n assert not array_equivalent(left, right, strict_nan=strict_nan)\n\n\n@pytest.mark.parametrize("strict_nan", [True, False])\ndef test_array_equivalent_nested_list(strict_nan):\n left = np.array([[50, 70, 90], [20, 30]], dtype=object)\n right = np.array([[50, 70, 90], [20, 30]], dtype=object)\n\n assert array_equivalent(left, right, strict_nan=strict_nan)\n assert not array_equivalent(left, right[::-1], strict_nan=strict_nan)\n\n left = np.array([[50, 50, 50], [40, 40]], dtype=object)\n right = np.array([50, 40])\n assert not array_equivalent(left, right, strict_nan=strict_nan)\n\n\n@pytest.mark.filterwarnings("ignore:elementwise comparison failed:DeprecationWarning")\n@pytest.mark.xfail(reason="failing")\n@pytest.mark.parametrize("strict_nan", [True, False])\ndef test_array_equivalent_nested_mixed_list(strict_nan):\n # mixed arrays / lists in left and right\n # https://github.com/pandas-dev/pandas/issues/50360\n left = np.array([np.array([1, 2, 3]), np.array([4, 5])], dtype=object)\n right = np.array([[1, 2, 3], [4, 5]], dtype=object)\n\n assert array_equivalent(left, right, strict_nan=strict_nan)\n assert not array_equivalent(left, right[::-1], strict_nan=strict_nan)\n\n # multiple levels of nesting\n left = np.array(\n [\n np.array([np.array([1, 2, 3]), np.array([4, 5])], dtype=object),\n np.array([np.array([6]), np.array([7, 8]), np.array([9])], dtype=object),\n ],\n dtype=object,\n )\n right = np.array([[[1, 2, 3], [4, 5]], [[6], [7, 8], [9]]], dtype=object)\n assert array_equivalent(left, right, strict_nan=strict_nan)\n assert not array_equivalent(left, right[::-1], strict_nan=strict_nan)\n\n # same-length lists\n subarr = np.empty(2, dtype=object)\n subarr[:] = [\n np.array([None, "b"], dtype=object),\n np.array(["c", "d"], dtype=object),\n ]\n left = np.array([subarr, None], dtype=object)\n right = np.array([[[None, "b"], 
["c", "d"]], None], dtype=object)\n assert array_equivalent(left, right, strict_nan=strict_nan)\n assert not array_equivalent(left, right[::-1], strict_nan=strict_nan)\n\n\n@pytest.mark.xfail(reason="failing")\n@pytest.mark.parametrize("strict_nan", [True, False])\ndef test_array_equivalent_nested_dicts(strict_nan):\n left = np.array([{"f1": 1, "f2": np.array(["a", "b"], dtype=object)}], dtype=object)\n right = np.array(\n [{"f1": 1, "f2": np.array(["a", "b"], dtype=object)}], dtype=object\n )\n assert array_equivalent(left, right, strict_nan=strict_nan)\n assert not array_equivalent(left, right[::-1], strict_nan=strict_nan)\n\n right2 = np.array([{"f1": 1, "f2": ["a", "b"]}], dtype=object)\n assert array_equivalent(left, right2, strict_nan=strict_nan)\n assert not array_equivalent(left, right2[::-1], strict_nan=strict_nan)\n\n\ndef test_array_equivalent_index_with_tuples():\n # GH#48446\n idx1 = Index(np.array([(pd.NA, 4), (1, 1)], dtype="object"))\n idx2 = Index(np.array([(1, 1), (pd.NA, 4)], dtype="object"))\n assert not array_equivalent(idx1, idx2)\n assert not idx1.equals(idx2)\n assert not array_equivalent(idx2, idx1)\n assert not idx2.equals(idx1)\n\n idx1 = Index(np.array([(4, pd.NA), (1, 1)], dtype="object"))\n idx2 = Index(np.array([(1, 1), (4, pd.NA)], dtype="object"))\n assert not array_equivalent(idx1, idx2)\n assert not idx1.equals(idx2)\n assert not array_equivalent(idx2, idx1)\n assert not idx2.equals(idx1)\n\n\n@pytest.mark.parametrize(\n "dtype, na_value",\n [\n # Datetime-like\n (np.dtype("M8[ns]"), np.datetime64("NaT", "ns")),\n (np.dtype("m8[ns]"), np.timedelta64("NaT", "ns")),\n (DatetimeTZDtype.construct_from_string("datetime64[ns, US/Eastern]"), NaT),\n (PeriodDtype("M"), NaT),\n # Integer\n ("u1", 0),\n ("u2", 0),\n ("u4", 0),\n ("u8", 0),\n ("i1", 0),\n ("i2", 0),\n ("i4", 0),\n ("i8", 0),\n # Bool\n ("bool", False),\n # Float\n ("f2", np.nan),\n ("f4", np.nan),\n ("f8", np.nan),\n # Object\n ("O", np.nan),\n # Interval\n (IntervalDtype(), 
np.nan),\n ],\n)\ndef test_na_value_for_dtype(dtype, na_value):\n result = na_value_for_dtype(pandas_dtype(dtype))\n # identify check doesn't work for datetime64/timedelta64("NaT") bc they\n # are not singletons\n assert result is na_value or (\n isna(result) and isna(na_value) and type(result) is type(na_value)\n )\n\n\nclass TestNAObj:\n def _check_behavior(self, arr, expected):\n result = libmissing.isnaobj(arr)\n tm.assert_numpy_array_equal(result, expected)\n result = libmissing.isnaobj(arr, inf_as_na=True)\n tm.assert_numpy_array_equal(result, expected)\n\n arr = np.atleast_2d(arr)\n expected = np.atleast_2d(expected)\n\n result = libmissing.isnaobj(arr)\n tm.assert_numpy_array_equal(result, expected)\n result = libmissing.isnaobj(arr, inf_as_na=True)\n tm.assert_numpy_array_equal(result, expected)\n\n # Test fortran order\n arr = arr.copy(order="F")\n result = libmissing.isnaobj(arr)\n tm.assert_numpy_array_equal(result, expected)\n result = libmissing.isnaobj(arr, inf_as_na=True)\n tm.assert_numpy_array_equal(result, expected)\n\n def test_basic(self):\n arr = np.array([1, None, "foo", -5.1, NaT, np.nan])\n expected = np.array([False, True, False, False, True, True])\n\n self._check_behavior(arr, expected)\n\n def test_non_obj_dtype(self):\n arr = np.array([1, 3, np.nan, 5], dtype=float)\n expected = np.array([False, False, True, False])\n\n self._check_behavior(arr, expected)\n\n def test_empty_arr(self):\n arr = np.array([])\n expected = np.array([], dtype=bool)\n\n self._check_behavior(arr, expected)\n\n def test_empty_str_inp(self):\n arr = np.array([""]) # empty but not na\n expected = np.array([False])\n\n self._check_behavior(arr, expected)\n\n def test_empty_like(self):\n # see gh-13717: no segfaults!\n arr = np.empty_like([None])\n expected = np.array([True])\n\n self._check_behavior(arr, expected)\n\n\nm8_units = ["as", "ps", "ns", "us", "ms", "s", "m", "h", "D", "W", "M", "Y"]\n\nna_vals = (\n [\n None,\n NaT,\n float("NaN"),\n complex("NaN"),\n 
np.nan,\n np.float64("NaN"),\n np.float32("NaN"),\n np.complex64(np.nan),\n np.complex128(np.nan),\n np.datetime64("NaT"),\n np.timedelta64("NaT"),\n ]\n + [np.datetime64("NaT", unit) for unit in m8_units]\n + [np.timedelta64("NaT", unit) for unit in m8_units]\n)\n\ninf_vals = [\n float("inf"),\n float("-inf"),\n complex("inf"),\n complex("-inf"),\n np.inf,\n -np.inf,\n]\n\nint_na_vals = [\n # Values that match iNaT, which we treat as null in specific cases\n np.int64(NaT._value),\n int(NaT._value),\n]\n\nsometimes_na_vals = [Decimal("NaN")]\n\nnever_na_vals = [\n # float/complex values that when viewed as int64 match iNaT\n -0.0,\n np.float64("-0.0"),\n -0j,\n np.complex64(-0j),\n]\n\n\nclass TestLibMissing:\n @pytest.mark.parametrize("func", [libmissing.checknull, isna])\n @pytest.mark.parametrize(\n "value", na_vals + sometimes_na_vals # type: ignore[operator]\n )\n def test_checknull_na_vals(self, func, value):\n assert func(value)\n\n @pytest.mark.parametrize("func", [libmissing.checknull, isna])\n @pytest.mark.parametrize("value", inf_vals)\n def test_checknull_inf_vals(self, func, value):\n assert not func(value)\n\n @pytest.mark.parametrize("func", [libmissing.checknull, isna])\n @pytest.mark.parametrize("value", int_na_vals)\n def test_checknull_intna_vals(self, func, value):\n assert not func(value)\n\n @pytest.mark.parametrize("func", [libmissing.checknull, isna])\n @pytest.mark.parametrize("value", never_na_vals)\n def test_checknull_never_na_vals(self, func, value):\n assert not func(value)\n\n @pytest.mark.parametrize(\n "value", na_vals + sometimes_na_vals # type: ignore[operator]\n )\n def test_checknull_old_na_vals(self, value):\n assert libmissing.checknull(value, inf_as_na=True)\n\n @pytest.mark.parametrize("value", inf_vals)\n def test_checknull_old_inf_vals(self, value):\n assert libmissing.checknull(value, inf_as_na=True)\n\n @pytest.mark.parametrize("value", int_na_vals)\n def test_checknull_old_intna_vals(self, value):\n assert not 
libmissing.checknull(value, inf_as_na=True)\n\n @pytest.mark.parametrize("value", int_na_vals)\n def test_checknull_old_never_na_vals(self, value):\n assert not libmissing.checknull(value, inf_as_na=True)\n\n def test_is_matching_na(self, nulls_fixture, nulls_fixture2):\n left = nulls_fixture\n right = nulls_fixture2\n\n assert libmissing.is_matching_na(left, left)\n\n if left is right:\n assert libmissing.is_matching_na(left, right)\n elif is_float(left) and is_float(right):\n # np.nan vs float("NaN") we consider as matching\n assert libmissing.is_matching_na(left, right)\n elif type(left) is type(right):\n # e.g. both Decimal("NaN")\n assert libmissing.is_matching_na(left, right)\n else:\n assert not libmissing.is_matching_na(left, right)\n\n def test_is_matching_na_nan_matches_none(self):\n assert not libmissing.is_matching_na(None, np.nan)\n assert not libmissing.is_matching_na(np.nan, None)\n\n assert libmissing.is_matching_na(None, np.nan, nan_matches_none=True)\n assert libmissing.is_matching_na(np.nan, None, nan_matches_none=True)\n\n\nclass TestIsValidNAForDtype:\n def test_is_valid_na_for_dtype_interval(self):\n dtype = IntervalDtype("int64", "left")\n assert not is_valid_na_for_dtype(NaT, dtype)\n\n dtype = IntervalDtype("datetime64[ns]", "both")\n assert not is_valid_na_for_dtype(NaT, dtype)\n\n def test_is_valid_na_for_dtype_categorical(self):\n dtype = CategoricalDtype(categories=[0, 1, 2])\n assert is_valid_na_for_dtype(np.nan, dtype)\n\n assert not is_valid_na_for_dtype(NaT, dtype)\n assert not is_valid_na_for_dtype(np.datetime64("NaT", "ns"), dtype)\n assert not is_valid_na_for_dtype(np.timedelta64("NaT", "ns"), dtype)\n | .venv\Lib\site-packages\pandas\tests\dtypes\test_missing.py | test_missing.py | Python | 30,736 | 0.95 | 0.07909 | 0.058215 | vue-tools | 4 | 2025-04-20T20:19:52.444127 | GPL-3.0 | true | 10141bf897868450a003c50f909f6273 |
import numpy as np\n\nfrom pandas.core.dtypes.cast import can_hold_element\n\n\ndef test_can_hold_element_range(any_int_numpy_dtype):\n # GH#44261\n dtype = np.dtype(any_int_numpy_dtype)\n arr = np.array([], dtype=dtype)\n\n rng = range(2, 127)\n assert can_hold_element(arr, rng)\n\n # negatives -> can't be held by uint dtypes\n rng = range(-2, 127)\n if dtype.kind == "i":\n assert can_hold_element(arr, rng)\n else:\n assert not can_hold_element(arr, rng)\n\n rng = range(2, 255)\n if dtype == "int8":\n assert not can_hold_element(arr, rng)\n else:\n assert can_hold_element(arr, rng)\n\n rng = range(-255, 65537)\n if dtype.kind == "u":\n assert not can_hold_element(arr, rng)\n elif dtype.itemsize < 4:\n assert not can_hold_element(arr, rng)\n else:\n assert can_hold_element(arr, rng)\n\n # empty\n rng = range(-(10**10), -(10**10))\n assert len(rng) == 0\n # assert can_hold_element(arr, rng)\n\n rng = range(10**10, 10**10)\n assert len(rng) == 0\n assert can_hold_element(arr, rng)\n\n\ndef test_can_hold_element_int_values_float_ndarray():\n arr = np.array([], dtype=np.int64)\n\n element = np.array([1.0, 2.0])\n assert can_hold_element(arr, element)\n\n assert not can_hold_element(arr, element + 0.5)\n\n # integer but not losslessly castable to int64\n element = np.array([3, 2**65], dtype=np.float64)\n assert not can_hold_element(arr, element)\n\n\ndef test_can_hold_element_int8_int():\n arr = np.array([], dtype=np.int8)\n\n element = 2\n assert can_hold_element(arr, element)\n assert can_hold_element(arr, np.int8(element))\n assert can_hold_element(arr, np.uint8(element))\n assert can_hold_element(arr, np.int16(element))\n assert can_hold_element(arr, np.uint16(element))\n assert can_hold_element(arr, np.int32(element))\n assert can_hold_element(arr, np.uint32(element))\n assert can_hold_element(arr, np.int64(element))\n assert can_hold_element(arr, np.uint64(element))\n\n element = 2**9\n assert not can_hold_element(arr, element)\n assert not can_hold_element(arr, 
np.int16(element))\n assert not can_hold_element(arr, np.uint16(element))\n assert not can_hold_element(arr, np.int32(element))\n assert not can_hold_element(arr, np.uint32(element))\n assert not can_hold_element(arr, np.int64(element))\n assert not can_hold_element(arr, np.uint64(element))\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\test_can_hold_element.py | test_can_hold_element.py | Python | 2,408 | 0.95 | 0.075949 | 0.081967 | node-utils | 676 | 2024-10-06T16:40:04.750976 | BSD-3-Clause | true | 184ce19e0dd0d93774b9a4383afd5f47 |
import numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.cast import construct_1d_arraylike_from_scalar\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\n\nfrom pandas import (\n Categorical,\n Timedelta,\n)\nimport pandas._testing as tm\n\n\ndef test_cast_1d_array_like_from_scalar_categorical():\n # see gh-19565\n #\n # Categorical result from scalar did not maintain\n # categories and ordering of the passed dtype.\n cats = ["a", "b", "c"]\n cat_type = CategoricalDtype(categories=cats, ordered=False)\n expected = Categorical(["a", "a"], categories=cats)\n\n result = construct_1d_arraylike_from_scalar("a", len(expected), cat_type)\n tm.assert_categorical_equal(result, expected)\n\n\ndef test_cast_1d_array_like_from_timestamp(fixed_now_ts):\n # check we dont lose nanoseconds\n ts = fixed_now_ts + Timedelta(1)\n res = construct_1d_arraylike_from_scalar(ts, 2, np.dtype("M8[ns]"))\n assert res[0] == ts\n\n\ndef test_cast_1d_array_like_from_timedelta():\n # check we dont lose nanoseconds\n td = Timedelta(1)\n res = construct_1d_arraylike_from_scalar(td, 2, np.dtype("m8[ns]"))\n assert res[0] == td\n\n\ndef test_cast_1d_array_like_mismatched_datetimelike():\n td = np.timedelta64("NaT", "ns")\n dt = np.datetime64("NaT", "ns")\n\n with pytest.raises(TypeError, match="Cannot cast"):\n construct_1d_arraylike_from_scalar(td, 2, dt.dtype)\n\n with pytest.raises(TypeError, match="Cannot cast"):\n construct_1d_arraylike_from_scalar(np.timedelta64(4, "ns"), 2, dt.dtype)\n\n with pytest.raises(TypeError, match="Cannot cast"):\n construct_1d_arraylike_from_scalar(dt, 2, td.dtype)\n\n with pytest.raises(TypeError, match="Cannot cast"):\n construct_1d_arraylike_from_scalar(np.datetime64(4, "ns"), 2, td.dtype)\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\test_construct_from_scalar.py | test_construct_from_scalar.py | Python | 1,780 | 0.95 | 0.072727 | 0.15 | node-utils | 353 | 2023-10-21T21:54:27.980338 | GPL-3.0 | true | e0f26b09cbef0b0e0beb948c4138e434 |
import numpy as np\nimport pytest\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.core.construction import sanitize_array\n\n\n@pytest.mark.parametrize(\n "values, dtype, expected",\n [\n ([1, 2, 3], None, np.array([1, 2, 3], dtype=np.int64)),\n (np.array([1, 2, 3]), None, np.array([1, 2, 3])),\n (["1", "2", None], None, np.array(["1", "2", None])),\n (["1", "2", None], np.dtype("str"), np.array(["1", "2", None])),\n ([1, 2, None], np.dtype("str"), np.array(["1", "2", None])),\n ],\n)\ndef test_construct_1d_ndarray_preserving_na(\n values, dtype, expected, using_infer_string\n):\n result = sanitize_array(values, index=None, dtype=dtype)\n if using_infer_string and expected.dtype == object and dtype is None:\n tm.assert_extension_array_equal(result, pd.array(expected, dtype="str"))\n else:\n tm.assert_numpy_array_equal(result, expected)\n\n\n@pytest.mark.parametrize("dtype", ["m8[ns]", "M8[ns]"])\ndef test_construct_1d_ndarray_preserving_na_datetimelike(dtype):\n arr = np.arange(5, dtype=np.int64).view(dtype)\n expected = np.array(list(arr), dtype=object)\n assert all(isinstance(x, type(arr[0])) for x in expected)\n\n result = sanitize_array(arr, index=None, dtype=np.dtype(object))\n tm.assert_numpy_array_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\test_construct_ndarray.py | test_construct_ndarray.py | Python | 1,316 | 0.85 | 0.111111 | 0 | node-utils | 999 | 2025-01-29T17:04:04.553088 | GPL-3.0 | true | e3ab4e8a161a48094003e7261f43d552 |
import pytest\n\nfrom pandas.core.dtypes.cast import construct_1d_object_array_from_listlike\n\n\n@pytest.mark.parametrize("datum1", [1, 2.0, "3", (4, 5), [6, 7], None])\n@pytest.mark.parametrize("datum2", [8, 9.0, "10", (11, 12), [13, 14], None])\ndef test_cast_1d_array(datum1, datum2):\n data = [datum1, datum2]\n result = construct_1d_object_array_from_listlike(data)\n\n # Direct comparison fails: https://github.com/numpy/numpy/issues/10218\n assert result.dtype == "object"\n assert list(result) == data\n\n\n@pytest.mark.parametrize("val", [1, 2.0, None])\ndef test_cast_1d_array_invalid_scalar(val):\n with pytest.raises(TypeError, match="has no len()"):\n construct_1d_object_array_from_listlike(val)\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\test_construct_object_arr.py | test_construct_object_arr.py | Python | 717 | 0.95 | 0.1 | 0.071429 | react-lib | 273 | 2023-12-10T22:21:51.469234 | BSD-3-Clause | true | 0bf792570ce3b992c7b7e2f734e81b1c |
import numpy as np\n\nfrom pandas.core.dtypes.cast import dict_compat\n\nfrom pandas import Timestamp\n\n\ndef test_dict_compat():\n data_datetime64 = {np.datetime64("1990-03-15"): 1, np.datetime64("2015-03-15"): 2}\n data_unchanged = {1: 2, 3: 4, 5: 6}\n expected = {Timestamp("1990-3-15"): 1, Timestamp("2015-03-15"): 2}\n assert dict_compat(data_datetime64) == expected\n assert dict_compat(expected) == expected\n assert dict_compat(data_unchanged) == data_unchanged\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\test_dict_compat.py | test_dict_compat.py | Python | 476 | 0.85 | 0.071429 | 0 | react-lib | 937 | 2024-06-02T16:53:48.694484 | MIT | true | 6cd90698a1aaa98f734df2947586ef26 |
import decimal\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.cast import maybe_downcast_to_dtype\n\nfrom pandas import (\n Series,\n Timedelta,\n)\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize(\n "arr,dtype,expected",\n [\n (\n np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995]),\n "infer",\n np.array([8.5, 8.6, 8.7, 8.8, 8.9999999999995]),\n ),\n (\n np.array([8.0, 8.0, 8.0, 8.0, 8.9999999999995]),\n "infer",\n np.array([8, 8, 8, 8, 9], dtype=np.int64),\n ),\n (\n np.array([8.0, 8.0, 8.0, 8.0, 9.0000000000005]),\n "infer",\n np.array([8, 8, 8, 8, 9], dtype=np.int64),\n ),\n (\n # This is a judgement call, but we do _not_ downcast Decimal\n # objects\n np.array([decimal.Decimal(0.0)]),\n "int64",\n np.array([decimal.Decimal(0.0)]),\n ),\n (\n # GH#45837\n np.array([Timedelta(days=1), Timedelta(days=2)], dtype=object),\n "infer",\n np.array([1, 2], dtype="m8[D]").astype("m8[ns]"),\n ),\n # TODO: similar for dt64, dt64tz, Period, Interval?\n ],\n)\ndef test_downcast(arr, expected, dtype):\n result = maybe_downcast_to_dtype(arr, dtype)\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_downcast_booleans():\n # see gh-16875: coercing of booleans.\n ser = Series([True, True, False])\n result = maybe_downcast_to_dtype(ser, np.dtype(np.float64))\n\n expected = ser.values\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_downcast_conversion_no_nan(any_real_numpy_dtype):\n dtype = any_real_numpy_dtype\n expected = np.array([1, 2])\n arr = np.array([1.0, 2.0], dtype=dtype)\n\n result = maybe_downcast_to_dtype(arr, "infer")\n tm.assert_almost_equal(result, expected, check_dtype=False)\n\n\ndef test_downcast_conversion_nan(float_numpy_dtype):\n dtype = float_numpy_dtype\n data = [1.0, 2.0, np.nan]\n\n expected = np.array(data, dtype=dtype)\n arr = np.array(data, dtype=dtype)\n\n result = maybe_downcast_to_dtype(arr, "infer")\n tm.assert_almost_equal(result, expected)\n\n\ndef 
test_downcast_conversion_empty(any_real_numpy_dtype):\n dtype = any_real_numpy_dtype\n arr = np.array([], dtype=dtype)\n result = maybe_downcast_to_dtype(arr, np.dtype("int64"))\n tm.assert_numpy_array_equal(result, np.array([], dtype=np.int64))\n\n\n@pytest.mark.parametrize("klass", [np.datetime64, np.timedelta64])\ndef test_datetime_likes_nan(klass):\n dtype = klass.__name__ + "[ns]"\n arr = np.array([1, 2, np.nan])\n\n exp = np.array([1, 2, klass("NaT")], dtype)\n res = maybe_downcast_to_dtype(arr, dtype)\n tm.assert_numpy_array_equal(res, exp)\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\test_downcast.py | test_downcast.py | Python | 2,778 | 0.95 | 0.072165 | 0.064935 | react-lib | 312 | 2023-10-23T03:32:02.937192 | GPL-3.0 | true | aeedd6e2a9c5e1ac399cda9e53e596ee |
import numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.cast import find_common_type\nfrom pandas.core.dtypes.common import pandas_dtype\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n IntervalDtype,\n PeriodDtype,\n)\n\nfrom pandas import (\n Categorical,\n Index,\n)\n\n\n@pytest.mark.parametrize(\n "source_dtypes,expected_common_dtype",\n [\n ((np.int64,), np.int64),\n ((np.uint64,), np.uint64),\n ((np.float32,), np.float32),\n ((object,), object),\n # Into ints.\n ((np.int16, np.int64), np.int64),\n ((np.int32, np.uint32), np.int64),\n ((np.uint16, np.uint64), np.uint64),\n # Into floats.\n ((np.float16, np.float32), np.float32),\n ((np.float16, np.int16), np.float32),\n ((np.float32, np.int16), np.float32),\n ((np.uint64, np.int64), np.float64),\n ((np.int16, np.float64), np.float64),\n ((np.float16, np.int64), np.float64),\n # Into others.\n ((np.complex128, np.int32), np.complex128),\n ((object, np.float32), object),\n ((object, np.int16), object),\n # Bool with int.\n ((np.dtype("bool"), np.int64), object),\n ((np.dtype("bool"), np.int32), object),\n ((np.dtype("bool"), np.int16), object),\n ((np.dtype("bool"), np.int8), object),\n ((np.dtype("bool"), np.uint64), object),\n ((np.dtype("bool"), np.uint32), object),\n ((np.dtype("bool"), np.uint16), object),\n ((np.dtype("bool"), np.uint8), object),\n # Bool with float.\n ((np.dtype("bool"), np.float64), object),\n ((np.dtype("bool"), np.float32), object),\n (\n (np.dtype("datetime64[ns]"), np.dtype("datetime64[ns]")),\n np.dtype("datetime64[ns]"),\n ),\n (\n (np.dtype("timedelta64[ns]"), np.dtype("timedelta64[ns]")),\n np.dtype("timedelta64[ns]"),\n ),\n (\n (np.dtype("datetime64[ns]"), np.dtype("datetime64[ms]")),\n np.dtype("datetime64[ns]"),\n ),\n (\n (np.dtype("timedelta64[ms]"), np.dtype("timedelta64[ns]")),\n np.dtype("timedelta64[ns]"),\n ),\n ((np.dtype("datetime64[ns]"), np.dtype("timedelta64[ns]")), object),\n ((np.dtype("datetime64[ns]"), np.int64), 
object),\n ],\n)\ndef test_numpy_dtypes(source_dtypes, expected_common_dtype):\n source_dtypes = [pandas_dtype(x) for x in source_dtypes]\n assert find_common_type(source_dtypes) == expected_common_dtype\n\n\ndef test_raises_empty_input():\n with pytest.raises(ValueError, match="no types given"):\n find_common_type([])\n\n\n@pytest.mark.parametrize(\n "dtypes,exp_type",\n [\n ([CategoricalDtype()], "category"),\n ([object, CategoricalDtype()], object),\n ([CategoricalDtype(), CategoricalDtype()], "category"),\n ],\n)\ndef test_categorical_dtype(dtypes, exp_type):\n assert find_common_type(dtypes) == exp_type\n\n\ndef test_datetimetz_dtype_match():\n dtype = DatetimeTZDtype(unit="ns", tz="US/Eastern")\n assert find_common_type([dtype, dtype]) == "datetime64[ns, US/Eastern]"\n\n\n@pytest.mark.parametrize(\n "dtype2",\n [\n DatetimeTZDtype(unit="ns", tz="Asia/Tokyo"),\n np.dtype("datetime64[ns]"),\n object,\n np.int64,\n ],\n)\ndef test_datetimetz_dtype_mismatch(dtype2):\n dtype = DatetimeTZDtype(unit="ns", tz="US/Eastern")\n assert find_common_type([dtype, dtype2]) == object\n assert find_common_type([dtype2, dtype]) == object\n\n\ndef test_period_dtype_match():\n dtype = PeriodDtype(freq="D")\n assert find_common_type([dtype, dtype]) == "period[D]"\n\n\n@pytest.mark.parametrize(\n "dtype2",\n [\n DatetimeTZDtype(unit="ns", tz="Asia/Tokyo"),\n PeriodDtype(freq="2D"),\n PeriodDtype(freq="h"),\n np.dtype("datetime64[ns]"),\n object,\n np.int64,\n ],\n)\ndef test_period_dtype_mismatch(dtype2):\n dtype = PeriodDtype(freq="D")\n assert find_common_type([dtype, dtype2]) == object\n assert find_common_type([dtype2, dtype]) == object\n\n\ninterval_dtypes = [\n IntervalDtype(np.int64, "right"),\n IntervalDtype(np.float64, "right"),\n IntervalDtype(np.uint64, "right"),\n IntervalDtype(DatetimeTZDtype(unit="ns", tz="US/Eastern"), "right"),\n IntervalDtype("M8[ns]", "right"),\n IntervalDtype("m8[ns]", "right"),\n]\n\n\n@pytest.mark.parametrize("left", 
interval_dtypes)\n@pytest.mark.parametrize("right", interval_dtypes)\ndef test_interval_dtype(left, right):\n result = find_common_type([left, right])\n\n if left is right:\n assert result is left\n\n elif left.subtype.kind in ["i", "u", "f"]:\n # i.e. numeric\n if right.subtype.kind in ["i", "u", "f"]:\n # both numeric -> common numeric subtype\n expected = IntervalDtype(np.float64, "right")\n assert result == expected\n else:\n assert result == object\n\n else:\n assert result == object\n\n\n@pytest.mark.parametrize("dtype", interval_dtypes)\ndef test_interval_dtype_with_categorical(dtype):\n obj = Index([], dtype=dtype)\n\n cat = Categorical([], categories=obj)\n\n result = find_common_type([dtype, cat.dtype])\n assert result == dtype\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\test_find_common_type.py | test_find_common_type.py | Python | 5,226 | 0.95 | 0.068571 | 0.047297 | vue-tools | 15 | 2024-10-24T23:18:28.154196 | MIT | true | d4fdf2ef4e75931674e7137a2712a0e6 |
import numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n NaT,\n Series,\n Timestamp,\n)\n\n\n@pytest.mark.parametrize(\n "data,exp_size",\n [\n # see gh-16362.\n ([[NaT, "a", "b", 0], [NaT, "b", "c", 1]], 8),\n ([[NaT, "a", 0], [NaT, "b", 1]], 6),\n ],\n)\ndef test_maybe_infer_to_datetimelike_df_construct(data, exp_size):\n result = DataFrame(np.array(data))\n assert result.size == exp_size\n\n\ndef test_maybe_infer_to_datetimelike_ser_construct():\n # see gh-19671.\n result = Series(["M1701", Timestamp("20130101")])\n assert result.dtype.kind == "O"\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\test_infer_datetimelike.py | test_infer_datetimelike.py | Python | 603 | 0.95 | 0.071429 | 0.086957 | react-lib | 256 | 2024-12-05T22:53:29.787829 | Apache-2.0 | true | 7a91e8819ad26adf1485a3dc3a06a6ee |
from datetime import (\n date,\n datetime,\n timedelta,\n)\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.cast import (\n infer_dtype_from,\n infer_dtype_from_array,\n infer_dtype_from_scalar,\n)\nfrom pandas.core.dtypes.common import is_dtype_equal\n\nfrom pandas import (\n Categorical,\n Interval,\n Period,\n Series,\n Timedelta,\n Timestamp,\n date_range,\n)\n\n\ndef test_infer_dtype_from_int_scalar(any_int_numpy_dtype):\n # Test that infer_dtype_from_scalar is\n # returning correct dtype for int and float.\n data = np.dtype(any_int_numpy_dtype).type(12)\n dtype, val = infer_dtype_from_scalar(data)\n assert dtype == type(data)\n\n\ndef test_infer_dtype_from_float_scalar(float_numpy_dtype):\n float_numpy_dtype = np.dtype(float_numpy_dtype).type\n data = float_numpy_dtype(12)\n\n dtype, val = infer_dtype_from_scalar(data)\n assert dtype == float_numpy_dtype\n\n\n@pytest.mark.parametrize(\n "data,exp_dtype", [(12, np.int64), (np.float64(12), np.float64)]\n)\ndef test_infer_dtype_from_python_scalar(data, exp_dtype):\n dtype, val = infer_dtype_from_scalar(data)\n assert dtype == exp_dtype\n\n\n@pytest.mark.parametrize("bool_val", [True, False])\ndef test_infer_dtype_from_boolean(bool_val):\n dtype, val = infer_dtype_from_scalar(bool_val)\n assert dtype == np.bool_\n\n\ndef test_infer_dtype_from_complex(complex_dtype):\n data = np.dtype(complex_dtype).type(1)\n dtype, val = infer_dtype_from_scalar(data)\n assert dtype == np.complex128\n\n\ndef test_infer_dtype_from_datetime():\n dt64 = np.datetime64(1, "ns")\n dtype, val = infer_dtype_from_scalar(dt64)\n assert dtype == "M8[ns]"\n\n ts = Timestamp(1)\n dtype, val = infer_dtype_from_scalar(ts)\n assert dtype == "M8[ns]"\n\n dt = datetime(2000, 1, 1, 0, 0)\n dtype, val = infer_dtype_from_scalar(dt)\n assert dtype == "M8[us]"\n\n\ndef test_infer_dtype_from_timedelta():\n td64 = np.timedelta64(1, "ns")\n dtype, val = infer_dtype_from_scalar(td64)\n assert dtype == "m8[ns]"\n\n pytd = timedelta(1)\n dtype, 
val = infer_dtype_from_scalar(pytd)\n assert dtype == "m8[us]"\n\n td = Timedelta(1)\n dtype, val = infer_dtype_from_scalar(td)\n assert dtype == "m8[ns]"\n\n\n@pytest.mark.parametrize("freq", ["M", "D"])\ndef test_infer_dtype_from_period(freq):\n p = Period("2011-01-01", freq=freq)\n dtype, val = infer_dtype_from_scalar(p)\n\n exp_dtype = f"period[{freq}]"\n\n assert dtype == exp_dtype\n assert val == p\n\n\ndef test_infer_dtype_misc():\n dt = date(2000, 1, 1)\n dtype, val = infer_dtype_from_scalar(dt)\n assert dtype == np.object_\n\n ts = Timestamp(1, tz="US/Eastern")\n dtype, val = infer_dtype_from_scalar(ts)\n assert dtype == "datetime64[ns, US/Eastern]"\n\n\n@pytest.mark.parametrize("tz", ["UTC", "US/Eastern", "Asia/Tokyo"])\ndef test_infer_from_scalar_tz(tz):\n dt = Timestamp(1, tz=tz)\n dtype, val = infer_dtype_from_scalar(dt)\n\n exp_dtype = f"datetime64[ns, {tz}]"\n\n assert dtype == exp_dtype\n assert val == dt\n\n\n@pytest.mark.parametrize(\n "left, right, subtype",\n [\n (0, 1, "int64"),\n (0.0, 1.0, "float64"),\n (Timestamp(0), Timestamp(1), "datetime64[ns]"),\n (Timestamp(0, tz="UTC"), Timestamp(1, tz="UTC"), "datetime64[ns, UTC]"),\n (Timedelta(0), Timedelta(1), "timedelta64[ns]"),\n ],\n)\ndef test_infer_from_interval(left, right, subtype, closed):\n # GH 30337\n interval = Interval(left, right, closed)\n result_dtype, result_value = infer_dtype_from_scalar(interval)\n expected_dtype = f"interval[{subtype}, {closed}]"\n assert result_dtype == expected_dtype\n assert result_value == interval\n\n\ndef test_infer_dtype_from_scalar_errors():\n msg = "invalid ndarray passed to infer_dtype_from_scalar"\n\n with pytest.raises(ValueError, match=msg):\n infer_dtype_from_scalar(np.array([1]))\n\n\n@pytest.mark.parametrize(\n "value, expected",\n [\n ("foo", np.object_),\n (b"foo", np.object_),\n (1, np.int64),\n (1.5, np.float64),\n (np.datetime64("2016-01-01"), np.dtype("M8[s]")),\n (Timestamp("20160101"), np.dtype("M8[s]")),\n (Timestamp("20160101", 
tz="UTC"), "datetime64[s, UTC]"),\n ],\n)\ndef test_infer_dtype_from_scalar(value, expected, using_infer_string):\n dtype, _ = infer_dtype_from_scalar(value)\n if using_infer_string and value == "foo":\n expected = "string"\n assert is_dtype_equal(dtype, expected)\n\n with pytest.raises(TypeError, match="must be list-like"):\n infer_dtype_from_array(value)\n\n\n@pytest.mark.parametrize(\n "arr, expected",\n [\n ([1], np.dtype(int)),\n (np.array([1], dtype=np.int64), np.int64),\n ([np.nan, 1, ""], np.object_),\n (np.array([[1.0, 2.0]]), np.float64),\n (Categorical(list("aabc")), "category"),\n (Categorical([1, 2, 3]), "category"),\n (date_range("20160101", periods=3), np.dtype("=M8[ns]")),\n (\n date_range("20160101", periods=3, tz="US/Eastern"),\n "datetime64[ns, US/Eastern]",\n ),\n (Series([1.0, 2, 3]), np.float64),\n (Series(list("abc")), np.object_),\n (\n Series(date_range("20160101", periods=3, tz="US/Eastern")),\n "datetime64[ns, US/Eastern]",\n ),\n ],\n)\ndef test_infer_dtype_from_array(arr, expected, using_infer_string):\n dtype, _ = infer_dtype_from_array(arr)\n if (\n using_infer_string\n and isinstance(arr, Series)\n and arr.tolist() == ["a", "b", "c"]\n ):\n expected = "string"\n assert is_dtype_equal(dtype, expected)\n\n\n@pytest.mark.parametrize("cls", [np.datetime64, np.timedelta64])\ndef test_infer_dtype_from_scalar_zerodim_datetimelike(cls):\n # ndarray.item() can incorrectly return int instead of td64/dt64\n val = cls(1234, "ns")\n arr = np.array(val)\n\n dtype, res = infer_dtype_from_scalar(arr)\n assert dtype.type is cls\n assert isinstance(res, cls)\n\n dtype, res = infer_dtype_from(arr)\n assert dtype.type is cls\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\test_infer_dtype.py | test_infer_dtype.py | Python | 6,001 | 0.95 | 0.083333 | 0.023669 | python-kit | 68 | 2025-01-06T20:16:17.451639 | GPL-3.0 | true | 9e7d4c4538a0c60d41292fbeed5f306a |
from datetime import datetime\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.cast import maybe_box_native\n\nfrom pandas import (\n Interval,\n Period,\n Timedelta,\n Timestamp,\n)\n\n\n@pytest.mark.parametrize(\n "obj,expected_dtype",\n [\n (b"\x00\x10", bytes),\n (int(4), int),\n (np.uint(4), int),\n (np.int32(-4), int),\n (np.uint8(4), int),\n (float(454.98), float),\n (np.float16(0.4), float),\n (np.float64(1.4), float),\n (np.bool_(False), bool),\n (datetime(2005, 2, 25), datetime),\n (np.datetime64("2005-02-25"), Timestamp),\n (Timestamp("2005-02-25"), Timestamp),\n (np.timedelta64(1, "D"), Timedelta),\n (Timedelta(1, "D"), Timedelta),\n (Interval(0, 1), Interval),\n (Period("4Q2005"), Period),\n ],\n)\ndef test_maybe_box_native(obj, expected_dtype):\n boxed_obj = maybe_box_native(obj)\n result_dtype = type(boxed_obj)\n assert result_dtype is expected_dtype\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\test_maybe_box_native.py | test_maybe_box_native.py | Python | 996 | 0.85 | 0.025 | 0 | python-kit | 415 | 2024-08-08T20:02:13.963734 | GPL-3.0 | true | 381735f084f9f994ff48d4408cd75d37 |
"""\nThese test the method maybe_promote from core/dtypes/cast.py\n"""\n\nimport datetime\nfrom decimal import Decimal\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslibs import NaT\n\nfrom pandas.core.dtypes.cast import maybe_promote\nfrom pandas.core.dtypes.common import is_scalar\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\nfrom pandas.core.dtypes.missing import isna\n\nimport pandas as pd\n\n\ndef _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar=None):\n """\n Auxiliary function to unify testing of scalar/array promotion.\n\n Parameters\n ----------\n dtype : dtype\n The value to pass on as the first argument to maybe_promote.\n fill_value : scalar\n The value to pass on as the second argument to maybe_promote as\n a scalar.\n expected_dtype : dtype\n The expected dtype returned by maybe_promote (by design this is the\n same regardless of whether fill_value was passed as a scalar or in an\n array!).\n exp_val_for_scalar : scalar\n The expected value for the (potentially upcast) fill_value returned by\n maybe_promote.\n """\n assert is_scalar(fill_value)\n\n # here, we pass on fill_value as a scalar directly; the expected value\n # returned from maybe_promote is fill_value, potentially upcast to the\n # returned dtype.\n result_dtype, result_fill_value = maybe_promote(dtype, fill_value)\n expected_fill_value = exp_val_for_scalar\n\n assert result_dtype == expected_dtype\n _assert_match(result_fill_value, expected_fill_value)\n\n\ndef _assert_match(result_fill_value, expected_fill_value):\n # GH#23982/25425 require the same type in addition to equality/NA-ness\n res_type = type(result_fill_value)\n ex_type = type(expected_fill_value)\n\n if hasattr(result_fill_value, "dtype"):\n # Compare types in a way that is robust to platform-specific\n # idiosyncrasies where e.g. 
sometimes we get "ulonglong" as an alias\n # for "uint64" or "intc" as an alias for "int32"\n assert result_fill_value.dtype.kind == expected_fill_value.dtype.kind\n assert result_fill_value.dtype.itemsize == expected_fill_value.dtype.itemsize\n else:\n # On some builds, type comparison fails, e.g. np.int32 != np.int32\n assert res_type == ex_type or res_type.__name__ == ex_type.__name__\n\n match_value = result_fill_value == expected_fill_value\n if match_value is pd.NA:\n match_value = False\n\n # Note: type check above ensures that we have the _same_ NA value\n # for missing values, None == None (which is checked\n # through match_value above), but np.nan != np.nan and pd.NaT != pd.NaT\n match_missing = isna(result_fill_value) and isna(expected_fill_value)\n\n assert match_value or match_missing\n\n\n@pytest.mark.parametrize(\n "dtype, fill_value, expected_dtype",\n [\n # size 8\n ("int8", 1, "int8"),\n ("int8", np.iinfo("int8").max + 1, "int16"),\n ("int8", np.iinfo("int16").max + 1, "int32"),\n ("int8", np.iinfo("int32").max + 1, "int64"),\n ("int8", np.iinfo("int64").max + 1, "object"),\n ("int8", -1, "int8"),\n ("int8", np.iinfo("int8").min - 1, "int16"),\n ("int8", np.iinfo("int16").min - 1, "int32"),\n ("int8", np.iinfo("int32").min - 1, "int64"),\n ("int8", np.iinfo("int64").min - 1, "object"),\n # keep signed-ness as long as possible\n ("uint8", 1, "uint8"),\n ("uint8", np.iinfo("int8").max + 1, "uint8"),\n ("uint8", np.iinfo("uint8").max + 1, "uint16"),\n ("uint8", np.iinfo("int16").max + 1, "uint16"),\n ("uint8", np.iinfo("uint16").max + 1, "uint32"),\n ("uint8", np.iinfo("int32").max + 1, "uint32"),\n ("uint8", np.iinfo("uint32").max + 1, "uint64"),\n ("uint8", np.iinfo("int64").max + 1, "uint64"),\n ("uint8", np.iinfo("uint64").max + 1, "object"),\n # max of uint8 cannot be contained in int8\n ("uint8", -1, "int16"),\n ("uint8", np.iinfo("int8").min - 1, "int16"),\n ("uint8", np.iinfo("int16").min - 1, "int32"),\n ("uint8", np.iinfo("int32").min - 1, 
"int64"),\n ("uint8", np.iinfo("int64").min - 1, "object"),\n # size 16\n ("int16", 1, "int16"),\n ("int16", np.iinfo("int8").max + 1, "int16"),\n ("int16", np.iinfo("int16").max + 1, "int32"),\n ("int16", np.iinfo("int32").max + 1, "int64"),\n ("int16", np.iinfo("int64").max + 1, "object"),\n ("int16", -1, "int16"),\n ("int16", np.iinfo("int8").min - 1, "int16"),\n ("int16", np.iinfo("int16").min - 1, "int32"),\n ("int16", np.iinfo("int32").min - 1, "int64"),\n ("int16", np.iinfo("int64").min - 1, "object"),\n ("uint16", 1, "uint16"),\n ("uint16", np.iinfo("int8").max + 1, "uint16"),\n ("uint16", np.iinfo("uint8").max + 1, "uint16"),\n ("uint16", np.iinfo("int16").max + 1, "uint16"),\n ("uint16", np.iinfo("uint16").max + 1, "uint32"),\n ("uint16", np.iinfo("int32").max + 1, "uint32"),\n ("uint16", np.iinfo("uint32").max + 1, "uint64"),\n ("uint16", np.iinfo("int64").max + 1, "uint64"),\n ("uint16", np.iinfo("uint64").max + 1, "object"),\n ("uint16", -1, "int32"),\n ("uint16", np.iinfo("int8").min - 1, "int32"),\n ("uint16", np.iinfo("int16").min - 1, "int32"),\n ("uint16", np.iinfo("int32").min - 1, "int64"),\n ("uint16", np.iinfo("int64").min - 1, "object"),\n # size 32\n ("int32", 1, "int32"),\n ("int32", np.iinfo("int8").max + 1, "int32"),\n ("int32", np.iinfo("int16").max + 1, "int32"),\n ("int32", np.iinfo("int32").max + 1, "int64"),\n ("int32", np.iinfo("int64").max + 1, "object"),\n ("int32", -1, "int32"),\n ("int32", np.iinfo("int8").min - 1, "int32"),\n ("int32", np.iinfo("int16").min - 1, "int32"),\n ("int32", np.iinfo("int32").min - 1, "int64"),\n ("int32", np.iinfo("int64").min - 1, "object"),\n ("uint32", 1, "uint32"),\n ("uint32", np.iinfo("int8").max + 1, "uint32"),\n ("uint32", np.iinfo("uint8").max + 1, "uint32"),\n ("uint32", np.iinfo("int16").max + 1, "uint32"),\n ("uint32", np.iinfo("uint16").max + 1, "uint32"),\n ("uint32", np.iinfo("int32").max + 1, "uint32"),\n ("uint32", np.iinfo("uint32").max + 1, "uint64"),\n ("uint32", 
np.iinfo("int64").max + 1, "uint64"),\n ("uint32", np.iinfo("uint64").max + 1, "object"),\n ("uint32", -1, "int64"),\n ("uint32", np.iinfo("int8").min - 1, "int64"),\n ("uint32", np.iinfo("int16").min - 1, "int64"),\n ("uint32", np.iinfo("int32").min - 1, "int64"),\n ("uint32", np.iinfo("int64").min - 1, "object"),\n # size 64\n ("int64", 1, "int64"),\n ("int64", np.iinfo("int8").max + 1, "int64"),\n ("int64", np.iinfo("int16").max + 1, "int64"),\n ("int64", np.iinfo("int32").max + 1, "int64"),\n ("int64", np.iinfo("int64").max + 1, "object"),\n ("int64", -1, "int64"),\n ("int64", np.iinfo("int8").min - 1, "int64"),\n ("int64", np.iinfo("int16").min - 1, "int64"),\n ("int64", np.iinfo("int32").min - 1, "int64"),\n ("int64", np.iinfo("int64").min - 1, "object"),\n ("uint64", 1, "uint64"),\n ("uint64", np.iinfo("int8").max + 1, "uint64"),\n ("uint64", np.iinfo("uint8").max + 1, "uint64"),\n ("uint64", np.iinfo("int16").max + 1, "uint64"),\n ("uint64", np.iinfo("uint16").max + 1, "uint64"),\n ("uint64", np.iinfo("int32").max + 1, "uint64"),\n ("uint64", np.iinfo("uint32").max + 1, "uint64"),\n ("uint64", np.iinfo("int64").max + 1, "uint64"),\n ("uint64", np.iinfo("uint64").max + 1, "object"),\n ("uint64", -1, "object"),\n ("uint64", np.iinfo("int8").min - 1, "object"),\n ("uint64", np.iinfo("int16").min - 1, "object"),\n ("uint64", np.iinfo("int32").min - 1, "object"),\n ("uint64", np.iinfo("int64").min - 1, "object"),\n ],\n)\ndef test_maybe_promote_int_with_int(dtype, fill_value, expected_dtype):\n dtype = np.dtype(dtype)\n expected_dtype = np.dtype(expected_dtype)\n\n # output is not a generic int, but corresponds to expected_dtype\n exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_int_with_float(any_int_numpy_dtype, float_numpy_dtype):\n dtype = np.dtype(any_int_numpy_dtype)\n fill_dtype = np.dtype(float_numpy_dtype)\n\n # create array of given 
dtype; casts "1" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling int with float always upcasts to float64\n expected_dtype = np.float64\n # fill_value can be different float type\n exp_val_for_scalar = np.float64(fill_value)\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_float_with_int(float_numpy_dtype, any_int_numpy_dtype):\n dtype = np.dtype(float_numpy_dtype)\n fill_dtype = np.dtype(any_int_numpy_dtype)\n\n # create array of given dtype; casts "1" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling float with int always keeps float dtype\n # because: np.finfo('float32').max > np.iinfo('uint64').max\n expected_dtype = dtype\n # output is not a generic float, but corresponds to expected_dtype\n exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\n@pytest.mark.parametrize(\n "dtype, fill_value, expected_dtype",\n [\n # float filled with float\n ("float32", 1, "float32"),\n ("float32", float(np.finfo("float32").max) * 1.1, "float64"),\n ("float64", 1, "float64"),\n ("float64", float(np.finfo("float32").max) * 1.1, "float64"),\n # complex filled with float\n ("complex64", 1, "complex64"),\n ("complex64", float(np.finfo("float32").max) * 1.1, "complex128"),\n ("complex128", 1, "complex128"),\n ("complex128", float(np.finfo("float32").max) * 1.1, "complex128"),\n # float filled with complex\n ("float32", 1 + 1j, "complex64"),\n ("float32", float(np.finfo("float32").max) * (1.1 + 1j), "complex128"),\n ("float64", 1 + 1j, "complex128"),\n ("float64", float(np.finfo("float32").max) * (1.1 + 1j), "complex128"),\n # complex filled with complex\n ("complex64", 1 + 1j, "complex64"),\n ("complex64", float(np.finfo("float32").max) * (1.1 + 1j), "complex128"),\n ("complex128", 1 + 1j, "complex128"),\n ("complex128", float(np.finfo("float32").max) * (1.1 + 1j), 
"complex128"),\n ],\n)\ndef test_maybe_promote_float_with_float(dtype, fill_value, expected_dtype):\n dtype = np.dtype(dtype)\n expected_dtype = np.dtype(expected_dtype)\n\n # output is not a generic float, but corresponds to expected_dtype\n exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_bool_with_any(any_numpy_dtype):\n dtype = np.dtype(bool)\n fill_dtype = np.dtype(any_numpy_dtype)\n\n # create array of given dtype; casts "1" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling bool with anything but bool casts to object\n expected_dtype = np.dtype(object) if fill_dtype != bool else fill_dtype\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_any_with_bool(any_numpy_dtype):\n dtype = np.dtype(any_numpy_dtype)\n fill_value = True\n\n # filling anything but bool with bool casts to object\n expected_dtype = np.dtype(object) if dtype != bool else dtype\n # output is not a generic bool, but corresponds to expected_dtype\n exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_bytes_with_any(bytes_dtype, any_numpy_dtype):\n dtype = np.dtype(bytes_dtype)\n fill_dtype = np.dtype(any_numpy_dtype)\n\n # create array of given dtype; casts "1" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # we never use bytes dtype internally, always promote to object\n expected_dtype = np.dtype(np.object_)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_any_with_bytes(any_numpy_dtype):\n dtype = np.dtype(any_numpy_dtype)\n\n # create array of given dtype\n fill_value = b"abc"\n\n # we never use bytes dtype internally, always 
promote to object\n expected_dtype = np.dtype(np.object_)\n # output is not a generic bytes, but corresponds to expected_dtype\n exp_val_for_scalar = np.array([fill_value], dtype=expected_dtype)[0]\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_datetime64_with_any(datetime64_dtype, any_numpy_dtype):\n dtype = np.dtype(datetime64_dtype)\n fill_dtype = np.dtype(any_numpy_dtype)\n\n # create array of given dtype; casts "1" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling datetime with anything but datetime casts to object\n if fill_dtype.kind == "M":\n expected_dtype = dtype\n # for datetime dtypes, scalar values get cast to to_datetime64\n exp_val_for_scalar = pd.Timestamp(fill_value).to_datetime64()\n else:\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\n@pytest.mark.parametrize(\n "fill_value",\n [\n pd.Timestamp("now"),\n np.datetime64("now"),\n datetime.datetime.now(),\n datetime.date.today(),\n ],\n ids=["pd.Timestamp", "np.datetime64", "datetime.datetime", "datetime.date"],\n)\ndef test_maybe_promote_any_with_datetime64(any_numpy_dtype, fill_value):\n dtype = np.dtype(any_numpy_dtype)\n\n # filling datetime with anything but datetime casts to object\n if dtype.kind == "M":\n expected_dtype = dtype\n # for datetime dtypes, scalar values get cast to pd.Timestamp.value\n exp_val_for_scalar = pd.Timestamp(fill_value).to_datetime64()\n else:\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n if type(fill_value) is datetime.date and dtype.kind == "M":\n # Casting date to dt64 is deprecated, in 2.0 enforced to cast to object\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\n@pytest.mark.parametrize(\n "fill_value",\n [\n pd.Timestamp(2023, 1, 1),\n 
np.datetime64("2023-01-01"),\n datetime.datetime(2023, 1, 1),\n datetime.date(2023, 1, 1),\n ],\n ids=["pd.Timestamp", "np.datetime64", "datetime.datetime", "datetime.date"],\n)\ndef test_maybe_promote_any_numpy_dtype_with_datetimetz(\n any_numpy_dtype, tz_aware_fixture, fill_value\n):\n dtype = np.dtype(any_numpy_dtype)\n fill_dtype = DatetimeTZDtype(tz=tz_aware_fixture)\n\n fill_value = pd.Series([fill_value], dtype=fill_dtype)[0]\n\n # filling any numpy dtype with datetimetz casts to object\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_timedelta64_with_any(timedelta64_dtype, any_numpy_dtype):\n dtype = np.dtype(timedelta64_dtype)\n fill_dtype = np.dtype(any_numpy_dtype)\n\n # create array of given dtype; casts "1" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling timedelta with anything but timedelta casts to object\n if fill_dtype.kind == "m":\n expected_dtype = dtype\n # for timedelta dtypes, scalar values get cast to pd.Timedelta.value\n exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64()\n else:\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\n@pytest.mark.parametrize(\n "fill_value",\n [pd.Timedelta(days=1), np.timedelta64(24, "h"), datetime.timedelta(1)],\n ids=["pd.Timedelta", "np.timedelta64", "datetime.timedelta"],\n)\ndef test_maybe_promote_any_with_timedelta64(any_numpy_dtype, fill_value):\n dtype = np.dtype(any_numpy_dtype)\n\n # filling anything but timedelta with timedelta casts to object\n if dtype.kind == "m":\n expected_dtype = dtype\n # for timedelta dtypes, scalar values get cast to pd.Timedelta.value\n exp_val_for_scalar = pd.Timedelta(fill_value).to_timedelta64()\n else:\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, 
expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_string_with_any(string_dtype, any_numpy_dtype):\n dtype = np.dtype(string_dtype)\n fill_dtype = np.dtype(any_numpy_dtype)\n\n # create array of given dtype; casts "1" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling string with anything casts to object\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_any_with_string(any_numpy_dtype):\n dtype = np.dtype(any_numpy_dtype)\n\n # create array of given dtype\n fill_value = "abc"\n\n # filling anything with a string casts to object\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_object_with_any(object_dtype, any_numpy_dtype):\n dtype = np.dtype(object_dtype)\n fill_dtype = np.dtype(any_numpy_dtype)\n\n # create array of given dtype; casts "1" to correct dtype\n fill_value = np.array([1], dtype=fill_dtype)[0]\n\n # filling object with anything stays object\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_any_with_object(any_numpy_dtype):\n dtype = np.dtype(any_numpy_dtype)\n\n # create array of object dtype from a scalar value (i.e. 
passing\n # dtypes.common.is_scalar), which can however not be cast to int/float etc.\n fill_value = pd.DateOffset(1)\n\n # filling object with anything stays object\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n\n _check_promote(dtype, fill_value, expected_dtype, exp_val_for_scalar)\n\n\ndef test_maybe_promote_any_numpy_dtype_with_na(any_numpy_dtype, nulls_fixture):\n fill_value = nulls_fixture\n dtype = np.dtype(any_numpy_dtype)\n\n if isinstance(fill_value, Decimal):\n # Subject to change, but ATM (When Decimal(NAN) is being added to nulls_fixture)\n # this is the existing behavior in maybe_promote,\n # hinges on is_valid_na_for_dtype\n if dtype.kind in "iufc":\n if dtype.kind in "iu":\n expected_dtype = np.dtype(np.float64)\n else:\n expected_dtype = dtype\n exp_val_for_scalar = np.nan\n else:\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n elif dtype.kind in "iu" and fill_value is not NaT:\n # integer + other missing value (np.nan / None) casts to float\n expected_dtype = np.float64\n exp_val_for_scalar = np.nan\n elif dtype == object and fill_value is NaT:\n # inserting into object does not cast the value\n # but *does* cast None to np.nan\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = fill_value\n elif dtype.kind in "mM":\n # datetime / timedelta cast all missing values to dtyped-NaT\n expected_dtype = dtype\n exp_val_for_scalar = dtype.type("NaT", "ns")\n elif fill_value is NaT:\n # NaT upcasts everything that's not datetime/timedelta to object\n expected_dtype = np.dtype(object)\n exp_val_for_scalar = NaT\n elif dtype.kind in "fc":\n # float / complex + missing value (!= NaT) stays the same\n expected_dtype = dtype\n exp_val_for_scalar = np.nan\n else:\n # all other cases cast to object, and use np.nan as missing value\n expected_dtype = np.dtype(object)\n if fill_value is pd.NA:\n exp_val_for_scalar = pd.NA\n else:\n exp_val_for_scalar = np.nan\n\n _check_promote(dtype, fill_value, 
expected_dtype, exp_val_for_scalar)\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\test_promote.py | test_promote.py | Python | 20,755 | 0.95 | 0.079245 | 0.16317 | node-utils | 993 | 2025-06-13T08:57:52.074020 | BSD-3-Clause | true | 7cce8412e751c7f789cb7b04e61e81f7 |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\__pycache__\test_can_hold_element.cpython-313.pyc | test_can_hold_element.cpython-313.pyc | Other | 4,240 | 0.8 | 0 | 0.047619 | node-utils | 166 | 2024-07-31T13:27:51.527332 | MIT | true | 588404afd456f5efdcecab831effebac |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\__pycache__\test_construct_from_scalar.cpython-313.pyc | test_construct_from_scalar.cpython-313.pyc | Other | 2,987 | 0.8 | 0 | 0 | react-lib | 785 | 2024-10-15T09:34:14.098797 | GPL-3.0 | true | 984278c3b63523adb0e3aa5231027242 |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\__pycache__\test_construct_ndarray.cpython-313.pyc | test_construct_ndarray.cpython-313.pyc | Other | 2,789 | 0.8 | 0 | 0 | awesome-app | 805 | 2024-09-09T02:01:09.392615 | Apache-2.0 | true | 43a287bb877e20016791777658281e06 |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\__pycache__\test_construct_object_arr.cpython-313.pyc | test_construct_object_arr.cpython-313.pyc | Other | 1,461 | 0.8 | 0 | 0 | node-utils | 15 | 2025-01-21T21:25:49.917607 | Apache-2.0 | true | 4dd9b90a88d653a8ee2e310b4048cd8c |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\__pycache__\test_dict_compat.cpython-313.pyc | test_dict_compat.cpython-313.pyc | Other | 959 | 0.7 | 0 | 0 | vue-tools | 924 | 2024-04-09T04:51:51.834878 | MIT | true | d57f3902abc8de3731bc1bb762c71507 |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\__pycache__\test_downcast.cpython-313.pyc | test_downcast.cpython-313.pyc | Other | 4,459 | 0.8 | 0 | 0.02 | node-utils | 845 | 2025-01-15T16:09:03.069809 | Apache-2.0 | true | a528eafb3001e5bc5935e2810ad8f87f |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\__pycache__\test_find_common_type.cpython-313.pyc | test_find_common_type.cpython-313.pyc | Other | 8,375 | 0.8 | 0 | 0.04902 | node-utils | 887 | 2023-12-24T14:08:13.024443 | BSD-3-Clause | true | 80a065bcd1fc5224570ca69edacb8da0 |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\__pycache__\test_infer_datetimelike.cpython-313.pyc | test_infer_datetimelike.cpython-313.pyc | Other | 1,262 | 0.8 | 0 | 0 | react-lib | 615 | 2023-12-29T22:57:45.389771 | Apache-2.0 | true | d5fdf671f30c31d206701a274ed57c58 |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\__pycache__\test_infer_dtype.cpython-313.pyc | test_infer_dtype.cpython-313.pyc | Other | 9,552 | 0.8 | 0 | 0 | vue-tools | 645 | 2024-01-31T19:24:42.070343 | GPL-3.0 | true | 8b034a9e52b38b4adef1b891d2e32f7a |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\__pycache__\test_maybe_box_native.cpython-313.pyc | test_maybe_box_native.cpython-313.pyc | Other | 1,716 | 0.8 | 0 | 0 | vue-tools | 371 | 2024-06-15T04:27:01.505166 | GPL-3.0 | true | 64d35bd55b44d0aeeda36e98c91aece3 |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\__pycache__\test_promote.cpython-313.pyc | test_promote.cpython-313.pyc | Other | 24,556 | 0.95 | 0.008621 | 0.004464 | react-lib | 886 | 2023-09-15T19:16:05.904220 | MIT | true | c883a28118f4e28b4182967fe37329f6 |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\cast\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 199 | 0.7 | 0 | 0 | node-utils | 287 | 2024-12-12T17:34:33.722991 | MIT | true | 70b60b688fe65f7f862b197e9413257b |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\__pycache__\test_common.cpython-313.pyc | test_common.cpython-313.pyc | Other | 55,312 | 0.75 | 0.002967 | 0.018692 | awesome-app | 653 | 2024-06-12T01:19:44.037261 | MIT | true | 51895680e16dc3bf5ab0dc9c76c3ba52 |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\__pycache__\test_concat.cpython-313.pyc | test_concat.cpython-313.pyc | Other | 3,506 | 0.8 | 0.02 | 0.020408 | awesome-app | 685 | 2024-02-17T23:34:31.216241 | BSD-3-Clause | true | 55055b9cceeaf165ae6e983a85b6fb83 |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\__pycache__\test_dtypes.cpython-313.pyc | test_dtypes.cpython-313.pyc | Other | 69,296 | 0.75 | 0.0112 | 0.004886 | react-lib | 490 | 2024-06-07T17:32:24.529138 | MIT | true | 4e79bf87f1bee0fb35ca8b6e72de2356 |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\__pycache__\test_generic.cpython-313.pyc | test_generic.cpython-313.pyc | Other | 8,319 | 0.8 | 0.012195 | 0.113924 | react-lib | 842 | 2023-10-27T01:42:21.592519 | BSD-3-Clause | true | 36a75d36e7018cbd1f2ece95e80dd995 |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\__pycache__\test_missing.cpython-313.pyc | test_missing.cpython-313.pyc | Other | 50,903 | 0.8 | 0 | 0.002618 | node-utils | 146 | 2023-12-21T02:36:58.766799 | MIT | true | 5e208429d773c8d5a3bf098ab9e551d6 |
\n\n | .venv\Lib\site-packages\pandas\tests\dtypes\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 194 | 0.7 | 0 | 0 | node-utils | 518 | 2024-07-14T15:00:04.867440 | MIT | true | 9530ee12fe21b9fa69206e51ce653561 |
import operator\n\nimport pytest\n\nfrom pandas._config.config import _get_option\n\nfrom pandas import (\n Series,\n options,\n)\n\n\n@pytest.fixture\ndef dtype():\n """A fixture providing the ExtensionDtype to validate."""\n raise NotImplementedError\n\n\n@pytest.fixture\ndef data():\n """\n Length-100 array for this type.\n\n * data[0] and data[1] should both be non missing\n * data[0] and data[1] should not be equal\n """\n raise NotImplementedError\n\n\n@pytest.fixture\ndef data_for_twos(dtype):\n """\n Length-100 array in which all the elements are two.\n\n Call pytest.skip in your fixture if the dtype does not support divmod.\n """\n if not (dtype._is_numeric or dtype.kind == "m"):\n # Object-dtypes may want to allow this, but for the most part\n # only numeric and timedelta-like dtypes will need to implement this.\n pytest.skip(f"{dtype} is not a numeric dtype")\n\n raise NotImplementedError\n\n\n@pytest.fixture\ndef data_missing():\n """Length-2 array with [NA, Valid]"""\n raise NotImplementedError\n\n\n@pytest.fixture(params=["data", "data_missing"])\ndef all_data(request, data, data_missing):\n """Parametrized fixture giving 'data' and 'data_missing'"""\n if request.param == "data":\n return data\n elif request.param == "data_missing":\n return data_missing\n\n\n@pytest.fixture\ndef data_repeated(data):\n """\n Generate many datasets.\n\n Parameters\n ----------\n data : fixture implementing `data`\n\n Returns\n -------\n Callable[[int], Generator]:\n A callable that takes a `count` argument and\n returns a generator yielding `count` datasets.\n """\n\n def gen(count):\n for _ in range(count):\n yield data\n\n return gen\n\n\n@pytest.fixture\ndef data_for_sorting():\n """\n Length-3 array with a known sort order.\n\n This should be three items [B, C, A] with\n A < B < C\n\n For boolean dtypes (for which there are only 2 values available),\n set B=C=True\n """\n raise NotImplementedError\n\n\n@pytest.fixture\ndef data_missing_for_sorting():\n """\n 
Length-3 array with a known sort order.\n\n This should be three items [B, NA, A] with\n A < B and NA missing.\n """\n raise NotImplementedError\n\n\n@pytest.fixture\ndef na_cmp():\n """\n Binary operator for comparing NA values.\n\n Should return a function of two arguments that returns\n True if both arguments are (scalar) NA for your type.\n\n By default, uses ``operator.is_``\n """\n return operator.is_\n\n\n@pytest.fixture\ndef na_value(dtype):\n """\n The scalar missing value for this type. Default dtype.na_value.\n\n TODO: can be removed in 3.x (see https://github.com/pandas-dev/pandas/pull/54930)\n """\n return dtype.na_value\n\n\n@pytest.fixture\ndef data_for_grouping():\n """\n Data for factorization, grouping, and unique tests.\n\n Expected to be like [B, B, NA, NA, A, A, B, C]\n\n Where A < B < C and NA is missing.\n\n If a dtype has _is_boolean = True, i.e. only 2 unique non-NA entries,\n then set C=B.\n """\n raise NotImplementedError\n\n\n@pytest.fixture(params=[True, False])\ndef box_in_series(request):\n """Whether to box the data in a Series"""\n return request.param\n\n\n@pytest.fixture(\n params=[\n lambda x: 1,\n lambda x: [1] * len(x),\n lambda x: Series([1] * len(x)),\n lambda x: x,\n ],\n ids=["scalar", "list", "series", "object"],\n)\ndef groupby_apply_op(request):\n """\n Functions to test groupby.apply().\n """\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef as_frame(request):\n """\n Boolean fixture to support Series and Series.to_frame() comparison testing.\n """\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef as_series(request):\n """\n Boolean fixture to support arr and Series(arr) comparison testing.\n """\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef use_numpy(request):\n """\n Boolean fixture to support comparison testing of ExtensionDtype array\n and numpy array.\n """\n return request.param\n\n\n@pytest.fixture(params=["ffill", "bfill"])\ndef 
fillna_method(request):\n """\n Parametrized fixture giving method parameters 'ffill' and 'bfill' for\n Series.fillna(method=<method>) testing.\n """\n return request.param\n\n\n@pytest.fixture(params=[True, False])\ndef as_array(request):\n """\n Boolean fixture to support ExtensionDtype _from_sequence method testing.\n """\n return request.param\n\n\n@pytest.fixture\ndef invalid_scalar(data):\n """\n A scalar that *cannot* be held by this ExtensionArray.\n\n The default should work for most subclasses, but is not guaranteed.\n\n If the array can hold any item (i.e. object dtype), then use pytest.skip.\n """\n return object.__new__(object)\n\n\n@pytest.fixture\ndef using_copy_on_write() -> bool:\n """\n Fixture to check if Copy-on-Write is enabled.\n """\n return (\n options.mode.copy_on_write is True\n and _get_option("mode.data_manager", silent=True) == "block"\n )\n | .venv\Lib\site-packages\pandas\tests\extension\conftest.py | conftest.py | Python | 5,061 | 0.95 | 0.16087 | 0.023669 | vue-tools | 505 | 2023-12-07T04:05:26.512117 | GPL-3.0 | true | 6276828961833e2144d7cc966f409c85 |
"""\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\n\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\n\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\n"""\nimport string\n\nimport numpy as np\nimport pytest\n\nfrom pandas._config import using_string_dtype\n\nimport pandas as pd\nfrom pandas import Categorical\nimport pandas._testing as tm\nfrom pandas.api.types import CategoricalDtype\nfrom pandas.tests.extension import base\n\n\ndef make_data():\n while True:\n values = np.random.default_rng(2).choice(list(string.ascii_letters), size=100)\n # ensure we meet the requirements\n # 1. first two not null\n # 2. 
first and second are different\n if values[0] != values[1]:\n break\n return values\n\n\n@pytest.fixture\ndef dtype():\n return CategoricalDtype()\n\n\n@pytest.fixture\ndef data():\n """Length-100 array for this type.\n\n * data[0] and data[1] should both be non missing\n * data[0] and data[1] should not be equal\n """\n return Categorical(make_data())\n\n\n@pytest.fixture\ndef data_missing():\n """Length 2 array with [NA, Valid]"""\n return Categorical([np.nan, "A"])\n\n\n@pytest.fixture\ndef data_for_sorting():\n return Categorical(["A", "B", "C"], categories=["C", "A", "B"], ordered=True)\n\n\n@pytest.fixture\ndef data_missing_for_sorting():\n return Categorical(["A", None, "B"], categories=["B", "A"], ordered=True)\n\n\n@pytest.fixture\ndef data_for_grouping():\n return Categorical(["a", "a", None, None, "b", "b", "a", "c"])\n\n\nclass TestCategorical(base.ExtensionTests):\n @pytest.mark.xfail(reason="Memory usage doesn't match")\n def test_memory_usage(self, data):\n # TODO: Is this deliberate?\n super().test_memory_usage(data)\n\n def test_contains(self, data, data_missing):\n # GH-37867\n # na value handling in Categorical.__contains__ is deprecated.\n # See base.BaseInterFaceTests.test_contains for more details.\n\n na_value = data.dtype.na_value\n # ensure data without missing values\n data = data[~data.isna()]\n\n # first elements are non-missing\n assert data[0] in data\n assert data_missing[0] in data_missing\n\n # check the presence of na_value\n assert na_value in data_missing\n assert na_value not in data\n\n # Categoricals can contain other nan-likes than na_value\n for na_value_obj in tm.NULL_OBJECTS:\n if na_value_obj is na_value:\n continue\n assert na_value_obj not in data\n # this section suffers from super method\n if not using_string_dtype():\n assert na_value_obj in data_missing\n\n def test_empty(self, dtype):\n cls = dtype.construct_array_type()\n result = cls._empty((4,), dtype=dtype)\n\n assert isinstance(result, cls)\n # the dtype we 
passed is not initialized, so will not match the\n # dtype on our result.\n assert result.dtype == CategoricalDtype([])\n\n @pytest.mark.skip(reason="Backwards compatibility")\n def test_getitem_scalar(self, data):\n # CategoricalDtype.type isn't "correct" since it should\n # be a parent of the elements (object). But don't want\n # to break things by changing.\n super().test_getitem_scalar(data)\n\n @pytest.mark.xfail(reason="Unobserved categories included")\n def test_value_counts(self, all_data, dropna):\n return super().test_value_counts(all_data, dropna)\n\n def test_combine_add(self, data_repeated):\n # GH 20825\n # When adding categoricals in combine, result is a string\n orig_data1, orig_data2 = data_repeated(2)\n s1 = pd.Series(orig_data1)\n s2 = pd.Series(orig_data2)\n result = s1.combine(s2, lambda x1, x2: x1 + x2)\n expected = pd.Series(\n [a + b for (a, b) in zip(list(orig_data1), list(orig_data2))]\n )\n tm.assert_series_equal(result, expected)\n\n val = s1.iloc[0]\n result = s1.combine(val, lambda x1, x2: x1 + x2)\n expected = pd.Series([a + val for a in list(orig_data1)])\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("na_action", [None, "ignore"])\n def test_map(self, data, na_action):\n result = data.map(lambda x: x, na_action=na_action)\n tm.assert_extension_array_equal(result, data)\n\n def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):\n # frame & scalar\n op_name = all_arithmetic_operators\n if op_name == "__rmod__":\n request.applymarker(\n pytest.mark.xfail(\n reason="rmod never called when string is first argument"\n )\n )\n super().test_arith_frame_with_scalar(data, op_name)\n\n def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request):\n op_name = all_arithmetic_operators\n if op_name == "__rmod__":\n request.applymarker(\n pytest.mark.xfail(\n reason="rmod never called when string is first argument"\n )\n )\n super().test_arith_series_with_scalar(data, 
op_name)\n\n def _compare_other(self, ser: pd.Series, data, op, other):\n op_name = f"__{op.__name__}__"\n if op_name not in ["__eq__", "__ne__"]:\n msg = "Unordered Categoricals can only compare equality or not"\n with pytest.raises(TypeError, match=msg):\n op(data, other)\n else:\n return super()._compare_other(ser, data, op, other)\n\n @pytest.mark.xfail(reason="Categorical overrides __repr__")\n @pytest.mark.parametrize("size", ["big", "small"])\n def test_array_repr(self, data, size):\n super().test_array_repr(data, size)\n\n @pytest.mark.xfail(reason="TBD")\n @pytest.mark.parametrize("as_index", [True, False])\n def test_groupby_extension_agg(self, as_index, data_for_grouping):\n super().test_groupby_extension_agg(as_index, data_for_grouping)\n\n\nclass Test2DCompat(base.NDArrayBacked2DTests):\n def test_repr_2d(self, data):\n # Categorical __repr__ doesn't include "Categorical", so we need\n # to special-case\n res = repr(data.reshape(1, -1))\n assert res.count("\nCategories") == 1\n\n res = repr(data.reshape(-1, 1))\n assert res.count("\nCategories") == 1\n | .venv\Lib\site-packages\pandas\tests\extension\test_categorical.py | test_categorical.py | Python | 6,812 | 0.95 | 0.195 | 0.152866 | node-utils | 362 | 2024-04-30T03:13:09.980852 | Apache-2.0 | true | 7f269b4b3e9d05988268e47fb062194f |
import numpy as np\nimport pytest\n\nfrom pandas.core.dtypes import dtypes\nfrom pandas.core.dtypes.common import is_extension_array_dtype\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.core.arrays import ExtensionArray\n\n\nclass DummyDtype(dtypes.ExtensionDtype):\n pass\n\n\nclass DummyArray(ExtensionArray):\n def __init__(self, data) -> None:\n self.data = data\n\n def __array__(self, dtype=None, copy=None):\n return self.data\n\n @property\n def dtype(self):\n return DummyDtype()\n\n def astype(self, dtype, copy=True):\n # we don't support anything but a single dtype\n if isinstance(dtype, DummyDtype):\n if copy:\n return type(self)(self.data)\n return self\n elif not copy:\n return np.asarray(self, dtype=dtype)\n else:\n return np.array(self, dtype=dtype, copy=copy)\n\n\nclass TestExtensionArrayDtype:\n @pytest.mark.parametrize(\n "values",\n [\n pd.Categorical([]),\n pd.Categorical([]).dtype,\n pd.Series(pd.Categorical([])),\n DummyDtype(),\n DummyArray(np.array([1, 2])),\n ],\n )\n def test_is_extension_array_dtype(self, values):\n assert is_extension_array_dtype(values)\n\n @pytest.mark.parametrize("values", [np.array([]), pd.Series(np.array([]))])\n def test_is_not_extension_array_dtype(self, values):\n assert not is_extension_array_dtype(values)\n\n\ndef test_astype():\n arr = DummyArray(np.array([1, 2, 3]))\n expected = np.array([1, 2, 3], dtype=object)\n\n result = arr.astype(object)\n tm.assert_numpy_array_equal(result, expected)\n\n result = arr.astype("object")\n tm.assert_numpy_array_equal(result, expected)\n\n\ndef test_astype_no_copy():\n arr = DummyArray(np.array([1, 2, 3], dtype=np.int64))\n result = arr.astype(arr.dtype, copy=False)\n\n assert arr is result\n\n result = arr.astype(arr.dtype)\n assert arr is not result\n\n\n@pytest.mark.parametrize("dtype", [dtypes.CategoricalDtype(), dtypes.IntervalDtype()])\ndef test_is_extension_array_dtype(dtype):\n assert isinstance(dtype, dtypes.ExtensionDtype)\n assert 
is_extension_array_dtype(dtype)\n\n\nclass CapturingStringArray(pd.arrays.StringArray):\n """Extend StringArray to capture arguments to __getitem__"""\n\n def __getitem__(self, item):\n self.last_item_arg = item\n return super().__getitem__(item)\n\n\ndef test_ellipsis_index():\n # GH#42430 1D slices over extension types turn into N-dimensional slices\n # over ExtensionArrays\n df = pd.DataFrame(\n {"col1": CapturingStringArray(np.array(["hello", "world"], dtype=object))}\n )\n _ = df.iloc[:1]\n\n # String comparison because there's no native way to compare slices.\n # Before the fix for GH#42430, last_item_arg would get set to the 2D slice\n # (Ellipsis, slice(None, 1, None))\n out = df["col1"].array.last_item_arg\n assert str(out) == "slice(None, 1, None)"\n | .venv\Lib\site-packages\pandas\tests\extension\test_common.py | test_common.py | Python | 2,975 | 0.95 | 0.171429 | 0.077922 | react-lib | 142 | 2025-05-06T17:21:16.381371 | MIT | true | 74bd0b1d4e4d674744df3a7dc16cfeb7 |
"""\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\n\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\n\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\n"""\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.dtypes import DatetimeTZDtype\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.core.arrays import DatetimeArray\nfrom pandas.tests.extension import base\n\n\n@pytest.fixture(params=["US/Central"])\ndef dtype(request):\n return DatetimeTZDtype(unit="ns", tz=request.param)\n\n\n@pytest.fixture\ndef data(dtype):\n data = DatetimeArray._from_sequence(\n pd.date_range("2000", periods=100, tz=dtype.tz), dtype=dtype\n )\n return data\n\n\n@pytest.fixture\ndef data_missing(dtype):\n return DatetimeArray._from_sequence(\n np.array(["NaT", "2000-01-01"], dtype="datetime64[ns]"), dtype=dtype\n )\n\n\n@pytest.fixture\ndef data_for_sorting(dtype):\n a = pd.Timestamp("2000-01-01")\n b = pd.Timestamp("2000-01-02")\n c = pd.Timestamp("2000-01-03")\n return DatetimeArray._from_sequence(\n np.array([b, c, a], dtype="datetime64[ns]"), dtype=dtype\n )\n\n\n@pytest.fixture\ndef data_missing_for_sorting(dtype):\n a = pd.Timestamp("2000-01-01")\n b = pd.Timestamp("2000-01-02")\n return DatetimeArray._from_sequence(\n np.array([b, "NaT", a], dtype="datetime64[ns]"), dtype=dtype\n )\n\n\n@pytest.fixture\ndef data_for_grouping(dtype):\n """\n Expected to be like [B, B, NA, NA, A, A, B, C]\n\n Where A < B < C and NA is missing\n """\n a = pd.Timestamp("2000-01-01")\n b = 
pd.Timestamp("2000-01-02")\n c = pd.Timestamp("2000-01-03")\n na = "NaT"\n return DatetimeArray._from_sequence(\n np.array([b, b, na, na, a, a, b, c], dtype="datetime64[ns]"), dtype=dtype\n )\n\n\n@pytest.fixture\ndef na_cmp():\n def cmp(a, b):\n return a is pd.NaT and a is b\n\n return cmp\n\n\n# ----------------------------------------------------------------------------\nclass TestDatetimeArray(base.ExtensionTests):\n def _get_expected_exception(self, op_name, obj, other):\n if op_name in ["__sub__", "__rsub__"]:\n return None\n return super()._get_expected_exception(op_name, obj, other)\n\n def _supports_accumulation(self, ser, op_name: str) -> bool:\n return op_name in ["cummin", "cummax"]\n\n def _supports_reduction(self, obj, op_name: str) -> bool:\n return op_name in ["min", "max", "median", "mean", "std", "any", "all"]\n\n @pytest.mark.parametrize("skipna", [True, False])\n def test_reduce_series_boolean(self, data, all_boolean_reductions, skipna):\n meth = all_boolean_reductions\n msg = f"'{meth}' with datetime64 dtypes is deprecated and will raise in"\n with tm.assert_produces_warning(\n FutureWarning, match=msg, check_stacklevel=False\n ):\n super().test_reduce_series_boolean(data, all_boolean_reductions, skipna)\n\n def test_series_constructor(self, data):\n # Series construction drops any .freq attr\n data = data._with_freq(None)\n super().test_series_constructor(data)\n\n @pytest.mark.parametrize("na_action", [None, "ignore"])\n def test_map(self, data, na_action):\n result = data.map(lambda x: x, na_action=na_action)\n tm.assert_extension_array_equal(result, data)\n\n def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):\n if op_name in ["median", "mean", "std"]:\n alt = ser.astype("int64")\n\n res_op = getattr(ser, op_name)\n exp_op = getattr(alt, op_name)\n result = res_op(skipna=skipna)\n expected = exp_op(skipna=skipna)\n if op_name in ["mean", "median"]:\n # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype"\n # has no 
attribute "tz"\n tz = ser.dtype.tz # type: ignore[union-attr]\n expected = pd.Timestamp(expected, tz=tz)\n else:\n expected = pd.Timedelta(expected)\n tm.assert_almost_equal(result, expected)\n\n else:\n return super().check_reduce(ser, op_name, skipna)\n\n\nclass Test2DCompat(base.NDArrayBacked2DTests):\n pass\n | .venv\Lib\site-packages\pandas\tests\extension\test_datetime.py | test_datetime.py | Python | 4,614 | 0.95 | 0.173611 | 0.036036 | react-lib | 517 | 2024-02-24T16:57:42.451772 | BSD-3-Clause | true | 202e11f88845add55636df317bbe0f99 |
"""\nTests for behavior if an author does *not* implement EA methods.\n"""\nimport numpy as np\nimport pytest\n\nfrom pandas.core.arrays import ExtensionArray\n\n\nclass MyEA(ExtensionArray):\n def __init__(self, values) -> None:\n self._values = values\n\n\n@pytest.fixture\ndef data():\n arr = np.arange(10)\n return MyEA(arr)\n\n\nclass TestExtensionArray:\n def test_errors(self, data, all_arithmetic_operators):\n # invalid ops\n op_name = all_arithmetic_operators\n with pytest.raises(AttributeError):\n getattr(data, op_name)\n | .venv\Lib\site-packages\pandas\tests\extension\test_extension.py | test_extension.py | Python | 559 | 0.95 | 0.269231 | 0.052632 | awesome-app | 674 | 2024-04-16T16:41:22.766071 | MIT | true | bc5189b19a907173de69029390b6b6a4 |
"""\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\n\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\n\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\n"""\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.dtypes import IntervalDtype\n\nfrom pandas import Interval\nfrom pandas.core.arrays import IntervalArray\nfrom pandas.tests.extension import base\n\nif TYPE_CHECKING:\n import pandas as pd\n\n\ndef make_data():\n N = 100\n left_array = np.random.default_rng(2).uniform(size=N).cumsum()\n right_array = left_array + np.random.default_rng(2).uniform(size=N)\n return [Interval(left, right) for left, right in zip(left_array, right_array)]\n\n\n@pytest.fixture\ndef dtype():\n return IntervalDtype()\n\n\n@pytest.fixture\ndef data():\n """Length-100 PeriodArray for semantics test."""\n return IntervalArray(make_data())\n\n\n@pytest.fixture\ndef data_missing():\n """Length 2 array with [NA, Valid]"""\n return IntervalArray.from_tuples([None, (0, 1)])\n\n\n@pytest.fixture\ndef data_for_twos():\n pytest.skip("Interval is not a numeric dtype")\n\n\n@pytest.fixture\ndef data_for_sorting():\n return IntervalArray.from_tuples([(1, 2), (2, 3), (0, 1)])\n\n\n@pytest.fixture\ndef data_missing_for_sorting():\n return IntervalArray.from_tuples([(1, 2), None, (0, 1)])\n\n\n@pytest.fixture\ndef data_for_grouping():\n a = (0, 1)\n b = (1, 2)\n c = (2, 3)\n return IntervalArray.from_tuples([b, b, None, None, a, a, b, 
c])\n\n\nclass TestIntervalArray(base.ExtensionTests):\n divmod_exc = TypeError\n\n def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:\n return op_name in ["min", "max"]\n\n @pytest.mark.xfail(\n reason="Raises with incorrect message bc it disallows *all* listlikes "\n "instead of just wrong-length listlikes"\n )\n def test_fillna_length_mismatch(self, data_missing):\n super().test_fillna_length_mismatch(data_missing)\n\n @pytest.mark.filterwarnings(\n "ignore:invalid value encountered in cast:RuntimeWarning"\n )\n def test_hash_pandas_object(self, data):\n super().test_hash_pandas_object(data)\n\n @pytest.mark.filterwarnings(\n "ignore:invalid value encountered in cast:RuntimeWarning"\n )\n def test_hash_pandas_object_works(self, data, as_frame):\n super().test_hash_pandas_object_works(data, as_frame)\n\n @pytest.mark.filterwarnings(\n "ignore:invalid value encountered in cast:RuntimeWarning"\n )\n @pytest.mark.parametrize("engine", ["c", "python"])\n def test_EA_types(self, engine, data, request):\n super().test_EA_types(engine, data, request)\n\n @pytest.mark.filterwarnings(\n "ignore:invalid value encountered in cast:RuntimeWarning"\n )\n def test_astype_str(self, data):\n super().test_astype_str(data)\n\n\n# TODO: either belongs in tests.arrays.interval or move into base tests.\ndef test_fillna_non_scalar_raises(data_missing):\n msg = "can only insert Interval objects and NA into an IntervalArray"\n with pytest.raises(TypeError, match=msg):\n data_missing.fillna([1, 1])\n | .venv\Lib\site-packages\pandas\tests\extension\test_interval.py | test_interval.py | Python | 3,585 | 0.95 | 0.195122 | 0.011236 | node-utils | 744 | 2025-01-24T23:25:17.729273 | MIT | true | f5dc56903d71fc058daa3e101f416c0d |
"""\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\n\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\n\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\n"""\nimport warnings\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import (\n IS64,\n is_platform_windows,\n)\nfrom pandas.compat.numpy import np_version_gt2\n\nfrom pandas.core.dtypes.common import (\n is_float_dtype,\n is_signed_integer_dtype,\n is_unsigned_integer_dtype,\n)\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.core.arrays.boolean import BooleanDtype\nfrom pandas.core.arrays.floating import (\n Float32Dtype,\n Float64Dtype,\n)\nfrom pandas.core.arrays.integer import (\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n)\nfrom pandas.tests.extension import base\n\nis_windows_or_32bit = (is_platform_windows() and not np_version_gt2) or not IS64\n\npytestmark = [\n pytest.mark.filterwarnings(\n "ignore:invalid value encountered in divide:RuntimeWarning"\n ),\n pytest.mark.filterwarnings("ignore:Mean of empty slice:RuntimeWarning"),\n # overflow only relevant for Floating dtype cases cases\n pytest.mark.filterwarnings("ignore:overflow encountered in reduce:RuntimeWarning"),\n]\n\n\ndef make_data():\n return list(range(1, 9)) + [pd.NA] + list(range(10, 98)) + [pd.NA] + [99, 100]\n\n\ndef make_float_data():\n return (\n list(np.arange(0.1, 0.9, 0.1))\n + [pd.NA]\n + list(np.arange(1, 9.8, 0.1))\n + [pd.NA]\n + [9.9, 10.0]\n )\n\n\ndef 
make_bool_data():\n return [True, False] * 4 + [np.nan] + [True, False] * 44 + [np.nan] + [True, False]\n\n\n@pytest.fixture(\n params=[\n Int8Dtype,\n Int16Dtype,\n Int32Dtype,\n Int64Dtype,\n UInt8Dtype,\n UInt16Dtype,\n UInt32Dtype,\n UInt64Dtype,\n Float32Dtype,\n Float64Dtype,\n BooleanDtype,\n ]\n)\ndef dtype(request):\n return request.param()\n\n\n@pytest.fixture\ndef data(dtype):\n if dtype.kind == "f":\n data = make_float_data()\n elif dtype.kind == "b":\n data = make_bool_data()\n else:\n data = make_data()\n return pd.array(data, dtype=dtype)\n\n\n@pytest.fixture\ndef data_for_twos(dtype):\n if dtype.kind == "b":\n return pd.array(np.ones(100), dtype=dtype)\n return pd.array(np.ones(100) * 2, dtype=dtype)\n\n\n@pytest.fixture\ndef data_missing(dtype):\n if dtype.kind == "f":\n return pd.array([pd.NA, 0.1], dtype=dtype)\n elif dtype.kind == "b":\n return pd.array([np.nan, True], dtype=dtype)\n return pd.array([pd.NA, 1], dtype=dtype)\n\n\n@pytest.fixture\ndef data_for_sorting(dtype):\n if dtype.kind == "f":\n return pd.array([0.1, 0.2, 0.0], dtype=dtype)\n elif dtype.kind == "b":\n return pd.array([True, True, False], dtype=dtype)\n return pd.array([1, 2, 0], dtype=dtype)\n\n\n@pytest.fixture\ndef data_missing_for_sorting(dtype):\n if dtype.kind == "f":\n return pd.array([0.1, pd.NA, 0.0], dtype=dtype)\n elif dtype.kind == "b":\n return pd.array([True, np.nan, False], dtype=dtype)\n return pd.array([1, pd.NA, 0], dtype=dtype)\n\n\n@pytest.fixture\ndef na_cmp():\n # we are pd.NA\n return lambda x, y: x is pd.NA and y is pd.NA\n\n\n@pytest.fixture\ndef data_for_grouping(dtype):\n if dtype.kind == "f":\n b = 0.1\n a = 0.0\n c = 0.2\n elif dtype.kind == "b":\n b = True\n a = False\n c = b\n else:\n b = 1\n a = 0\n c = 2\n\n na = pd.NA\n return pd.array([b, b, na, na, a, a, b, c], dtype=dtype)\n\n\nclass TestMaskedArrays(base.ExtensionTests):\n @pytest.mark.parametrize("na_action", [None, "ignore"])\n def test_map(self, data_missing, na_action):\n result = 
data_missing.map(lambda x: x, na_action=na_action)\n if data_missing.dtype == Float32Dtype():\n # map roundtrips through objects, which converts to float64\n expected = data_missing.to_numpy(dtype="float64", na_value=np.nan)\n else:\n expected = data_missing.to_numpy()\n tm.assert_numpy_array_equal(result, expected)\n\n def test_map_na_action_ignore(self, data_missing_for_sorting):\n zero = data_missing_for_sorting[2]\n result = data_missing_for_sorting.map(lambda x: zero, na_action="ignore")\n if data_missing_for_sorting.dtype.kind == "b":\n expected = np.array([False, pd.NA, False], dtype=object)\n else:\n expected = np.array([zero, np.nan, zero])\n tm.assert_numpy_array_equal(result, expected)\n\n def _get_expected_exception(self, op_name, obj, other):\n try:\n dtype = tm.get_dtype(obj)\n except AttributeError:\n # passed arguments reversed\n dtype = tm.get_dtype(other)\n\n if dtype.kind == "b":\n if op_name.strip("_").lstrip("r") in ["pow", "truediv", "floordiv"]:\n # match behavior with non-masked bool dtype\n return NotImplementedError\n elif op_name in ["__sub__", "__rsub__"]:\n # exception message would include "numpy boolean subtract""\n return TypeError\n return None\n return None\n\n def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result):\n sdtype = tm.get_dtype(obj)\n expected = pointwise_result\n\n if op_name in ("eq", "ne", "le", "ge", "lt", "gt"):\n return expected.astype("boolean")\n\n if sdtype.kind in "iu":\n if op_name in ("__rtruediv__", "__truediv__", "__div__"):\n with warnings.catch_warnings():\n warnings.filterwarnings(\n "ignore",\n "Downcasting object dtype arrays",\n category=FutureWarning,\n )\n filled = expected.fillna(np.nan)\n expected = filled.astype("Float64")\n else:\n # combine method result in 'biggest' (int64) dtype\n expected = expected.astype(sdtype)\n elif sdtype.kind == "b":\n if op_name in (\n "__floordiv__",\n "__rfloordiv__",\n "__pow__",\n "__rpow__",\n "__mod__",\n "__rmod__",\n ):\n # combine 
keeps boolean type\n expected = expected.astype("Int8")\n\n elif op_name in ("__truediv__", "__rtruediv__"):\n # combine with bools does not generate the correct result\n # (numpy behaviour for div is to regard the bools as numeric)\n op = self.get_op_from_name(op_name)\n expected = self._combine(obj.astype(float), other, op)\n expected = expected.astype("Float64")\n\n if op_name == "__rpow__":\n # for rpow, combine does not propagate NaN\n result = getattr(obj, op_name)(other)\n expected[result.isna()] = np.nan\n else:\n # combine method result in 'biggest' (float64) dtype\n expected = expected.astype(sdtype)\n return expected\n\n def test_divmod_series_array(self, data, data_for_twos, request):\n if data.dtype.kind == "b":\n mark = pytest.mark.xfail(\n reason="Inconsistency between floordiv and divmod; we raise for "\n "floordiv but not for divmod. This matches what we do for "\n "non-masked bool dtype."\n )\n request.applymarker(mark)\n super().test_divmod_series_array(data, data_for_twos)\n\n def test_combine_le(self, data_repeated):\n # TODO: patching self is a bad pattern here\n orig_data1, orig_data2 = data_repeated(2)\n if orig_data1.dtype.kind == "b":\n self._combine_le_expected_dtype = "boolean"\n else:\n # TODO: can we make this boolean?\n self._combine_le_expected_dtype = object\n super().test_combine_le(data_repeated)\n\n def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:\n if op_name in ["any", "all"] and ser.dtype.kind != "b":\n pytest.skip(reason="Tested in tests/reductions/test_reductions.py")\n return True\n\n def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):\n # overwrite to ensure pd.NA is tested instead of np.nan\n # https://github.com/pandas-dev/pandas/issues/30958\n\n cmp_dtype = "int64"\n if ser.dtype.kind == "f":\n # Item "dtype[Any]" of "Union[dtype[Any], ExtensionDtype]" has\n # no attribute "numpy_dtype"\n cmp_dtype = ser.dtype.numpy_dtype # type: ignore[union-attr]\n elif ser.dtype.kind == "b":\n if 
op_name in ["min", "max"]:\n cmp_dtype = "bool"\n\n # TODO: prod with integer dtypes does *not* match the result we would\n # get if we used object for cmp_dtype. In that cae the object result\n # is a large integer while the non-object case overflows and returns 0\n alt = ser.dropna().astype(cmp_dtype)\n if op_name == "count":\n result = getattr(ser, op_name)()\n expected = getattr(alt, op_name)()\n else:\n result = getattr(ser, op_name)(skipna=skipna)\n expected = getattr(alt, op_name)(skipna=skipna)\n if not skipna and ser.isna().any() and op_name not in ["any", "all"]:\n expected = pd.NA\n tm.assert_almost_equal(result, expected)\n\n def _get_expected_reduction_dtype(self, arr, op_name: str, skipna: bool):\n if is_float_dtype(arr.dtype):\n cmp_dtype = arr.dtype.name\n elif op_name in ["mean", "median", "var", "std", "skew"]:\n cmp_dtype = "Float64"\n elif op_name in ["max", "min"]:\n cmp_dtype = arr.dtype.name\n elif arr.dtype in ["Int64", "UInt64"]:\n cmp_dtype = arr.dtype.name\n elif is_signed_integer_dtype(arr.dtype):\n # TODO: Why does Window Numpy 2.0 dtype depend on skipna?\n cmp_dtype = (\n "Int32"\n if (is_platform_windows() and (not np_version_gt2 or not skipna))\n or not IS64\n else "Int64"\n )\n elif is_unsigned_integer_dtype(arr.dtype):\n cmp_dtype = (\n "UInt32"\n if (is_platform_windows() and (not np_version_gt2 or not skipna))\n or not IS64\n else "UInt64"\n )\n elif arr.dtype.kind == "b":\n if op_name in ["mean", "median", "var", "std", "skew"]:\n cmp_dtype = "Float64"\n elif op_name in ["min", "max"]:\n cmp_dtype = "boolean"\n elif op_name in ["sum", "prod"]:\n cmp_dtype = (\n "Int32"\n if (is_platform_windows() and (not np_version_gt2 or not skipna))\n or not IS64\n else "Int64"\n )\n else:\n raise TypeError("not supposed to reach this")\n else:\n raise TypeError("not supposed to reach this")\n return cmp_dtype\n\n def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool:\n return True\n\n def check_accumulate(self, ser: 
pd.Series, op_name: str, skipna: bool):\n # overwrite to ensure pd.NA is tested instead of np.nan\n # https://github.com/pandas-dev/pandas/issues/30958\n length = 64\n if is_windows_or_32bit:\n # Item "ExtensionDtype" of "Union[dtype[Any], ExtensionDtype]" has\n # no attribute "itemsize"\n if not ser.dtype.itemsize == 8: # type: ignore[union-attr]\n length = 32\n\n if ser.dtype.name.startswith("U"):\n expected_dtype = f"UInt{length}"\n elif ser.dtype.name.startswith("I"):\n expected_dtype = f"Int{length}"\n elif ser.dtype.name.startswith("F"):\n # Incompatible types in assignment (expression has type\n # "Union[dtype[Any], ExtensionDtype]", variable has type "str")\n expected_dtype = ser.dtype # type: ignore[assignment]\n elif ser.dtype.kind == "b":\n if op_name in ("cummin", "cummax"):\n expected_dtype = "boolean"\n else:\n expected_dtype = f"Int{length}"\n\n if expected_dtype == "Float32" and op_name == "cumprod" and skipna:\n # TODO: xfail?\n pytest.skip(\n f"Float32 precision lead to large differences with op {op_name} "\n f"and skipna={skipna}"\n )\n\n if op_name == "cumsum":\n result = getattr(ser, op_name)(skipna=skipna)\n expected = pd.Series(\n pd.array(\n getattr(ser.astype("float64"), op_name)(skipna=skipna),\n dtype=expected_dtype,\n )\n )\n tm.assert_series_equal(result, expected)\n elif op_name in ["cummax", "cummin"]:\n result = getattr(ser, op_name)(skipna=skipna)\n expected = pd.Series(\n pd.array(\n getattr(ser.astype("float64"), op_name)(skipna=skipna),\n dtype=ser.dtype,\n )\n )\n tm.assert_series_equal(result, expected)\n elif op_name == "cumprod":\n result = getattr(ser[:12], op_name)(skipna=skipna)\n expected = pd.Series(\n pd.array(\n getattr(ser[:12].astype("float64"), op_name)(skipna=skipna),\n dtype=expected_dtype,\n )\n )\n tm.assert_series_equal(result, expected)\n\n else:\n raise NotImplementedError(f"{op_name} not supported")\n\n\nclass Test2DCompat(base.Dim2CompatTests):\n pass\n | 
.venv\Lib\site-packages\pandas\tests\extension\test_masked.py | test_masked.py | Python | 14,338 | 0.95 | 0.172662 | 0.080556 | vue-tools | 222 | 2024-04-05T01:58:10.737072 | MIT | true | 7a36ed0cbefac30727907f51a035af94 |
"""\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\n\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\n\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\nNote: we do not bother with base.BaseIndexTests because NumpyExtensionArray\nwill never be held in an Index.\n"""\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.dtypes import NumpyEADtype\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.api.types import is_object_dtype\nfrom pandas.core.arrays.numpy_ import NumpyExtensionArray\nfrom pandas.tests.extension import base\n\norig_assert_attr_equal = tm.assert_attr_equal\n\n\ndef _assert_attr_equal(attr: str, left, right, obj: str = "Attributes"):\n """\n patch tm.assert_attr_equal so NumpyEADtype("object") is closed enough to\n np.dtype("object")\n """\n if attr == "dtype":\n lattr = getattr(left, "dtype", None)\n rattr = getattr(right, "dtype", None)\n if isinstance(lattr, NumpyEADtype) and not isinstance(rattr, NumpyEADtype):\n left = left.astype(lattr.numpy_dtype)\n elif isinstance(rattr, NumpyEADtype) and not isinstance(lattr, NumpyEADtype):\n right = right.astype(rattr.numpy_dtype)\n\n orig_assert_attr_equal(attr, left, right, obj)\n\n\n@pytest.fixture(params=["float", "object"])\ndef dtype(request):\n return NumpyEADtype(np.dtype(request.param))\n\n\n@pytest.fixture\ndef allow_in_pandas(monkeypatch):\n """\n A monkeypatch to tells pandas to let us in.\n\n By default, passing a NumpyExtensionArray to an index / series / frame\n constructor will unbox that 
NumpyExtensionArray to an ndarray, and treat\n it as a non-EA column. We don't want people using EAs without\n reason.\n\n The mechanism for this is a check against ABCNumpyExtensionArray\n in each constructor.\n\n But, for testing, we need to allow them in pandas. So we patch\n the _typ of NumpyExtensionArray, so that we evade the ABCNumpyExtensionArray\n check.\n """\n with monkeypatch.context() as m:\n m.setattr(NumpyExtensionArray, "_typ", "extension")\n m.setattr(tm.asserters, "assert_attr_equal", _assert_attr_equal)\n yield\n\n\n@pytest.fixture\ndef data(allow_in_pandas, dtype):\n if dtype.numpy_dtype == "object":\n return pd.Series([(i,) for i in range(100)]).array\n return NumpyExtensionArray(np.arange(1, 101, dtype=dtype._dtype))\n\n\n@pytest.fixture\ndef data_missing(allow_in_pandas, dtype):\n if dtype.numpy_dtype == "object":\n return NumpyExtensionArray(np.array([np.nan, (1,)], dtype=object))\n return NumpyExtensionArray(np.array([np.nan, 1.0]))\n\n\n@pytest.fixture\ndef na_cmp():\n def cmp(a, b):\n return np.isnan(a) and np.isnan(b)\n\n return cmp\n\n\n@pytest.fixture\ndef data_for_sorting(allow_in_pandas, dtype):\n """Length-3 array with a known sort order.\n\n This should be three items [B, C, A] with\n A < B < C\n """\n if dtype.numpy_dtype == "object":\n # Use an empty tuple for first element, then remove,\n # to disable np.array's shape inference.\n return NumpyExtensionArray(np.array([(), (2,), (3,), (1,)], dtype=object)[1:])\n return NumpyExtensionArray(np.array([1, 2, 0]))\n\n\n@pytest.fixture\ndef data_missing_for_sorting(allow_in_pandas, dtype):\n """Length-3 array with a known sort order.\n\n This should be three items [B, NA, A] with\n A < B and NA missing.\n """\n if dtype.numpy_dtype == "object":\n return NumpyExtensionArray(np.array([(1,), np.nan, (0,)], dtype=object))\n return NumpyExtensionArray(np.array([1, np.nan, 0]))\n\n\n@pytest.fixture\ndef data_for_grouping(allow_in_pandas, dtype):\n """Data for factorization, grouping, and 
unique tests.\n\n Expected to be like [B, B, NA, NA, A, A, B, C]\n\n Where A < B < C and NA is missing\n """\n if dtype.numpy_dtype == "object":\n a, b, c = (1,), (2,), (3,)\n else:\n a, b, c = np.arange(3)\n return NumpyExtensionArray(\n np.array([b, b, np.nan, np.nan, a, a, b, c], dtype=dtype.numpy_dtype)\n )\n\n\n@pytest.fixture\ndef data_for_twos(dtype):\n if dtype.kind == "O":\n pytest.skip(f"{dtype} is not a numeric dtype")\n arr = np.ones(100) * 2\n return NumpyExtensionArray._from_sequence(arr, dtype=dtype)\n\n\n@pytest.fixture\ndef skip_numpy_object(dtype, request):\n """\n Tests for NumpyExtensionArray with nested data. Users typically won't create\n these objects via `pd.array`, but they can show up through `.array`\n on a Series with nested data. Many of the base tests fail, as they aren't\n appropriate for nested data.\n\n This fixture allows these tests to be skipped when used as a usefixtures\n marker to either an individual test or a test class.\n """\n if dtype == "object":\n mark = pytest.mark.xfail(reason="Fails for object dtype")\n request.applymarker(mark)\n\n\nskip_nested = pytest.mark.usefixtures("skip_numpy_object")\n\n\nclass TestNumpyExtensionArray(base.ExtensionTests):\n @pytest.mark.skip(reason="We don't register our dtype")\n # We don't want to register. 
This test should probably be split in two.\n def test_from_dtype(self, data):\n pass\n\n @skip_nested\n def test_series_constructor_scalar_with_index(self, data, dtype):\n # ValueError: Length of passed values is 1, index implies 3.\n super().test_series_constructor_scalar_with_index(data, dtype)\n\n def test_check_dtype(self, data, request, using_infer_string):\n if data.dtype.numpy_dtype == "object":\n request.applymarker(\n pytest.mark.xfail(\n reason=f"NumpyExtensionArray expectedly clashes with a "\n f"NumPy name: {data.dtype.numpy_dtype}"\n )\n )\n super().test_check_dtype(data)\n\n def test_is_not_object_type(self, dtype, request):\n if dtype.numpy_dtype == "object":\n # Different from BaseDtypeTests.test_is_not_object_type\n # because NumpyEADtype(object) is an object type\n assert is_object_dtype(dtype)\n else:\n super().test_is_not_object_type(dtype)\n\n @skip_nested\n def test_getitem_scalar(self, data):\n # AssertionError\n super().test_getitem_scalar(data)\n\n @skip_nested\n def test_shift_fill_value(self, data):\n # np.array shape inference. Shift implementation fails.\n super().test_shift_fill_value(data)\n\n @skip_nested\n def test_fillna_copy_frame(self, data_missing):\n # The "scalar" for this array isn't a scalar.\n super().test_fillna_copy_frame(data_missing)\n\n @skip_nested\n def test_fillna_copy_series(self, data_missing):\n # The "scalar" for this array isn't a scalar.\n super().test_fillna_copy_series(data_missing)\n\n @skip_nested\n def test_searchsorted(self, data_for_sorting, as_series):\n # TODO: NumpyExtensionArray.searchsorted calls ndarray.searchsorted which\n # isn't quite what we want in nested data cases. 
Instead we need to\n # adapt something like libindex._bin_search.\n super().test_searchsorted(data_for_sorting, as_series)\n\n @pytest.mark.xfail(reason="NumpyExtensionArray.diff may fail on dtype")\n def test_diff(self, data, periods):\n return super().test_diff(data, periods)\n\n def test_insert(self, data, request):\n if data.dtype.numpy_dtype == object:\n mark = pytest.mark.xfail(reason="Dimension mismatch in np.concatenate")\n request.applymarker(mark)\n\n super().test_insert(data)\n\n @skip_nested\n def test_insert_invalid(self, data, invalid_scalar):\n # NumpyExtensionArray[object] can hold anything, so skip\n super().test_insert_invalid(data, invalid_scalar)\n\n divmod_exc = None\n series_scalar_exc = None\n frame_scalar_exc = None\n series_array_exc = None\n\n def test_divmod(self, data):\n divmod_exc = None\n if data.dtype.kind == "O":\n divmod_exc = TypeError\n self.divmod_exc = divmod_exc\n super().test_divmod(data)\n\n def test_divmod_series_array(self, data):\n ser = pd.Series(data)\n exc = None\n if data.dtype.kind == "O":\n exc = TypeError\n self.divmod_exc = exc\n self._check_divmod_op(ser, divmod, data)\n\n def test_arith_series_with_scalar(self, data, all_arithmetic_operators, request):\n opname = all_arithmetic_operators\n series_scalar_exc = None\n if data.dtype.numpy_dtype == object:\n if opname in ["__mul__", "__rmul__"]:\n mark = pytest.mark.xfail(\n reason="the Series.combine step raises but not the Series method."\n )\n request.node.add_marker(mark)\n series_scalar_exc = TypeError\n self.series_scalar_exc = series_scalar_exc\n super().test_arith_series_with_scalar(data, all_arithmetic_operators)\n\n def test_arith_series_with_array(self, data, all_arithmetic_operators):\n opname = all_arithmetic_operators\n series_array_exc = None\n if data.dtype.numpy_dtype == object and opname not in ["__add__", "__radd__"]:\n series_array_exc = TypeError\n self.series_array_exc = series_array_exc\n super().test_arith_series_with_array(data, 
all_arithmetic_operators)\n\n def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):\n opname = all_arithmetic_operators\n frame_scalar_exc = None\n if data.dtype.numpy_dtype == object:\n if opname in ["__mul__", "__rmul__"]:\n mark = pytest.mark.xfail(\n reason="the Series.combine step raises but not the Series method."\n )\n request.node.add_marker(mark)\n frame_scalar_exc = TypeError\n self.frame_scalar_exc = frame_scalar_exc\n super().test_arith_frame_with_scalar(data, all_arithmetic_operators)\n\n def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:\n if ser.dtype.kind == "O":\n return op_name in ["sum", "min", "max", "any", "all"]\n return True\n\n def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):\n res_op = getattr(ser, op_name)\n # avoid coercing int -> float. Just cast to the actual numpy type.\n # error: Item "ExtensionDtype" of "dtype[Any] | ExtensionDtype" has\n # no attribute "numpy_dtype"\n cmp_dtype = ser.dtype.numpy_dtype # type: ignore[union-attr]\n alt = ser.astype(cmp_dtype)\n exp_op = getattr(alt, op_name)\n if op_name == "count":\n result = res_op()\n expected = exp_op()\n else:\n result = res_op(skipna=skipna)\n expected = exp_op(skipna=skipna)\n tm.assert_almost_equal(result, expected)\n\n @pytest.mark.skip("TODO: tests not written yet")\n @pytest.mark.parametrize("skipna", [True, False])\n def test_reduce_frame(self, data, all_numeric_reductions, skipna):\n pass\n\n @skip_nested\n def test_fillna_series(self, data_missing):\n # Non-scalar "scalar" values.\n super().test_fillna_series(data_missing)\n\n @skip_nested\n def test_fillna_frame(self, data_missing):\n # Non-scalar "scalar" values.\n super().test_fillna_frame(data_missing)\n\n @skip_nested\n def test_setitem_invalid(self, data, invalid_scalar):\n # object dtype can hold anything, so doesn't raise\n super().test_setitem_invalid(data, invalid_scalar)\n\n @skip_nested\n def test_setitem_sequence_broadcasts(self, data, 
box_in_series):\n # ValueError: cannot set using a list-like indexer with a different\n # length than the value\n super().test_setitem_sequence_broadcasts(data, box_in_series)\n\n @skip_nested\n @pytest.mark.parametrize("setter", ["loc", None])\n def test_setitem_mask_broadcast(self, data, setter):\n # ValueError: cannot set using a list-like indexer with a different\n # length than the value\n super().test_setitem_mask_broadcast(data, setter)\n\n @skip_nested\n def test_setitem_scalar_key_sequence_raise(self, data):\n # Failed: DID NOT RAISE <class 'ValueError'>\n super().test_setitem_scalar_key_sequence_raise(data)\n\n # TODO: there is some issue with NumpyExtensionArray, therefore,\n # skip the setitem test for now, and fix it later (GH 31446)\n\n @skip_nested\n @pytest.mark.parametrize(\n "mask",\n [\n np.array([True, True, True, False, False]),\n pd.array([True, True, True, False, False], dtype="boolean"),\n ],\n ids=["numpy-array", "boolean-array"],\n )\n def test_setitem_mask(self, data, mask, box_in_series):\n super().test_setitem_mask(data, mask, box_in_series)\n\n @skip_nested\n @pytest.mark.parametrize(\n "idx",\n [[0, 1, 2], pd.array([0, 1, 2], dtype="Int64"), np.array([0, 1, 2])],\n ids=["list", "integer-array", "numpy-array"],\n )\n def test_setitem_integer_array(self, data, idx, box_in_series):\n super().test_setitem_integer_array(data, idx, box_in_series)\n\n @pytest.mark.parametrize(\n "idx, box_in_series",\n [\n ([0, 1, 2, pd.NA], False),\n pytest.param([0, 1, 2, pd.NA], True, marks=pytest.mark.xfail),\n (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),\n (pd.array([0, 1, 2, pd.NA], dtype="Int64"), False),\n ],\n ids=["list-False", "list-True", "integer-array-False", "integer-array-True"],\n )\n def test_setitem_integer_with_missing_raises(self, data, idx, box_in_series):\n super().test_setitem_integer_with_missing_raises(data, idx, box_in_series)\n\n @skip_nested\n def test_setitem_slice(self, data, box_in_series):\n 
super().test_setitem_slice(data, box_in_series)\n\n @skip_nested\n def test_setitem_loc_iloc_slice(self, data):\n super().test_setitem_loc_iloc_slice(data)\n\n def test_setitem_with_expansion_dataframe_column(self, data, full_indexer):\n # https://github.com/pandas-dev/pandas/issues/32395\n df = expected = pd.DataFrame({"data": pd.Series(data)})\n result = pd.DataFrame(index=df.index)\n\n # because result has object dtype, the attempt to do setting inplace\n # is successful, and object dtype is retained\n key = full_indexer(df)\n result.loc[key, "data"] = df["data"]\n\n # base class method has expected = df; NumpyExtensionArray behaves oddly because\n # we patch _typ for these tests.\n if data.dtype.numpy_dtype != object:\n if not isinstance(key, slice) or key != slice(None):\n expected = pd.DataFrame({"data": data.to_numpy()})\n tm.assert_frame_equal(result, expected, check_column_type=False)\n\n @pytest.mark.xfail(reason="NumpyEADtype is unpacked")\n def test_index_from_listlike_with_dtype(self, data):\n super().test_index_from_listlike_with_dtype(data)\n\n @skip_nested\n @pytest.mark.parametrize("engine", ["c", "python"])\n def test_EA_types(self, engine, data, request):\n super().test_EA_types(engine, data, request)\n\n\nclass Test2DCompat(base.NDArrayBacked2DTests):\n pass\n | .venv\Lib\site-packages\pandas\tests\extension\test_numpy.py | test_numpy.py | Python | 15,586 | 0.95 | 0.213615 | 0.093023 | vue-tools | 692 | 2024-11-06T09:32:57.200346 | BSD-3-Clause | true | 8d8cf26842a8c8f9bb1b182d6de9e83c |
"""\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\n\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\n\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\n"""\nfrom __future__ import annotations\n\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import (\n Period,\n iNaT,\n)\nfrom pandas.compat import is_platform_windows\nfrom pandas.compat.numpy import np_version_gte1p24\n\nfrom pandas.core.dtypes.dtypes import PeriodDtype\n\nimport pandas._testing as tm\nfrom pandas.core.arrays import PeriodArray\nfrom pandas.tests.extension import base\n\nif TYPE_CHECKING:\n import pandas as pd\n\n\n@pytest.fixture(params=["D", "2D"])\ndef dtype(request):\n return PeriodDtype(freq=request.param)\n\n\n@pytest.fixture\ndef data(dtype):\n return PeriodArray(np.arange(1970, 2070), dtype=dtype)\n\n\n@pytest.fixture\ndef data_for_sorting(dtype):\n return PeriodArray([2018, 2019, 2017], dtype=dtype)\n\n\n@pytest.fixture\ndef data_missing(dtype):\n return PeriodArray([iNaT, 2017], dtype=dtype)\n\n\n@pytest.fixture\ndef data_missing_for_sorting(dtype):\n return PeriodArray([2018, iNaT, 2017], dtype=dtype)\n\n\n@pytest.fixture\ndef data_for_grouping(dtype):\n B = 2018\n NA = iNaT\n A = 2017\n C = 2019\n return PeriodArray([B, B, NA, NA, A, A, B, C], dtype=dtype)\n\n\nclass TestPeriodArray(base.ExtensionTests):\n def _get_expected_exception(self, op_name, obj, other):\n if op_name in ("__sub__", "__rsub__"):\n return None\n return super()._get_expected_exception(op_name, obj, 
other)\n\n def _supports_accumulation(self, ser, op_name: str) -> bool:\n return op_name in ["cummin", "cummax"]\n\n def _supports_reduction(self, obj, op_name: str) -> bool:\n return op_name in ["min", "max", "median"]\n\n def check_reduce(self, ser: pd.Series, op_name: str, skipna: bool):\n if op_name == "median":\n res_op = getattr(ser, op_name)\n\n alt = ser.astype("int64")\n\n exp_op = getattr(alt, op_name)\n result = res_op(skipna=skipna)\n expected = exp_op(skipna=skipna)\n # error: Item "dtype[Any]" of "dtype[Any] | ExtensionDtype" has no\n # attribute "freq"\n freq = ser.dtype.freq # type: ignore[union-attr]\n expected = Period._from_ordinal(int(expected), freq=freq)\n tm.assert_almost_equal(result, expected)\n\n else:\n return super().check_reduce(ser, op_name, skipna)\n\n @pytest.mark.parametrize("periods", [1, -2])\n def test_diff(self, data, periods):\n if is_platform_windows() and np_version_gte1p24:\n with tm.assert_produces_warning(RuntimeWarning, check_stacklevel=False):\n super().test_diff(data, periods)\n else:\n super().test_diff(data, periods)\n\n @pytest.mark.parametrize("na_action", [None, "ignore"])\n def test_map(self, data, na_action):\n result = data.map(lambda x: x, na_action=na_action)\n tm.assert_extension_array_equal(result, data)\n\n\nclass Test2DCompat(base.NDArrayBacked2DTests):\n pass\n | .venv\Lib\site-packages\pandas\tests\extension\test_period.py | test_period.py | Python | 3,528 | 0.95 | 0.193277 | 0.023256 | node-utils | 231 | 2025-06-04T06:53:31.475917 | Apache-2.0 | true | f3ee22bef04ab5f3cd8434e200f388f5 |
"""\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\n\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\n\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\n"""\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import PerformanceWarning\n\nimport pandas as pd\nfrom pandas import SparseDtype\nimport pandas._testing as tm\nfrom pandas.arrays import SparseArray\nfrom pandas.tests.extension import base\n\n\ndef make_data(fill_value):\n rng = np.random.default_rng(2)\n if np.isnan(fill_value):\n data = rng.uniform(size=100)\n else:\n data = rng.integers(1, 100, size=100, dtype=int)\n if data[0] == data[1]:\n data[0] += 1\n\n data[2::3] = fill_value\n return data\n\n\n@pytest.fixture\ndef dtype():\n return SparseDtype()\n\n\n@pytest.fixture(params=[0, np.nan])\ndef data(request):\n """Length-100 PeriodArray for semantics test."""\n res = SparseArray(make_data(request.param), fill_value=request.param)\n return res\n\n\n@pytest.fixture\ndef data_for_twos():\n return SparseArray(np.ones(100) * 2)\n\n\n@pytest.fixture(params=[0, np.nan])\ndef data_missing(request):\n """Length 2 array with [NA, Valid]"""\n return SparseArray([np.nan, 1], fill_value=request.param)\n\n\n@pytest.fixture(params=[0, np.nan])\ndef data_repeated(request):\n """Return different versions of data for count times"""\n\n def gen(count):\n for _ in range(count):\n yield SparseArray(make_data(request.param), fill_value=request.param)\n\n yield gen\n\n\n@pytest.fixture(params=[0, np.nan])\ndef data_for_sorting(request):\n return 
SparseArray([2, 3, 1], fill_value=request.param)\n\n\n@pytest.fixture(params=[0, np.nan])\ndef data_missing_for_sorting(request):\n return SparseArray([2, np.nan, 1], fill_value=request.param)\n\n\n@pytest.fixture\ndef na_cmp():\n return lambda left, right: pd.isna(left) and pd.isna(right)\n\n\n@pytest.fixture(params=[0, np.nan])\ndef data_for_grouping(request):\n return SparseArray([1, 1, np.nan, np.nan, 2, 2, 1, 3], fill_value=request.param)\n\n\n@pytest.fixture(params=[0, np.nan])\ndef data_for_compare(request):\n return SparseArray([0, 0, np.nan, -2, -1, 4, 2, 3, 0, 0], fill_value=request.param)\n\n\nclass TestSparseArray(base.ExtensionTests):\n def _supports_reduction(self, obj, op_name: str) -> bool:\n return True\n\n @pytest.mark.parametrize("skipna", [True, False])\n def test_reduce_series_numeric(self, data, all_numeric_reductions, skipna, request):\n if all_numeric_reductions in [\n "prod",\n "median",\n "var",\n "std",\n "sem",\n "skew",\n "kurt",\n ]:\n mark = pytest.mark.xfail(\n reason="This should be viable but is not implemented"\n )\n request.node.add_marker(mark)\n elif (\n all_numeric_reductions in ["sum", "max", "min", "mean"]\n and data.dtype.kind == "f"\n and not skipna\n ):\n mark = pytest.mark.xfail(reason="getting a non-nan float")\n request.node.add_marker(mark)\n\n super().test_reduce_series_numeric(data, all_numeric_reductions, skipna)\n\n @pytest.mark.parametrize("skipna", [True, False])\n def test_reduce_frame(self, data, all_numeric_reductions, skipna, request):\n if all_numeric_reductions in [\n "prod",\n "median",\n "var",\n "std",\n "sem",\n "skew",\n "kurt",\n ]:\n mark = pytest.mark.xfail(\n reason="This should be viable but is not implemented"\n )\n request.node.add_marker(mark)\n elif (\n all_numeric_reductions in ["sum", "max", "min", "mean"]\n and data.dtype.kind == "f"\n and not skipna\n ):\n mark = pytest.mark.xfail(reason="ExtensionArray NA mask are different")\n request.node.add_marker(mark)\n\n 
super().test_reduce_frame(data, all_numeric_reductions, skipna)\n\n def _check_unsupported(self, data):\n if data.dtype == SparseDtype(int, 0):\n pytest.skip("Can't store nan in int array.")\n\n def test_concat_mixed_dtypes(self, data):\n # https://github.com/pandas-dev/pandas/issues/20762\n # This should be the same, aside from concat([sparse, float])\n df1 = pd.DataFrame({"A": data[:3]})\n df2 = pd.DataFrame({"A": [1, 2, 3]})\n df3 = pd.DataFrame({"A": ["a", "b", "c"]}).astype("category")\n dfs = [df1, df2, df3]\n\n # dataframes\n result = pd.concat(dfs)\n expected = pd.concat(\n [x.apply(lambda s: np.asarray(s).astype(object)) for x in dfs]\n )\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.filterwarnings(\n "ignore:The previous implementation of stack is deprecated"\n )\n @pytest.mark.parametrize(\n "columns",\n [\n ["A", "B"],\n pd.MultiIndex.from_tuples(\n [("A", "a"), ("A", "b")], names=["outer", "inner"]\n ),\n ],\n )\n @pytest.mark.parametrize("future_stack", [True, False])\n def test_stack(self, data, columns, future_stack):\n super().test_stack(data, columns, future_stack)\n\n def test_concat_columns(self, data, na_value):\n self._check_unsupported(data)\n super().test_concat_columns(data, na_value)\n\n def test_concat_extension_arrays_copy_false(self, data, na_value):\n self._check_unsupported(data)\n super().test_concat_extension_arrays_copy_false(data, na_value)\n\n def test_align(self, data, na_value):\n self._check_unsupported(data)\n super().test_align(data, na_value)\n\n def test_align_frame(self, data, na_value):\n self._check_unsupported(data)\n super().test_align_frame(data, na_value)\n\n def test_align_series_frame(self, data, na_value):\n self._check_unsupported(data)\n super().test_align_series_frame(data, na_value)\n\n def test_merge(self, data, na_value):\n self._check_unsupported(data)\n super().test_merge(data, na_value)\n\n def test_get(self, data):\n ser = pd.Series(data, index=[2 * i for i in range(len(data))])\n if 
np.isnan(ser.values.fill_value):\n assert np.isnan(ser.get(4)) and np.isnan(ser.iloc[2])\n else:\n assert ser.get(4) == ser.iloc[2]\n assert ser.get(2) == ser.iloc[1]\n\n def test_reindex(self, data, na_value):\n self._check_unsupported(data)\n super().test_reindex(data, na_value)\n\n def test_isna(self, data_missing):\n sarr = SparseArray(data_missing)\n expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value))\n expected = SparseArray([True, False], dtype=expected_dtype)\n result = sarr.isna()\n tm.assert_sp_array_equal(result, expected)\n\n # test isna for arr without na\n sarr = sarr.fillna(0)\n expected_dtype = SparseDtype(bool, pd.isna(data_missing.dtype.fill_value))\n expected = SparseArray([False, False], fill_value=False, dtype=expected_dtype)\n tm.assert_equal(sarr.isna(), expected)\n\n def test_fillna_limit_backfill(self, data_missing):\n warns = (PerformanceWarning, FutureWarning)\n with tm.assert_produces_warning(warns, check_stacklevel=False):\n super().test_fillna_limit_backfill(data_missing)\n\n def test_fillna_no_op_returns_copy(self, data, request):\n if np.isnan(data.fill_value):\n request.applymarker(\n pytest.mark.xfail(reason="returns array with different fill value")\n )\n super().test_fillna_no_op_returns_copy(data)\n\n @pytest.mark.xfail(reason="Unsupported")\n def test_fillna_series(self, data_missing):\n # this one looks doable.\n # TODO: this fails bc we do not pass through data_missing. 
If we did,\n # the 0-fill case would xpass\n super().test_fillna_series()\n\n def test_fillna_frame(self, data_missing):\n # Have to override to specify that fill_value will change.\n fill_value = data_missing[1]\n\n result = pd.DataFrame({"A": data_missing, "B": [1, 2]}).fillna(fill_value)\n\n if pd.isna(data_missing.fill_value):\n dtype = SparseDtype(data_missing.dtype, fill_value)\n else:\n dtype = data_missing.dtype\n\n expected = pd.DataFrame(\n {\n "A": data_missing._from_sequence([fill_value, fill_value], dtype=dtype),\n "B": [1, 2],\n }\n )\n\n tm.assert_frame_equal(result, expected)\n\n _combine_le_expected_dtype = "Sparse[bool]"\n\n def test_fillna_copy_frame(self, data_missing, using_copy_on_write):\n arr = data_missing.take([1, 1])\n df = pd.DataFrame({"A": arr}, copy=False)\n\n filled_val = df.iloc[0, 0]\n result = df.fillna(filled_val)\n\n if hasattr(df._mgr, "blocks"):\n if using_copy_on_write:\n assert df.values.base is result.values.base\n else:\n assert df.values.base is not result.values.base\n assert df.A._values.to_dense() is arr.to_dense()\n\n def test_fillna_copy_series(self, data_missing, using_copy_on_write):\n arr = data_missing.take([1, 1])\n ser = pd.Series(arr, copy=False)\n\n filled_val = ser[0]\n result = ser.fillna(filled_val)\n\n if using_copy_on_write:\n assert ser._values is result._values\n\n else:\n assert ser._values is not result._values\n assert ser._values.to_dense() is arr.to_dense()\n\n @pytest.mark.xfail(reason="Not Applicable")\n def test_fillna_length_mismatch(self, data_missing):\n super().test_fillna_length_mismatch(data_missing)\n\n def test_where_series(self, data, na_value):\n assert data[0] != data[1]\n cls = type(data)\n a, b = data[:2]\n\n ser = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))\n\n cond = np.array([True, True, False, False])\n result = ser.where(cond)\n\n new_dtype = SparseDtype("float", 0.0)\n expected = pd.Series(\n cls._from_sequence([a, a, na_value, na_value], dtype=new_dtype)\n 
)\n tm.assert_series_equal(result, expected)\n\n other = cls._from_sequence([a, b, a, b], dtype=data.dtype)\n cond = np.array([True, False, True, True])\n result = ser.where(cond, other)\n expected = pd.Series(cls._from_sequence([a, b, b, b], dtype=data.dtype))\n tm.assert_series_equal(result, expected)\n\n def test_searchsorted(self, data_for_sorting, as_series):\n with tm.assert_produces_warning(PerformanceWarning, check_stacklevel=False):\n super().test_searchsorted(data_for_sorting, as_series)\n\n def test_shift_0_periods(self, data):\n # GH#33856 shifting with periods=0 should return a copy, not same obj\n result = data.shift(0)\n\n data._sparse_values[0] = data._sparse_values[1]\n assert result._sparse_values[0] != result._sparse_values[1]\n\n @pytest.mark.parametrize("method", ["argmax", "argmin"])\n def test_argmin_argmax_all_na(self, method, data, na_value):\n # overriding because Sparse[int64, 0] cannot handle na_value\n self._check_unsupported(data)\n super().test_argmin_argmax_all_na(method, data, na_value)\n\n @pytest.mark.fails_arm_wheels\n @pytest.mark.parametrize("box", [pd.array, pd.Series, pd.DataFrame])\n def test_equals(self, data, na_value, as_series, box):\n self._check_unsupported(data)\n super().test_equals(data, na_value, as_series, box)\n\n @pytest.mark.fails_arm_wheels\n def test_equals_same_data_different_object(self, data):\n super().test_equals_same_data_different_object(data)\n\n @pytest.mark.parametrize(\n "func, na_action, expected",\n [\n (lambda x: x, None, SparseArray([1.0, np.nan])),\n (lambda x: x, "ignore", SparseArray([1.0, np.nan])),\n (str, None, SparseArray(["1.0", "nan"], fill_value="nan")),\n (str, "ignore", SparseArray(["1.0", np.nan])),\n ],\n )\n def test_map(self, func, na_action, expected):\n # GH52096\n data = SparseArray([1, np.nan])\n result = data.map(func, na_action=na_action)\n tm.assert_extension_array_equal(result, expected)\n\n @pytest.mark.parametrize("na_action", [None, "ignore"])\n def 
test_map_raises(self, data, na_action):\n # GH52096\n msg = "fill value in the sparse values not supported"\n with pytest.raises(ValueError, match=msg):\n data.map(lambda x: np.nan, na_action=na_action)\n\n @pytest.mark.xfail(raises=TypeError, reason="no sparse StringDtype")\n def test_astype_string(self, data, nullable_string_dtype):\n # TODO: this fails bc we do not pass through nullable_string_dtype;\n # If we did, the 0-cases would xpass\n super().test_astype_string(data)\n\n series_scalar_exc = None\n frame_scalar_exc = None\n divmod_exc = None\n series_array_exc = None\n\n def _skip_if_different_combine(self, data):\n if data.fill_value == 0:\n # arith ops call on dtype.fill_value so that the sparsity\n # is maintained. Combine can't be called on a dtype in\n # general, so we can't make the expected. This is tested elsewhere\n pytest.skip("Incorrected expected from Series.combine and tested elsewhere")\n\n def test_arith_series_with_scalar(self, data, all_arithmetic_operators):\n self._skip_if_different_combine(data)\n super().test_arith_series_with_scalar(data, all_arithmetic_operators)\n\n def test_arith_series_with_array(self, data, all_arithmetic_operators):\n self._skip_if_different_combine(data)\n super().test_arith_series_with_array(data, all_arithmetic_operators)\n\n def test_arith_frame_with_scalar(self, data, all_arithmetic_operators, request):\n if data.dtype.fill_value != 0:\n pass\n elif all_arithmetic_operators.strip("_") not in [\n "mul",\n "rmul",\n "floordiv",\n "rfloordiv",\n "pow",\n "mod",\n "rmod",\n ]:\n mark = pytest.mark.xfail(reason="result dtype.fill_value mismatch")\n request.applymarker(mark)\n super().test_arith_frame_with_scalar(data, all_arithmetic_operators)\n\n def _compare_other(\n self, ser: pd.Series, data_for_compare: SparseArray, comparison_op, other\n ):\n op = comparison_op\n\n result = op(data_for_compare, other)\n if isinstance(other, pd.Series):\n assert isinstance(result, pd.Series)\n assert isinstance(result.dtype, 
SparseDtype)\n else:\n assert isinstance(result, SparseArray)\n assert result.dtype.subtype == np.bool_\n\n if isinstance(other, pd.Series):\n fill_value = op(data_for_compare.fill_value, other._values.fill_value)\n expected = SparseArray(\n op(data_for_compare.to_dense(), np.asarray(other)),\n fill_value=fill_value,\n dtype=np.bool_,\n )\n\n else:\n fill_value = np.all(\n op(np.asarray(data_for_compare.fill_value), np.asarray(other))\n )\n\n expected = SparseArray(\n op(data_for_compare.to_dense(), np.asarray(other)),\n fill_value=fill_value,\n dtype=np.bool_,\n )\n if isinstance(other, pd.Series):\n # error: Incompatible types in assignment\n expected = pd.Series(expected) # type: ignore[assignment]\n tm.assert_equal(result, expected)\n\n def test_scalar(self, data_for_compare: SparseArray, comparison_op):\n ser = pd.Series(data_for_compare)\n self._compare_other(ser, data_for_compare, comparison_op, 0)\n self._compare_other(ser, data_for_compare, comparison_op, 1)\n self._compare_other(ser, data_for_compare, comparison_op, -1)\n self._compare_other(ser, data_for_compare, comparison_op, np.nan)\n\n def test_array(self, data_for_compare: SparseArray, comparison_op, request):\n if data_for_compare.dtype.fill_value == 0 and comparison_op.__name__ in [\n "eq",\n "ge",\n "le",\n ]:\n mark = pytest.mark.xfail(reason="Wrong fill_value")\n request.applymarker(mark)\n\n arr = np.linspace(-4, 5, 10)\n ser = pd.Series(data_for_compare)\n self._compare_other(ser, data_for_compare, comparison_op, arr)\n\n def test_sparse_array(self, data_for_compare: SparseArray, comparison_op, request):\n if data_for_compare.dtype.fill_value == 0 and comparison_op.__name__ != "gt":\n mark = pytest.mark.xfail(reason="Wrong fill_value")\n request.applymarker(mark)\n\n ser = pd.Series(data_for_compare)\n arr = data_for_compare + 1\n self._compare_other(ser, data_for_compare, comparison_op, arr)\n arr = data_for_compare * 2\n self._compare_other(ser, data_for_compare, comparison_op, arr)\n\n 
@pytest.mark.xfail(reason="Different repr")\n def test_array_repr(self, data, size):\n super().test_array_repr(data, size)\n\n @pytest.mark.xfail(reason="result does not match expected")\n @pytest.mark.parametrize("as_index", [True, False])\n def test_groupby_extension_agg(self, as_index, data_for_grouping):\n super().test_groupby_extension_agg(as_index, data_for_grouping)\n\n\ndef test_array_type_with_arg(dtype):\n assert dtype.construct_array_type() is SparseArray\n | .venv\Lib\site-packages\pandas\tests\extension\test_sparse.py | test_sparse.py | Python | 18,011 | 0.95 | 0.166998 | 0.044776 | awesome-app | 646 | 2024-01-27T08:43:59.963673 | Apache-2.0 | true | 3204ddc1292401699f6c8e8f62e9f3a1 |
"""\nThis file contains a minimal set of tests for compliance with the extension\narray interface test suite, and should contain no other tests.\nThe test suite for the full functionality of the array is located in\n`pandas/tests/arrays/`.\n\nThe tests in this file are inherited from the BaseExtensionTests, and only\nminimal tweaks should be applied to get the tests passing (by overwriting a\nparent method).\n\nAdditional tests should either be added to one of the BaseExtensionTests\nclasses (if they are relevant for the extension interface for all dtypes), or\nbe added to the array-specific tests in `pandas/tests/arrays/`.\n\n"""\nfrom __future__ import annotations\n\nimport string\nfrom typing import cast\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import HAS_PYARROW\n\nfrom pandas.core.dtypes.base import StorageExtensionDtype\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.api.types import is_string_dtype\nfrom pandas.core.arrays import ArrowStringArray\nfrom pandas.core.arrays.string_ import StringDtype\nfrom pandas.tests.extension import base\n\n\ndef maybe_split_array(arr, chunked):\n if not chunked:\n return arr\n elif arr.dtype.storage != "pyarrow":\n return arr\n\n pa = pytest.importorskip("pyarrow")\n\n arrow_array = arr._pa_array\n split = len(arrow_array) // 2\n arrow_array = pa.chunked_array(\n [*arrow_array[:split].chunks, *arrow_array[split:].chunks]\n )\n assert arrow_array.num_chunks == 2\n return type(arr)(arrow_array)\n\n\n@pytest.fixture(params=[True, False])\ndef chunked(request):\n return request.param\n\n\n@pytest.fixture\ndef dtype(string_dtype_arguments):\n storage, na_value = string_dtype_arguments\n return StringDtype(storage=storage, na_value=na_value)\n\n\n@pytest.fixture\ndef data(dtype, chunked):\n strings = np.random.default_rng(2).choice(list(string.ascii_letters), size=100)\n while strings[0] == strings[1]:\n strings = np.random.default_rng(2).choice(list(string.ascii_letters), size=100)\n\n arr = 
dtype.construct_array_type()._from_sequence(strings, dtype=dtype)\n return maybe_split_array(arr, chunked)\n\n\n@pytest.fixture\ndef data_missing(dtype, chunked):\n """Length 2 array with [NA, Valid]"""\n arr = dtype.construct_array_type()._from_sequence([pd.NA, "A"], dtype=dtype)\n return maybe_split_array(arr, chunked)\n\n\n@pytest.fixture\ndef data_for_sorting(dtype, chunked):\n arr = dtype.construct_array_type()._from_sequence(["B", "C", "A"], dtype=dtype)\n return maybe_split_array(arr, chunked)\n\n\n@pytest.fixture\ndef data_missing_for_sorting(dtype, chunked):\n arr = dtype.construct_array_type()._from_sequence(["B", pd.NA, "A"], dtype=dtype)\n return maybe_split_array(arr, chunked)\n\n\n@pytest.fixture\ndef data_for_grouping(dtype, chunked):\n arr = dtype.construct_array_type()._from_sequence(\n ["B", "B", pd.NA, pd.NA, "A", "A", "B", "C"], dtype=dtype\n )\n return maybe_split_array(arr, chunked)\n\n\nclass TestStringArray(base.ExtensionTests):\n def test_eq_with_str(self, dtype):\n super().test_eq_with_str(dtype)\n\n if dtype.na_value is pd.NA:\n # only the NA-variant supports parametrized string alias\n assert dtype == f"string[{dtype.storage}]"\n elif dtype.storage == "pyarrow":\n with tm.assert_produces_warning(FutureWarning):\n assert dtype == "string[pyarrow_numpy]"\n\n def test_is_not_string_type(self, dtype):\n # Different from BaseDtypeTests.test_is_not_string_type\n # because StringDtype is a string type\n assert is_string_dtype(dtype)\n\n def test_is_dtype_from_name(self, dtype, using_infer_string):\n if dtype.na_value is np.nan and not using_infer_string:\n result = type(dtype).is_dtype(dtype.name)\n assert result is False\n else:\n super().test_is_dtype_from_name(dtype)\n\n def test_construct_from_string_own_name(self, dtype, using_infer_string):\n if dtype.na_value is np.nan and not using_infer_string:\n with pytest.raises(TypeError, match="Cannot construct a 'StringDtype'"):\n dtype.construct_from_string(dtype.name)\n else:\n 
super().test_construct_from_string_own_name(dtype)\n\n def test_view(self, data):\n if data.dtype.storage == "pyarrow":\n pytest.skip(reason="2D support not implemented for ArrowStringArray")\n super().test_view(data)\n\n def test_from_dtype(self, data):\n # base test uses string representation of dtype\n pass\n\n def test_transpose(self, data):\n if data.dtype.storage == "pyarrow":\n pytest.skip(reason="2D support not implemented for ArrowStringArray")\n super().test_transpose(data)\n\n def test_setitem_preserves_views(self, data):\n if data.dtype.storage == "pyarrow":\n pytest.skip(reason="2D support not implemented for ArrowStringArray")\n super().test_setitem_preserves_views(data)\n\n def test_dropna_array(self, data_missing):\n result = data_missing.dropna()\n expected = data_missing[[1]]\n tm.assert_extension_array_equal(result, expected)\n\n def test_fillna_no_op_returns_copy(self, data):\n data = data[~data.isna()]\n\n valid = data[0]\n result = data.fillna(valid)\n assert result is not data\n tm.assert_extension_array_equal(result, data)\n\n result = data.fillna(method="backfill")\n assert result is not data\n tm.assert_extension_array_equal(result, data)\n\n def _get_expected_exception(\n self, op_name: str, obj, other\n ) -> type[Exception] | tuple[type[Exception], ...] 
| None:\n if op_name in [\n "__mod__",\n "__rmod__",\n "__divmod__",\n "__rdivmod__",\n "__pow__",\n "__rpow__",\n ]:\n return TypeError\n elif op_name in ["__mul__", "__rmul__"]:\n # Can only multiply strings by integers\n return TypeError\n elif op_name in [\n "__truediv__",\n "__rtruediv__",\n "__floordiv__",\n "__rfloordiv__",\n "__sub__",\n "__rsub__",\n ]:\n return TypeError\n\n return None\n\n def _supports_reduction(self, ser: pd.Series, op_name: str) -> bool:\n return (\n op_name in ["min", "max", "sum"]\n or ser.dtype.na_value is np.nan # type: ignore[union-attr]\n and op_name in ("any", "all")\n )\n\n def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool:\n assert isinstance(ser.dtype, StorageExtensionDtype)\n return op_name in ["cummin", "cummax", "cumsum"]\n\n def _cast_pointwise_result(self, op_name: str, obj, other, pointwise_result):\n dtype = cast(StringDtype, tm.get_dtype(obj))\n if op_name in ["__add__", "__radd__"]:\n cast_to = dtype\n elif dtype.na_value is np.nan:\n cast_to = np.bool_ # type: ignore[assignment]\n elif dtype.storage == "pyarrow":\n cast_to = "boolean[pyarrow]" # type: ignore[assignment]\n else:\n cast_to = "boolean" # type: ignore[assignment]\n return pointwise_result.astype(cast_to)\n\n def test_compare_scalar(self, data, comparison_op):\n ser = pd.Series(data)\n self._compare_other(ser, data, comparison_op, "abc")\n\n def test_combine_add(self, data_repeated, using_infer_string, request):\n dtype = next(data_repeated(1)).dtype\n if using_infer_string and (\n (dtype.na_value is pd.NA) and dtype.storage == "python"\n ):\n mark = pytest.mark.xfail(\n reason="The pointwise operation result will be inferred to "\n "string[nan, pyarrow], which does not match the input dtype"\n )\n request.applymarker(mark)\n super().test_combine_add(data_repeated)\n\n def test_arith_series_with_array(\n self, data, all_arithmetic_operators, using_infer_string, request\n ):\n dtype = data.dtype\n if (\n using_infer_string\n and 
all_arithmetic_operators == "__radd__"\n and (\n (dtype.na_value is pd.NA) or (dtype.storage == "python" and HAS_PYARROW)\n )\n ):\n mark = pytest.mark.xfail(\n reason="The pointwise operation result will be inferred to "\n "string[nan, pyarrow], which does not match the input dtype"\n )\n request.applymarker(mark)\n super().test_arith_series_with_array(data, all_arithmetic_operators)\n\n\nclass Test2DCompat(base.Dim2CompatTests):\n @pytest.fixture(autouse=True)\n def arrow_not_supported(self, data):\n if isinstance(data, ArrowStringArray):\n pytest.skip(reason="2D support not implemented for ArrowStringArray")\n\n\ndef test_searchsorted_with_na_raises(data_for_sorting, as_series):\n # GH50447\n b, c, a = data_for_sorting\n arr = data_for_sorting.take([2, 0, 1]) # to get [a, b, c]\n arr[-1] = pd.NA\n\n if as_series:\n arr = pd.Series(arr)\n\n msg = (\n "searchsorted requires array to be sorted, "\n "which is impossible with NAs present."\n )\n with pytest.raises(ValueError, match=msg):\n arr.searchsorted(b)\n | .venv\Lib\site-packages\pandas\tests\extension\test_string.py | test_string.py | Python | 9,279 | 0.95 | 0.190476 | 0.027523 | vue-tools | 940 | 2024-06-25T21:47:59.982341 | MIT | true | 03460d62d309bd9e0af070ed87cc36e4 |
"""\nTest extension array that has custom attribute information (not stored on the dtype).\n\n"""\nfrom __future__ import annotations\n\nimport numbers\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\n\nfrom pandas.core.dtypes.base import ExtensionDtype\n\nimport pandas as pd\nfrom pandas.core.arrays import ExtensionArray\n\nif TYPE_CHECKING:\n from pandas._typing import type_t\n\n\nclass FloatAttrDtype(ExtensionDtype):\n type = float\n name = "float_attr"\n na_value = np.nan\n\n @classmethod\n def construct_array_type(cls) -> type_t[FloatAttrArray]:\n """\n Return the array type associated with this dtype.\n\n Returns\n -------\n type\n """\n return FloatAttrArray\n\n\nclass FloatAttrArray(ExtensionArray):\n dtype = FloatAttrDtype()\n __array_priority__ = 1000\n\n def __init__(self, values, attr=None) -> None:\n if not isinstance(values, np.ndarray):\n raise TypeError("Need to pass a numpy array of float64 dtype as values")\n if not values.dtype == "float64":\n raise TypeError("Need to pass a numpy array of float64 dtype as values")\n self.data = values\n self.attr = attr\n\n @classmethod\n def _from_sequence(cls, scalars, *, dtype=None, copy=False):\n if not copy:\n data = np.asarray(scalars, dtype="float64")\n else:\n data = np.array(scalars, dtype="float64", copy=copy)\n return cls(data)\n\n def __getitem__(self, item):\n if isinstance(item, numbers.Integral):\n return self.data[item]\n else:\n # slice, list-like, mask\n item = pd.api.indexers.check_array_indexer(self, item)\n return type(self)(self.data[item], self.attr)\n\n def __len__(self) -> int:\n return len(self.data)\n\n def isna(self):\n return np.isnan(self.data)\n\n def take(self, indexer, allow_fill=False, fill_value=None):\n from pandas.api.extensions import take\n\n data = self.data\n if allow_fill and fill_value is None:\n fill_value = self.dtype.na_value\n\n result = take(data, indexer, fill_value=fill_value, allow_fill=allow_fill)\n return type(self)(result, self.attr)\n\n def 
copy(self):\n return type(self)(self.data.copy(), self.attr)\n\n @classmethod\n def _concat_same_type(cls, to_concat):\n data = np.concatenate([x.data for x in to_concat])\n attr = to_concat[0].attr if len(to_concat) else None\n return cls(data, attr)\n | .venv\Lib\site-packages\pandas\tests\extension\array_with_attr\array.py | array.py | Python | 2,496 | 0.95 | 0.213483 | 0.014925 | react-lib | 350 | 2025-01-05T08:57:52.089113 | MIT | true | 469d6a911486e0d861ead416eb6ba6d4 |
import numpy as np\n\nimport pandas as pd\nimport pandas._testing as tm\nfrom pandas.tests.extension.array_with_attr import FloatAttrArray\n\n\ndef test_concat_with_all_na():\n # https://github.com/pandas-dev/pandas/pull/47762\n # ensure that attribute of the column array is preserved (when it gets\n # preserved in reindexing the array) during merge/concat\n arr = FloatAttrArray(np.array([np.nan, np.nan], dtype="float64"), attr="test")\n\n df1 = pd.DataFrame({"col": arr, "key": [0, 1]})\n df2 = pd.DataFrame({"key": [0, 1], "col2": [1, 2]})\n result = pd.merge(df1, df2, on="key")\n expected = pd.DataFrame({"col": arr, "key": [0, 1], "col2": [1, 2]})\n tm.assert_frame_equal(result, expected)\n assert result["col"].array.attr == "test"\n\n df1 = pd.DataFrame({"col": arr, "key": [0, 1]})\n df2 = pd.DataFrame({"key": [0, 2], "col2": [1, 2]})\n result = pd.merge(df1, df2, on="key")\n expected = pd.DataFrame({"col": arr.take([0]), "key": [0], "col2": [1]})\n tm.assert_frame_equal(result, expected)\n assert result["col"].array.attr == "test"\n\n result = pd.concat([df1.set_index("key"), df2.set_index("key")], axis=1)\n expected = pd.DataFrame(\n {"col": arr.take([0, 1, -1]), "col2": [1, np.nan, 2], "key": [0, 1, 2]}\n ).set_index("key")\n tm.assert_frame_equal(result, expected)\n assert result["col"].array.attr == "test"\n | .venv\Lib\site-packages\pandas\tests\extension\array_with_attr\test_array_with_attr.py | test_array_with_attr.py | Python | 1,373 | 0.95 | 0.030303 | 0.111111 | node-utils | 180 | 2023-08-01T02:43:30.397264 | BSD-3-Clause | true | 9915ed093c8557caae5810114d963a61 |
from pandas.tests.extension.array_with_attr.array import (\n FloatAttrArray,\n FloatAttrDtype,\n)\n\n__all__ = ["FloatAttrArray", "FloatAttrDtype"]\n | .venv\Lib\site-packages\pandas\tests\extension\array_with_attr\__init__.py | __init__.py | Python | 149 | 0.85 | 0 | 0 | node-utils | 735 | 2023-08-19T15:28:42.367662 | MIT | true | c8bfaeeab8d54651335d8e016662c032 |
\n\n | .venv\Lib\site-packages\pandas\tests\extension\array_with_attr\__pycache__\array.cpython-313.pyc | array.cpython-313.pyc | Other | 4,709 | 0.8 | 0 | 0 | python-kit | 522 | 2024-03-13T00:34:04.738079 | Apache-2.0 | true | a8d2e3a9840b8e740d2b8e2c4dc3cfc1 |
\n\n | .venv\Lib\site-packages\pandas\tests\extension\array_with_attr\__pycache__\test_array_with_attr.cpython-313.pyc | test_array_with_attr.cpython-313.pyc | Other | 2,422 | 0.8 | 0 | 0 | python-kit | 975 | 2024-10-14T11:12:56.253336 | GPL-3.0 | true | f514014e098ecfa4218000f1d92d2ec3 |
\n\n | .venv\Lib\site-packages\pandas\tests\extension\array_with_attr\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 366 | 0.7 | 0 | 0 | awesome-app | 167 | 2024-04-18T20:07:25.227507 | BSD-3-Clause | true | 9c32c93b6d4302c0b42a2671c818643f |
import pytest\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\nclass BaseAccumulateTests:\n """\n Accumulation specific tests. Generally these only\n make sense for numeric/boolean operations.\n """\n\n def _supports_accumulation(self, ser: pd.Series, op_name: str) -> bool:\n # Do we expect this accumulation to be supported for this dtype?\n # We default to assuming "no"; subclass authors should override here.\n return False\n\n def check_accumulate(self, ser: pd.Series, op_name: str, skipna: bool):\n try:\n alt = ser.astype("float64")\n except (TypeError, ValueError):\n # e.g. Period can't be cast to float64 (TypeError)\n # String can't be cast to float64 (ValueError)\n alt = ser.astype(object)\n\n result = getattr(ser, op_name)(skipna=skipna)\n expected = getattr(alt, op_name)(skipna=skipna)\n tm.assert_series_equal(result, expected, check_dtype=False)\n\n @pytest.mark.parametrize("skipna", [True, False])\n def test_accumulate_series(self, data, all_numeric_accumulations, skipna):\n op_name = all_numeric_accumulations\n ser = pd.Series(data)\n\n if self._supports_accumulation(ser, op_name):\n self.check_accumulate(ser, op_name, skipna)\n else:\n with pytest.raises((NotImplementedError, TypeError)):\n # TODO: require TypeError for things that will _never_ work?\n getattr(ser, op_name)(skipna=skipna)\n | .venv\Lib\site-packages\pandas\tests\extension\base\accumulate.py | accumulate.py | Python | 1,501 | 0.95 | 0.225 | 0.15625 | react-lib | 693 | 2024-11-26T05:37:44.467851 | Apache-2.0 | true | 078bfbb048aa9dc1e9fbf7affd436ec7 |
class BaseExtensionTests:\n pass\n | .venv\Lib\site-packages\pandas\tests\extension\base\base.py | base.py | Python | 35 | 0.65 | 0.5 | 0 | vue-tools | 973 | 2024-02-11T03:44:47.136914 | GPL-3.0 | true | c9cae0567208b748af99a30b192adfea |
import numpy as np
import pytest

import pandas.util._test_decorators as td

import pandas as pd
import pandas._testing as tm
from pandas.core.internals.blocks import NumpyBlock


class BaseCastingTests:
    """Casting to and from ExtensionDtypes"""

    def test_astype_object_series(self, all_data):
        ser = pd.Series(all_data, name="A")
        as_obj = ser.astype(object)
        assert as_obj.dtype == np.dtype(object)
        if hasattr(as_obj._mgr, "blocks"):
            # BlockManager path: the single block must be an object NumpyBlock
            first_block = as_obj._mgr.blocks[0]
            assert isinstance(first_block, NumpyBlock)
            assert first_block.is_object
        assert isinstance(as_obj._mgr.array, np.ndarray)
        assert as_obj._mgr.array.dtype == np.dtype(object)

    def test_astype_object_frame(self, all_data):
        df = pd.DataFrame({"A": all_data})

        as_obj = df.astype(object)
        if hasattr(as_obj._mgr, "blocks"):
            first_block = as_obj._mgr.blocks[0]
            assert isinstance(first_block, NumpyBlock), type(first_block)
            assert first_block.is_object
        assert isinstance(as_obj._mgr.arrays[0], np.ndarray)
        assert as_obj._mgr.arrays[0].dtype == np.dtype(object)

        # dtype comparison must work and report every column as changed
        dtype_cmp = as_obj.dtypes == df.dtypes
        assert not dtype_cmp.any()

    def test_tolist(self, data):
        # round-trip through Series.tolist matches plain iteration
        assert pd.Series(data).tolist() == list(data)

    def test_astype_str(self, data):
        head = data[:2]
        result = pd.Series(head).astype(str)
        expected = pd.Series([str(elem) for elem in head], dtype=str)
        tm.assert_series_equal(result, expected)

    @pytest.mark.parametrize(
        "nullable_string_dtype",
        [
            "string[python]",
            pytest.param("string[pyarrow]", marks=td.skip_if_no("pyarrow")),
        ],
    )
    def test_astype_string(self, data, nullable_string_dtype):
        # GH-33465, GH#45326 as of 2.0 we decode bytes instead of calling str(obj)
        head = data[:5]
        result = pd.Series(head).astype(nullable_string_dtype)
        expected = pd.Series(
            [x.decode() if isinstance(x, bytes) else str(x) for x in head],
            dtype=nullable_string_dtype,
        )
        tm.assert_series_equal(result, expected)

    def test_to_numpy(self, data):
        expected = np.asarray(data)

        # both the array itself and a Series wrapping it convert identically
        tm.assert_equal(data.to_numpy(), expected)
        tm.assert_equal(pd.Series(data).to_numpy(), expected)

    def test_astype_empty_dataframe(self, dtype):
        # https://github.com/pandas-dev/pandas/issues/33113
        empty = pd.DataFrame()
        tm.assert_frame_equal(empty.astype(dtype), empty)

    @pytest.mark.parametrize("copy", [True, False])
    def test_astype_own_type(self, data, copy):
        # astype to the same dtype returns the original object iff copy=False
        # https://github.com/pandas-dev/pandas/issues/28488
        result = data.astype(data.dtype, copy=copy)
        if copy:
            assert result is not data
        else:
            assert result is data
        tm.assert_extension_array_equal(result, data)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.