content stringlengths 1 103k ⌀ | path stringlengths 8 216 | filename stringlengths 2 179 | language stringclasses 15
values | size_bytes int64 2 189k | quality_score float64 0.5 0.95 | complexity float64 0 1 | documentation_ratio float64 0 1 | repository stringclasses 5
values | stars int64 0 1k | created_date stringdate 2023-07-10 19:21:08 2025-07-09 19:11:45 | license stringclasses 4
values | is_test bool 2
classes | file_hash stringlengths 32 32 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
from textwrap import dedent\n\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Series,\n)\n\npytest.importorskip("jinja2")\nfrom pandas.io.formats.style import Styler\n\n\n@pytest.fixture\ndef df():\n return DataFrame(\n {"A": [0, 1], "B": [-0.61, -1.22], "C": Series(["ab", "cd"], dtype=object)}\n )\n\n\n@pytest.fixture\ndef styler(df):\n return Styler(df, uuid_len=0, precision=2)\n\n\ndef test_basic_string(styler):\n result = styler.to_string()\n expected = dedent(\n """\\n A B C\n 0 0 -0.61 ab\n 1 1 -1.22 cd\n """\n )\n assert result == expected\n\n\ndef test_string_delimiter(styler):\n result = styler.to_string(delimiter=";")\n expected = dedent(\n """\\n ;A;B;C\n 0;0;-0.61;ab\n 1;1;-1.22;cd\n """\n )\n assert result == expected\n\n\ndef test_concat(styler):\n result = styler.concat(styler.data.agg(["sum"]).style).to_string()\n expected = dedent(\n """\\n A B C\n 0 0 -0.61 ab\n 1 1 -1.22 cd\n sum 1 -1.830000 abcd\n """\n )\n assert result == expected\n\n\ndef test_concat_recursion(styler):\n df = styler.data\n styler1 = styler\n styler2 = Styler(df.agg(["sum"]), uuid_len=0, precision=3)\n styler3 = Styler(df.agg(["sum"]), uuid_len=0, precision=4)\n result = styler1.concat(styler2.concat(styler3)).to_string()\n expected = dedent(\n """\\n A B C\n 0 0 -0.61 ab\n 1 1 -1.22 cd\n sum 1 -1.830 abcd\n sum 1 -1.8300 abcd\n """\n )\n assert result == expected\n\n\ndef test_concat_chain(styler):\n df = styler.data\n styler1 = styler\n styler2 = Styler(df.agg(["sum"]), uuid_len=0, precision=3)\n styler3 = Styler(df.agg(["sum"]), uuid_len=0, precision=4)\n result = styler1.concat(styler2).concat(styler3).to_string()\n expected = dedent(\n """\\n A B C\n 0 0 -0.61 ab\n 1 1 -1.22 cd\n sum 1 -1.830 abcd\n sum 1 -1.8300 abcd\n """\n )\n assert result == expected\n | .venv\Lib\site-packages\pandas\tests\io\formats\style\test_to_string.py | test_to_string.py | Python | 1,910 | 0.85 | 0.072917 | 0 | node-utils | 483 | 2024-05-26T06:11:07.362800 | BSD-3-Clause | true | 
dd001d1c8ee76ed84bf231244f53d914 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\style\__pycache__\test_bar.cpython-313.pyc | test_bar.cpython-313.pyc | Other | 16,952 | 0.95 | 0 | 0.007843 | python-kit | 390 | 2025-05-02T10:55:45.898425 | MIT | true | 397a26f8455170a4ee355c072c145c4f |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\style\__pycache__\test_exceptions.cpython-313.pyc | test_exceptions.cpython-313.pyc | Other | 2,450 | 0.95 | 0 | 0 | python-kit | 400 | 2024-01-28T18:48:26.902641 | BSD-3-Clause | true | df5e19a232d0a53912a23e534e3cdb50 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\style\__pycache__\test_format.cpython-313.pyc | test_format.cpython-313.pyc | Other | 32,718 | 0.95 | 0.007229 | 0.002481 | react-lib | 203 | 2025-06-12T07:38:47.213381 | BSD-3-Clause | true | c3d41a6cc2ad6bc32284510e73974a56 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\style\__pycache__\test_highlight.cpython-313.pyc | test_highlight.cpython-313.pyc | Other | 9,437 | 0.95 | 0 | 0 | python-kit | 271 | 2025-01-21T12:00:02.105755 | GPL-3.0 | true | ac52f553ff8efd24a20d1bac1787e5a3 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\style\__pycache__\test_html.cpython-313.pyc | test_html.cpython-313.pyc | Other | 42,013 | 0.95 | 0.195522 | 0.027439 | python-kit | 710 | 2024-11-19T10:25:53.724812 | BSD-3-Clause | true | 7301b2b1f02cca79a1c86df5cf077ab8 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\style\__pycache__\test_matplotlib.cpython-313.pyc | test_matplotlib.cpython-313.pyc | Other | 16,056 | 0.95 | 0.009852 | 0.020202 | vue-tools | 466 | 2024-02-29T00:22:09.830791 | MIT | true | 672a008640e6608196c09cda59409bc2 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\style\__pycache__\test_non_unique.cpython-313.pyc | test_non_unique.cpython-313.pyc | Other | 6,768 | 0.95 | 0.02381 | 0 | awesome-app | 971 | 2023-09-16T08:24:44.456496 | Apache-2.0 | true | 45be7e776d1fbf7dd117ea8072325487 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\style\__pycache__\test_style.cpython-313.pyc | test_style.cpython-313.pyc | Other | 86,248 | 0.75 | 0.027167 | 0.016043 | vue-tools | 251 | 2024-11-07T23:52:41.666311 | MIT | true | dfd550e45d5f6edcbb9b41a7d738af3f |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\style\__pycache__\test_tooltip.cpython-313.pyc | test_tooltip.cpython-313.pyc | Other | 3,600 | 0.95 | 0.114754 | 0 | vue-tools | 838 | 2025-03-29T16:33:05.031210 | MIT | true | 76efb4831cae201e7576599d4a2f9df2 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\style\__pycache__\test_to_latex.cpython-313.pyc | test_to_latex.cpython-313.pyc | Other | 42,061 | 0.95 | 0 | 0.00722 | node-utils | 880 | 2025-05-20T19:55:25.645506 | GPL-3.0 | true | c4d0c4550a818910cf91d59688c13b66 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\style\__pycache__\test_to_string.cpython-313.pyc | test_to_string.cpython-313.pyc | Other | 3,280 | 0.85 | 0 | 0 | node-utils | 67 | 2024-10-05T10:26:44.514859 | GPL-3.0 | true | a3ca78b2232d354900ebba2008376e7e |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\style\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 204 | 0.7 | 0 | 0 | node-utils | 309 | 2024-09-14T16:35:12.553234 | BSD-3-Clause | true | b483d8d2247f99e66e03c743c240ccdf |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\__pycache__\test_console.cpython-313.pyc | test_console.cpython-313.pyc | Other | 4,199 | 0.8 | 0 | 0 | awesome-app | 893 | 2024-12-01T19:15:01.499862 | Apache-2.0 | true | d0ad5c83dbe1f30fe3ce1e710675e963 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\__pycache__\test_css.cpython-313.pyc | test_css.cpython-313.pyc | Other | 8,327 | 0.8 | 0 | 0.034483 | vue-tools | 259 | 2025-06-10T12:00:28.045585 | Apache-2.0 | true | f445f7a1a6d2caba428aeb7a582d1800 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\__pycache__\test_eng_formatting.cpython-313.pyc | test_eng_formatting.cpython-313.pyc | Other | 11,012 | 0.8 | 0.004717 | 0.029412 | awesome-app | 462 | 2024-12-06T12:37:40.060152 | MIT | true | 35f1a7fdf67d3edf7e7b9cd1c9abbf69 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\__pycache__\test_format.cpython-313.pyc | test_format.cpython-313.pyc | Other | 104,106 | 0.75 | 0.004304 | 0.005117 | node-utils | 283 | 2025-06-15T00:07:16.614773 | BSD-3-Clause | true | f7d3097acac41e5ebab26e96a597242a |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\__pycache__\test_ipython_compat.cpython-313.pyc | test_ipython_compat.cpython-313.pyc | Other | 5,195 | 0.8 | 0 | 0 | node-utils | 815 | 2025-05-04T17:24:42.856138 | GPL-3.0 | true | 978767b0a9e37e137b5fccdc3a6b3a28 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\__pycache__\test_printing.cpython-313.pyc | test_printing.cpython-313.pyc | Other | 7,891 | 0.8 | 0 | 0 | python-kit | 242 | 2024-03-03T13:59:33.736919 | BSD-3-Clause | true | 5024e021ed78cef1eb9885c53b8f6adc |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\__pycache__\test_to_csv.cpython-313.pyc | test_to_csv.cpython-313.pyc | Other | 39,740 | 0.8 | 0.003521 | 0.005474 | python-kit | 741 | 2024-06-01T18:36:51.141997 | Apache-2.0 | true | 4a010d2eb0c9cac640c57285d3f1b33e |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\__pycache__\test_to_excel.cpython-313.pyc | test_to_excel.cpython-313.pyc | Other | 15,336 | 0.95 | 0 | 0.054622 | python-kit | 70 | 2024-03-12T12:27:20.002644 | Apache-2.0 | true | ad48e2be0e1346fdd1c48d0d9c8adb20 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\__pycache__\test_to_html.cpython-313.pyc | test_to_html.cpython-313.pyc | Other | 57,340 | 0.75 | 0.012308 | 0.017828 | node-utils | 645 | 2023-12-27T16:00:19.207029 | Apache-2.0 | true | 3c9fb6039996db752e95dd199bcae19b |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\__pycache__\test_to_latex.cpython-313.pyc | test_to_latex.cpython-313.pyc | Other | 55,189 | 0.75 | 0.008621 | 0.00175 | react-lib | 417 | 2023-11-06T17:47:21.853664 | Apache-2.0 | true | 72c0c73f79cb3721c8bf5785b59839a8 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\__pycache__\test_to_markdown.cpython-313.pyc | test_to_markdown.cpython-313.pyc | Other | 4,644 | 0.95 | 0.016667 | 0.016949 | python-kit | 365 | 2025-02-17T22:31:58.396064 | GPL-3.0 | true | 3b25b89b2604b867d709fe2784677ce9 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\__pycache__\test_to_string.cpython-313.pyc | test_to_string.cpython-313.pyc | Other | 54,369 | 0.95 | 0.001314 | 0.005427 | python-kit | 99 | 2025-01-31T04:46:35.829259 | GPL-3.0 | true | 89a692d5bd63e74c230ee8cb6ba8e008 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\formats\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 198 | 0.7 | 0 | 0 | node-utils | 111 | 2025-03-18T02:12:10.732814 | BSD-3-Clause | true | 8ad9332bb3a5153650ec3663cc0ef454 |
import pytest\n\n\n@pytest.fixture(params=["split", "records", "index", "columns", "values"])\ndef orient(request):\n """\n Fixture for orients excluding the table format.\n """\n return request.param\n | .venv\Lib\site-packages\pandas\tests\io\json\conftest.py | conftest.py | Python | 205 | 0.85 | 0.222222 | 0 | react-lib | 746 | 2024-03-06T21:23:08.924658 | MIT | true | 9a4a920d08f054f263cb7baa03c96605 |
from io import (\n BytesIO,\n StringIO,\n)\n\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nimport pandas._testing as tm\n\n\ndef test_compression_roundtrip(compression):\n df = pd.DataFrame(\n [[0.123456, 0.234567, 0.567567], [12.32112, 123123.2, 321321.2]],\n index=["A", "B"],\n columns=["X", "Y", "Z"],\n )\n\n with tm.ensure_clean() as path:\n df.to_json(path, compression=compression)\n tm.assert_frame_equal(df, pd.read_json(path, compression=compression))\n\n # explicitly ensure file was compressed.\n with tm.decompress_file(path, compression) as fh:\n result = fh.read().decode("utf8")\n data = StringIO(result)\n tm.assert_frame_equal(df, pd.read_json(data))\n\n\ndef test_read_zipped_json(datapath):\n uncompressed_path = datapath("io", "json", "data", "tsframe_v012.json")\n uncompressed_df = pd.read_json(uncompressed_path)\n\n compressed_path = datapath("io", "json", "data", "tsframe_v012.json.zip")\n compressed_df = pd.read_json(compressed_path, compression="zip")\n\n tm.assert_frame_equal(uncompressed_df, compressed_df)\n\n\n@td.skip_if_not_us_locale\n@pytest.mark.single_cpu\ndef test_with_s3_url(compression, s3_public_bucket, s3so):\n # Bucket created in tests/io/conftest.py\n df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))\n\n with tm.ensure_clean() as path:\n df.to_json(path, compression=compression)\n with open(path, "rb") as f:\n s3_public_bucket.put_object(Key="test-1", Body=f)\n\n roundtripped_df = pd.read_json(\n f"s3://{s3_public_bucket.name}/test-1",\n compression=compression,\n storage_options=s3so,\n )\n tm.assert_frame_equal(df, roundtripped_df)\n\n\ndef test_lines_with_compression(compression):\n with tm.ensure_clean() as path:\n df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))\n df.to_json(path, orient="records", lines=True, compression=compression)\n roundtripped_df = pd.read_json(path, lines=True, compression=compression)\n tm.assert_frame_equal(df, roundtripped_df)\n\n\ndef 
test_chunksize_with_compression(compression):\n with tm.ensure_clean() as path:\n df = pd.read_json(StringIO('{"a": ["foo", "bar", "baz"], "b": [4, 5, 6]}'))\n df.to_json(path, orient="records", lines=True, compression=compression)\n\n with pd.read_json(\n path, lines=True, chunksize=1, compression=compression\n ) as res:\n roundtripped_df = pd.concat(res)\n tm.assert_frame_equal(df, roundtripped_df)\n\n\ndef test_write_unsupported_compression_type():\n df = pd.read_json(StringIO('{"a": [1, 2, 3], "b": [4, 5, 6]}'))\n with tm.ensure_clean() as path:\n msg = "Unrecognized compression type: unsupported"\n with pytest.raises(ValueError, match=msg):\n df.to_json(path, compression="unsupported")\n\n\ndef test_read_unsupported_compression_type():\n with tm.ensure_clean() as path:\n msg = "Unrecognized compression type: unsupported"\n with pytest.raises(ValueError, match=msg):\n pd.read_json(path, compression="unsupported")\n\n\n@pytest.mark.parametrize(\n "infer_string", [False, pytest.param(True, marks=td.skip_if_no("pyarrow"))]\n)\n@pytest.mark.parametrize("to_infer", [True, False])\n@pytest.mark.parametrize("read_infer", [True, False])\ndef test_to_json_compression(\n compression_only, read_infer, to_infer, compression_to_extension, infer_string\n):\n with pd.option_context("future.infer_string", infer_string):\n # see gh-15008\n compression = compression_only\n\n # We'll complete file extension subsequently.\n filename = "test."\n filename += compression_to_extension[compression]\n\n df = pd.DataFrame({"A": [1]})\n\n to_compression = "infer" if to_infer else compression\n read_compression = "infer" if read_infer else compression\n\n with tm.ensure_clean(filename) as path:\n df.to_json(path, compression=to_compression)\n result = pd.read_json(path, compression=read_compression)\n tm.assert_frame_equal(result, df)\n\n\ndef test_to_json_compression_mode(compression):\n # GH 39985 (read_json does not support user-provided binary files)\n expected = pd.DataFrame({"A": 
[1]})\n\n with BytesIO() as buffer:\n expected.to_json(buffer, compression=compression)\n # df = pd.read_json(buffer, compression=compression)\n # tm.assert_frame_equal(expected, df)\n | .venv\Lib\site-packages\pandas\tests\io\json\test_compression.py | test_compression.py | Python | 4,506 | 0.95 | 0.084615 | 0.072165 | python-kit | 706 | 2024-07-13T09:02:02.346586 | Apache-2.0 | true | 89766527da0e56bfb1653ccc98916750 |
"""\nTests for the deprecated keyword arguments for `read_json`.\n"""\nfrom io import StringIO\n\nimport pandas as pd\nimport pandas._testing as tm\n\nfrom pandas.io.json import read_json\n\n\ndef test_good_kwargs():\n df = pd.DataFrame({"A": [2, 4, 6], "B": [3, 6, 9]}, index=[0, 1, 2])\n\n with tm.assert_produces_warning(None):\n data1 = StringIO(df.to_json(orient="split"))\n tm.assert_frame_equal(df, read_json(data1, orient="split"))\n data2 = StringIO(df.to_json(orient="columns"))\n tm.assert_frame_equal(df, read_json(data2, orient="columns"))\n data3 = StringIO(df.to_json(orient="index"))\n tm.assert_frame_equal(df, read_json(data3, orient="index"))\n | .venv\Lib\site-packages\pandas\tests\io\json\test_deprecated_kwargs.py | test_deprecated_kwargs.py | Python | 690 | 0.85 | 0.142857 | 0 | python-kit | 603 | 2025-03-23T20:53:42.529539 | GPL-3.0 | true | 85c13286b7fcd0b5299af923897d8181 |
"""Tests for Table Schema integration."""\nfrom collections import OrderedDict\nfrom io import StringIO\nimport json\n\nimport numpy as np\nimport pytest\n\nfrom pandas.core.dtypes.dtypes import (\n CategoricalDtype,\n DatetimeTZDtype,\n PeriodDtype,\n)\n\nimport pandas as pd\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\nfrom pandas.io.json._table_schema import (\n as_json_table_type,\n build_table_schema,\n convert_json_field_to_pandas_type,\n convert_pandas_type_to_json_field,\n set_default_names,\n)\n\n\n@pytest.fixture\ndef df_schema():\n return DataFrame(\n {\n "A": [1, 2, 3, 4],\n "B": ["a", "b", "c", "c"],\n "C": pd.date_range("2016-01-01", freq="d", periods=4),\n "D": pd.timedelta_range("1h", periods=4, freq="min"),\n },\n index=pd.Index(range(4), name="idx"),\n )\n\n\n@pytest.fixture\ndef df_table():\n return DataFrame(\n {\n "A": [1, 2, 3, 4],\n "B": ["a", "b", "c", "c"],\n "C": pd.date_range("2016-01-01", freq="d", periods=4),\n "D": pd.timedelta_range("1h", periods=4, freq="min"),\n "E": pd.Series(pd.Categorical(["a", "b", "c", "c"])),\n "F": pd.Series(pd.Categorical(["a", "b", "c", "c"], ordered=True)),\n "G": [1.0, 2.0, 3, 4.0],\n "H": pd.date_range("2016-01-01", freq="d", periods=4, tz="US/Central"),\n },\n index=pd.Index(range(4), name="idx"),\n )\n\n\nclass TestBuildSchema:\n def test_build_table_schema(self, df_schema, using_infer_string):\n result = build_table_schema(df_schema, version=False)\n expected = {\n "fields": [\n {"name": "idx", "type": "integer"},\n {"name": "A", "type": "integer"},\n {"name": "B", "type": "string"},\n {"name": "C", "type": "datetime"},\n {"name": "D", "type": "duration"},\n ],\n "primaryKey": ["idx"],\n }\n if using_infer_string:\n expected["fields"][2] = {"name": "B", "type": "any", "extDtype": "str"}\n assert result == expected\n result = build_table_schema(df_schema)\n assert "pandas_version" in result\n\n def test_series(self):\n s = pd.Series([1, 2, 3], name="foo")\n result = 
build_table_schema(s, version=False)\n expected = {\n "fields": [\n {"name": "index", "type": "integer"},\n {"name": "foo", "type": "integer"},\n ],\n "primaryKey": ["index"],\n }\n assert result == expected\n result = build_table_schema(s)\n assert "pandas_version" in result\n\n def test_series_unnamed(self):\n result = build_table_schema(pd.Series([1, 2, 3]), version=False)\n expected = {\n "fields": [\n {"name": "index", "type": "integer"},\n {"name": "values", "type": "integer"},\n ],\n "primaryKey": ["index"],\n }\n assert result == expected\n\n def test_multiindex(self, df_schema, using_infer_string):\n df = df_schema\n idx = pd.MultiIndex.from_product([("a", "b"), (1, 2)])\n df.index = idx\n\n result = build_table_schema(df, version=False)\n expected = {\n "fields": [\n {"name": "level_0", "type": "string"},\n {"name": "level_1", "type": "integer"},\n {"name": "A", "type": "integer"},\n {"name": "B", "type": "string"},\n {"name": "C", "type": "datetime"},\n {"name": "D", "type": "duration"},\n ],\n "primaryKey": ["level_0", "level_1"],\n }\n if using_infer_string:\n expected["fields"][0] = {\n "name": "level_0",\n "type": "any",\n "extDtype": "str",\n }\n expected["fields"][3] = {"name": "B", "type": "any", "extDtype": "str"}\n assert result == expected\n\n df.index.names = ["idx0", None]\n expected["fields"][0]["name"] = "idx0"\n expected["primaryKey"] = ["idx0", "level_1"]\n result = build_table_schema(df, version=False)\n assert result == expected\n\n\nclass TestTableSchemaType:\n @pytest.mark.parametrize("int_type", [int, np.int16, np.int32, np.int64])\n def test_as_json_table_type_int_data(self, int_type):\n int_data = [1, 2, 3]\n assert as_json_table_type(np.array(int_data, dtype=int_type).dtype) == "integer"\n\n @pytest.mark.parametrize("float_type", [float, np.float16, np.float32, np.float64])\n def test_as_json_table_type_float_data(self, float_type):\n float_data = [1.0, 2.0, 3.0]\n assert (\n as_json_table_type(np.array(float_data, 
dtype=float_type).dtype) == "number"\n )\n\n @pytest.mark.parametrize("bool_type", [bool, np.bool_])\n def test_as_json_table_type_bool_data(self, bool_type):\n bool_data = [True, False]\n assert (\n as_json_table_type(np.array(bool_data, dtype=bool_type).dtype) == "boolean"\n )\n\n @pytest.mark.parametrize(\n "date_data",\n [\n pd.to_datetime(["2016"]),\n pd.to_datetime(["2016"], utc=True),\n pd.Series(pd.to_datetime(["2016"])),\n pd.Series(pd.to_datetime(["2016"], utc=True)),\n pd.period_range("2016", freq="Y", periods=3),\n ],\n )\n def test_as_json_table_type_date_data(self, date_data):\n assert as_json_table_type(date_data.dtype) == "datetime"\n\n @pytest.mark.parametrize(\n "str_data",\n [pd.Series(["a", "b"], dtype=object), pd.Index(["a", "b"], dtype=object)],\n )\n def test_as_json_table_type_string_data(self, str_data):\n assert as_json_table_type(str_data.dtype) == "string"\n\n @pytest.mark.parametrize(\n "cat_data",\n [\n pd.Categorical(["a"]),\n pd.Categorical([1]),\n pd.Series(pd.Categorical([1])),\n pd.CategoricalIndex([1]),\n pd.Categorical([1]),\n ],\n )\n def test_as_json_table_type_categorical_data(self, cat_data):\n assert as_json_table_type(cat_data.dtype) == "any"\n\n # ------\n # dtypes\n # ------\n @pytest.mark.parametrize("int_dtype", [int, np.int16, np.int32, np.int64])\n def test_as_json_table_type_int_dtypes(self, int_dtype):\n assert as_json_table_type(int_dtype) == "integer"\n\n @pytest.mark.parametrize("float_dtype", [float, np.float16, np.float32, np.float64])\n def test_as_json_table_type_float_dtypes(self, float_dtype):\n assert as_json_table_type(float_dtype) == "number"\n\n @pytest.mark.parametrize("bool_dtype", [bool, np.bool_])\n def test_as_json_table_type_bool_dtypes(self, bool_dtype):\n assert as_json_table_type(bool_dtype) == "boolean"\n\n @pytest.mark.parametrize(\n "date_dtype",\n [\n np.dtype("<M8[ns]"),\n PeriodDtype("D"),\n DatetimeTZDtype("ns", "US/Central"),\n ],\n )\n def test_as_json_table_type_date_dtypes(self, 
date_dtype):\n # TODO: datedate.date? datetime.time?\n assert as_json_table_type(date_dtype) == "datetime"\n\n @pytest.mark.parametrize("td_dtype", [np.dtype("<m8[ns]")])\n def test_as_json_table_type_timedelta_dtypes(self, td_dtype):\n assert as_json_table_type(td_dtype) == "duration"\n\n @pytest.mark.parametrize("str_dtype", [object]) # TODO(GH#14904) flesh out dtypes?\n def test_as_json_table_type_string_dtypes(self, str_dtype):\n assert as_json_table_type(str_dtype) == "string"\n\n def test_as_json_table_type_categorical_dtypes(self):\n assert as_json_table_type(pd.Categorical(["a"]).dtype) == "any"\n assert as_json_table_type(CategoricalDtype()) == "any"\n\n\nclass TestTableOrient:\n def test_build_series(self):\n s = pd.Series([1, 2], name="a")\n s.index.name = "id"\n result = s.to_json(orient="table", date_format="iso")\n result = json.loads(result, object_pairs_hook=OrderedDict)\n\n assert "pandas_version" in result["schema"]\n result["schema"].pop("pandas_version")\n\n fields = [{"name": "id", "type": "integer"}, {"name": "a", "type": "integer"}]\n\n schema = {"fields": fields, "primaryKey": ["id"]}\n\n expected = OrderedDict(\n [\n ("schema", schema),\n (\n "data",\n [\n OrderedDict([("id", 0), ("a", 1)]),\n OrderedDict([("id", 1), ("a", 2)]),\n ],\n ),\n ]\n )\n\n assert result == expected\n\n def test_read_json_from_to_json_results(self):\n # GH32383\n df = DataFrame(\n {\n "_id": {"row_0": 0},\n "category": {"row_0": "Goods"},\n "recommender_id": {"row_0": 3},\n "recommender_name_jp": {"row_0": "浦田"},\n "recommender_name_en": {"row_0": "Urata"},\n "name_jp": {"row_0": "博多人形(松尾吉将まつお よしまさ)"},\n "name_en": {"row_0": "Hakata Dolls Matsuo"},\n }\n )\n\n result1 = pd.read_json(StringIO(df.to_json()))\n result2 = DataFrame.from_dict(json.loads(df.to_json()))\n tm.assert_frame_equal(result1, df)\n tm.assert_frame_equal(result2, df)\n\n def test_to_json(self, df_table, using_infer_string):\n df = df_table\n df.index.name = "idx"\n result = 
df.to_json(orient="table", date_format="iso")\n result = json.loads(result, object_pairs_hook=OrderedDict)\n\n assert "pandas_version" in result["schema"]\n result["schema"].pop("pandas_version")\n\n fields = [\n {"name": "idx", "type": "integer"},\n {"name": "A", "type": "integer"},\n {"name": "B", "type": "string"},\n {"name": "C", "type": "datetime"},\n {"name": "D", "type": "duration"},\n {\n "constraints": {"enum": ["a", "b", "c"]},\n "name": "E",\n "ordered": False,\n "type": "any",\n },\n {\n "constraints": {"enum": ["a", "b", "c"]},\n "name": "F",\n "ordered": True,\n "type": "any",\n },\n {"name": "G", "type": "number"},\n {"name": "H", "type": "datetime", "tz": "US/Central"},\n ]\n\n if using_infer_string:\n fields[2] = {"name": "B", "type": "any", "extDtype": "str"}\n\n schema = {"fields": fields, "primaryKey": ["idx"]}\n data = [\n OrderedDict(\n [\n ("idx", 0),\n ("A", 1),\n ("B", "a"),\n ("C", "2016-01-01T00:00:00.000"),\n ("D", "P0DT1H0M0S"),\n ("E", "a"),\n ("F", "a"),\n ("G", 1.0),\n ("H", "2016-01-01T06:00:00.000Z"),\n ]\n ),\n OrderedDict(\n [\n ("idx", 1),\n ("A", 2),\n ("B", "b"),\n ("C", "2016-01-02T00:00:00.000"),\n ("D", "P0DT1H1M0S"),\n ("E", "b"),\n ("F", "b"),\n ("G", 2.0),\n ("H", "2016-01-02T06:00:00.000Z"),\n ]\n ),\n OrderedDict(\n [\n ("idx", 2),\n ("A", 3),\n ("B", "c"),\n ("C", "2016-01-03T00:00:00.000"),\n ("D", "P0DT1H2M0S"),\n ("E", "c"),\n ("F", "c"),\n ("G", 3.0),\n ("H", "2016-01-03T06:00:00.000Z"),\n ]\n ),\n OrderedDict(\n [\n ("idx", 3),\n ("A", 4),\n ("B", "c"),\n ("C", "2016-01-04T00:00:00.000"),\n ("D", "P0DT1H3M0S"),\n ("E", "c"),\n ("F", "c"),\n ("G", 4.0),\n ("H", "2016-01-04T06:00:00.000Z"),\n ]\n ),\n ]\n expected = OrderedDict([("schema", schema), ("data", data)])\n\n assert result == expected\n\n def test_to_json_float_index(self):\n data = pd.Series(1, index=[1.0, 2.0])\n result = data.to_json(orient="table", date_format="iso")\n result = json.loads(result, object_pairs_hook=OrderedDict)\n 
result["schema"].pop("pandas_version")\n\n expected = OrderedDict(\n [\n (\n "schema",\n {\n "fields": [\n {"name": "index", "type": "number"},\n {"name": "values", "type": "integer"},\n ],\n "primaryKey": ["index"],\n },\n ),\n (\n "data",\n [\n OrderedDict([("index", 1.0), ("values", 1)]),\n OrderedDict([("index", 2.0), ("values", 1)]),\n ],\n ),\n ]\n )\n\n assert result == expected\n\n def test_to_json_period_index(self):\n idx = pd.period_range("2016", freq="Q-JAN", periods=2)\n data = pd.Series(1, idx)\n result = data.to_json(orient="table", date_format="iso")\n result = json.loads(result, object_pairs_hook=OrderedDict)\n result["schema"].pop("pandas_version")\n\n fields = [\n {"freq": "QE-JAN", "name": "index", "type": "datetime"},\n {"name": "values", "type": "integer"},\n ]\n\n schema = {"fields": fields, "primaryKey": ["index"]}\n data = [\n OrderedDict([("index", "2015-11-01T00:00:00.000"), ("values", 1)]),\n OrderedDict([("index", "2016-02-01T00:00:00.000"), ("values", 1)]),\n ]\n expected = OrderedDict([("schema", schema), ("data", data)])\n\n assert result == expected\n\n def test_to_json_categorical_index(self):\n data = pd.Series(1, pd.CategoricalIndex(["a", "b"]))\n result = data.to_json(orient="table", date_format="iso")\n result = json.loads(result, object_pairs_hook=OrderedDict)\n result["schema"].pop("pandas_version")\n\n expected = OrderedDict(\n [\n (\n "schema",\n {\n "fields": [\n {\n "name": "index",\n "type": "any",\n "constraints": {"enum": ["a", "b"]},\n "ordered": False,\n },\n {"name": "values", "type": "integer"},\n ],\n "primaryKey": ["index"],\n },\n ),\n (\n "data",\n [\n OrderedDict([("index", "a"), ("values", 1)]),\n OrderedDict([("index", "b"), ("values", 1)]),\n ],\n ),\n ]\n )\n\n assert result == expected\n\n def test_date_format_raises(self, df_table):\n msg = (\n "Trying to write with `orient='table'` and `date_format='epoch'`. 
Table "\n "Schema requires dates to be formatted with `date_format='iso'`"\n )\n with pytest.raises(ValueError, match=msg):\n df_table.to_json(orient="table", date_format="epoch")\n\n # others work\n df_table.to_json(orient="table", date_format="iso")\n df_table.to_json(orient="table")\n\n def test_convert_pandas_type_to_json_field_int(self, index_or_series):\n kind = index_or_series\n data = [1, 2, 3]\n result = convert_pandas_type_to_json_field(kind(data, name="name"))\n expected = {"name": "name", "type": "integer"}\n assert result == expected\n\n def test_convert_pandas_type_to_json_field_float(self, index_or_series):\n kind = index_or_series\n data = [1.0, 2.0, 3.0]\n result = convert_pandas_type_to_json_field(kind(data, name="name"))\n expected = {"name": "name", "type": "number"}\n assert result == expected\n\n @pytest.mark.parametrize(\n "dt_args,extra_exp", [({}, {}), ({"utc": True}, {"tz": "UTC"})]\n )\n @pytest.mark.parametrize("wrapper", [None, pd.Series])\n def test_convert_pandas_type_to_json_field_datetime(\n self, dt_args, extra_exp, wrapper\n ):\n data = [1.0, 2.0, 3.0]\n data = pd.to_datetime(data, **dt_args)\n if wrapper is pd.Series:\n data = pd.Series(data, name="values")\n result = convert_pandas_type_to_json_field(data)\n expected = {"name": "values", "type": "datetime"}\n expected.update(extra_exp)\n assert result == expected\n\n def test_convert_pandas_type_to_json_period_range(self):\n arr = pd.period_range("2016", freq="Y-DEC", periods=4)\n result = convert_pandas_type_to_json_field(arr)\n expected = {"name": "values", "type": "datetime", "freq": "YE-DEC"}\n assert result == expected\n\n @pytest.mark.parametrize("kind", [pd.Categorical, pd.CategoricalIndex])\n @pytest.mark.parametrize("ordered", [True, False])\n def test_convert_pandas_type_to_json_field_categorical(self, kind, ordered):\n data = ["a", "b", "c"]\n if kind is pd.Categorical:\n arr = pd.Series(kind(data, ordered=ordered), name="cats")\n elif kind is pd.CategoricalIndex:\n 
arr = kind(data, ordered=ordered, name="cats")\n\n result = convert_pandas_type_to_json_field(arr)\n expected = {\n "name": "cats",\n "type": "any",\n "constraints": {"enum": data},\n "ordered": ordered,\n }\n assert result == expected\n\n @pytest.mark.parametrize(\n "inp,exp",\n [\n ({"type": "integer"}, "int64"),\n ({"type": "number"}, "float64"),\n ({"type": "boolean"}, "bool"),\n ({"type": "duration"}, "timedelta64"),\n ({"type": "datetime"}, "datetime64[ns]"),\n ({"type": "datetime", "tz": "US/Hawaii"}, "datetime64[ns, US/Hawaii]"),\n ({"type": "any"}, "object"),\n (\n {\n "type": "any",\n "constraints": {"enum": ["a", "b", "c"]},\n "ordered": False,\n },\n CategoricalDtype(categories=["a", "b", "c"], ordered=False),\n ),\n (\n {\n "type": "any",\n "constraints": {"enum": ["a", "b", "c"]},\n "ordered": True,\n },\n CategoricalDtype(categories=["a", "b", "c"], ordered=True),\n ),\n ({"type": "string"}, "object"),\n ],\n )\n def test_convert_json_field_to_pandas_type(self, inp, exp):\n field = {"name": "foo"}\n field.update(inp)\n assert convert_json_field_to_pandas_type(field) == exp\n\n @pytest.mark.parametrize("inp", ["geopoint", "geojson", "fake_type"])\n def test_convert_json_field_to_pandas_type_raises(self, inp):\n field = {"type": inp}\n with pytest.raises(\n ValueError, match=f"Unsupported or invalid field type: {inp}"\n ):\n convert_json_field_to_pandas_type(field)\n\n def test_categorical(self):\n s = pd.Series(pd.Categorical(["a", "b", "a"]))\n s.index.name = "idx"\n result = s.to_json(orient="table", date_format="iso")\n result = json.loads(result, object_pairs_hook=OrderedDict)\n result["schema"].pop("pandas_version")\n\n fields = [\n {"name": "idx", "type": "integer"},\n {\n "constraints": {"enum": ["a", "b"]},\n "name": "values",\n "ordered": False,\n "type": "any",\n },\n ]\n\n expected = OrderedDict(\n [\n ("schema", {"fields": fields, "primaryKey": ["idx"]}),\n (\n "data",\n [\n OrderedDict([("idx", 0), ("values", "a")]),\n 
OrderedDict([("idx", 1), ("values", "b")]),\n OrderedDict([("idx", 2), ("values", "a")]),\n ],\n ),\n ]\n )\n\n assert result == expected\n\n @pytest.mark.parametrize(\n "idx,nm,prop",\n [\n (pd.Index([1]), "index", "name"),\n (pd.Index([1], name="myname"), "myname", "name"),\n (\n pd.MultiIndex.from_product([("a", "b"), ("c", "d")]),\n ["level_0", "level_1"],\n "names",\n ),\n (\n pd.MultiIndex.from_product(\n [("a", "b"), ("c", "d")], names=["n1", "n2"]\n ),\n ["n1", "n2"],\n "names",\n ),\n (\n pd.MultiIndex.from_product(\n [("a", "b"), ("c", "d")], names=["n1", None]\n ),\n ["n1", "level_1"],\n "names",\n ),\n ],\n )\n def test_set_names_unset(self, idx, nm, prop):\n data = pd.Series(1, idx)\n result = set_default_names(data)\n assert getattr(result.index, prop) == nm\n\n @pytest.mark.parametrize(\n "idx",\n [\n pd.Index([], name="index"),\n pd.MultiIndex.from_arrays([["foo"], ["bar"]], names=("level_0", "level_1")),\n pd.MultiIndex.from_arrays([["foo"], ["bar"]], names=("foo", "level_1")),\n ],\n )\n def test_warns_non_roundtrippable_names(self, idx):\n # GH 19130\n df = DataFrame(index=idx)\n df.index.name = "index"\n with tm.assert_produces_warning():\n set_default_names(df)\n\n def test_timestamp_in_columns(self):\n df = DataFrame(\n [[1, 2]], columns=[pd.Timestamp("2016"), pd.Timedelta(10, unit="s")]\n )\n result = df.to_json(orient="table")\n js = json.loads(result)\n assert js["schema"]["fields"][1]["name"] == "2016-01-01T00:00:00.000"\n assert js["schema"]["fields"][2]["name"] == "P0DT0H0M10S"\n\n @pytest.mark.parametrize(\n "case",\n [\n pd.Series([1], index=pd.Index([1], name="a"), name="a"),\n DataFrame({"A": [1]}, index=pd.Index([1], name="A")),\n DataFrame(\n {"A": [1]},\n index=pd.MultiIndex.from_arrays([["a"], [1]], names=["A", "a"]),\n ),\n ],\n )\n def test_overlapping_names(self, case):\n with pytest.raises(ValueError, match="Overlapping"):\n case.to_json(orient="table")\n\n def test_mi_falsey_name(self):\n # GH 16203\n df = DataFrame(\n 
np.random.default_rng(2).standard_normal((4, 4)),\n index=pd.MultiIndex.from_product([("A", "B"), ("a", "b")]),\n )\n result = [x["name"] for x in build_table_schema(df)["fields"]]\n assert result == ["level_0", "level_1", 0, 1, 2, 3]\n\n\nclass TestTableOrientReader:\n @pytest.mark.parametrize(\n "index_nm",\n [None, "idx", pytest.param("index", marks=pytest.mark.xfail), "level_0"],\n )\n @pytest.mark.parametrize(\n "vals",\n [\n {"ints": [1, 2, 3, 4]},\n {"objects": ["a", "b", "c", "d"]},\n {"objects": ["1", "2", "3", "4"]},\n {"date_ranges": pd.date_range("2016-01-01", freq="d", periods=4)},\n {"categoricals": pd.Series(pd.Categorical(["a", "b", "c", "c"]))},\n {\n "ordered_cats": pd.Series(\n pd.Categorical(["a", "b", "c", "c"], ordered=True)\n )\n },\n {"floats": [1.0, 2.0, 3.0, 4.0]},\n {"floats": [1.1, 2.2, 3.3, 4.4]},\n {"bools": [True, False, False, True]},\n {\n "timezones": pd.date_range(\n "2016-01-01", freq="d", periods=4, tz="US/Central"\n ) # added in # GH 35973\n },\n ],\n )\n def test_read_json_table_orient(self, index_nm, vals, recwarn):\n df = DataFrame(vals, index=pd.Index(range(4), name=index_nm))\n out = df.to_json(orient="table")\n result = pd.read_json(out, orient="table")\n tm.assert_frame_equal(df, result)\n\n @pytest.mark.parametrize("index_nm", [None, "idx", "index"])\n @pytest.mark.parametrize(\n "vals",\n [{"timedeltas": pd.timedelta_range("1h", periods=4, freq="min")}],\n )\n def test_read_json_table_orient_raises(self, index_nm, vals, recwarn):\n df = DataFrame(vals, index=pd.Index(range(4), name=index_nm))\n out = df.to_json(orient="table")\n with pytest.raises(NotImplementedError, match="can not yet read "):\n pd.read_json(out, orient="table")\n\n @pytest.mark.parametrize(\n "index_nm",\n [None, "idx", pytest.param("index", marks=pytest.mark.xfail), "level_0"],\n )\n @pytest.mark.parametrize(\n "vals",\n [\n {"ints": [1, 2, 3, 4]},\n {"objects": ["a", "b", "c", "d"]},\n {"objects": ["1", "2", "3", "4"]},\n {"date_ranges": 
pd.date_range("2016-01-01", freq="d", periods=4)},\n {"categoricals": pd.Series(pd.Categorical(["a", "b", "c", "c"]))},\n {\n "ordered_cats": pd.Series(\n pd.Categorical(["a", "b", "c", "c"], ordered=True)\n )\n },\n {"floats": [1.0, 2.0, 3.0, 4.0]},\n {"floats": [1.1, 2.2, 3.3, 4.4]},\n {"bools": [True, False, False, True]},\n {\n "timezones": pd.date_range(\n "2016-01-01", freq="d", periods=4, tz="US/Central"\n ) # added in # GH 35973\n },\n ],\n )\n def test_read_json_table_period_orient(self, index_nm, vals, recwarn):\n df = DataFrame(\n vals,\n index=pd.Index(\n (pd.Period(f"2022Q{q}") for q in range(1, 5)), name=index_nm\n ),\n )\n out = df.to_json(orient="table")\n result = pd.read_json(out, orient="table")\n tm.assert_frame_equal(df, result)\n\n @pytest.mark.parametrize(\n "idx",\n [\n pd.Index(range(4)),\n pd.date_range(\n "2020-08-30",\n freq="d",\n periods=4,\n )._with_freq(None),\n pd.date_range(\n "2020-08-30", freq="d", periods=4, tz="US/Central"\n )._with_freq(None),\n pd.MultiIndex.from_product(\n [\n pd.date_range("2020-08-30", freq="d", periods=2, tz="US/Central"),\n ["x", "y"],\n ],\n ),\n ],\n )\n @pytest.mark.parametrize(\n "vals",\n [\n {"floats": [1.1, 2.2, 3.3, 4.4]},\n {"dates": pd.date_range("2020-08-30", freq="d", periods=4)},\n {\n "timezones": pd.date_range(\n "2020-08-30", freq="d", periods=4, tz="Europe/London"\n )\n },\n ],\n )\n def test_read_json_table_timezones_orient(self, idx, vals, recwarn):\n # GH 35973\n df = DataFrame(vals, index=idx)\n out = df.to_json(orient="table")\n result = pd.read_json(out, orient="table")\n tm.assert_frame_equal(df, result)\n\n def test_comprehensive(self):\n df = DataFrame(\n {\n "A": [1, 2, 3, 4],\n "B": ["a", "b", "c", "c"],\n "C": pd.date_range("2016-01-01", freq="d", periods=4),\n # 'D': pd.timedelta_range('1h', periods=4, freq='min'),\n "E": pd.Series(pd.Categorical(["a", "b", "c", "c"])),\n "F": pd.Series(pd.Categorical(["a", "b", "c", "c"], ordered=True)),\n "G": [1.1, 2.2, 3.3, 4.4],\n "H": 
pd.date_range("2016-01-01", freq="d", periods=4, tz="US/Central"),\n "I": [True, False, False, True],\n },\n index=pd.Index(range(4), name="idx"),\n )\n\n out = StringIO(df.to_json(orient="table"))\n result = pd.read_json(out, orient="table")\n tm.assert_frame_equal(df, result)\n\n @pytest.mark.parametrize(\n "index_names",\n [[None, None], ["foo", "bar"], ["foo", None], [None, "foo"], ["index", "foo"]],\n )\n def test_multiindex(self, index_names):\n # GH 18912\n df = DataFrame(\n [["Arr", "alpha", [1, 2, 3, 4]], ["Bee", "Beta", [10, 20, 30, 40]]],\n index=[["A", "B"], ["Null", "Eins"]],\n columns=["Aussprache", "Griechisch", "Args"],\n )\n df.index.names = index_names\n out = StringIO(df.to_json(orient="table"))\n result = pd.read_json(out, orient="table")\n tm.assert_frame_equal(df, result)\n\n def test_empty_frame_roundtrip(self):\n # GH 21287\n df = DataFrame(columns=["a", "b", "c"])\n expected = df.copy()\n out = StringIO(df.to_json(orient="table"))\n result = pd.read_json(out, orient="table")\n tm.assert_frame_equal(expected, result)\n\n def test_read_json_orient_table_old_schema_version(self):\n df_json = """\n {\n "schema":{\n "fields":[\n {"name":"index","type":"integer"},\n {"name":"a","type":"string"}\n ],\n "primaryKey":["index"],\n "pandas_version":"0.20.0"\n },\n "data":[\n {"index":0,"a":1},\n {"index":1,"a":2.0},\n {"index":2,"a":"s"}\n ]\n }\n """\n expected = DataFrame({"a": [1, 2.0, "s"]})\n result = pd.read_json(StringIO(df_json), orient="table")\n tm.assert_frame_equal(expected, result)\n\n @pytest.mark.parametrize("freq", ["M", "2M", "Q", "2Q", "Y", "2Y"])\n def test_read_json_table_orient_period_depr_freq(self, freq, recwarn):\n # GH#9586\n df = DataFrame(\n {"ints": [1, 2]},\n index=pd.PeriodIndex(["2020-01", "2021-06"], freq=freq),\n )\n out = df.to_json(orient="table")\n result = pd.read_json(out, orient="table")\n tm.assert_frame_equal(df, result)\n | .venv\Lib\site-packages\pandas\tests\io\json\test_json_table_schema.py | 
test_json_table_schema.py | Python | 30,664 | 0.95 | 0.068729 | 0.016477 | react-lib | 685 | 2025-03-12T11:22:53.127970 | GPL-3.0 | true | b6136023f32045e89230d9ad47db61f5 |
"""Tests for ExtensionDtype Table Schema integration."""\n\nfrom collections import OrderedDict\nimport datetime as dt\nimport decimal\nfrom io import StringIO\nimport json\n\nimport pytest\n\nfrom pandas import (\n NA,\n DataFrame,\n Index,\n array,\n read_json,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays.integer import Int64Dtype\nfrom pandas.core.arrays.string_ import StringDtype\nfrom pandas.core.series import Series\nfrom pandas.tests.extension.date import (\n DateArray,\n DateDtype,\n)\nfrom pandas.tests.extension.decimal.array import (\n DecimalArray,\n DecimalDtype,\n)\n\nfrom pandas.io.json._table_schema import (\n as_json_table_type,\n build_table_schema,\n)\n\n\nclass TestBuildSchema:\n def test_build_table_schema(self):\n df = DataFrame(\n {\n "A": DateArray([dt.date(2021, 10, 10)]),\n "B": DecimalArray([decimal.Decimal(10)]),\n "C": array(["pandas"], dtype="string"),\n "D": array([10], dtype="Int64"),\n }\n )\n result = build_table_schema(df, version=False)\n expected = {\n "fields": [\n {"name": "index", "type": "integer"},\n {"name": "A", "type": "any", "extDtype": "DateDtype"},\n {"name": "B", "type": "number", "extDtype": "decimal"},\n {"name": "C", "type": "any", "extDtype": "string"},\n {"name": "D", "type": "integer", "extDtype": "Int64"},\n ],\n "primaryKey": ["index"],\n }\n assert result == expected\n result = build_table_schema(df)\n assert "pandas_version" in result\n\n\nclass TestTableSchemaType:\n @pytest.mark.parametrize(\n "date_data",\n [\n DateArray([dt.date(2021, 10, 10)]),\n DateArray(dt.date(2021, 10, 10)),\n Series(DateArray(dt.date(2021, 10, 10))),\n ],\n )\n def test_as_json_table_type_ext_date_array_dtype(self, date_data):\n assert as_json_table_type(date_data.dtype) == "any"\n\n def test_as_json_table_type_ext_date_dtype(self):\n assert as_json_table_type(DateDtype()) == "any"\n\n @pytest.mark.parametrize(\n "decimal_data",\n [\n DecimalArray([decimal.Decimal(10)]),\n 
Series(DecimalArray([decimal.Decimal(10)])),\n ],\n )\n def test_as_json_table_type_ext_decimal_array_dtype(self, decimal_data):\n assert as_json_table_type(decimal_data.dtype) == "number"\n\n def test_as_json_table_type_ext_decimal_dtype(self):\n assert as_json_table_type(DecimalDtype()) == "number"\n\n @pytest.mark.parametrize(\n "string_data",\n [\n array(["pandas"], dtype="string"),\n Series(array(["pandas"], dtype="string")),\n ],\n )\n def test_as_json_table_type_ext_string_array_dtype(self, string_data):\n assert as_json_table_type(string_data.dtype) == "any"\n\n def test_as_json_table_type_ext_string_dtype(self):\n assert as_json_table_type(StringDtype()) == "any"\n\n @pytest.mark.parametrize(\n "integer_data",\n [\n array([10], dtype="Int64"),\n Series(array([10], dtype="Int64")),\n ],\n )\n def test_as_json_table_type_ext_integer_array_dtype(self, integer_data):\n assert as_json_table_type(integer_data.dtype) == "integer"\n\n def test_as_json_table_type_ext_integer_dtype(self):\n assert as_json_table_type(Int64Dtype()) == "integer"\n\n\nclass TestTableOrient:\n @pytest.fixture\n def da(self):\n return DateArray([dt.date(2021, 10, 10)])\n\n @pytest.fixture\n def dc(self):\n return DecimalArray([decimal.Decimal(10)])\n\n @pytest.fixture\n def sa(self):\n return array(["pandas"], dtype="string")\n\n @pytest.fixture\n def ia(self):\n return array([10], dtype="Int64")\n\n @pytest.fixture\n def df(self, da, dc, sa, ia):\n return DataFrame(\n {\n "A": da,\n "B": dc,\n "C": sa,\n "D": ia,\n }\n )\n\n def test_build_date_series(self, da):\n s = Series(da, name="a")\n s.index.name = "id"\n result = s.to_json(orient="table", date_format="iso")\n result = json.loads(result, object_pairs_hook=OrderedDict)\n\n assert "pandas_version" in result["schema"]\n result["schema"].pop("pandas_version")\n\n fields = [\n {"name": "id", "type": "integer"},\n {"name": "a", "type": "any", "extDtype": "DateDtype"},\n ]\n\n schema = {"fields": fields, "primaryKey": ["id"]}\n\n 
expected = OrderedDict(\n [\n ("schema", schema),\n ("data", [OrderedDict([("id", 0), ("a", "2021-10-10T00:00:00.000")])]),\n ]\n )\n\n assert result == expected\n\n def test_build_decimal_series(self, dc):\n s = Series(dc, name="a")\n s.index.name = "id"\n result = s.to_json(orient="table", date_format="iso")\n result = json.loads(result, object_pairs_hook=OrderedDict)\n\n assert "pandas_version" in result["schema"]\n result["schema"].pop("pandas_version")\n\n fields = [\n {"name": "id", "type": "integer"},\n {"name": "a", "type": "number", "extDtype": "decimal"},\n ]\n\n schema = {"fields": fields, "primaryKey": ["id"]}\n\n expected = OrderedDict(\n [\n ("schema", schema),\n ("data", [OrderedDict([("id", 0), ("a", 10.0)])]),\n ]\n )\n\n assert result == expected\n\n def test_build_string_series(self, sa):\n s = Series(sa, name="a")\n s.index.name = "id"\n result = s.to_json(orient="table", date_format="iso")\n result = json.loads(result, object_pairs_hook=OrderedDict)\n\n assert "pandas_version" in result["schema"]\n result["schema"].pop("pandas_version")\n\n fields = [\n {"name": "id", "type": "integer"},\n {"name": "a", "type": "any", "extDtype": "string"},\n ]\n\n schema = {"fields": fields, "primaryKey": ["id"]}\n\n expected = OrderedDict(\n [\n ("schema", schema),\n ("data", [OrderedDict([("id", 0), ("a", "pandas")])]),\n ]\n )\n\n assert result == expected\n\n def test_build_int64_series(self, ia):\n s = Series(ia, name="a")\n s.index.name = "id"\n result = s.to_json(orient="table", date_format="iso")\n result = json.loads(result, object_pairs_hook=OrderedDict)\n\n assert "pandas_version" in result["schema"]\n result["schema"].pop("pandas_version")\n\n fields = [\n {"name": "id", "type": "integer"},\n {"name": "a", "type": "integer", "extDtype": "Int64"},\n ]\n\n schema = {"fields": fields, "primaryKey": ["id"]}\n\n expected = OrderedDict(\n [\n ("schema", schema),\n ("data", [OrderedDict([("id", 0), ("a", 10)])]),\n ]\n )\n\n assert result == expected\n\n 
def test_to_json(self, df):\n df = df.copy()\n df.index.name = "idx"\n result = df.to_json(orient="table", date_format="iso")\n result = json.loads(result, object_pairs_hook=OrderedDict)\n\n assert "pandas_version" in result["schema"]\n result["schema"].pop("pandas_version")\n\n fields = [\n OrderedDict({"name": "idx", "type": "integer"}),\n OrderedDict({"name": "A", "type": "any", "extDtype": "DateDtype"}),\n OrderedDict({"name": "B", "type": "number", "extDtype": "decimal"}),\n OrderedDict({"name": "C", "type": "any", "extDtype": "string"}),\n OrderedDict({"name": "D", "type": "integer", "extDtype": "Int64"}),\n ]\n\n schema = OrderedDict({"fields": fields, "primaryKey": ["idx"]})\n data = [\n OrderedDict(\n [\n ("idx", 0),\n ("A", "2021-10-10T00:00:00.000"),\n ("B", 10.0),\n ("C", "pandas"),\n ("D", 10),\n ]\n )\n ]\n expected = OrderedDict([("schema", schema), ("data", data)])\n\n assert result == expected\n\n def test_json_ext_dtype_reading_roundtrip(self):\n # GH#40255\n df = DataFrame(\n {\n "a": Series([2, NA], dtype="Int64"),\n "b": Series([1.5, NA], dtype="Float64"),\n "c": Series([True, NA], dtype="boolean"),\n },\n index=Index([1, NA], dtype="Int64"),\n )\n expected = df.copy()\n data_json = df.to_json(orient="table", indent=4)\n result = read_json(StringIO(data_json), orient="table")\n tm.assert_frame_equal(result, expected)\n\n def test_json_ext_dtype_reading(self):\n # GH#40255\n data_json = """{\n "schema":{\n "fields":[\n {\n "name":"a",\n "type":"integer",\n "extDtype":"Int64"\n }\n ],\n },\n "data":[\n {\n "a":2\n },\n {\n "a":null\n }\n ]\n }"""\n result = read_json(StringIO(data_json), orient="table")\n expected = DataFrame({"a": Series([2, NA], dtype="Int64")})\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\json\test_json_table_schema_ext_dtype.py | test_json_table_schema_ext_dtype.py | Python | 9,500 | 0.95 | 0.078864 | 0.007547 | awesome-app | 333 | 2024-07-25T22:11:50.019479 | GPL-3.0 | true | 
2448e8be63eed046f944c63c9cf0faa6 |
import json\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Index,\n Series,\n json_normalize,\n)\nimport pandas._testing as tm\n\nfrom pandas.io.json._normalize import nested_to_record\n\n\n@pytest.fixture\ndef deep_nested():\n # deeply nested data\n return [\n {\n "country": "USA",\n "states": [\n {\n "name": "California",\n "cities": [\n {"name": "San Francisco", "pop": 12345},\n {"name": "Los Angeles", "pop": 12346},\n ],\n },\n {\n "name": "Ohio",\n "cities": [\n {"name": "Columbus", "pop": 1234},\n {"name": "Cleveland", "pop": 1236},\n ],\n },\n ],\n },\n {\n "country": "Germany",\n "states": [\n {"name": "Bayern", "cities": [{"name": "Munich", "pop": 12347}]},\n {\n "name": "Nordrhein-Westfalen",\n "cities": [\n {"name": "Duesseldorf", "pop": 1238},\n {"name": "Koeln", "pop": 1239},\n ],\n },\n ],\n },\n ]\n\n\n@pytest.fixture\ndef state_data():\n return [\n {\n "counties": [\n {"name": "Dade", "population": 12345},\n {"name": "Broward", "population": 40000},\n {"name": "Palm Beach", "population": 60000},\n ],\n "info": {"governor": "Rick Scott"},\n "shortname": "FL",\n "state": "Florida",\n },\n {\n "counties": [\n {"name": "Summit", "population": 1234},\n {"name": "Cuyahoga", "population": 1337},\n ],\n "info": {"governor": "John Kasich"},\n "shortname": "OH",\n "state": "Ohio",\n },\n ]\n\n\n@pytest.fixture\ndef author_missing_data():\n return [\n {"info": None},\n {\n "info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},\n "author_name": {"first": "Jane", "last_name": "Doe"},\n },\n ]\n\n\n@pytest.fixture\ndef missing_metadata():\n return [\n {\n "name": "Alice",\n "addresses": [\n {\n "number": 9562,\n "street": "Morris St.",\n "city": "Massillon",\n "state": "OH",\n "zip": 44646,\n }\n ],\n "previous_residences": {"cities": [{"city_name": "Foo York City"}]},\n },\n {\n "addresses": [\n {\n "number": 8449,\n "street": "Spring St.",\n "city": "Elizabethton",\n "state": "TN",\n "zip": 37643,\n }\n ],\n 
"previous_residences": {"cities": [{"city_name": "Barmingham"}]},\n },\n ]\n\n\n@pytest.fixture\ndef max_level_test_input_data():\n """\n input data to test json_normalize with max_level param\n """\n return [\n {\n "CreatedBy": {"Name": "User001"},\n "Lookup": {\n "TextField": "Some text",\n "UserField": {"Id": "ID001", "Name": "Name001"},\n },\n "Image": {"a": "b"},\n }\n ]\n\n\nclass TestJSONNormalize:\n def test_simple_records(self):\n recs = [\n {"a": 1, "b": 2, "c": 3},\n {"a": 4, "b": 5, "c": 6},\n {"a": 7, "b": 8, "c": 9},\n {"a": 10, "b": 11, "c": 12},\n ]\n\n result = json_normalize(recs)\n expected = DataFrame(recs)\n\n tm.assert_frame_equal(result, expected)\n\n def test_simple_normalize(self, state_data):\n result = json_normalize(state_data[0], "counties")\n expected = DataFrame(state_data[0]["counties"])\n tm.assert_frame_equal(result, expected)\n\n result = json_normalize(state_data, "counties")\n\n expected = []\n for rec in state_data:\n expected.extend(rec["counties"])\n expected = DataFrame(expected)\n\n tm.assert_frame_equal(result, expected)\n\n result = json_normalize(state_data, "counties", meta="state")\n expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])\n\n tm.assert_frame_equal(result, expected)\n\n def test_fields_list_type_normalize(self):\n parse_metadata_fields_list_type = [\n {"values": [1, 2, 3], "metadata": {"listdata": [1, 2]}}\n ]\n result = json_normalize(\n parse_metadata_fields_list_type,\n record_path=["values"],\n meta=[["metadata", "listdata"]],\n )\n expected = DataFrame(\n {0: [1, 2, 3], "metadata.listdata": [[1, 2], [1, 2], [1, 2]]}\n )\n tm.assert_frame_equal(result, expected)\n\n def test_empty_array(self):\n result = json_normalize([])\n expected = DataFrame()\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "data, record_path, exception_type",\n [\n ([{"a": 0}, {"a": 1}], None, None),\n ({"a": [{"a": 0}, {"a": 1}]}, "a", None),\n ('{"a": [{"a": 0}, {"a": 1}]}', None, 
NotImplementedError),\n (None, None, NotImplementedError),\n ],\n )\n def test_accepted_input(self, data, record_path, exception_type):\n if exception_type is not None:\n with pytest.raises(exception_type, match=""):\n json_normalize(data, record_path=record_path)\n else:\n result = json_normalize(data, record_path=record_path)\n expected = DataFrame([0, 1], columns=["a"])\n tm.assert_frame_equal(result, expected)\n\n def test_simple_normalize_with_separator(self, deep_nested):\n # GH 14883\n result = json_normalize({"A": {"A": 1, "B": 2}})\n expected = DataFrame([[1, 2]], columns=["A.A", "A.B"])\n tm.assert_frame_equal(result.reindex_like(expected), expected)\n\n result = json_normalize({"A": {"A": 1, "B": 2}}, sep="_")\n expected = DataFrame([[1, 2]], columns=["A_A", "A_B"])\n tm.assert_frame_equal(result.reindex_like(expected), expected)\n\n result = json_normalize({"A": {"A": 1, "B": 2}}, sep="\u03c3")\n expected = DataFrame([[1, 2]], columns=["A\u03c3A", "A\u03c3B"])\n tm.assert_frame_equal(result.reindex_like(expected), expected)\n\n result = json_normalize(\n deep_nested,\n ["states", "cities"],\n meta=["country", ["states", "name"]],\n sep="_",\n )\n expected = Index(["name", "pop", "country", "states_name"]).sort_values()\n assert result.columns.sort_values().equals(expected)\n\n def test_normalize_with_multichar_separator(self):\n # GH #43831\n data = {"a": [1, 2], "b": {"b_1": 2, "b_2": (3, 4)}}\n result = json_normalize(data, sep="__")\n expected = DataFrame([[[1, 2], 2, (3, 4)]], columns=["a", "b__b_1", "b__b_2"])\n tm.assert_frame_equal(result, expected)\n\n def test_value_array_record_prefix(self):\n # GH 21536\n result = json_normalize({"A": [1, 2]}, "A", record_prefix="Prefix.")\n expected = DataFrame([[1], [2]], columns=["Prefix.0"])\n tm.assert_frame_equal(result, expected)\n\n def test_nested_object_record_path(self):\n # GH 22706\n data = {\n "state": "Florida",\n "info": {\n "governor": "Rick Scott",\n "counties": [\n {"name": "Dade", 
"population": 12345},\n {"name": "Broward", "population": 40000},\n {"name": "Palm Beach", "population": 60000},\n ],\n },\n }\n result = json_normalize(data, record_path=["info", "counties"])\n expected = DataFrame(\n [["Dade", 12345], ["Broward", 40000], ["Palm Beach", 60000]],\n columns=["name", "population"],\n )\n tm.assert_frame_equal(result, expected)\n\n def test_more_deeply_nested(self, deep_nested):\n result = json_normalize(\n deep_nested, ["states", "cities"], meta=["country", ["states", "name"]]\n )\n ex_data = {\n "country": ["USA"] * 4 + ["Germany"] * 3,\n "states.name": [\n "California",\n "California",\n "Ohio",\n "Ohio",\n "Bayern",\n "Nordrhein-Westfalen",\n "Nordrhein-Westfalen",\n ],\n "name": [\n "San Francisco",\n "Los Angeles",\n "Columbus",\n "Cleveland",\n "Munich",\n "Duesseldorf",\n "Koeln",\n ],\n "pop": [12345, 12346, 1234, 1236, 12347, 1238, 1239],\n }\n\n expected = DataFrame(ex_data, columns=result.columns)\n tm.assert_frame_equal(result, expected)\n\n def test_shallow_nested(self):\n data = [\n {\n "state": "Florida",\n "shortname": "FL",\n "info": {"governor": "Rick Scott"},\n "counties": [\n {"name": "Dade", "population": 12345},\n {"name": "Broward", "population": 40000},\n {"name": "Palm Beach", "population": 60000},\n ],\n },\n {\n "state": "Ohio",\n "shortname": "OH",\n "info": {"governor": "John Kasich"},\n "counties": [\n {"name": "Summit", "population": 1234},\n {"name": "Cuyahoga", "population": 1337},\n ],\n },\n ]\n\n result = json_normalize(\n data, "counties", ["state", "shortname", ["info", "governor"]]\n )\n ex_data = {\n "name": ["Dade", "Broward", "Palm Beach", "Summit", "Cuyahoga"],\n "state": ["Florida"] * 3 + ["Ohio"] * 2,\n "shortname": ["FL", "FL", "FL", "OH", "OH"],\n "info.governor": ["Rick Scott"] * 3 + ["John Kasich"] * 2,\n "population": [12345, 40000, 60000, 1234, 1337],\n }\n expected = DataFrame(ex_data, columns=result.columns)\n tm.assert_frame_equal(result, expected)\n\n def 
test_nested_meta_path_with_nested_record_path(self, state_data):\n # GH 27220\n result = json_normalize(\n data=state_data,\n record_path=["counties"],\n meta=["state", "shortname", ["info", "governor"]],\n errors="ignore",\n )\n\n ex_data = {\n "name": ["Dade", "Broward", "Palm Beach", "Summit", "Cuyahoga"],\n "population": [12345, 40000, 60000, 1234, 1337],\n "state": ["Florida"] * 3 + ["Ohio"] * 2,\n "shortname": ["FL"] * 3 + ["OH"] * 2,\n "info.governor": ["Rick Scott"] * 3 + ["John Kasich"] * 2,\n }\n\n expected = DataFrame(ex_data)\n tm.assert_frame_equal(result, expected)\n\n def test_meta_name_conflict(self):\n data = [\n {\n "foo": "hello",\n "bar": "there",\n "data": [\n {"foo": "something", "bar": "else"},\n {"foo": "something2", "bar": "else2"},\n ],\n }\n ]\n\n msg = r"Conflicting metadata name (foo|bar), need distinguishing prefix"\n with pytest.raises(ValueError, match=msg):\n json_normalize(data, "data", meta=["foo", "bar"])\n\n result = json_normalize(data, "data", meta=["foo", "bar"], meta_prefix="meta")\n\n for val in ["metafoo", "metabar", "foo", "bar"]:\n assert val in result\n\n def test_meta_parameter_not_modified(self):\n # GH 18610\n data = [\n {\n "foo": "hello",\n "bar": "there",\n "data": [\n {"foo": "something", "bar": "else"},\n {"foo": "something2", "bar": "else2"},\n ],\n }\n ]\n\n COLUMNS = ["foo", "bar"]\n result = json_normalize(data, "data", meta=COLUMNS, meta_prefix="meta")\n\n assert COLUMNS == ["foo", "bar"]\n for val in ["metafoo", "metabar", "foo", "bar"]:\n assert val in result\n\n def test_record_prefix(self, state_data):\n result = json_normalize(state_data[0], "counties")\n expected = DataFrame(state_data[0]["counties"])\n tm.assert_frame_equal(result, expected)\n\n result = json_normalize(\n state_data, "counties", meta="state", record_prefix="county_"\n )\n\n expected = []\n for rec in state_data:\n expected.extend(rec["counties"])\n expected = DataFrame(expected)\n expected = expected.rename(columns=lambda x: 
"county_" + x)\n expected["state"] = np.array(["Florida", "Ohio"]).repeat([3, 2])\n\n tm.assert_frame_equal(result, expected)\n\n def test_non_ascii_key(self):\n testjson = (\n b'[{"\xc3\x9cnic\xc3\xb8de":0,"sub":{"A":1, "B":2}},'\n b'{"\xc3\x9cnic\xc3\xb8de":1,"sub":{"A":3, "B":4}}]'\n ).decode("utf8")\n\n testdata = {\n b"\xc3\x9cnic\xc3\xb8de".decode("utf8"): [0, 1],\n "sub.A": [1, 3],\n "sub.B": [2, 4],\n }\n expected = DataFrame(testdata)\n\n result = json_normalize(json.loads(testjson))\n tm.assert_frame_equal(result, expected)\n\n def test_missing_field(self, author_missing_data):\n # GH20030:\n result = json_normalize(author_missing_data)\n ex_data = [\n {\n "info": np.nan,\n "info.created_at": np.nan,\n "info.last_updated": np.nan,\n "author_name.first": np.nan,\n "author_name.last_name": np.nan,\n },\n {\n "info": None,\n "info.created_at": "11/08/1993",\n "info.last_updated": "26/05/2012",\n "author_name.first": "Jane",\n "author_name.last_name": "Doe",\n },\n ]\n expected = DataFrame(ex_data)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "max_level,expected",\n [\n (\n 0,\n [\n {\n "TextField": "Some text",\n "UserField": {"Id": "ID001", "Name": "Name001"},\n "CreatedBy": {"Name": "User001"},\n "Image": {"a": "b"},\n },\n {\n "TextField": "Some text",\n "UserField": {"Id": "ID001", "Name": "Name001"},\n "CreatedBy": {"Name": "User001"},\n "Image": {"a": "b"},\n },\n ],\n ),\n (\n 1,\n [\n {\n "TextField": "Some text",\n "UserField.Id": "ID001",\n "UserField.Name": "Name001",\n "CreatedBy": {"Name": "User001"},\n "Image": {"a": "b"},\n },\n {\n "TextField": "Some text",\n "UserField.Id": "ID001",\n "UserField.Name": "Name001",\n "CreatedBy": {"Name": "User001"},\n "Image": {"a": "b"},\n },\n ],\n ),\n ],\n )\n def test_max_level_with_records_path(self, max_level, expected):\n # GH23843: Enhanced JSON normalize\n test_input = [\n {\n "CreatedBy": {"Name": "User001"},\n "Lookup": [\n {\n "TextField": "Some text",\n "UserField": 
{"Id": "ID001", "Name": "Name001"},\n },\n {\n "TextField": "Some text",\n "UserField": {"Id": "ID001", "Name": "Name001"},\n },\n ],\n "Image": {"a": "b"},\n "tags": [\n {"foo": "something", "bar": "else"},\n {"foo": "something2", "bar": "else2"},\n ],\n }\n ]\n\n result = json_normalize(\n test_input,\n record_path=["Lookup"],\n meta=[["CreatedBy"], ["Image"]],\n max_level=max_level,\n )\n expected_df = DataFrame(data=expected, columns=result.columns.values)\n tm.assert_equal(expected_df, result)\n\n def test_nested_flattening_consistent(self):\n # see gh-21537\n df1 = json_normalize([{"A": {"B": 1}}])\n df2 = json_normalize({"dummy": [{"A": {"B": 1}}]}, "dummy")\n\n # They should be the same.\n tm.assert_frame_equal(df1, df2)\n\n def test_nonetype_record_path(self, nulls_fixture):\n # see gh-30148\n # should not raise TypeError\n result = json_normalize(\n [\n {"state": "Texas", "info": nulls_fixture},\n {"state": "Florida", "info": [{"i": 2}]},\n ],\n record_path=["info"],\n )\n expected = DataFrame({"i": 2}, index=[0])\n tm.assert_equal(result, expected)\n\n @pytest.mark.parametrize("value", ["false", "true", "{}", "1", '"text"'])\n def test_non_list_record_path_errors(self, value):\n # see gh-30148, GH 26284\n parsed_value = json.loads(value)\n test_input = {"state": "Texas", "info": parsed_value}\n test_path = "info"\n msg = (\n f"{test_input} has non list value {parsed_value} for path {test_path}. 
"\n "Must be list or null."\n )\n with pytest.raises(TypeError, match=msg):\n json_normalize([test_input], record_path=[test_path])\n\n def test_meta_non_iterable(self):\n # GH 31507\n data = """[{"id": 99, "data": [{"one": 1, "two": 2}]}]"""\n\n result = json_normalize(json.loads(data), record_path=["data"], meta=["id"])\n expected = DataFrame(\n {"one": [1], "two": [2], "id": np.array([99], dtype=object)}\n )\n tm.assert_frame_equal(result, expected)\n\n def test_generator(self, state_data):\n # GH35923 Fix pd.json_normalize to not skip the first element of a\n # generator input\n def generator_data():\n yield from state_data[0]["counties"]\n\n result = json_normalize(generator_data())\n expected = DataFrame(state_data[0]["counties"])\n\n tm.assert_frame_equal(result, expected)\n\n def test_top_column_with_leading_underscore(self):\n # 49861\n data = {"_id": {"a1": 10, "l2": {"l3": 0}}, "gg": 4}\n result = json_normalize(data, sep="_")\n expected = DataFrame([[4, 10, 0]], columns=["gg", "_id_a1", "_id_l2_l3"])\n\n tm.assert_frame_equal(result, expected)\n\n\nclass TestNestedToRecord:\n def test_flat_stays_flat(self):\n recs = [{"flat1": 1, "flat2": 2}, {"flat3": 3, "flat2": 4}]\n result = nested_to_record(recs)\n expected = recs\n assert result == expected\n\n def test_one_level_deep_flattens(self):\n data = {"flat1": 1, "dict1": {"c": 1, "d": 2}}\n\n result = nested_to_record(data)\n expected = {"dict1.c": 1, "dict1.d": 2, "flat1": 1}\n\n assert result == expected\n\n def test_nested_flattens(self):\n data = {\n "flat1": 1,\n "dict1": {"c": 1, "d": 2},\n "nested": {"e": {"c": 1, "d": 2}, "d": 2},\n }\n\n result = nested_to_record(data)\n expected = {\n "dict1.c": 1,\n "dict1.d": 2,\n "flat1": 1,\n "nested.d": 2,\n "nested.e.c": 1,\n "nested.e.d": 2,\n }\n\n assert result == expected\n\n def test_json_normalize_errors(self, missing_metadata):\n # GH14583:\n # If meta keys are not always present a new option to set\n # errors='ignore' has been implemented\n\n msg 
= (\n "Key 'name' not found. To replace missing values of "\n "'name' with np.nan, pass in errors='ignore'"\n )\n with pytest.raises(KeyError, match=msg):\n json_normalize(\n data=missing_metadata,\n record_path="addresses",\n meta="name",\n errors="raise",\n )\n\n def test_missing_meta(self, missing_metadata):\n # GH25468\n # If metadata is nullable with errors set to ignore, the null values\n # should be numpy.nan values\n result = json_normalize(\n data=missing_metadata, record_path="addresses", meta="name", errors="ignore"\n )\n ex_data = [\n [9562, "Morris St.", "Massillon", "OH", 44646, "Alice"],\n [8449, "Spring St.", "Elizabethton", "TN", 37643, np.nan],\n ]\n columns = ["number", "street", "city", "state", "zip", "name"]\n expected = DataFrame(ex_data, columns=columns)\n tm.assert_frame_equal(result, expected)\n\n def test_missing_nested_meta(self):\n # GH44312\n # If errors="ignore" and nested metadata is null, we should return nan\n data = {"meta": "foo", "nested_meta": None, "value": [{"rec": 1}, {"rec": 2}]}\n result = json_normalize(\n data,\n record_path="value",\n meta=["meta", ["nested_meta", "leaf"]],\n errors="ignore",\n )\n ex_data = [[1, "foo", np.nan], [2, "foo", np.nan]]\n columns = ["rec", "meta", "nested_meta.leaf"]\n expected = DataFrame(ex_data, columns=columns).astype(\n {"nested_meta.leaf": object}\n )\n tm.assert_frame_equal(result, expected)\n\n # If errors="raise" and nested metadata is null, we should raise with the\n # key of the first missing level\n with pytest.raises(KeyError, match="'leaf' not found"):\n json_normalize(\n data,\n record_path="value",\n meta=["meta", ["nested_meta", "leaf"]],\n errors="raise",\n )\n\n def test_missing_meta_multilevel_record_path_errors_raise(self, missing_metadata):\n # GH41876\n # Ensure errors='raise' works as intended even when a record_path of length\n # greater than one is passed in\n msg = (\n "Key 'name' not found. 
To replace missing values of "\n "'name' with np.nan, pass in errors='ignore'"\n )\n with pytest.raises(KeyError, match=msg):\n json_normalize(\n data=missing_metadata,\n record_path=["previous_residences", "cities"],\n meta="name",\n errors="raise",\n )\n\n def test_missing_meta_multilevel_record_path_errors_ignore(self, missing_metadata):\n # GH41876\n # Ensure errors='ignore' works as intended even when a record_path of length\n # greater than one is passed in\n result = json_normalize(\n data=missing_metadata,\n record_path=["previous_residences", "cities"],\n meta="name",\n errors="ignore",\n )\n ex_data = [\n ["Foo York City", "Alice"],\n ["Barmingham", np.nan],\n ]\n columns = ["city_name", "name"]\n expected = DataFrame(ex_data, columns=columns)\n tm.assert_frame_equal(result, expected)\n\n def test_donot_drop_nonevalues(self):\n # GH21356\n data = [\n {"info": None, "author_name": {"first": "Smith", "last_name": "Appleseed"}},\n {\n "info": {"created_at": "11/08/1993", "last_updated": "26/05/2012"},\n "author_name": {"first": "Jane", "last_name": "Doe"},\n },\n ]\n result = nested_to_record(data)\n expected = [\n {\n "info": None,\n "author_name.first": "Smith",\n "author_name.last_name": "Appleseed",\n },\n {\n "author_name.first": "Jane",\n "author_name.last_name": "Doe",\n "info.created_at": "11/08/1993",\n "info.last_updated": "26/05/2012",\n },\n ]\n\n assert result == expected\n\n def test_nonetype_top_level_bottom_level(self):\n # GH21158: If inner level json has a key with a null value\n # make sure it does not do a new_d.pop twice and except\n data = {\n "id": None,\n "location": {\n "country": {\n "state": {\n "id": None,\n "town.info": {\n "id": None,\n "region": None,\n "x": 49.151580810546875,\n "y": -33.148521423339844,\n "z": 27.572303771972656,\n },\n }\n }\n },\n }\n result = nested_to_record(data)\n expected = {\n "id": None,\n "location.country.state.id": None,\n "location.country.state.town.info.id": None,\n 
"location.country.state.town.info.region": None,\n "location.country.state.town.info.x": 49.151580810546875,\n "location.country.state.town.info.y": -33.148521423339844,\n "location.country.state.town.info.z": 27.572303771972656,\n }\n assert result == expected\n\n def test_nonetype_multiple_levels(self):\n # GH21158: If inner level json has a key with a null value\n # make sure it does not do a new_d.pop twice and except\n data = {\n "id": None,\n "location": {\n "id": None,\n "country": {\n "id": None,\n "state": {\n "id": None,\n "town.info": {\n "region": None,\n "x": 49.151580810546875,\n "y": -33.148521423339844,\n "z": 27.572303771972656,\n },\n },\n },\n },\n }\n result = nested_to_record(data)\n expected = {\n "id": None,\n "location.id": None,\n "location.country.id": None,\n "location.country.state.id": None,\n "location.country.state.town.info.region": None,\n "location.country.state.town.info.x": 49.151580810546875,\n "location.country.state.town.info.y": -33.148521423339844,\n "location.country.state.town.info.z": 27.572303771972656,\n }\n assert result == expected\n\n @pytest.mark.parametrize(\n "max_level, expected",\n [\n (\n None,\n [\n {\n "CreatedBy.Name": "User001",\n "Lookup.TextField": "Some text",\n "Lookup.UserField.Id": "ID001",\n "Lookup.UserField.Name": "Name001",\n "Image.a": "b",\n }\n ],\n ),\n (\n 0,\n [\n {\n "CreatedBy": {"Name": "User001"},\n "Lookup": {\n "TextField": "Some text",\n "UserField": {"Id": "ID001", "Name": "Name001"},\n },\n "Image": {"a": "b"},\n }\n ],\n ),\n (\n 1,\n [\n {\n "CreatedBy.Name": "User001",\n "Lookup.TextField": "Some text",\n "Lookup.UserField": {"Id": "ID001", "Name": "Name001"},\n "Image.a": "b",\n }\n ],\n ),\n ],\n )\n def test_with_max_level(self, max_level, expected, max_level_test_input_data):\n # GH23843: Enhanced JSON normalize\n output = nested_to_record(max_level_test_input_data, max_level=max_level)\n assert output == expected\n\n def test_with_large_max_level(self):\n # GH23843: Enhanced 
JSON normalize\n max_level = 100\n input_data = [\n {\n "CreatedBy": {\n "user": {\n "name": {"firstname": "Leo", "LastName": "Thomson"},\n "family_tree": {\n "father": {\n "name": "Father001",\n "father": {\n "Name": "Father002",\n "father": {\n "name": "Father003",\n "father": {"Name": "Father004"},\n },\n },\n }\n },\n }\n }\n }\n ]\n expected = [\n {\n "CreatedBy.user.name.firstname": "Leo",\n "CreatedBy.user.name.LastName": "Thomson",\n "CreatedBy.user.family_tree.father.name": "Father001",\n "CreatedBy.user.family_tree.father.father.Name": "Father002",\n "CreatedBy.user.family_tree.father.father.father.name": "Father003",\n "CreatedBy.user.family_tree.father.father.father.father.Name": "Father004", # noqa: E501\n }\n ]\n output = nested_to_record(input_data, max_level=max_level)\n assert output == expected\n\n def test_series_non_zero_index(self):\n # GH 19020\n data = {\n 0: {"id": 1, "name": "Foo", "elements": {"a": 1}},\n 1: {"id": 2, "name": "Bar", "elements": {"b": 2}},\n 2: {"id": 3, "name": "Baz", "elements": {"c": 3}},\n }\n s = Series(data)\n s.index = [1, 2, 3]\n result = json_normalize(s)\n expected = DataFrame(\n {\n "id": [1, 2, 3],\n "name": ["Foo", "Bar", "Baz"],\n "elements.a": [1.0, np.nan, np.nan],\n "elements.b": [np.nan, 2.0, np.nan],\n "elements.c": [np.nan, np.nan, 3.0],\n }\n )\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\json\test_normalize.py | test_normalize.py | Python | 30,816 | 0.95 | 0.057332 | 0.051408 | awesome-app | 916 | 2024-03-18T17:28:33.299746 | MIT | true | 06006a3a0ac80f9fe3ee5dc552e42943 |
import datetime\nfrom datetime import timedelta\nfrom decimal import Decimal\nfrom io import (\n BytesIO,\n StringIO,\n)\nimport json\nimport os\nimport sys\nimport time\n\nimport numpy as np\nimport pytest\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas.compat import IS64\nimport pandas.util._test_decorators as td\n\nimport pandas as pd\nfrom pandas import (\n NA,\n DataFrame,\n DatetimeIndex,\n Index,\n RangeIndex,\n Series,\n Timestamp,\n date_range,\n read_json,\n)\nimport pandas._testing as tm\n\nfrom pandas.io.json import ujson_dumps\n\n\ndef test_literal_json_deprecation():\n # PR 53409\n expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])\n\n jsonl = """{"a": 1, "b": 2}\n {"a": 3, "b": 4}\n {"a": 5, "b": 6}\n {"a": 7, "b": 8}"""\n\n msg = (\n "Passing literal json to 'read_json' is deprecated and "\n "will be removed in a future version. To read from a "\n "literal string, wrap it in a 'StringIO' object."\n )\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n try:\n read_json(jsonl, lines=False)\n except ValueError:\n pass\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n read_json(expected.to_json(), lines=False)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=True)\n tm.assert_frame_equal(result, expected)\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n try:\n result = read_json(\n '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n',\n lines=False,\n )\n except ValueError:\n pass\n\n with tm.assert_produces_warning(FutureWarning, match=msg):\n try:\n result = read_json('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n', lines=False)\n except ValueError:\n pass\n tm.assert_frame_equal(result, expected)\n\n\ndef assert_json_roundtrip_equal(result, expected, orient):\n if orient in ("records", "values"):\n expected = expected.reset_index(drop=True)\n if orient == "values":\n expected.columns = 
range(len(expected.columns))\n tm.assert_frame_equal(result, expected)\n\n\nclass TestPandasContainer:\n @pytest.fixture\n def categorical_frame(self):\n data = {\n c: np.random.default_rng(i).standard_normal(30)\n for i, c in enumerate(list("ABCD"))\n }\n cat = ["bah"] * 5 + ["bar"] * 5 + ["baz"] * 5 + ["foo"] * 15\n data["E"] = list(reversed(cat))\n data["sort"] = np.arange(30, dtype="int64")\n return DataFrame(data, index=pd.CategoricalIndex(cat, name="E"))\n\n @pytest.fixture\n def datetime_series(self):\n # Same as usual datetime_series, but with index freq set to None,\n # since that doesn't round-trip, see GH#33711\n ser = Series(\n 1.1 * np.arange(10, dtype=np.float64),\n index=date_range("2020-01-01", periods=10),\n name="ts",\n )\n ser.index = ser.index._with_freq(None)\n return ser\n\n @pytest.fixture\n def datetime_frame(self):\n # Same as usual datetime_frame, but with index freq set to None,\n # since that doesn't round-trip, see GH#33711\n df = DataFrame(\n np.random.default_rng(2).standard_normal((30, 4)),\n columns=Index(list("ABCD")),\n index=date_range("2000-01-01", periods=30, freq="B"),\n )\n df.index = df.index._with_freq(None)\n return df\n\n def test_frame_double_encoded_labels(self, orient):\n df = DataFrame(\n [["a", "b"], ["c", "d"]],\n index=['index " 1', "index / 2"],\n columns=["a \\ b", "y / z"],\n )\n\n data = StringIO(df.to_json(orient=orient))\n result = read_json(data, orient=orient)\n expected = df.copy()\n assert_json_roundtrip_equal(result, expected, orient)\n\n @pytest.mark.parametrize("orient", ["split", "records", "values"])\n def test_frame_non_unique_index(self, orient):\n df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])\n data = StringIO(df.to_json(orient=orient))\n result = read_json(data, orient=orient)\n expected = df.copy()\n\n assert_json_roundtrip_equal(result, expected, orient)\n\n @pytest.mark.parametrize("orient", ["index", "columns"])\n def test_frame_non_unique_index_raises(self, 
orient):\n df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 1], columns=["x", "y"])\n msg = f"DataFrame index must be unique for orient='{orient}'"\n with pytest.raises(ValueError, match=msg):\n df.to_json(orient=orient)\n\n @pytest.mark.parametrize("orient", ["split", "values"])\n @pytest.mark.parametrize(\n "data",\n [\n [["a", "b"], ["c", "d"]],\n [[1.5, 2.5], [3.5, 4.5]],\n [[1, 2.5], [3, 4.5]],\n [[Timestamp("20130101"), 3.5], [Timestamp("20130102"), 4.5]],\n ],\n )\n def test_frame_non_unique_columns(self, orient, data):\n df = DataFrame(data, index=[1, 2], columns=["x", "x"])\n\n result = read_json(\n StringIO(df.to_json(orient=orient)), orient=orient, convert_dates=["x"]\n )\n if orient == "values":\n expected = DataFrame(data)\n if expected.iloc[:, 0].dtype == "datetime64[ns]":\n # orient == "values" by default will write Timestamp objects out\n # in milliseconds; these are internally stored in nanosecond,\n # so divide to get where we need\n # TODO: a to_epoch method would also solve; see GH 14772\n expected.isetitem(0, expected.iloc[:, 0].astype(np.int64) // 1000000)\n elif orient == "split":\n expected = df\n expected.columns = ["x", "x.1"]\n\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("orient", ["index", "columns", "records"])\n def test_frame_non_unique_columns_raises(self, orient):\n df = DataFrame([["a", "b"], ["c", "d"]], index=[1, 2], columns=["x", "x"])\n\n msg = f"DataFrame columns must be unique for orient='{orient}'"\n with pytest.raises(ValueError, match=msg):\n df.to_json(orient=orient)\n\n def test_frame_default_orient(self, float_frame):\n assert float_frame.to_json() == float_frame.to_json(orient="columns")\n\n @pytest.mark.parametrize("dtype", [False, float])\n @pytest.mark.parametrize("convert_axes", [True, False])\n def test_roundtrip_simple(self, orient, convert_axes, dtype, float_frame):\n data = StringIO(float_frame.to_json(orient=orient))\n result = read_json(data, orient=orient, 
convert_axes=convert_axes, dtype=dtype)\n\n expected = float_frame\n\n assert_json_roundtrip_equal(result, expected, orient)\n\n @pytest.mark.parametrize("dtype", [False, np.int64])\n @pytest.mark.parametrize("convert_axes", [True, False])\n def test_roundtrip_intframe(self, orient, convert_axes, dtype, int_frame):\n data = StringIO(int_frame.to_json(orient=orient))\n result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)\n expected = int_frame\n assert_json_roundtrip_equal(result, expected, orient)\n\n @pytest.mark.parametrize("dtype", [None, np.float64, int, "U3"])\n @pytest.mark.parametrize("convert_axes", [True, False])\n def test_roundtrip_str_axes(self, orient, convert_axes, dtype):\n df = DataFrame(\n np.zeros((200, 4)),\n columns=[str(i) for i in range(4)],\n index=[str(i) for i in range(200)],\n dtype=dtype,\n )\n\n data = StringIO(df.to_json(orient=orient))\n result = read_json(data, orient=orient, convert_axes=convert_axes, dtype=dtype)\n\n expected = df.copy()\n if not dtype:\n expected = expected.astype(np.int64)\n\n # index columns, and records orients cannot fully preserve the string\n # dtype for axes as the index and column labels are used as keys in\n # JSON objects. 
JSON keys are by definition strings, so there's no way\n # to disambiguate whether those keys actually were strings or numeric\n # beforehand and numeric wins out.\n if convert_axes and (orient in ("index", "columns")):\n expected.columns = expected.columns.astype(np.int64)\n expected.index = expected.index.astype(np.int64)\n elif orient == "records" and convert_axes:\n expected.columns = expected.columns.astype(np.int64)\n elif convert_axes and orient == "split":\n expected.columns = expected.columns.astype(np.int64)\n\n assert_json_roundtrip_equal(result, expected, orient)\n\n @pytest.mark.parametrize("convert_axes", [True, False])\n def test_roundtrip_categorical(\n self, request, orient, categorical_frame, convert_axes, using_infer_string\n ):\n # TODO: create a better frame to test with and improve coverage\n if orient in ("index", "columns"):\n request.applymarker(\n pytest.mark.xfail(\n reason=f"Can't have duplicate index values for orient '{orient}')"\n )\n )\n\n data = StringIO(categorical_frame.to_json(orient=orient))\n result = read_json(data, orient=orient, convert_axes=convert_axes)\n\n expected = categorical_frame.copy()\n expected.index = expected.index.astype(\n str if not using_infer_string else "str"\n ) # Categorical not preserved\n expected.index.name = None # index names aren't preserved in JSON\n assert_json_roundtrip_equal(result, expected, orient)\n\n @pytest.mark.parametrize("convert_axes", [True, False])\n def test_roundtrip_empty(self, orient, convert_axes):\n empty_frame = DataFrame()\n data = StringIO(empty_frame.to_json(orient=orient))\n result = read_json(data, orient=orient, convert_axes=convert_axes)\n if orient == "split":\n idx = Index([], dtype=(float if convert_axes else object))\n expected = DataFrame(index=idx, columns=idx)\n elif orient in ["index", "columns"]:\n expected = DataFrame()\n else:\n expected = empty_frame.copy()\n\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("convert_axes", [True, 
False])\n def test_roundtrip_timestamp(self, orient, convert_axes, datetime_frame):\n # TODO: improve coverage with date_format parameter\n data = StringIO(datetime_frame.to_json(orient=orient))\n result = read_json(data, orient=orient, convert_axes=convert_axes)\n expected = datetime_frame.copy()\n\n if not convert_axes: # one off for ts handling\n # DTI gets converted to epoch values\n idx = expected.index.view(np.int64) // 1000000\n if orient != "split": # TODO: handle consistently across orients\n idx = idx.astype(str)\n\n expected.index = idx\n\n assert_json_roundtrip_equal(result, expected, orient)\n\n @pytest.mark.parametrize("convert_axes", [True, False])\n def test_roundtrip_mixed(self, orient, convert_axes):\n index = Index(["a", "b", "c", "d", "e"])\n values = {\n "A": [0.0, 1.0, 2.0, 3.0, 4.0],\n "B": [0.0, 1.0, 0.0, 1.0, 0.0],\n "C": ["foo1", "foo2", "foo3", "foo4", "foo5"],\n "D": [True, False, True, False, True],\n }\n\n df = DataFrame(data=values, index=index)\n\n data = StringIO(df.to_json(orient=orient))\n result = read_json(data, orient=orient, convert_axes=convert_axes)\n\n expected = df.copy()\n expected = expected.assign(**expected.select_dtypes("number").astype(np.int64))\n\n assert_json_roundtrip_equal(result, expected, orient)\n\n @pytest.mark.xfail(\n reason="#50456 Column multiindex is stored and loaded differently",\n raises=AssertionError,\n )\n @pytest.mark.parametrize(\n "columns",\n [\n [["2022", "2022"], ["JAN", "FEB"]],\n [["2022", "2023"], ["JAN", "JAN"]],\n [["2022", "2022"], ["JAN", "JAN"]],\n ],\n )\n def test_roundtrip_multiindex(self, columns):\n df = DataFrame(\n [[1, 2], [3, 4]],\n columns=pd.MultiIndex.from_arrays(columns),\n )\n data = StringIO(df.to_json(orient="split"))\n result = read_json(data, orient="split")\n tm.assert_frame_equal(result, df)\n\n @pytest.mark.parametrize(\n "data,msg,orient",\n [\n ('{"key":b:a:d}', "Expected object or value", "columns"),\n # too few indices\n (\n '{"columns":["A","B"],'\n 
'"index":["2","3"],'\n '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',\n "|".join(\n [\n r"Length of values \(3\) does not match length of index \(2\)",\n ]\n ),\n "split",\n ),\n # too many columns\n (\n '{"columns":["A","B","C"],'\n '"index":["1","2","3"],'\n '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',\n "3 columns passed, passed data had 2 columns",\n "split",\n ),\n # bad key\n (\n '{"badkey":["A","B"],'\n '"index":["2","3"],'\n '"data":[[1.0,"1"],[2.0,"2"],[null,"3"]]}',\n r"unexpected key\(s\): badkey",\n "split",\n ),\n ],\n )\n def test_frame_from_json_bad_data_raises(self, data, msg, orient):\n with pytest.raises(ValueError, match=msg):\n read_json(StringIO(data), orient=orient)\n\n @pytest.mark.parametrize("dtype", [True, False])\n @pytest.mark.parametrize("convert_axes", [True, False])\n def test_frame_from_json_missing_data(self, orient, convert_axes, dtype):\n num_df = DataFrame([[1, 2], [4, 5, 6]])\n\n result = read_json(\n StringIO(num_df.to_json(orient=orient)),\n orient=orient,\n convert_axes=convert_axes,\n dtype=dtype,\n )\n assert np.isnan(result.iloc[0, 2])\n\n obj_df = DataFrame([["1", "2"], ["4", "5", "6"]])\n result = read_json(\n StringIO(obj_df.to_json(orient=orient)),\n orient=orient,\n convert_axes=convert_axes,\n dtype=dtype,\n )\n assert np.isnan(result.iloc[0, 2])\n\n @pytest.mark.parametrize("dtype", [True, False])\n def test_frame_read_json_dtype_missing_value(self, dtype):\n # GH28501 Parse missing values using read_json with dtype=False\n # to NaN instead of None\n result = read_json(StringIO("[null]"), dtype=dtype)\n expected = DataFrame([np.nan])\n\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("inf", [np.inf, -np.inf])\n @pytest.mark.parametrize("dtype", [True, False])\n def test_frame_infinity(self, inf, dtype):\n # infinities get mapped to nulls which get mapped to NaNs during\n # deserialisation\n df = DataFrame([[1, 2], [4, 5, 6]])\n df.loc[0, 2] = inf\n\n data = StringIO(df.to_json())\n result = 
read_json(data, dtype=dtype)\n assert np.isnan(result.iloc[0, 2])\n\n @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865")\n @pytest.mark.parametrize(\n "value,precision,expected_val",\n [\n (0.95, 1, 1.0),\n (1.95, 1, 2.0),\n (-1.95, 1, -2.0),\n (0.995, 2, 1.0),\n (0.9995, 3, 1.0),\n (0.99999999999999944, 15, 1.0),\n ],\n )\n def test_frame_to_json_float_precision(self, value, precision, expected_val):\n df = DataFrame([{"a_float": value}])\n encoded = df.to_json(double_precision=precision)\n assert encoded == f'{{"a_float":{{"0":{expected_val}}}}}'\n\n def test_frame_to_json_except(self):\n df = DataFrame([1, 2, 3])\n msg = "Invalid value 'garbage' for option 'orient'"\n with pytest.raises(ValueError, match=msg):\n df.to_json(orient="garbage")\n\n def test_frame_empty(self):\n df = DataFrame(columns=["jim", "joe"])\n assert not df._is_mixed_type\n\n data = StringIO(df.to_json())\n result = read_json(data, dtype=dict(df.dtypes))\n tm.assert_frame_equal(result, df, check_index_type=False)\n\n def test_frame_empty_to_json(self):\n # GH 7445\n df = DataFrame({"test": []}, index=[])\n result = df.to_json(orient="columns")\n expected = '{"test":{}}'\n assert result == expected\n\n def test_frame_empty_mixedtype(self):\n # mixed type\n df = DataFrame(columns=["jim", "joe"])\n df["joe"] = df["joe"].astype("i8")\n assert df._is_mixed_type\n data = df.to_json()\n tm.assert_frame_equal(\n read_json(StringIO(data), dtype=dict(df.dtypes)),\n df,\n check_index_type=False,\n )\n\n def test_frame_mixedtype_orient(self): # GH10289\n vals = [\n [10, 1, "foo", 0.1, 0.01],\n [20, 2, "bar", 0.2, 0.02],\n [30, 3, "baz", 0.3, 0.03],\n [40, 4, "qux", 0.4, 0.04],\n ]\n\n df = DataFrame(\n vals, index=list("abcd"), columns=["1st", "2nd", "3rd", "4th", "5th"]\n )\n\n assert df._is_mixed_type\n right = df.copy()\n\n for orient in ["split", "index", "columns"]:\n inp = StringIO(df.to_json(orient=orient))\n left = read_json(inp, orient=orient, convert_axes=False)\n 
tm.assert_frame_equal(left, right)\n\n right.index = RangeIndex(len(df))\n inp = StringIO(df.to_json(orient="records"))\n left = read_json(inp, orient="records", convert_axes=False)\n tm.assert_frame_equal(left, right)\n\n right.columns = RangeIndex(df.shape[1])\n inp = StringIO(df.to_json(orient="values"))\n left = read_json(inp, orient="values", convert_axes=False)\n tm.assert_frame_equal(left, right)\n\n def test_v12_compat(self, datapath):\n dti = date_range("2000-01-03", "2000-01-07")\n # freq doesn't roundtrip\n dti = DatetimeIndex(np.asarray(dti), freq=None)\n df = DataFrame(\n [\n [1.56808523, 0.65727391, 1.81021139, -0.17251653],\n [-0.2550111, -0.08072427, -0.03202878, -0.17581665],\n [1.51493992, 0.11805825, 1.629455, -1.31506612],\n [-0.02765498, 0.44679743, 0.33192641, -0.27885413],\n [0.05951614, -2.69652057, 1.28163262, 0.34703478],\n ],\n columns=["A", "B", "C", "D"],\n index=dti,\n )\n df["date"] = Timestamp("19920106 18:21:32.12").as_unit("ns")\n df.iloc[3, df.columns.get_loc("date")] = Timestamp("20130101")\n df["modified"] = df["date"]\n df.iloc[1, df.columns.get_loc("modified")] = pd.NaT\n\n dirpath = datapath("io", "json", "data")\n v12_json = os.path.join(dirpath, "tsframe_v012.json")\n df_unser = read_json(v12_json)\n tm.assert_frame_equal(df, df_unser)\n\n df_iso = df.drop(["modified"], axis=1)\n v12_iso_json = os.path.join(dirpath, "tsframe_iso_v012.json")\n df_unser_iso = read_json(v12_iso_json)\n tm.assert_frame_equal(df_iso, df_unser_iso, check_column_type=False)\n\n def test_blocks_compat_GH9037(self, using_infer_string):\n index = date_range("20000101", periods=10, freq="h")\n # freq doesn't round-trip\n index = DatetimeIndex(list(index), freq=None)\n\n df_mixed = DataFrame(\n {\n "float_1": [\n -0.92077639,\n 0.77434435,\n 1.25234727,\n 0.61485564,\n -0.60316077,\n 0.24653374,\n 0.28668979,\n -2.51969012,\n 0.95748401,\n -1.02970536,\n ],\n "int_1": [\n 19680418,\n 75337055,\n 99973684,\n 65103179,\n 79373900,\n 40314334,\n 
21290235,\n 4991321,\n 41903419,\n 16008365,\n ],\n "str_1": [\n "78c608f1",\n "64a99743",\n "13d2ff52",\n "ca7f4af2",\n "97236474",\n "bde7e214",\n "1a6bde47",\n "b1190be5",\n "7a669144",\n "8d64d068",\n ],\n "float_2": [\n -0.0428278,\n -1.80872357,\n 3.36042349,\n -0.7573685,\n -0.48217572,\n 0.86229683,\n 1.08935819,\n 0.93898739,\n -0.03030452,\n 1.43366348,\n ],\n "str_2": [\n "14f04af9",\n "d085da90",\n "4bcfac83",\n "81504caf",\n "2ffef4a9",\n "08e2f5c4",\n "07e1af03",\n "addbd4a7",\n "1f6a09ba",\n "4bfc4d87",\n ],\n "int_2": [\n 86967717,\n 98098830,\n 51927505,\n 20372254,\n 12601730,\n 20884027,\n 34193846,\n 10561746,\n 24867120,\n 76131025,\n ],\n },\n index=index,\n )\n\n # JSON deserialisation always creates unicode strings\n df_mixed.columns = df_mixed.columns.astype(\n np.str_ if not using_infer_string else "str"\n )\n data = StringIO(df_mixed.to_json(orient="split"))\n df_roundtrip = read_json(data, orient="split")\n tm.assert_frame_equal(\n df_mixed,\n df_roundtrip,\n check_index_type=True,\n check_column_type=True,\n by_blocks=True,\n check_exact=True,\n )\n\n def test_frame_nonprintable_bytes(self):\n # GH14256: failing column caused segfaults, if it is not the last one\n\n class BinaryThing:\n def __init__(self, hexed) -> None:\n self.hexed = hexed\n self.binary = bytes.fromhex(hexed)\n\n def __str__(self) -> str:\n return self.hexed\n\n hexed = "574b4454ba8c5eb4f98a8f45"\n binthing = BinaryThing(hexed)\n\n # verify the proper conversion of printable content\n df_printable = DataFrame({"A": [binthing.hexed]})\n assert df_printable.to_json() == f'{{"A":{{"0":"{hexed}"}}}}'\n\n # check if non-printable content throws appropriate Exception\n df_nonprintable = DataFrame({"A": [binthing]})\n msg = "Unsupported UTF-8 sequence length when encoding string"\n with pytest.raises(OverflowError, match=msg):\n df_nonprintable.to_json()\n\n # the same with multiple columns threw segfaults\n df_mixed = DataFrame({"A": [binthing], "B": [1]}, columns=["A", 
"B"])\n with pytest.raises(OverflowError, match=msg):\n df_mixed.to_json()\n\n # default_handler should resolve exceptions for non-string types\n result = df_nonprintable.to_json(default_handler=str)\n expected = f'{{"A":{{"0":"{hexed}"}}}}'\n assert result == expected\n assert (\n df_mixed.to_json(default_handler=str)\n == f'{{"A":{{"0":"{hexed}"}},"B":{{"0":1}}}}'\n )\n\n def test_label_overflow(self):\n # GH14256: buffer length not checked when writing label\n result = DataFrame({"bar" * 100000: [1], "foo": [1337]}).to_json()\n expected = f'{{"{"bar" * 100000}":{{"0":1}},"foo":{{"0":1337}}}}'\n assert result == expected\n\n def test_series_non_unique_index(self):\n s = Series(["a", "b"], index=[1, 1])\n\n msg = "Series index must be unique for orient='index'"\n with pytest.raises(ValueError, match=msg):\n s.to_json(orient="index")\n\n tm.assert_series_equal(\n s,\n read_json(\n StringIO(s.to_json(orient="split")), orient="split", typ="series"\n ),\n )\n unserialized = read_json(\n StringIO(s.to_json(orient="records")), orient="records", typ="series"\n )\n tm.assert_equal(s.values, unserialized.values)\n\n def test_series_default_orient(self, string_series):\n assert string_series.to_json() == string_series.to_json(orient="index")\n\n def test_series_roundtrip_simple(self, orient, string_series, using_infer_string):\n data = StringIO(string_series.to_json(orient=orient))\n result = read_json(data, typ="series", orient=orient)\n\n expected = string_series\n if using_infer_string and orient in ("split", "index", "columns"):\n # These schemas don't contain dtypes, so we infer string\n expected.index = expected.index.astype("str")\n if orient in ("values", "records"):\n expected = expected.reset_index(drop=True)\n if orient != "split":\n expected.name = None\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", [False, None])\n def test_series_roundtrip_object(self, orient, dtype, object_series):\n data = 
StringIO(object_series.to_json(orient=orient))\n result = read_json(data, typ="series", orient=orient, dtype=dtype)\n\n expected = object_series\n if orient in ("values", "records"):\n expected = expected.reset_index(drop=True)\n if orient != "split":\n expected.name = None\n\n if using_string_dtype():\n expected = expected.astype("str")\n\n tm.assert_series_equal(result, expected)\n\n def test_series_roundtrip_empty(self, orient):\n empty_series = Series([], index=[], dtype=np.float64)\n data = StringIO(empty_series.to_json(orient=orient))\n result = read_json(data, typ="series", orient=orient)\n\n expected = empty_series.reset_index(drop=True)\n if orient in ("split"):\n expected.index = expected.index.astype(np.float64)\n\n tm.assert_series_equal(result, expected)\n\n def test_series_roundtrip_timeseries(self, orient, datetime_series):\n data = StringIO(datetime_series.to_json(orient=orient))\n result = read_json(data, typ="series", orient=orient)\n\n expected = datetime_series\n if orient in ("values", "records"):\n expected = expected.reset_index(drop=True)\n if orient != "split":\n expected.name = None\n\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize("dtype", [np.float64, int])\n def test_series_roundtrip_numeric(self, orient, dtype):\n s = Series(range(6), index=["a", "b", "c", "d", "e", "f"])\n data = StringIO(s.to_json(orient=orient))\n result = read_json(data, typ="series", orient=orient)\n\n expected = s.copy()\n if orient in ("values", "records"):\n expected = expected.reset_index(drop=True)\n\n tm.assert_series_equal(result, expected)\n\n def test_series_to_json_except(self):\n s = Series([1, 2, 3])\n msg = "Invalid value 'garbage' for option 'orient'"\n with pytest.raises(ValueError, match=msg):\n s.to_json(orient="garbage")\n\n def test_series_from_json_precise_float(self):\n s = Series([4.56, 4.56, 4.56])\n result = read_json(StringIO(s.to_json()), typ="series", precise_float=True)\n tm.assert_series_equal(result, s, 
check_index_type=False)\n\n def test_series_with_dtype(self):\n # GH 21986\n s = Series([4.56, 4.56, 4.56])\n result = read_json(StringIO(s.to_json()), typ="series", dtype=np.int64)\n expected = Series([4] * 3)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "dtype,expected",\n [\n (True, Series(["2000-01-01"], dtype="datetime64[ns]")),\n (False, Series([946684800000])),\n ],\n )\n def test_series_with_dtype_datetime(self, dtype, expected):\n s = Series(["2000-01-01"], dtype="datetime64[ns]")\n data = StringIO(s.to_json())\n result = read_json(data, typ="series", dtype=dtype)\n tm.assert_series_equal(result, expected)\n\n def test_frame_from_json_precise_float(self):\n df = DataFrame([[4.56, 4.56, 4.56], [4.56, 4.56, 4.56]])\n result = read_json(StringIO(df.to_json()), precise_float=True)\n tm.assert_frame_equal(result, df)\n\n def test_typ(self):\n s = Series(range(6), index=["a", "b", "c", "d", "e", "f"], dtype="int64")\n result = read_json(StringIO(s.to_json()), typ=None)\n tm.assert_series_equal(result, s)\n\n def test_reconstruction_index(self):\n df = DataFrame([[1, 2, 3], [4, 5, 6]])\n result = read_json(StringIO(df.to_json()))\n tm.assert_frame_equal(result, df)\n\n df = DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=["A", "B", "C"])\n result = read_json(StringIO(df.to_json()))\n tm.assert_frame_equal(result, df)\n\n def test_path(self, float_frame, int_frame, datetime_frame):\n with tm.ensure_clean("test.json") as path:\n for df in [float_frame, int_frame, datetime_frame]:\n df.to_json(path)\n read_json(path)\n\n def test_axis_dates(self, datetime_series, datetime_frame):\n # frame\n json = StringIO(datetime_frame.to_json())\n result = read_json(json)\n tm.assert_frame_equal(result, datetime_frame)\n\n # series\n json = StringIO(datetime_series.to_json())\n result = read_json(json, typ="series")\n tm.assert_series_equal(result, datetime_series, check_names=False)\n assert result.name is None\n\n def test_convert_dates(self, 
datetime_series, datetime_frame):\n # frame\n df = datetime_frame\n df["date"] = Timestamp("20130101").as_unit("ns")\n\n json = StringIO(df.to_json())\n result = read_json(json)\n tm.assert_frame_equal(result, df)\n\n df["foo"] = 1.0\n json = StringIO(df.to_json(date_unit="ns"))\n\n result = read_json(json, convert_dates=False)\n expected = df.copy()\n expected["date"] = expected["date"].values.view("i8")\n expected["foo"] = expected["foo"].astype("int64")\n tm.assert_frame_equal(result, expected)\n\n # series\n ts = Series(Timestamp("20130101").as_unit("ns"), index=datetime_series.index)\n json = StringIO(ts.to_json())\n result = read_json(json, typ="series")\n tm.assert_series_equal(result, ts)\n\n @pytest.mark.parametrize("date_format", ["epoch", "iso"])\n @pytest.mark.parametrize("as_object", [True, False])\n @pytest.mark.parametrize("date_typ", [datetime.date, datetime.datetime, Timestamp])\n def test_date_index_and_values(self, date_format, as_object, date_typ):\n data = [date_typ(year=2020, month=1, day=1), pd.NaT]\n if as_object:\n data.append("a")\n\n ser = Series(data, index=data)\n result = ser.to_json(date_format=date_format)\n\n if date_format == "epoch":\n expected = '{"1577836800000":1577836800000,"null":null}'\n else:\n expected = (\n '{"2020-01-01T00:00:00.000":"2020-01-01T00:00:00.000","null":null}'\n )\n\n if as_object:\n expected = expected.replace("}", ',"a":"a"}')\n\n assert result == expected\n\n @pytest.mark.parametrize(\n "infer_word",\n [\n "trade_time",\n "date",\n "datetime",\n "sold_at",\n "modified",\n "timestamp",\n "timestamps",\n ],\n )\n def test_convert_dates_infer(self, infer_word):\n # GH10747\n\n data = [{"id": 1, infer_word: 1036713600000}, {"id": 2}]\n expected = DataFrame(\n [[1, Timestamp("2002-11-08")], [2, pd.NaT]], columns=["id", infer_word]\n )\n\n result = read_json(StringIO(ujson_dumps(data)))[["id", infer_word]]\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "date,date_unit",\n [\n 
("20130101 20:43:42.123", None),\n ("20130101 20:43:42", "s"),\n ("20130101 20:43:42.123", "ms"),\n ("20130101 20:43:42.123456", "us"),\n ("20130101 20:43:42.123456789", "ns"),\n ],\n )\n def test_date_format_frame(self, date, date_unit, datetime_frame):\n df = datetime_frame\n\n df["date"] = Timestamp(date).as_unit("ns")\n df.iloc[1, df.columns.get_loc("date")] = pd.NaT\n df.iloc[5, df.columns.get_loc("date")] = pd.NaT\n if date_unit:\n json = df.to_json(date_format="iso", date_unit=date_unit)\n else:\n json = df.to_json(date_format="iso")\n\n result = read_json(StringIO(json))\n expected = df.copy()\n tm.assert_frame_equal(result, expected)\n\n def test_date_format_frame_raises(self, datetime_frame):\n df = datetime_frame\n msg = "Invalid value 'foo' for option 'date_unit'"\n with pytest.raises(ValueError, match=msg):\n df.to_json(date_format="iso", date_unit="foo")\n\n @pytest.mark.parametrize(\n "date,date_unit",\n [\n ("20130101 20:43:42.123", None),\n ("20130101 20:43:42", "s"),\n ("20130101 20:43:42.123", "ms"),\n ("20130101 20:43:42.123456", "us"),\n ("20130101 20:43:42.123456789", "ns"),\n ],\n )\n def test_date_format_series(self, date, date_unit, datetime_series):\n ts = Series(Timestamp(date).as_unit("ns"), index=datetime_series.index)\n ts.iloc[1] = pd.NaT\n ts.iloc[5] = pd.NaT\n if date_unit:\n json = ts.to_json(date_format="iso", date_unit=date_unit)\n else:\n json = ts.to_json(date_format="iso")\n\n result = read_json(StringIO(json), typ="series")\n expected = ts.copy()\n tm.assert_series_equal(result, expected)\n\n def test_date_format_series_raises(self, datetime_series):\n ts = Series(Timestamp("20130101 20:43:42.123"), index=datetime_series.index)\n msg = "Invalid value 'foo' for option 'date_unit'"\n with pytest.raises(ValueError, match=msg):\n ts.to_json(date_format="iso", date_unit="foo")\n\n @pytest.mark.parametrize("unit", ["s", "ms", "us", "ns"])\n def test_date_unit(self, unit, datetime_frame):\n df = datetime_frame\n df["date"] = 
Timestamp("20130101 20:43:42").as_unit("ns")\n dl = df.columns.get_loc("date")\n df.iloc[1, dl] = Timestamp("19710101 20:43:42")\n df.iloc[2, dl] = Timestamp("21460101 20:43:42")\n df.iloc[4, dl] = pd.NaT\n\n json = df.to_json(date_format="epoch", date_unit=unit)\n\n # force date unit\n result = read_json(StringIO(json), date_unit=unit)\n tm.assert_frame_equal(result, df)\n\n # detect date unit\n result = read_json(StringIO(json), date_unit=None)\n tm.assert_frame_equal(result, df)\n\n @pytest.mark.parametrize("unit", ["s", "ms", "us"])\n def test_iso_non_nano_datetimes(self, unit):\n # Test that numpy datetimes\n # in an Index or a column with non-nano resolution can be serialized\n # correctly\n # GH53686\n index = DatetimeIndex(\n [np.datetime64("2023-01-01T11:22:33.123456", unit)],\n dtype=f"datetime64[{unit}]",\n )\n df = DataFrame(\n {\n "date": Series(\n [np.datetime64("2022-01-01T11:22:33.123456", unit)],\n dtype=f"datetime64[{unit}]",\n index=index,\n ),\n "date_obj": Series(\n [np.datetime64("2023-01-01T11:22:33.123456", unit)],\n dtype=object,\n index=index,\n ),\n },\n )\n\n buf = StringIO()\n df.to_json(buf, date_format="iso", date_unit=unit)\n buf.seek(0)\n\n # read_json always reads datetimes in nanosecond resolution\n # TODO: check_dtype/check_index_type should be removable\n # once read_json gets non-nano support\n tm.assert_frame_equal(\n read_json(buf, convert_dates=["date", "date_obj"]),\n df,\n check_index_type=False,\n check_dtype=False,\n )\n\n def test_weird_nested_json(self):\n # this used to core dump the parser\n s = r"""{\n "status": "success",\n "data": {\n "posts": [\n {\n "id": 1,\n "title": "A blog post",\n "body": "Some useful content"\n },\n {\n "id": 2,\n "title": "Another blog post",\n "body": "More content"\n }\n ]\n }\n }"""\n read_json(StringIO(s))\n\n def test_doc_example(self):\n dfj2 = DataFrame(\n np.random.default_rng(2).standard_normal((5, 2)), columns=list("AB")\n )\n dfj2["date"] = Timestamp("20130101")\n dfj2["ints"] 
= range(5)\n dfj2["bools"] = True\n dfj2.index = date_range("20130101", periods=5)\n\n json = StringIO(dfj2.to_json())\n result = read_json(json, dtype={"ints": np.int64, "bools": np.bool_})\n tm.assert_frame_equal(result, result)\n\n def test_round_trip_exception(self, datapath):\n # GH 3867\n path = datapath("io", "json", "data", "teams.csv")\n df = pd.read_csv(path)\n s = df.to_json()\n\n result = read_json(StringIO(s))\n res = result.reindex(index=df.index, columns=df.columns)\n msg = "The 'downcast' keyword in fillna is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=msg):\n res = res.fillna(np.nan, downcast=False)\n tm.assert_frame_equal(res, df)\n\n @pytest.mark.network\n @pytest.mark.single_cpu\n @pytest.mark.parametrize(\n "field,dtype",\n [\n ["created_at", pd.DatetimeTZDtype(tz="UTC")],\n ["closed_at", "datetime64[ns]"],\n ["updated_at", pd.DatetimeTZDtype(tz="UTC")],\n ],\n )\n def test_url(self, field, dtype, httpserver):\n data = '{"created_at": ["2023-06-23T18:21:36Z"], "closed_at": ["2023-06-23T18:21:36"], "updated_at": ["2023-06-23T18:21:36Z"]}\n' # noqa: E501\n httpserver.serve_content(content=data)\n result = read_json(httpserver.url, convert_dates=True)\n assert result[field].dtype == dtype\n\n def test_timedelta(self):\n converter = lambda x: pd.to_timedelta(x, unit="ms")\n\n ser = Series([timedelta(23), timedelta(seconds=5)])\n assert ser.dtype == "timedelta64[ns]"\n\n result = read_json(StringIO(ser.to_json()), typ="series").apply(converter)\n tm.assert_series_equal(result, ser)\n\n ser = Series([timedelta(23), timedelta(seconds=5)], index=Index([0, 1]))\n assert ser.dtype == "timedelta64[ns]"\n result = read_json(StringIO(ser.to_json()), typ="series").apply(converter)\n tm.assert_series_equal(result, ser)\n\n frame = DataFrame([timedelta(23), timedelta(seconds=5)])\n assert frame[0].dtype == "timedelta64[ns]"\n tm.assert_frame_equal(\n frame, read_json(StringIO(frame.to_json())).apply(converter)\n )\n\n def 
test_timedelta2(self):\n frame = DataFrame(\n {\n "a": [timedelta(days=23), timedelta(seconds=5)],\n "b": [1, 2],\n "c": date_range(start="20130101", periods=2),\n }\n )\n data = StringIO(frame.to_json(date_unit="ns"))\n result = read_json(data)\n result["a"] = pd.to_timedelta(result.a, unit="ns")\n result["c"] = pd.to_datetime(result.c)\n tm.assert_frame_equal(frame, result)\n\n def test_mixed_timedelta_datetime(self):\n td = timedelta(23)\n ts = Timestamp("20130101")\n frame = DataFrame({"a": [td, ts]}, dtype=object)\n\n expected = DataFrame(\n {"a": [pd.Timedelta(td).as_unit("ns")._value, ts.as_unit("ns")._value]}\n )\n data = StringIO(frame.to_json(date_unit="ns"))\n result = read_json(data, dtype={"a": "int64"})\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n @pytest.mark.parametrize("as_object", [True, False])\n @pytest.mark.parametrize("date_format", ["iso", "epoch"])\n @pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])\n def test_timedelta_to_json(self, as_object, date_format, timedelta_typ):\n # GH28156: to_json not correctly formatting Timedelta\n data = [timedelta_typ(days=1), timedelta_typ(days=2), pd.NaT]\n if as_object:\n data.append("a")\n\n ser = Series(data, index=data)\n if date_format == "iso":\n expected = (\n '{"P1DT0H0M0S":"P1DT0H0M0S","P2DT0H0M0S":"P2DT0H0M0S","null":null}'\n )\n else:\n expected = '{"86400000":86400000,"172800000":172800000,"null":null}'\n\n if as_object:\n expected = expected.replace("}", ',"a":"a"}')\n\n result = ser.to_json(date_format=date_format)\n assert result == expected\n\n @pytest.mark.parametrize("as_object", [True, False])\n @pytest.mark.parametrize("timedelta_typ", [pd.Timedelta, timedelta])\n def test_timedelta_to_json_fractional_precision(self, as_object, timedelta_typ):\n data = [timedelta_typ(milliseconds=42)]\n ser = Series(data, index=data)\n if as_object:\n ser = ser.astype(object)\n\n result = ser.to_json()\n expected = '{"42":42}'\n assert result == expected\n\n 
def test_default_handler(self):\n value = object()\n frame = DataFrame({"a": [7, value]})\n expected = DataFrame({"a": [7, str(value)]})\n result = read_json(StringIO(frame.to_json(default_handler=str)))\n tm.assert_frame_equal(expected, result, check_index_type=False)\n\n def test_default_handler_indirect(self):\n def default(obj):\n if isinstance(obj, complex):\n return [("mathjs", "Complex"), ("re", obj.real), ("im", obj.imag)]\n return str(obj)\n\n df_list = [\n 9,\n DataFrame(\n {"a": [1, "STR", complex(4, -5)], "b": [float("nan"), None, "N/A"]},\n columns=["a", "b"],\n ),\n ]\n expected = (\n '[9,[[1,null],["STR",null],[[["mathjs","Complex"],'\n '["re",4.0],["im",-5.0]],"N\\/A"]]]'\n )\n assert (\n ujson_dumps(df_list, default_handler=default, orient="values") == expected\n )\n\n def test_default_handler_numpy_unsupported_dtype(self):\n # GH12554 to_json raises 'Unhandled numpy dtype 15'\n df = DataFrame(\n {"a": [1, 2.3, complex(4, -5)], "b": [float("nan"), None, complex(1.2, 0)]},\n columns=["a", "b"],\n )\n expected = (\n '[["(1+0j)","(nan+0j)"],'\n '["(2.3+0j)","(nan+0j)"],'\n '["(4-5j)","(1.2+0j)"]]'\n )\n assert df.to_json(default_handler=str, orient="values") == expected\n\n def test_default_handler_raises(self):\n msg = "raisin"\n\n def my_handler_raises(obj):\n raise TypeError(msg)\n\n with pytest.raises(TypeError, match=msg):\n DataFrame({"a": [1, 2, object()]}).to_json(\n default_handler=my_handler_raises\n )\n with pytest.raises(TypeError, match=msg):\n DataFrame({"a": [1, 2, complex(4, -5)]}).to_json(\n default_handler=my_handler_raises\n )\n\n def test_categorical(self):\n # GH4377 df.to_json segfaults with non-ndarray blocks\n df = DataFrame({"A": ["a", "b", "c", "a", "b", "b", "a"]})\n df["B"] = df["A"]\n expected = df.to_json()\n\n df["B"] = df["A"].astype("category")\n assert expected == df.to_json()\n\n s = df["A"]\n sc = df["B"]\n assert s.to_json() == sc.to_json()\n\n def test_datetime_tz(self):\n # GH4377 df.to_json segfaults with 
non-ndarray blocks\n tz_range = date_range("20130101", periods=3, tz="US/Eastern")\n tz_naive = tz_range.tz_convert("utc").tz_localize(None)\n\n df = DataFrame({"A": tz_range, "B": date_range("20130101", periods=3)})\n\n df_naive = df.copy()\n df_naive["A"] = tz_naive\n expected = df_naive.to_json()\n assert expected == df.to_json()\n\n stz = Series(tz_range)\n s_naive = Series(tz_naive)\n assert stz.to_json() == s_naive.to_json()\n\n def test_sparse(self):\n # GH4377 df.to_json segfaults with non-ndarray blocks\n df = DataFrame(np.random.default_rng(2).standard_normal((10, 4)))\n df.loc[:8] = np.nan\n\n sdf = df.astype("Sparse")\n expected = df.to_json()\n assert expected == sdf.to_json()\n\n s = Series(np.random.default_rng(2).standard_normal(10))\n s.loc[:8] = np.nan\n ss = s.astype("Sparse")\n\n expected = s.to_json()\n assert expected == ss.to_json()\n\n @pytest.mark.parametrize(\n "ts",\n [\n Timestamp("2013-01-10 05:00:00Z"),\n Timestamp("2013-01-10 00:00:00", tz="US/Eastern"),\n Timestamp("2013-01-10 00:00:00-0500"),\n ],\n )\n def test_tz_is_utc(self, ts):\n exp = '"2013-01-10T05:00:00.000Z"'\n\n assert ujson_dumps(ts, iso_dates=True) == exp\n dt = ts.to_pydatetime()\n assert ujson_dumps(dt, iso_dates=True) == exp\n\n def test_tz_is_naive(self):\n ts = Timestamp("2013-01-10 05:00:00")\n exp = '"2013-01-10T05:00:00.000"'\n\n assert ujson_dumps(ts, iso_dates=True) == exp\n dt = ts.to_pydatetime()\n assert ujson_dumps(dt, iso_dates=True) == exp\n\n @pytest.mark.parametrize(\n "tz_range",\n [\n date_range("2013-01-01 05:00:00Z", periods=2),\n date_range("2013-01-01 00:00:00", periods=2, tz="US/Eastern"),\n date_range("2013-01-01 00:00:00-0500", periods=2),\n ],\n )\n def test_tz_range_is_utc(self, tz_range):\n exp = '["2013-01-01T05:00:00.000Z","2013-01-02T05:00:00.000Z"]'\n dfexp = (\n '{"DT":{'\n '"0":"2013-01-01T05:00:00.000Z",'\n '"1":"2013-01-02T05:00:00.000Z"}}'\n )\n\n assert ujson_dumps(tz_range, iso_dates=True) == exp\n dti = DatetimeIndex(tz_range)\n 
# Ensure datetimes in object array are serialized correctly\n # in addition to the normal DTI case\n assert ujson_dumps(dti, iso_dates=True) == exp\n assert ujson_dumps(dti.astype(object), iso_dates=True) == exp\n df = DataFrame({"DT": dti})\n result = ujson_dumps(df, iso_dates=True)\n assert result == dfexp\n assert ujson_dumps(df.astype({"DT": object}), iso_dates=True)\n\n def test_tz_range_is_naive(self):\n dti = date_range("2013-01-01 05:00:00", periods=2)\n\n exp = '["2013-01-01T05:00:00.000","2013-01-02T05:00:00.000"]'\n dfexp = '{"DT":{"0":"2013-01-01T05:00:00.000","1":"2013-01-02T05:00:00.000"}}'\n\n # Ensure datetimes in object array are serialized correctly\n # in addition to the normal DTI case\n assert ujson_dumps(dti, iso_dates=True) == exp\n assert ujson_dumps(dti.astype(object), iso_dates=True) == exp\n df = DataFrame({"DT": dti})\n result = ujson_dumps(df, iso_dates=True)\n assert result == dfexp\n assert ujson_dumps(df.astype({"DT": object}), iso_dates=True)\n\n def test_read_inline_jsonl(self):\n # GH9180\n\n result = read_json(StringIO('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n'), lines=True)\n expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.single_cpu\n @td.skip_if_not_us_locale\n def test_read_s3_jsonl(self, s3_public_bucket_with_data, s3so):\n # GH17200\n\n result = read_json(\n f"s3n://{s3_public_bucket_with_data.name}/items.jsonl",\n lines=True,\n storage_options=s3so,\n )\n expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])\n tm.assert_frame_equal(result, expected)\n\n def test_read_local_jsonl(self):\n # GH17200\n with tm.ensure_clean("tmp_items.json") as path:\n with open(path, "w", encoding="utf-8") as infile:\n infile.write('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n')\n result = read_json(path, lines=True)\n expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])\n tm.assert_frame_equal(result, expected)\n\n def test_read_jsonl_unicode_chars(self):\n # GH15132: 
non-ascii unicode characters\n # \u201d == RIGHT DOUBLE QUOTATION MARK\n\n # simulate file handle\n json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'\n json = StringIO(json)\n result = read_json(json, lines=True)\n expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])\n tm.assert_frame_equal(result, expected)\n\n # simulate string\n json = StringIO('{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n')\n result = read_json(json, lines=True)\n expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize("bigNum", [sys.maxsize + 1, -(sys.maxsize + 2)])\n def test_to_json_large_numbers(self, bigNum):\n # GH34473\n series = Series(bigNum, dtype=object, index=["articleId"])\n json = series.to_json()\n expected = '{"articleId":' + str(bigNum) + "}"\n assert json == expected\n\n df = DataFrame(bigNum, dtype=object, index=["articleId"], columns=[0])\n json = df.to_json()\n expected = '{"0":{"articleId":' + str(bigNum) + "}}"\n assert json == expected\n\n @pytest.mark.parametrize("bigNum", [-(2**63) - 1, 2**64])\n def test_read_json_large_numbers(self, bigNum):\n # GH20599, 26068\n json = StringIO('{"articleId":' + str(bigNum) + "}")\n msg = r"Value is too small|Value is too big"\n with pytest.raises(ValueError, match=msg):\n read_json(json)\n\n json = StringIO('{"0":{"articleId":' + str(bigNum) + "}}")\n with pytest.raises(ValueError, match=msg):\n read_json(json)\n\n def test_read_json_large_numbers2(self):\n # GH18842\n json = '{"articleId": "1404366058080022500245"}'\n json = StringIO(json)\n result = read_json(json, typ="series")\n expected = Series(1.404366e21, index=["articleId"])\n tm.assert_series_equal(result, expected)\n\n json = '{"0": {"articleId": "1404366058080022500245"}}'\n json = StringIO(json)\n result = read_json(json)\n expected = DataFrame(1.404366e21, index=["articleId"], columns=[0])\n tm.assert_frame_equal(result, 
expected)\n\n def test_to_jsonl(self):\n # GH9180\n df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])\n result = df.to_json(orient="records", lines=True)\n expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n'\n assert result == expected\n\n df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])\n result = df.to_json(orient="records", lines=True)\n expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n'\n assert result == expected\n tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)\n\n # GH15096: escaped characters in columns and data\n df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])\n result = df.to_json(orient="records", lines=True)\n expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n'\n assert result == expected\n\n tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)\n\n # TODO: there is a near-identical test for pytables; can we share?\n @pytest.mark.xfail(reason="GH#13774 encoding kwarg not supported", raises=TypeError)\n @pytest.mark.parametrize(\n "val",\n [\n [b"E\xc9, 17", b"", b"a", b"b", b"c"],\n [b"E\xc9, 17", b"a", b"b", b"c"],\n [b"EE, 17", b"", b"a", b"b", b"c"],\n [b"E\xc9, 17", b"\xf8\xfc", b"a", b"b", b"c"],\n [b"", b"a", b"b", b"c"],\n [b"\xf8\xfc", b"a", b"b", b"c"],\n [b"A\xf8\xfc", b"", b"a", b"b", b"c"],\n [np.nan, b"", b"b", b"c"],\n [b"A\xf8\xfc", np.nan, b"", b"b", b"c"],\n ],\n )\n @pytest.mark.parametrize("dtype", ["category", object])\n def test_latin_encoding(self, dtype, val):\n # GH 13774\n ser = Series(\n [x.decode("latin-1") if isinstance(x, bytes) else x for x in val],\n dtype=dtype,\n )\n encoding = "latin-1"\n with tm.ensure_clean("test.json") as path:\n ser.to_json(path, encoding=encoding)\n retr = read_json(StringIO(path), encoding=encoding)\n tm.assert_series_equal(ser, retr, check_categorical=False)\n\n def test_data_frame_size_after_to_json(self):\n # GH15344\n df = DataFrame({"a": [str(1)]})\n\n size_before = 
df.memory_usage(index=True, deep=True).sum()\n df.to_json()\n size_after = df.memory_usage(index=True, deep=True).sum()\n\n assert size_before == size_after\n\n @pytest.mark.parametrize(\n "index", [None, [1, 2], [1.0, 2.0], ["a", "b"], ["1", "2"], ["1.", "2."]]\n )\n @pytest.mark.parametrize("columns", [["a", "b"], ["1", "2"], ["1.", "2."]])\n def test_from_json_to_json_table_index_and_columns(self, index, columns):\n # GH25433 GH25435\n expected = DataFrame([[1, 2], [3, 4]], index=index, columns=columns)\n dfjson = expected.to_json(orient="table")\n\n result = read_json(StringIO(dfjson), orient="table")\n tm.assert_frame_equal(result, expected)\n\n def test_from_json_to_json_table_dtypes(self):\n # GH21345\n expected = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})\n dfjson = expected.to_json(orient="table")\n result = read_json(StringIO(dfjson), orient="table")\n tm.assert_frame_equal(result, expected)\n\n # TODO: We are casting to string which coerces None to NaN before casting back\n # to object, ending up with incorrect na values\n @pytest.mark.xfail(using_string_dtype(), reason="incorrect na conversion")\n @pytest.mark.parametrize("orient", ["split", "records", "index", "columns"])\n def test_to_json_from_json_columns_dtypes(self, orient):\n # GH21892 GH33205\n expected = DataFrame.from_dict(\n {\n "Integer": Series([1, 2, 3], dtype="int64"),\n "Float": Series([None, 2.0, 3.0], dtype="float64"),\n "Object": Series([None, "", "c"], dtype="object"),\n "Bool": Series([True, False, True], dtype="bool"),\n "Category": Series(["a", "b", None], dtype="category"),\n "Datetime": Series(\n ["2020-01-01", None, "2020-01-03"], dtype="datetime64[ns]"\n ),\n }\n )\n dfjson = expected.to_json(orient=orient)\n\n result = read_json(\n StringIO(dfjson),\n orient=orient,\n dtype={\n "Integer": "int64",\n "Float": "float64",\n "Object": "object",\n "Bool": "bool",\n "Category": "category",\n "Datetime": "datetime64[ns]",\n },\n )\n tm.assert_frame_equal(result, 
expected)\n\n @pytest.mark.parametrize("dtype", [True, {"b": int, "c": int}])\n def test_read_json_table_dtype_raises(self, dtype):\n # GH21345\n df = DataFrame({"a": [1, 2], "b": [3.0, 4.0], "c": ["5", "6"]})\n dfjson = df.to_json(orient="table")\n msg = "cannot pass both dtype and orient='table'"\n with pytest.raises(ValueError, match=msg):\n read_json(dfjson, orient="table", dtype=dtype)\n\n @pytest.mark.parametrize("orient", ["index", "columns", "records", "values"])\n def test_read_json_table_empty_axes_dtype(self, orient):\n # GH28558\n\n expected = DataFrame()\n result = read_json(StringIO("{}"), orient=orient, convert_axes=True)\n tm.assert_index_equal(result.index, expected.index)\n tm.assert_index_equal(result.columns, expected.columns)\n\n def test_read_json_table_convert_axes_raises(self):\n # GH25433 GH25435\n df = DataFrame([[1, 2], [3, 4]], index=[1.0, 2.0], columns=["1.", "2."])\n dfjson = df.to_json(orient="table")\n msg = "cannot pass both convert_axes and orient='table'"\n with pytest.raises(ValueError, match=msg):\n read_json(dfjson, orient="table", convert_axes=True)\n\n @pytest.mark.parametrize(\n "data, expected",\n [\n (\n DataFrame([[1, 2], [4, 5]], columns=["a", "b"]),\n {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},\n ),\n (\n DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo"),\n {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},\n ),\n (\n DataFrame(\n [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]\n ),\n {"columns": ["a", "b"], "data": [[1, 2], [4, 5]]},\n ),\n (Series([1, 2, 3], name="A"), {"name": "A", "data": [1, 2, 3]}),\n (\n Series([1, 2, 3], name="A").rename_axis("foo"),\n {"name": "A", "data": [1, 2, 3]},\n ),\n (\n Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]]),\n {"name": "A", "data": [1, 2]},\n ),\n ],\n )\n def test_index_false_to_json_split(self, data, expected):\n # GH 17394\n # Testing index=False in to_json with orient='split'\n\n result = data.to_json(orient="split", 
index=False)\n result = json.loads(result)\n\n assert result == expected\n\n @pytest.mark.parametrize(\n "data",\n [\n (DataFrame([[1, 2], [4, 5]], columns=["a", "b"])),\n (DataFrame([[1, 2], [4, 5]], columns=["a", "b"]).rename_axis("foo")),\n (\n DataFrame(\n [[1, 2], [4, 5]], columns=["a", "b"], index=[["a", "b"], ["c", "d"]]\n )\n ),\n (Series([1, 2, 3], name="A")),\n (Series([1, 2, 3], name="A").rename_axis("foo")),\n (Series([1, 2], name="A", index=[["a", "b"], ["c", "d"]])),\n ],\n )\n def test_index_false_to_json_table(self, data):\n # GH 17394\n # Testing index=False in to_json with orient='table'\n\n result = data.to_json(orient="table", index=False)\n result = json.loads(result)\n\n expected = {\n "schema": pd.io.json.build_table_schema(data, index=False),\n "data": DataFrame(data).to_dict(orient="records"),\n }\n\n assert result == expected\n\n @pytest.mark.parametrize("orient", ["index", "columns"])\n def test_index_false_error_to_json(self, orient):\n # GH 17394, 25513\n # Testing error message from to_json with index=False\n\n df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"])\n\n msg = (\n "'index=False' is only valid when 'orient' is 'split', "\n "'table', 'records', or 'values'"\n )\n with pytest.raises(ValueError, match=msg):\n df.to_json(orient=orient, index=False)\n\n @pytest.mark.parametrize("orient", ["records", "values"])\n def test_index_true_error_to_json(self, orient):\n # GH 25513\n # Testing error message from to_json with index=True\n\n df = DataFrame([[1, 2], [4, 5]], columns=["a", "b"])\n\n msg = (\n "'index=True' is only valid when 'orient' is 'split', "\n "'table', 'index', or 'columns'"\n )\n with pytest.raises(ValueError, match=msg):\n df.to_json(orient=orient, index=True)\n\n @pytest.mark.parametrize("orient", ["split", "table"])\n @pytest.mark.parametrize("index", [True, False])\n def test_index_false_from_json_to_json(self, orient, index):\n # GH25170\n # Test index=False in from_json to_json\n expected = DataFrame({"a": [1, 
2], "b": [3, 4]})\n dfjson = expected.to_json(orient=orient, index=index)\n result = read_json(StringIO(dfjson), orient=orient)\n tm.assert_frame_equal(result, expected)\n\n def test_read_timezone_information(self):\n # GH 25546\n result = read_json(\n StringIO('{"2019-01-01T11:00:00.000Z":88}'), typ="series", orient="index"\n )\n exp_dti = DatetimeIndex(["2019-01-01 11:00:00"], dtype="M8[ns, UTC]")\n expected = Series([88], index=exp_dti)\n tm.assert_series_equal(result, expected)\n\n @pytest.mark.parametrize(\n "url",\n [\n "s3://example-fsspec/",\n "gcs://another-fsspec/file.json",\n "https://example-site.com/data",\n "some-protocol://data.txt",\n ],\n )\n def test_read_json_with_url_value(self, url):\n # GH 36271\n result = read_json(StringIO(f'{{"url":{{"0":"{url}"}}}}'))\n expected = DataFrame({"url": [url]})\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "compression",\n ["", ".gz", ".bz2", ".tar"],\n )\n def test_read_json_with_very_long_file_path(self, compression):\n # GH 46718\n long_json_path = f'{"a" * 1000}.json{compression}'\n with pytest.raises(\n FileNotFoundError, match=f"File {long_json_path} does not exist"\n ):\n # path too long for Windows is handled in file_exists() but raises in\n # _get_data_from_filepath()\n read_json(long_json_path)\n\n @pytest.mark.parametrize(\n "date_format,key", [("epoch", 86400000), ("iso", "P1DT0H0M0S")]\n )\n def test_timedelta_as_label(self, date_format, key):\n df = DataFrame([[1]], columns=[pd.Timedelta("1D")])\n expected = f'{{"{key}":{{"0":1}}}}'\n result = df.to_json(date_format=date_format)\n\n assert result == expected\n\n @pytest.mark.parametrize(\n "orient,expected",\n [\n ("index", "{\"('a', 'b')\":{\"('c', 'd')\":1}}"),\n ("columns", "{\"('c', 'd')\":{\"('a', 'b')\":1}}"),\n # TODO: the below have separate encoding procedures\n pytest.param(\n "split",\n "",\n marks=pytest.mark.xfail(\n reason="Produces JSON but not in a consistent manner"\n ),\n ),\n pytest.param(\n 
"table",\n "",\n marks=pytest.mark.xfail(\n reason="Produces JSON but not in a consistent manner"\n ),\n ),\n ],\n )\n def test_tuple_labels(self, orient, expected):\n # GH 20500\n df = DataFrame([[1]], index=[("a", "b")], columns=[("c", "d")])\n result = df.to_json(orient=orient)\n assert result == expected\n\n @pytest.mark.parametrize("indent", [1, 2, 4])\n def test_to_json_indent(self, indent):\n # GH 12004\n df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])\n\n result = df.to_json(indent=indent)\n spaces = " " * indent\n expected = f"""{{\n{spaces}"a":{{\n{spaces}{spaces}"0":"foo",\n{spaces}{spaces}"1":"baz"\n{spaces}}},\n{spaces}"b":{{\n{spaces}{spaces}"0":"bar",\n{spaces}{spaces}"1":"qux"\n{spaces}}}\n}}"""\n\n assert result == expected\n\n @pytest.mark.skipif(\n using_string_dtype(),\n reason="Adjust expected when infer_string is default, no bug here, "\n "just a complicated parametrization",\n )\n @pytest.mark.parametrize(\n "orient,expected",\n [\n (\n "split",\n """{\n "columns":[\n "a",\n "b"\n ],\n "index":[\n 0,\n 1\n ],\n "data":[\n [\n "foo",\n "bar"\n ],\n [\n "baz",\n "qux"\n ]\n ]\n}""",\n ),\n (\n "records",\n """[\n {\n "a":"foo",\n "b":"bar"\n },\n {\n "a":"baz",\n "b":"qux"\n }\n]""",\n ),\n (\n "index",\n """{\n "0":{\n "a":"foo",\n "b":"bar"\n },\n "1":{\n "a":"baz",\n "b":"qux"\n }\n}""",\n ),\n (\n "columns",\n """{\n "a":{\n "0":"foo",\n "1":"baz"\n },\n "b":{\n "0":"bar",\n "1":"qux"\n }\n}""",\n ),\n (\n "values",\n """[\n [\n "foo",\n "bar"\n ],\n [\n "baz",\n "qux"\n ]\n]""",\n ),\n (\n "table",\n """{\n "schema":{\n "fields":[\n {\n "name":"index",\n "type":"integer"\n },\n {\n "name":"a",\n "type":"string"\n },\n {\n "name":"b",\n "type":"string"\n }\n ],\n "primaryKey":[\n "index"\n ],\n "pandas_version":"1.4.0"\n },\n "data":[\n {\n "index":0,\n "a":"foo",\n "b":"bar"\n },\n {\n "index":1,\n "a":"baz",\n "b":"qux"\n }\n ]\n}""",\n ),\n ],\n )\n def test_json_indent_all_orients(self, orient, expected):\n # GH 
12004\n df = DataFrame([["foo", "bar"], ["baz", "qux"]], columns=["a", "b"])\n result = df.to_json(orient=orient, indent=4)\n assert result == expected\n\n def test_json_negative_indent_raises(self):\n with pytest.raises(ValueError, match="must be a nonnegative integer"):\n DataFrame().to_json(indent=-1)\n\n def test_emca_262_nan_inf_support(self):\n # GH 12213\n data = StringIO(\n '["a", NaN, "NaN", Infinity, "Infinity", -Infinity, "-Infinity"]'\n )\n result = read_json(data)\n expected = DataFrame(\n ["a", None, "NaN", np.inf, "Infinity", -np.inf, "-Infinity"]\n )\n tm.assert_frame_equal(result, expected)\n\n def test_frame_int_overflow(self):\n # GH 30320\n encoded_json = json.dumps([{"col": "31900441201190696999"}, {"col": "Text"}])\n expected = DataFrame({"col": ["31900441201190696999", "Text"]})\n result = read_json(StringIO(encoded_json))\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.parametrize(\n "dataframe,expected",\n [\n (\n DataFrame({"x": [1, 2, 3], "y": ["a", "b", "c"]}),\n '{"(0, \'x\')":1,"(0, \'y\')":"a","(1, \'x\')":2,'\n '"(1, \'y\')":"b","(2, \'x\')":3,"(2, \'y\')":"c"}',\n )\n ],\n )\n def test_json_multiindex(self, dataframe, expected):\n series = dataframe.stack(future_stack=True)\n result = series.to_json(orient="index")\n assert result == expected\n\n @pytest.mark.single_cpu\n def test_to_s3(self, s3_public_bucket, s3so):\n # GH 28375\n mock_bucket_name, target_file = s3_public_bucket.name, "test.json"\n df = DataFrame({"x": [1, 2, 3], "y": [2, 4, 6]})\n df.to_json(f"s3://{mock_bucket_name}/{target_file}", storage_options=s3so)\n timeout = 5\n while True:\n if target_file in (obj.key for obj in s3_public_bucket.objects.all()):\n break\n time.sleep(0.1)\n timeout -= 0.1\n assert timeout > 0, "Timed out waiting for file to appear on moto"\n\n def test_json_pandas_nulls(self, nulls_fixture, request):\n # GH 31615\n if isinstance(nulls_fixture, Decimal):\n mark = pytest.mark.xfail(reason="not implemented")\n 
request.applymarker(mark)\n\n result = DataFrame([[nulls_fixture]]).to_json()\n assert result == '{"0":{"0":null}}'\n\n def test_readjson_bool_series(self):\n # GH31464\n result = read_json(StringIO("[true, true, false]"), typ="series")\n expected = Series([True, True, False])\n tm.assert_series_equal(result, expected)\n\n def test_to_json_multiindex_escape(self):\n # GH 15273\n df = DataFrame(\n True,\n index=date_range("2017-01-20", "2017-01-23"),\n columns=["foo", "bar"],\n ).stack(future_stack=True)\n result = df.to_json()\n expected = (\n "{\"(Timestamp('2017-01-20 00:00:00'), 'foo')\":true,"\n "\"(Timestamp('2017-01-20 00:00:00'), 'bar')\":true,"\n "\"(Timestamp('2017-01-21 00:00:00'), 'foo')\":true,"\n "\"(Timestamp('2017-01-21 00:00:00'), 'bar')\":true,"\n "\"(Timestamp('2017-01-22 00:00:00'), 'foo')\":true,"\n "\"(Timestamp('2017-01-22 00:00:00'), 'bar')\":true,"\n "\"(Timestamp('2017-01-23 00:00:00'), 'foo')\":true,"\n "\"(Timestamp('2017-01-23 00:00:00'), 'bar')\":true}"\n )\n assert result == expected\n\n def test_to_json_series_of_objects(self):\n class _TestObject:\n def __init__(self, a, b, _c, d) -> None:\n self.a = a\n self.b = b\n self._c = _c\n self.d = d\n\n def e(self):\n return 5\n\n # JSON keys should be all non-callable non-underscore attributes, see GH-42768\n series = Series([_TestObject(a=1, b=2, _c=3, d=4)])\n assert json.loads(series.to_json()) == {"0": {"a": 1, "b": 2, "d": 4}}\n\n @pytest.mark.parametrize(\n "data,expected",\n [\n (\n Series({0: -6 + 8j, 1: 0 + 1j, 2: 9 - 5j}),\n '{"0":{"imag":8.0,"real":-6.0},'\n '"1":{"imag":1.0,"real":0.0},'\n '"2":{"imag":-5.0,"real":9.0}}',\n ),\n (\n Series({0: -9.39 + 0.66j, 1: 3.95 + 9.32j, 2: 4.03 - 0.17j}),\n '{"0":{"imag":0.66,"real":-9.39},'\n '"1":{"imag":9.32,"real":3.95},'\n '"2":{"imag":-0.17,"real":4.03}}',\n ),\n (\n DataFrame([[-2 + 3j, -1 - 0j], [4 - 3j, -0 - 10j]]),\n '{"0":{"0":{"imag":3.0,"real":-2.0},'\n '"1":{"imag":-3.0,"real":4.0}},'\n '"1":{"0":{"imag":0.0,"real":-1.0},'\n 
'"1":{"imag":-10.0,"real":0.0}}}',\n ),\n (\n DataFrame(\n [[-0.28 + 0.34j, -1.08 - 0.39j], [0.41 - 0.34j, -0.78 - 1.35j]]\n ),\n '{"0":{"0":{"imag":0.34,"real":-0.28},'\n '"1":{"imag":-0.34,"real":0.41}},'\n '"1":{"0":{"imag":-0.39,"real":-1.08},'\n '"1":{"imag":-1.35,"real":-0.78}}}',\n ),\n ],\n )\n def test_complex_data_tojson(self, data, expected):\n # GH41174\n result = data.to_json()\n assert result == expected\n\n def test_json_uint64(self):\n # GH21073\n expected = (\n '{"columns":["col1"],"index":[0,1],'\n '"data":[[13342205958987758245],[12388075603347835679]]}'\n )\n df = DataFrame(data={"col1": [13342205958987758245, 12388075603347835679]})\n result = df.to_json(orient="split")\n assert result == expected\n\n @pytest.mark.xfail(using_string_dtype(), reason="TODO(infer_string)", strict=False)\n def test_read_json_dtype_backend(\n self, string_storage, dtype_backend, orient, using_infer_string\n ):\n # GH#50750\n df = DataFrame(\n {\n "a": Series([1, np.nan, 3], dtype="Int64"),\n "b": Series([1, 2, 3], dtype="Int64"),\n "c": Series([1.5, np.nan, 2.5], dtype="Float64"),\n "d": Series([1.5, 2.0, 2.5], dtype="Float64"),\n "e": [True, False, None],\n "f": [True, False, True],\n "g": ["a", "b", "c"],\n "h": ["a", "b", None],\n }\n )\n\n out = df.to_json(orient=orient)\n with pd.option_context("mode.string_storage", string_storage):\n result = read_json(\n StringIO(out), dtype_backend=dtype_backend, orient=orient\n )\n\n if dtype_backend == "pyarrow":\n pa = pytest.importorskip("pyarrow")\n string_dtype = pd.ArrowDtype(pa.string())\n else:\n string_dtype = pd.StringDtype(string_storage)\n\n expected = DataFrame(\n {\n "a": Series([1, np.nan, 3], dtype="Int64"),\n "b": Series([1, 2, 3], dtype="Int64"),\n "c": Series([1.5, np.nan, 2.5], dtype="Float64"),\n "d": Series([1.5, 2.0, 2.5], dtype="Float64"),\n "e": Series([True, False, NA], dtype="boolean"),\n "f": Series([True, False, True], dtype="boolean"),\n "g": Series(["a", "b", "c"], dtype=string_dtype),\n "h": 
Series(["a", "b", None], dtype=string_dtype),\n }\n )\n\n if dtype_backend == "pyarrow":\n pa = pytest.importorskip("pyarrow")\n from pandas.arrays import ArrowExtensionArray\n\n expected = DataFrame(\n {\n col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))\n for col in expected.columns\n }\n )\n\n if orient == "values":\n expected.columns = list(range(8))\n\n # the storage of the str columns' Index is also affected by the\n # string_storage setting -> ignore that for checking the result\n tm.assert_frame_equal(result, expected, check_column_type=False)\n\n @pytest.mark.parametrize("orient", ["split", "records", "index"])\n def test_read_json_nullable_series(self, string_storage, dtype_backend, orient):\n # GH#50750\n pa = pytest.importorskip("pyarrow")\n ser = Series([1, np.nan, 3], dtype="Int64")\n\n out = ser.to_json(orient=orient)\n with pd.option_context("mode.string_storage", string_storage):\n result = read_json(\n StringIO(out), dtype_backend=dtype_backend, orient=orient, typ="series"\n )\n\n expected = Series([1, np.nan, 3], dtype="Int64")\n\n if dtype_backend == "pyarrow":\n from pandas.arrays import ArrowExtensionArray\n\n expected = Series(ArrowExtensionArray(pa.array(expected, from_pandas=True)))\n\n tm.assert_series_equal(result, expected)\n\n def test_invalid_dtype_backend(self):\n msg = (\n "dtype_backend numpy is invalid, only 'numpy_nullable' and "\n "'pyarrow' are allowed."\n )\n with pytest.raises(ValueError, match=msg):\n read_json("test", dtype_backend="numpy")\n\n\ndef test_invalid_engine():\n # GH 48893\n ser = Series(range(1))\n out = ser.to_json()\n with pytest.raises(ValueError, match="The engine type foo"):\n read_json(out, engine="foo")\n\n\ndef test_pyarrow_engine_lines_false():\n # GH 48893\n ser = Series(range(1))\n out = ser.to_json()\n with pytest.raises(ValueError, match="currently pyarrow engine only supports"):\n read_json(out, engine="pyarrow", lines=False)\n\n\ndef 
def test_read_json_lines_rangeindex():
    # GH 57429: lines=True should yield a RangeIndex, not an object index.
    data = """
{"a": 1, "b": 2}
{"a": 3, "b": 4}
"""
    frame = read_json(StringIO(data), lines=True)
    tm.assert_index_equal(frame.index, RangeIndex(2), exact=True)
from collections.abc import Iterator\nfrom io import StringIO\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n read_json,\n)\nimport pandas._testing as tm\n\nfrom pandas.io.json._json import JsonReader\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\n\n@pytest.fixture\ndef lines_json_df():\n df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})\n return df.to_json(lines=True, orient="records")\n\n\n@pytest.fixture(params=["ujson", "pyarrow"])\ndef engine(request):\n if request.param == "pyarrow":\n pytest.importorskip("pyarrow.json")\n return request.param\n\n\ndef test_read_jsonl():\n # GH9180\n result = read_json(StringIO('{"a": 1, "b": 2}\n{"b":2, "a" :1}\n'), lines=True)\n expected = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_jsonl_engine_pyarrow(datapath, engine):\n result = read_json(\n datapath("io", "json", "data", "line_delimited.json"),\n lines=True,\n engine=engine,\n )\n expected = DataFrame({"a": [1, 3, 5], "b": [2, 4, 6]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_datetime(request, engine):\n # GH33787\n if engine == "pyarrow":\n # GH 48893\n reason = "Pyarrow only supports a file path as an input and line delimited json"\n request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))\n\n df = DataFrame(\n [([1, 2], ["2020-03-05", "2020-04-08T09:58:49+00:00"], "hector")],\n columns=["accounts", "date", "name"],\n )\n json_line = df.to_json(lines=True, orient="records")\n\n if engine == "pyarrow":\n result = read_json(StringIO(json_line), engine=engine)\n else:\n result = read_json(StringIO(json_line), engine=engine)\n expected = DataFrame(\n [[1, "2020-03-05", "hector"], [2, "2020-04-08T09:58:49+00:00", "hector"]],\n columns=["accounts", "date", "name"],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef 
test_read_jsonl_unicode_chars():\n # GH15132: non-ascii unicode characters\n # \u201d == RIGHT DOUBLE QUOTATION MARK\n\n # simulate file handle\n json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'\n json = StringIO(json)\n result = read_json(json, lines=True)\n expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])\n tm.assert_frame_equal(result, expected)\n\n # simulate string\n json = '{"a": "foo”", "b": "bar"}\n{"a": "foo", "b": "bar"}\n'\n result = read_json(StringIO(json), lines=True)\n expected = DataFrame([["foo\u201d", "bar"], ["foo", "bar"]], columns=["a", "b"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_to_jsonl():\n # GH9180\n df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])\n result = df.to_json(orient="records", lines=True)\n expected = '{"a":1,"b":2}\n{"a":1,"b":2}\n'\n assert result == expected\n\n df = DataFrame([["foo}", "bar"], ['foo"', "bar"]], columns=["a", "b"])\n result = df.to_json(orient="records", lines=True)\n expected = '{"a":"foo}","b":"bar"}\n{"a":"foo\\"","b":"bar"}\n'\n assert result == expected\n tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)\n\n # GH15096: escaped characters in columns and data\n df = DataFrame([["foo\\", "bar"], ['foo"', "bar"]], columns=["a\\", "b"])\n result = df.to_json(orient="records", lines=True)\n expected = '{"a\\\\":"foo\\\\","b":"bar"}\n{"a\\\\":"foo\\"","b":"bar"}\n'\n assert result == expected\n tm.assert_frame_equal(read_json(StringIO(result), lines=True), df)\n\n\ndef test_to_jsonl_count_new_lines():\n # GH36888\n df = DataFrame([[1, 2], [1, 2]], columns=["a", "b"])\n actual_new_lines_count = df.to_json(orient="records", lines=True).count("\n")\n expected_new_lines_count = 2\n assert actual_new_lines_count == expected_new_lines_count\n\n\n@pytest.mark.parametrize("chunksize", [1, 1.0])\ndef test_readjson_chunks(request, lines_json_df, chunksize, engine):\n # Basic test that read_json(chunks=True) gives the same result as\n # 
read_json(chunks=False)\n # GH17048: memory usage when lines=True\n\n if engine == "pyarrow":\n # GH 48893\n reason = (\n "Pyarrow only supports a file path as an input and line delimited json"\n "and doesn't support chunksize parameter."\n )\n request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))\n\n unchunked = read_json(StringIO(lines_json_df), lines=True)\n with read_json(\n StringIO(lines_json_df), lines=True, chunksize=chunksize, engine=engine\n ) as reader:\n chunked = pd.concat(reader)\n\n tm.assert_frame_equal(chunked, unchunked)\n\n\ndef test_readjson_chunksize_requires_lines(lines_json_df, engine):\n msg = "chunksize can only be passed if lines=True"\n with pytest.raises(ValueError, match=msg):\n with read_json(\n StringIO(lines_json_df), lines=False, chunksize=2, engine=engine\n ) as _:\n pass\n\n\ndef test_readjson_chunks_series(request, engine):\n if engine == "pyarrow":\n # GH 48893\n reason = (\n "Pyarrow only supports a file path as an input and line delimited json"\n "and doesn't support chunksize parameter."\n )\n request.applymarker(pytest.mark.xfail(reason=reason))\n\n # Test reading line-format JSON to Series with chunksize param\n s = pd.Series({"A": 1, "B": 2})\n\n strio = StringIO(s.to_json(lines=True, orient="records"))\n unchunked = read_json(strio, lines=True, typ="Series", engine=engine)\n\n strio = StringIO(s.to_json(lines=True, orient="records"))\n with read_json(\n strio, lines=True, typ="Series", chunksize=1, engine=engine\n ) as reader:\n chunked = pd.concat(reader)\n\n tm.assert_series_equal(chunked, unchunked)\n\n\ndef test_readjson_each_chunk(request, lines_json_df, engine):\n if engine == "pyarrow":\n # GH 48893\n reason = (\n "Pyarrow only supports a file path as an input and line delimited json"\n "and doesn't support chunksize parameter."\n )\n request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))\n\n # Other tests check that the final result of read_json(chunksize=True)\n # is correct. 
This checks the intermediate chunks.\n with read_json(\n StringIO(lines_json_df), lines=True, chunksize=2, engine=engine\n ) as reader:\n chunks = list(reader)\n assert chunks[0].shape == (2, 2)\n assert chunks[1].shape == (1, 2)\n\n\ndef test_readjson_chunks_from_file(request, engine):\n if engine == "pyarrow":\n # GH 48893\n reason = (\n "Pyarrow only supports a file path as an input and line delimited json"\n "and doesn't support chunksize parameter."\n )\n request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))\n\n with tm.ensure_clean("test.json") as path:\n df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})\n df.to_json(path, lines=True, orient="records")\n with read_json(path, lines=True, chunksize=1, engine=engine) as reader:\n chunked = pd.concat(reader)\n unchunked = read_json(path, lines=True, engine=engine)\n tm.assert_frame_equal(unchunked, chunked)\n\n\n@pytest.mark.parametrize("chunksize", [None, 1])\ndef test_readjson_chunks_closes(chunksize):\n with tm.ensure_clean("test.json") as path:\n df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})\n df.to_json(path, lines=True, orient="records")\n reader = JsonReader(\n path,\n orient=None,\n typ="frame",\n dtype=True,\n convert_axes=True,\n convert_dates=True,\n keep_default_dates=True,\n precise_float=False,\n date_unit=None,\n encoding=None,\n lines=True,\n chunksize=chunksize,\n compression=None,\n nrows=None,\n )\n with reader:\n reader.read()\n assert (\n reader.handles.handle.closed\n ), f"didn't close stream with chunksize = {chunksize}"\n\n\n@pytest.mark.parametrize("chunksize", [0, -1, 2.2, "foo"])\ndef test_readjson_invalid_chunksize(lines_json_df, chunksize, engine):\n msg = r"'chunksize' must be an integer >=1"\n\n with pytest.raises(ValueError, match=msg):\n with read_json(\n StringIO(lines_json_df), lines=True, chunksize=chunksize, engine=engine\n ) as _:\n pass\n\n\n@pytest.mark.parametrize("chunksize", [None, 1, 2])\ndef test_readjson_chunks_multiple_empty_lines(chunksize):\n j 
= """\n\n {"A":1,"B":4}\n\n\n\n {"A":2,"B":5}\n\n\n\n\n\n\n\n {"A":3,"B":6}\n """\n orig = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]})\n test = read_json(StringIO(j), lines=True, chunksize=chunksize)\n if chunksize is not None:\n with test:\n test = pd.concat(test)\n tm.assert_frame_equal(orig, test, obj=f"chunksize: {chunksize}")\n\n\ndef test_readjson_unicode(request, monkeypatch, engine):\n if engine == "pyarrow":\n # GH 48893\n reason = (\n "Pyarrow only supports a file path as an input and line delimited json"\n "and doesn't support chunksize parameter."\n )\n request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))\n\n with tm.ensure_clean("test.json") as path:\n monkeypatch.setattr("locale.getpreferredencoding", lambda do_setlocale: "cp949")\n with open(path, "w", encoding="utf-8") as f:\n f.write('{"£©µÀÆÖÞßéöÿ":["АБВГДабвгд가"]}')\n\n result = read_json(path, engine=engine)\n expected = DataFrame({"£©µÀÆÖÞßéöÿ": ["АБВГДабвгд가"]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("nrows", [1, 2])\ndef test_readjson_nrows(nrows, engine):\n # GH 33916\n # Test reading line-format JSON to Series with nrows param\n jsonl = """{"a": 1, "b": 2}\n {"a": 3, "b": 4}\n {"a": 5, "b": 6}\n {"a": 7, "b": 8}"""\n result = read_json(StringIO(jsonl), lines=True, nrows=nrows)\n expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("nrows,chunksize", [(2, 2), (4, 2)])\ndef test_readjson_nrows_chunks(request, nrows, chunksize, engine):\n # GH 33916\n # Test reading line-format JSON to Series with nrows and chunksize param\n if engine == "pyarrow":\n # GH 48893\n reason = (\n "Pyarrow only supports a file path as an input and line delimited json"\n "and doesn't support chunksize parameter."\n )\n request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))\n\n jsonl = """{"a": 1, "b": 2}\n {"a": 3, "b": 4}\n {"a": 5, "b": 6}\n {"a": 7, 
"b": 8}"""\n\n if engine != "pyarrow":\n with read_json(\n StringIO(jsonl), lines=True, nrows=nrows, chunksize=chunksize, engine=engine\n ) as reader:\n chunked = pd.concat(reader)\n else:\n with read_json(\n jsonl, lines=True, nrows=nrows, chunksize=chunksize, engine=engine\n ) as reader:\n chunked = pd.concat(reader)\n expected = DataFrame({"a": [1, 3, 5, 7], "b": [2, 4, 6, 8]}).iloc[:nrows]\n tm.assert_frame_equal(chunked, expected)\n\n\ndef test_readjson_nrows_requires_lines(engine):\n # GH 33916\n # Test ValueError raised if nrows is set without setting lines in read_json\n jsonl = """{"a": 1, "b": 2}\n {"a": 3, "b": 4}\n {"a": 5, "b": 6}\n {"a": 7, "b": 8}"""\n msg = "nrows can only be passed if lines=True"\n with pytest.raises(ValueError, match=msg):\n read_json(jsonl, lines=False, nrows=2, engine=engine)\n\n\ndef test_readjson_lines_chunks_fileurl(request, datapath, engine):\n # GH 27135\n # Test reading line-format JSON from file url\n if engine == "pyarrow":\n # GH 48893\n reason = (\n "Pyarrow only supports a file path as an input and line delimited json"\n "and doesn't support chunksize parameter."\n )\n request.applymarker(pytest.mark.xfail(reason=reason, raises=ValueError))\n\n df_list_expected = [\n DataFrame([[1, 2]], columns=["a", "b"], index=[0]),\n DataFrame([[3, 4]], columns=["a", "b"], index=[1]),\n DataFrame([[5, 6]], columns=["a", "b"], index=[2]),\n ]\n os_path = datapath("io", "json", "data", "line_delimited.json")\n file_url = Path(os_path).as_uri()\n with read_json(file_url, lines=True, chunksize=1, engine=engine) as url_reader:\n for index, chuck in enumerate(url_reader):\n tm.assert_frame_equal(chuck, df_list_expected[index])\n\n\ndef test_chunksize_is_incremental():\n # See https://github.com/pandas-dev/pandas/issues/34548\n jsonl = (\n """{"a": 1, "b": 2}\n {"a": 3, "b": 4}\n {"a": 5, "b": 6}\n {"a": 7, "b": 8}\n"""\n * 1000\n )\n\n class MyReader:\n def __init__(self, contents) -> None:\n self.read_count = 0\n self.stringio = 
StringIO(contents)\n\n def read(self, *args):\n self.read_count += 1\n return self.stringio.read(*args)\n\n def __iter__(self) -> Iterator:\n self.read_count += 1\n return iter(self.stringio)\n\n reader = MyReader(jsonl)\n assert len(list(read_json(reader, lines=True, chunksize=100))) > 1\n assert reader.read_count > 10\n\n\n@pytest.mark.parametrize("orient_", ["split", "index", "table"])\ndef test_to_json_append_orient(orient_):\n # GH 35849\n # Test ValueError when orient is not 'records'\n df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})\n msg = (\n r"mode='a' \(append\) is only supported when "\n "lines is True and orient is 'records'"\n )\n with pytest.raises(ValueError, match=msg):\n df.to_json(mode="a", orient=orient_)\n\n\ndef test_to_json_append_lines():\n # GH 35849\n # Test ValueError when lines is not True\n df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})\n msg = (\n r"mode='a' \(append\) is only supported when "\n "lines is True and orient is 'records'"\n )\n with pytest.raises(ValueError, match=msg):\n df.to_json(mode="a", lines=False, orient="records")\n\n\n@pytest.mark.parametrize("mode_", ["r", "x"])\ndef test_to_json_append_mode(mode_):\n # GH 35849\n # Test ValueError when mode is not supported option\n df = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})\n msg = (\n f"mode={mode_} is not a valid option."\n "Only 'w' and 'a' are currently supported."\n )\n with pytest.raises(ValueError, match=msg):\n df.to_json(mode=mode_, lines=False, orient="records")\n\n\ndef test_to_json_append_output_consistent_columns():\n # GH 35849\n # Testing that resulting output reads in as expected.\n # Testing same columns, new rows\n df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})\n df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]})\n\n expected = DataFrame({"col1": [1, 2, 3, 4], "col2": ["a", "b", "c", "d"]})\n with tm.ensure_clean("test.json") as path:\n # Save dataframes to the same file\n df1.to_json(path, lines=True, orient="records")\n 
df2.to_json(path, mode="a", lines=True, orient="records")\n\n # Read path file\n result = read_json(path, lines=True)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_to_json_append_output_inconsistent_columns():\n # GH 35849\n # Testing that resulting output reads in as expected.\n # Testing one new column, one old column, new rows\n df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})\n df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]})\n\n expected = DataFrame(\n {\n "col1": [1, 2, None, None],\n "col2": ["a", "b", "e", "f"],\n "col3": [np.nan, np.nan, "!", "#"],\n }\n )\n with tm.ensure_clean("test.json") as path:\n # Save dataframes to the same file\n df1.to_json(path, mode="a", lines=True, orient="records")\n df3.to_json(path, mode="a", lines=True, orient="records")\n\n # Read path file\n result = read_json(path, lines=True)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_to_json_append_output_different_columns():\n # GH 35849\n # Testing that resulting output reads in as expected.\n # Testing same, differing and new columns\n df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})\n df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]})\n df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]})\n df4 = DataFrame({"col4": [True, False]})\n\n expected = DataFrame(\n {\n "col1": [1, 2, 3, 4, None, None, None, None],\n "col2": ["a", "b", "c", "d", "e", "f", np.nan, np.nan],\n "col3": [np.nan, np.nan, np.nan, np.nan, "!", "#", np.nan, np.nan],\n "col4": [None, None, None, None, None, None, True, False],\n }\n ).astype({"col4": "float"})\n with tm.ensure_clean("test.json") as path:\n # Save dataframes to the same file\n df1.to_json(path, mode="a", lines=True, orient="records")\n df2.to_json(path, mode="a", lines=True, orient="records")\n df3.to_json(path, mode="a", lines=True, orient="records")\n df4.to_json(path, mode="a", lines=True, orient="records")\n\n # Read path file\n result = read_json(path, lines=True)\n tm.assert_frame_equal(result, 
expected)\n\n\ndef test_to_json_append_output_different_columns_reordered():\n # GH 35849\n # Testing that resulting output reads in as expected.\n # Testing specific result column order.\n df1 = DataFrame({"col1": [1, 2], "col2": ["a", "b"]})\n df2 = DataFrame({"col1": [3, 4], "col2": ["c", "d"]})\n df3 = DataFrame({"col2": ["e", "f"], "col3": ["!", "#"]})\n df4 = DataFrame({"col4": [True, False]})\n\n # df4, df3, df2, df1 (in that order)\n expected = DataFrame(\n {\n "col4": [True, False, None, None, None, None, None, None],\n "col2": [np.nan, np.nan, "e", "f", "c", "d", "a", "b"],\n "col3": [np.nan, np.nan, "!", "#", np.nan, np.nan, np.nan, np.nan],\n "col1": [None, None, None, None, 3, 4, 1, 2],\n }\n ).astype({"col4": "float"})\n with tm.ensure_clean("test.json") as path:\n # Save dataframes to the same file\n df4.to_json(path, mode="a", lines=True, orient="records")\n df3.to_json(path, mode="a", lines=True, orient="records")\n df2.to_json(path, mode="a", lines=True, orient="records")\n df1.to_json(path, mode="a", lines=True, orient="records")\n\n # Read path file\n result = read_json(path, lines=True)\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\json\test_readlines.py | test_readlines.py | Python | 18,819 | 0.95 | 0.090239 | 0.1373 | node-utils | 942 | 2024-10-15T16:20:37.574981 | GPL-3.0 | true | 50550f7f0bda50d9bb60d8fd5ddb41b0 |
"""Tests for the vendored ujson encoder/decoder exposed as pandas._libs.json."""

import calendar
import datetime
import decimal
import json
import locale
import math
import re
import time

import dateutil
import numpy as np
import pytest
import pytz

import pandas._libs.json as ujson
from pandas.compat import IS64

from pandas import (
    DataFrame,
    DatetimeIndex,
    Index,
    NaT,
    PeriodIndex,
    Series,
    Timedelta,
    Timestamp,
    date_range,
)
import pandas._testing as tm


def _clean_dict(d):
    """
    Sanitize dictionary for JSON by converting all keys to strings.

    Parameters
    ----------
    d : dict
        The dictionary to convert.

    Returns
    -------
    cleaned_dict : dict
    """
    return {str(k): v for k, v in d.items()}


@pytest.fixture(
    params=[None, "split", "records", "values", "index"]  # Column indexed by default.
)
def orient(request):
    return request.param


class TestUltraJSONTests:
    # Round-trip and edge-case tests for ujson_dumps/ujson_loads, frequently
    # cross-checked against the stdlib json module.

    @pytest.mark.skipif(not IS64, reason="not compliant on 32-bit, xref #15865")
    def test_encode_decimal(self):
        # double_precision controls rounding of Decimal values on encode.
        sut = decimal.Decimal("1337.1337")
        encoded = ujson.ujson_dumps(sut, double_precision=15)
        decoded = ujson.ujson_loads(encoded)
        assert decoded == 1337.1337

        sut = decimal.Decimal("0.95")
        encoded = ujson.ujson_dumps(sut, double_precision=1)
        assert encoded == "1.0"

        decoded = ujson.ujson_loads(encoded)
        assert decoded == 1.0

        sut = decimal.Decimal("0.94")
        encoded = ujson.ujson_dumps(sut, double_precision=1)
        assert encoded == "0.9"

        decoded = ujson.ujson_loads(encoded)
        assert decoded == 0.9

        sut = decimal.Decimal("1.95")
        encoded = ujson.ujson_dumps(sut, double_precision=1)
        assert encoded == "2.0"

        decoded = ujson.ujson_loads(encoded)
        assert decoded == 2.0

        sut = decimal.Decimal("-1.95")
        encoded = ujson.ujson_dumps(sut, double_precision=1)
        assert encoded == "-2.0"

        decoded = ujson.ujson_loads(encoded)
        assert decoded == -2.0

        sut = decimal.Decimal("0.995")
        encoded = ujson.ujson_dumps(sut, double_precision=2)
        assert encoded == "1.0"

        decoded = ujson.ujson_loads(encoded)
        assert decoded == 1.0

        sut = decimal.Decimal("0.9995")
        encoded = ujson.ujson_dumps(sut, double_precision=3)
        assert encoded == "1.0"

        decoded = ujson.ujson_loads(encoded)
        assert decoded == 1.0

        sut = decimal.Decimal("0.99999999999999944")
        encoded = ujson.ujson_dumps(sut, double_precision=15)
        assert encoded == "1.0"

        decoded = ujson.ujson_loads(encoded)
        assert decoded == 1.0

    @pytest.mark.parametrize("ensure_ascii", [True, False])
    def test_encode_string_conversion(self, ensure_ascii):
        string_input = "A string \\ / \b \f \n \r \t </script> &"
        not_html_encoded = '"A string \\\\ \\/ \\b \\f \\n \\r \\t <\\/script> &"'
        html_encoded = (
            '"A string \\\\ \\/ \\b \\f \\n \\r \\t \\u003c\\/script\\u003e \\u0026"'
        )

        def helper(expected_output, **encode_kwargs):
            output = ujson.ujson_dumps(
                string_input, ensure_ascii=ensure_ascii, **encode_kwargs
            )

            assert output == expected_output
            assert string_input == json.loads(output)
            assert string_input == ujson.ujson_loads(output)

        # Default behavior assumes encode_html_chars=False.
        helper(not_html_encoded)

        # Make sure explicit encode_html_chars=False works.
        helper(not_html_encoded, encode_html_chars=False)

        # Make sure explicit encode_html_chars=True does the encoding.
        helper(html_encoded, encode_html_chars=True)

    @pytest.mark.parametrize(
        "long_number", [-4342969734183514, -12345678901234.56789012, -528656961.4399388]
    )
    def test_double_long_numbers(self, long_number):
        sut = {"a": long_number}
        encoded = ujson.ujson_dumps(sut, double_precision=15)

        decoded = ujson.ujson_loads(encoded)
        assert sut == decoded

    def test_encode_non_c_locale(self):
        # Locales with a non-"." decimal separator must not break float I/O.
        lc_category = locale.LC_NUMERIC

        # We just need one of these locales to work.
        for new_locale in ("it_IT.UTF-8", "Italian_Italy"):
            if tm.can_set_locale(new_locale, lc_category):
                with tm.set_locale(new_locale, lc_category):
                    assert ujson.ujson_loads(ujson.ujson_dumps(4.78e60)) == 4.78e60
                    assert ujson.ujson_loads("4.78", precise_float=True) == 4.78
                break

    def test_decimal_decode_test_precise(self):
        sut = {"a": 4.56}
        encoded = ujson.ujson_dumps(sut)
        decoded = ujson.ujson_loads(encoded, precise_float=True)
        assert sut == decoded

    def test_encode_double_tiny_exponential(self):
        num = 1e-40
        assert num == ujson.ujson_loads(ujson.ujson_dumps(num))
        num = 1e-100
        assert num == ujson.ujson_loads(ujson.ujson_dumps(num))
        num = -1e-45
        assert num == ujson.ujson_loads(ujson.ujson_dumps(num))
        num = -1e-145
        assert np.allclose(num, ujson.ujson_loads(ujson.ujson_dumps(num)))

    @pytest.mark.parametrize("unicode_key", ["key1", "بن"])
    def test_encode_dict_with_unicode_keys(self, unicode_key):
        unicode_dict = {unicode_key: "value1"}
        assert unicode_dict == ujson.ujson_loads(ujson.ujson_dumps(unicode_dict))

    @pytest.mark.parametrize(
        "double_input", [math.pi, -math.pi]  # Should work with negatives too.
    )
    def test_encode_double_conversion(self, double_input):
        output = ujson.ujson_dumps(double_input)
        assert round(double_input, 5) == round(json.loads(output), 5)
        assert round(double_input, 5) == round(ujson.ujson_loads(output), 5)

    def test_encode_with_decimal(self):
        decimal_input = 1.0
        output = ujson.ujson_dumps(decimal_input)

        assert output == "1.0"

    def test_encode_array_of_nested_arrays(self):
        nested_input = [[[[]]]] * 20
        output = ujson.ujson_dumps(nested_input)

        assert nested_input == json.loads(output)
        assert nested_input == ujson.ujson_loads(output)

    def test_encode_array_of_doubles(self):
        doubles_input = [31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
        output = ujson.ujson_dumps(doubles_input)

        assert doubles_input == json.loads(output)
        assert doubles_input == ujson.ujson_loads(output)

    def test_double_precision(self):
        double_input = 30.012345678901234
        output = ujson.ujson_dumps(double_input, double_precision=15)

        assert double_input == json.loads(output)
        assert double_input == ujson.ujson_loads(output)

        for double_precision in (3, 9):
            output = ujson.ujson_dumps(double_input, double_precision=double_precision)
            rounded_input = round(double_input, double_precision)

            assert rounded_input == json.loads(output)
            assert rounded_input == ujson.ujson_loads(output)

    @pytest.mark.parametrize(
        "invalid_val",
        [
            20,
            -1,
            "9",
            None,
        ],
    )
    def test_invalid_double_precision(self, invalid_val):
        double_input = 30.12345678901234567890
        # Out-of-range ints raise ValueError; non-ints raise TypeError.
        expected_exception = ValueError if isinstance(invalid_val, int) else TypeError
        msg = (
            r"Invalid value '.*' for option 'double_precision', max is '15'|"
            r"an integer is required \(got type |"
            r"object cannot be interpreted as an integer"
        )
        with pytest.raises(expected_exception, match=msg):
            ujson.ujson_dumps(double_input, double_precision=invalid_val)

    def test_encode_string_conversion2(self):
        string_input = "A string \\ / \b \f \n \r \t"
        output = ujson.ujson_dumps(string_input)

        assert string_input == json.loads(output)
        assert string_input == ujson.ujson_loads(output)
        assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'

    @pytest.mark.parametrize(
        "unicode_input",
        ["Räksmörgås اسامة بن محمد بن عوض بن لادن", "\xe6\x97\xa5\xd1\x88"],
    )
    def test_encode_unicode_conversion(self, unicode_input):
        enc = ujson.ujson_dumps(unicode_input)
        dec = ujson.ujson_loads(enc)

        assert enc == json.dumps(unicode_input)
        assert dec == json.loads(enc)

    def test_encode_control_escaping(self):
        escaped_input = "\x19"
        enc = ujson.ujson_dumps(escaped_input)
        dec = ujson.ujson_loads(enc)

        assert escaped_input == dec
        assert enc == json.dumps(escaped_input)

    def test_encode_unicode_surrogate_pair(self):
        surrogate_input = "\xf0\x90\x8d\x86"
        enc = ujson.ujson_dumps(surrogate_input)
        dec = ujson.ujson_loads(enc)

        assert enc == json.dumps(surrogate_input)
        assert dec == json.loads(enc)

    def test_encode_unicode_4bytes_utf8(self):
        four_bytes_input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
        enc = ujson.ujson_dumps(four_bytes_input)
        dec = ujson.ujson_loads(enc)

        assert enc == json.dumps(four_bytes_input)
        assert dec == json.loads(enc)

    def test_encode_unicode_4bytes_utf8highest(self):
        four_bytes_input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
        enc = ujson.ujson_dumps(four_bytes_input)

        dec = ujson.ujson_loads(enc)

        assert enc == json.dumps(four_bytes_input)
        assert dec == json.loads(enc)

    def test_encode_unicode_error(self):
        # A lone surrogate cannot be utf-8 encoded.
        string = "'\udac0'"
        msg = (
            r"'utf-8' codec can't encode character '\\udac0' "
            r"in position 1: surrogates not allowed"
        )
        with pytest.raises(UnicodeEncodeError, match=msg):
            ujson.ujson_dumps([string])

    def test_encode_array_in_array(self):
        arr_in_arr_input = [[[[]]]]
        output = ujson.ujson_dumps(arr_in_arr_input)

        assert arr_in_arr_input == json.loads(output)
        assert output == json.dumps(arr_in_arr_input)
        assert arr_in_arr_input == ujson.ujson_loads(output)

    @pytest.mark.parametrize(
        "num_input",
        [
            31337,
            -31337,  # Negative number.
            -9223372036854775808,  # Large negative number.
        ],
    )
    def test_encode_num_conversion(self, num_input):
        output = ujson.ujson_dumps(num_input)
        assert num_input == json.loads(output)
        assert output == json.dumps(num_input)
        assert num_input == ujson.ujson_loads(output)

    def test_encode_list_conversion(self):
        list_input = [1, 2, 3, 4]
        output = ujson.ujson_dumps(list_input)

        assert list_input == json.loads(output)
        assert list_input == ujson.ujson_loads(output)

    def test_encode_dict_conversion(self):
        dict_input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
        output = ujson.ujson_dumps(dict_input)

        assert dict_input == json.loads(output)
        assert dict_input == ujson.ujson_loads(output)

    @pytest.mark.parametrize("builtin_value", [None, True, False])
    def test_encode_builtin_values_conversion(self, builtin_value):
        output = ujson.ujson_dumps(builtin_value)
        assert builtin_value == json.loads(output)
        assert output == json.dumps(builtin_value)
        assert builtin_value == ujson.ujson_loads(output)

    def test_encode_datetime_conversion(self):
        datetime_input = datetime.datetime.fromtimestamp(time.time())
        output = ujson.ujson_dumps(datetime_input, date_unit="s")
        expected = calendar.timegm(datetime_input.utctimetuple())

        assert int(expected) == json.loads(output)
        assert int(expected) == ujson.ujson_loads(output)

    def test_encode_date_conversion(self):
        date_input = datetime.date.fromtimestamp(time.time())
        output = ujson.ujson_dumps(date_input, date_unit="s")

        tup = (date_input.year, date_input.month, date_input.day, 0, 0, 0)
        expected = calendar.timegm(tup)

        assert int(expected) == json.loads(output)
        assert int(expected) == ujson.ujson_loads(output)

    @pytest.mark.parametrize(
        "test",
        [datetime.time(), datetime.time(1, 2, 3), datetime.time(10, 12, 15, 343243)],
    )
    def test_encode_time_conversion_basic(self, test):
        output = ujson.ujson_dumps(test)
        expected = f'"{test.isoformat()}"'
        assert expected == output

    def test_encode_time_conversion_pytz(self):
        # see gh-11473: to_json segfaults with timezone-aware datetimes
        test = datetime.time(10, 12, 15, 343243, pytz.utc)
        output = ujson.ujson_dumps(test)
        expected = f'"{test.isoformat()}"'
        assert expected == output

    def test_encode_time_conversion_dateutil(self):
        # see gh-11473: to_json segfaults with timezone-aware datetimes
        test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
        output = ujson.ujson_dumps(test)
        expected = f'"{test.isoformat()}"'
        assert expected == output

    @pytest.mark.parametrize(
        "decoded_input", [NaT, np.datetime64("NaT"), np.nan, np.inf, -np.inf]
    )
    def test_encode_as_null(self, decoded_input):
        assert ujson.ujson_dumps(decoded_input) == "null", "Expected null"

    def test_datetime_units(self):
        # date_unit selects the resolution of the emitted epoch integer.
        val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
        stamp = Timestamp(val).as_unit("ns")

        roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="s"))
        assert roundtrip == stamp._value // 10**9

        roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="ms"))
        assert roundtrip == stamp._value // 10**6

        roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="us"))
        assert roundtrip == stamp._value // 10**3

        roundtrip = ujson.ujson_loads(ujson.ujson_dumps(val, date_unit="ns"))
        assert roundtrip == stamp._value

        msg = "Invalid value 'foo' for option 'date_unit'"
        with pytest.raises(ValueError, match=msg):
            ujson.ujson_dumps(val, date_unit="foo")

    def test_encode_to_utf8(self):
        unencoded = "\xe6\x97\xa5\xd1\x88"

        enc = ujson.ujson_dumps(unencoded, ensure_ascii=False)
        dec = ujson.ujson_loads(enc)

        assert enc == json.dumps(unencoded, ensure_ascii=False)
        assert dec == json.loads(enc)

    def test_decode_from_unicode(self):
        unicode_input = '{"obj": 31337}'

        dec1 = ujson.ujson_loads(unicode_input)
        dec2 = ujson.ujson_loads(str(unicode_input))

        assert dec1 == dec2

    def test_encode_recursion_max(self):
        # 8 is the max recursion depth

        class O2:
            member = 0

        class O1:
            member = 0

        decoded_input = O1()
        decoded_input.member = O2()
        decoded_input.member.member = decoded_input

        with pytest.raises(OverflowError, match="Maximum recursion level reached"):
            ujson.ujson_dumps(decoded_input)

    def test_decode_jibberish(self):
        jibberish = "fdsa sda v9sa fdsa"
        msg = "Unexpected character found when decoding 'false'"
        with pytest.raises(ValueError, match=msg):
            ujson.ujson_loads(jibberish)

    @pytest.mark.parametrize(
        "broken_json",
        [
            "[",  # Broken array start.
            "{",  # Broken object start.
            "]",  # Broken array end.
            "}",  # Broken object end.
        ],
    )
    def test_decode_broken_json(self, broken_json):
        msg = "Expected object or value"
        with pytest.raises(ValueError, match=msg):
            ujson.ujson_loads(broken_json)

    @pytest.mark.parametrize("too_big_char", ["[", "{"])
    def test_decode_depth_too_big(self, too_big_char):
        with pytest.raises(ValueError, match="Reached object decoding depth limit"):
            ujson.ujson_loads(too_big_char * (1024 * 1024))

    @pytest.mark.parametrize(
        "bad_string",
        [
            '"TESTING',  # Unterminated.
            '"TESTING\\"',  # Unterminated escape.
            "tru",  # Broken True.
            "fa",  # Broken False.
            "n",  # Broken None.
        ],
    )
    def test_decode_bad_string(self, bad_string):
        # NOTE(review): the doubled "when when" presumably mirrors the C
        # decoder's literal message text — verify against the extension before
        # "fixing" it here.
        msg = (
            "Unexpected character found when decoding|"
            "Unmatched ''\"' when when decoding 'string'"
        )
        with pytest.raises(ValueError, match=msg):
            ujson.ujson_loads(bad_string)

    @pytest.mark.parametrize(
        "broken_json, err_msg",
        [
            (
                '{{1337:""}}',
                "Key name of object must be 'string' when decoding 'object'",
            ),
            ('{{"key":"}', "Unmatched ''\"' when when decoding 'string'"),
            ("[[[true", "Unexpected character found when decoding array value (2)"),
        ],
    )
    def test_decode_broken_json_leak(self, broken_json, err_msg):
        # Repeated failing decodes; historically guarded against a refcount leak.
        for _ in range(1000):
            with pytest.raises(ValueError, match=re.escape(err_msg)):
                ujson.ujson_loads(broken_json)

    @pytest.mark.parametrize(
        "invalid_dict",
        [
            "{{{{31337}}}}",  # No key.
            '{{{{"key":}}}}',  # No value.
            '{{{{"key"}}}}',  # No colon or value.
        ],
    )
    def test_decode_invalid_dict(self, invalid_dict):
        msg = (
            "Key name of object must be 'string' when decoding 'object'|"
            "No ':' found when decoding object value|"
            "Expected object or value"
        )
        with pytest.raises(ValueError, match=msg):
            ujson.ujson_loads(invalid_dict)

    @pytest.mark.parametrize(
        "numeric_int_as_str", ["31337", "-31337"]  # Should work with negatives.
    )
    def test_decode_numeric_int(self, numeric_int_as_str):
        assert int(numeric_int_as_str) == ujson.ujson_loads(numeric_int_as_str)

    def test_encode_null_character(self):
        wrapped_input = "31337 \x00 1337"
        output = ujson.ujson_dumps(wrapped_input)

        assert wrapped_input == json.loads(output)
        assert output == json.dumps(wrapped_input)
        assert wrapped_input == ujson.ujson_loads(output)

        alone_input = "\x00"
        output = ujson.ujson_dumps(alone_input)

        assert alone_input == json.loads(output)
        assert output == json.dumps(alone_input)
        assert alone_input == ujson.ujson_loads(output)
        assert '" \\u0000\\r\\n "' == ujson.ujson_dumps(" \u0000\r\n ")

    def test_decode_null_character(self):
        wrapped_input = '"31337 \\u0000 31337"'
        assert ujson.ujson_loads(wrapped_input) == json.loads(wrapped_input)

    def test_encode_list_long_conversion(self):
        long_input = [
            9223372036854775807,
            9223372036854775807,
            9223372036854775807,
            9223372036854775807,
            9223372036854775807,
            9223372036854775807,
        ]
        output = ujson.ujson_dumps(long_input)

        assert long_input == json.loads(output)
        assert long_input == ujson.ujson_loads(output)

    @pytest.mark.parametrize("long_input", [9223372036854775807, 18446744073709551615])
    def test_encode_long_conversion(self, long_input):
        output = ujson.ujson_dumps(long_input)

        assert long_input == json.loads(output)
        assert output == json.dumps(long_input)
        assert long_input == ujson.ujson_loads(output)

    @pytest.mark.parametrize("bigNum", [2**64, -(2**63) - 1])
    def test_dumps_ints_larger_than_maxsize(self, bigNum):
        # Values outside int64/uint64 encode fine but cannot be decoded back.
        encoding = ujson.ujson_dumps(bigNum)
        assert str(bigNum) == encoding

        with pytest.raises(
            ValueError,
            match="Value is too big|Value is too small",
        ):
            assert ujson.ujson_loads(encoding) == bigNum

    @pytest.mark.parametrize(
        "int_exp", ["1337E40", "1.337E40", "1337E+9", "1.337e+40", "1.337E-4"]
    )
    def test_decode_numeric_int_exp(self, int_exp):
        assert ujson.ujson_loads(int_exp) == json.loads(int_exp)

    def test_loads_non_str_bytes_raises(self):
        msg = "a bytes-like object is required, not 'NoneType'"
        with pytest.raises(TypeError, match=msg):
            ujson.ujson_loads(None)

    @pytest.mark.parametrize("val", [3590016419, 2**31, 2**32,
(2**32) - 1])\n def test_decode_number_with_32bit_sign_bit(self, val):\n # Test that numbers that fit within 32 bits but would have the\n # sign bit set (2**31 <= x < 2**32) are decoded properly.\n doc = f'{{"id": {val}}}'\n assert ujson.ujson_loads(doc)["id"] == val\n\n def test_encode_big_escape(self):\n # Make sure no Exception is raised.\n for _ in range(10):\n base = "\u00e5".encode()\n escape_input = base * 1024 * 1024 * 2\n ujson.ujson_dumps(escape_input)\n\n def test_decode_big_escape(self):\n # Make sure no Exception is raised.\n for _ in range(10):\n base = "\u00e5".encode()\n quote = b'"'\n\n escape_input = quote + (base * 1024 * 1024 * 2) + quote\n ujson.ujson_loads(escape_input)\n\n def test_to_dict(self):\n d = {"key": 31337}\n\n class DictTest:\n def toDict(self):\n return d\n\n o = DictTest()\n output = ujson.ujson_dumps(o)\n\n dec = ujson.ujson_loads(output)\n assert dec == d\n\n def test_default_handler(self):\n class _TestObject:\n def __init__(self, val) -> None:\n self.val = val\n\n @property\n def recursive_attr(self):\n return _TestObject("recursive_attr")\n\n def __str__(self) -> str:\n return str(self.val)\n\n msg = "Maximum recursion level reached"\n with pytest.raises(OverflowError, match=msg):\n ujson.ujson_dumps(_TestObject("foo"))\n assert '"foo"' == ujson.ujson_dumps(_TestObject("foo"), default_handler=str)\n\n def my_handler(_):\n return "foobar"\n\n assert '"foobar"' == ujson.ujson_dumps(\n _TestObject("foo"), default_handler=my_handler\n )\n\n def my_handler_raises(_):\n raise TypeError("I raise for anything")\n\n with pytest.raises(TypeError, match="I raise for anything"):\n ujson.ujson_dumps(_TestObject("foo"), default_handler=my_handler_raises)\n\n def my_int_handler(_):\n return 42\n\n assert (\n ujson.ujson_loads(\n ujson.ujson_dumps(_TestObject("foo"), default_handler=my_int_handler)\n )\n == 42\n )\n\n def my_obj_handler(_):\n return datetime.datetime(2013, 2, 3)\n\n assert ujson.ujson_loads(\n 
ujson.ujson_dumps(datetime.datetime(2013, 2, 3))\n ) == ujson.ujson_loads(\n ujson.ujson_dumps(_TestObject("foo"), default_handler=my_obj_handler)\n )\n\n obj_list = [_TestObject("foo"), _TestObject("bar")]\n assert json.loads(json.dumps(obj_list, default=str)) == ujson.ujson_loads(\n ujson.ujson_dumps(obj_list, default_handler=str)\n )\n\n def test_encode_object(self):\n class _TestObject:\n def __init__(self, a, b, _c, d) -> None:\n self.a = a\n self.b = b\n self._c = _c\n self.d = d\n\n def e(self):\n return 5\n\n # JSON keys should be all non-callable non-underscore attributes, see GH-42768\n test_object = _TestObject(a=1, b=2, _c=3, d=4)\n assert ujson.ujson_loads(ujson.ujson_dumps(test_object)) == {\n "a": 1,\n "b": 2,\n "d": 4,\n }\n\n def test_ujson__name__(self):\n # GH 52898\n assert ujson.__name__ == "pandas._libs.json"\n\n\nclass TestNumpyJSONTests:\n @pytest.mark.parametrize("bool_input", [True, False])\n def test_bool(self, bool_input):\n b = bool(bool_input)\n assert ujson.ujson_loads(ujson.ujson_dumps(b)) == b\n\n def test_bool_array(self):\n bool_array = np.array(\n [True, False, True, True, False, True, False, False], dtype=bool\n )\n output = np.array(ujson.ujson_loads(ujson.ujson_dumps(bool_array)), dtype=bool)\n tm.assert_numpy_array_equal(bool_array, output)\n\n def test_int(self, any_int_numpy_dtype):\n klass = np.dtype(any_int_numpy_dtype).type\n num = klass(1)\n\n assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num\n\n def test_int_array(self, any_int_numpy_dtype):\n arr = np.arange(100, dtype=int)\n arr_input = arr.astype(any_int_numpy_dtype)\n\n arr_output = np.array(\n ujson.ujson_loads(ujson.ujson_dumps(arr_input)), dtype=any_int_numpy_dtype\n )\n tm.assert_numpy_array_equal(arr_input, arr_output)\n\n def test_int_max(self, any_int_numpy_dtype):\n if any_int_numpy_dtype in ("int64", "uint64") and not IS64:\n pytest.skip("Cannot test 64-bit integer on 32-bit platform")\n\n klass = np.dtype(any_int_numpy_dtype).type\n\n # 
uint64 max will always overflow,\n # as it's encoded to signed.\n if any_int_numpy_dtype == "uint64":\n num = np.iinfo("int64").max\n else:\n num = np.iinfo(any_int_numpy_dtype).max\n\n assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num\n\n def test_float(self, float_numpy_dtype):\n klass = np.dtype(float_numpy_dtype).type\n num = klass(256.2013)\n\n assert klass(ujson.ujson_loads(ujson.ujson_dumps(num))) == num\n\n def test_float_array(self, float_numpy_dtype):\n arr = np.arange(12.5, 185.72, 1.7322, dtype=float)\n float_input = arr.astype(float_numpy_dtype)\n\n float_output = np.array(\n ujson.ujson_loads(ujson.ujson_dumps(float_input, double_precision=15)),\n dtype=float_numpy_dtype,\n )\n tm.assert_almost_equal(float_input, float_output)\n\n def test_float_max(self, float_numpy_dtype):\n klass = np.dtype(float_numpy_dtype).type\n num = klass(np.finfo(float_numpy_dtype).max / 10)\n\n tm.assert_almost_equal(\n klass(ujson.ujson_loads(ujson.ujson_dumps(num, double_precision=15))), num\n )\n\n def test_array_basic(self):\n arr = np.arange(96)\n arr = arr.reshape((2, 2, 2, 2, 3, 2))\n\n tm.assert_numpy_array_equal(\n np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr\n )\n\n @pytest.mark.parametrize("shape", [(10, 10), (5, 5, 4), (100, 1)])\n def test_array_reshaped(self, shape):\n arr = np.arange(100)\n arr = arr.reshape(shape)\n\n tm.assert_numpy_array_equal(\n np.array(ujson.ujson_loads(ujson.ujson_dumps(arr))), arr\n )\n\n def test_array_list(self):\n arr_list = [\n "a",\n [],\n {},\n {},\n [],\n 42,\n 97.8,\n ["a", "b"],\n {"key": "val"},\n ]\n arr = np.array(arr_list, dtype=object)\n result = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=object)\n tm.assert_numpy_array_equal(result, arr)\n\n def test_array_float(self):\n dtype = np.float32\n\n arr = np.arange(100.202, 200.202, 1, dtype=dtype)\n arr = arr.reshape((5, 5, 4))\n\n arr_out = np.array(ujson.ujson_loads(ujson.ujson_dumps(arr)), dtype=dtype)\n tm.assert_almost_equal(arr, 
arr_out)\n\n def test_0d_array(self):\n # gh-18878\n msg = re.escape(\n "array(1) (numpy-scalar) is not JSON serializable at the moment"\n )\n with pytest.raises(TypeError, match=msg):\n ujson.ujson_dumps(np.array(1))\n\n def test_array_long_double(self):\n msg = re.compile(\n "1234.5.* \\(numpy-scalar\\) is not JSON serializable at the moment"\n )\n with pytest.raises(TypeError, match=msg):\n ujson.ujson_dumps(np.longdouble(1234.5))\n\n\nclass TestPandasJSONTests:\n def test_dataframe(self, orient):\n dtype = np.int64\n\n df = DataFrame(\n [[1, 2, 3], [4, 5, 6]],\n index=["a", "b"],\n columns=["x", "y", "z"],\n dtype=dtype,\n )\n encode_kwargs = {} if orient is None else {"orient": orient}\n assert (df.dtypes == dtype).all()\n\n output = ujson.ujson_loads(ujson.ujson_dumps(df, **encode_kwargs))\n assert (df.dtypes == dtype).all()\n\n # Ensure proper DataFrame initialization.\n if orient == "split":\n dec = _clean_dict(output)\n output = DataFrame(**dec)\n else:\n output = DataFrame(output)\n\n # Corrections to enable DataFrame comparison.\n if orient == "values":\n df.columns = [0, 1, 2]\n df.index = [0, 1]\n elif orient == "records":\n df.index = [0, 1]\n elif orient == "index":\n df = df.transpose()\n\n assert (df.dtypes == dtype).all()\n tm.assert_frame_equal(output, df)\n\n def test_dataframe_nested(self, orient):\n df = DataFrame(\n [[1, 2, 3], [4, 5, 6]], index=["a", "b"], columns=["x", "y", "z"]\n )\n\n nested = {"df1": df, "df2": df.copy()}\n kwargs = {} if orient is None else {"orient": orient}\n\n exp = {\n "df1": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)),\n "df2": ujson.ujson_loads(ujson.ujson_dumps(df, **kwargs)),\n }\n assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp\n\n def test_series(self, orient):\n dtype = np.int64\n s = Series(\n [10, 20, 30, 40, 50, 60],\n name="series",\n index=[6, 7, 8, 9, 10, 15],\n dtype=dtype,\n ).sort_values()\n assert s.dtype == dtype\n\n encode_kwargs = {} if orient is None else {"orient": 
orient}\n\n output = ujson.ujson_loads(ujson.ujson_dumps(s, **encode_kwargs))\n assert s.dtype == dtype\n\n if orient == "split":\n dec = _clean_dict(output)\n output = Series(**dec)\n else:\n output = Series(output)\n\n if orient in (None, "index"):\n s.name = None\n output = output.sort_values()\n s.index = ["6", "7", "8", "9", "10", "15"]\n elif orient in ("records", "values"):\n s.name = None\n s.index = [0, 1, 2, 3, 4, 5]\n\n assert s.dtype == dtype\n tm.assert_series_equal(output, s)\n\n def test_series_nested(self, orient):\n s = Series(\n [10, 20, 30, 40, 50, 60], name="series", index=[6, 7, 8, 9, 10, 15]\n ).sort_values()\n nested = {"s1": s, "s2": s.copy()}\n kwargs = {} if orient is None else {"orient": orient}\n\n exp = {\n "s1": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)),\n "s2": ujson.ujson_loads(ujson.ujson_dumps(s, **kwargs)),\n }\n assert ujson.ujson_loads(ujson.ujson_dumps(nested, **kwargs)) == exp\n\n def test_index(self):\n i = Index([23, 45, 18, 98, 43, 11], name="index")\n\n # Column indexed.\n output = Index(ujson.ujson_loads(ujson.ujson_dumps(i)), name="index")\n tm.assert_index_equal(i, output)\n\n dec = _clean_dict(ujson.ujson_loads(ujson.ujson_dumps(i, orient="split")))\n output = Index(**dec)\n\n tm.assert_index_equal(i, output)\n assert i.name == output.name\n\n tm.assert_index_equal(i, output)\n assert i.name == output.name\n\n output = Index(\n ujson.ujson_loads(ujson.ujson_dumps(i, orient="values")), name="index"\n )\n tm.assert_index_equal(i, output)\n\n output = Index(\n ujson.ujson_loads(ujson.ujson_dumps(i, orient="records")), name="index"\n )\n tm.assert_index_equal(i, output)\n\n output = Index(\n ujson.ujson_loads(ujson.ujson_dumps(i, orient="index")), name="index"\n )\n tm.assert_index_equal(i, output)\n\n def test_datetime_index(self):\n date_unit = "ns"\n\n # freq doesn't round-trip\n rng = DatetimeIndex(list(date_range("1/1/2000", periods=20)), freq=None)\n encoded = ujson.ujson_dumps(rng, date_unit=date_unit)\n\n 
decoded = DatetimeIndex(np.array(ujson.ujson_loads(encoded)))\n tm.assert_index_equal(rng, decoded)\n\n ts = Series(np.random.default_rng(2).standard_normal(len(rng)), index=rng)\n decoded = Series(ujson.ujson_loads(ujson.ujson_dumps(ts, date_unit=date_unit)))\n\n idx_values = decoded.index.values.astype(np.int64)\n decoded.index = DatetimeIndex(idx_values)\n tm.assert_series_equal(ts, decoded)\n\n @pytest.mark.parametrize(\n "invalid_arr",\n [\n "[31337,]", # Trailing comma.\n "[,31337]", # Leading comma.\n "[]]", # Unmatched bracket.\n "[,]", # Only comma.\n ],\n )\n def test_decode_invalid_array(self, invalid_arr):\n msg = (\n "Expected object or value|Trailing data|"\n "Unexpected character found when decoding array value"\n )\n with pytest.raises(ValueError, match=msg):\n ujson.ujson_loads(invalid_arr)\n\n @pytest.mark.parametrize("arr", [[], [31337]])\n def test_decode_array(self, arr):\n assert arr == ujson.ujson_loads(str(arr))\n\n @pytest.mark.parametrize("extreme_num", [9223372036854775807, -9223372036854775808])\n def test_decode_extreme_numbers(self, extreme_num):\n assert extreme_num == ujson.ujson_loads(str(extreme_num))\n\n @pytest.mark.parametrize("too_extreme_num", [f"{2**64}", f"{-2**63-1}"])\n def test_decode_too_extreme_numbers(self, too_extreme_num):\n with pytest.raises(\n ValueError,\n match="Value is too big|Value is too small",\n ):\n ujson.ujson_loads(too_extreme_num)\n\n def test_decode_with_trailing_whitespaces(self):\n assert {} == ujson.ujson_loads("{}\n\t ")\n\n def test_decode_with_trailing_non_whitespaces(self):\n with pytest.raises(ValueError, match="Trailing data"):\n ujson.ujson_loads("{}\n\t a")\n\n @pytest.mark.parametrize("value", [f"{2**64}", f"{-2**63-1}"])\n def test_decode_array_with_big_int(self, value):\n with pytest.raises(\n ValueError,\n match="Value is too big|Value is too small",\n ):\n ujson.ujson_loads(value)\n\n @pytest.mark.parametrize(\n "float_number",\n [\n 1.1234567893,\n 1.234567893,\n 1.34567893,\n 
1.4567893,\n 1.567893,\n 1.67893,\n 1.7893,\n 1.893,\n 1.3,\n ],\n )\n @pytest.mark.parametrize("sign", [-1, 1])\n def test_decode_floating_point(self, sign, float_number):\n float_number *= sign\n tm.assert_almost_equal(\n float_number, ujson.ujson_loads(str(float_number)), rtol=1e-15\n )\n\n def test_encode_big_set(self):\n s = set()\n\n for x in range(100000):\n s.add(x)\n\n # Make sure no Exception is raised.\n ujson.ujson_dumps(s)\n\n def test_encode_empty_set(self):\n assert "[]" == ujson.ujson_dumps(set())\n\n def test_encode_set(self):\n s = {1, 2, 3, 4, 5, 6, 7, 8, 9}\n enc = ujson.ujson_dumps(s)\n dec = ujson.ujson_loads(enc)\n\n for v in dec:\n assert v in s\n\n @pytest.mark.parametrize(\n "td",\n [\n Timedelta(days=366),\n Timedelta(days=-1),\n Timedelta(hours=13, minutes=5, seconds=5),\n Timedelta(hours=13, minutes=20, seconds=30),\n Timedelta(days=-1, nanoseconds=5),\n Timedelta(nanoseconds=1),\n Timedelta(microseconds=1, nanoseconds=1),\n Timedelta(milliseconds=1, microseconds=1, nanoseconds=1),\n Timedelta(milliseconds=999, microseconds=999, nanoseconds=999),\n ],\n )\n def test_encode_timedelta_iso(self, td):\n # GH 28256\n result = ujson.ujson_dumps(td, iso_dates=True)\n expected = f'"{td.isoformat()}"'\n\n assert result == expected\n\n def test_encode_periodindex(self):\n # GH 46683\n p = PeriodIndex(["2022-04-06", "2022-04-07"], freq="D")\n df = DataFrame(index=p)\n assert df.to_json() == "{}"\n | .venv\Lib\site-packages\pandas\tests\io\json\test_ujson.py | test_ujson.py | Python | 36,424 | 0.95 | 0.124195 | 0.026467 | node-utils | 942 | 2024-09-28T04:18:59.092434 | MIT | true | 9748b386dcc1807f54c0095b52d01b78 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\json\__pycache__\conftest.cpython-313.pyc | conftest.cpython-313.pyc | Other | 538 | 0.7 | 0.125 | 0 | python-kit | 21 | 2024-12-17T01:04:16.108724 | BSD-3-Clause | true | 3e39b4db6b9db55f8654293c454d026d |
\n\n | .venv\Lib\site-packages\pandas\tests\io\json\__pycache__\test_compression.cpython-313.pyc | test_compression.cpython-313.pyc | Other | 7,725 | 0.8 | 0 | 0 | awesome-app | 848 | 2024-12-22T22:27:27.459831 | GPL-3.0 | true | cc34c2dc966dfb18537e5e3c2169a2e8 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\json\__pycache__\test_deprecated_kwargs.cpython-313.pyc | test_deprecated_kwargs.cpython-313.pyc | Other | 1,441 | 0.8 | 0.166667 | 0.090909 | react-lib | 278 | 2024-01-07T05:08:23.981002 | Apache-2.0 | true | 87972b7cc3ba7b92b40d9200e267eadb |
\n\n | .venv\Lib\site-packages\pandas\tests\io\json\__pycache__\test_json_table_schema.cpython-313.pyc | test_json_table_schema.cpython-313.pyc | Other | 38,681 | 0.95 | 0.002392 | 0 | node-utils | 384 | 2023-11-19T01:20:54.687159 | MIT | true | 95a1516c00d2e62094261723d93cb46b |
\n\n | .venv\Lib\site-packages\pandas\tests\io\json\__pycache__\test_json_table_schema_ext_dtype.cpython-313.pyc | test_json_table_schema_ext_dtype.cpython-313.pyc | Other | 13,218 | 0.8 | 0.00641 | 0 | awesome-app | 441 | 2023-09-30T02:59:34.574144 | MIT | true | 902be76ade18684dea7e2d5b0de9e127 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\json\__pycache__\test_normalize.cpython-313.pyc | test_normalize.cpython-313.pyc | Other | 28,017 | 0.8 | 0.003165 | 0.009677 | node-utils | 879 | 2024-10-23T04:58:54.606277 | Apache-2.0 | true | 3f1fdf3a2eaa8e23dbc07fcd1f70e2b5 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\json\__pycache__\test_pandas.cpython-313.pyc | test_pandas.cpython-313.pyc | Other | 105,406 | 0.75 | 0.006826 | 0.010619 | react-lib | 254 | 2024-01-07T03:36:12.558188 | MIT | true | fff37554b40308867ef713619fa05127 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\json\__pycache__\test_readlines.cpython-313.pyc | test_readlines.cpython-313.pyc | Other | 24,864 | 0.95 | 0.00545 | 0 | react-lib | 958 | 2024-08-11T04:47:58.336554 | MIT | true | 06acea50169fcddf5bd60f37bf261f29 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\json\__pycache__\test_ujson.cpython-313.pyc | test_ujson.cpython-313.pyc | Other | 62,103 | 0.75 | 0.008333 | 0 | awesome-app | 284 | 2024-03-24T12:42:31.275097 | Apache-2.0 | true | c96d0c44e50cd0a01473b84b2064d2ac |
\n\n | .venv\Lib\site-packages\pandas\tests\io\json\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 195 | 0.7 | 0 | 0 | awesome-app | 795 | 2024-01-20T10:07:19.094919 | MIT | true | 06d42f6edd49cb2ce10265bf509e884b |
from __future__ import annotations\n\nimport os\n\nimport pytest\n\nfrom pandas.compat import HAS_PYARROW\nfrom pandas.compat._optional import VERSIONS\n\nfrom pandas import (\n read_csv,\n read_table,\n)\nimport pandas._testing as tm\n\n\nclass BaseParser:\n engine: str | None = None\n low_memory = True\n float_precision_choices: list[str | None] = []\n\n def update_kwargs(self, kwargs):\n kwargs = kwargs.copy()\n kwargs.update({"engine": self.engine, "low_memory": self.low_memory})\n\n return kwargs\n\n def read_csv(self, *args, **kwargs):\n kwargs = self.update_kwargs(kwargs)\n return read_csv(*args, **kwargs)\n\n def read_csv_check_warnings(\n self,\n warn_type: type[Warning],\n warn_msg: str,\n *args,\n raise_on_extra_warnings=True,\n check_stacklevel: bool = True,\n **kwargs,\n ):\n # We need to check the stacklevel here instead of in the tests\n # since this is where read_csv is called and where the warning\n # should point to.\n kwargs = self.update_kwargs(kwargs)\n with tm.assert_produces_warning(\n warn_type,\n match=warn_msg,\n raise_on_extra_warnings=raise_on_extra_warnings,\n check_stacklevel=check_stacklevel,\n ):\n return read_csv(*args, **kwargs)\n\n def read_table(self, *args, **kwargs):\n kwargs = self.update_kwargs(kwargs)\n return read_table(*args, **kwargs)\n\n def read_table_check_warnings(\n self,\n warn_type: type[Warning],\n warn_msg: str,\n *args,\n raise_on_extra_warnings=True,\n **kwargs,\n ):\n # We need to check the stacklevel here instead of in the tests\n # since this is where read_table is called and where the warning\n # should point to.\n kwargs = self.update_kwargs(kwargs)\n with tm.assert_produces_warning(\n warn_type, match=warn_msg, raise_on_extra_warnings=raise_on_extra_warnings\n ):\n return read_table(*args, **kwargs)\n\n\nclass CParser(BaseParser):\n engine = "c"\n float_precision_choices = [None, "high", "round_trip"]\n\n\nclass CParserHighMemory(CParser):\n low_memory = False\n\n\nclass CParserLowMemory(CParser):\n 
low_memory = True\n\n\nclass PythonParser(BaseParser):\n engine = "python"\n float_precision_choices = [None]\n\n\nclass PyArrowParser(BaseParser):\n engine = "pyarrow"\n float_precision_choices = [None]\n\n\n@pytest.fixture\ndef csv_dir_path(datapath):\n """\n The directory path to the data files needed for parser tests.\n """\n return datapath("io", "parser", "data")\n\n\n@pytest.fixture\ndef csv1(datapath):\n """\n The path to the data file "test1.csv" needed for parser tests.\n """\n return os.path.join(datapath("io", "data", "csv"), "test1.csv")\n\n\n_cParserHighMemory = CParserHighMemory\n_cParserLowMemory = CParserLowMemory\n_pythonParser = PythonParser\n_pyarrowParser = PyArrowParser\n\n_py_parsers_only = [_pythonParser]\n_c_parsers_only = [_cParserHighMemory, _cParserLowMemory]\n_pyarrow_parsers_only = [\n pytest.param(\n _pyarrowParser,\n marks=[\n pytest.mark.single_cpu,\n pytest.mark.skipif(not HAS_PYARROW, reason="pyarrow is not installed"),\n ],\n )\n]\n\n_all_parsers = [*_c_parsers_only, *_py_parsers_only, *_pyarrow_parsers_only]\n\n_py_parser_ids = ["python"]\n_c_parser_ids = ["c_high", "c_low"]\n_pyarrow_parsers_ids = ["pyarrow"]\n\n_all_parser_ids = [*_c_parser_ids, *_py_parser_ids, *_pyarrow_parsers_ids]\n\n\n@pytest.fixture(params=_all_parsers, ids=_all_parser_ids)\ndef all_parsers(request):\n """\n Fixture all of the CSV parsers.\n """\n parser = request.param()\n if parser.engine == "pyarrow":\n pytest.importorskip("pyarrow", VERSIONS["pyarrow"])\n # Try finding a way to disable threads all together\n # for more stable CI runs\n import pyarrow\n\n pyarrow.set_cpu_count(1)\n return parser\n\n\n@pytest.fixture(params=_c_parsers_only, ids=_c_parser_ids)\ndef c_parser_only(request):\n """\n Fixture all of the CSV parsers using the C engine.\n """\n return request.param()\n\n\n@pytest.fixture(params=_py_parsers_only, ids=_py_parser_ids)\ndef python_parser_only(request):\n """\n Fixture all of the CSV parsers using the Python engine.\n """\n return 
request.param()\n\n\n@pytest.fixture(params=_pyarrow_parsers_only, ids=_pyarrow_parsers_ids)\ndef pyarrow_parser_only(request):\n """\n Fixture all of the CSV parsers using the Pyarrow engine.\n """\n return request.param()\n\n\ndef _get_all_parser_float_precision_combinations():\n """\n Return all allowable parser and float precision\n combinations and corresponding ids.\n """\n params = []\n ids = []\n for parser, parser_id in zip(_all_parsers, _all_parser_ids):\n if hasattr(parser, "values"):\n # Wrapped in pytest.param, get the actual parser back\n parser = parser.values[0]\n for precision in parser.float_precision_choices:\n # Re-wrap in pytest.param for pyarrow\n mark = (\n [\n pytest.mark.single_cpu,\n pytest.mark.skipif(\n not HAS_PYARROW, reason="pyarrow is not installed"\n ),\n ]\n if parser.engine == "pyarrow"\n else ()\n )\n param = pytest.param((parser(), precision), marks=mark)\n params.append(param)\n ids.append(f"{parser_id}-{precision}")\n\n return {"params": params, "ids": ids}\n\n\n@pytest.fixture(\n params=_get_all_parser_float_precision_combinations()["params"],\n ids=_get_all_parser_float_precision_combinations()["ids"],\n)\ndef all_parsers_all_precisions(request):\n """\n Fixture for all allowable combinations of parser\n and float precision\n """\n return request.param\n\n\n_utf_values = [8, 16, 32]\n\n_encoding_seps = ["", "-", "_"]\n_encoding_prefixes = ["utf", "UTF"]\n\n_encoding_fmts = [\n f"{prefix}{sep}{{0}}" for sep in _encoding_seps for prefix in _encoding_prefixes\n]\n\n\n@pytest.fixture(params=_utf_values)\ndef utf_value(request):\n """\n Fixture for all possible integer values for a UTF encoding.\n """\n return request.param\n\n\n@pytest.fixture(params=_encoding_fmts)\ndef encoding_fmt(request):\n """\n Fixture for all possible string formats of a UTF encoding.\n """\n return request.param\n\n\n@pytest.fixture(\n params=[\n ("-1,0", -1.0),\n ("-1,2e0", -1.2),\n ("-1e0", -1.0),\n ("+1e0", 1.0),\n ("+1e+0", 1.0),\n ("+1e-1", 0.1),\n 
("+,1e1", 1.0),\n ("+1,e0", 1.0),\n ("-,1e1", -1.0),\n ("-1,e0", -1.0),\n ("0,1", 0.1),\n ("1,", 1.0),\n (",1", 0.1),\n ("-,1", -0.1),\n ("1_,", 1.0),\n ("1_234,56", 1234.56),\n ("1_234,56e0", 1234.56),\n # negative cases; must not parse as float\n ("_", "_"),\n ("-_", "-_"),\n ("-_1", "-_1"),\n ("-_1e0", "-_1e0"),\n ("_1", "_1"),\n ("_1,", "_1,"),\n ("_1,_", "_1,_"),\n ("_1e0", "_1e0"),\n ("1,2e_1", "1,2e_1"),\n ("1,2e1_0", "1,2e1_0"),\n ("1,_2", "1,_2"),\n (",1__2", ",1__2"),\n (",1e", ",1e"),\n ("-,1e", "-,1e"),\n ("1_000,000_000", "1_000,000_000"),\n ("1,e1_2", "1,e1_2"),\n ("e11,2", "e11,2"),\n ("1e11,2", "1e11,2"),\n ("1,2,2", "1,2,2"),\n ("1,2_1", "1,2_1"),\n ("1,2e-10e1", "1,2e-10e1"),\n ("--1,2", "--1,2"),\n ("1a_2,1", "1a_2,1"),\n ("1,2E-1", 0.12),\n ("1,2E1", 12.0),\n ]\n)\ndef numeric_decimal(request):\n """\n Fixture for all numeric formats which should get recognized. The first entry\n represents the value to read while the second represents the expected result.\n """\n return request.param\n\n\n@pytest.fixture\ndef pyarrow_xfail(request):\n """\n Fixture that xfails a test if the engine is pyarrow.\n\n Use if failure is do to unsupported keywords or inconsistent results.\n """\n if "all_parsers" in request.fixturenames:\n parser = request.getfixturevalue("all_parsers")\n elif "all_parsers_all_precisions" in request.fixturenames:\n # Return value is tuple of (engine, precision)\n parser = request.getfixturevalue("all_parsers_all_precisions")[0]\n else:\n return\n if parser.engine == "pyarrow":\n mark = pytest.mark.xfail(reason="pyarrow doesn't support this.")\n request.applymarker(mark)\n\n\n@pytest.fixture\ndef pyarrow_skip(request):\n """\n Fixture that skips a test if the engine is pyarrow.\n\n Use if failure is do a parsing failure from pyarrow.csv.read_csv\n """\n if "all_parsers" in request.fixturenames:\n parser = request.getfixturevalue("all_parsers")\n elif "all_parsers_all_precisions" in request.fixturenames:\n # Return value is tuple of 
(engine, precision)\n parser = request.getfixturevalue("all_parsers_all_precisions")[0]\n else:\n return\n if parser.engine == "pyarrow":\n pytest.skip(reason="https://github.com/apache/arrow/issues/38676")\n | .venv\Lib\site-packages\pandas\tests\io\parser\conftest.py | conftest.py | Python | 9,144 | 0.95 | 0.145401 | 0.061818 | react-lib | 535 | 2023-08-19T16:00:36.269564 | MIT | true | 58ab69529c95451c10aeda335feb922b |
"""\nTests that comments are properly handled during parsing\nfor all of the parsers defined in parsers.py\n"""\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize("na_values", [None, ["NaN"]])\ndef test_comment(all_parsers, na_values):\n parser = all_parsers\n data = """A,B,C\n1,2.,4.#hello world\n5.,NaN,10.0\n"""\n expected = DataFrame(\n [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]\n )\n if parser.engine == "pyarrow":\n msg = "The 'comment' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), comment="#", na_values=na_values)\n return\n result = parser.read_csv(StringIO(data), comment="#", na_values=na_values)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "read_kwargs", [{}, {"lineterminator": "*"}, {"delim_whitespace": True}]\n)\ndef test_line_comment(all_parsers, read_kwargs, request):\n parser = all_parsers\n data = """# empty\nA,B,C\n1,2.,4.#hello world\n#ignore this line\n5.,NaN,10.0\n"""\n warn = None\n depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"\n\n if read_kwargs.get("delim_whitespace"):\n data = data.replace(",", " ")\n warn = FutureWarning\n elif read_kwargs.get("lineterminator"):\n data = data.replace("\n", read_kwargs.get("lineterminator"))\n\n read_kwargs["comment"] = "#"\n if parser.engine == "pyarrow":\n if "lineterminator" in read_kwargs:\n msg = (\n "The 'lineterminator' option is not supported with the 'pyarrow' engine"\n )\n else:\n msg = "The 'comment' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(\n warn, match=depr_msg, check_stacklevel=False\n ):\n parser.read_csv(StringIO(data), **read_kwargs)\n return\n elif parser.engine == "python" and read_kwargs.get("lineterminator"):\n msg = r"Custom line 
terminators not supported in python parser \(yet\)"\n with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(\n warn, match=depr_msg, check_stacklevel=False\n ):\n parser.read_csv(StringIO(data), **read_kwargs)\n return\n\n with tm.assert_produces_warning(warn, match=depr_msg, check_stacklevel=False):\n result = parser.read_csv(StringIO(data), **read_kwargs)\n\n expected = DataFrame(\n [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_comment_skiprows(all_parsers):\n parser = all_parsers\n data = """# empty\nrandom line\n# second empty line\n1,2,3\nA,B,C\n1,2.,4.\n5.,NaN,10.0\n"""\n # This should ignore the first four lines (including comments).\n expected = DataFrame(\n [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]\n )\n if parser.engine == "pyarrow":\n msg = "The 'comment' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), comment="#", skiprows=4)\n return\n\n result = parser.read_csv(StringIO(data), comment="#", skiprows=4)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_comment_header(all_parsers):\n parser = all_parsers\n data = """# empty\n# second empty line\n1,2,3\nA,B,C\n1,2.,4.\n5.,NaN,10.0\n"""\n # Header should begin at the second non-comment line.\n expected = DataFrame(\n [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]\n )\n if parser.engine == "pyarrow":\n msg = "The 'comment' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), comment="#", header=1)\n return\n result = parser.read_csv(StringIO(data), comment="#", header=1)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_comment_skiprows_header(all_parsers):\n parser = all_parsers\n data = """# empty\n# second empty line\n# third empty line\nX,Y,Z\n1,2,3\nA,B,C\n1,2.,4.\n5.,NaN,10.0\n"""\n # Skiprows 
should skip the first 4 lines (including comments),\n # while header should start from the second non-commented line,\n # starting with line 5.\n expected = DataFrame(\n [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"]\n )\n if parser.engine == "pyarrow":\n msg = "The 'comment' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), comment="#", skiprows=4, header=1)\n return\n\n result = parser.read_csv(StringIO(data), comment="#", skiprows=4, header=1)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("comment_char", ["#", "~", "&", "^", "*", "@"])\ndef test_custom_comment_char(all_parsers, comment_char):\n parser = all_parsers\n data = "a,b,c\n1,2,3#ignore this!\n4,5,6#ignorethistoo"\n\n if parser.engine == "pyarrow":\n msg = "The 'comment' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(data.replace("#", comment_char)), comment=comment_char\n )\n return\n result = parser.read_csv(\n StringIO(data.replace("#", comment_char)), comment=comment_char\n )\n\n expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["a", "b", "c"])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("header", ["infer", None])\ndef test_comment_first_line(all_parsers, header):\n # see gh-4623\n parser = all_parsers\n data = "# notes\na,b,c\n# more notes\n1,2,3"\n\n if header is None:\n expected = DataFrame({0: ["a", "1"], 1: ["b", "2"], 2: ["c", "3"]})\n else:\n expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"])\n\n if parser.engine == "pyarrow":\n msg = "The 'comment' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), comment="#", header=header)\n return\n result = parser.read_csv(StringIO(data), comment="#", header=header)\n tm.assert_frame_equal(result, expected)\n\n\ndef 
test_comment_char_in_default_value(all_parsers, request):\n # GH#34002\n if all_parsers.engine == "c":\n reason = "see gh-34002: works on the python engine but not the c engine"\n # NA value containing comment char is interpreted as comment\n request.applymarker(pytest.mark.xfail(reason=reason, raises=AssertionError))\n parser = all_parsers\n\n data = (\n "# this is a comment\n"\n "col1,col2,col3,col4\n"\n "1,2,3,4#inline comment\n"\n "4,5#,6,10\n"\n "7,8,#N/A,11\n"\n )\n if parser.engine == "pyarrow":\n msg = "The 'comment' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), comment="#", na_values="#N/A")\n return\n result = parser.read_csv(StringIO(data), comment="#", na_values="#N/A")\n expected = DataFrame(\n {\n "col1": [1, 4, 7],\n "col2": [2, 5, 8],\n "col3": [3.0, np.nan, np.nan],\n "col4": [4.0, np.nan, 11.0],\n }\n )\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_comment.py | test_comment.py | Python | 7,560 | 0.95 | 0.096916 | 0.065657 | python-kit | 974 | 2024-11-20T18:50:33.868867 | MIT | true | bf988395ccc69ee8409b4ccae331c311 |
"""\nTests compressed data parsing functionality for all\nof the parsers defined in parsers.py\n"""\n\nimport os\nfrom pathlib import Path\nimport tarfile\nimport zipfile\n\nimport pytest\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\n\n@pytest.fixture(params=[True, False])\ndef buffer(request):\n return request.param\n\n\n@pytest.fixture\ndef parser_and_data(all_parsers, csv1):\n parser = all_parsers\n\n with open(csv1, "rb") as f:\n data = f.read()\n expected = parser.read_csv(csv1)\n\n return parser, data, expected\n\n\n@pytest.mark.parametrize("compression", ["zip", "infer", "zip2"])\ndef test_zip(parser_and_data, compression):\n parser, data, expected = parser_and_data\n\n with tm.ensure_clean("test_file.zip") as path:\n with zipfile.ZipFile(path, mode="w") as tmp:\n tmp.writestr("test_file", data)\n\n if compression == "zip2":\n with open(path, "rb") as f:\n result = parser.read_csv(f, compression="zip")\n else:\n result = parser.read_csv(path, compression=compression)\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("compression", ["zip", "infer"])\ndef test_zip_error_multiple_files(parser_and_data, compression):\n parser, data, expected = parser_and_data\n\n with tm.ensure_clean("combined_zip.zip") as path:\n inner_file_names = ["test_file", "second_file"]\n\n with zipfile.ZipFile(path, mode="w") as tmp:\n for file_name in inner_file_names:\n tmp.writestr(file_name, data)\n\n with pytest.raises(ValueError, match="Multiple files"):\n parser.read_csv(path, compression=compression)\n\n\ndef test_zip_error_no_files(parser_and_data):\n parser, _, _ = parser_and_data\n\n with tm.ensure_clean() as path:\n with zipfile.ZipFile(path, mode="w"):\n pass\n\n with pytest.raises(ValueError, match="Zero files"):\n parser.read_csv(path, compression="zip")\n\n\ndef test_zip_error_invalid_zip(parser_and_data):\n parser, 
_, _ = parser_and_data\n\n with tm.ensure_clean() as path:\n with open(path, "rb") as f:\n with pytest.raises(zipfile.BadZipFile, match="File is not a zip file"):\n parser.read_csv(f, compression="zip")\n\n\n@pytest.mark.parametrize("filename", [None, "test.{ext}"])\ndef test_compression(\n request,\n parser_and_data,\n compression_only,\n buffer,\n filename,\n compression_to_extension,\n):\n parser, data, expected = parser_and_data\n compress_type = compression_only\n\n ext = compression_to_extension[compress_type]\n filename = filename if filename is None else filename.format(ext=ext)\n\n if filename and buffer:\n request.applymarker(\n pytest.mark.xfail(\n reason="Cannot deduce compression from buffer of compressed data."\n )\n )\n\n with tm.ensure_clean(filename=filename) as path:\n tm.write_to_compressed(compress_type, path, data)\n compression = "infer" if filename else compress_type\n\n if buffer:\n with open(path, "rb") as f:\n result = parser.read_csv(f, compression=compression)\n else:\n result = parser.read_csv(path, compression=compression)\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("ext", [None, "gz", "bz2"])\ndef test_infer_compression(all_parsers, csv1, buffer, ext):\n # see gh-9770\n parser = all_parsers\n kwargs = {"index_col": 0, "parse_dates": True}\n\n expected = parser.read_csv(csv1, **kwargs)\n kwargs["compression"] = "infer"\n\n if buffer:\n with open(csv1, encoding="utf-8") as f:\n result = parser.read_csv(f, **kwargs)\n else:\n ext = "." 
+ ext if ext else ""\n result = parser.read_csv(csv1 + ext, **kwargs)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_compression_utf_encoding(all_parsers, csv_dir_path, utf_value, encoding_fmt):\n # see gh-18071, gh-24130\n parser = all_parsers\n encoding = encoding_fmt.format(utf_value)\n path = os.path.join(csv_dir_path, f"utf{utf_value}_ex_small.zip")\n\n result = parser.read_csv(path, encoding=encoding, compression="zip", sep="\t")\n expected = DataFrame(\n {\n "Country": ["Venezuela", "Venezuela"],\n "Twitter": ["Hugo Chávez Frías", "Henrique Capriles R."],\n }\n )\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("invalid_compression", ["sfark", "bz3", "zipper"])\ndef test_invalid_compression(all_parsers, invalid_compression):\n parser = all_parsers\n compress_kwargs = {"compression": invalid_compression}\n\n msg = f"Unrecognized compression type: {invalid_compression}"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv("test_file.zip", **compress_kwargs)\n\n\ndef test_compression_tar_archive(all_parsers, csv_dir_path):\n parser = all_parsers\n path = os.path.join(csv_dir_path, "tar_csv.tar.gz")\n df = parser.read_csv(path)\n assert list(df.columns) == ["a"]\n\n\ndef test_ignore_compression_extension(all_parsers):\n parser = all_parsers\n df = DataFrame({"a": [0, 1]})\n with tm.ensure_clean("test.csv") as path_csv:\n with tm.ensure_clean("test.csv.zip") as path_zip:\n # make sure to create un-compressed file with zip extension\n df.to_csv(path_csv, index=False)\n Path(path_zip).write_text(\n Path(path_csv).read_text(encoding="utf-8"), encoding="utf-8"\n )\n\n tm.assert_frame_equal(parser.read_csv(path_zip, compression=None), df)\n\n\ndef test_writes_tar_gz(all_parsers):\n parser = all_parsers\n data = DataFrame(\n {\n "Country": ["Venezuela", "Venezuela"],\n "Twitter": ["Hugo Chávez Frías", "Henrique Capriles R."],\n }\n )\n with tm.ensure_clean("test.tar.gz") as tar_path:\n data.to_csv(tar_path, 
index=False)\n\n # test that read_csv infers .tar.gz to gzip:\n tm.assert_frame_equal(parser.read_csv(tar_path), data)\n\n # test that file is indeed gzipped:\n with tarfile.open(tar_path, "r:gz") as tar:\n result = parser.read_csv(\n tar.extractfile(tar.getnames()[0]), compression="infer"\n )\n tm.assert_frame_equal(result, data)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_compression.py | test_compression.py | Python | 6,403 | 0.95 | 0.104265 | 0.032258 | react-lib | 858 | 2024-02-03T15:40:49.050064 | Apache-2.0 | true | b8797eeb63e816e87df5ff50750a78e3 |
import numpy as np\nimport pytest\n\nfrom pandas.errors import DtypeWarning\n\nimport pandas._testing as tm\nfrom pandas.core.arrays import ArrowExtensionArray\n\nfrom pandas.io.parsers.c_parser_wrapper import _concatenate_chunks\n\n\ndef test_concatenate_chunks_pyarrow():\n # GH#51876\n pa = pytest.importorskip("pyarrow")\n chunks = [\n {0: ArrowExtensionArray(pa.array([1.5, 2.5]))},\n {0: ArrowExtensionArray(pa.array([1, 2]))},\n ]\n result = _concatenate_chunks(chunks)\n expected = ArrowExtensionArray(pa.array([1.5, 2.5, 1.0, 2.0]))\n tm.assert_extension_array_equal(result[0], expected)\n\n\ndef test_concatenate_chunks_pyarrow_strings():\n # GH#51876\n pa = pytest.importorskip("pyarrow")\n chunks = [\n {0: ArrowExtensionArray(pa.array([1.5, 2.5]))},\n {0: ArrowExtensionArray(pa.array(["a", "b"]))},\n ]\n with tm.assert_produces_warning(DtypeWarning, match="have mixed types"):\n result = _concatenate_chunks(chunks)\n expected = np.concatenate(\n [np.array([1.5, 2.5], dtype=object), np.array(["a", "b"])]\n )\n tm.assert_numpy_array_equal(result[0], expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_concatenate_chunks.py | test_concatenate_chunks.py | Python | 1,128 | 0.95 | 0.055556 | 0.068966 | react-lib | 252 | 2024-11-10T21:56:09.799974 | MIT | true | f8268983022d02663b16df0822f3a87e |
"""\nTests column conversion functionality during parsing\nfor all of the parsers defined in parsers.py\n"""\nfrom io import StringIO\n\nfrom dateutil.parser import parse\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Index,\n)\nimport pandas._testing as tm\n\n\ndef test_converters_type_must_be_dict(all_parsers):\n parser = all_parsers\n data = """index,A,B,C,D\nfoo,2,3,4,5\n"""\n if parser.engine == "pyarrow":\n msg = "The 'converters' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), converters=0)\n return\n with pytest.raises(TypeError, match="Type converters.+"):\n parser.read_csv(StringIO(data), converters=0)\n\n\n@pytest.mark.parametrize("column", [3, "D"])\n@pytest.mark.parametrize(\n "converter", [parse, lambda x: int(x.split("/")[2])] # Produce integer.\n)\ndef test_converters(all_parsers, column, converter):\n parser = all_parsers\n data = """A,B,C,D\na,1,2,01/01/2009\nb,3,4,01/02/2009\nc,4,5,01/03/2009\n"""\n if parser.engine == "pyarrow":\n msg = "The 'converters' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), converters={column: converter})\n return\n\n result = parser.read_csv(StringIO(data), converters={column: converter})\n\n expected = parser.read_csv(StringIO(data))\n expected["D"] = expected["D"].map(converter)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_converters_no_implicit_conv(all_parsers):\n # see gh-2184\n parser = all_parsers\n data = """000102,1.2,A\n001245,2,B"""\n\n converters = {0: lambda x: x.strip()}\n\n if parser.engine == "pyarrow":\n msg = "The 'converters' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), header=None, converters=converters)\n return\n\n result = parser.read_csv(StringIO(data), header=None, 
converters=converters)\n\n # Column 0 should not be casted to numeric and should remain as object.\n expected = DataFrame([["000102", 1.2, "A"], ["001245", 2, "B"]])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_converters_euro_decimal_format(all_parsers):\n # see gh-583\n converters = {}\n parser = all_parsers\n\n data = """Id;Number1;Number2;Text1;Text2;Number3\n1;1521,1541;187101,9543;ABC;poi;4,7387\n2;121,12;14897,76;DEF;uyt;0,3773\n3;878,158;108013,434;GHI;rez;2,7356"""\n converters["Number1"] = converters["Number2"] = converters[\n "Number3"\n ] = lambda x: float(x.replace(",", "."))\n\n if parser.engine == "pyarrow":\n msg = "The 'converters' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), sep=";", converters=converters)\n return\n\n result = parser.read_csv(StringIO(data), sep=";", converters=converters)\n expected = DataFrame(\n [\n [1, 1521.1541, 187101.9543, "ABC", "poi", 4.7387],\n [2, 121.12, 14897.76, "DEF", "uyt", 0.3773],\n [3, 878.158, 108013.434, "GHI", "rez", 2.7356],\n ],\n columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_converters_corner_with_nans(all_parsers):\n parser = all_parsers\n data = """id,score,days\n1,2,12\n2,2-5,\n3,,14+\n4,6-12,2"""\n\n # Example converters.\n def convert_days(x):\n x = x.strip()\n\n if not x:\n return np.nan\n\n is_plus = x.endswith("+")\n\n if is_plus:\n x = int(x[:-1]) + 1\n else:\n x = int(x)\n\n return x\n\n def convert_days_sentinel(x):\n x = x.strip()\n\n if not x:\n return np.nan\n\n is_plus = x.endswith("+")\n\n if is_plus:\n x = int(x[:-1]) + 1\n else:\n x = int(x)\n\n return x\n\n def convert_score(x):\n x = x.strip()\n\n if not x:\n return np.nan\n\n if x.find("-") > 0:\n val_min, val_max = map(int, x.split("-"))\n val = 0.5 * (val_min + val_max)\n else:\n val = float(x)\n\n return val\n\n results = []\n\n for day_converter in 
[convert_days, convert_days_sentinel]:\n if parser.engine == "pyarrow":\n msg = "The 'converters' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(data),\n converters={"score": convert_score, "days": day_converter},\n na_values=["", None],\n )\n continue\n\n result = parser.read_csv(\n StringIO(data),\n converters={"score": convert_score, "days": day_converter},\n na_values=["", None],\n )\n assert pd.isna(result["days"][1])\n results.append(result)\n\n if parser.engine != "pyarrow":\n tm.assert_frame_equal(results[0], results[1])\n\n\n@pytest.mark.parametrize("conv_f", [lambda x: x, str])\ndef test_converter_index_col_bug(all_parsers, conv_f):\n # see gh-1835 , GH#40589\n parser = all_parsers\n data = "A;B\n1;2\n3;4"\n\n if parser.engine == "pyarrow":\n msg = "The 'converters' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(data), sep=";", index_col="A", converters={"A": conv_f}\n )\n return\n\n rs = parser.read_csv(\n StringIO(data), sep=";", index_col="A", converters={"A": conv_f}\n )\n\n xp = DataFrame({"B": [2, 4]}, index=Index(["1", "3"], name="A"))\n tm.assert_frame_equal(rs, xp)\n\n\ndef test_converter_identity_object(all_parsers):\n # GH#40589\n parser = all_parsers\n data = "A,B\n1,2\n3,4"\n\n if parser.engine == "pyarrow":\n msg = "The 'converters' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), converters={"A": lambda x: x})\n return\n\n rs = parser.read_csv(StringIO(data), converters={"A": lambda x: x})\n\n xp = DataFrame({"A": ["1", "3"], "B": [2, 4]})\n tm.assert_frame_equal(rs, xp)\n\n\ndef test_converter_multi_index(all_parsers):\n # GH 42446\n parser = all_parsers\n data = "A,B,B\nX,Y,Z\n1,2,3"\n\n if parser.engine == "pyarrow":\n msg = "The 'converters' option is not supported with the 'pyarrow' engine"\n with 
pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(data),\n header=list(range(2)),\n converters={\n ("A", "X"): np.int32,\n ("B", "Y"): np.int32,\n ("B", "Z"): np.float32,\n },\n )\n return\n\n result = parser.read_csv(\n StringIO(data),\n header=list(range(2)),\n converters={\n ("A", "X"): np.int32,\n ("B", "Y"): np.int32,\n ("B", "Z"): np.float32,\n },\n )\n\n expected = DataFrame(\n {\n ("A", "X"): np.int32([1]),\n ("B", "Y"): np.int32([2]),\n ("B", "Z"): np.float32([3]),\n }\n )\n\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_converters.py | test_converters.py | Python | 7,437 | 0.95 | 0.106464 | 0.033816 | awesome-app | 885 | 2024-04-04T00:42:53.918106 | BSD-3-Clause | true | ae51a1eb69751a38a0a0ed4f19a06a28 |
"""\nTests that apply specifically to the CParser. Unless specifically stated\nas a CParser-specific issue, the goal is to eventually move as many of\nthese tests out of this module as soon as the Python parser can accept\nfurther arguments when parsing.\n"""\nfrom decimal import Decimal\nfrom io import (\n BytesIO,\n StringIO,\n TextIOWrapper,\n)\nimport mmap\nimport os\nimport tarfile\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat.numpy import np_version_gte1p24\nfrom pandas.errors import (\n ParserError,\n ParserWarning,\n)\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n DataFrame,\n concat,\n)\nimport pandas._testing as tm\n\n\n@pytest.mark.parametrize(\n "malformed",\n ["1\r1\r1\r 1\r 1\r", "1\r1\r1\r 1\r 1\r11\r", "1\r1\r1\r 1\r 1\r11\r1\r"],\n ids=["words pointer", "stream pointer", "lines pointer"],\n)\ndef test_buffer_overflow(c_parser_only, malformed):\n # see gh-9205: test certain malformed input files that cause\n # buffer overflows in tokenizer.c\n msg = "Buffer overflow caught - possible malformed input file."\n parser = c_parser_only\n\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(malformed))\n\n\ndef test_delim_whitespace_custom_terminator(c_parser_only):\n # See gh-12912\n data = "a b c~1 2 3~4 5 6~7 8 9"\n parser = c_parser_only\n\n depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n df = parser.read_csv(StringIO(data), lineterminator="~", delim_whitespace=True)\n expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"])\n tm.assert_frame_equal(df, expected)\n\n\ndef test_dtype_and_names_error(c_parser_only):\n # see gh-8833: passing both dtype and names\n # resulting in an error reporting issue\n parser = c_parser_only\n data = """\n1.0 1\n2.0 2\n3.0 3\n"""\n # base cases\n result = parser.read_csv(StringIO(data), sep=r"\s+", header=None)\n 
expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]])\n tm.assert_frame_equal(result, expected)\n\n result = parser.read_csv(StringIO(data), sep=r"\s+", header=None, names=["a", "b"])\n expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["a", "b"])\n tm.assert_frame_equal(result, expected)\n\n # fallback casting\n result = parser.read_csv(\n StringIO(data), sep=r"\s+", header=None, names=["a", "b"], dtype={"a": np.int32}\n )\n expected = DataFrame([[1, 1], [2, 2], [3, 3]], columns=["a", "b"])\n expected["a"] = expected["a"].astype(np.int32)\n tm.assert_frame_equal(result, expected)\n\n data = """\n1.0 1\nnan 2\n3.0 3\n"""\n # fallback casting, but not castable\n warning = RuntimeWarning if np_version_gte1p24 else None\n with pytest.raises(ValueError, match="cannot safely convert"):\n with tm.assert_produces_warning(warning, check_stacklevel=False):\n parser.read_csv(\n StringIO(data),\n sep=r"\s+",\n header=None,\n names=["a", "b"],\n dtype={"a": np.int32},\n )\n\n\n@pytest.mark.parametrize(\n "match,kwargs",\n [\n # For each of these cases, all of the dtypes are valid, just unsupported.\n (\n (\n "the dtype datetime64 is not supported for parsing, "\n "pass this column using parse_dates instead"\n ),\n {"dtype": {"A": "datetime64", "B": "float64"}},\n ),\n (\n (\n "the dtype datetime64 is not supported for parsing, "\n "pass this column using parse_dates instead"\n ),\n {"dtype": {"A": "datetime64", "B": "float64"}, "parse_dates": ["B"]},\n ),\n (\n "the dtype timedelta64 is not supported for parsing",\n {"dtype": {"A": "timedelta64", "B": "float64"}},\n ),\n (\n f"the dtype {tm.ENDIAN}U8 is not supported for parsing",\n {"dtype": {"A": "U8"}},\n ),\n ],\n ids=["dt64-0", "dt64-1", "td64", f"{tm.ENDIAN}U8"],\n)\ndef test_unsupported_dtype(c_parser_only, match, kwargs):\n parser = c_parser_only\n df = DataFrame(\n np.random.default_rng(2).random((5, 2)),\n columns=list("AB"),\n index=["1A", "1B", "1C", "1D", "1E"],\n )\n\n with 
tm.ensure_clean("__unsupported_dtype__.csv") as path:\n df.to_csv(path)\n\n with pytest.raises(TypeError, match=match):\n parser.read_csv(path, index_col=0, **kwargs)\n\n\n@td.skip_if_32bit\n@pytest.mark.slow\n# test numbers between 1 and 2\n@pytest.mark.parametrize("num", np.linspace(1.0, 2.0, num=21))\ndef test_precise_conversion(c_parser_only, num):\n parser = c_parser_only\n\n normal_errors = []\n precise_errors = []\n\n def error(val: float, actual_val: Decimal) -> Decimal:\n return abs(Decimal(f"{val:.100}") - actual_val)\n\n # 25 decimal digits of precision\n text = f"a\n{num:.25}"\n\n normal_val = float(\n parser.read_csv(StringIO(text), float_precision="legacy")["a"][0]\n )\n precise_val = float(parser.read_csv(StringIO(text), float_precision="high")["a"][0])\n roundtrip_val = float(\n parser.read_csv(StringIO(text), float_precision="round_trip")["a"][0]\n )\n actual_val = Decimal(text[2:])\n\n normal_errors.append(error(normal_val, actual_val))\n precise_errors.append(error(precise_val, actual_val))\n\n # round-trip should match float()\n assert roundtrip_val == float(text[2:])\n\n assert sum(precise_errors) <= sum(normal_errors)\n assert max(precise_errors) <= max(normal_errors)\n\n\ndef test_usecols_dtypes(c_parser_only, using_infer_string):\n parser = c_parser_only\n data = """\\n1,2,3\n4,5,6\n7,8,9\n10,11,12"""\n\n result = parser.read_csv(\n StringIO(data),\n usecols=(0, 1, 2),\n names=("a", "b", "c"),\n header=None,\n converters={"a": str},\n dtype={"b": int, "c": float},\n )\n result2 = parser.read_csv(\n StringIO(data),\n usecols=(0, 2),\n names=("a", "b", "c"),\n header=None,\n converters={"a": str},\n dtype={"b": int, "c": float},\n )\n\n if using_infer_string:\n assert (result.dtypes == ["string", int, float]).all()\n assert (result2.dtypes == ["string", float]).all()\n else:\n assert (result.dtypes == [object, int, float]).all()\n assert (result2.dtypes == [object, float]).all()\n\n\ndef test_disable_bool_parsing(c_parser_only):\n # see 
gh-2090\n\n parser = c_parser_only\n data = """A,B,C\nYes,No,Yes\nNo,Yes,Yes\nYes,,Yes\nNo,No,No"""\n\n result = parser.read_csv(StringIO(data), dtype=object)\n assert (result.dtypes == object).all()\n\n result = parser.read_csv(StringIO(data), dtype=object, na_filter=False)\n assert result["B"][2] == ""\n\n\ndef test_custom_lineterminator(c_parser_only):\n parser = c_parser_only\n data = "a,b,c~1,2,3~4,5,6"\n\n result = parser.read_csv(StringIO(data), lineterminator="~")\n expected = parser.read_csv(StringIO(data.replace("~", "\n")))\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_ragged_csv(c_parser_only):\n parser = c_parser_only\n data = """1,2,3\n1,2,3,4\n1,2,3,4,5\n1,2\n1,2,3,4"""\n\n nice_data = """1,2,3,,\n1,2,3,4,\n1,2,3,4,5\n1,2,,,\n1,2,3,4,"""\n result = parser.read_csv(\n StringIO(data), header=None, names=["a", "b", "c", "d", "e"]\n )\n\n expected = parser.read_csv(\n StringIO(nice_data), header=None, names=["a", "b", "c", "d", "e"]\n )\n\n tm.assert_frame_equal(result, expected)\n\n # too many columns, cause segfault if not careful\n data = "1,2\n3,4,5"\n\n result = parser.read_csv(StringIO(data), header=None, names=range(50))\n expected = parser.read_csv(StringIO(data), header=None, names=range(3)).reindex(\n columns=range(50)\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_tokenize_CR_with_quoting(c_parser_only):\n # see gh-3453\n parser = c_parser_only\n data = ' a,b,c\r"a,b","e,d","f,f"'\n\n result = parser.read_csv(StringIO(data), header=None)\n expected = parser.read_csv(StringIO(data.replace("\r", "\n")), header=None)\n tm.assert_frame_equal(result, expected)\n\n result = parser.read_csv(StringIO(data))\n expected = parser.read_csv(StringIO(data.replace("\r", "\n")))\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize("count", [3 * 2**n for n in range(6)])\ndef test_grow_boundary_at_cap(c_parser_only, count):\n # See gh-12494\n #\n # Cause of error was that the C parser\n # 
was not increasing the buffer size when\n # the desired space would fill the buffer\n # to capacity, which would later cause a\n # buffer overflow error when checking the\n # EOF terminator of the CSV stream.\n # 3 * 2^n commas was observed to break the parser\n parser = c_parser_only\n\n with StringIO("," * count) as s:\n expected = DataFrame(columns=[f"Unnamed: {i}" for i in range(count + 1)])\n df = parser.read_csv(s)\n tm.assert_frame_equal(df, expected)\n\n\n@pytest.mark.slow\n@pytest.mark.parametrize("encoding", [None, "utf-8"])\ndef test_parse_trim_buffers(c_parser_only, encoding):\n # This test is part of a bugfix for gh-13703. It attempts to\n # to stress the system memory allocator, to cause it to move the\n # stream buffer and either let the OS reclaim the region, or let\n # other memory requests of parser otherwise modify the contents\n # of memory space, where it was formally located.\n # This test is designed to cause a `segfault` with unpatched\n # `tokenizer.c`. Sometimes the test fails on `segfault`, other\n # times it fails due to memory corruption, which causes the\n # loaded DataFrame to differ from the expected one.\n\n # Also force 'utf-8' encoding, so that `_string_convert` would take\n # a different execution branch.\n\n parser = c_parser_only\n\n # Generate a large mixed-type CSV file on-the-fly (one record is\n # approx 1.5KiB).\n record_ = (\n """9999-9,99:99,,,,ZZ,ZZ,,,ZZZ-ZZZZ,.Z-ZZZZ,-9.99,,,9.99,Z"""\n """ZZZZ,,-99,9,ZZZ-ZZZZ,ZZ-ZZZZ,,9.99,ZZZ-ZZZZZ,ZZZ-ZZZZZ,"""\n """ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,9"""\n """99,ZZZ-ZZZZ,,ZZ-ZZZZ,,,,,ZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,,,9,9,"""\n """9,9,99,99,999,999,ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZ,9,ZZ-ZZZZ,9."""\n """99,ZZ-ZZZZ,ZZ-ZZZZ,,,,ZZZZ,,,ZZ,ZZ,,,,,,,,,,,,,9,,,999."""\n """99,999.99,,,ZZZZZ,,,Z9,,,,,,,ZZZ,ZZZ,,,,,,,,,,,ZZZZZ,ZZ"""\n """ZZZ,ZZZ-ZZZZZZ,ZZZ-ZZZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZZZ,ZZ-ZZ"""\n """ZZ,,,999999,999999,ZZZ,ZZZ,,,ZZZ,ZZZ,999.99,999.99,,,,Z"""\n 
"""ZZ-ZZZ,ZZZ-ZZZ,-9.99,-9.99,9,9,,99,,9.99,9.99,9,9,9.99,"""\n """9.99,,,,9.99,9.99,,99,,99,9.99,9.99,,,ZZZ,ZZZ,,999.99,,"""\n """999.99,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,ZZZZZ,ZZZZZ,ZZZ,ZZZ,9,9,"""\n """,,,,,ZZZ-ZZZZ,ZZZ999Z,,,999.99,,999.99,ZZZ-ZZZZ,,,9.999"""\n """,9.999,9.999,9.999,-9.999,-9.999,-9.999,-9.999,9.999,9."""\n """999,9.999,9.999,9.999,9.999,9.999,9.999,99999,ZZZ-ZZZZ,"""\n """,9.99,ZZZ,,,,,,,,ZZZ,,,,,9,,,,9,,,,,,,,,,ZZZ-ZZZZ,ZZZ-Z"""\n """ZZZ,,ZZZZZ,ZZZZZ,ZZZZZ,ZZZZZ,,,9.99,,ZZ-ZZZZ,ZZ-ZZZZ,ZZ"""\n """,999,,,,ZZ-ZZZZ,ZZZ,ZZZ,ZZZ-ZZZZ,ZZZ-ZZZZ,,,99.99,99.99"""\n """,,,9.99,9.99,9.99,9.99,ZZZ-ZZZZ,,,ZZZ-ZZZZZ,,,,,-9.99,-"""\n """9.99,-9.99,-9.99,,,,,,,,,ZZZ-ZZZZ,,9,9.99,9.99,99ZZ,,-9"""\n """.99,-9.99,ZZZ-ZZZZ,,,,,,,ZZZ-ZZZZ,9.99,9.99,9999,,,,,,,"""\n """,,,-9.9,Z/Z-ZZZZ,999.99,9.99,,999.99,ZZ-ZZZZ,ZZ-ZZZZ,9."""\n """99,9.99,9.99,9.99,9.99,9.99,,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ-ZZ"""\n """ZZZ,ZZZ-ZZZZZ,ZZZ-ZZZZZ,ZZZ,ZZZ,ZZZ,ZZZ,9.99,,,-9.99,ZZ"""\n """-ZZZZ,-999.99,,-9999,,999.99,,,,999.99,99.99,,,ZZ-ZZZZZ"""\n """ZZZ,ZZ-ZZZZ-ZZZZZZZ,,,,ZZ-ZZ-ZZZZZZZZ,ZZZZZZZZ,ZZZ-ZZZZ"""\n """,9999,999.99,ZZZ-ZZZZ,-9.99,-9.99,ZZZ-ZZZZ,99:99:99,,99"""\n """,99,,9.99,,-99.99,,,,,,9.99,ZZZ-ZZZZ,-9.99,-9.99,9.99,9"""\n """.99,,ZZZ,,,,,,,ZZZ,ZZZ,,,,,"""\n )\n\n # Set the number of lines so that a call to `parser_trim_buffers`\n # is triggered: after a couple of full chunks are consumed a\n # relatively small 'residual' chunk would cause reallocation\n # within the parser.\n chunksize, n_lines = 128, 2 * 128 + 15\n csv_data = "\n".join([record_] * n_lines) + "\n"\n\n # We will use StringIO to load the CSV from this text buffer.\n # pd.read_csv() will iterate over the file in chunks and will\n # finally read a residual chunk of really small size.\n\n # Generate the expected output: manually create the dataframe\n # by splitting by comma and repeating the `n_lines` times.\n row = tuple(val_ if val_ else np.nan for val_ in record_.split(","))\n expected = DataFrame(\n [row for _ in 
range(n_lines)], dtype=object, columns=None, index=None\n )\n\n # Iterate over the CSV file in chunks of `chunksize` lines\n with parser.read_csv(\n StringIO(csv_data),\n header=None,\n dtype=object,\n chunksize=chunksize,\n encoding=encoding,\n ) as chunks_:\n result = concat(chunks_, axis=0, ignore_index=True)\n\n # Check for data corruption if there was no segfault\n tm.assert_frame_equal(result, expected)\n\n\ndef test_internal_null_byte(c_parser_only):\n # see gh-14012\n #\n # The null byte ('\x00') should not be used as a\n # true line terminator, escape character, or comment\n # character, only as a placeholder to indicate that\n # none was specified.\n #\n # This test should be moved to test_common.py ONLY when\n # Python's csv class supports parsing '\x00'.\n parser = c_parser_only\n\n names = ["a", "b", "c"]\n data = "1,2,3\n4,\x00,6\n7,8,9"\n expected = DataFrame([[1, 2.0, 3], [4, np.nan, 6], [7, 8, 9]], columns=names)\n\n result = parser.read_csv(StringIO(data), names=names)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_nrows_large(c_parser_only):\n # gh-7626 - Read only nrows of data in for large inputs (>262144b)\n parser = c_parser_only\n header_narrow = "\t".join(["COL_HEADER_" + str(i) for i in range(10)]) + "\n"\n data_narrow = "\t".join(["somedatasomedatasomedata1" for _ in range(10)]) + "\n"\n header_wide = "\t".join(["COL_HEADER_" + str(i) for i in range(15)]) + "\n"\n data_wide = "\t".join(["somedatasomedatasomedata2" for _ in range(15)]) + "\n"\n test_input = header_narrow + data_narrow * 1050 + header_wide + data_wide * 2\n\n df = parser.read_csv(StringIO(test_input), sep="\t", nrows=1010)\n\n assert df.size == 1010 * 10\n\n\ndef test_float_precision_round_trip_with_text(c_parser_only):\n # see gh-15140\n parser = c_parser_only\n df = parser.read_csv(StringIO("a"), header=None, float_precision="round_trip")\n tm.assert_frame_equal(df, DataFrame({0: ["a"]}))\n\n\ndef test_large_difference_in_columns(c_parser_only):\n # see 
gh-14125\n parser = c_parser_only\n\n count = 10000\n large_row = ("X," * count)[:-1] + "\n"\n normal_row = "XXXXXX XXXXXX,111111111111111\n"\n test_input = (large_row + normal_row * 6)[:-1]\n\n result = parser.read_csv(StringIO(test_input), header=None, usecols=[0])\n rows = test_input.split("\n")\n\n expected = DataFrame([row.split(",")[0] for row in rows])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_data_after_quote(c_parser_only):\n # see gh-15910\n parser = c_parser_only\n\n data = 'a\n1\n"b"a'\n result = parser.read_csv(StringIO(data))\n\n expected = DataFrame({"a": ["1", "ba"]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_comment_whitespace_delimited(c_parser_only):\n parser = c_parser_only\n test_input = """\\n1 2\n2 2 3\n3 2 3 # 3 fields\n4 2 3# 3 fields\n5 2 # 2 fields\n6 2# 2 fields\n7 # 1 field, NaN\n8# 1 field, NaN\n9 2 3 # skipped line\n# comment"""\n with tm.assert_produces_warning(\n ParserWarning, match="Skipping line", check_stacklevel=False\n ):\n df = parser.read_csv(\n StringIO(test_input),\n comment="#",\n header=None,\n delimiter="\\s+",\n skiprows=0,\n on_bad_lines="warn",\n )\n expected = DataFrame([[1, 2], [5, 2], [6, 2], [7, np.nan], [8, np.nan]])\n tm.assert_frame_equal(df, expected)\n\n\ndef test_file_like_no_next(c_parser_only):\n # gh-16530: the file-like need not have a "next" or "__next__"\n # attribute despite having an "__iter__" attribute.\n #\n # NOTE: This is only true for the C engine, not Python engine.\n class NoNextBuffer(StringIO):\n def __next__(self):\n raise AttributeError("No next method")\n\n next = __next__\n\n parser = c_parser_only\n data = "a\n1"\n\n expected = DataFrame({"a": [1]})\n result = parser.read_csv(NoNextBuffer(data))\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_buffer_rd_bytes_bad_unicode(c_parser_only):\n # see gh-22748\n t = BytesIO(b"\xB0")\n t = TextIOWrapper(t, encoding="ascii", errors="surrogateescape")\n msg = "'utf-8' codec can't encode character"\n with 
pytest.raises(UnicodeError, match=msg):\n c_parser_only.read_csv(t, encoding="UTF-8")\n\n\n@pytest.mark.parametrize("tar_suffix", [".tar", ".tar.gz"])\ndef test_read_tarfile(c_parser_only, csv_dir_path, tar_suffix):\n # see gh-16530\n #\n # Unfortunately, Python's CSV library can't handle\n # tarfile objects (expects string, not bytes when\n # iterating through a file-like).\n parser = c_parser_only\n tar_path = os.path.join(csv_dir_path, "tar_csv" + tar_suffix)\n\n with tarfile.open(tar_path, "r") as tar:\n data_file = tar.extractfile("tar_data.csv")\n\n out = parser.read_csv(data_file)\n expected = DataFrame({"a": [1]})\n tm.assert_frame_equal(out, expected)\n\n\ndef test_chunk_whitespace_on_boundary(c_parser_only):\n # see gh-9735: this issue is C parser-specific (bug when\n # parsing whitespace and characters at chunk boundary)\n #\n # This test case has a field too large for the Python parser / CSV library.\n parser = c_parser_only\n\n chunk1 = "a" * (1024 * 256 - 2) + "\na"\n chunk2 = "\n a"\n result = parser.read_csv(StringIO(chunk1 + chunk2), header=None)\n\n expected = DataFrame(["a" * (1024 * 256 - 2), "a", " a"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_file_handles_mmap(c_parser_only, csv1):\n # gh-14418\n #\n # Don't close user provided file handles.\n parser = c_parser_only\n\n with open(csv1, encoding="utf-8") as f:\n with mmap.mmap(f.fileno(), 0, access=mmap.ACCESS_READ) as m:\n parser.read_csv(m)\n assert not m.closed\n\n\ndef test_file_binary_mode(c_parser_only):\n # see gh-23779\n parser = c_parser_only\n expected = DataFrame([[1, 2, 3], [4, 5, 6]])\n\n with tm.ensure_clean() as path:\n with open(path, "w", encoding="utf-8") as f:\n f.write("1,2,3\n4,5,6")\n\n with open(path, "rb") as f:\n result = parser.read_csv(f, header=None)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_unix_style_breaks(c_parser_only):\n # GH 11020\n parser = c_parser_only\n with tm.ensure_clean() as path:\n with open(path, "w", newline="\n", 
encoding="utf-8") as f:\n f.write("blah\n\ncol_1,col_2,col_3\n\n")\n result = parser.read_csv(path, skiprows=2, encoding="utf-8", engine="c")\n expected = DataFrame(columns=["col_1", "col_2", "col_3"])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])\n@pytest.mark.parametrize(\n "data,thousands,decimal",\n [\n (\n """A|B|C\n1|2,334.01|5\n10|13|10.\n""",\n ",",\n ".",\n ),\n (\n """A|B|C\n1|2.334,01|5\n10|13|10,\n""",\n ".",\n ",",\n ),\n ],\n)\ndef test_1000_sep_with_decimal(\n c_parser_only, data, thousands, decimal, float_precision\n):\n parser = c_parser_only\n expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})\n\n result = parser.read_csv(\n StringIO(data),\n sep="|",\n thousands=thousands,\n decimal=decimal,\n float_precision=float_precision,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_float_precision_options(c_parser_only):\n # GH 17154, 36228\n parser = c_parser_only\n s = "foo\n243.164\n"\n df = parser.read_csv(StringIO(s))\n df2 = parser.read_csv(StringIO(s), float_precision="high")\n\n tm.assert_frame_equal(df, df2)\n\n df3 = parser.read_csv(StringIO(s), float_precision="legacy")\n\n assert not df.iloc[0, 0] == df3.iloc[0, 0]\n\n msg = "Unrecognized float_precision option: junk"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(s), float_precision="junk")\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_c_parser_only.py | test_c_parser_only.py | Python | 20,721 | 0.95 | 0.083462 | 0.155894 | python-kit | 997 | 2024-12-20T08:38:21.948140 | Apache-2.0 | true | 118ce5cda0d0d3bd19519db8c1951a08 |
"""\nTests that dialects are properly handled during parsing\nfor all of the parsers defined in parsers.py\n"""\n\nimport csv\nfrom io import StringIO\n\nimport pytest\n\nfrom pandas.errors import ParserWarning\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\n\n@pytest.fixture\ndef custom_dialect():\n dialect_name = "weird"\n dialect_kwargs = {\n "doublequote": False,\n "escapechar": "~",\n "delimiter": ":",\n "skipinitialspace": False,\n "quotechar": "`",\n "quoting": 3,\n }\n return dialect_name, dialect_kwargs\n\n\ndef test_dialect(all_parsers):\n parser = all_parsers\n data = """\\nlabel1,label2,label3\nindex1,"a,c,e\nindex2,b,d,f\n"""\n\n dia = csv.excel()\n dia.quoting = csv.QUOTE_NONE\n\n if parser.engine == "pyarrow":\n msg = "The 'dialect' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), dialect=dia)\n return\n\n df = parser.read_csv(StringIO(data), dialect=dia)\n\n data = """\\nlabel1,label2,label3\nindex1,a,c,e\nindex2,b,d,f\n"""\n exp = parser.read_csv(StringIO(data))\n exp.replace("a", '"a', inplace=True)\n tm.assert_frame_equal(df, exp)\n\n\ndef test_dialect_str(all_parsers):\n dialect_name = "mydialect"\n parser = all_parsers\n data = """\\nfruit:vegetable\napple:broccoli\npear:tomato\n"""\n exp = DataFrame({"fruit": ["apple", "pear"], "vegetable": ["broccoli", "tomato"]})\n\n with tm.with_csv_dialect(dialect_name, delimiter=":"):\n if parser.engine == "pyarrow":\n msg = "The 'dialect' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), dialect=dialect_name)\n return\n\n df = parser.read_csv(StringIO(data), dialect=dialect_name)\n tm.assert_frame_equal(df, exp)\n\n\ndef test_invalid_dialect(all_parsers):\n class InvalidDialect:\n pass\n\n data = "a\n1"\n parser = 
all_parsers\n msg = "Invalid dialect"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), dialect=InvalidDialect)\n\n\n@pytest.mark.parametrize(\n "arg",\n [None, "doublequote", "escapechar", "skipinitialspace", "quotechar", "quoting"],\n)\n@pytest.mark.parametrize("value", ["dialect", "default", "other"])\ndef test_dialect_conflict_except_delimiter(all_parsers, custom_dialect, arg, value):\n # see gh-23761.\n dialect_name, dialect_kwargs = custom_dialect\n parser = all_parsers\n\n expected = DataFrame({"a": [1], "b": [2]})\n data = "a:b\n1:2"\n\n warning_klass = None\n kwds = {}\n\n # arg=None tests when we pass in the dialect without any other arguments.\n if arg is not None:\n if value == "dialect": # No conflict --> no warning.\n kwds[arg] = dialect_kwargs[arg]\n elif value == "default": # Default --> no warning.\n from pandas.io.parsers.base_parser import parser_defaults\n\n kwds[arg] = parser_defaults[arg]\n else: # Non-default + conflict with dialect --> warning.\n warning_klass = ParserWarning\n kwds[arg] = "blah"\n\n with tm.with_csv_dialect(dialect_name, **dialect_kwargs):\n if parser.engine == "pyarrow":\n msg = "The 'dialect' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv_check_warnings(\n # No warning bc we raise\n None,\n "Conflicting values for",\n StringIO(data),\n dialect=dialect_name,\n **kwds,\n )\n return\n result = parser.read_csv_check_warnings(\n warning_klass,\n "Conflicting values for",\n StringIO(data),\n dialect=dialect_name,\n **kwds,\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "kwargs,warning_klass",\n [\n ({"sep": ","}, None), # sep is default --> sep_override=True\n ({"sep": "."}, ParserWarning), # sep isn't default --> sep_override=False\n ({"delimiter": ":"}, None), # No conflict\n ({"delimiter": None}, None), # Default arguments --> sep_override=True\n ({"delimiter": ","}, ParserWarning), # Conflict\n 
({"delimiter": "."}, ParserWarning), # Conflict\n ],\n ids=[\n "sep-override-true",\n "sep-override-false",\n "delimiter-no-conflict",\n "delimiter-default-arg",\n "delimiter-conflict",\n "delimiter-conflict2",\n ],\n)\ndef test_dialect_conflict_delimiter(all_parsers, custom_dialect, kwargs, warning_klass):\n # see gh-23761.\n dialect_name, dialect_kwargs = custom_dialect\n parser = all_parsers\n\n expected = DataFrame({"a": [1], "b": [2]})\n data = "a:b\n1:2"\n\n with tm.with_csv_dialect(dialect_name, **dialect_kwargs):\n if parser.engine == "pyarrow":\n msg = "The 'dialect' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv_check_warnings(\n # no warning bc we raise\n None,\n "Conflicting values for 'delimiter'",\n StringIO(data),\n dialect=dialect_name,\n **kwargs,\n )\n return\n result = parser.read_csv_check_warnings(\n warning_klass,\n "Conflicting values for 'delimiter'",\n StringIO(data),\n dialect=dialect_name,\n **kwargs,\n )\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_dialect.py | test_dialect.py | Python | 5,844 | 0.95 | 0.092308 | 0.055215 | node-utils | 725 | 2024-11-04T06:43:20.495483 | MIT | true | 20c194e0846bd2d7966228cec8ec9f16 |
"""\nTests encoding functionality during parsing\nfor all of the parsers defined in parsers.py\n"""\nfrom io import (\n BytesIO,\n TextIOWrapper,\n)\nimport os\nimport tempfile\nimport uuid\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n read_csv,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n\ndef test_bytes_io_input(all_parsers):\n encoding = "cp1255"\n parser = all_parsers\n\n data = BytesIO("שלום:1234\n562:123".encode(encoding))\n result = parser.read_csv(data, sep=":", encoding=encoding)\n\n expected = DataFrame([[562, 123]], columns=["שלום", "1234"])\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\ndef test_read_csv_unicode(all_parsers):\n parser = all_parsers\n data = BytesIO("\u0141aski, Jan;1".encode())\n\n result = parser.read_csv(data, sep=";", encoding="utf-8", header=None)\n expected = DataFrame([["\u0141aski, Jan", 1]])\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\n@pytest.mark.parametrize("sep", [",", "\t"])\n@pytest.mark.parametrize("encoding", ["utf-16", "utf-16le", "utf-16be"])\ndef test_utf16_bom_skiprows(all_parsers, sep, encoding):\n # see gh-2298\n parser = all_parsers\n data = """skip this\nskip this too\nA,B,C\n1,2,3\n4,5,6""".replace(\n ",", sep\n )\n path = f"__{uuid.uuid4()}__.csv"\n kwargs = {"sep": sep, "skiprows": 2}\n utf8 = "utf-8"\n\n with tm.ensure_clean(path) as path:\n bytes_data = data.encode(encoding)\n\n with open(path, "wb") as f:\n f.write(bytes_data)\n\n with TextIOWrapper(BytesIO(data.encode(utf8)), encoding=utf8) as bytes_buffer:\n result = parser.read_csv(path, encoding=encoding, **kwargs)\n expected = parser.read_csv(bytes_buffer, encoding=utf8, **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_utf16_example(all_parsers, csv_dir_path):\n path = 
os.path.join(csv_dir_path, "utf16_ex.txt")\n parser = all_parsers\n result = parser.read_csv(path, encoding="utf-16", sep="\t")\n assert len(result) == 50\n\n\ndef test_unicode_encoding(all_parsers, csv_dir_path):\n path = os.path.join(csv_dir_path, "unicode_series.csv")\n parser = all_parsers\n\n result = parser.read_csv(path, header=None, encoding="latin-1")\n result = result.set_index(0)\n got = result[1][1632]\n\n expected = "\xc1 k\xf6ldum klaka (Cold Fever) (1994)"\n assert got == expected\n\n\n@pytest.mark.parametrize(\n "data,kwargs,expected",\n [\n # Basic test\n ("a\n1", {}, DataFrame({"a": [1]})),\n # "Regular" quoting\n ('"a"\n1', {"quotechar": '"'}, DataFrame({"a": [1]})),\n # Test in a data row instead of header\n ("b\n1", {"names": ["a"]}, DataFrame({"a": ["b", "1"]})),\n # Test in empty data row with skipping\n ("\n1", {"names": ["a"], "skip_blank_lines": True}, DataFrame({"a": [1]})),\n # Test in empty data row without skipping\n (\n "\n1",\n {"names": ["a"], "skip_blank_lines": False},\n DataFrame({"a": [np.nan, 1]}),\n ),\n ],\n)\ndef test_utf8_bom(all_parsers, data, kwargs, expected, request):\n # see gh-4793\n parser = all_parsers\n bom = "\ufeff"\n utf8 = "utf-8"\n\n def _encode_data_with_bom(_data):\n bom_data = (bom + _data).encode(utf8)\n return BytesIO(bom_data)\n\n if (\n parser.engine == "pyarrow"\n and data == "\n1"\n and kwargs.get("skip_blank_lines", True)\n ):\n # CSV parse error: Empty CSV file or block: cannot infer number of columns\n pytest.skip(reason="https://github.com/apache/arrow/issues/38676")\n\n result = parser.read_csv(_encode_data_with_bom(data), encoding=utf8, **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_utf_aliases(all_parsers, utf_value, encoding_fmt):\n # see gh-13549\n expected = DataFrame({"mb_num": [4.8], "multibyte": ["test"]})\n parser = all_parsers\n\n encoding = encoding_fmt.format(utf_value)\n data = "mb_num,multibyte\n4.8,test".encode(encoding)\n\n result = 
parser.read_csv(BytesIO(data), encoding=encoding)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "file_path,encoding",\n [\n (("io", "data", "csv", "test1.csv"), "utf-8"),\n (("io", "parser", "data", "unicode_series.csv"), "latin-1"),\n (("io", "parser", "data", "sauron.SHIFT_JIS.csv"), "shiftjis"),\n ],\n)\ndef test_binary_mode_file_buffers(all_parsers, file_path, encoding, datapath):\n # gh-23779: Python csv engine shouldn't error on files opened in binary.\n # gh-31575: Python csv engine shouldn't error on files opened in raw binary.\n parser = all_parsers\n\n fpath = datapath(*file_path)\n expected = parser.read_csv(fpath, encoding=encoding)\n\n with open(fpath, encoding=encoding) as fa:\n result = parser.read_csv(fa)\n assert not fa.closed\n tm.assert_frame_equal(expected, result)\n\n with open(fpath, mode="rb") as fb:\n result = parser.read_csv(fb, encoding=encoding)\n assert not fb.closed\n tm.assert_frame_equal(expected, result)\n\n with open(fpath, mode="rb", buffering=0) as fb:\n result = parser.read_csv(fb, encoding=encoding)\n assert not fb.closed\n tm.assert_frame_equal(expected, result)\n\n\n@pytest.mark.parametrize("pass_encoding", [True, False])\ndef test_encoding_temp_file(all_parsers, utf_value, encoding_fmt, pass_encoding):\n # see gh-24130\n parser = all_parsers\n encoding = encoding_fmt.format(utf_value)\n\n if parser.engine == "pyarrow" and pass_encoding is True and utf_value in [16, 32]:\n # FIXME: this is bad!\n pytest.skip("These cases freeze")\n\n expected = DataFrame({"foo": ["bar"]})\n\n with tm.ensure_clean(mode="w+", encoding=encoding, return_filelike=True) as f:\n f.write("foo\nbar")\n f.seek(0)\n\n result = parser.read_csv(f, encoding=encoding if pass_encoding else None)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_encoding_named_temp_file(all_parsers):\n # see gh-31819\n parser = all_parsers\n encoding = "shift-jis"\n\n title = "てすと"\n data = "こむ"\n\n expected = DataFrame({title: [data]})\n\n 
with tempfile.NamedTemporaryFile() as f:\n f.write(f"{title}\n{data}".encode(encoding))\n\n f.seek(0)\n\n result = parser.read_csv(f, encoding=encoding)\n tm.assert_frame_equal(result, expected)\n assert not f.closed\n\n\n@pytest.mark.parametrize(\n "encoding", ["utf-8", "utf-16", "utf-16-be", "utf-16-le", "utf-32"]\n)\ndef test_parse_encoded_special_characters(encoding):\n # GH16218 Verify parsing of data with encoded special characters\n # Data contains a Unicode 'FULLWIDTH COLON' (U+FF1A) at position (0,"a")\n data = "a\tb\n:foo\t0\nbar\t1\nbaz\t2" # noqa: RUF001\n encoded_data = BytesIO(data.encode(encoding))\n result = read_csv(encoded_data, delimiter="\t", encoding=encoding)\n\n expected = DataFrame(\n data=[[":foo", 0], ["bar", 1], ["baz", 2]], # noqa: RUF001\n columns=["a", "b"],\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("encoding", ["utf-8", None, "utf-16", "cp1255", "latin-1"])\ndef test_encoding_memory_map(all_parsers, encoding):\n # GH40986\n parser = all_parsers\n expected = DataFrame(\n {\n "name": ["Raphael", "Donatello", "Miguel Angel", "Leonardo"],\n "mask": ["red", "purple", "orange", "blue"],\n "weapon": ["sai", "bo staff", "nunchunk", "katana"],\n }\n )\n with tm.ensure_clean() as file:\n expected.to_csv(file, index=False, encoding=encoding)\n\n if parser.engine == "pyarrow":\n msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(file, encoding=encoding, memory_map=True)\n return\n\n df = parser.read_csv(file, encoding=encoding, memory_map=True)\n tm.assert_frame_equal(df, expected)\n\n\ndef test_chunk_splits_multibyte_char(all_parsers):\n """\n Chunk splits a multibyte character with memory_map=True\n\n GH 43540\n """\n parser = all_parsers\n # DEFAULT_CHUNKSIZE = 262144, defined in parsers.pyx\n df = DataFrame(data=["a" * 127] * 2048)\n\n # Put two-bytes utf-8 encoded character "ą" at the end of chunk\n # utf-8 encoding of "ą" 
is b'\xc4\x85'\n df.iloc[2047] = "a" * 127 + "ą"\n with tm.ensure_clean("bug-gh43540.csv") as fname:\n df.to_csv(fname, index=False, header=False, encoding="utf-8")\n\n if parser.engine == "pyarrow":\n msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(fname, header=None, memory_map=True)\n return\n\n dfr = parser.read_csv(fname, header=None, memory_map=True)\n tm.assert_frame_equal(dfr, df)\n\n\ndef test_readcsv_memmap_utf8(all_parsers):\n """\n GH 43787\n\n Test correct handling of UTF-8 chars when memory_map=True and encoding is UTF-8\n """\n lines = []\n line_length = 128\n start_char = " "\n end_char = "\U00010080"\n # This for loop creates a list of 128-char strings\n # consisting of consecutive Unicode chars\n for lnum in range(ord(start_char), ord(end_char), line_length):\n line = "".join([chr(c) for c in range(lnum, lnum + 0x80)]) + "\n"\n try:\n line.encode("utf-8")\n except UnicodeEncodeError:\n continue\n lines.append(line)\n parser = all_parsers\n df = DataFrame(lines)\n with tm.ensure_clean("utf8test.csv") as fname:\n df.to_csv(fname, index=False, header=False, encoding="utf-8")\n\n if parser.engine == "pyarrow":\n msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(fname, header=None, memory_map=True, encoding="utf-8")\n return\n\n dfr = parser.read_csv(fname, header=None, memory_map=True, encoding="utf-8")\n tm.assert_frame_equal(df, dfr)\n\n\n@pytest.mark.usefixtures("pyarrow_xfail")\n@pytest.mark.parametrize("mode", ["w+b", "w+t"])\ndef test_not_readable(all_parsers, mode):\n # GH43439\n parser = all_parsers\n content = b"abcd"\n if "t" in mode:\n content = "abcd"\n with tempfile.SpooledTemporaryFile(mode=mode, encoding="utf-8") as handle:\n handle.write(content)\n handle.seek(0)\n df = parser.read_csv(handle)\n expected = DataFrame([], columns=["abcd"])\n 
tm.assert_frame_equal(df, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_encoding.py | test_encoding.py | Python | 10,782 | 0.95 | 0.083086 | 0.086142 | python-kit | 897 | 2025-01-14T21:37:55.704995 | GPL-3.0 | true | 830220414ebea1bbabbbc149f7710bd8 |
"""\nTests that the file header is properly handled or inferred\nduring parsing for all of the parsers defined in parsers.py\n"""\n\nfrom collections import namedtuple\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import ParserError\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_read_with_bad_header(all_parsers):\n parser = all_parsers\n msg = r"but only \d+ lines in file"\n\n with pytest.raises(ValueError, match=msg):\n s = StringIO(",,")\n parser.read_csv(s, header=[10])\n\n\ndef test_negative_header(all_parsers):\n # see gh-27779\n parser = all_parsers\n data = """1,2,3,4,5\n6,7,8,9,10\n11,12,13,14,15\n"""\n with pytest.raises(\n ValueError,\n match="Passing negative integer to header is invalid. 
"\n "For no header, use header=None instead",\n ):\n parser.read_csv(StringIO(data), header=-1)\n\n\n@pytest.mark.parametrize("header", [([-1, 2, 4]), ([-5, 0])])\ndef test_negative_multi_index_header(all_parsers, header):\n # see gh-27779\n parser = all_parsers\n data = """1,2,3,4,5\n 6,7,8,9,10\n 11,12,13,14,15\n """\n with pytest.raises(\n ValueError, match="cannot specify multi-index header with negative integers"\n ):\n parser.read_csv(StringIO(data), header=header)\n\n\n@pytest.mark.parametrize("header", [True, False])\ndef test_bool_header_arg(all_parsers, header):\n # see gh-6114\n parser = all_parsers\n data = """\\nMyColumn\na\nb\na\nb"""\n msg = "Passing a bool to header is invalid"\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), header=header)\n\n\n@xfail_pyarrow # AssertionError: DataFrame are different\ndef test_header_with_index_col(all_parsers):\n parser = all_parsers\n data = """foo,1,2,3\nbar,4,5,6\nbaz,7,8,9\n"""\n names = ["A", "B", "C"]\n result = parser.read_csv(StringIO(data), names=names)\n\n expected = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n index=["foo", "bar", "baz"],\n columns=["A", "B", "C"],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_header_not_first_line(all_parsers):\n parser = all_parsers\n data = """got,to,ignore,this,line\ngot,to,ignore,this,line\nindex,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\n"""\n data2 = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\n"""\n\n result = parser.read_csv(StringIO(data), header=2, index_col=0)\n expected = parser.read_csv(StringIO(data2), header=0, index_col=0)\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_header_multi_index(all_parsers):\n parser = all_parsers\n\n data = 
"""\\nC0,,C_l0_g0,C_l0_g1,C_l0_g2\n\nC1,,C_l1_g0,C_l1_g1,C_l1_g2\nC2,,C_l2_g0,C_l2_g1,C_l2_g2\nC3,,C_l3_g0,C_l3_g1,C_l3_g2\nR0,R1,,,\nR_l0_g0,R_l1_g0,R0C0,R0C1,R0C2\nR_l0_g1,R_l1_g1,R1C0,R1C1,R1C2\nR_l0_g2,R_l1_g2,R2C0,R2C1,R2C2\nR_l0_g3,R_l1_g3,R3C0,R3C1,R3C2\nR_l0_g4,R_l1_g4,R4C0,R4C1,R4C2\n"""\n result = parser.read_csv(StringIO(data), header=[0, 1, 2, 3], index_col=[0, 1])\n data_gen_f = lambda r, c: f"R{r}C{c}"\n\n data = [[data_gen_f(r, c) for c in range(3)] for r in range(5)]\n index = MultiIndex.from_arrays(\n [[f"R_l0_g{i}" for i in range(5)], [f"R_l1_g{i}" for i in range(5)]],\n names=["R0", "R1"],\n )\n columns = MultiIndex.from_arrays(\n [\n [f"C_l0_g{i}" for i in range(3)],\n [f"C_l1_g{i}" for i in range(3)],\n [f"C_l2_g{i}" for i in range(3)],\n [f"C_l3_g{i}" for i in range(3)],\n ],\n names=["C0", "C1", "C2", "C3"],\n )\n expected = DataFrame(data, columns=columns, index=index)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "kwargs,msg",\n [\n (\n {"index_col": ["foo", "bar"]},\n (\n "index_col must only contain "\n "row numbers when specifying "\n "a multi-index header"\n ),\n ),\n (\n {"index_col": [0, 1], "names": ["foo", "bar"]},\n ("cannot specify names when specifying a multi-index header"),\n ),\n (\n {"index_col": [0, 1], "usecols": ["foo", "bar"]},\n ("cannot specify usecols when specifying a multi-index header"),\n ),\n ],\n)\ndef test_header_multi_index_invalid(all_parsers, kwargs, msg):\n data = """\\nC0,,C_l0_g0,C_l0_g1,C_l0_g2\n\nC1,,C_l1_g0,C_l1_g1,C_l1_g2\nC2,,C_l2_g0,C_l2_g1,C_l2_g2\nC3,,C_l3_g0,C_l3_g1,C_l3_g2\nR0,R1,,,\nR_l0_g0,R_l1_g0,R0C0,R0C1,R0C2\nR_l0_g1,R_l1_g1,R1C0,R1C1,R1C2\nR_l0_g2,R_l1_g2,R2C0,R2C1,R2C2\nR_l0_g3,R_l1_g3,R3C0,R3C1,R3C2\nR_l0_g4,R_l1_g4,R4C0,R4C1,R4C2\n"""\n parser = all_parsers\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), header=[0, 1, 2, 3], **kwargs)\n\n\n_TestTuple = namedtuple("_TestTuple", ["first", "second"])\n\n\n@xfail_pyarrow # 
TypeError: an integer is required\n@pytest.mark.parametrize(\n "kwargs",\n [\n {"header": [0, 1]},\n {\n "skiprows": 3,\n "names": [\n ("a", "q"),\n ("a", "r"),\n ("a", "s"),\n ("b", "t"),\n ("c", "u"),\n ("c", "v"),\n ],\n },\n {\n "skiprows": 3,\n "names": [\n _TestTuple("a", "q"),\n _TestTuple("a", "r"),\n _TestTuple("a", "s"),\n _TestTuple("b", "t"),\n _TestTuple("c", "u"),\n _TestTuple("c", "v"),\n ],\n },\n ],\n)\ndef test_header_multi_index_common_format1(all_parsers, kwargs):\n parser = all_parsers\n expected = DataFrame(\n [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],\n index=["one", "two"],\n columns=MultiIndex.from_tuples(\n [("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")]\n ),\n )\n data = """,a,a,a,b,c,c\n,q,r,s,t,u,v\n,,,,,,\none,1,2,3,4,5,6\ntwo,7,8,9,10,11,12"""\n\n result = parser.read_csv(StringIO(data), index_col=0, **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\n@pytest.mark.parametrize(\n "kwargs",\n [\n {"header": [0, 1]},\n {\n "skiprows": 2,\n "names": [\n ("a", "q"),\n ("a", "r"),\n ("a", "s"),\n ("b", "t"),\n ("c", "u"),\n ("c", "v"),\n ],\n },\n {\n "skiprows": 2,\n "names": [\n _TestTuple("a", "q"),\n _TestTuple("a", "r"),\n _TestTuple("a", "s"),\n _TestTuple("b", "t"),\n _TestTuple("c", "u"),\n _TestTuple("c", "v"),\n ],\n },\n ],\n)\ndef test_header_multi_index_common_format2(all_parsers, kwargs):\n parser = all_parsers\n expected = DataFrame(\n [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],\n index=["one", "two"],\n columns=MultiIndex.from_tuples(\n [("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")]\n ),\n )\n data = """,a,a,a,b,c,c\n,q,r,s,t,u,v\none,1,2,3,4,5,6\ntwo,7,8,9,10,11,12"""\n\n result = parser.read_csv(StringIO(data), index_col=0, **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\n@pytest.mark.parametrize(\n "kwargs",\n [\n {"header": [0, 1]},\n {\n "skiprows": 
2,\n "names": [\n ("a", "q"),\n ("a", "r"),\n ("a", "s"),\n ("b", "t"),\n ("c", "u"),\n ("c", "v"),\n ],\n },\n {\n "skiprows": 2,\n "names": [\n _TestTuple("a", "q"),\n _TestTuple("a", "r"),\n _TestTuple("a", "s"),\n _TestTuple("b", "t"),\n _TestTuple("c", "u"),\n _TestTuple("c", "v"),\n ],\n },\n ],\n)\ndef test_header_multi_index_common_format3(all_parsers, kwargs):\n parser = all_parsers\n expected = DataFrame(\n [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]],\n index=["one", "two"],\n columns=MultiIndex.from_tuples(\n [("a", "q"), ("a", "r"), ("a", "s"), ("b", "t"), ("c", "u"), ("c", "v")]\n ),\n )\n expected = expected.reset_index(drop=True)\n data = """a,a,a,b,c,c\nq,r,s,t,u,v\n1,2,3,4,5,6\n7,8,9,10,11,12"""\n\n result = parser.read_csv(StringIO(data), index_col=None, **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_header_multi_index_common_format_malformed1(all_parsers):\n parser = all_parsers\n expected = DataFrame(\n np.array([[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype="int64"),\n index=Index([1, 7]),\n columns=MultiIndex(\n levels=[["a", "b", "c"], ["r", "s", "t", "u", "v"]],\n codes=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],\n names=["a", "q"],\n ),\n )\n data = """a,a,a,b,c,c\nq,r,s,t,u,v\n1,2,3,4,5,6\n7,8,9,10,11,12"""\n\n result = parser.read_csv(StringIO(data), header=[0, 1], index_col=0)\n tm.assert_frame_equal(expected, result)\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_header_multi_index_common_format_malformed2(all_parsers):\n parser = all_parsers\n expected = DataFrame(\n np.array([[2, 3, 4, 5, 6], [8, 9, 10, 11, 12]], dtype="int64"),\n index=Index([1, 7]),\n columns=MultiIndex(\n levels=[["a", "b", "c"], ["r", "s", "t", "u", "v"]],\n codes=[[0, 0, 1, 2, 2], [0, 1, 2, 3, 4]],\n names=[None, "q"],\n ),\n )\n\n data = """,a,a,b,c,c\nq,r,s,t,u,v\n1,2,3,4,5,6\n7,8,9,10,11,12"""\n\n result = parser.read_csv(StringIO(data), header=[0, 1], index_col=0)\n 
tm.assert_frame_equal(expected, result)\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_header_multi_index_common_format_malformed3(all_parsers):\n parser = all_parsers\n expected = DataFrame(\n np.array([[3, 4, 5, 6], [9, 10, 11, 12]], dtype="int64"),\n index=MultiIndex(levels=[[1, 7], [2, 8]], codes=[[0, 1], [0, 1]]),\n columns=MultiIndex(\n levels=[["a", "b", "c"], ["s", "t", "u", "v"]],\n codes=[[0, 1, 2, 2], [0, 1, 2, 3]],\n names=[None, "q"],\n ),\n )\n data = """,a,a,b,c,c\nq,r,s,t,u,v\n1,2,3,4,5,6\n7,8,9,10,11,12"""\n\n result = parser.read_csv(StringIO(data), header=[0, 1], index_col=[0, 1])\n tm.assert_frame_equal(expected, result)\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_header_multi_index_blank_line(all_parsers):\n # GH 40442\n parser = all_parsers\n data = [[None, None], [1, 2], [3, 4]]\n columns = MultiIndex.from_tuples([("a", "A"), ("b", "B")])\n expected = DataFrame(data, columns=columns)\n data = "a,b\nA,B\n,\n1,2\n3,4"\n result = parser.read_csv(StringIO(data), header=[0, 1])\n tm.assert_frame_equal(expected, result)\n\n\n@pytest.mark.parametrize(\n "data,header", [("1,2,3\n4,5,6", None), ("foo,bar,baz\n1,2,3\n4,5,6", 0)]\n)\ndef test_header_names_backward_compat(all_parsers, data, header, request):\n # see gh-2539\n parser = all_parsers\n\n if parser.engine == "pyarrow" and header is not None:\n mark = pytest.mark.xfail(reason="DataFrame.columns are different")\n request.applymarker(mark)\n\n expected = parser.read_csv(StringIO("1,2,3\n4,5,6"), names=["a", "b", "c"])\n\n result = parser.read_csv(StringIO(data), names=["a", "b", "c"], header=header)\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block: cannot infer\n@pytest.mark.parametrize("kwargs", [{}, {"index_col": False}])\ndef test_read_only_header_no_rows(all_parsers, kwargs):\n # See gh-7773\n parser = all_parsers\n expected = DataFrame(columns=["a", "b", "c"])\n\n result = 
parser.read_csv(StringIO("a,b,c"), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "kwargs,names",\n [\n ({}, [0, 1, 2, 3, 4]),\n (\n {"names": ["foo", "bar", "baz", "quux", "panda"]},\n ["foo", "bar", "baz", "quux", "panda"],\n ),\n ],\n)\ndef test_no_header(all_parsers, kwargs, names):\n parser = all_parsers\n data = """1,2,3,4,5\n6,7,8,9,10\n11,12,13,14,15\n"""\n expected = DataFrame(\n [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]], columns=names\n )\n result = parser.read_csv(StringIO(data), header=None, **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("header", [["a", "b"], "string_header"])\ndef test_non_int_header(all_parsers, header):\n # see gh-16338\n msg = "header must be integer or list of integers"\n data = """1,2\n3,4"""\n parser = all_parsers\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), header=header)\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_singleton_header(all_parsers):\n # see gh-7757\n data = """a,b,c\n0,1,2\n1,2,3"""\n parser = all_parsers\n\n expected = DataFrame({"a": [0, 1], "b": [1, 2], "c": [2, 3]})\n result = parser.read_csv(StringIO(data), header=[0])\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\n@pytest.mark.parametrize(\n "data,expected",\n [\n (\n "A,A,A,B\none,one,one,two\n0,40,34,0.1",\n DataFrame(\n [[0, 40, 34, 0.1]],\n columns=MultiIndex.from_tuples(\n [("A", "one"), ("A", "one.1"), ("A", "one.2"), ("B", "two")]\n ),\n ),\n ),\n (\n "A,A,A,B\none,one,one.1,two\n0,40,34,0.1",\n DataFrame(\n [[0, 40, 34, 0.1]],\n columns=MultiIndex.from_tuples(\n [("A", "one"), ("A", "one.1"), ("A", "one.1.1"), ("B", "two")]\n ),\n ),\n ),\n (\n "A,A,A,B,B\none,one,one.1,two,two\n0,40,34,0.1,0.1",\n DataFrame(\n [[0, 40, 34, 0.1, 0.1]],\n columns=MultiIndex.from_tuples(\n [\n ("A", "one"),\n ("A", "one.1"),\n ("A", "one.1.1"),\n ("B", "two"),\n ("B", 
"two.1"),\n ]\n ),\n ),\n ),\n ],\n)\ndef test_mangles_multi_index(all_parsers, data, expected):\n # see gh-18062\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), header=[0, 1])\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is requireds\n@pytest.mark.parametrize("index_col", [None, [0]])\n@pytest.mark.parametrize(\n "columns", [None, (["", "Unnamed"]), (["Unnamed", ""]), (["Unnamed", "NotUnnamed"])]\n)\ndef test_multi_index_unnamed(all_parsers, index_col, columns):\n # see gh-23687\n #\n # When specifying a multi-index header, make sure that\n # we don't error just because one of the rows in our header\n # has ALL column names containing the string "Unnamed". The\n # correct condition to check is whether the row contains\n # ALL columns that did not have names (and instead were given\n # placeholder ones).\n parser = all_parsers\n header = [0, 1]\n\n if index_col is None:\n data = ",".join(columns or ["", ""]) + "\n0,1\n2,3\n4,5\n"\n else:\n data = ",".join([""] + (columns or ["", ""])) + "\n,0,1\n0,2,3\n1,4,5\n"\n\n result = parser.read_csv(StringIO(data), header=header, index_col=index_col)\n exp_columns = []\n\n if columns is None:\n columns = ["", "", ""]\n\n for i, col in enumerate(columns):\n if not col: # Unnamed.\n col = f"Unnamed: {i if index_col is None else i + 1}_level_0"\n\n exp_columns.append(col)\n\n columns = MultiIndex.from_tuples(zip(exp_columns, ["0", "1"]))\n expected = DataFrame([[2, 3], [4, 5]], columns=columns)\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Expected 2 columns, got 3\ndef test_names_longer_than_header_but_equal_with_data_rows(all_parsers):\n # GH#38453\n parser = all_parsers\n data = """a, b\n1,2,3\n5,6,4\n"""\n result = parser.read_csv(StringIO(data), header=0, names=["A", "B", "C"])\n expected = DataFrame({"A": [1, 5], "B": [2, 6], "C": [3, 4]})\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is 
required\ndef test_read_csv_multiindex_columns(all_parsers):\n # GH#6051\n parser = all_parsers\n\n s1 = "Male, Male, Male, Female, Female\nR, R, L, R, R\n.86, .67, .88, .78, .81"\n s2 = (\n "Male, Male, Male, Female, Female\n"\n "R, R, L, R, R\n"\n ".86, .67, .88, .78, .81\n"\n ".86, .67, .88, .78, .82"\n )\n\n mi = MultiIndex.from_tuples(\n [\n ("Male", "R"),\n (" Male", " R"),\n (" Male", " L"),\n (" Female", " R"),\n (" Female", " R.1"),\n ]\n )\n expected = DataFrame(\n [[0.86, 0.67, 0.88, 0.78, 0.81], [0.86, 0.67, 0.88, 0.78, 0.82]], columns=mi\n )\n\n df1 = parser.read_csv(StringIO(s1), header=[0, 1])\n tm.assert_frame_equal(df1, expected.iloc[:1])\n df2 = parser.read_csv(StringIO(s2), header=[0, 1])\n tm.assert_frame_equal(df2, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_read_csv_multi_header_length_check(all_parsers):\n # GH#43102\n parser = all_parsers\n\n case = """row11,row12,row13\nrow21,row22, row23\nrow31,row32\n"""\n\n with pytest.raises(\n ParserError, match="Header rows must have an equal number of columns."\n ):\n parser.read_csv(StringIO(case), header=[0, 2])\n\n\n@skip_pyarrow # CSV parse error: Expected 3 columns, got 2\ndef test_header_none_and_implicit_index(all_parsers):\n # GH#22144\n parser = all_parsers\n data = "x,1,5\ny,2\nz,3\n"\n result = parser.read_csv(StringIO(data), names=["a", "b"], header=None)\n expected = DataFrame(\n {"a": [1, 2, 3], "b": [5, np.nan, np.nan]}, index=["x", "y", "z"]\n )\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # regex mismatch "CSV parse error: Expected 2 columns, got "\ndef test_header_none_and_implicit_index_in_second_row(all_parsers):\n # GH#22144\n parser = all_parsers\n data = "x,1\ny,2,5\nz,3\n"\n with pytest.raises(ParserError, match="Expected 2 fields in line 2, saw 3"):\n parser.read_csv(StringIO(data), names=["a", "b"], header=None)\n\n\ndef test_header_none_and_on_bad_lines_skip(all_parsers):\n # GH#22144\n parser = all_parsers\n data = 
"x,1\ny,2,5\nz,3\n"\n result = parser.read_csv(\n StringIO(data), names=["a", "b"], header=None, on_bad_lines="skip"\n )\n expected = DataFrame({"a": ["x", "z"], "b": [1, 3]})\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is requireds\ndef test_header_missing_rows(all_parsers):\n # GH#47400\n parser = all_parsers\n data = """a,b\n1,2\n"""\n msg = r"Passed header=\[0,1,2\], len of 3, but only 2 lines in file"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), header=[0, 1, 2])\n\n\n# ValueError: The 'delim_whitespace' option is not supported with the 'pyarrow' engine\n@xfail_pyarrow\ndef test_header_multiple_whitespaces(all_parsers):\n # GH#54931\n parser = all_parsers\n data = """aa bb(1,1) cc(1,1)\n 0 2 3.5"""\n\n result = parser.read_csv(StringIO(data), sep=r"\s+")\n expected = DataFrame({"aa": [0], "bb(1,1)": 2, "cc(1,1)": 3.5})\n tm.assert_frame_equal(result, expected)\n\n\n# ValueError: The 'delim_whitespace' option is not supported with the 'pyarrow' engine\n@xfail_pyarrow\ndef test_header_delim_whitespace(all_parsers):\n # GH#54918\n parser = all_parsers\n data = """a,b\n1,2\n3,4\n """\n\n depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(StringIO(data), delim_whitespace=True)\n expected = DataFrame({"a,b": ["1,2", "3,4"]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_usecols_no_header_pyarrow(pyarrow_parser_only):\n parser = pyarrow_parser_only\n data = """\na,i,x\nb,j,y\n"""\n result = parser.read_csv(\n StringIO(data),\n header=None,\n usecols=[0, 1],\n dtype="string[pyarrow]",\n dtype_backend="pyarrow",\n engine="pyarrow",\n )\n expected = DataFrame([["a", "i"], ["b", "j"]], dtype="string[pyarrow]")\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_header.py | test_header.py | Python | 
21,029 | 0.95 | 0.06412 | 0.044728 | vue-tools | 420 | 2024-07-29T19:48:26.034448 | GPL-3.0 | true | 321a33c622fcefd65ccf9ce1ff0ae447 |
"""\nTests that the specified index column (a.k.a "index_col")\nis properly handled or inferred during parsing for all of\nthe parsers defined in parsers.py\n"""\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n\n@pytest.mark.parametrize("with_header", [True, False])\ndef test_index_col_named(all_parsers, with_header):\n parser = all_parsers\n no_header = """\\nKORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""\n header = "ID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\n"\n\n if with_header:\n data = header + no_header\n\n result = parser.read_csv(StringIO(data), index_col="ID")\n expected = parser.read_csv(StringIO(data), header=0).set_index("ID")\n tm.assert_frame_equal(result, expected)\n else:\n data = no_header\n msg = "Index ID invalid"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), index_col="ID")\n\n\ndef test_index_col_named2(all_parsers):\n parser = all_parsers\n data = """\\n1,2,3,4,hello\n5,6,7,8,world\n9,10,11,12,foo\n"""\n\n expected = DataFrame(\n {"a": [1, 5, 9], "b": [2, 6, 10], "c": [3, 7, 11], "d": [4, 8, 12]},\n index=Index(["hello", "world", "foo"], name="message"),\n )\n names = ["a", "b", "c", "d", "message"]\n\n result = 
parser.read_csv(StringIO(data), names=names, index_col=["message"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_index_col_is_true(all_parsers):\n # see gh-9798\n data = "a,b\n1,2"\n parser = all_parsers\n\n msg = "The value of index_col couldn't be 'True'"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), index_col=True)\n\n\n@skip_pyarrow # CSV parse error: Expected 3 columns, got 4\ndef test_infer_index_col(all_parsers):\n data = """A,B,C\nfoo,1,2,3\nbar,4,5,6\nbaz,7,8,9\n"""\n parser = all_parsers\n result = parser.read_csv(StringIO(data))\n\n expected = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n index=["foo", "bar", "baz"],\n columns=["A", "B", "C"],\n )\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\n@pytest.mark.parametrize(\n "index_col,kwargs",\n [\n (None, {"columns": ["x", "y", "z"]}),\n (False, {"columns": ["x", "y", "z"]}),\n (0, {"columns": ["y", "z"], "index": Index([], name="x")}),\n (1, {"columns": ["x", "z"], "index": Index([], name="y")}),\n ("x", {"columns": ["y", "z"], "index": Index([], name="x")}),\n ("y", {"columns": ["x", "z"], "index": Index([], name="y")}),\n (\n [0, 1],\n {\n "columns": ["z"],\n "index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]),\n },\n ),\n (\n ["x", "y"],\n {\n "columns": ["z"],\n "index": MultiIndex.from_arrays([[]] * 2, names=["x", "y"]),\n },\n ),\n (\n [1, 0],\n {\n "columns": ["z"],\n "index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]),\n },\n ),\n (\n ["y", "x"],\n {\n "columns": ["z"],\n "index": MultiIndex.from_arrays([[]] * 2, names=["y", "x"]),\n },\n ),\n ],\n)\ndef test_index_col_empty_data(all_parsers, index_col, kwargs):\n data = "x,y,z"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=index_col)\n\n expected = DataFrame(**kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\ndef 
test_empty_with_index_col_false(all_parsers):\n # see gh-10413\n data = "x,y"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=False)\n\n expected = DataFrame(columns=["x", "y"])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "index_names",\n [\n ["", ""],\n ["foo", ""],\n ["", "bar"],\n ["foo", "bar"],\n ["NotReallyUnnamed", "Unnamed: 0"],\n ],\n)\ndef test_multi_index_naming(all_parsers, index_names, request):\n parser = all_parsers\n\n if parser.engine == "pyarrow" and "" in index_names:\n mark = pytest.mark.xfail(reason="One case raises, others are wrong")\n request.applymarker(mark)\n\n # We don't want empty index names being replaced with "Unnamed: 0"\n data = ",".join(index_names + ["col\na,c,1\na,d,2\nb,c,3\nb,d,4"])\n result = parser.read_csv(StringIO(data), index_col=[0, 1])\n\n expected = DataFrame(\n {"col": [1, 2, 3, 4]}, index=MultiIndex.from_product([["a", "b"], ["c", "d"]])\n )\n expected.index.names = [name if name else None for name in index_names]\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # ValueError: Found non-unique column index\ndef test_multi_index_naming_not_all_at_beginning(all_parsers):\n parser = all_parsers\n data = ",Unnamed: 2,\na,c,1\na,d,2\nb,c,3\nb,d,4"\n result = parser.read_csv(StringIO(data), index_col=[0, 2])\n\n expected = DataFrame(\n {"Unnamed: 2": ["c", "d", "c", "d"]},\n index=MultiIndex(\n levels=[["a", "b"], [1, 2, 3, 4]], codes=[[0, 0, 1, 1], [0, 1, 2, 3]]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # ValueError: Found non-unique column index\ndef test_no_multi_index_level_names_empty(all_parsers):\n # GH 10984\n parser = all_parsers\n midx = MultiIndex.from_tuples([("A", 1, 2), ("A", 1, 2), ("B", 1, 2)])\n expected = DataFrame(\n np.random.default_rng(2).standard_normal((3, 3)),\n index=midx,\n columns=["x", "y", "z"],\n )\n with tm.ensure_clean() as path:\n expected.to_csv(path)\n result = parser.read_csv(path, 
index_col=[0, 1, 2])\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_header_with_index_col(all_parsers):\n # GH 33476\n parser = all_parsers\n data = """\nI11,A,A\nI12,B,B\nI2,1,3\n"""\n midx = MultiIndex.from_tuples([("A", "B"), ("A", "B.1")], names=["I11", "I12"])\n idx = Index(["I2"])\n expected = DataFrame([[1, 3]], index=idx, columns=midx)\n\n result = parser.read_csv(StringIO(data), index_col=0, header=[0, 1])\n tm.assert_frame_equal(result, expected)\n\n col_idx = Index(["A", "A.1"])\n idx = Index(["I12", "I2"], name="I11")\n expected = DataFrame([["B", "B"], ["1", "3"]], index=idx, columns=col_idx)\n\n result = parser.read_csv(StringIO(data), index_col="I11", header=0)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.slow\ndef test_index_col_large_csv(all_parsers, monkeypatch):\n # https://github.com/pandas-dev/pandas/issues/37094\n parser = all_parsers\n\n ARR_LEN = 100\n df = DataFrame(\n {\n "a": range(ARR_LEN + 1),\n "b": np.random.default_rng(2).standard_normal(ARR_LEN + 1),\n }\n )\n\n with tm.ensure_clean() as path:\n df.to_csv(path, index=False)\n with monkeypatch.context() as m:\n m.setattr("pandas.core.algorithms._MINIMUM_COMP_ARR_LEN", ARR_LEN)\n result = parser.read_csv(path, index_col=[0])\n\n tm.assert_frame_equal(result, df.set_index("a"))\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_index_col_multiindex_columns_no_data(all_parsers):\n # GH#38292\n parser = all_parsers\n result = parser.read_csv(\n StringIO("a0,a1,a2\nb0,b1,b2\n"), header=[0, 1], index_col=0\n )\n expected = DataFrame(\n [],\n index=Index([]),\n columns=MultiIndex.from_arrays(\n [["a1", "a2"], ["b1", "b2"]], names=["a0", "b0"]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_index_col_header_no_data(all_parsers):\n # GH#38292\n parser = all_parsers\n result = parser.read_csv(StringIO("a0,a1,a2\n"), header=[0], 
index_col=0)\n expected = DataFrame(\n [],\n columns=["a1", "a2"],\n index=Index([], name="a0"),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_multiindex_columns_no_data(all_parsers):\n # GH#38292\n parser = all_parsers\n result = parser.read_csv(StringIO("a0,a1,a2\nb0,b1,b2\n"), header=[0, 1])\n expected = DataFrame(\n [], columns=MultiIndex.from_arrays([["a0", "a1", "a2"], ["b0", "b1", "b2"]])\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_multiindex_columns_index_col_with_data(all_parsers):\n # GH#38292\n parser = all_parsers\n result = parser.read_csv(\n StringIO("a0,a1,a2\nb0,b1,b2\ndata,data,data"), header=[0, 1], index_col=0\n )\n expected = DataFrame(\n [["data", "data"]],\n columns=MultiIndex.from_arrays(\n [["a1", "a2"], ["b1", "b2"]], names=["a0", "b0"]\n ),\n index=Index(["data"]),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\ndef test_infer_types_boolean_sum(all_parsers):\n # GH#44079\n parser = all_parsers\n result = parser.read_csv(\n StringIO("0,1"),\n names=["a", "b"],\n index_col=["a"],\n dtype={"a": "UInt8"},\n )\n expected = DataFrame(\n data={\n "a": [\n 0,\n ],\n "b": [1],\n }\n ).set_index("a")\n # Not checking index type now, because the C parser will return a\n # index column of dtype 'object', and the Python parser will return a\n # index column of dtype 'int64'.\n tm.assert_frame_equal(result, expected, check_index_type=False)\n\n\n@pytest.mark.parametrize("dtype, val", [(object, "01"), ("int64", 1)])\ndef test_specify_dtype_for_index_col(all_parsers, dtype, val, request):\n # GH#9435\n data = "a,b\n01,2"\n parser = all_parsers\n if dtype == object and parser.engine == "pyarrow":\n request.applymarker(\n pytest.mark.xfail(reason="Cannot disable type-inference for pyarrow engine")\n )\n result = parser.read_csv(StringIO(data), index_col="a", 
dtype={"a": dtype})\n expected = DataFrame({"b": [2]}, index=Index([val], name="a", dtype=dtype))\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_multiindex_columns_not_leading_index_col(all_parsers):\n # GH#38549\n parser = all_parsers\n data = """a,b,c,d\ne,f,g,h\nx,y,1,2\n"""\n result = parser.read_csv(\n StringIO(data),\n header=[0, 1],\n index_col=1,\n )\n cols = MultiIndex.from_tuples(\n [("a", "e"), ("c", "g"), ("d", "h")], names=["b", "f"]\n )\n expected = DataFrame([["x", 1, 2]], columns=cols, index=["y"])\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_index_col.py | test_index_col.py | Python | 11,514 | 0.95 | 0.066489 | 0.050473 | vue-tools | 513 | 2024-08-22T13:52:28.718341 | GPL-3.0 | true | 82232b5492f7744f015f978f4f33d8dd |
"""\nTests that duplicate columns are handled appropriately when parsed by the\nCSV engine. In general, the expected result is that they are either thoroughly\nde-duplicated (if mangling requested) or ignored otherwise.\n"""\nfrom io import StringIO\n\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Index,\n)\nimport pandas._testing as tm\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\n\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\n\n@xfail_pyarrow # ValueError: Found non-unique column index\ndef test_basic(all_parsers):\n parser = all_parsers\n\n data = "a,a,b,b,b\n1,2,3,4,5"\n result = parser.read_csv(StringIO(data), sep=",")\n\n expected = DataFrame([[1, 2, 3, 4, 5]], columns=["a", "a.1", "b", "b.1", "b.2"])\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # ValueError: Found non-unique column index\ndef test_basic_names(all_parsers):\n # See gh-7160\n parser = all_parsers\n\n data = "a,b,a\n0,1,2\n3,4,5"\n expected = DataFrame([[0, 1, 2], [3, 4, 5]], columns=["a", "b", "a.1"])\n\n result = parser.read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_basic_names_raise(all_parsers):\n # See gh-7160\n parser = all_parsers\n\n data = "0,1,2\n3,4,5"\n with pytest.raises(ValueError, match="Duplicate names"):\n parser.read_csv(StringIO(data), names=["a", "b", "a"])\n\n\n@xfail_pyarrow # ValueError: Found non-unique column index\n@pytest.mark.parametrize(\n "data,expected",\n [\n ("a,a,a.1\n1,2,3", DataFrame([[1, 2, 3]], columns=["a", "a.2", "a.1"])),\n (\n "a,a,a.1,a.1.1,a.1.1.1,a.1.1.1.1\n1,2,3,4,5,6",\n DataFrame(\n [[1, 2, 3, 4, 5, 6]],\n columns=["a", "a.2", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1"],\n ),\n ),\n (\n "a,a,a.3,a.1,a.2,a,a\n1,2,3,4,5,6,7",\n DataFrame(\n [[1, 2, 3, 4, 5, 6, 7]],\n columns=["a", "a.4", "a.3", "a.1", "a.2", "a.5", "a.6"],\n ),\n ),\n ],\n)\ndef test_thorough_mangle_columns(all_parsers, data, expected):\n # see 
gh-17060\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data,names,expected",\n [\n (\n "a,b,b\n1,2,3",\n ["a.1", "a.1", "a.1.1"],\n DataFrame(\n [["a", "b", "b"], ["1", "2", "3"]], columns=["a.1", "a.1.1", "a.1.1.1"]\n ),\n ),\n (\n "a,b,c,d,e,f\n1,2,3,4,5,6",\n ["a", "a", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1"],\n DataFrame(\n [["a", "b", "c", "d", "e", "f"], ["1", "2", "3", "4", "5", "6"]],\n columns=["a", "a.1", "a.1.1", "a.1.1.1", "a.1.1.1.1", "a.1.1.1.1.1"],\n ),\n ),\n (\n "a,b,c,d,e,f,g\n1,2,3,4,5,6,7",\n ["a", "a", "a.3", "a.1", "a.2", "a", "a"],\n DataFrame(\n [\n ["a", "b", "c", "d", "e", "f", "g"],\n ["1", "2", "3", "4", "5", "6", "7"],\n ],\n columns=["a", "a.1", "a.3", "a.1.1", "a.2", "a.2.1", "a.3.1"],\n ),\n ),\n ],\n)\ndef test_thorough_mangle_names(all_parsers, data, names, expected):\n # see gh-17095\n parser = all_parsers\n\n with pytest.raises(ValueError, match="Duplicate names"):\n parser.read_csv(StringIO(data), names=names)\n\n\n@xfail_pyarrow # AssertionError: DataFrame.columns are different\ndef test_mangled_unnamed_placeholders(all_parsers):\n # xref gh-13017\n orig_key = "0"\n parser = all_parsers\n\n orig_value = [1, 2, 3]\n df = DataFrame({orig_key: orig_value})\n\n # This test recursively updates `df`.\n for i in range(3):\n expected = DataFrame(columns=Index([], dtype="str"))\n\n for j in range(i + 1):\n col_name = "Unnamed: 0" + f".{1*j}" * min(j, 1)\n expected.insert(loc=0, column=col_name, value=[0, 1, 2])\n\n expected[orig_key] = orig_value\n df = parser.read_csv(StringIO(df.to_csv()))\n\n tm.assert_frame_equal(df, expected)\n\n\n@xfail_pyarrow # ValueError: Found non-unique column index\ndef test_mangle_dupe_cols_already_exists(all_parsers):\n # GH#14704\n parser = all_parsers\n\n data = "a,a,a.1,a,a.3,a.1,a.1.1\n1,2,3,4,5,6,7"\n result = parser.read_csv(StringIO(data))\n expected = DataFrame(\n [[1, 2, 3, 4, 5, 6, 7]],\n 
columns=["a", "a.2", "a.1", "a.4", "a.3", "a.1.2", "a.1.1"],\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # ValueError: Found non-unique column index\ndef test_mangle_dupe_cols_already_exists_unnamed_col(all_parsers):\n # GH#14704\n parser = all_parsers\n\n data = ",Unnamed: 0,,Unnamed: 2\n1,2,3,4"\n result = parser.read_csv(StringIO(data))\n expected = DataFrame(\n [[1, 2, 3, 4]],\n columns=["Unnamed: 0.1", "Unnamed: 0", "Unnamed: 2.1", "Unnamed: 2"],\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("usecol, engine", [([0, 1, 1], "python"), ([0, 1, 1], "c")])\ndef test_mangle_cols_names(all_parsers, usecol, engine):\n # GH 11823\n parser = all_parsers\n data = "1,2,3"\n names = ["A", "A", "B"]\n with pytest.raises(ValueError, match="Duplicate names"):\n parser.read_csv(StringIO(data), names=names, usecols=usecol, engine=engine)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_mangle_dupes.py | test_mangle_dupes.py | Python | 5,440 | 0.95 | 0.065934 | 0.062069 | vue-tools | 990 | 2023-09-11T23:21:15.270366 | GPL-3.0 | true | 5d35fb4af2da4ae676cad239a5dec8cc |
"""\nTests multithreading behaviour for reading and\nparsing files for each parser defined in parsers.py\n"""\nfrom contextlib import ExitStack\nfrom io import BytesIO\nfrom multiprocessing.pool import ThreadPool\n\nimport numpy as np\nimport pytest\n\nimport pandas as pd\nfrom pandas import DataFrame\nimport pandas._testing as tm\nfrom pandas.util.version import Version\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\n\n# We'll probably always skip these for pyarrow\n# Maybe we'll add our own tests for pyarrow too\npytestmark = [\n pytest.mark.single_cpu,\n pytest.mark.slow,\n]\n\n\n@pytest.mark.filterwarnings("ignore:Passing a BlockManager:DeprecationWarning")\ndef test_multi_thread_string_io_read_csv(all_parsers, request):\n # see gh-11786\n parser = all_parsers\n if parser.engine == "pyarrow":\n pa = pytest.importorskip("pyarrow")\n if Version(pa.__version__) < Version("16.0"):\n request.applymarker(\n pytest.mark.xfail(reason="# ValueError: Found non-unique column index")\n )\n max_row_range = 100\n num_files = 10\n\n bytes_to_df = (\n "\n".join([f"{i:d},{i:d},{i:d}" for i in range(max_row_range)]).encode()\n for _ in range(num_files)\n )\n\n # Read all files in many threads.\n with ExitStack() as stack:\n files = [stack.enter_context(BytesIO(b)) for b in bytes_to_df]\n\n pool = stack.enter_context(ThreadPool(8))\n\n results = pool.map(parser.read_csv, files)\n first_result = results[0]\n\n for result in results:\n tm.assert_frame_equal(first_result, result)\n\n\ndef _generate_multi_thread_dataframe(parser, path, num_rows, num_tasks):\n """\n Generate a DataFrame via multi-thread.\n\n Parameters\n ----------\n parser : BaseParser\n The parser object to use for reading the data.\n path : str\n The location of the CSV file to read.\n num_rows : int\n The number of rows to read per task.\n num_tasks : int\n The number of tasks to use for reading this DataFrame.\n\n Returns\n -------\n df : DataFrame\n """\n\n def reader(arg):\n """\n Create a reader 
for part of the CSV.\n\n Parameters\n ----------\n arg : tuple\n A tuple of the following:\n\n * start : int\n The starting row to start for parsing CSV\n * nrows : int\n The number of rows to read.\n\n Returns\n -------\n df : DataFrame\n """\n start, nrows = arg\n\n if not start:\n return parser.read_csv(\n path, index_col=0, header=0, nrows=nrows, parse_dates=["date"]\n )\n\n return parser.read_csv(\n path,\n index_col=0,\n header=None,\n skiprows=int(start) + 1,\n nrows=nrows,\n parse_dates=[9],\n )\n\n tasks = [\n (num_rows * i // num_tasks, num_rows // num_tasks) for i in range(num_tasks)\n ]\n\n with ThreadPool(processes=num_tasks) as pool:\n results = pool.map(reader, tasks)\n\n header = results[0].columns\n\n for r in results[1:]:\n r.columns = header\n\n final_dataframe = pd.concat(results)\n return final_dataframe\n\n\n@xfail_pyarrow # ValueError: The 'nrows' option is not supported\ndef test_multi_thread_path_multipart_read_csv(all_parsers):\n # see gh-11786\n num_tasks = 4\n num_rows = 48\n\n parser = all_parsers\n file_name = "__thread_pool_reader__.csv"\n df = DataFrame(\n {\n "a": np.random.default_rng(2).random(num_rows),\n "b": np.random.default_rng(2).random(num_rows),\n "c": np.random.default_rng(2).random(num_rows),\n "d": np.random.default_rng(2).random(num_rows),\n "e": np.random.default_rng(2).random(num_rows),\n "foo": ["foo"] * num_rows,\n "bar": ["bar"] * num_rows,\n "baz": ["baz"] * num_rows,\n "date": pd.date_range("20000101 09:00:00", periods=num_rows, freq="s"),\n "int": np.arange(num_rows, dtype="int64"),\n }\n )\n\n with tm.ensure_clean(file_name) as path:\n df.to_csv(path)\n\n final_dataframe = _generate_multi_thread_dataframe(\n parser, path, num_rows, num_tasks\n )\n tm.assert_frame_equal(df, final_dataframe)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_multi_thread.py | test_multi_thread.py | Python | 4,315 | 0.95 | 0.133758 | 0.055556 | vue-tools | 441 | 2024-04-02T23:11:24.331784 | BSD-3-Clause | true | 
b1a0d5aaa51541aede15c97094c8052b |
"""\nTests that NA values are properly handled during\nparsing for all of the parsers defined in parsers.py\n"""\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.parsers import STR_NA_VALUES\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n\ndef test_string_nas(all_parsers):\n parser = all_parsers\n data = """A,B,C\na,b,c\nd,,f\n,g,h\n"""\n result = parser.read_csv(StringIO(data))\n expected = DataFrame(\n [["a", "b", "c"], ["d", np.nan, "f"], [np.nan, "g", "h"]],\n columns=["A", "B", "C"],\n )\n if parser.engine == "pyarrow":\n expected.loc[2, "A"] = None\n expected.loc[1, "B"] = None\n tm.assert_frame_equal(result, expected)\n\n\ndef test_detect_string_na(all_parsers):\n parser = all_parsers\n data = """A,B\nfoo,bar\nNA,baz\nNaN,nan\n"""\n expected = DataFrame(\n [["foo", "bar"], [np.nan, "baz"], [np.nan, np.nan]], columns=["A", "B"]\n )\n if parser.engine == "pyarrow":\n expected.loc[[1, 2], "A"] = None\n expected.loc[2, "B"] = None\n result = parser.read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "na_values",\n [\n ["-999.0", "-999"],\n [-999, -999.0],\n [-999.0, -999],\n ["-999.0"],\n ["-999"],\n [-999.0],\n [-999],\n ],\n)\n@pytest.mark.parametrize(\n "data",\n [\n """A,B\n-999,1.2\n2,-999\n3,4.5\n""",\n """A,B\n-999,1.200\n2,-999.000\n3,4.500\n""",\n ],\n)\ndef test_non_string_na_values(all_parsers, data, na_values, request):\n # see gh-3611: with an odd float format, we can't match\n # the string "999.0" exactly but still need float matching\n parser = all_parsers\n expected = DataFrame([[np.nan, 1.2], [2.0, np.nan], [3.0, 4.5]], columns=["A", "B"])\n\n if parser.engine == "pyarrow" and not 
all(isinstance(x, str) for x in na_values):\n msg = "The 'pyarrow' engine requires all na_values to be strings"\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), na_values=na_values)\n return\n elif parser.engine == "pyarrow" and "-999.000" in data:\n # bc the pyarrow engine does not include the float-ified version\n # of "-999" -> -999, it does not match the entry with the trailing\n # zeros, so "-999.000" is not treated as null.\n mark = pytest.mark.xfail(\n reason="pyarrow engined does not recognize equivalent floats"\n )\n request.applymarker(mark)\n\n result = parser.read_csv(StringIO(data), na_values=na_values)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_default_na_values(all_parsers):\n _NA_VALUES = {\n "-1.#IND",\n "1.#QNAN",\n "1.#IND",\n "-1.#QNAN",\n "#N/A",\n "N/A",\n "n/a",\n "NA",\n "<NA>",\n "#NA",\n "NULL",\n "null",\n "NaN",\n "nan",\n "-NaN",\n "-nan",\n "#N/A N/A",\n "",\n "None",\n }\n assert _NA_VALUES == STR_NA_VALUES\n\n parser = all_parsers\n nv = len(_NA_VALUES)\n\n def f(i, v):\n if i == 0:\n buf = ""\n elif i > 0:\n buf = "".join([","] * i)\n\n buf = f"{buf}{v}"\n\n if i < nv - 1:\n joined = "".join([","] * (nv - i - 1))\n buf = f"{buf}{joined}"\n\n return buf\n\n data = StringIO("\n".join([f(i, v) for i, v in enumerate(_NA_VALUES)]))\n expected = DataFrame(np.nan, columns=range(nv), index=range(nv))\n\n result = parser.read_csv(data, header=None)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("na_values", ["baz", ["baz"]])\ndef test_custom_na_values(all_parsers, na_values):\n parser = all_parsers\n data = """A,B,C\nignore,this,row\n1,NA,3\n-1.#IND,5,baz\n7,8,NaN\n"""\n expected = DataFrame(\n [[1.0, np.nan, 3], [np.nan, 5, np.nan], [7, 8, np.nan]], columns=["A", "B", "C"]\n )\n if parser.engine == "pyarrow":\n msg = "skiprows argument must be an integer when using engine='pyarrow'"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), 
na_values=na_values, skiprows=[1])\n return\n\n result = parser.read_csv(StringIO(data), na_values=na_values, skiprows=[1])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_bool_na_values(all_parsers):\n data = """A,B,C\nTrue,False,True\nNA,True,False\nFalse,NA,True"""\n parser = all_parsers\n result = parser.read_csv(StringIO(data))\n expected = DataFrame(\n {\n "A": np.array([True, np.nan, False], dtype=object),\n "B": np.array([False, True, np.nan], dtype=object),\n "C": [True, False, True],\n }\n )\n if parser.engine == "pyarrow":\n expected.loc[1, "A"] = None\n expected.loc[2, "B"] = None\n tm.assert_frame_equal(result, expected)\n\n\ndef test_na_value_dict(all_parsers):\n data = """A,B,C\nfoo,bar,NA\nbar,foo,foo\nfoo,bar,NA\nbar,foo,foo"""\n parser = all_parsers\n\n if parser.engine == "pyarrow":\n msg = "pyarrow engine doesn't support passing a dict for na_values"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), na_values={"A": ["foo"], "B": ["bar"]})\n return\n\n df = parser.read_csv(StringIO(data), na_values={"A": ["foo"], "B": ["bar"]})\n expected = DataFrame(\n {\n "A": [np.nan, "bar", np.nan, "bar"],\n "B": [np.nan, "foo", np.nan, "foo"],\n "C": [np.nan, "foo", np.nan, "foo"],\n }\n )\n tm.assert_frame_equal(df, expected)\n\n\n@pytest.mark.parametrize(\n "index_col,expected",\n [\n (\n [0],\n DataFrame({"b": [np.nan], "c": [1], "d": [5]}, index=Index([0], name="a")),\n ),\n (\n [0, 2],\n DataFrame(\n {"b": [np.nan], "d": [5]},\n index=MultiIndex.from_tuples([(0, 1)], names=["a", "c"]),\n ),\n ),\n (\n ["a", "c"],\n DataFrame(\n {"b": [np.nan], "d": [5]},\n index=MultiIndex.from_tuples([(0, 1)], names=["a", "c"]),\n ),\n ),\n ],\n)\ndef test_na_value_dict_multi_index(all_parsers, index_col, expected):\n data = """\\na,b,c,d\n0,NA,1,5\n"""\n parser = all_parsers\n result = parser.read_csv(StringIO(data), na_values=set(), index_col=index_col)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n 
"kwargs,expected",\n [\n (\n {},\n DataFrame(\n {\n "A": ["a", "b", np.nan, "d", "e", np.nan, "g"],\n "B": [1, 2, 3, 4, 5, 6, 7],\n "C": ["one", "two", "three", np.nan, "five", np.nan, "seven"],\n }\n ),\n ),\n (\n {"na_values": {"A": [], "C": []}, "keep_default_na": False},\n DataFrame(\n {\n "A": ["a", "b", "", "d", "e", "nan", "g"],\n "B": [1, 2, 3, 4, 5, 6, 7],\n "C": ["one", "two", "three", "nan", "five", "", "seven"],\n }\n ),\n ),\n (\n {"na_values": ["a"], "keep_default_na": False},\n DataFrame(\n {\n "A": [np.nan, "b", "", "d", "e", "nan", "g"],\n "B": [1, 2, 3, 4, 5, 6, 7],\n "C": ["one", "two", "three", "nan", "five", "", "seven"],\n }\n ),\n ),\n (\n {"na_values": {"A": [], "C": []}},\n DataFrame(\n {\n "A": ["a", "b", np.nan, "d", "e", np.nan, "g"],\n "B": [1, 2, 3, 4, 5, 6, 7],\n "C": ["one", "two", "three", np.nan, "five", np.nan, "seven"],\n }\n ),\n ),\n ],\n)\ndef test_na_values_keep_default(\n all_parsers, kwargs, expected, request, using_infer_string\n):\n data = """\\nA,B,C\na,1,one\nb,2,two\n,3,three\nd,4,nan\ne,5,five\nnan,6,\ng,7,seven\n"""\n parser = all_parsers\n if parser.engine == "pyarrow":\n if "na_values" in kwargs and isinstance(kwargs["na_values"], dict):\n msg = "The pyarrow engine doesn't support passing a dict for na_values"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), **kwargs)\n return\n if not using_infer_string or "na_values" in kwargs:\n mark = pytest.mark.xfail()\n request.applymarker(mark)\n\n result = parser.read_csv(StringIO(data), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_no_na_values_no_keep_default(all_parsers):\n # see gh-4318: passing na_values=None and\n # keep_default_na=False yields 'None" as a na_value\n data = """\\nA,B,C\na,1,None\nb,2,two\n,3,None\nd,4,nan\ne,5,five\nnan,6,\ng,7,seven\n"""\n parser = all_parsers\n result = parser.read_csv(StringIO(data), keep_default_na=False)\n\n expected = DataFrame(\n {\n "A": ["a", "b", "", "d", "e", "nan", "g"],\n 
"B": [1, 2, 3, 4, 5, 6, 7],\n "C": ["None", "two", "None", "nan", "five", "", "seven"],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_no_keep_default_na_dict_na_values(all_parsers):\n # see gh-19227\n data = "a,b\n,2"\n parser = all_parsers\n\n if parser.engine == "pyarrow":\n msg = "The pyarrow engine doesn't support passing a dict for na_values"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(data), na_values={"b": ["2"]}, keep_default_na=False\n )\n return\n\n result = parser.read_csv(\n StringIO(data), na_values={"b": ["2"]}, keep_default_na=False\n )\n expected = DataFrame({"a": [""], "b": [np.nan]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_no_keep_default_na_dict_na_scalar_values(all_parsers):\n # see gh-19227\n #\n # Scalar values shouldn't cause the parsing to crash or fail.\n data = "a,b\n1,2"\n parser = all_parsers\n\n if parser.engine == "pyarrow":\n msg = "The pyarrow engine doesn't support passing a dict for na_values"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), na_values={"b": 2}, keep_default_na=False)\n return\n\n df = parser.read_csv(StringIO(data), na_values={"b": 2}, keep_default_na=False)\n expected = DataFrame({"a": [1], "b": [np.nan]})\n tm.assert_frame_equal(df, expected)\n\n\n@pytest.mark.parametrize("col_zero_na_values", [113125, "113125"])\ndef test_no_keep_default_na_dict_na_values_diff_reprs(all_parsers, col_zero_na_values):\n # see gh-19227\n data = """\\n113125,"blah","/blaha",kjsdkj,412.166,225.874,214.008\n729639,"qwer","",asdfkj,466.681,,252.373\n"""\n parser = all_parsers\n expected = DataFrame(\n {\n 0: [np.nan, 729639.0],\n 1: [np.nan, "qwer"],\n 2: ["/blaha", np.nan],\n 3: ["kjsdkj", "asdfkj"],\n 4: [412.166, 466.681],\n 5: ["225.874", ""],\n 6: [np.nan, 252.373],\n }\n )\n\n if parser.engine == "pyarrow":\n msg = "The pyarrow engine doesn't support passing a dict for na_values"\n with pytest.raises(ValueError, match=msg):\n 
parser.read_csv(\n StringIO(data),\n header=None,\n keep_default_na=False,\n na_values={2: "", 6: "214.008", 1: "blah", 0: col_zero_na_values},\n )\n return\n\n result = parser.read_csv(\n StringIO(data),\n header=None,\n keep_default_na=False,\n na_values={2: "", 6: "214.008", 1: "blah", 0: col_zero_na_values},\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "na_filter,row_data",\n [\n (True, [[1, "A"], [np.nan, np.nan], [3, "C"]]),\n (False, [["1", "A"], ["nan", "B"], ["3", "C"]]),\n ],\n)\ndef test_na_values_na_filter_override(\n request, all_parsers, na_filter, row_data, using_infer_string\n):\n parser = all_parsers\n if parser.engine == "pyarrow":\n # mismatched dtypes in both cases, FutureWarning in the True case\n if not (using_infer_string and na_filter):\n mark = pytest.mark.xfail(reason="pyarrow doesn't support this.")\n request.applymarker(mark)\n data = """\\nA,B\n1,A\nnan,B\n3,C\n"""\n result = parser.read_csv(StringIO(data), na_values=["B"], na_filter=na_filter)\n\n expected = DataFrame(row_data, columns=["A", "B"])\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Expected 8 columns, got 5:\ndef test_na_trailing_columns(all_parsers):\n parser = all_parsers\n data = """Date,Currency,Symbol,Type,Units,UnitPrice,Cost,Tax\n2012-03-14,USD,AAPL,BUY,1000\n2012-05-12,USD,SBUX,SELL,500"""\n\n # Trailing columns should be all NaN.\n result = parser.read_csv(StringIO(data))\n expected = DataFrame(\n [\n ["2012-03-14", "USD", "AAPL", "BUY", 1000, np.nan, np.nan, np.nan],\n ["2012-05-12", "USD", "SBUX", "SELL", 500, np.nan, np.nan, np.nan],\n ],\n columns=[\n "Date",\n "Currency",\n "Symbol",\n "Type",\n "Units",\n "UnitPrice",\n "Cost",\n "Tax",\n ],\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "na_values,row_data",\n [\n (1, [[np.nan, 2.0], [2.0, np.nan]]),\n ({"a": 2, "b": 1}, [[1.0, 2.0], [np.nan, np.nan]]),\n ],\n)\ndef test_na_values_scalar(all_parsers, na_values, 
row_data):\n # see gh-12224\n parser = all_parsers\n names = ["a", "b"]\n data = "1,2\n2,1"\n\n if parser.engine == "pyarrow" and isinstance(na_values, dict):\n if isinstance(na_values, dict):\n err = ValueError\n msg = "The pyarrow engine doesn't support passing a dict for na_values"\n else:\n err = TypeError\n msg = "The 'pyarrow' engine requires all na_values to be strings"\n with pytest.raises(err, match=msg):\n parser.read_csv(StringIO(data), names=names, na_values=na_values)\n return\n elif parser.engine == "pyarrow":\n msg = "The 'pyarrow' engine requires all na_values to be strings"\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), names=names, na_values=na_values)\n return\n\n result = parser.read_csv(StringIO(data), names=names, na_values=na_values)\n expected = DataFrame(row_data, columns=names)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_na_values_dict_aliasing(all_parsers):\n parser = all_parsers\n na_values = {"a": 2, "b": 1}\n na_values_copy = na_values.copy()\n\n names = ["a", "b"]\n data = "1,2\n2,1"\n\n expected = DataFrame([[1.0, 2.0], [np.nan, np.nan]], columns=names)\n\n if parser.engine == "pyarrow":\n msg = "The pyarrow engine doesn't support passing a dict for na_values"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), names=names, na_values=na_values)\n return\n\n result = parser.read_csv(StringIO(data), names=names, na_values=na_values)\n\n tm.assert_frame_equal(result, expected)\n tm.assert_dict_equal(na_values, na_values_copy)\n\n\ndef test_na_values_dict_col_index(all_parsers):\n # see gh-14203\n data = "a\nfoo\n1"\n parser = all_parsers\n na_values = {0: "foo"}\n\n if parser.engine == "pyarrow":\n msg = "The pyarrow engine doesn't support passing a dict for na_values"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), na_values=na_values)\n return\n\n result = parser.read_csv(StringIO(data), na_values=na_values)\n expected = 
DataFrame({"a": [np.nan, 1]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data,kwargs,expected",\n [\n (\n str(2**63) + "\n" + str(2**63 + 1),\n {"na_values": [2**63]},\n DataFrame([str(2**63), str(2**63 + 1)]),\n ),\n (str(2**63) + ",1" + "\n,2", {}, DataFrame([[str(2**63), 1], ["", 2]])),\n (str(2**63) + "\n1", {"na_values": [2**63]}, DataFrame([np.nan, 1])),\n ],\n)\ndef test_na_values_uint64(all_parsers, data, kwargs, expected, request):\n # see gh-14983\n parser = all_parsers\n\n if parser.engine == "pyarrow" and "na_values" in kwargs:\n msg = "The 'pyarrow' engine requires all na_values to be strings"\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), header=None, **kwargs)\n return\n elif parser.engine == "pyarrow":\n mark = pytest.mark.xfail(reason="Returns float64 instead of object")\n request.applymarker(mark)\n\n result = parser.read_csv(StringIO(data), header=None, **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_empty_na_values_no_default_with_index(all_parsers):\n # see gh-15835\n data = "a,1\nb,2"\n parser = all_parsers\n expected = DataFrame({"1": [2]}, index=Index(["b"], name="a"))\n\n result = parser.read_csv(StringIO(data), index_col=0, keep_default_na=False)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "na_filter,index_data", [(False, ["", "5"]), (True, [np.nan, 5.0])]\n)\ndef test_no_na_filter_on_index(all_parsers, na_filter, index_data, request):\n # see gh-5239\n #\n # Don't parse NA-values in index unless na_filter=True\n parser = all_parsers\n data = "a,b,c\n1,,3\n4,5,6"\n\n if parser.engine == "pyarrow" and na_filter is False:\n mark = pytest.mark.xfail(reason="mismatched index result")\n request.applymarker(mark)\n\n expected = DataFrame({"a": [1, 4], "c": [3, 6]}, index=Index(index_data, name="b"))\n result = parser.read_csv(StringIO(data), index_col=[1], na_filter=na_filter)\n tm.assert_frame_equal(result, expected)\n\n\ndef 
test_inf_na_values_with_int_index(all_parsers):\n # see gh-17128\n parser = all_parsers\n data = "idx,col1,col2\n1,3,4\n2,inf,-inf"\n\n # Don't fail with OverflowError with inf's and integer index column.\n out = parser.read_csv(StringIO(data), index_col=[0], na_values=["inf", "-inf"])\n expected = DataFrame(\n {"col1": [3, np.nan], "col2": [4, np.nan]}, index=Index([1, 2], name="idx")\n )\n tm.assert_frame_equal(out, expected)\n\n\n@xfail_pyarrow # mismatched shape\n@pytest.mark.parametrize("na_filter", [True, False])\ndef test_na_values_with_dtype_str_and_na_filter(all_parsers, na_filter):\n # see gh-20377\n parser = all_parsers\n data = "a,b,c\n1,,3\n4,5,6"\n\n # na_filter=True --> missing value becomes NaN.\n # na_filter=False --> missing value remains empty string.\n empty = np.nan if na_filter else ""\n expected = DataFrame({"a": ["1", "4"], "b": [empty, "5"], "c": ["3", "6"]})\n\n result = parser.read_csv(StringIO(data), na_filter=na_filter, dtype=str)\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # mismatched exception message\n@pytest.mark.parametrize(\n "data, na_values",\n [\n ("false,1\n,1\ntrue", None),\n ("false,1\nnull,1\ntrue", None),\n ("false,1\nnan,1\ntrue", None),\n ("false,1\nfoo,1\ntrue", "foo"),\n ("false,1\nfoo,1\ntrue", ["foo"]),\n ("false,1\nfoo,1\ntrue", {"a": "foo"}),\n ],\n)\ndef test_cast_NA_to_bool_raises_error(all_parsers, data, na_values):\n parser = all_parsers\n msg = "|".join(\n [\n "Bool column has NA values in column [0a]",\n "cannot safely convert passed user dtype of "\n "bool for object dtyped data in column 0",\n ]\n )\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(data),\n header=None,\n names=["a", "b"],\n dtype={"a": "bool"},\n na_values=na_values,\n )\n\n\n# TODO: this test isn't about the na_values keyword, it is about the empty entries\n# being returned with NaN entries, whereas the pyarrow engine returns "nan"\n@xfail_pyarrow # mismatched shapes\ndef 
test_str_nan_dropped(all_parsers):\n # see gh-21131\n parser = all_parsers\n\n data = """File: small.csv,,\n10010010233,0123,654\nfoo,,bar\n01001000155,4530,898"""\n\n result = parser.read_csv(\n StringIO(data),\n header=None,\n names=["col1", "col2", "col3"],\n dtype={"col1": str, "col2": str, "col3": str},\n ).dropna()\n\n expected = DataFrame(\n {\n "col1": ["10010010233", "01001000155"],\n "col2": ["0123", "4530"],\n "col3": ["654", "898"],\n },\n index=[1, 3],\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_nan_multi_index(all_parsers):\n # GH 42446\n parser = all_parsers\n data = "A,B,B\nX,Y,Z\n1,2,inf"\n\n if parser.engine == "pyarrow":\n msg = "The pyarrow engine doesn't support passing a dict for na_values"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(data), header=list(range(2)), na_values={("B", "Z"): "inf"}\n )\n return\n\n result = parser.read_csv(\n StringIO(data), header=list(range(2)), na_values={("B", "Z"): "inf"}\n )\n\n expected = DataFrame(\n {\n ("A", "X"): [1],\n ("B", "Y"): [2],\n ("B", "Z"): [np.nan],\n }\n )\n\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # Failed: DID NOT RAISE <class 'ValueError'>; it casts the NaN to False\ndef test_bool_and_nan_to_bool(all_parsers):\n # GH#42808\n parser = all_parsers\n data = """0\nNaN\nTrue\nFalse\n"""\n with pytest.raises(ValueError, match="NA values"):\n parser.read_csv(StringIO(data), dtype="bool")\n\n\ndef test_bool_and_nan_to_int(all_parsers):\n # GH#42808\n parser = all_parsers\n data = """0\nNaN\nTrue\nFalse\n"""\n with pytest.raises(ValueError, match="convert|NoneType"):\n parser.read_csv(StringIO(data), dtype="int")\n\n\ndef test_bool_and_nan_to_float(all_parsers):\n # GH#42808\n parser = all_parsers\n data = """0\nNaN\nTrue\nFalse\n"""\n result = parser.read_csv(StringIO(data), dtype="float")\n expected = DataFrame.from_dict({"0": [np.nan, 1.0, 0.0]})\n tm.assert_frame_equal(result, expected)\n | 
.venv\Lib\site-packages\pandas\tests\io\parser\test_na_values.py | test_na_values.py | Python | 22,460 | 0.95 | 0.087179 | 0.049327 | awesome-app | 693 | 2024-10-27T09:00:05.615877 | BSD-3-Clause | true | ac138b7898aea6d0155324498a735259 |
"""\nTests parsers ability to read and parse non-local files\nand hence require a network connection to be read.\n"""\nfrom io import BytesIO\nimport logging\nimport re\n\nimport numpy as np\nimport pytest\n\nimport pandas.util._test_decorators as td\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\nfrom pandas.io.feather_format import read_feather\nfrom pandas.io.parsers import read_csv\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\n\n@pytest.mark.network\n@pytest.mark.single_cpu\n@pytest.mark.parametrize("mode", ["explicit", "infer"])\n@pytest.mark.parametrize("engine", ["python", "c"])\ndef test_compressed_urls(\n httpserver,\n datapath,\n salaries_table,\n mode,\n engine,\n compression_only,\n compression_to_extension,\n):\n # test reading compressed urls with various engines and\n # extension inference\n if compression_only == "tar":\n pytest.skip("TODO: Add tar salaraies.csv to pandas/io/parsers/data")\n\n extension = compression_to_extension[compression_only]\n with open(datapath("io", "parser", "data", "salaries.csv" + extension), "rb") as f:\n httpserver.serve_content(content=f.read())\n\n url = httpserver.url + "/salaries.csv" + extension\n\n if mode != "explicit":\n compression_only = mode\n\n url_table = read_csv(url, sep="\t", compression=compression_only, engine=engine)\n tm.assert_frame_equal(url_table, salaries_table)\n\n\n@pytest.mark.network\n@pytest.mark.single_cpu\ndef test_url_encoding_csv(httpserver, datapath):\n """\n read_csv should honor the requested encoding for URLs.\n\n GH 10424\n """\n with open(datapath("io", "parser", "data", "unicode_series.csv"), "rb") as f:\n httpserver.serve_content(content=f.read())\n df = read_csv(httpserver.url, encoding="latin-1", header=None)\n assert df.loc[15, 1] == "Á köldum klaka (Cold Fever) (1994)"\n\n\n@pytest.fixture\ndef tips_df(datapath):\n """DataFrame with the tips dataset."""\n return read_csv(datapath("io", 
"data", "csv", "tips.csv"))\n\n\n@pytest.mark.single_cpu\n@pytest.mark.usefixtures("s3_resource")\n@td.skip_if_not_us_locale()\nclass TestS3:\n def test_parse_public_s3_bucket(self, s3_public_bucket_with_data, tips_df, s3so):\n # more of an integration test due to the not-public contents portion\n # can probably mock this though.\n pytest.importorskip("s3fs")\n for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:\n df = read_csv(\n f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext,\n compression=comp,\n storage_options=s3so,\n )\n assert isinstance(df, DataFrame)\n assert not df.empty\n tm.assert_frame_equal(df, tips_df)\n\n def test_parse_private_s3_bucket(self, s3_private_bucket_with_data, tips_df, s3so):\n # Read public file from bucket with not-public contents\n pytest.importorskip("s3fs")\n df = read_csv(\n f"s3://{s3_private_bucket_with_data.name}/tips.csv", storage_options=s3so\n )\n assert isinstance(df, DataFrame)\n assert not df.empty\n tm.assert_frame_equal(df, tips_df)\n\n def test_parse_public_s3n_bucket(self, s3_public_bucket_with_data, tips_df, s3so):\n # Read from AWS s3 as "s3n" URL\n df = read_csv(\n f"s3n://{s3_public_bucket_with_data.name}/tips.csv",\n nrows=10,\n storage_options=s3so,\n )\n assert isinstance(df, DataFrame)\n assert not df.empty\n tm.assert_frame_equal(tips_df.iloc[:10], df)\n\n def test_parse_public_s3a_bucket(self, s3_public_bucket_with_data, tips_df, s3so):\n # Read from AWS s3 as "s3a" URL\n df = read_csv(\n f"s3a://{s3_public_bucket_with_data.name}/tips.csv",\n nrows=10,\n storage_options=s3so,\n )\n assert isinstance(df, DataFrame)\n assert not df.empty\n tm.assert_frame_equal(tips_df.iloc[:10], df)\n\n def test_parse_public_s3_bucket_nrows(\n self, s3_public_bucket_with_data, tips_df, s3so\n ):\n for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:\n df = read_csv(\n f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext,\n nrows=10,\n compression=comp,\n storage_options=s3so,\n )\n assert 
isinstance(df, DataFrame)\n assert not df.empty\n tm.assert_frame_equal(tips_df.iloc[:10], df)\n\n def test_parse_public_s3_bucket_chunked(\n self, s3_public_bucket_with_data, tips_df, s3so\n ):\n # Read with a chunksize\n chunksize = 5\n for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:\n with read_csv(\n f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext,\n chunksize=chunksize,\n compression=comp,\n storage_options=s3so,\n ) as df_reader:\n assert df_reader.chunksize == chunksize\n for i_chunk in [0, 1, 2]:\n # Read a couple of chunks and make sure we see them\n # properly.\n df = df_reader.get_chunk()\n assert isinstance(df, DataFrame)\n assert not df.empty\n true_df = tips_df.iloc[\n chunksize * i_chunk : chunksize * (i_chunk + 1)\n ]\n tm.assert_frame_equal(true_df, df)\n\n def test_parse_public_s3_bucket_chunked_python(\n self, s3_public_bucket_with_data, tips_df, s3so\n ):\n # Read with a chunksize using the Python parser\n chunksize = 5\n for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:\n with read_csv(\n f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext,\n chunksize=chunksize,\n compression=comp,\n engine="python",\n storage_options=s3so,\n ) as df_reader:\n assert df_reader.chunksize == chunksize\n for i_chunk in [0, 1, 2]:\n # Read a couple of chunks and make sure we see them properly.\n df = df_reader.get_chunk()\n assert isinstance(df, DataFrame)\n assert not df.empty\n true_df = tips_df.iloc[\n chunksize * i_chunk : chunksize * (i_chunk + 1)\n ]\n tm.assert_frame_equal(true_df, df)\n\n def test_parse_public_s3_bucket_python(\n self, s3_public_bucket_with_data, tips_df, s3so\n ):\n for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:\n df = read_csv(\n f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext,\n engine="python",\n compression=comp,\n storage_options=s3so,\n )\n assert isinstance(df, DataFrame)\n assert not df.empty\n tm.assert_frame_equal(df, tips_df)\n\n def 
test_infer_s3_compression(self, s3_public_bucket_with_data, tips_df, s3so):\n for ext in ["", ".gz", ".bz2"]:\n df = read_csv(\n f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext,\n engine="python",\n compression="infer",\n storage_options=s3so,\n )\n assert isinstance(df, DataFrame)\n assert not df.empty\n tm.assert_frame_equal(df, tips_df)\n\n def test_parse_public_s3_bucket_nrows_python(\n self, s3_public_bucket_with_data, tips_df, s3so\n ):\n for ext, comp in [("", None), (".gz", "gzip"), (".bz2", "bz2")]:\n df = read_csv(\n f"s3://{s3_public_bucket_with_data.name}/tips.csv" + ext,\n engine="python",\n nrows=10,\n compression=comp,\n storage_options=s3so,\n )\n assert isinstance(df, DataFrame)\n assert not df.empty\n tm.assert_frame_equal(tips_df.iloc[:10], df)\n\n def test_read_s3_fails(self, s3so):\n msg = "The specified bucket does not exist"\n with pytest.raises(OSError, match=msg):\n read_csv("s3://nyqpug/asdf.csv", storage_options=s3so)\n\n def test_read_s3_fails_private(self, s3_private_bucket, s3so):\n msg = "The specified bucket does not exist"\n # Receive a permission error when trying to read a private bucket.\n # It's irrelevant here that this isn't actually a table.\n with pytest.raises(OSError, match=msg):\n read_csv(f"s3://{s3_private_bucket.name}/file.csv")\n\n @pytest.mark.xfail(reason="GH#39155 s3fs upgrade", strict=False)\n def test_write_s3_csv_fails(self, tips_df, s3so):\n # GH 32486\n # Attempting to write to an invalid S3 path should raise\n import botocore\n\n # GH 34087\n # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/error-handling.html\n # Catch a ClientError since AWS Service Errors are defined dynamically\n error = (FileNotFoundError, botocore.exceptions.ClientError)\n\n with pytest.raises(error, match="The specified bucket does not exist"):\n tips_df.to_csv(\n "s3://an_s3_bucket_data_doesnt_exit/not_real.csv", storage_options=s3so\n )\n\n @pytest.mark.xfail(reason="GH#39155 s3fs upgrade", strict=False)\n def 
test_write_s3_parquet_fails(self, tips_df, s3so):\n # GH 27679\n # Attempting to write to an invalid S3 path should raise\n pytest.importorskip("pyarrow")\n import botocore\n\n # GH 34087\n # https://boto3.amazonaws.com/v1/documentation/api/latest/guide/error-handling.html\n # Catch a ClientError since AWS Service Errors are defined dynamically\n error = (FileNotFoundError, botocore.exceptions.ClientError)\n\n with pytest.raises(error, match="The specified bucket does not exist"):\n tips_df.to_parquet(\n "s3://an_s3_bucket_data_doesnt_exit/not_real.parquet",\n storage_options=s3so,\n )\n\n @pytest.mark.single_cpu\n def test_read_csv_handles_boto_s3_object(\n self, s3_public_bucket_with_data, tips_file\n ):\n # see gh-16135\n\n s3_object = s3_public_bucket_with_data.Object("tips.csv")\n\n with BytesIO(s3_object.get()["Body"].read()) as buffer:\n result = read_csv(buffer, encoding="utf8")\n assert isinstance(result, DataFrame)\n assert not result.empty\n\n expected = read_csv(tips_file)\n tm.assert_frame_equal(result, expected)\n\n @pytest.mark.single_cpu\n def test_read_csv_chunked_download(self, s3_public_bucket, caplog, s3so):\n # 8 MB, S3FS uses 5MB chunks\n df = DataFrame(np.zeros((100000, 4)), columns=list("abcd"))\n with BytesIO(df.to_csv().encode("utf-8")) as buf:\n s3_public_bucket.put_object(Key="large-file.csv", Body=buf)\n uri = f"{s3_public_bucket.name}/large-file.csv"\n match_re = re.compile(rf"^Fetch: {uri}, 0-(?P<stop>\d+)$")\n with caplog.at_level(logging.DEBUG, logger="s3fs"):\n read_csv(\n f"s3://{uri}",\n nrows=5,\n storage_options=s3so,\n )\n for log in caplog.messages:\n if match := re.match(match_re, log):\n # Less than 8 MB\n assert int(match.group("stop")) < 8000000\n\n def test_read_s3_with_hash_in_key(self, s3_public_bucket_with_data, tips_df, s3so):\n # GH 25945\n result = read_csv(\n f"s3://{s3_public_bucket_with_data.name}/tips#1.csv", storage_options=s3so\n )\n tm.assert_frame_equal(tips_df, result)\n\n def 
test_read_feather_s3_file_path(\n self, s3_public_bucket_with_data, feather_file, s3so\n ):\n # GH 29055\n pytest.importorskip("pyarrow")\n expected = read_feather(feather_file)\n res = read_feather(\n f"s3://{s3_public_bucket_with_data.name}/simple_dataset.feather",\n storage_options=s3so,\n )\n tm.assert_frame_equal(expected, res)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_network.py | test_network.py | Python | 12,319 | 0.95 | 0.110092 | 0.101754 | vue-tools | 379 | 2023-09-19T04:44:48.334378 | GPL-3.0 | true | 1445c1fabfadd3fadcfc1f971770f603 |
"""\nTests date parsing functionality for all of the\nparsers defined in parsers.py\n"""\n\nfrom datetime import (\n date,\n datetime,\n timedelta,\n timezone,\n)\nfrom io import StringIO\n\nfrom dateutil.parser import parse as du_parse\nimport numpy as np\nimport pytest\nimport pytz\n\nfrom pandas._libs.tslibs import parsing\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n Index,\n MultiIndex,\n Series,\n Timestamp,\n)\nimport pandas._testing as tm\nfrom pandas.core.indexes.datetimes import date_range\nfrom pandas.core.tools.datetimes import start_caching_at\n\nfrom pandas.io.parsers import read_csv\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n\n@xfail_pyarrow\ndef test_read_csv_with_custom_date_parser(all_parsers):\n # GH36111\n def __custom_date_parser(time):\n time = time.astype(np.float64)\n time = time.astype(int) # convert float seconds to int type\n return pd.to_timedelta(time, unit="s")\n\n testdata = StringIO(\n """time e n h\n 41047.00 -98573.7297 871458.0640 389.0089\n 41048.00 -98573.7299 871458.0640 389.0089\n 41049.00 -98573.7300 871458.0642 389.0088\n 41050.00 -98573.7299 871458.0643 389.0088\n 41051.00 -98573.7302 871458.0640 389.0086\n """\n )\n result = all_parsers.read_csv_check_warnings(\n FutureWarning,\n "Please use 'date_format' instead",\n testdata,\n delim_whitespace=True,\n parse_dates=True,\n date_parser=__custom_date_parser,\n index_col="time",\n )\n time = [41047, 41048, 41049, 41050, 41051]\n time = pd.TimedeltaIndex([pd.to_timedelta(i, unit="s") for i in time], name="time")\n expected = DataFrame(\n {\n "e": [-98573.7297, -98573.7299, -98573.7300, -98573.7299, -98573.7302],\n "n": [871458.0640, 871458.0640, 871458.0642, 871458.0643, 871458.0640],\n "h": [389.0089, 389.0089, 389.0088, 389.0088, 389.0086],\n },\n 
index=time,\n )\n\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow\ndef test_read_csv_with_custom_date_parser_parse_dates_false(all_parsers):\n # GH44366\n def __custom_date_parser(time):\n time = time.astype(np.float64)\n time = time.astype(int) # convert float seconds to int type\n return pd.to_timedelta(time, unit="s")\n\n testdata = StringIO(\n """time e\n 41047.00 -93.77\n 41048.00 -95.79\n 41049.00 -98.73\n 41050.00 -93.99\n 41051.00 -97.72\n """\n )\n result = all_parsers.read_csv_check_warnings(\n FutureWarning,\n "Please use 'date_format' instead",\n testdata,\n delim_whitespace=True,\n parse_dates=False,\n date_parser=__custom_date_parser,\n index_col="time",\n )\n time = Series([41047.00, 41048.00, 41049.00, 41050.00, 41051.00], name="time")\n expected = DataFrame(\n {"e": [-93.77, -95.79, -98.73, -93.99, -97.72]},\n index=time,\n )\n\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow\ndef test_separator_date_conflict(all_parsers):\n # Regression test for gh-4678\n #\n # Make sure thousands separator and\n # date parsing do not conflict.\n parser = all_parsers\n data = "06-02-2013;13:00;1-000.215"\n expected = DataFrame(\n [[datetime(2013, 6, 2, 13, 0, 0), 1000.215]], columns=["Date", 2]\n )\n\n depr_msg = (\n "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated"\n )\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n df = parser.read_csv(\n StringIO(data),\n sep=";",\n thousands="-",\n parse_dates={"Date": [0, 1]},\n header=None,\n )\n tm.assert_frame_equal(df, expected)\n\n\n@pytest.mark.parametrize("keep_date_col", [True, False])\ndef test_multiple_date_col_custom(all_parsers, keep_date_col, request):\n data = """\\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 
21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n"""\n parser = all_parsers\n\n if keep_date_col and parser.engine == "pyarrow":\n # For this to pass, we need to disable auto-inference on the date columns\n # in parse_dates. We have no way of doing this though\n mark = pytest.mark.xfail(\n reason="pyarrow doesn't support disabling auto-inference on column numbers."\n )\n request.applymarker(mark)\n\n def date_parser(*date_cols):\n """\n Test date parser.\n\n Parameters\n ----------\n date_cols : args\n The list of data columns to parse.\n\n Returns\n -------\n parsed : Series\n """\n return parsing.try_parse_dates(\n parsing.concat_date_cols(date_cols), parser=du_parse\n )\n\n kwds = {\n "header": None,\n "date_parser": date_parser,\n "parse_dates": {"actual": [1, 2], "nominal": [1, 3]},\n "keep_date_col": keep_date_col,\n "names": ["X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8"],\n }\n result = parser.read_csv_check_warnings(\n FutureWarning,\n "use 'date_format' instead",\n StringIO(data),\n **kwds,\n raise_on_extra_warnings=False,\n )\n\n expected = DataFrame(\n [\n [\n datetime(1999, 1, 27, 19, 0),\n datetime(1999, 1, 27, 18, 56),\n "KORD",\n "19990127",\n " 19:00:00",\n " 18:56:00",\n 0.81,\n 2.81,\n 7.2,\n 0.0,\n 280.0,\n ],\n [\n datetime(1999, 1, 27, 20, 0),\n datetime(1999, 1, 27, 19, 56),\n "KORD",\n "19990127",\n " 20:00:00",\n " 19:56:00",\n 0.01,\n 2.21,\n 7.2,\n 0.0,\n 260.0,\n ],\n [\n datetime(1999, 1, 27, 21, 0),\n datetime(1999, 1, 27, 20, 56),\n "KORD",\n "19990127",\n " 21:00:00",\n " 20:56:00",\n -0.59,\n 2.21,\n 5.7,\n 0.0,\n 280.0,\n ],\n [\n datetime(1999, 1, 27, 21, 0),\n datetime(1999, 1, 27, 21, 18),\n "KORD",\n "19990127",\n " 21:00:00",\n " 21:18:00",\n -0.99,\n 2.01,\n 3.6,\n 0.0,\n 270.0,\n ],\n [\n datetime(1999, 1, 27, 22, 0),\n datetime(1999, 1, 27, 21, 
56),\n "KORD",\n "19990127",\n " 22:00:00",\n " 21:56:00",\n -0.59,\n 1.71,\n 5.1,\n 0.0,\n 290.0,\n ],\n [\n datetime(1999, 1, 27, 23, 0),\n datetime(1999, 1, 27, 22, 56),\n "KORD",\n "19990127",\n " 23:00:00",\n " 22:56:00",\n -0.59,\n 1.71,\n 4.6,\n 0.0,\n 280.0,\n ],\n ],\n columns=[\n "actual",\n "nominal",\n "X0",\n "X1",\n "X2",\n "X3",\n "X4",\n "X5",\n "X6",\n "X7",\n "X8",\n ],\n )\n\n if not keep_date_col:\n expected = expected.drop(["X1", "X2", "X3"], axis=1)\n\n # Python can sometimes be flaky about how\n # the aggregated columns are entered, so\n # this standardizes the order.\n result = result[expected.columns]\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("container", [list, tuple, Index, Series])\n@pytest.mark.parametrize("dim", [1, 2])\ndef test_concat_date_col_fail(container, dim):\n msg = "not all elements from date_cols are numpy arrays"\n value = "19990127"\n\n date_cols = tuple(container([value]) for _ in range(dim))\n\n with pytest.raises(ValueError, match=msg):\n parsing.concat_date_cols(date_cols)\n\n\n@pytest.mark.parametrize("keep_date_col", [True, False])\ndef test_multiple_date_col(all_parsers, keep_date_col, request):\n data = """\\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n"""\n parser = all_parsers\n\n if keep_date_col and parser.engine == "pyarrow":\n # For this to pass, we need to disable auto-inference on the date columns\n # in parse_dates. 
We have no way of doing this though\n mark = pytest.mark.xfail(\n reason="pyarrow doesn't support disabling auto-inference on column numbers."\n )\n request.applymarker(mark)\n\n depr_msg = "The 'keep_date_col' keyword in pd.read_csv is deprecated"\n\n kwds = {\n "header": None,\n "parse_dates": [[1, 2], [1, 3]],\n "keep_date_col": keep_date_col,\n "names": ["X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7", "X8"],\n }\n with tm.assert_produces_warning(\n (DeprecationWarning, FutureWarning), match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(StringIO(data), **kwds)\n\n expected = DataFrame(\n [\n [\n datetime(1999, 1, 27, 19, 0),\n datetime(1999, 1, 27, 18, 56),\n "KORD",\n "19990127",\n " 19:00:00",\n " 18:56:00",\n 0.81,\n 2.81,\n 7.2,\n 0.0,\n 280.0,\n ],\n [\n datetime(1999, 1, 27, 20, 0),\n datetime(1999, 1, 27, 19, 56),\n "KORD",\n "19990127",\n " 20:00:00",\n " 19:56:00",\n 0.01,\n 2.21,\n 7.2,\n 0.0,\n 260.0,\n ],\n [\n datetime(1999, 1, 27, 21, 0),\n datetime(1999, 1, 27, 20, 56),\n "KORD",\n "19990127",\n " 21:00:00",\n " 20:56:00",\n -0.59,\n 2.21,\n 5.7,\n 0.0,\n 280.0,\n ],\n [\n datetime(1999, 1, 27, 21, 0),\n datetime(1999, 1, 27, 21, 18),\n "KORD",\n "19990127",\n " 21:00:00",\n " 21:18:00",\n -0.99,\n 2.01,\n 3.6,\n 0.0,\n 270.0,\n ],\n [\n datetime(1999, 1, 27, 22, 0),\n datetime(1999, 1, 27, 21, 56),\n "KORD",\n "19990127",\n " 22:00:00",\n " 21:56:00",\n -0.59,\n 1.71,\n 5.1,\n 0.0,\n 290.0,\n ],\n [\n datetime(1999, 1, 27, 23, 0),\n datetime(1999, 1, 27, 22, 56),\n "KORD",\n "19990127",\n " 23:00:00",\n " 22:56:00",\n -0.59,\n 1.71,\n 4.6,\n 0.0,\n 280.0,\n ],\n ],\n columns=[\n "X1_X2",\n "X1_X3",\n "X0",\n "X1",\n "X2",\n "X3",\n "X4",\n "X5",\n "X6",\n "X7",\n "X8",\n ],\n )\n\n if not keep_date_col:\n expected = expected.drop(["X1", "X2", "X3"], axis=1)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_date_col_as_index_col(all_parsers):\n data = """\\nKORD,19990127 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 
0.0000, 280.0000\nKORD,19990127 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\n"""\n parser = all_parsers\n kwds = {\n "header": None,\n "parse_dates": [1],\n "index_col": 1,\n "names": ["X0", "X1", "X2", "X3", "X4", "X5", "X6", "X7"],\n }\n result = parser.read_csv(StringIO(data), **kwds)\n\n index = Index(\n [\n datetime(1999, 1, 27, 19, 0),\n datetime(1999, 1, 27, 20, 0),\n datetime(1999, 1, 27, 21, 0),\n datetime(1999, 1, 27, 21, 0),\n datetime(1999, 1, 27, 22, 0),\n ],\n name="X1",\n )\n expected = DataFrame(\n [\n ["KORD", " 18:56:00", 0.81, 2.81, 7.2, 0.0, 280.0],\n ["KORD", " 19:56:00", 0.01, 2.21, 7.2, 0.0, 260.0],\n ["KORD", " 20:56:00", -0.59, 2.21, 5.7, 0.0, 280.0],\n ["KORD", " 21:18:00", -0.99, 2.01, 3.6, 0.0, 270.0],\n ["KORD", " 21:56:00", -0.59, 1.71, 5.1, 0.0, 290.0],\n ],\n columns=["X0", "X2", "X3", "X4", "X5", "X6", "X7"],\n index=index,\n )\n if parser.engine == "pyarrow":\n # https://github.com/pandas-dev/pandas/issues/44231\n # pyarrow 6.0 starts to infer time type\n expected["X2"] = pd.to_datetime("1970-01-01" + expected["X2"]).dt.time\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_cols_int_cast(all_parsers):\n data = (\n "KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"\n "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"\n "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"\n "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"\n "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"\n "KORD,19990127, 23:00:00, 22:56:00, -0.5900"\n )\n parse_dates = {"actual": [1, 2], "nominal": [1, 3]}\n parser = all_parsers\n\n kwds = {\n "header": None,\n "parse_dates": parse_dates,\n "date_parser": pd.to_datetime,\n }\n result = parser.read_csv_check_warnings(\n FutureWarning,\n "use 'date_format' instead",\n 
StringIO(data),\n **kwds,\n raise_on_extra_warnings=False,\n )\n\n expected = DataFrame(\n [\n [datetime(1999, 1, 27, 19, 0), datetime(1999, 1, 27, 18, 56), "KORD", 0.81],\n [datetime(1999, 1, 27, 20, 0), datetime(1999, 1, 27, 19, 56), "KORD", 0.01],\n [\n datetime(1999, 1, 27, 21, 0),\n datetime(1999, 1, 27, 20, 56),\n "KORD",\n -0.59,\n ],\n [\n datetime(1999, 1, 27, 21, 0),\n datetime(1999, 1, 27, 21, 18),\n "KORD",\n -0.99,\n ],\n [\n datetime(1999, 1, 27, 22, 0),\n datetime(1999, 1, 27, 21, 56),\n "KORD",\n -0.59,\n ],\n [\n datetime(1999, 1, 27, 23, 0),\n datetime(1999, 1, 27, 22, 56),\n "KORD",\n -0.59,\n ],\n ],\n columns=["actual", "nominal", 0, 4],\n )\n\n # Python can sometimes be flaky about how\n # the aggregated columns are entered, so\n # this standardizes the order.\n result = result[expected.columns]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_date_col_timestamp_parse(all_parsers):\n parser = all_parsers\n data = """05/31/2012,15:30:00.029,1306.25,1,E,0,,1306.25\n05/31/2012,15:30:00.029,1306.25,8,E,0,,1306.25"""\n\n result = parser.read_csv_check_warnings(\n FutureWarning,\n "use 'date_format' instead",\n StringIO(data),\n parse_dates=[[0, 1]],\n header=None,\n date_parser=Timestamp,\n raise_on_extra_warnings=False,\n )\n expected = DataFrame(\n [\n [\n Timestamp("05/31/2012, 15:30:00.029"),\n 1306.25,\n 1,\n "E",\n 0,\n np.nan,\n 1306.25,\n ],\n [\n Timestamp("05/31/2012, 15:30:00.029"),\n 1306.25,\n 8,\n "E",\n 0,\n np.nan,\n 1306.25,\n ],\n ],\n columns=["0_1", 2, 3, 4, 5, 6, 7],\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow\ndef test_multiple_date_cols_with_header(all_parsers):\n parser = all_parsers\n data = """\\nID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 
280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000"""\n\n depr_msg = (\n "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated"\n )\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(StringIO(data), parse_dates={"nominal": [1, 2]})\n expected = DataFrame(\n [\n [\n datetime(1999, 1, 27, 19, 0),\n "KORD",\n " 18:56:00",\n 0.81,\n 2.81,\n 7.2,\n 0.0,\n 280.0,\n ],\n [\n datetime(1999, 1, 27, 20, 0),\n "KORD",\n " 19:56:00",\n 0.01,\n 2.21,\n 7.2,\n 0.0,\n 260.0,\n ],\n [\n datetime(1999, 1, 27, 21, 0),\n "KORD",\n " 20:56:00",\n -0.59,\n 2.21,\n 5.7,\n 0.0,\n 280.0,\n ],\n [\n datetime(1999, 1, 27, 21, 0),\n "KORD",\n " 21:18:00",\n -0.99,\n 2.01,\n 3.6,\n 0.0,\n 270.0,\n ],\n [\n datetime(1999, 1, 27, 22, 0),\n "KORD",\n " 21:56:00",\n -0.59,\n 1.71,\n 5.1,\n 0.0,\n 290.0,\n ],\n [\n datetime(1999, 1, 27, 23, 0),\n "KORD",\n " 22:56:00",\n -0.59,\n 1.71,\n 4.6,\n 0.0,\n 280.0,\n ],\n ],\n columns=[\n "nominal",\n "ID",\n "ActualTime",\n "TDew",\n "TAir",\n "Windspeed",\n "Precip",\n "WindDir",\n ],\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data,parse_dates,msg",\n [\n (\n """\\ndate_NominalTime,date,NominalTime\nKORD1,19990127, 19:00:00\nKORD2,19990127, 20:00:00""",\n [[1, 2]],\n ("New date column already in dict date_NominalTime"),\n ),\n (\n """\\nID,date,nominalTime\nKORD,19990127, 19:00:00\nKORD,19990127, 20:00:00""",\n {"ID": [1, 2]},\n "Date column ID already in dict",\n ),\n ],\n)\ndef test_multiple_date_col_name_collision(all_parsers, data, parse_dates, msg):\n parser = all_parsers\n\n depr_msg = (\n "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated"\n )\n with pytest.raises(ValueError, match=msg):\n with 
tm.assert_produces_warning(\n (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False\n ):\n parser.read_csv(StringIO(data), parse_dates=parse_dates)\n\n\ndef test_date_parser_int_bug(all_parsers):\n # see gh-3071\n parser = all_parsers\n data = (\n "posix_timestamp,elapsed,sys,user,queries,query_time,rows,"\n "accountid,userid,contactid,level,silo,method\n"\n "1343103150,0.062353,0,4,6,0.01690,3,"\n "12345,1,-1,3,invoice_InvoiceResource,search\n"\n )\n\n result = parser.read_csv_check_warnings(\n FutureWarning,\n "use 'date_format' instead",\n StringIO(data),\n index_col=0,\n parse_dates=[0],\n # Note: we must pass tz and then drop the tz attribute\n # (if we don't CI will flake out depending on the runner's local time)\n date_parser=lambda x: datetime.fromtimestamp(int(x), tz=timezone.utc).replace(\n tzinfo=None\n ),\n raise_on_extra_warnings=False,\n )\n expected = DataFrame(\n [\n [\n 0.062353,\n 0,\n 4,\n 6,\n 0.01690,\n 3,\n 12345,\n 1,\n -1,\n 3,\n "invoice_InvoiceResource",\n "search",\n ]\n ],\n columns=[\n "elapsed",\n "sys",\n "user",\n "queries",\n "query_time",\n "rows",\n "accountid",\n "userid",\n "contactid",\n "level",\n "silo",\n "method",\n ],\n index=Index([Timestamp("2012-07-24 04:12:30")], name="posix_timestamp"),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow\ndef test_nat_parse(all_parsers):\n # see gh-3062\n parser = all_parsers\n df = DataFrame(\n {\n "A": np.arange(10, dtype="float64"),\n "B": Timestamp("20010101").as_unit("ns"),\n }\n )\n df.iloc[3:6, :] = np.nan\n\n with tm.ensure_clean("__nat_parse_.csv") as path:\n df.to_csv(path)\n\n result = parser.read_csv(path, index_col=0, parse_dates=["B"])\n tm.assert_frame_equal(result, df)\n\n\n@skip_pyarrow\ndef test_csv_custom_parser(all_parsers):\n data = """A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n"""\n parser = all_parsers\n result = parser.read_csv_check_warnings(\n FutureWarning,\n "use 'date_format' instead",\n StringIO(data),\n 
date_parser=lambda x: datetime.strptime(x, "%Y%m%d"),\n )\n expected = parser.read_csv(StringIO(data), parse_dates=True)\n tm.assert_frame_equal(result, expected)\n result = parser.read_csv(StringIO(data), date_format="%Y%m%d")\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\ndef test_parse_dates_implicit_first_col(all_parsers):\n data = """A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n"""\n parser = all_parsers\n result = parser.read_csv(StringIO(data), parse_dates=True)\n\n expected = parser.read_csv(StringIO(data), index_col=0, parse_dates=True)\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow\ndef test_parse_dates_string(all_parsers):\n data = """date,A,B,C\n20090101,a,1,2\n20090102,b,3,4\n20090103,c,4,5\n"""\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col="date", parse_dates=["date"])\n # freq doesn't round-trip\n index = date_range("1/1/2009", periods=3, name="date")._with_freq(None)\n\n expected = DataFrame(\n {"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]}, index=index\n )\n tm.assert_frame_equal(result, expected)\n\n\n# Bug in https://github.com/dateutil/dateutil/issues/217\n# has been addressed, but we just don't pass in the `yearfirst`\n@pytest.mark.xfail(reason="yearfirst is not surfaced in read_*")\n@pytest.mark.parametrize("parse_dates", [[["date", "time"]], [[0, 1]]])\ndef test_yy_format_with_year_first(all_parsers, parse_dates):\n data = """date,time,B,C\n090131,0010,1,2\n090228,1020,3,4\n090331,0830,5,6\n"""\n parser = all_parsers\n result = parser.read_csv_check_warnings(\n UserWarning,\n "Could not infer format",\n StringIO(data),\n index_col=0,\n parse_dates=parse_dates,\n )\n index = DatetimeIndex(\n [\n datetime(2009, 1, 31, 0, 10, 0),\n datetime(2009, 2, 28, 10, 20, 0),\n datetime(2009, 3, 31, 8, 30, 0),\n ],\n dtype=object,\n name="date_time",\n )\n expected = DataFrame({"B": [1, 3, 5], "C": [2, 4, 6]}, index=index)\n tm.assert_frame_equal(result, 
expected)\n\n\n@xfail_pyarrow\n@pytest.mark.parametrize("parse_dates", [[0, 2], ["a", "c"]])\ndef test_parse_dates_column_list(all_parsers, parse_dates):\n data = "a,b,c\n01/01/2010,1,15/02/2010"\n parser = all_parsers\n\n expected = DataFrame(\n {"a": [datetime(2010, 1, 1)], "b": [1], "c": [datetime(2010, 2, 15)]}\n )\n expected = expected.set_index(["a", "b"])\n\n result = parser.read_csv(\n StringIO(data), index_col=[0, 1], parse_dates=parse_dates, dayfirst=True\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow\n@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])\ndef test_multi_index_parse_dates(all_parsers, index_col):\n data = """index1,index2,A,B,C\n20090101,one,a,1,2\n20090101,two,b,3,4\n20090101,three,c,4,5\n20090102,one,a,1,2\n20090102,two,b,3,4\n20090102,three,c,4,5\n20090103,one,a,1,2\n20090103,two,b,3,4\n20090103,three,c,4,5\n"""\n parser = all_parsers\n index = MultiIndex.from_product(\n [\n (datetime(2009, 1, 1), datetime(2009, 1, 2), datetime(2009, 1, 3)),\n ("one", "two", "three"),\n ],\n names=["index1", "index2"],\n )\n\n # Out of order.\n if index_col == [1, 0]:\n index = index.swaplevel(0, 1)\n\n expected = DataFrame(\n [\n ["a", 1, 2],\n ["b", 3, 4],\n ["c", 4, 5],\n ["a", 1, 2],\n ["b", 3, 4],\n ["c", 4, 5],\n ["a", 1, 2],\n ["b", 3, 4],\n ["c", 4, 5],\n ],\n columns=["A", "B", "C"],\n index=index,\n )\n result = parser.read_csv_check_warnings(\n UserWarning,\n "Could not infer format",\n StringIO(data),\n index_col=index_col,\n parse_dates=True,\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow\n@pytest.mark.parametrize("kwargs", [{"dayfirst": True}, {"day_first": True}])\ndef test_parse_dates_custom_euro_format(all_parsers, kwargs):\n parser = all_parsers\n data = """foo,bar,baz\n31/01/2010,1,2\n01/02/2010,1,NA\n02/02/2010,1,2\n"""\n if "dayfirst" in kwargs:\n df = parser.read_csv_check_warnings(\n FutureWarning,\n "use 'date_format' instead",\n StringIO(data),\n names=["time", "Q", "NTU"],\n 
date_parser=lambda d: du_parse(d, **kwargs),\n header=0,\n index_col=0,\n parse_dates=True,\n na_values=["NA"],\n )\n exp_index = Index(\n [datetime(2010, 1, 31), datetime(2010, 2, 1), datetime(2010, 2, 2)],\n name="time",\n )\n expected = DataFrame(\n {"Q": [1, 1, 1], "NTU": [2, np.nan, 2]},\n index=exp_index,\n columns=["Q", "NTU"],\n )\n tm.assert_frame_equal(df, expected)\n else:\n msg = "got an unexpected keyword argument 'day_first'"\n with pytest.raises(TypeError, match=msg):\n parser.read_csv_check_warnings(\n FutureWarning,\n "use 'date_format' instead",\n StringIO(data),\n names=["time", "Q", "NTU"],\n date_parser=lambda d: du_parse(d, **kwargs),\n skiprows=[0],\n index_col=0,\n parse_dates=True,\n na_values=["NA"],\n )\n\n\ndef test_parse_tz_aware(all_parsers):\n # See gh-1693\n parser = all_parsers\n data = "Date,x\n2012-06-13T01:39:00Z,0.5"\n\n result = parser.read_csv(StringIO(data), index_col=0, parse_dates=True)\n # TODO: make unit check more specific\n if parser.engine == "pyarrow":\n result.index = result.index.as_unit("ns")\n expected = DataFrame(\n {"x": [0.5]}, index=Index([Timestamp("2012-06-13 01:39:00+00:00")], name="Date")\n )\n if parser.engine == "pyarrow":\n expected_tz = pytz.utc\n else:\n expected_tz = timezone.utc\n tm.assert_frame_equal(result, expected)\n assert result.index.tz is expected_tz\n\n\n@xfail_pyarrow\n@pytest.mark.parametrize(\n "parse_dates,index_col",\n [({"nominal": [1, 2]}, "nominal"), ({"nominal": [1, 2]}, 0), ([[1, 2]], 0)],\n)\ndef test_multiple_date_cols_index(all_parsers, parse_dates, index_col):\n parser = all_parsers\n data = """\nID,date,NominalTime,ActualTime,TDew,TAir,Windspeed,Precip,WindDir\nKORD1,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD2,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD3,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD4,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 
270.0000\nKORD5,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD6,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n"""\n expected = DataFrame(\n [\n [\n datetime(1999, 1, 27, 19, 0),\n "KORD1",\n " 18:56:00",\n 0.81,\n 2.81,\n 7.2,\n 0.0,\n 280.0,\n ],\n [\n datetime(1999, 1, 27, 20, 0),\n "KORD2",\n " 19:56:00",\n 0.01,\n 2.21,\n 7.2,\n 0.0,\n 260.0,\n ],\n [\n datetime(1999, 1, 27, 21, 0),\n "KORD3",\n " 20:56:00",\n -0.59,\n 2.21,\n 5.7,\n 0.0,\n 280.0,\n ],\n [\n datetime(1999, 1, 27, 21, 0),\n "KORD4",\n " 21:18:00",\n -0.99,\n 2.01,\n 3.6,\n 0.0,\n 270.0,\n ],\n [\n datetime(1999, 1, 27, 22, 0),\n "KORD5",\n " 21:56:00",\n -0.59,\n 1.71,\n 5.1,\n 0.0,\n 290.0,\n ],\n [\n datetime(1999, 1, 27, 23, 0),\n "KORD6",\n " 22:56:00",\n -0.59,\n 1.71,\n 4.6,\n 0.0,\n 280.0,\n ],\n ],\n columns=[\n "nominal",\n "ID",\n "ActualTime",\n "TDew",\n "TAir",\n "Windspeed",\n "Precip",\n "WindDir",\n ],\n )\n expected = expected.set_index("nominal")\n\n if not isinstance(parse_dates, dict):\n expected.index.name = "date_NominalTime"\n\n depr_msg = (\n "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated"\n )\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(\n StringIO(data), parse_dates=parse_dates, index_col=index_col\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow\ndef test_multiple_date_cols_chunked(all_parsers):\n parser = all_parsers\n data = """\\nID,date,nominalTime,actualTime,A,B,C,D,E\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, 
-0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n"""\n\n expected = DataFrame(\n [\n [\n datetime(1999, 1, 27, 19, 0),\n "KORD",\n " 18:56:00",\n 0.81,\n 2.81,\n 7.2,\n 0.0,\n 280.0,\n ],\n [\n datetime(1999, 1, 27, 20, 0),\n "KORD",\n " 19:56:00",\n 0.01,\n 2.21,\n 7.2,\n 0.0,\n 260.0,\n ],\n [\n datetime(1999, 1, 27, 21, 0),\n "KORD",\n " 20:56:00",\n -0.59,\n 2.21,\n 5.7,\n 0.0,\n 280.0,\n ],\n [\n datetime(1999, 1, 27, 21, 0),\n "KORD",\n " 21:18:00",\n -0.99,\n 2.01,\n 3.6,\n 0.0,\n 270.0,\n ],\n [\n datetime(1999, 1, 27, 22, 0),\n "KORD",\n " 21:56:00",\n -0.59,\n 1.71,\n 5.1,\n 0.0,\n 290.0,\n ],\n [\n datetime(1999, 1, 27, 23, 0),\n "KORD",\n " 22:56:00",\n -0.59,\n 1.71,\n 4.6,\n 0.0,\n 280.0,\n ],\n ],\n columns=["nominal", "ID", "actualTime", "A", "B", "C", "D", "E"],\n )\n expected = expected.set_index("nominal")\n\n depr_msg = (\n "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated"\n )\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n with parser.read_csv(\n StringIO(data),\n parse_dates={"nominal": [1, 2]},\n index_col="nominal",\n chunksize=2,\n ) as reader:\n chunks = list(reader)\n\n tm.assert_frame_equal(chunks[0], expected[:2])\n tm.assert_frame_equal(chunks[1], expected[2:4])\n tm.assert_frame_equal(chunks[2], expected[4:])\n\n\ndef test_multiple_date_col_named_index_compat(all_parsers):\n parser = all_parsers\n data = """\\nID,date,nominalTime,actualTime,A,B,C,D,E\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n"""\n\n depr_msg = (\n "Support for nested sequences for 
'parse_dates' in pd.read_csv is deprecated"\n )\n with tm.assert_produces_warning(\n (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False\n ):\n with_indices = parser.read_csv(\n StringIO(data), parse_dates={"nominal": [1, 2]}, index_col="nominal"\n )\n\n with tm.assert_produces_warning(\n (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False\n ):\n with_names = parser.read_csv(\n StringIO(data),\n index_col="nominal",\n parse_dates={"nominal": ["date", "nominalTime"]},\n )\n tm.assert_frame_equal(with_indices, with_names)\n\n\ndef test_multiple_date_col_multiple_index_compat(all_parsers):\n parser = all_parsers\n data = """\\nID,date,nominalTime,actualTime,A,B,C,D,E\nKORD,19990127, 19:00:00, 18:56:00, 0.8100, 2.8100, 7.2000, 0.0000, 280.0000\nKORD,19990127, 20:00:00, 19:56:00, 0.0100, 2.2100, 7.2000, 0.0000, 260.0000\nKORD,19990127, 21:00:00, 20:56:00, -0.5900, 2.2100, 5.7000, 0.0000, 280.0000\nKORD,19990127, 21:00:00, 21:18:00, -0.9900, 2.0100, 3.6000, 0.0000, 270.0000\nKORD,19990127, 22:00:00, 21:56:00, -0.5900, 1.7100, 5.1000, 0.0000, 290.0000\nKORD,19990127, 23:00:00, 22:56:00, -0.5900, 1.7100, 4.6000, 0.0000, 280.0000\n"""\n depr_msg = (\n "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated"\n )\n with tm.assert_produces_warning(\n (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(\n StringIO(data), index_col=["nominal", "ID"], parse_dates={"nominal": [1, 2]}\n )\n with tm.assert_produces_warning(\n (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False\n ):\n expected = parser.read_csv(StringIO(data), parse_dates={"nominal": [1, 2]})\n\n expected = expected.set_index(["nominal", "ID"])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("kwargs", [{}, {"index_col": "C"}])\ndef test_read_with_parse_dates_scalar_non_bool(all_parsers, kwargs):\n # see gh-5636\n parser = all_parsers\n msg = (\n 
"Only booleans, lists, and dictionaries "\n "are accepted for the 'parse_dates' parameter"\n )\n data = """A,B,C\n 1,2,2003-11-1"""\n\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), parse_dates="C", **kwargs)\n\n\n@pytest.mark.parametrize("parse_dates", [(1,), np.array([4, 5]), {1, 3}])\ndef test_read_with_parse_dates_invalid_type(all_parsers, parse_dates):\n parser = all_parsers\n msg = (\n "Only booleans, lists, and dictionaries "\n "are accepted for the 'parse_dates' parameter"\n )\n data = """A,B,C\n 1,2,2003-11-1"""\n\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), parse_dates=(1,))\n\n\n@pytest.mark.parametrize("cache_dates", [True, False])\n@pytest.mark.parametrize("value", ["nan", ""])\ndef test_bad_date_parse(all_parsers, cache_dates, value):\n # if we have an invalid date make sure that we handle this with\n # and w/o the cache properly\n parser = all_parsers\n s = StringIO((f"{value},\n") * (start_caching_at + 1))\n\n parser.read_csv(\n s,\n header=None,\n names=["foo", "bar"],\n parse_dates=["foo"],\n cache_dates=cache_dates,\n )\n\n\n@pytest.mark.parametrize("cache_dates", [True, False])\n@pytest.mark.parametrize("value", ["0"])\ndef test_bad_date_parse_with_warning(all_parsers, cache_dates, value):\n # if we have an invalid date make sure that we handle this with\n # and w/o the cache properly.\n parser = all_parsers\n s = StringIO((f"{value},\n") * 50000)\n\n if parser.engine == "pyarrow":\n # pyarrow reads "0" as 0 (of type int64), and so\n # pandas doesn't try to guess the datetime format\n # TODO: parse dates directly in pyarrow, see\n # https://github.com/pandas-dev/pandas/issues/48017\n warn = None\n elif cache_dates:\n # Note: warning is not raised if 'cache_dates', because here there is only a\n # single unique date and hence no risk of inconsistent parsing.\n warn = None\n else:\n warn = UserWarning\n parser.read_csv_check_warnings(\n warn,\n "Could not infer format",\n s,\n 
header=None,
        names=["foo", "bar"],
        parse_dates=["foo"],
        cache_dates=cache_dates,
        raise_on_extra_warnings=False,
    )


@xfail_pyarrow
def test_parse_dates_empty_string(all_parsers):
    # see gh-2263
    # An empty field in a parse_dates column must become NaT, not raise,
    # even with na_filter=False.
    parser = all_parsers
    data = "Date,test\n2012-01-01,1\n,2"
    result = parser.read_csv(StringIO(data), parse_dates=["Date"], na_filter=False)

    expected = DataFrame(
        [[datetime(2012, 1, 1), 1], [pd.NaT, 2]], columns=["Date", "test"]
    )
    tm.assert_frame_equal(result, expected)


@pytest.mark.parametrize(
    "reader", ["read_csv_check_warnings", "read_table_check_warnings"]
)
def test_parse_dates_infer_datetime_format_warning(all_parsers, reader):
    # GH 49024, 51017
    # The deprecated infer_datetime_format keyword must emit a FutureWarning
    # for both read_csv and read_table.
    parser = all_parsers
    data = "Date,test\n2012-01-01,1\n,2"

    getattr(parser, reader)(
        FutureWarning,
        "The argument 'infer_datetime_format' is deprecated",
        StringIO(data),
        parse_dates=["Date"],
        infer_datetime_format=True,
        sep=",",
        raise_on_extra_warnings=False,
    )


@pytest.mark.parametrize(
    "reader", ["read_csv_check_warnings", "read_table_check_warnings"]
)
def test_parse_dates_date_parser_and_date_format(all_parsers, reader):
    # GH 50601
    # date_parser and date_format are mutually exclusive: passing both must
    # raise TypeError for both read_csv and read_table.
    parser = all_parsers
    data = "Date,test\n2012-01-01,1\n,2"
    msg = "Cannot use both 'date_parser' and 'date_format'"
    with pytest.raises(TypeError, match=msg):
        getattr(parser, reader)(
            FutureWarning,
            "use 'date_format' instead",
            StringIO(data),
            parse_dates=["Date"],
            date_parser=pd.to_datetime,
            date_format="ISO8601",
            sep=",",
        )


@xfail_pyarrow
@pytest.mark.parametrize(
    "data,kwargs,expected",
    [
        (
            "a\n04.15.2016",
            {"parse_dates": ["a"]},
            DataFrame([datetime(2016, 4, 15)], columns=["a"]),
        ),
        (
            "a\n04.15.2016",
            {"parse_dates": True, "index_col": 0},
            DataFrame(index=DatetimeIndex(["2016-04-15"], name="a"), columns=[]),
        ),
        (
            "a,b\n04.15.2016,09.16.2013",
            {"parse_dates": ["a", "b"]},
            DataFrame(
                [[datetime(2016, 4, 15), datetime(2013, 9, 16)]], columns=["a", "b"]
            ),
),\n (\n "a,b\n04.15.2016,09.16.2013",\n {"parse_dates": True, "index_col": [0, 1]},\n DataFrame(\n index=MultiIndex.from_tuples(\n [(datetime(2016, 4, 15), datetime(2013, 9, 16))], names=["a", "b"]\n ),\n columns=[],\n ),\n ),\n ],\n)\ndef test_parse_dates_no_convert_thousands(all_parsers, data, kwargs, expected):\n # see gh-14066\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), thousands=".", **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow\ndef test_parse_date_time_multi_level_column_name(all_parsers):\n data = """\\nD,T,A,B\ndate, time,a,b\n2001-01-05, 09:00:00, 0.0, 10.\n2001-01-06, 00:00:00, 1.0, 11.\n"""\n parser = all_parsers\n result = parser.read_csv_check_warnings(\n FutureWarning,\n "use 'date_format' instead",\n StringIO(data),\n header=[0, 1],\n parse_dates={"date_time": [0, 1]},\n date_parser=pd.to_datetime,\n )\n\n expected_data = [\n [datetime(2001, 1, 5, 9, 0, 0), 0.0, 10.0],\n [datetime(2001, 1, 6, 0, 0, 0), 1.0, 11.0],\n ]\n expected = DataFrame(expected_data, columns=["date_time", ("A", "a"), ("B", "b")])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data,kwargs,expected",\n [\n (\n """\\ndate,time,a,b\n2001-01-05, 10:00:00, 0.0, 10.\n2001-01-05, 00:00:00, 1., 11.\n""",\n {"header": 0, "parse_dates": {"date_time": [0, 1]}},\n DataFrame(\n [\n [datetime(2001, 1, 5, 10, 0, 0), 0.0, 10],\n [datetime(2001, 1, 5, 0, 0, 0), 1.0, 11.0],\n ],\n columns=["date_time", "a", "b"],\n ),\n ),\n (\n (\n "KORD,19990127, 19:00:00, 18:56:00, 0.8100\n"\n "KORD,19990127, 20:00:00, 19:56:00, 0.0100\n"\n "KORD,19990127, 21:00:00, 20:56:00, -0.5900\n"\n "KORD,19990127, 21:00:00, 21:18:00, -0.9900\n"\n "KORD,19990127, 22:00:00, 21:56:00, -0.5900\n"\n "KORD,19990127, 23:00:00, 22:56:00, -0.5900"\n ),\n {"header": None, "parse_dates": {"actual": [1, 2], "nominal": [1, 3]}},\n DataFrame(\n [\n [\n datetime(1999, 1, 27, 19, 0),\n datetime(1999, 1, 27, 18, 56),\n "KORD",\n 0.81,\n ],\n [\n 
datetime(1999, 1, 27, 20, 0),\n datetime(1999, 1, 27, 19, 56),\n "KORD",\n 0.01,\n ],\n [\n datetime(1999, 1, 27, 21, 0),\n datetime(1999, 1, 27, 20, 56),\n "KORD",\n -0.59,\n ],\n [\n datetime(1999, 1, 27, 21, 0),\n datetime(1999, 1, 27, 21, 18),\n "KORD",\n -0.99,\n ],\n [\n datetime(1999, 1, 27, 22, 0),\n datetime(1999, 1, 27, 21, 56),\n "KORD",\n -0.59,\n ],\n [\n datetime(1999, 1, 27, 23, 0),\n datetime(1999, 1, 27, 22, 56),\n "KORD",\n -0.59,\n ],\n ],\n columns=["actual", "nominal", 0, 4],\n ),\n ),\n ],\n)\ndef test_parse_date_time(all_parsers, data, kwargs, expected):\n parser = all_parsers\n result = parser.read_csv_check_warnings(\n FutureWarning,\n "use 'date_format' instead",\n StringIO(data),\n date_parser=pd.to_datetime,\n **kwargs,\n raise_on_extra_warnings=False,\n )\n\n # Python can sometimes be flaky about how\n # the aggregated columns are entered, so\n # this standardizes the order.\n result = result[expected.columns]\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_fields(all_parsers):\n parser = all_parsers\n data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11."\n result = parser.read_csv_check_warnings(\n FutureWarning,\n "use 'date_format' instead",\n StringIO(data),\n header=0,\n parse_dates={"ymd": [0, 1, 2]},\n date_parser=lambda x: x,\n raise_on_extra_warnings=False,\n )\n\n expected = DataFrame(\n [[datetime(2001, 1, 10), 10.0], [datetime(2001, 2, 1), 11.0]],\n columns=["ymd", "a"],\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n ("key", "value", "warn"),\n [\n (\n "date_parser",\n lambda x: pd.to_datetime(x, format="%Y %m %d %H %M %S"),\n FutureWarning,\n ),\n ("date_format", "%Y %m %d %H %M %S", None),\n ],\n)\ndef test_parse_date_all_fields(all_parsers, key, value, warn):\n parser = all_parsers\n data = """\\nyear,month,day,hour,minute,second,a,b\n2001,01,05,10,00,0,0.0,10.\n2001,01,5,10,0,00,1.,11.\n"""\n result = parser.read_csv_check_warnings(\n warn,\n "use 'date_format' 
instead",\n StringIO(data),\n header=0,\n parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]},\n **{key: value},\n raise_on_extra_warnings=False,\n )\n expected = DataFrame(\n [\n [datetime(2001, 1, 5, 10, 0, 0), 0.0, 10.0],\n [datetime(2001, 1, 5, 10, 0, 0), 1.0, 11.0],\n ],\n columns=["ymdHMS", "a", "b"],\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n ("key", "value", "warn"),\n [\n (\n "date_parser",\n lambda x: pd.to_datetime(x, format="%Y %m %d %H %M %S.%f"),\n FutureWarning,\n ),\n ("date_format", "%Y %m %d %H %M %S.%f", None),\n ],\n)\ndef test_datetime_fractional_seconds(all_parsers, key, value, warn):\n parser = all_parsers\n data = """\\nyear,month,day,hour,minute,second,a,b\n2001,01,05,10,00,0.123456,0.0,10.\n2001,01,5,10,0,0.500000,1.,11.\n"""\n result = parser.read_csv_check_warnings(\n warn,\n "use 'date_format' instead",\n StringIO(data),\n header=0,\n parse_dates={"ymdHMS": [0, 1, 2, 3, 4, 5]},\n **{key: value},\n raise_on_extra_warnings=False,\n )\n expected = DataFrame(\n [\n [datetime(2001, 1, 5, 10, 0, 0, microsecond=123456), 0.0, 10.0],\n [datetime(2001, 1, 5, 10, 0, 0, microsecond=500000), 1.0, 11.0],\n ],\n columns=["ymdHMS", "a", "b"],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_generic(all_parsers):\n parser = all_parsers\n data = "year,month,day,a\n2001,01,10,10.\n2001,02,1,11."\n\n def parse_function(yy, mm):\n return [date(year=int(y), month=int(m), day=1) for y, m in zip(yy, mm)]\n\n result = parser.read_csv_check_warnings(\n FutureWarning,\n "use 'date_format' instead",\n StringIO(data),\n header=0,\n parse_dates={"ym": [0, 1]},\n date_parser=parse_function,\n raise_on_extra_warnings=False,\n )\n expected = DataFrame(\n [[date(2001, 1, 1), 10, 10.0], [date(2001, 2, 1), 1, 11.0]],\n columns=["ym", "day", "a"],\n )\n expected["ym"] = expected["ym"].astype("datetime64[ns]")\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow\ndef test_date_parser_resolution_if_not_ns(all_parsers):\n # see 
gh-10245\n parser = all_parsers\n data = """\\ndate,time,prn,rxstatus\n2013-11-03,19:00:00,126,00E80000\n2013-11-03,19:00:00,23,00E80000\n2013-11-03,19:00:00,13,00E80000\n"""\n\n def date_parser(dt, time):\n try:\n arr = dt + "T" + time\n except TypeError:\n # dt & time are date/time objects\n arr = [datetime.combine(d, t) for d, t in zip(dt, time)]\n return np.array(arr, dtype="datetime64[s]")\n\n result = parser.read_csv_check_warnings(\n FutureWarning,\n "use 'date_format' instead",\n StringIO(data),\n date_parser=date_parser,\n parse_dates={"datetime": ["date", "time"]},\n index_col=["datetime", "prn"],\n )\n\n datetimes = np.array(["2013-11-03T19:00:00"] * 3, dtype="datetime64[s]")\n expected = DataFrame(\n data={"rxstatus": ["00E80000"] * 3},\n index=MultiIndex.from_arrays(\n [datetimes, [126, 23, 13]],\n names=["datetime", "prn"],\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_date_column_with_empty_string(all_parsers):\n # see gh-6428\n parser = all_parsers\n data = "case,opdate\n7,10/18/2006\n7,10/18/2008\n621, "\n result = parser.read_csv(StringIO(data), parse_dates=["opdate"])\n\n expected_data = [[7, "10/18/2006"], [7, "10/18/2008"], [621, " "]]\n expected = DataFrame(expected_data, columns=["case", "opdate"])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data,expected",\n [\n (\n "a\n135217135789158401\n1352171357E+5",\n DataFrame({"a": [135217135789158401, 135217135700000]}, dtype="float64"),\n ),\n (\n "a\n99999999999\n123456789012345\n1234E+0",\n DataFrame({"a": [99999999999, 123456789012345, 1234]}, dtype="float64"),\n ),\n ],\n)\n@pytest.mark.parametrize("parse_dates", [True, False])\ndef test_parse_date_float(all_parsers, data, expected, parse_dates):\n # see gh-2697\n #\n # Date parsing should fail, so we leave the data untouched\n # (i.e. 
float precision should remain unchanged).\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), parse_dates=parse_dates)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_timezone(all_parsers):\n # see gh-22256\n parser = all_parsers\n data = """dt,val\n 2018-01-04 09:01:00+09:00,23350\n 2018-01-04 09:02:00+09:00,23400\n 2018-01-04 09:03:00+09:00,23400\n 2018-01-04 09:04:00+09:00,23400\n 2018-01-04 09:05:00+09:00,23400"""\n result = parser.read_csv(StringIO(data), parse_dates=["dt"])\n\n dti = date_range(\n start="2018-01-04 09:01:00",\n end="2018-01-04 09:05:00",\n freq="1min",\n tz=timezone(timedelta(minutes=540)),\n )._with_freq(None)\n expected_data = {"dt": dti, "val": [23350, 23400, 23400, 23400, 23400]}\n\n expected = DataFrame(expected_data)\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # pandas.errors.ParserError: CSV parse error\n@pytest.mark.parametrize(\n "date_string",\n ["32/32/2019", "02/30/2019", "13/13/2019", "13/2019", "a3/11/2018", "10/11/2o17"],\n)\ndef test_invalid_parse_delimited_date(all_parsers, date_string):\n parser = all_parsers\n expected = DataFrame({0: [date_string]}, dtype="str")\n result = parser.read_csv(\n StringIO(date_string),\n header=None,\n parse_dates=[0],\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "date_string,dayfirst,expected",\n [\n # %d/%m/%Y; month > 12 thus replacement\n ("13/02/2019", True, datetime(2019, 2, 13)),\n # %m/%d/%Y; day > 12 thus there will be no replacement\n ("02/13/2019", False, datetime(2019, 2, 13)),\n # %d/%m/%Y; dayfirst==True thus replacement\n ("04/02/2019", True, datetime(2019, 2, 4)),\n ],\n)\ndef test_parse_delimited_date_swap_no_warning(\n all_parsers, date_string, dayfirst, expected, request\n):\n parser = all_parsers\n expected = DataFrame({0: [expected]}, dtype="datetime64[ns]")\n if parser.engine == "pyarrow":\n if not dayfirst:\n # "CSV parse error: Empty CSV file or block"\n 
pytest.skip(reason="https://github.com/apache/arrow/issues/38676")\n msg = "The 'dayfirst' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(date_string), header=None, dayfirst=dayfirst, parse_dates=[0]\n )\n return\n\n result = parser.read_csv(\n StringIO(date_string), header=None, dayfirst=dayfirst, parse_dates=[0]\n )\n tm.assert_frame_equal(result, expected)\n\n\n# ArrowInvalid: CSV parse error: Empty CSV file or block: cannot infer number of columns\n@skip_pyarrow\n@pytest.mark.parametrize(\n "date_string,dayfirst,expected",\n [\n # %d/%m/%Y; month > 12\n ("13/02/2019", False, datetime(2019, 2, 13)),\n # %m/%d/%Y; day > 12\n ("02/13/2019", True, datetime(2019, 2, 13)),\n ],\n)\ndef test_parse_delimited_date_swap_with_warning(\n all_parsers, date_string, dayfirst, expected\n):\n parser = all_parsers\n expected = DataFrame({0: [expected]}, dtype="datetime64[ns]")\n warning_msg = (\n "Parsing dates in .* format when dayfirst=.* was specified. "\n "Pass `dayfirst=.*` or specify a format to silence this warning."\n )\n result = parser.read_csv_check_warnings(\n UserWarning,\n warning_msg,\n StringIO(date_string),\n header=None,\n dayfirst=dayfirst,\n parse_dates=[0],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_multiple_delimited_dates_with_swap_warnings():\n # GH46210\n with pytest.raises(\n ValueError,\n match=(\n r'^time data "31/05/2000" doesn\'t match format "%m/%d/%Y", '\n r"at position 1. 
You might want to try:"\n ),\n ):\n pd.to_datetime(["01/01/2000", "31/05/2000", "31/05/2001", "01/02/2000"])\n\n\n# ArrowKeyError: Column 'fdate1' in include_columns does not exist in CSV file\n@skip_pyarrow\n@pytest.mark.parametrize(\n "names, usecols, parse_dates, missing_cols",\n [\n (None, ["val"], ["date", "time"], "date, time"),\n (None, ["val"], [0, "time"], "time"),\n (None, ["val"], [["date", "time"]], "date, time"),\n (None, ["val"], [[0, "time"]], "time"),\n (None, ["val"], {"date": [0, "time"]}, "time"),\n (None, ["val"], {"date": ["date", "time"]}, "date, time"),\n (None, ["val"], [["date", "time"], "date"], "date, time"),\n (["date1", "time1", "temperature"], None, ["date", "time"], "date, time"),\n (\n ["date1", "time1", "temperature"],\n ["date1", "temperature"],\n ["date1", "time"],\n "time",\n ),\n ],\n)\ndef test_missing_parse_dates_column_raises(\n all_parsers, names, usecols, parse_dates, missing_cols\n):\n # gh-31251 column names provided in parse_dates could be missing.\n parser = all_parsers\n content = StringIO("date,time,val\n2020-01-31,04:20:32,32\n")\n msg = f"Missing column provided to 'parse_dates': '{missing_cols}'"\n\n depr_msg = (\n "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated"\n )\n warn = FutureWarning\n if isinstance(parse_dates, list) and all(\n isinstance(x, (int, str)) for x in parse_dates\n ):\n warn = None\n\n with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(warn, match=depr_msg, check_stacklevel=False):\n parser.read_csv(\n content, sep=",", names=names, usecols=usecols, parse_dates=parse_dates\n )\n\n\n@xfail_pyarrow # mismatched shape\ndef test_date_parser_and_names(all_parsers):\n # GH#33699\n parser = all_parsers\n data = StringIO("""x,y\n1,2""")\n warn = UserWarning\n if parser.engine == "pyarrow":\n # DeprecationWarning for passing a Manager object\n warn = (UserWarning, DeprecationWarning)\n result = parser.read_csv_check_warnings(\n warn,\n "Could not infer 
format",\n data,\n parse_dates=["B"],\n names=["B"],\n )\n expected = DataFrame({"B": ["y", "2"]}, index=["x", "1"])\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\ndef test_date_parser_multiindex_columns(all_parsers):\n parser = all_parsers\n data = """a,b\n1,2\n2019-12-31,6"""\n result = parser.read_csv(StringIO(data), parse_dates=[("a", "1")], header=[0, 1])\n expected = DataFrame(\n {("a", "1"): Timestamp("2019-12-31").as_unit("ns"), ("b", "2"): [6]}\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\n@pytest.mark.parametrize(\n "parse_spec, col_name",\n [\n ([[("a", "1"), ("b", "2")]], ("a_b", "1_2")),\n ({("foo", "1"): [("a", "1"), ("b", "2")]}, ("foo", "1")),\n ],\n)\ndef test_date_parser_multiindex_columns_combine_cols(all_parsers, parse_spec, col_name):\n parser = all_parsers\n data = """a,b,c\n1,2,3\n2019-12,-31,6"""\n\n depr_msg = (\n "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated"\n )\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(\n StringIO(data),\n parse_dates=parse_spec,\n header=[0, 1],\n )\n expected = DataFrame(\n {col_name: Timestamp("2019-12-31").as_unit("ns"), ("c", "3"): [6]}\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_date_parser_usecols_thousands(all_parsers):\n # GH#39365\n data = """A,B,C\n 1,3,20-09-01-01\n 2,4,20-09-01-01\n """\n\n parser = all_parsers\n\n if parser.engine == "pyarrow":\n # DeprecationWarning for passing a Manager object\n msg = "The 'thousands' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(data),\n parse_dates=[1],\n usecols=[1, 2],\n thousands="-",\n )\n return\n\n result = parser.read_csv_check_warnings(\n UserWarning,\n "Could not infer format",\n StringIO(data),\n parse_dates=[1],\n usecols=[1, 2],\n thousands="-",\n 
)\n expected = DataFrame({"B": [3, 4], "C": [Timestamp("20-09-2001 01:00:00")] * 2})\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # mismatched shape\ndef test_parse_dates_and_keep_original_column(all_parsers):\n # GH#13378\n parser = all_parsers\n data = """A\n20150908\n20150909\n"""\n depr_msg = "The 'keep_date_col' keyword in pd.read_csv is deprecated"\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(\n StringIO(data), parse_dates={"date": ["A"]}, keep_date_col=True\n )\n expected_data = [Timestamp("2015-09-08"), Timestamp("2015-09-09")]\n expected = DataFrame({"date": expected_data, "A": expected_data})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_dayfirst_warnings():\n # GH 12585\n\n # CASE 1: valid input\n input = "date\n31/12/2014\n10/03/2011"\n expected = DatetimeIndex(\n ["2014-12-31", "2011-03-10"], dtype="datetime64[ns]", freq=None, name="date"\n )\n warning_msg = (\n "Parsing dates in .* format when dayfirst=.* was specified. "\n "Pass `dayfirst=.*` or specify a format to silence this warning."\n )\n\n # A. dayfirst arg correct, no warning\n res1 = read_csv(\n StringIO(input), parse_dates=["date"], dayfirst=True, index_col="date"\n ).index\n tm.assert_index_equal(expected, res1)\n\n # B. dayfirst arg incorrect, warning\n with tm.assert_produces_warning(UserWarning, match=warning_msg):\n res2 = read_csv(\n StringIO(input), parse_dates=["date"], dayfirst=False, index_col="date"\n ).index\n tm.assert_index_equal(expected, res2)\n\n # CASE 2: invalid input\n # cannot consistently process with single format\n # return to user unaltered\n\n # first in DD/MM/YYYY, second in MM/DD/YYYY\n input = "date\n31/12/2014\n03/30/2011"\n expected = Index(["31/12/2014", "03/30/2011"], dtype="str", name="date")\n\n # A. 
use dayfirst=True\n res5 = read_csv(\n StringIO(input), parse_dates=["date"], dayfirst=True, index_col="date"\n ).index\n tm.assert_index_equal(expected, res5)\n\n # B. use dayfirst=False\n with tm.assert_produces_warning(UserWarning, match=warning_msg):\n res6 = read_csv(\n StringIO(input), parse_dates=["date"], dayfirst=False, index_col="date"\n ).index\n tm.assert_index_equal(expected, res6)\n\n\n@pytest.mark.parametrize(\n "date_string, dayfirst",\n [\n pytest.param(\n "31/1/2014",\n False,\n id="second date is single-digit",\n ),\n pytest.param(\n "1/31/2014",\n True,\n id="first date is single-digit",\n ),\n ],\n)\ndef test_dayfirst_warnings_no_leading_zero(date_string, dayfirst):\n # GH47880\n initial_value = f"date\n{date_string}"\n expected = DatetimeIndex(\n ["2014-01-31"], dtype="datetime64[ns]", freq=None, name="date"\n )\n warning_msg = (\n "Parsing dates in .* format when dayfirst=.* was specified. "\n "Pass `dayfirst=.*` or specify a format to silence this warning."\n )\n with tm.assert_produces_warning(UserWarning, match=warning_msg):\n res = read_csv(\n StringIO(initial_value),\n parse_dates=["date"],\n index_col="date",\n dayfirst=dayfirst,\n ).index\n tm.assert_index_equal(expected, res)\n\n\n@skip_pyarrow # CSV parse error: Expected 3 columns, got 4\ndef test_infer_first_column_as_index(all_parsers):\n # GH#11019\n parser = all_parsers\n data = "a,b,c\n1970-01-01,2,3,4"\n result = parser.read_csv(\n StringIO(data),\n parse_dates=["a"],\n )\n expected = DataFrame({"a": "2", "b": 3, "c": 4}, index=["1970-01-01"])\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # pyarrow engine doesn't support passing a dict for na_values\n@pytest.mark.parametrize(\n ("key", "value", "warn"),\n [\n ("date_parser", lambda x: pd.to_datetime(x, format="%Y-%m-%d"), FutureWarning),\n ("date_format", "%Y-%m-%d", None),\n ],\n)\ndef test_replace_nans_before_parsing_dates(all_parsers, key, value, warn):\n # GH#26203\n parser = all_parsers\n data = 
"""Test\n2012-10-01\n0\n2015-05-15\n#\n2017-09-09\n"""\n result = parser.read_csv_check_warnings(\n warn,\n "use 'date_format' instead",\n StringIO(data),\n na_values={"Test": ["#", "0"]},\n parse_dates=["Test"],\n **{key: value},\n )\n expected = DataFrame(\n {\n "Test": [\n Timestamp("2012-10-01"),\n pd.NaT,\n Timestamp("2015-05-15"),\n pd.NaT,\n Timestamp("2017-09-09"),\n ]\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # string[python] instead of dt64[ns]\ndef test_parse_dates_and_string_dtype(all_parsers):\n # GH#34066\n parser = all_parsers\n data = """a,b\n1,2019-12-31\n"""\n result = parser.read_csv(StringIO(data), dtype="string", parse_dates=["b"])\n expected = DataFrame({"a": ["1"], "b": [Timestamp("2019-12-31")]})\n expected["a"] = expected["a"].astype("string")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_dot_separated_dates(all_parsers):\n # https://github.com/pandas-dev/pandas/issues/2586\n parser = all_parsers\n data = """a,b\n27.03.2003 14:55:00.000,1\n03.08.2003 15:20:00.000,2"""\n if parser.engine == "pyarrow":\n expected_index = Index(\n ["27.03.2003 14:55:00.000", "03.08.2003 15:20:00.000"],\n dtype="str",\n name="a",\n )\n warn = None\n else:\n expected_index = DatetimeIndex(\n ["2003-03-27 14:55:00", "2003-08-03 15:20:00"],\n dtype="datetime64[ns]",\n name="a",\n )\n warn = UserWarning\n msg = r"when dayfirst=False \(the default\) was specified"\n result = parser.read_csv_check_warnings(\n warn,\n msg,\n StringIO(data),\n parse_dates=True,\n index_col=0,\n raise_on_extra_warnings=False,\n )\n expected = DataFrame({"b": [1, 2]}, index=expected_index)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_dates_dict_format(all_parsers):\n # GH#51240\n parser = all_parsers\n data = """a,b\n2019-12-31,31-12-2019\n2020-12-31,31-12-2020"""\n\n result = parser.read_csv(\n StringIO(data),\n date_format={"a": "%Y-%m-%d", "b": "%d-%m-%Y"},\n parse_dates=["a", "b"],\n )\n expected = DataFrame(\n {\n "a": 
[Timestamp("2019-12-31"), Timestamp("2020-12-31")],\n "b": [Timestamp("2019-12-31"), Timestamp("2020-12-31")],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "key, parse_dates", [("a_b", [[0, 1]]), ("foo", {"foo": [0, 1]})]\n)\ndef test_parse_dates_dict_format_two_columns(all_parsers, key, parse_dates):\n # GH#51240\n parser = all_parsers\n data = """a,b\n31-,12-2019\n31-,12-2020"""\n\n depr_msg = (\n "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated"\n )\n with tm.assert_produces_warning(\n (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(\n StringIO(data), date_format={key: "%d- %m-%Y"}, parse_dates=parse_dates\n )\n expected = DataFrame(\n {\n key: [Timestamp("2019-12-31"), Timestamp("2020-12-31")],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # object dtype index\ndef test_parse_dates_dict_format_index(all_parsers):\n # GH#51240\n parser = all_parsers\n data = """a,b\n2019-12-31,31-12-2019\n2020-12-31,31-12-2020"""\n\n result = parser.read_csv(\n StringIO(data), date_format={"a": "%Y-%m-%d"}, parse_dates=True, index_col=0\n )\n expected = DataFrame(\n {\n "b": ["31-12-2019", "31-12-2020"],\n },\n index=Index([Timestamp("2019-12-31"), Timestamp("2020-12-31")], name="a"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_dates_arrow_engine(all_parsers):\n # GH#53295\n parser = all_parsers\n data = """a,b\n2000-01-01 00:00:00,1\n2000-01-01 00:00:01,1"""\n\n result = parser.read_csv(StringIO(data), parse_dates=["a"])\n # TODO: make unit check more specific\n if parser.engine == "pyarrow":\n result["a"] = result["a"].dt.as_unit("ns")\n expected = DataFrame(\n {\n "a": [\n Timestamp("2000-01-01 00:00:00"),\n Timestamp("2000-01-01 00:00:01"),\n ],\n "b": 1,\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # object dtype index\ndef test_from_csv_with_mixed_offsets(all_parsers):\n parser = 
all_parsers\n data = "a\n2020-01-01T00:00:00+01:00\n2020-01-01T00:00:00+00:00"\n result = parser.read_csv(StringIO(data), parse_dates=["a"])["a"]\n expected = Series(\n [\n Timestamp("2020-01-01 00:00:00+01:00"),\n Timestamp("2020-01-01 00:00:00+00:00"),\n ],\n name="a",\n index=[0, 1],\n )\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_parse_dates.py | test_parse_dates.py | Python | 69,728 | 0.75 | 0.053846 | 0.045262 | python-kit | 425 | 2024-01-01T09:32:38.493769 | GPL-3.0 | true | 10d075b019ad3bc393a6e4a000ec329b |
"""\nTests that apply specifically to the Python parser. Unless specifically\nstated as a Python-specific issue, the goal is to eventually move as many of\nthese tests out of this module as soon as the C parser can accept further\narguments when parsing.\n"""\nfrom __future__ import annotations\n\nimport csv\nfrom io import (\n BytesIO,\n StringIO,\n TextIOWrapper,\n)\nfrom typing import TYPE_CHECKING\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import (\n ParserError,\n ParserWarning,\n)\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n)\nimport pandas._testing as tm\n\nif TYPE_CHECKING:\n from collections.abc import Iterator\n\n\ndef test_default_separator(python_parser_only):\n # see gh-17333\n #\n # csv.Sniffer in Python treats "o" as separator.\n data = "aob\n1o2\n3o4"\n parser = python_parser_only\n expected = DataFrame({"a": [1, 3], "b": [2, 4]})\n\n result = parser.read_csv(StringIO(data), sep=None)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("skipfooter", ["foo", 1.5, True])\ndef test_invalid_skipfooter_non_int(python_parser_only, skipfooter):\n # see gh-15925 (comment)\n data = "a\n1\n2"\n parser = python_parser_only\n msg = "skipfooter must be an integer"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=skipfooter)\n\n\ndef test_invalid_skipfooter_negative(python_parser_only):\n # see gh-15925 (comment)\n data = "a\n1\n2"\n parser = python_parser_only\n msg = "skipfooter cannot be negative"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=-1)\n\n\n@pytest.mark.parametrize("kwargs", [{"sep": None}, {"delimiter": "|"}])\ndef test_sniff_delimiter(python_parser_only, kwargs):\n data = """index|A|B|C\nfoo|1|2|3\nbar|4|5|6\nbaz|7|8|9\n"""\n parser = python_parser_only\n result = parser.read_csv(StringIO(data), index_col=0, **kwargs)\n expected = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=["A", "B", "C"],\n 
index=Index(["foo", "bar", "baz"], name="index"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_sniff_delimiter_comment(python_parser_only):\n data = """# comment line\nindex|A|B|C\n# comment line\nfoo|1|2|3 # ignore | this\nbar|4|5|6\nbaz|7|8|9\n"""\n parser = python_parser_only\n result = parser.read_csv(StringIO(data), index_col=0, sep=None, comment="#")\n expected = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=["A", "B", "C"],\n index=Index(["foo", "bar", "baz"], name="index"),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("encoding", [None, "utf-8"])\ndef test_sniff_delimiter_encoding(python_parser_only, encoding):\n parser = python_parser_only\n data = """ignore this\nignore this too\nindex|A|B|C\nfoo|1|2|3\nbar|4|5|6\nbaz|7|8|9\n"""\n\n if encoding is not None:\n data = data.encode(encoding)\n data = BytesIO(data)\n data = TextIOWrapper(data, encoding=encoding)\n else:\n data = StringIO(data)\n\n result = parser.read_csv(data, index_col=0, sep=None, skiprows=2, encoding=encoding)\n expected = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n columns=["A", "B", "C"],\n index=Index(["foo", "bar", "baz"], name="index"),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_single_line(python_parser_only):\n # see gh-6607: sniff separator\n parser = python_parser_only\n result = parser.read_csv(StringIO("1,2"), names=["a", "b"], header=None, sep=None)\n\n expected = DataFrame({"a": [1], "b": [2]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("kwargs", [{"skipfooter": 2}, {"nrows": 3}])\ndef test_skipfooter(python_parser_only, kwargs):\n # see gh-6607\n data = """A,B,C\n1,2,3\n4,5,6\n7,8,9\nwant to skip this\nalso also skip this\n"""\n parser = python_parser_only\n result = parser.read_csv(StringIO(data), **kwargs)\n\n expected = DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["A", "B", "C"])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n 
"compression,klass", [("gzip", "GzipFile"), ("bz2", "BZ2File")]\n)\ndef test_decompression_regex_sep(python_parser_only, csv1, compression, klass):\n # see gh-6607\n parser = python_parser_only\n\n with open(csv1, "rb") as f:\n data = f.read()\n\n data = data.replace(b",", b"::")\n expected = parser.read_csv(csv1)\n\n module = pytest.importorskip(compression)\n klass = getattr(module, klass)\n\n with tm.ensure_clean() as path:\n with klass(path, mode="wb") as tmp:\n tmp.write(data)\n\n result = parser.read_csv(path, sep="::", compression=compression)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_buglet_4x_multi_index(python_parser_only):\n # see gh-6607\n data = """ A B C D E\none two three four\na b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640\na q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744\nx q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""\n parser = python_parser_only\n\n expected = DataFrame(\n [\n [-0.5109, -2.3358, -0.4645, 0.05076, 0.3640],\n [0.4473, 1.4152, 0.2834, 1.00661, 0.1744],\n [-0.6662, -0.5243, -0.3580, 0.89145, 2.5838],\n ],\n columns=["A", "B", "C", "D", "E"],\n index=MultiIndex.from_tuples(\n [("a", "b", 10.0032, 5), ("a", "q", 20, 4), ("x", "q", 30, 3)],\n names=["one", "two", "three", "four"],\n ),\n )\n result = parser.read_csv(StringIO(data), sep=r"\s+")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_buglet_4x_multi_index2(python_parser_only):\n # see gh-6893\n data = " A B C\na b c\n1 3 7 0 3 6\n3 1 4 1 5 9"\n parser = python_parser_only\n\n expected = DataFrame.from_records(\n [(1, 3, 7, 0, 3, 6), (3, 1, 4, 1, 5, 9)],\n columns=list("abcABC"),\n index=list("abc"),\n )\n result = parser.read_csv(StringIO(data), sep=r"\s+")\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("add_footer", [True, False])\ndef test_skipfooter_with_decimal(python_parser_only, add_footer):\n # see gh-6971\n data = "1#2\n3#4"\n parser = python_parser_only\n expected = DataFrame({"a": [1.2, 3.4]})\n\n if 
add_footer:\n # The stray footer line should not mess with the\n # casting of the first two lines if we skip it.\n kwargs = {"skipfooter": 1}\n data += "\nFooter"\n else:\n kwargs = {}\n\n result = parser.read_csv(StringIO(data), names=["a"], decimal="#", **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "sep", ["::", "#####", "!!!", "123", "#1!c5", "%!c!d", "@@#4:2", "_!pd#_"]\n)\n@pytest.mark.parametrize(\n "encoding", ["utf-16", "utf-16-be", "utf-16-le", "utf-32", "cp037"]\n)\ndef test_encoding_non_utf8_multichar_sep(python_parser_only, sep, encoding):\n # see gh-3404\n expected = DataFrame({"a": [1], "b": [2]})\n parser = python_parser_only\n\n data = "1" + sep + "2"\n encoded_data = data.encode(encoding)\n\n result = parser.read_csv(\n BytesIO(encoded_data), sep=sep, names=["a", "b"], encoding=encoding\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE])\ndef test_multi_char_sep_quotes(python_parser_only, quoting):\n # see gh-13374\n kwargs = {"sep": ",,"}\n parser = python_parser_only\n\n data = 'a,,b\n1,,a\n2,,"2,,b"'\n\n if quoting == csv.QUOTE_NONE:\n msg = "Expected 2 fields in line 3, saw 3"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), quoting=quoting, **kwargs)\n else:\n msg = "ignored when a multi-char delimiter is used"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), quoting=quoting, **kwargs)\n\n\ndef test_none_delimiter(python_parser_only):\n # see gh-13374 and gh-17465\n parser = python_parser_only\n data = "a,b,c\n0,1,2\n3,4,5,6\n7,8,9"\n expected = DataFrame({"a": [0, 7], "b": [1, 8], "c": [2, 9]})\n\n # We expect the third line in the data to be\n # skipped because it is malformed, but we do\n # not expect any errors to occur.\n with tm.assert_produces_warning(\n ParserWarning, match="Skipping line 3", check_stacklevel=False\n ):\n result = parser.read_csv(\n StringIO(data), 
header=0, sep=None, on_bad_lines="warn"\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("data", ['a\n1\n"b"a', 'a,b,c\ncat,foo,bar\ndog,foo,"baz'])\n@pytest.mark.parametrize("skipfooter", [0, 1])\ndef test_skipfooter_bad_row(python_parser_only, data, skipfooter):\n # see gh-13879 and gh-15910\n parser = python_parser_only\n if skipfooter:\n msg = "parsing errors in the skipped footer rows"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=skipfooter)\n else:\n msg = "unexpected end of data|expected after"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=skipfooter)\n\n\ndef test_malformed_skipfooter(python_parser_only):\n parser = python_parser_only\n data = """ignore\nA,B,C\n1,2,3 # comment\n1,2,3,4,5\n2,3,4\nfooter\n"""\n msg = "Expected 3 fields in line 4, saw 5"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), header=1, comment="#", skipfooter=1)\n\n\ndef test_python_engine_file_no_next(python_parser_only):\n parser = python_parser_only\n\n class NoNextBuffer:\n def __init__(self, csv_data) -> None:\n self.data = csv_data\n\n def __iter__(self) -> Iterator:\n return self.data.__iter__()\n\n def read(self):\n return self.data\n\n def readline(self):\n return self.data\n\n parser.read_csv(NoNextBuffer("a\n1"))\n\n\n@pytest.mark.parametrize("bad_line_func", [lambda x: ["2", "3"], lambda x: x[:2]])\ndef test_on_bad_lines_callable(python_parser_only, bad_line_func):\n # GH 5686\n parser = python_parser_only\n data = """a,b\n1,2\n2,3,4,5,6\n3,4\n"""\n bad_sio = StringIO(data)\n result = parser.read_csv(bad_sio, on_bad_lines=bad_line_func)\n expected = DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_on_bad_lines_callable_write_to_external_list(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = """a,b\n1,2\n2,3,4,5,6\n3,4\n"""\n bad_sio = StringIO(data)\n 
lst = []\n\n def bad_line_func(bad_line: list[str]) -> list[str]:\n lst.append(bad_line)\n return ["2", "3"]\n\n result = parser.read_csv(bad_sio, on_bad_lines=bad_line_func)\n expected = DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]})\n tm.assert_frame_equal(result, expected)\n assert lst == [["2", "3", "4", "5", "6"]]\n\n\n@pytest.mark.parametrize("bad_line_func", [lambda x: ["foo", "bar"], lambda x: x[:2]])\n@pytest.mark.parametrize("sep", [",", "111"])\ndef test_on_bad_lines_callable_iterator_true(python_parser_only, bad_line_func, sep):\n # GH 5686\n # iterator=True has a separate code path than iterator=False\n parser = python_parser_only\n data = f"""\n0{sep}1\nhi{sep}there\nfoo{sep}bar{sep}baz\ngood{sep}bye\n"""\n bad_sio = StringIO(data)\n result_iter = parser.read_csv(\n bad_sio, on_bad_lines=bad_line_func, chunksize=1, iterator=True, sep=sep\n )\n expecteds = [\n {"0": "hi", "1": "there"},\n {"0": "foo", "1": "bar"},\n {"0": "good", "1": "bye"},\n ]\n for i, (result, expected) in enumerate(zip(result_iter, expecteds)):\n expected = DataFrame(expected, index=range(i, i + 1))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_on_bad_lines_callable_dont_swallow_errors(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = """a,b\n1,2\n2,3,4,5,6\n3,4\n"""\n bad_sio = StringIO(data)\n msg = "This function is buggy."\n\n def bad_line_func(bad_line):\n raise ValueError(msg)\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(bad_sio, on_bad_lines=bad_line_func)\n\n\ndef test_on_bad_lines_callable_not_expected_length(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = """a,b\n1,2\n2,3,4,5,6\n3,4\n"""\n bad_sio = StringIO(data)\n\n result = parser.read_csv_check_warnings(\n ParserWarning, "Length of header or names", bad_sio, on_bad_lines=lambda x: x\n )\n expected = DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]})\n tm.assert_frame_equal(result, expected)\n\n\ndef 
test_on_bad_lines_callable_returns_none(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = """a,b\n1,2\n2,3,4,5,6\n3,4\n"""\n bad_sio = StringIO(data)\n\n result = parser.read_csv(bad_sio, on_bad_lines=lambda x: None)\n expected = DataFrame({"a": [1, 3], "b": [2, 4]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_on_bad_lines_index_col_inferred(python_parser_only):\n # GH 5686\n parser = python_parser_only\n data = """a,b\n1,2,3\n4,5,6\n"""\n bad_sio = StringIO(data)\n\n result = parser.read_csv(bad_sio, on_bad_lines=lambda x: ["99", "99"])\n expected = DataFrame({"a": [2, 5], "b": [3, 6]}, index=[1, 4])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_index_col_false_and_header_none(python_parser_only):\n # GH#46955\n parser = python_parser_only\n data = """\n0.5,0.03\n0.1,0.2,0.3,2\n"""\n result = parser.read_csv_check_warnings(\n ParserWarning,\n "Length of header",\n StringIO(data),\n sep=",",\n header=None,\n index_col=False,\n )\n expected = DataFrame({0: [0.5, 0.1], 1: [0.03, 0.2]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_header_int_do_not_infer_multiindex_names_on_different_line(python_parser_only):\n # GH#46569\n parser = python_parser_only\n data = StringIO("a\na,b\nc,d,e\nf,g,h")\n result = parser.read_csv_check_warnings(\n ParserWarning, "Length of header", data, engine="python", index_col=False\n )\n expected = DataFrame({"a": ["a", "c", "f"]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "dtype", [{"a": object}, {"a": str, "b": np.int64, "c": np.int64}]\n)\ndef test_no_thousand_convert_with_dot_for_non_numeric_cols(python_parser_only, dtype):\n # GH#50270\n parser = python_parser_only\n data = """\\na;b;c\n0000.7995;16.000;0\n3.03.001.00514;0;4.000\n4923.600.041;23.000;131"""\n result = parser.read_csv(\n StringIO(data),\n sep=";",\n dtype=dtype,\n thousands=".",\n )\n expected = DataFrame(\n {\n "a": ["0000.7995", "3.03.001.00514", "4923.600.041"],\n "b": [16000, 0, 
23000],\n "c": [0, 4000, 131],\n }\n )\n if dtype["a"] == object:\n expected["a"] = expected["a"].astype(object)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "dtype,expected",\n [\n (\n {"a": str, "b": np.float64, "c": np.int64},\n DataFrame(\n {\n "b": [16000.1, 0, 23000],\n "c": [0, 4001, 131],\n }\n ),\n ),\n (\n str,\n DataFrame(\n {\n "b": ["16,000.1", "0", "23,000"],\n "c": ["0", "4,001", "131"],\n }\n ),\n ),\n ],\n)\ndef test_no_thousand_convert_for_non_numeric_cols(python_parser_only, dtype, expected):\n # GH#50270\n parser = python_parser_only\n data = """a;b;c\n0000,7995;16,000.1;0\n3,03,001,00514;0;4,001\n4923,600,041;23,000;131\n"""\n result = parser.read_csv(\n StringIO(data),\n sep=";",\n dtype=dtype,\n thousands=",",\n )\n expected.insert(0, "a", ["0000,7995", "3,03,001,00514", "4923,600,041"])\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_python_parser_only.py | test_python_parser_only.py | Python | 15,979 | 0.95 | 0.079505 | 0.070213 | python-kit | 265 | 2025-07-01T00:51:48.296270 | Apache-2.0 | true | 31cfe90a6f52f32c523dcea0a774ac22 |
"""\nTests that quoting specifications are properly handled\nduring parsing for all of the parsers defined in parsers.py\n"""\n\nimport csv\nfrom io import StringIO\n\nimport pytest\n\nfrom pandas.compat import PY311\nfrom pandas.errors import ParserError\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n\n@pytest.mark.parametrize(\n "kwargs,msg",\n [\n ({"quotechar": "foo"}, '"quotechar" must be a(n)? 1-character string'),\n (\n {"quotechar": None, "quoting": csv.QUOTE_MINIMAL},\n "quotechar must be set if quoting enabled",\n ),\n ({"quotechar": 2}, '"quotechar" must be string( or None)?, not int'),\n ],\n)\n@skip_pyarrow # ParserError: CSV parse error: Empty CSV file or block\ndef test_bad_quote_char(all_parsers, kwargs, msg):\n data = "1,2,3"\n parser = all_parsers\n\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), **kwargs)\n\n\n@pytest.mark.parametrize(\n "quoting,msg",\n [\n ("foo", '"quoting" must be an integer|Argument'),\n (10, 'bad "quoting" value'), # quoting must be in the range [0, 3]\n ],\n)\n@xfail_pyarrow # ValueError: The 'quoting' option is not supported\ndef test_bad_quoting(all_parsers, quoting, msg):\n data = "1,2,3"\n parser = all_parsers\n\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), quoting=quoting)\n\n\ndef test_quote_char_basic(all_parsers):\n parser = all_parsers\n data = 'a,b,c\n1,2,"cat"'\n expected = DataFrame([[1, 2, "cat"]], columns=["a", "b", "c"])\n\n result = parser.read_csv(StringIO(data), quotechar='"')\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("quote_char", ["~", "*", "%", "$", "@", "P"])\ndef test_quote_char_various(all_parsers, quote_char):\n parser = all_parsers\n expected = DataFrame([[1, 2, 
"cat"]], columns=["a", "b", "c"])\n\n data = 'a,b,c\n1,2,"cat"'\n new_data = data.replace('"', quote_char)\n\n result = parser.read_csv(StringIO(new_data), quotechar=quote_char)\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # ValueError: The 'quoting' option is not supported\n@pytest.mark.parametrize("quoting", [csv.QUOTE_MINIMAL, csv.QUOTE_NONE])\n@pytest.mark.parametrize("quote_char", ["", None])\ndef test_null_quote_char(all_parsers, quoting, quote_char):\n kwargs = {"quotechar": quote_char, "quoting": quoting}\n data = "a,b,c\n1,2,3"\n parser = all_parsers\n\n if quoting != csv.QUOTE_NONE:\n # Sanity checking.\n msg = (\n '"quotechar" must be a 1-character string'\n if PY311 and all_parsers.engine == "python" and quote_char == ""\n else "quotechar must be set if quoting enabled"\n )\n\n with pytest.raises(TypeError, match=msg):\n parser.read_csv(StringIO(data), **kwargs)\n elif not (PY311 and all_parsers.engine == "python"):\n # Python 3.11+ doesn't support null/blank quote chars in their csv parsers\n expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"])\n result = parser.read_csv(StringIO(data), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "kwargs,exp_data",\n [\n ({}, [[1, 2, "foo"]]), # Test default.\n # QUOTE_MINIMAL only applies to CSV writing, so no effect on reading.\n ({"quotechar": '"', "quoting": csv.QUOTE_MINIMAL}, [[1, 2, "foo"]]),\n # QUOTE_MINIMAL only applies to CSV writing, so no effect on reading.\n ({"quotechar": '"', "quoting": csv.QUOTE_ALL}, [[1, 2, "foo"]]),\n # QUOTE_NONE tells the reader to do no special handling\n # of quote characters and leave them alone.\n ({"quotechar": '"', "quoting": csv.QUOTE_NONE}, [[1, 2, '"foo"']]),\n # QUOTE_NONNUMERIC tells the reader to cast\n # all non-quoted fields to float\n ({"quotechar": '"', "quoting": csv.QUOTE_NONNUMERIC}, [[1.0, 2.0, "foo"]]),\n ],\n)\n@xfail_pyarrow # ValueError: The 'quoting' option is not supported\ndef 
test_quoting_various(all_parsers, kwargs, exp_data):\n data = '1,2,"foo"'\n parser = all_parsers\n columns = ["a", "b", "c"]\n\n result = parser.read_csv(StringIO(data), names=columns, **kwargs)\n expected = DataFrame(exp_data, columns=columns)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "doublequote,exp_data", [(True, [[3, '4 " 5']]), (False, [[3, '4 " 5"']])]\n)\ndef test_double_quote(all_parsers, doublequote, exp_data, request):\n parser = all_parsers\n data = 'a,b\n3,"4 "" 5"'\n\n if parser.engine == "pyarrow" and not doublequote:\n mark = pytest.mark.xfail(reason="Mismatched result")\n request.applymarker(mark)\n\n result = parser.read_csv(StringIO(data), quotechar='"', doublequote=doublequote)\n expected = DataFrame(exp_data, columns=["a", "b"])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("quotechar", ['"', "\u0001"])\ndef test_quotechar_unicode(all_parsers, quotechar):\n # see gh-14477\n data = "a\n1"\n parser = all_parsers\n expected = DataFrame({"a": [1]})\n\n result = parser.read_csv(StringIO(data), quotechar=quotechar)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("balanced", [True, False])\ndef test_unbalanced_quoting(all_parsers, balanced, request):\n # see gh-22789.\n parser = all_parsers\n data = 'a,b,c\n1,2,"3'\n\n if parser.engine == "pyarrow" and not balanced:\n mark = pytest.mark.xfail(reason="Mismatched result")\n request.applymarker(mark)\n\n if balanced:\n # Re-balance the quoting and read in without errors.\n expected = DataFrame([[1, 2, 3]], columns=["a", "b", "c"])\n result = parser.read_csv(StringIO(data + '"'))\n tm.assert_frame_equal(result, expected)\n else:\n msg = (\n "EOF inside string starting at row 1"\n if parser.engine == "c"\n else "unexpected end of data"\n )\n\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data))\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_quoting.py | test_quoting.py | Python | 6,244 | 
0.95 | 0.098361 | 0.075342 | node-utils | 422 | 2025-04-12T22:34:16.487390 | BSD-3-Clause | true | cf5c98bcaa2e67e0e46f532bf5c32de2 |
"""\nTests the 'read_fwf' function in parsers.py. This\ntest suite is independent of the others because the\nengine is set to 'python-fwf' internally.\n"""\n\nfrom datetime import datetime\nfrom io import (\n BytesIO,\n StringIO,\n)\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import EmptyDataError\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n DatetimeIndex,\n)\nimport pandas._testing as tm\n\nfrom pandas.io.common import urlopen\nfrom pandas.io.parsers import (\n read_csv,\n read_fwf,\n)\n\n\ndef test_basic():\n data = """\\nA B C D\n201158 360.242940 149.910199 11950.7\n201159 444.953632 166.985655 11788.4\n201160 364.136849 183.628767 11806.2\n201161 413.836124 184.375703 11916.8\n201162 502.953953 173.237159 12468.3\n"""\n result = read_fwf(StringIO(data))\n expected = DataFrame(\n [\n [201158, 360.242940, 149.910199, 11950.7],\n [201159, 444.953632, 166.985655, 11788.4],\n [201160, 364.136849, 183.628767, 11806.2],\n [201161, 413.836124, 184.375703, 11916.8],\n [201162, 502.953953, 173.237159, 12468.3],\n ],\n columns=["A", "B", "C", "D"],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_colspecs():\n data = """\\nA B C D E\n201158 360.242940 149.910199 11950.7\n201159 444.953632 166.985655 11788.4\n201160 364.136849 183.628767 11806.2\n201161 413.836124 184.375703 11916.8\n201162 502.953953 173.237159 12468.3\n"""\n colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]\n result = read_fwf(StringIO(data), colspecs=colspecs)\n\n expected = DataFrame(\n [\n [2011, 58, 360.242940, 149.910199, 11950.7],\n [2011, 59, 444.953632, 166.985655, 11788.4],\n [2011, 60, 364.136849, 183.628767, 11806.2],\n [2011, 61, 413.836124, 184.375703, 11916.8],\n [2011, 62, 502.953953, 173.237159, 12468.3],\n ],\n columns=["A", "B", "C", "D", "E"],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_widths():\n data = """\\nA B C D E\n2011 58 360.242940 149.910199 11950.7\n2011 59 444.953632 166.985655 
11788.4\n2011 60 364.136849 183.628767 11806.2\n2011 61 413.836124 184.375703 11916.8\n2011 62 502.953953 173.237159 12468.3\n"""\n result = read_fwf(StringIO(data), widths=[5, 5, 13, 13, 7])\n\n expected = DataFrame(\n [\n [2011, 58, 360.242940, 149.910199, 11950.7],\n [2011, 59, 444.953632, 166.985655, 11788.4],\n [2011, 60, 364.136849, 183.628767, 11806.2],\n [2011, 61, 413.836124, 184.375703, 11916.8],\n [2011, 62, 502.953953, 173.237159, 12468.3],\n ],\n columns=["A", "B", "C", "D", "E"],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_non_space_filler():\n # From Thomas Kluyver:\n #\n # Apparently, some non-space filler characters can be seen, this is\n # supported by specifying the 'delimiter' character:\n #\n # http://publib.boulder.ibm.com/infocenter/dmndhelp/v6r1mx/index.jsp?topic=/com.ibm.wbit.612.help.config.doc/topics/rfixwidth.html\n data = """\\nA~~~~B~~~~C~~~~~~~~~~~~D~~~~~~~~~~~~E\n201158~~~~360.242940~~~149.910199~~~11950.7\n201159~~~~444.953632~~~166.985655~~~11788.4\n201160~~~~364.136849~~~183.628767~~~11806.2\n201161~~~~413.836124~~~184.375703~~~11916.8\n201162~~~~502.953953~~~173.237159~~~12468.3\n"""\n colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]\n result = read_fwf(StringIO(data), colspecs=colspecs, delimiter="~")\n\n expected = DataFrame(\n [\n [2011, 58, 360.242940, 149.910199, 11950.7],\n [2011, 59, 444.953632, 166.985655, 11788.4],\n [2011, 60, 364.136849, 183.628767, 11806.2],\n [2011, 61, 413.836124, 184.375703, 11916.8],\n [2011, 62, 502.953953, 173.237159, 12468.3],\n ],\n columns=["A", "B", "C", "D", "E"],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_over_specified():\n data = """\\nA B C D E\n201158 360.242940 149.910199 11950.7\n201159 444.953632 166.985655 11788.4\n201160 364.136849 183.628767 11806.2\n201161 413.836124 184.375703 11916.8\n201162 502.953953 173.237159 12468.3\n"""\n colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]\n\n with pytest.raises(ValueError, match="must 
specify only one of"):\n read_fwf(StringIO(data), colspecs=colspecs, widths=[6, 10, 10, 7])\n\n\ndef test_under_specified():\n data = """\\nA B C D E\n201158 360.242940 149.910199 11950.7\n201159 444.953632 166.985655 11788.4\n201160 364.136849 183.628767 11806.2\n201161 413.836124 184.375703 11916.8\n201162 502.953953 173.237159 12468.3\n"""\n with pytest.raises(ValueError, match="Must specify either"):\n read_fwf(StringIO(data), colspecs=None, widths=None)\n\n\ndef test_read_csv_compat():\n csv_data = """\\nA,B,C,D,E\n2011,58,360.242940,149.910199,11950.7\n2011,59,444.953632,166.985655,11788.4\n2011,60,364.136849,183.628767,11806.2\n2011,61,413.836124,184.375703,11916.8\n2011,62,502.953953,173.237159,12468.3\n"""\n expected = read_csv(StringIO(csv_data), engine="python")\n\n fwf_data = """\\nA B C D E\n201158 360.242940 149.910199 11950.7\n201159 444.953632 166.985655 11788.4\n201160 364.136849 183.628767 11806.2\n201161 413.836124 184.375703 11916.8\n201162 502.953953 173.237159 12468.3\n"""\n colspecs = [(0, 4), (4, 8), (8, 20), (21, 33), (34, 43)]\n result = read_fwf(StringIO(fwf_data), colspecs=colspecs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_bytes_io_input():\n data = BytesIO("שלום\nשלום".encode()) # noqa: RUF001\n result = read_fwf(data, widths=[2, 2], encoding="utf8")\n expected = DataFrame([["של", "ום"]], columns=["של", "ום"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_fwf_colspecs_is_list_or_tuple():\n data = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n"""\n\n msg = "column specifications must be a list or tuple.+"\n\n with pytest.raises(TypeError, match=msg):\n read_fwf(StringIO(data), colspecs={"a": 1}, delimiter=",")\n\n\ndef test_fwf_colspecs_is_list_or_tuple_of_two_element_tuples():\n data = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n"""\n\n msg = "Each column specification must 
be.+"\n\n with pytest.raises(TypeError, match=msg):\n read_fwf(StringIO(data), colspecs=[("a", 1)])\n\n\n@pytest.mark.parametrize(\n "colspecs,exp_data",\n [\n ([(0, 3), (3, None)], [[123, 456], [456, 789]]),\n ([(None, 3), (3, 6)], [[123, 456], [456, 789]]),\n ([(0, None), (3, None)], [[123456, 456], [456789, 789]]),\n ([(None, None), (3, 6)], [[123456, 456], [456789, 789]]),\n ],\n)\ndef test_fwf_colspecs_none(colspecs, exp_data):\n # see gh-7079\n data = """\\n123456\n456789\n"""\n expected = DataFrame(exp_data)\n\n result = read_fwf(StringIO(data), colspecs=colspecs, header=None)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "infer_nrows,exp_data",\n [\n # infer_nrows --> colspec == [(2, 3), (5, 6)]\n (1, [[1, 2], [3, 8]]),\n # infer_nrows > number of rows\n (10, [[1, 2], [123, 98]]),\n ],\n)\ndef test_fwf_colspecs_infer_nrows(infer_nrows, exp_data):\n # see gh-15138\n data = """\\n 1 2\n123 98\n"""\n expected = DataFrame(exp_data)\n\n result = read_fwf(StringIO(data), infer_nrows=infer_nrows, header=None)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_fwf_regression():\n # see gh-3594\n #\n # Turns out "T060" is parsable as a datetime slice!\n tz_list = [1, 10, 20, 30, 60, 80, 100]\n widths = [16] + [8] * len(tz_list)\n names = ["SST"] + [f"T{z:03d}" for z in tz_list[1:]]\n\n data = """ 2009164202000 9.5403 9.4105 8.6571 7.8372 6.0612 5.8843 5.5192\n2009164203000 9.5435 9.2010 8.6167 7.8176 6.0804 5.8728 5.4869\n2009164204000 9.5873 9.1326 8.4694 7.5889 6.0422 5.8526 5.4657\n2009164205000 9.5810 9.0896 8.4009 7.4652 6.0322 5.8189 5.4379\n2009164210000 9.6034 9.0897 8.3822 7.4905 6.0908 5.7904 5.4039\n"""\n\n with tm.assert_produces_warning(FutureWarning, match="use 'date_format' instead"):\n result = read_fwf(\n StringIO(data),\n index_col=0,\n header=None,\n names=names,\n widths=widths,\n parse_dates=True,\n date_parser=lambda s: datetime.strptime(s, "%Y%j%H%M%S"),\n )\n expected = DataFrame(\n [\n [9.5403, 9.4105, 
8.6571, 7.8372, 6.0612, 5.8843, 5.5192],\n [9.5435, 9.2010, 8.6167, 7.8176, 6.0804, 5.8728, 5.4869],\n [9.5873, 9.1326, 8.4694, 7.5889, 6.0422, 5.8526, 5.4657],\n [9.5810, 9.0896, 8.4009, 7.4652, 6.0322, 5.8189, 5.4379],\n [9.6034, 9.0897, 8.3822, 7.4905, 6.0908, 5.7904, 5.4039],\n ],\n index=DatetimeIndex(\n [\n "2009-06-13 20:20:00",\n "2009-06-13 20:30:00",\n "2009-06-13 20:40:00",\n "2009-06-13 20:50:00",\n "2009-06-13 21:00:00",\n ]\n ),\n columns=["SST", "T010", "T020", "T030", "T060", "T080", "T100"],\n )\n tm.assert_frame_equal(result, expected)\n result = read_fwf(\n StringIO(data),\n index_col=0,\n header=None,\n names=names,\n widths=widths,\n parse_dates=True,\n date_format="%Y%j%H%M%S",\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_fwf_for_uint8():\n data = """1421302965.213420 PRI=3 PGN=0xef00 DST=0x17 SRC=0x28 04 154 00 00 00 00 00 127\n1421302964.226776 PRI=6 PGN=0xf002 SRC=0x47 243 00 00 255 247 00 00 71""" # noqa: E501\n df = read_fwf(\n StringIO(data),\n colspecs=[(0, 17), (25, 26), (33, 37), (49, 51), (58, 62), (63, 1000)],\n names=["time", "pri", "pgn", "dst", "src", "data"],\n converters={\n "pgn": lambda x: int(x, 16),\n "src": lambda x: int(x, 16),\n "dst": lambda x: int(x, 16),\n "data": lambda x: len(x.split(" ")),\n },\n )\n\n expected = DataFrame(\n [\n [1421302965.213420, 3, 61184, 23, 40, 8],\n [1421302964.226776, 6, 61442, None, 71, 8],\n ],\n columns=["time", "pri", "pgn", "dst", "src", "data"],\n )\n expected["dst"] = expected["dst"].astype(object)\n tm.assert_frame_equal(df, expected)\n\n\n@pytest.mark.parametrize("comment", ["#", "~", "!"])\ndef test_fwf_comment(comment):\n data = """\\n 1 2. 
4 #hello world\n 5 NaN 10.0\n"""\n data = data.replace("#", comment)\n\n colspecs = [(0, 3), (4, 9), (9, 25)]\n expected = DataFrame([[1, 2.0, 4], [5, np.nan, 10.0]])\n\n result = read_fwf(StringIO(data), colspecs=colspecs, header=None, comment=comment)\n tm.assert_almost_equal(result, expected)\n\n\ndef test_fwf_skip_blank_lines():\n data = """\n\nA B C D\n\n201158 360.242940 149.910199 11950.7\n201159 444.953632 166.985655 11788.4\n\n\n201162 502.953953 173.237159 12468.3\n\n"""\n result = read_fwf(StringIO(data), skip_blank_lines=True)\n expected = DataFrame(\n [\n [201158, 360.242940, 149.910199, 11950.7],\n [201159, 444.953632, 166.985655, 11788.4],\n [201162, 502.953953, 173.237159, 12468.3],\n ],\n columns=["A", "B", "C", "D"],\n )\n tm.assert_frame_equal(result, expected)\n\n data = """\\nA B C D\n201158 360.242940 149.910199 11950.7\n201159 444.953632 166.985655 11788.4\n\n\n201162 502.953953 173.237159 12468.3\n"""\n result = read_fwf(StringIO(data), skip_blank_lines=False)\n expected = DataFrame(\n [\n [201158, 360.242940, 149.910199, 11950.7],\n [201159, 444.953632, 166.985655, 11788.4],\n [np.nan, np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan, np.nan],\n [201162, 502.953953, 173.237159, 12468.3],\n ],\n columns=["A", "B", "C", "D"],\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("thousands", [",", "#", "~"])\ndef test_fwf_thousands(thousands):\n data = """\\n 1 2,334.0 5\n10 13 10.\n"""\n data = data.replace(",", thousands)\n\n colspecs = [(0, 3), (3, 11), (12, 16)]\n expected = DataFrame([[1, 2334.0, 5], [10, 13, 10.0]])\n\n result = read_fwf(\n StringIO(data), header=None, colspecs=colspecs, thousands=thousands\n )\n tm.assert_almost_equal(result, expected)\n\n\n@pytest.mark.parametrize("header", [True, False])\ndef test_bool_header_arg(header):\n # see gh-6114\n data = """\\nMyColumn\n a\n b\n a\n b"""\n\n msg = "Passing a bool to header is invalid"\n with pytest.raises(TypeError, match=msg):\n 
read_fwf(StringIO(data), header=header)\n\n\ndef test_full_file():\n # File with all values.\n test = """index A B C\n2000-01-03T00:00:00 0.980268513777 3 foo\n2000-01-04T00:00:00 1.04791624281 -4 bar\n2000-01-05T00:00:00 0.498580885705 73 baz\n2000-01-06T00:00:00 1.12020151869 1 foo\n2000-01-07T00:00:00 0.487094399463 0 bar\n2000-01-10T00:00:00 0.836648671666 2 baz\n2000-01-11T00:00:00 0.157160753327 34 foo"""\n colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))\n expected = read_fwf(StringIO(test), colspecs=colspecs)\n\n result = read_fwf(StringIO(test))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_full_file_with_missing():\n # File with missing values.\n test = """index A B C\n2000-01-03T00:00:00 0.980268513777 3 foo\n2000-01-04T00:00:00 1.04791624281 -4 bar\n 0.498580885705 73 baz\n2000-01-06T00:00:00 1.12020151869 1 foo\n2000-01-07T00:00:00 0 bar\n2000-01-10T00:00:00 0.836648671666 2 baz\n 34"""\n colspecs = ((0, 19), (21, 35), (38, 40), (42, 45))\n expected = read_fwf(StringIO(test), colspecs=colspecs)\n\n result = read_fwf(StringIO(test))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_full_file_with_spaces():\n # File with spaces in columns.\n test = """\nAccount Name Balance CreditLimit AccountCreated\n101 Keanu Reeves 9315.45 10000.00 1/17/1998\n312 Gerard Butler 90.00 1000.00 8/6/2003\n868 Jennifer Love Hewitt 0 17000.00 5/25/1985\n761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006\n317 Bill Murray 789.65 5000.00 2/5/2007\n""".strip(\n "\r\n"\n )\n colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))\n expected = read_fwf(StringIO(test), colspecs=colspecs)\n\n result = read_fwf(StringIO(test))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_full_file_with_spaces_and_missing():\n # File with spaces and missing values in columns.\n test = """\nAccount Name Balance CreditLimit AccountCreated\n101 10000.00 1/17/1998\n312 Gerard Butler 90.00 1000.00 8/6/2003\n868 5/25/1985\n761 Jada Pinkett-Smith 49654.87 100000.00 
12/5/2006\n317 Bill Murray 789.65\n""".strip(\n "\r\n"\n )\n colspecs = ((0, 7), (8, 28), (30, 38), (42, 53), (56, 70))\n expected = read_fwf(StringIO(test), colspecs=colspecs)\n\n result = read_fwf(StringIO(test))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_messed_up_data():\n # Completely messed up file.\n test = """\n Account Name Balance Credit Limit Account Created\n 101 10000.00 1/17/1998\n 312 Gerard Butler 90.00 1000.00\n\n 761 Jada Pinkett-Smith 49654.87 100000.00 12/5/2006\n 317 Bill Murray 789.65\n""".strip(\n "\r\n"\n )\n colspecs = ((2, 10), (15, 33), (37, 45), (49, 61), (64, 79))\n expected = read_fwf(StringIO(test), colspecs=colspecs)\n\n result = read_fwf(StringIO(test))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_multiple_delimiters():\n test = r"""\ncol1~~~~~col2 col3++++++++++++++++++col4\n~~22.....11.0+++foo~~~~~~~~~~Keanu Reeves\n 33+++122.33\\\bar.........Gerard Butler\n++44~~~~12.01 baz~~Jennifer Love Hewitt\n~~55 11+++foo++++Jada Pinkett-Smith\n..66++++++.03~~~bar Bill Murray\n""".strip(\n "\r\n"\n )\n delimiter = " +~.\\"\n colspecs = ((0, 4), (7, 13), (15, 19), (21, 41))\n expected = read_fwf(StringIO(test), colspecs=colspecs, delimiter=delimiter)\n\n result = read_fwf(StringIO(test), delimiter=delimiter)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_variable_width_unicode():\n data = """\nשלום שלום\nום שלל\nשל ום\n""".strip(\n "\r\n"\n )\n encoding = "utf8"\n kwargs = {"header": None, "encoding": encoding}\n\n expected = read_fwf(\n BytesIO(data.encode(encoding)), colspecs=[(0, 4), (5, 9)], **kwargs\n )\n result = read_fwf(BytesIO(data.encode(encoding)), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("dtype", [{}, {"a": "float64", "b": str, "c": "int32"}])\ndef test_dtype(dtype):\n data = """ a b c\n1 2 3.2\n3 4 5.2\n"""\n colspecs = [(0, 5), (5, 10), (10, None)]\n result = read_fwf(StringIO(data), colspecs=colspecs, dtype=dtype)\n\n expected = DataFrame(\n {"a": [1, 3], 
"b": [2, 4], "c": [3.2, 5.2]}, columns=["a", "b", "c"]\n )\n\n for col, dt in dtype.items():\n expected[col] = expected[col].astype(dt)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_skiprows_inference():\n # see gh-11256\n data = """\nText contained in the file header\n\nDataCol1 DataCol2\n 0.0 1.0\n 101.6 956.1\n""".strip()\n skiprows = 2\n\n depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n expected = read_csv(StringIO(data), skiprows=skiprows, delim_whitespace=True)\n\n result = read_fwf(StringIO(data), skiprows=skiprows)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_skiprows_by_index_inference():\n data = """\nTo be skipped\nNot To Be Skipped\nOnce more to be skipped\n123 34 8 123\n456 78 9 456\n""".strip()\n skiprows = [0, 2]\n\n depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n expected = read_csv(StringIO(data), skiprows=skiprows, delim_whitespace=True)\n\n result = read_fwf(StringIO(data), skiprows=skiprows)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_skiprows_inference_empty():\n data = """\nAA BBB C\n12 345 6\n78 901 2\n""".strip()\n\n msg = "No rows from which to infer column width"\n with pytest.raises(EmptyDataError, match=msg):\n read_fwf(StringIO(data), skiprows=3)\n\n\ndef test_whitespace_preservation():\n # see gh-16772\n header = None\n csv_data = """\n a ,bbb\n cc,dd """\n\n fwf_data = """\n a bbb\n ccdd """\n result = read_fwf(\n StringIO(fwf_data), widths=[3, 3], header=header, skiprows=[0], delimiter="\n\t"\n )\n expected = read_csv(StringIO(csv_data), header=header)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_default_delimiter():\n header = None\n csv_data = """\na,bbb\ncc,dd"""\n\n fwf_data = """\na \tbbb\ncc\tdd """\n result = read_fwf(StringIO(fwf_data), widths=[3, 3], header=header, skiprows=[0])\n expected = 
read_csv(StringIO(csv_data), header=header)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("infer", [True, False])\ndef test_fwf_compression(compression_only, infer, compression_to_extension):\n data = """1111111111\n 2222222222\n 3333333333""".strip()\n\n compression = compression_only\n extension = compression_to_extension[compression]\n\n kwargs = {"widths": [5, 5], "names": ["one", "two"]}\n expected = read_fwf(StringIO(data), **kwargs)\n\n data = bytes(data, encoding="utf-8")\n\n with tm.ensure_clean(filename="tmp." + extension) as path:\n tm.write_to_compressed(compression, path, data)\n\n if infer is not None:\n kwargs["compression"] = "infer" if infer else compression\n\n result = read_fwf(path, **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_binary_mode():\n """\n read_fwf supports opening files in binary mode.\n\n GH 18035.\n """\n data = """aaa aaa aaa\nbba bab b a"""\n df_reference = DataFrame(\n [["bba", "bab", "b a"]], columns=["aaa", "aaa.1", "aaa.2"], index=[0]\n )\n with tm.ensure_clean() as path:\n Path(path).write_text(data, encoding="utf-8")\n with open(path, "rb") as file:\n df = read_fwf(file)\n file.seek(0)\n tm.assert_frame_equal(df, df_reference)\n\n\n@pytest.mark.parametrize("memory_map", [True, False])\ndef test_encoding_mmap(memory_map):\n """\n encoding should be working, even when using a memory-mapped file.\n\n GH 23254.\n """\n encoding = "iso8859_1"\n with tm.ensure_clean() as path:\n Path(path).write_bytes(" 1 A Ä 2\n".encode(encoding))\n df = read_fwf(\n path,\n header=None,\n widths=[2, 2, 2, 2],\n encoding=encoding,\n memory_map=memory_map,\n )\n df_reference = DataFrame([[1, "A", "Ä", 2]])\n tm.assert_frame_equal(df, df_reference)\n\n\n@pytest.mark.parametrize(\n "colspecs, names, widths, index_col",\n [\n (\n [(0, 6), (6, 12), (12, 18), (18, None)],\n list("abcde"),\n None,\n None,\n ),\n (\n None,\n list("abcde"),\n [6] * 4,\n None,\n ),\n (\n [(0, 6), (6, 12), (12, 18), (18, 
None)],\n list("abcde"),\n None,\n True,\n ),\n (\n None,\n list("abcde"),\n [6] * 4,\n False,\n ),\n (\n None,\n list("abcde"),\n [6] * 4,\n True,\n ),\n (\n [(0, 6), (6, 12), (12, 18), (18, None)],\n list("abcde"),\n None,\n False,\n ),\n ],\n)\ndef test_len_colspecs_len_names(colspecs, names, widths, index_col):\n # GH#40830\n data = """col1 col2 col3 col4\n bab ba 2"""\n msg = "Length of colspecs must match length of names"\n with pytest.raises(ValueError, match=msg):\n read_fwf(\n StringIO(data),\n colspecs=colspecs,\n names=names,\n widths=widths,\n index_col=index_col,\n )\n\n\n@pytest.mark.parametrize(\n "colspecs, names, widths, index_col, expected",\n [\n (\n [(0, 6), (6, 12), (12, 18), (18, None)],\n list("abc"),\n None,\n 0,\n DataFrame(\n index=["col1", "ba"],\n columns=["a", "b", "c"],\n data=[["col2", "col3", "col4"], ["b ba", "2", np.nan]],\n ),\n ),\n (\n [(0, 6), (6, 12), (12, 18), (18, None)],\n list("ab"),\n None,\n [0, 1],\n DataFrame(\n index=[["col1", "ba"], ["col2", "b ba"]],\n columns=["a", "b"],\n data=[["col3", "col4"], ["2", np.nan]],\n ),\n ),\n (\n [(0, 6), (6, 12), (12, 18), (18, None)],\n list("a"),\n None,\n [0, 1, 2],\n DataFrame(\n index=[["col1", "ba"], ["col2", "b ba"], ["col3", "2"]],\n columns=["a"],\n data=[["col4"], [np.nan]],\n ),\n ),\n (\n None,\n list("abc"),\n [6] * 4,\n 0,\n DataFrame(\n index=["col1", "ba"],\n columns=["a", "b", "c"],\n data=[["col2", "col3", "col4"], ["b ba", "2", np.nan]],\n ),\n ),\n (\n None,\n list("ab"),\n [6] * 4,\n [0, 1],\n DataFrame(\n index=[["col1", "ba"], ["col2", "b ba"]],\n columns=["a", "b"],\n data=[["col3", "col4"], ["2", np.nan]],\n ),\n ),\n (\n None,\n list("a"),\n [6] * 4,\n [0, 1, 2],\n DataFrame(\n index=[["col1", "ba"], ["col2", "b ba"], ["col3", "2"]],\n columns=["a"],\n data=[["col4"], [np.nan]],\n ),\n ),\n ],\n)\ndef test_len_colspecs_len_names_with_index_col(\n colspecs, names, widths, index_col, expected\n):\n # GH#40830\n data = """col1 col2 col3 col4\n bab ba 2"""\n 
result = read_fwf(\n StringIO(data),\n colspecs=colspecs,\n names=names,\n widths=widths,\n index_col=index_col,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_colspecs_with_comment():\n # GH 14135\n result = read_fwf(\n StringIO("#\nA1K\n"), colspecs=[(1, 2), (2, 3)], comment="#", header=None\n )\n expected = DataFrame([[1, "K"]], columns=[0, 1])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_skip_rows_and_n_rows():\n # GH#44021\n data = """a\tb\n1\t a\n2\t b\n3\t c\n4\t d\n5\t e\n6\t f\n """\n result = read_fwf(StringIO(data), nrows=4, skiprows=[2, 4])\n expected = DataFrame({"a": [1, 3, 5, 6], "b": ["a", "c", "e", "f"]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_skiprows_with_iterator():\n # GH#10261, GH#56323\n data = """0\n1\n2\n3\n4\n5\n6\n7\n8\n9\n """\n df_iter = read_fwf(\n StringIO(data),\n colspecs=[(0, 2)],\n names=["a"],\n iterator=True,\n chunksize=2,\n skiprows=[0, 1, 2, 6, 9],\n )\n expected_frames = [\n DataFrame({"a": [3, 4]}),\n DataFrame({"a": [5, 7]}, index=[2, 3]),\n DataFrame({"a": [8]}, index=[4]),\n ]\n for i, result in enumerate(df_iter):\n tm.assert_frame_equal(result, expected_frames[i])\n\n\ndef test_names_and_infer_colspecs():\n # GH#45337\n data = """X Y Z\n 959.0 345 22.2\n """\n result = read_fwf(StringIO(data), skiprows=1, usecols=[0, 2], names=["a", "b"])\n expected = DataFrame({"a": [959.0], "b": 22.2})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_widths_and_usecols():\n # GH#46580\n data = """0 1 n -0.4100.1\n0 2 p 0.2 90.1\n0 3 n -0.3140.4"""\n result = read_fwf(\n StringIO(data),\n header=None,\n usecols=(0, 1, 3),\n widths=(3, 5, 1, 5, 5),\n index_col=False,\n names=("c0", "c1", "c3"),\n )\n expected = DataFrame(\n {\n "c0": 0,\n "c1": [1, 2, 3],\n "c3": [-0.4, 0.2, -0.3],\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_dtype_backend(string_storage, dtype_backend):\n # GH#50289\n data = """a b c d e f g h i\n1 2.5 True a\n3 4.5 False b True 6 7.5 a"""\n with 
pd.option_context("mode.string_storage", string_storage):\n result = read_fwf(StringIO(data), dtype_backend=dtype_backend)\n\n if dtype_backend == "pyarrow":\n pa = pytest.importorskip("pyarrow")\n string_dtype = pd.ArrowDtype(pa.string())\n else:\n string_dtype = pd.StringDtype(string_storage)\n\n expected = DataFrame(\n {\n "a": pd.Series([1, 3], dtype="Int64"),\n "b": pd.Series([2.5, 4.5], dtype="Float64"),\n "c": pd.Series([True, False], dtype="boolean"),\n "d": pd.Series(["a", "b"], dtype=string_dtype),\n "e": pd.Series([pd.NA, True], dtype="boolean"),\n "f": pd.Series([pd.NA, 6], dtype="Int64"),\n "g": pd.Series([pd.NA, 7.5], dtype="Float64"),\n "h": pd.Series([None, "a"], dtype=string_dtype),\n "i": pd.Series([pd.NA, pd.NA], dtype="Int64"),\n }\n )\n if dtype_backend == "pyarrow":\n pa = pytest.importorskip("pyarrow")\n from pandas.arrays import ArrowExtensionArray\n\n expected = DataFrame(\n {\n col: ArrowExtensionArray(pa.array(expected[col], from_pandas=True))\n for col in expected.columns\n }\n )\n expected["i"] = ArrowExtensionArray(pa.array([None, None]))\n\n # the storage of the str columns' Index is also affected by the\n # string_storage setting -> ignore that for checking the result\n tm.assert_frame_equal(result, expected, check_column_type=False)\n\n\ndef test_invalid_dtype_backend():\n msg = (\n "dtype_backend numpy is invalid, only 'numpy_nullable' and "\n "'pyarrow' are allowed."\n )\n with pytest.raises(ValueError, match=msg):\n read_fwf("test", dtype_backend="numpy")\n\n\n@pytest.mark.network\n@pytest.mark.single_cpu\ndef test_url_urlopen(httpserver):\n data = """\\nA B C D\n201158 360.242940 149.910199 11950.7\n201159 444.953632 166.985655 11788.4\n201160 364.136849 183.628767 11806.2\n201161 413.836124 184.375703 11916.8\n201162 502.953953 173.237159 12468.3\n"""\n httpserver.serve_content(content=data)\n expected = pd.Index(list("ABCD"))\n with urlopen(httpserver.url) as f:\n result = read_fwf(f).columns\n\n tm.assert_index_equal(result, 
expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_read_fwf.py | test_read_fwf.py | Python | 29,873 | 0.95 | 0.052224 | 0.035147 | react-lib | 894 | 2024-10-05T04:14:09.524501 | BSD-3-Clause | true | 7cad1794adfcb61363cd154c9aad5e33 |
"""\nTests that skipped rows are properly handled during\nparsing for all of the parsers defined in parsers.py\n"""\n\nfrom datetime import datetime\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import EmptyDataError\n\nfrom pandas import (\n DataFrame,\n Index,\n)\nimport pandas._testing as tm\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\n\n@xfail_pyarrow # ValueError: skiprows argument must be an integer\n@pytest.mark.parametrize("skiprows", [list(range(6)), 6])\ndef test_skip_rows_bug(all_parsers, skiprows):\n # see gh-505\n parser = all_parsers\n text = """#foo,a,b,c\n#foo,a,b,c\n#foo,a,b,c\n#foo,a,b,c\n#foo,a,b,c\n#foo,a,b,c\n1/1/2000,1.,2.,3.\n1/2/2000,4,5,6\n1/3/2000,7,8,9\n"""\n result = parser.read_csv(\n StringIO(text), skiprows=skiprows, header=None, index_col=0, parse_dates=True\n )\n index = Index(\n [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], name=0\n )\n\n expected = DataFrame(\n np.arange(1.0, 10.0).reshape((3, 3)), columns=[1, 2, 3], index=index\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # ValueError: skiprows argument must be an integer\ndef test_deep_skip_rows(all_parsers):\n # see gh-4382\n parser = all_parsers\n data = "a,b,c\n" + "\n".join(\n [",".join([str(i), str(i + 1), str(i + 2)]) for i in range(10)]\n )\n condensed_data = "a,b,c\n" + "\n".join(\n [",".join([str(i), str(i + 1), str(i + 2)]) for i in [0, 1, 2, 3, 4, 6, 8, 9]]\n )\n\n result = parser.read_csv(StringIO(data), skiprows=[6, 8])\n condensed_result = parser.read_csv(StringIO(condensed_data))\n tm.assert_frame_equal(result, condensed_result)\n\n\n@xfail_pyarrow # AssertionError: DataFrame are different\ndef test_skip_rows_blank(all_parsers):\n # see gh-9832\n parser = all_parsers\n text = 
"""#foo,a,b,c\n#foo,a,b,c\n\n#foo,a,b,c\n#foo,a,b,c\n\n1/1/2000,1.,2.,3.\n1/2/2000,4,5,6\n1/3/2000,7,8,9\n"""\n data = parser.read_csv(\n StringIO(text), skiprows=6, header=None, index_col=0, parse_dates=True\n )\n index = Index(\n [datetime(2000, 1, 1), datetime(2000, 1, 2), datetime(2000, 1, 3)], name=0\n )\n\n expected = DataFrame(\n np.arange(1.0, 10.0).reshape((3, 3)), columns=[1, 2, 3], index=index\n )\n tm.assert_frame_equal(data, expected)\n\n\n@pytest.mark.parametrize(\n "data,kwargs,expected",\n [\n (\n """id,text,num_lines\n1,"line 11\nline 12",2\n2,"line 21\nline 22",2\n3,"line 31",1""",\n {"skiprows": [1]},\n DataFrame(\n [[2, "line 21\nline 22", 2], [3, "line 31", 1]],\n columns=["id", "text", "num_lines"],\n ),\n ),\n (\n "a,b,c\n~a\n b~,~e\n d~,~f\n f~\n1,2,~12\n 13\n 14~",\n {"quotechar": "~", "skiprows": [2]},\n DataFrame([["a\n b", "e\n d", "f\n f"]], columns=["a", "b", "c"]),\n ),\n (\n (\n "Text,url\n~example\n "\n "sentence\n one~,url1\n~"\n "example\n sentence\n two~,url2\n~"\n "example\n sentence\n three~,url3"\n ),\n {"quotechar": "~", "skiprows": [1, 3]},\n DataFrame([["example\n sentence\n two", "url2"]], columns=["Text", "url"]),\n ),\n ],\n)\n@xfail_pyarrow # ValueError: skiprows argument must be an integer\ndef test_skip_row_with_newline(all_parsers, data, kwargs, expected):\n # see gh-12775 and gh-10911\n parser = all_parsers\n result = parser.read_csv(StringIO(data), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # ValueError: skiprows argument must be an integer\ndef test_skip_row_with_quote(all_parsers):\n # see gh-12775 and gh-10911\n parser = all_parsers\n data = """id,text,num_lines\n1,"line '11' line 12",2\n2,"line '21' line 22",2\n3,"line '31' line 32",1"""\n\n exp_data = [[2, "line '21' line 22", 2], [3, "line '31' line 32", 1]]\n expected = DataFrame(exp_data, columns=["id", "text", "num_lines"])\n\n result = parser.read_csv(StringIO(data), skiprows=[1])\n tm.assert_frame_equal(result, 
expected)\n\n\n@pytest.mark.parametrize(\n "data,exp_data",\n [\n (\n """id,text,num_lines\n1,"line \n'11' line 12",2\n2,"line \n'21' line 22",2\n3,"line \n'31' line 32",1""",\n [[2, "line \n'21' line 22", 2], [3, "line \n'31' line 32", 1]],\n ),\n (\n """id,text,num_lines\n1,"line '11\n' line 12",2\n2,"line '21\n' line 22",2\n3,"line '31\n' line 32",1""",\n [[2, "line '21\n' line 22", 2], [3, "line '31\n' line 32", 1]],\n ),\n (\n """id,text,num_lines\n1,"line '11\n' \r\tline 12",2\n2,"line '21\n' \r\tline 22",2\n3,"line '31\n' \r\tline 32",1""",\n [[2, "line '21\n' \r\tline 22", 2], [3, "line '31\n' \r\tline 32", 1]],\n ),\n ],\n)\n@xfail_pyarrow # ValueError: skiprows argument must be an integer\ndef test_skip_row_with_newline_and_quote(all_parsers, data, exp_data):\n # see gh-12775 and gh-10911\n parser = all_parsers\n result = parser.read_csv(StringIO(data), skiprows=[1])\n\n expected = DataFrame(exp_data, columns=["id", "text", "num_lines"])\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # ValueError: The 'delim_whitespace' option is not supported\n@pytest.mark.parametrize(\n "lineterminator", ["\n", "\r\n", "\r"] # "LF" # "CRLF" # "CR"\n)\ndef test_skiprows_lineterminator(all_parsers, lineterminator, request):\n # see gh-9079\n parser = all_parsers\n data = "\n".join(\n [\n "SMOSMANIA ThetaProbe-ML2X ",\n "2007/01/01 01:00 0.2140 U M ",\n "2007/01/01 02:00 0.2141 M O ",\n "2007/01/01 04:00 0.2142 D M ",\n ]\n )\n expected = DataFrame(\n [\n ["2007/01/01", "01:00", 0.2140, "U", "M"],\n ["2007/01/01", "02:00", 0.2141, "M", "O"],\n ["2007/01/01", "04:00", 0.2142, "D", "M"],\n ],\n columns=["date", "time", "var", "flag", "oflag"],\n )\n\n if parser.engine == "python" and lineterminator == "\r":\n mark = pytest.mark.xfail(reason="'CR' not respect with the Python parser yet")\n request.applymarker(mark)\n\n data = data.replace("\n", lineterminator)\n\n depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"\n with 
tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(\n StringIO(data),\n skiprows=1,\n delim_whitespace=True,\n names=["date", "time", "var", "flag", "oflag"],\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # AssertionError: DataFrame are different\ndef test_skiprows_infield_quote(all_parsers):\n # see gh-14459\n parser = all_parsers\n data = 'a"\nb"\na\n1'\n expected = DataFrame({"a": [1]})\n\n result = parser.read_csv(StringIO(data), skiprows=2)\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # ValueError: skiprows argument must be an integer\n@pytest.mark.parametrize(\n "kwargs,expected",\n [\n ({}, DataFrame({"1": [3, 5]})),\n ({"header": 0, "names": ["foo"]}, DataFrame({"foo": [3, 5]})),\n ],\n)\ndef test_skip_rows_callable(all_parsers, kwargs, expected):\n parser = all_parsers\n data = "a\n1\n2\n3\n4\n5"\n\n result = parser.read_csv(StringIO(data), skiprows=lambda x: x % 2 == 0, **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # ValueError: skiprows argument must be an integer\ndef test_skip_rows_callable_not_in(all_parsers):\n parser = all_parsers\n data = "0,a\n1,b\n2,c\n3,d\n4,e"\n expected = DataFrame([[1, "b"], [3, "d"]])\n\n result = parser.read_csv(\n StringIO(data), header=None, skiprows=lambda x: x not in [1, 3]\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # ValueError: skiprows argument must be an integer\ndef test_skip_rows_skip_all(all_parsers):\n parser = all_parsers\n data = "a\n1\n2\n3\n4\n5"\n msg = "No columns to parse from file"\n\n with pytest.raises(EmptyDataError, match=msg):\n parser.read_csv(StringIO(data), skiprows=lambda x: True)\n\n\n@xfail_pyarrow # ValueError: skiprows argument must be an integer\ndef test_skip_rows_bad_callable(all_parsers):\n msg = "by zero"\n parser = all_parsers\n data = "a\n1\n2\n3\n4\n5"\n\n with pytest.raises(ZeroDivisionError, match=msg):\n 
parser.read_csv(StringIO(data), skiprows=lambda x: 1 / 0)\n\n\n@xfail_pyarrow # ValueError: skiprows argument must be an integer\ndef test_skip_rows_and_n_rows(all_parsers):\n # GH#44021\n data = """a,b\n1,a\n2,b\n3,c\n4,d\n5,e\n6,f\n7,g\n8,h\n"""\n parser = all_parsers\n result = parser.read_csv(StringIO(data), nrows=5, skiprows=[2, 4, 6])\n expected = DataFrame({"a": [1, 3, 5, 7, 8], "b": ["a", "c", "e", "g", "h"]})\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow\ndef test_skip_rows_with_chunks(all_parsers):\n # GH 55677\n data = """col_a\n10\n20\n30\n40\n50\n60\n70\n80\n90\n100\n"""\n parser = all_parsers\n reader = parser.read_csv(\n StringIO(data), engine=parser, skiprows=lambda x: x in [1, 4, 5], chunksize=4\n )\n df1 = next(reader)\n df2 = next(reader)\n\n tm.assert_frame_equal(df1, DataFrame({"col_a": [20, 30, 60, 70]}))\n tm.assert_frame_equal(df2, DataFrame({"col_a": [80, 90, 100]}, index=[4, 5, 6]))\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_skiprows.py | test_skiprows.py | Python | 9,457 | 0.95 | 0.053892 | 0.06338 | react-lib | 352 | 2024-12-04T19:27:22.177654 | BSD-3-Clause | true | 87abdd46f4e9b3c694e379bffd11f2bd |
"""\nTests the TextReader class in parsers.pyx, which\nis integral to the C engine in parsers.py\n"""\nfrom io import (\n BytesIO,\n StringIO,\n)\n\nimport numpy as np\nimport pytest\n\nimport pandas._libs.parsers as parser\nfrom pandas._libs.parsers import TextReader\nfrom pandas.errors import ParserWarning\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\nfrom pandas.io.parsers import (\n TextFileReader,\n read_csv,\n)\nfrom pandas.io.parsers.c_parser_wrapper import ensure_dtype_objs\n\n\nclass TestTextReader:\n @pytest.fixture\n def csv_path(self, datapath):\n return datapath("io", "data", "csv", "test1.csv")\n\n def test_file_handle(self, csv_path):\n with open(csv_path, "rb") as f:\n reader = TextReader(f)\n reader.read()\n\n def test_file_handle_mmap(self, csv_path):\n # this was never using memory_map=True\n with open(csv_path, "rb") as f:\n reader = TextReader(f, header=None)\n reader.read()\n\n def test_StringIO(self, csv_path):\n with open(csv_path, "rb") as f:\n text = f.read()\n src = BytesIO(text)\n reader = TextReader(src, header=None)\n reader.read()\n\n def test_string_factorize(self):\n # should this be optional?\n data = "a\nb\na\nb\na"\n reader = TextReader(StringIO(data), header=None)\n result = reader.read()\n assert len(set(map(id, result[0]))) == 2\n\n def test_skipinitialspace(self):\n data = "a, b\na, b\na, b\na, b"\n\n reader = TextReader(StringIO(data), skipinitialspace=True, header=None)\n result = reader.read()\n\n tm.assert_numpy_array_equal(\n result[0], np.array(["a", "a", "a", "a"], dtype=np.object_)\n )\n tm.assert_numpy_array_equal(\n result[1], np.array(["b", "b", "b", "b"], dtype=np.object_)\n )\n\n def test_parse_booleans(self):\n data = "True\nFalse\nTrue\nTrue"\n\n reader = TextReader(StringIO(data), header=None)\n result = reader.read()\n\n assert result[0].dtype == np.bool_\n\n def test_delimit_whitespace(self):\n data = 'a b\na\t\t "b"\n"a"\t \t b'\n\n reader = TextReader(StringIO(data), 
delim_whitespace=True, header=None)\n result = reader.read()\n\n tm.assert_numpy_array_equal(\n result[0], np.array(["a", "a", "a"], dtype=np.object_)\n )\n tm.assert_numpy_array_equal(\n result[1], np.array(["b", "b", "b"], dtype=np.object_)\n )\n\n def test_embedded_newline(self):\n data = 'a\n"hello\nthere"\nthis'\n\n reader = TextReader(StringIO(data), header=None)\n result = reader.read()\n\n expected = np.array(["a", "hello\nthere", "this"], dtype=np.object_)\n tm.assert_numpy_array_equal(result[0], expected)\n\n def test_euro_decimal(self):\n data = "12345,67\n345,678"\n\n reader = TextReader(StringIO(data), delimiter=":", decimal=",", header=None)\n result = reader.read()\n\n expected = np.array([12345.67, 345.678])\n tm.assert_almost_equal(result[0], expected)\n\n def test_integer_thousands(self):\n data = "123,456\n12,500"\n\n reader = TextReader(StringIO(data), delimiter=":", thousands=",", header=None)\n result = reader.read()\n\n expected = np.array([123456, 12500], dtype=np.int64)\n tm.assert_almost_equal(result[0], expected)\n\n def test_integer_thousands_alt(self):\n data = "123.456\n12.500"\n\n reader = TextFileReader(\n StringIO(data), delimiter=":", thousands=".", header=None\n )\n result = reader.read()\n\n expected = DataFrame([123456, 12500])\n tm.assert_frame_equal(result, expected)\n\n def test_skip_bad_lines(self):\n # too many lines, see #2430 for why\n data = "a:b:c\nd:e:f\ng:h:i\nj:k:l:m\nl:m:n\no:p:q:r"\n\n reader = TextReader(StringIO(data), delimiter=":", header=None)\n msg = r"Error tokenizing data\. 
C error: Expected 3 fields in line 4, saw 4"\n with pytest.raises(parser.ParserError, match=msg):\n reader.read()\n\n reader = TextReader(\n StringIO(data), delimiter=":", header=None, on_bad_lines=2 # Skip\n )\n result = reader.read()\n expected = {\n 0: np.array(["a", "d", "g", "l"], dtype=object),\n 1: np.array(["b", "e", "h", "m"], dtype=object),\n 2: np.array(["c", "f", "i", "n"], dtype=object),\n }\n assert_array_dicts_equal(result, expected)\n\n with tm.assert_produces_warning(ParserWarning, match="Skipping line"):\n reader = TextReader(\n StringIO(data), delimiter=":", header=None, on_bad_lines=1 # Warn\n )\n reader.read()\n\n def test_header_not_enough_lines(self):\n data = "skip this\nskip this\na,b,c\n1,2,3\n4,5,6"\n\n reader = TextReader(StringIO(data), delimiter=",", header=2)\n header = reader.header\n expected = [["a", "b", "c"]]\n assert header == expected\n\n recs = reader.read()\n expected = {\n 0: np.array([1, 4], dtype=np.int64),\n 1: np.array([2, 5], dtype=np.int64),\n 2: np.array([3, 6], dtype=np.int64),\n }\n assert_array_dicts_equal(recs, expected)\n\n def test_escapechar(self):\n data = '\\"hello world"\n\\"hello world"\n\\"hello world"'\n\n reader = TextReader(StringIO(data), delimiter=",", header=None, escapechar="\\")\n result = reader.read()\n expected = {0: np.array(['"hello world"'] * 3, dtype=object)}\n assert_array_dicts_equal(result, expected)\n\n def test_eof_has_eol(self):\n # handling of new line at EOF\n pass\n\n def test_na_substitution(self):\n pass\n\n def test_numpy_string_dtype(self):\n data = """\\na,1\naa,2\naaa,3\naaaa,4\naaaaa,5"""\n\n def _make_reader(**kwds):\n if "dtype" in kwds:\n kwds["dtype"] = ensure_dtype_objs(kwds["dtype"])\n return TextReader(StringIO(data), delimiter=",", header=None, **kwds)\n\n reader = _make_reader(dtype="S5,i4")\n result = reader.read()\n\n assert result[0].dtype == "S5"\n\n ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaaa"], dtype="S5")\n assert (result[0] == ex_values).all()\n 
assert result[1].dtype == "i4"\n\n reader = _make_reader(dtype="S4")\n result = reader.read()\n assert result[0].dtype == "S4"\n ex_values = np.array(["a", "aa", "aaa", "aaaa", "aaaa"], dtype="S4")\n assert (result[0] == ex_values).all()\n assert result[1].dtype == "S4"\n\n def test_pass_dtype(self):\n data = """\\none,two\n1,a\n2,b\n3,c\n4,d"""\n\n def _make_reader(**kwds):\n if "dtype" in kwds:\n kwds["dtype"] = ensure_dtype_objs(kwds["dtype"])\n return TextReader(StringIO(data), delimiter=",", **kwds)\n\n reader = _make_reader(dtype={"one": "u1", 1: "S1"})\n result = reader.read()\n assert result[0].dtype == "u1"\n assert result[1].dtype == "S1"\n\n reader = _make_reader(dtype={"one": np.uint8, 1: object})\n result = reader.read()\n assert result[0].dtype == "u1"\n assert result[1].dtype == "O"\n\n reader = _make_reader(dtype={"one": np.dtype("u1"), 1: np.dtype("O")})\n result = reader.read()\n assert result[0].dtype == "u1"\n assert result[1].dtype == "O"\n\n def test_usecols(self):\n data = """\\na,b,c\n1,2,3\n4,5,6\n7,8,9\n10,11,12"""\n\n def _make_reader(**kwds):\n return TextReader(StringIO(data), delimiter=",", **kwds)\n\n reader = _make_reader(usecols=(1, 2))\n result = reader.read()\n\n exp = _make_reader().read()\n assert len(result) == 2\n assert (result[1] == exp[1]).all()\n assert (result[2] == exp[2]).all()\n\n @pytest.mark.parametrize(\n "text, kwargs",\n [\n ("a,b,c\r1,2,3\r4,5,6\r7,8,9\r10,11,12", {"delimiter": ","}),\n (\n "a b c\r1 2 3\r4 5 6\r7 8 9\r10 11 12",\n {"delim_whitespace": True},\n ),\n ("a,b,c\r1,2,3\r4,5,6\r,88,9\r10,11,12", {"delimiter": ","}),\n (\n (\n "A,B,C,D,E,F,G,H,I,J,K,L,M,N,O\r"\n "AAAAA,BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0\r"\n ",BBBBB,0,0,0,0,0,0,0,0,0,0,0,0,0"\n ),\n {"delimiter": ","},\n ),\n ("A B C\r 2 3\r4 5 6", {"delim_whitespace": True}),\n ("A B C\r2 3\r4 5 6", {"delim_whitespace": True}),\n ],\n )\n def test_cr_delimited(self, text, kwargs):\n nice_text = text.replace("\r", "\r\n")\n result = 
TextReader(StringIO(text), **kwargs).read()\n expected = TextReader(StringIO(nice_text), **kwargs).read()\n assert_array_dicts_equal(result, expected)\n\n def test_empty_field_eof(self):\n data = "a,b,c\n1,2,3\n4,,"\n\n result = TextReader(StringIO(data), delimiter=",").read()\n\n expected = {\n 0: np.array([1, 4], dtype=np.int64),\n 1: np.array(["2", ""], dtype=object),\n 2: np.array(["3", ""], dtype=object),\n }\n assert_array_dicts_equal(result, expected)\n\n @pytest.mark.parametrize("repeat", range(10))\n def test_empty_field_eof_mem_access_bug(self, repeat):\n # GH5664\n a = DataFrame([["b"], [np.nan]], columns=["a"], index=["a", "c"])\n b = DataFrame([[1, 1, 1, 0], [1, 1, 1, 0]], columns=list("abcd"), index=[1, 1])\n c = DataFrame(\n [\n [1, 2, 3, 4],\n [6, np.nan, np.nan, np.nan],\n [8, 9, 10, 11],\n [13, 14, np.nan, np.nan],\n ],\n columns=list("abcd"),\n index=[0, 5, 7, 12],\n )\n\n df = read_csv(StringIO("a,b\nc\n"), skiprows=0, names=["a"], engine="c")\n tm.assert_frame_equal(df, a)\n\n df = read_csv(\n StringIO("1,1,1,1,0\n" * 2 + "\n" * 2), names=list("abcd"), engine="c"\n )\n tm.assert_frame_equal(df, b)\n\n df = read_csv(\n StringIO("0,1,2,3,4\n5,6\n7,8,9,10,11\n12,13,14"),\n names=list("abcd"),\n engine="c",\n )\n tm.assert_frame_equal(df, c)\n\n def test_empty_csv_input(self):\n # GH14867\n with read_csv(\n StringIO(), chunksize=20, header=None, names=["a", "b", "c"]\n ) as df:\n assert isinstance(df, TextFileReader)\n\n\ndef assert_array_dicts_equal(left, right):\n for k, v in left.items():\n tm.assert_numpy_array_equal(np.asarray(v), np.asarray(right[k]))\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_textreader.py | test_textreader.py | Python | 10,672 | 0.95 | 0.099415 | 0.021898 | vue-tools | 268 | 2025-06-09T14:27:07.880160 | BSD-3-Clause | true | 42d7a6d226b81682a40c55496b7baa0a |
"""\nTests that features that are currently unsupported in\neither the Python or C parser are actually enforced\nand are clearly communicated to the user.\n\nUltimately, the goal is to remove test cases from this\ntest suite as new feature support is added to the parsers.\n"""\nfrom io import StringIO\nimport os\nfrom pathlib import Path\n\nimport pytest\n\nfrom pandas.errors import ParserError\n\nimport pandas._testing as tm\n\nfrom pandas.io.parsers import read_csv\nimport pandas.io.parsers.readers as parsers\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\n\n@pytest.fixture(params=["python", "python-fwf"], ids=lambda val: val)\ndef python_engine(request):\n return request.param\n\n\nclass TestUnsupportedFeatures:\n def test_mangle_dupe_cols_false(self):\n # see gh-12935\n data = "a b c\n1 2 3"\n\n for engine in ("c", "python"):\n with pytest.raises(TypeError, match="unexpected keyword"):\n read_csv(StringIO(data), engine=engine, mangle_dupe_cols=True)\n\n def test_c_engine(self):\n # see gh-6607\n data = "a b c\n1 2 3"\n msg = "does not support"\n\n depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"\n\n # specify C engine with unsupported options (raise)\n with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(FutureWarning, match=depr_msg):\n read_csv(StringIO(data), engine="c", sep=None, delim_whitespace=False)\n with pytest.raises(ValueError, match=msg):\n read_csv(StringIO(data), engine="c", sep=r"\s")\n with pytest.raises(ValueError, match=msg):\n read_csv(StringIO(data), engine="c", sep="\t", quotechar=chr(128))\n with pytest.raises(ValueError, match=msg):\n read_csv(StringIO(data), engine="c", skipfooter=1)\n\n # specify C-unsupported options without python-unsupported options\n with tm.assert_produces_warning((parsers.ParserWarning, FutureWarning)):\n read_csv(StringIO(data), sep=None, delim_whitespace=False)\n with 
tm.assert_produces_warning(parsers.ParserWarning):\n read_csv(StringIO(data), sep=r"\s")\n with tm.assert_produces_warning(parsers.ParserWarning):\n read_csv(StringIO(data), sep="\t", quotechar=chr(128))\n with tm.assert_produces_warning(parsers.ParserWarning):\n read_csv(StringIO(data), skipfooter=1)\n\n text = """ A B C D E\none two three four\na b 10.0032 5 -0.5109 -2.3358 -0.4645 0.05076 0.3640\na q 20 4 0.4473 1.4152 0.2834 1.00661 0.1744\nx q 30 3 -0.6662 -0.5243 -0.3580 0.89145 2.5838"""\n msg = "Error tokenizing data"\n\n with pytest.raises(ParserError, match=msg):\n read_csv(StringIO(text), sep="\\s+")\n with pytest.raises(ParserError, match=msg):\n read_csv(StringIO(text), engine="c", sep="\\s+")\n\n msg = "Only length-1 thousands markers supported"\n data = """A|B|C\n1|2,334|5\n10|13|10.\n"""\n with pytest.raises(ValueError, match=msg):\n read_csv(StringIO(data), thousands=",,")\n with pytest.raises(ValueError, match=msg):\n read_csv(StringIO(data), thousands="")\n\n msg = "Only length-1 line terminators supported"\n data = "a,b,c~~1,2,3~~4,5,6"\n with pytest.raises(ValueError, match=msg):\n read_csv(StringIO(data), lineterminator="~~")\n\n def test_python_engine(self, python_engine):\n from pandas.io.parsers.readers import _python_unsupported as py_unsupported\n\n data = """1,2,3,,\n1,2,3,4,\n1,2,3,4,5\n1,2,,,\n1,2,3,4,"""\n\n for default in py_unsupported:\n msg = (\n f"The {repr(default)} option is not "\n f"supported with the {repr(python_engine)} engine"\n )\n\n kwargs = {default: object()}\n with pytest.raises(ValueError, match=msg):\n read_csv(StringIO(data), engine=python_engine, **kwargs)\n\n def test_python_engine_file_no_iter(self, python_engine):\n # see gh-16530\n class NoNextBuffer:\n def __init__(self, csv_data) -> None:\n self.data = csv_data\n\n def __next__(self):\n return self.data.__next__()\n\n def read(self):\n return self.data\n\n def readline(self):\n return self.data\n\n data = "a\n1"\n msg = "'NoNextBuffer' object is not 
iterable|argument 1 must be an iterator"\n\n with pytest.raises(TypeError, match=msg):\n read_csv(NoNextBuffer(data), engine=python_engine)\n\n def test_pyarrow_engine(self):\n from pandas.io.parsers.readers import _pyarrow_unsupported as pa_unsupported\n\n data = """1,2,3,,\n 1,2,3,4,\n 1,2,3,4,5\n 1,2,,,\n 1,2,3,4,"""\n\n for default in pa_unsupported:\n msg = (\n f"The {repr(default)} option is not "\n f"supported with the 'pyarrow' engine"\n )\n kwargs = {default: object()}\n default_needs_bool = {"warn_bad_lines", "error_bad_lines"}\n if default == "dialect":\n kwargs[default] = "excel" # test a random dialect\n elif default in default_needs_bool:\n kwargs[default] = True\n elif default == "on_bad_lines":\n kwargs[default] = "warn"\n\n warn = None\n depr_msg = None\n if "delim_whitespace" in kwargs:\n depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"\n warn = FutureWarning\n if "verbose" in kwargs:\n depr_msg = "The 'verbose' keyword in pd.read_csv is deprecated"\n warn = FutureWarning\n\n with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(warn, match=depr_msg):\n read_csv(StringIO(data), engine="pyarrow", **kwargs)\n\n def test_on_bad_lines_callable_python_or_pyarrow(self, all_parsers):\n # GH 5686\n # GH 54643\n sio = StringIO("a,b\n1,2")\n bad_lines_func = lambda x: x\n parser = all_parsers\n if all_parsers.engine not in ["python", "pyarrow"]:\n msg = (\n "on_bad_line can only be a callable "\n "function if engine='python' or 'pyarrow'"\n )\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(sio, on_bad_lines=bad_lines_func)\n else:\n parser.read_csv(sio, on_bad_lines=bad_lines_func)\n\n\ndef test_close_file_handle_on_invalid_usecols(all_parsers):\n # GH 45384\n parser = all_parsers\n\n error = ValueError\n if parser.engine == "pyarrow":\n # Raises pyarrow.lib.ArrowKeyError\n pytest.skip(reason="https://github.com/apache/arrow/issues/38676")\n\n with tm.ensure_clean("test.csv") as fname:\n 
Path(fname).write_text("col1,col2\na,b\n1,2", encoding="utf-8")\n with tm.assert_produces_warning(False):\n with pytest.raises(error, match="col3"):\n parser.read_csv(fname, usecols=["col1", "col2", "col3"])\n # unlink fails on windows if file handles still point to it\n os.unlink(fname)\n\n\ndef test_invalid_file_inputs(request, all_parsers):\n # GH#45957\n parser = all_parsers\n if parser.engine == "python":\n request.applymarker(\n pytest.mark.xfail(reason=f"{parser.engine} engine supports lists.")\n )\n\n with pytest.raises(ValueError, match="Invalid"):\n parser.read_csv([])\n\n\ndef test_invalid_dtype_backend(all_parsers):\n parser = all_parsers\n msg = (\n "dtype_backend numpy is invalid, only 'numpy_nullable' and "\n "'pyarrow' are allowed."\n )\n with pytest.raises(ValueError, match=msg):\n parser.read_csv("test", dtype_backend="numpy")\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_unsupported.py | test_unsupported.py | Python | 7,986 | 0.95 | 0.123894 | 0.06044 | react-lib | 640 | 2025-05-12T01:09:00.735347 | BSD-3-Clause | true | 7ce3abf40166954ee862c01df6c8cf0f |
import numpy as np\nimport pytest\n\nfrom pandas._libs.parsers import (\n _maybe_upcast,\n na_values,\n)\n\nimport pandas as pd\nfrom pandas import NA\nimport pandas._testing as tm\nfrom pandas.core.arrays import (\n ArrowStringArray,\n BooleanArray,\n FloatingArray,\n IntegerArray,\n StringArray,\n)\n\n\ndef test_maybe_upcast(any_real_numpy_dtype):\n # GH#36712\n\n dtype = np.dtype(any_real_numpy_dtype)\n na_value = na_values[dtype]\n arr = np.array([1, 2, na_value], dtype=dtype)\n result = _maybe_upcast(arr, use_dtype_backend=True)\n\n expected_mask = np.array([False, False, True])\n if issubclass(dtype.type, np.integer):\n expected = IntegerArray(arr, mask=expected_mask)\n else:\n expected = FloatingArray(arr, mask=expected_mask)\n\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_maybe_upcast_no_na(any_real_numpy_dtype):\n # GH#36712\n arr = np.array([1, 2, 3], dtype=any_real_numpy_dtype)\n result = _maybe_upcast(arr, use_dtype_backend=True)\n\n expected_mask = np.array([False, False, False])\n if issubclass(np.dtype(any_real_numpy_dtype).type, np.integer):\n expected = IntegerArray(arr, mask=expected_mask)\n else:\n expected = FloatingArray(arr, mask=expected_mask)\n\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_maybe_upcaste_bool():\n # GH#36712\n dtype = np.bool_\n na_value = na_values[dtype]\n arr = np.array([True, False, na_value], dtype="uint8").view(dtype)\n result = _maybe_upcast(arr, use_dtype_backend=True)\n\n expected_mask = np.array([False, False, True])\n expected = BooleanArray(arr, mask=expected_mask)\n tm.assert_extension_array_equal(result, expected)\n\n\ndef test_maybe_upcaste_bool_no_nan():\n # GH#36712\n dtype = np.bool_\n arr = np.array([True, False, False], dtype="uint8").view(dtype)\n result = _maybe_upcast(arr, use_dtype_backend=True)\n\n expected_mask = np.array([False, False, False])\n expected = BooleanArray(arr, mask=expected_mask)\n tm.assert_extension_array_equal(result, expected)\n\n\ndef 
test_maybe_upcaste_all_nan():\n # GH#36712\n dtype = np.int64\n na_value = na_values[dtype]\n arr = np.array([na_value, na_value], dtype=dtype)\n result = _maybe_upcast(arr, use_dtype_backend=True)\n\n expected_mask = np.array([True, True])\n expected = IntegerArray(arr, mask=expected_mask)\n tm.assert_extension_array_equal(result, expected)\n\n\n@pytest.mark.parametrize("val", [na_values[np.object_], "c"])\ndef test_maybe_upcast_object(val, string_storage):\n # GH#36712\n pa = pytest.importorskip("pyarrow")\n\n with pd.option_context("mode.string_storage", string_storage):\n arr = np.array(["a", "b", val], dtype=np.object_)\n result = _maybe_upcast(arr, use_dtype_backend=True)\n\n if string_storage == "python":\n exp_val = "c" if val == "c" else NA\n expected = StringArray(np.array(["a", "b", exp_val], dtype=np.object_))\n else:\n exp_val = "c" if val == "c" else None\n expected = ArrowStringArray(pa.array(["a", "b", exp_val]))\n tm.assert_extension_array_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\test_upcast.py | test_upcast.py | Python | 3,141 | 0.95 | 0.107843 | 0.076923 | python-kit | 116 | 2024-02-15T00:05:15.115750 | Apache-2.0 | true | 6e887d4aeeb550275ad03c29563761f5 |
"""\nTests that work on both the Python and C engines but do not have a\nspecific classification into the other test modules.\n"""\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import parsers as libparsers\nfrom pandas.errors import DtypeWarning\n\nfrom pandas import (\n DataFrame,\n concat,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\n\n@pytest.mark.parametrize("index_col", [0, "index"])\ndef test_read_chunksize_with_index(all_parsers, index_col):\n parser = all_parsers\n data = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n"""\n\n expected = DataFrame(\n [\n ["foo", 2, 3, 4, 5],\n ["bar", 7, 8, 9, 10],\n ["baz", 12, 13, 14, 15],\n ["qux", 12, 13, 14, 15],\n ["foo2", 12, 13, 14, 15],\n ["bar2", 12, 13, 14, 15],\n ],\n columns=["index", "A", "B", "C", "D"],\n )\n expected = expected.set_index("index")\n\n if parser.engine == "pyarrow":\n msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n with parser.read_csv(StringIO(data), index_col=0, chunksize=2) as reader:\n list(reader)\n return\n\n with parser.read_csv(StringIO(data), index_col=0, chunksize=2) as reader:\n chunks = list(reader)\n tm.assert_frame_equal(chunks[0], expected[:2])\n tm.assert_frame_equal(chunks[1], expected[2:4])\n tm.assert_frame_equal(chunks[2], expected[4:])\n\n\n@pytest.mark.parametrize("chunksize", [1.3, "foo", 0])\ndef test_read_chunksize_bad(all_parsers, chunksize):\n data = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n"""\n parser = all_parsers\n msg = r"'chunksize' must be an integer >=1"\n if parser.engine == "pyarrow":\n msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"\n\n with pytest.raises(ValueError, match=msg):\n 
with parser.read_csv(StringIO(data), chunksize=chunksize) as _:\n pass\n\n\n@pytest.mark.parametrize("chunksize", [2, 8])\ndef test_read_chunksize_and_nrows(all_parsers, chunksize):\n # see gh-15755\n data = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n"""\n parser = all_parsers\n kwargs = {"index_col": 0, "nrows": 5}\n\n if parser.engine == "pyarrow":\n msg = "The 'nrows' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), **kwargs)\n return\n\n expected = parser.read_csv(StringIO(data), **kwargs)\n with parser.read_csv(StringIO(data), chunksize=chunksize, **kwargs) as reader:\n tm.assert_frame_equal(concat(reader), expected)\n\n\ndef test_read_chunksize_and_nrows_changing_size(all_parsers):\n data = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n"""\n parser = all_parsers\n kwargs = {"index_col": 0, "nrows": 5}\n\n if parser.engine == "pyarrow":\n msg = "The 'nrows' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), **kwargs)\n return\n\n expected = parser.read_csv(StringIO(data), **kwargs)\n with parser.read_csv(StringIO(data), chunksize=8, **kwargs) as reader:\n tm.assert_frame_equal(reader.get_chunk(size=2), expected.iloc[:2])\n tm.assert_frame_equal(reader.get_chunk(size=4), expected.iloc[2:5])\n\n with pytest.raises(StopIteration, match=""):\n reader.get_chunk(size=3)\n\n\ndef test_get_chunk_passed_chunksize(all_parsers):\n parser = all_parsers\n data = """A,B,C\n1,2,3\n4,5,6\n7,8,9\n1,2,3"""\n\n if parser.engine == "pyarrow":\n msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n with parser.read_csv(StringIO(data), chunksize=2) as reader:\n reader.get_chunk()\n return\n\n with 
parser.read_csv(StringIO(data), chunksize=2) as reader:\n result = reader.get_chunk()\n\n expected = DataFrame([[1, 2, 3], [4, 5, 6]], columns=["A", "B", "C"])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("kwargs", [{}, {"index_col": 0}])\ndef test_read_chunksize_compat(all_parsers, kwargs):\n # see gh-12185\n data = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n"""\n parser = all_parsers\n result = parser.read_csv(StringIO(data), **kwargs)\n\n if parser.engine == "pyarrow":\n msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n with parser.read_csv(StringIO(data), chunksize=2, **kwargs) as reader:\n concat(reader)\n return\n\n with parser.read_csv(StringIO(data), chunksize=2, **kwargs) as reader:\n via_reader = concat(reader)\n tm.assert_frame_equal(via_reader, result)\n\n\ndef test_read_chunksize_jagged_names(all_parsers):\n # see gh-23509\n parser = all_parsers\n data = "\n".join(["0"] * 7 + [",".join(["0"] * 10)])\n\n expected = DataFrame([[0] + [np.nan] * 9] * 7 + [[0] * 10])\n\n if parser.engine == "pyarrow":\n msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n with parser.read_csv(\n StringIO(data), names=range(10), chunksize=4\n ) as reader:\n concat(reader)\n return\n\n with parser.read_csv(StringIO(data), names=range(10), chunksize=4) as reader:\n result = concat(reader)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_chunk_begins_with_newline_whitespace(all_parsers):\n # see gh-10022\n parser = all_parsers\n data = "\n hello\nworld\n"\n\n result = parser.read_csv(StringIO(data), header=None)\n expected = DataFrame([" hello", "world"])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.slow\ndef test_chunks_have_consistent_numerical_type(all_parsers, monkeypatch):\n # mainly an issue with the C parser\n 
heuristic = 2**3\n parser = all_parsers\n integers = [str(i) for i in range(heuristic - 1)]\n data = "a\n" + "\n".join(integers + ["1.0", "2.0"] + integers)\n\n # Coercions should work without warnings.\n with monkeypatch.context() as m:\n m.setattr(libparsers, "DEFAULT_BUFFER_HEURISTIC", heuristic)\n result = parser.read_csv(StringIO(data))\n\n assert type(result.a[0]) is np.float64\n assert result.a.dtype == float\n\n\ndef test_warn_if_chunks_have_mismatched_type(all_parsers, using_infer_string):\n warning_type = None\n parser = all_parsers\n size = 10000\n\n # see gh-3866: if chunks are different types and can't\n # be coerced using numerical types, then issue warning.\n if parser.engine == "c" and parser.low_memory:\n warning_type = DtypeWarning\n # Use larger size to hit warning path\n size = 499999\n\n integers = [str(i) for i in range(size)]\n data = "a\n" + "\n".join(integers + ["a", "b"] + integers)\n\n buf = StringIO(data)\n\n if parser.engine == "pyarrow":\n df = parser.read_csv(\n buf,\n )\n else:\n df = parser.read_csv_check_warnings(\n warning_type,\n r"Columns \(0\) have mixed types. 
"\n "Specify dtype option on import or set low_memory=False.",\n buf,\n )\n if parser.engine == "c" and parser.low_memory:\n assert df.a.dtype == object\n elif using_infer_string:\n assert df.a.dtype == "str"\n else:\n assert df.a.dtype == object\n\n\n@pytest.mark.parametrize("iterator", [True, False])\ndef test_empty_with_nrows_chunksize(all_parsers, iterator):\n # see gh-9535\n parser = all_parsers\n expected = DataFrame(columns=["foo", "bar"])\n\n nrows = 10\n data = StringIO("foo,bar\n")\n\n if parser.engine == "pyarrow":\n msg = (\n "The '(nrows|chunksize)' option is not supported with the 'pyarrow' engine"\n )\n with pytest.raises(ValueError, match=msg):\n if iterator:\n with parser.read_csv(data, chunksize=nrows) as reader:\n next(iter(reader))\n else:\n parser.read_csv(data, nrows=nrows)\n return\n\n if iterator:\n with parser.read_csv(data, chunksize=nrows) as reader:\n result = next(iter(reader))\n else:\n result = parser.read_csv(data, nrows=nrows)\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_memory_growth_chunksize(all_parsers):\n # see gh-24805\n #\n # Let's just make sure that we don't crash\n # as we iteratively process all chunks.\n parser = all_parsers\n\n with tm.ensure_clean() as path:\n with open(path, "w", encoding="utf-8") as f:\n for i in range(1000):\n f.write(str(i) + "\n")\n\n if parser.engine == "pyarrow":\n msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n with parser.read_csv(path, chunksize=20) as result:\n for _ in result:\n pass\n return\n\n with parser.read_csv(path, chunksize=20) as result:\n for _ in result:\n pass\n\n\ndef test_chunksize_with_usecols_second_block_shorter(all_parsers):\n # GH#21211\n parser = all_parsers\n data = """1,2,3,4\n5,6,7,8\n9,10,11\n"""\n\n if parser.engine == "pyarrow":\n msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n 
StringIO(data),\n names=["a", "b"],\n chunksize=2,\n usecols=[0, 1],\n header=None,\n )\n return\n\n result_chunks = parser.read_csv(\n StringIO(data),\n names=["a", "b"],\n chunksize=2,\n usecols=[0, 1],\n header=None,\n )\n\n expected_frames = [\n DataFrame({"a": [1, 5], "b": [2, 6]}),\n DataFrame({"a": [9], "b": [10]}, index=[2]),\n ]\n\n for i, result in enumerate(result_chunks):\n tm.assert_frame_equal(result, expected_frames[i])\n\n\ndef test_chunksize_second_block_shorter(all_parsers):\n # GH#21211\n parser = all_parsers\n data = """a,b,c,d\n1,2,3,4\n5,6,7,8\n9,10,11\n"""\n\n if parser.engine == "pyarrow":\n msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), chunksize=2)\n return\n\n result_chunks = parser.read_csv(StringIO(data), chunksize=2)\n\n expected_frames = [\n DataFrame({"a": [1, 5], "b": [2, 6], "c": [3, 7], "d": [4, 8]}),\n DataFrame({"a": [9], "b": [10], "c": [11], "d": [np.nan]}, index=[2]),\n ]\n\n for i, result in enumerate(result_chunks):\n tm.assert_frame_equal(result, expected_frames[i])\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\test_chunksize.py | test_chunksize.py | Python | 11,287 | 0.95 | 0.099476 | 0.051447 | node-utils | 673 | 2024-02-28T02:31:31.665159 | MIT | true | 8f6e1a51cf2dfda5e48d8392fec7b4b4 |
"""\nTests that work on both the Python and C engines but do not have a\nspecific classification into the other test modules.\n"""\nfrom datetime import datetime\nfrom inspect import signature\nfrom io import StringIO\nimport os\nfrom pathlib import Path\nimport sys\n\nimport numpy as np\nimport pytest\n\nfrom pandas._config import using_string_dtype\n\nfrom pandas.compat import HAS_PYARROW\nfrom pandas.errors import (\n EmptyDataError,\n ParserError,\n ParserWarning,\n)\n\nfrom pandas import (\n DataFrame,\n Index,\n Timestamp,\n compat,\n)\nimport pandas._testing as tm\n\nfrom pandas.io.parsers import TextFileReader\nfrom pandas.io.parsers.c_parser_wrapper import CParserWrapper\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n\ndef test_override_set_noconvert_columns():\n # see gh-17351\n #\n # Usecols needs to be sorted in _set_noconvert_columns based\n # on the test_usecols_with_parse_dates test from test_usecols.py\n class MyTextFileReader(TextFileReader):\n def __init__(self) -> None:\n self._currow = 0\n self.squeeze = False\n\n class MyCParserWrapper(CParserWrapper):\n def _set_noconvert_columns(self):\n if self.usecols_dtype == "integer":\n # self.usecols is a set, which is documented as unordered\n # but in practice, a CPython set of integers is sorted.\n # In other implementations this assumption does not hold.\n # The following code simulates a different order, which\n # before GH 17351 would cause the wrong columns to be\n # converted via the parse_dates parameter\n self.usecols = list(self.usecols)\n self.usecols.reverse()\n return CParserWrapper._set_noconvert_columns(self)\n\n data = """a,b,c,d,e\n0,1,2014-01-01,09:00,4\n0,1,2014-01-02,10:00,4"""\n\n parse_dates = [[1, 2]]\n cols = {\n "a": [0, 0],\n "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 
10:00:00")],\n }\n expected = DataFrame(cols, columns=["c_d", "a"])\n\n parser = MyTextFileReader()\n parser.options = {\n "usecols": [0, 2, 3],\n "parse_dates": parse_dates,\n "delimiter": ",",\n }\n parser.engine = "c"\n parser._engine = MyCParserWrapper(StringIO(data), **parser.options)\n\n result = parser.read()\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_local(all_parsers, csv1):\n prefix = "file:///" if compat.is_platform_windows() else "file://"\n parser = all_parsers\n\n fname = prefix + str(os.path.abspath(csv1))\n result = parser.read_csv(fname, index_col=0, parse_dates=True)\n # TODO: make unit check more specific\n if parser.engine == "pyarrow":\n result.index = result.index.as_unit("ns")\n expected = DataFrame(\n [\n [0.980269, 3.685731, -0.364216805298, -1.159738],\n [1.047916, -0.041232, -0.16181208307, 0.212549],\n [0.498581, 0.731168, -0.537677223318, 1.346270],\n [1.120202, 1.567621, 0.00364077397681, 0.675253],\n [-0.487094, 0.571455, -1.6116394093, 0.103469],\n [0.836649, 0.246462, 0.588542635376, 1.062782],\n [-0.157161, 1.340307, 1.1957779562, -1.097007],\n ],\n columns=["A", "B", "C", "D"],\n index=Index(\n [\n datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 5),\n datetime(2000, 1, 6),\n datetime(2000, 1, 7),\n datetime(2000, 1, 10),\n datetime(2000, 1, 11),\n ],\n name="index",\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_1000_sep(all_parsers):\n parser = all_parsers\n data = """A|B|C\n1|2,334|5\n10|13|10.\n"""\n expected = DataFrame({"A": [1, 10], "B": [2334, 13], "C": [5, 10.0]})\n\n if parser.engine == "pyarrow":\n msg = "The 'thousands' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), sep="|", thousands=",")\n return\n\n result = parser.read_csv(StringIO(data), sep="|", thousands=",")\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # ValueError: Found non-unique column index\ndef 
test_unnamed_columns(all_parsers):\n data = """A,B,C,,\n1,2,3,4,5\n6,7,8,9,10\n11,12,13,14,15\n"""\n parser = all_parsers\n expected = DataFrame(\n [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10], [11, 12, 13, 14, 15]],\n dtype=np.int64,\n columns=["A", "B", "C", "Unnamed: 3", "Unnamed: 4"],\n )\n result = parser.read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_csv_mixed_type(all_parsers):\n data = """A,B,C\na,1,2\nb,3,4\nc,4,5\n"""\n parser = all_parsers\n expected = DataFrame({"A": ["a", "b", "c"], "B": [1, 3, 4], "C": [2, 4, 5]})\n result = parser.read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_low_memory_no_rows_with_index(all_parsers):\n # see gh-21141\n parser = all_parsers\n\n if not parser.low_memory:\n pytest.skip("This is a low-memory specific test")\n\n data = """A,B,C\n1,1,1,2\n2,2,3,4\n3,3,4,5\n"""\n\n if parser.engine == "pyarrow":\n msg = "The 'nrows' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)\n return\n\n result = parser.read_csv(StringIO(data), low_memory=True, index_col=0, nrows=0)\n expected = DataFrame(columns=["A", "B", "C"])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_dataframe(all_parsers, csv1):\n parser = all_parsers\n result = parser.read_csv(csv1, index_col=0, parse_dates=True)\n # TODO: make unit check more specific\n if parser.engine == "pyarrow":\n result.index = result.index.as_unit("ns")\n expected = DataFrame(\n [\n [0.980269, 3.685731, -0.364216805298, -1.159738],\n [1.047916, -0.041232, -0.16181208307, 0.212549],\n [0.498581, 0.731168, -0.537677223318, 1.346270],\n [1.120202, 1.567621, 0.00364077397681, 0.675253],\n [-0.487094, 0.571455, -1.6116394093, 0.103469],\n [0.836649, 0.246462, 0.588542635376, 1.062782],\n [-0.157161, 1.340307, 1.1957779562, -1.097007],\n ],\n columns=["A", "B", "C", "D"],\n index=Index(\n [\n 
datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 5),\n datetime(2000, 1, 6),\n datetime(2000, 1, 7),\n datetime(2000, 1, 10),\n datetime(2000, 1, 11),\n ],\n name="index",\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("nrows", [3, 3.0])\ndef test_read_nrows(all_parsers, nrows):\n # see gh-10476\n data = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n"""\n expected = DataFrame(\n [["foo", 2, 3, 4, 5], ["bar", 7, 8, 9, 10], ["baz", 12, 13, 14, 15]],\n columns=["index", "A", "B", "C", "D"],\n )\n parser = all_parsers\n\n if parser.engine == "pyarrow":\n msg = "The 'nrows' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), nrows=nrows)\n return\n\n result = parser.read_csv(StringIO(data), nrows=nrows)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("nrows", [1.2, "foo", -1])\ndef test_read_nrows_bad(all_parsers, nrows):\n data = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n"""\n msg = r"'nrows' must be an integer >=0"\n parser = all_parsers\n if parser.engine == "pyarrow":\n msg = "The 'nrows' option is not supported with the 'pyarrow' engine"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), nrows=nrows)\n\n\ndef test_nrows_skipfooter_errors(all_parsers):\n msg = "'skipfooter' not supported with 'nrows'"\n data = "a\n1\n2\n3\n4\n5\n6"\n parser = all_parsers\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), skipfooter=1, nrows=5)\n\n\n@skip_pyarrow\ndef test_missing_trailing_delimiters(all_parsers):\n parser = all_parsers\n data = """A,B,C,D\n1,2,3,4\n1,3,3,\n1,4,5"""\n\n result = parser.read_csv(StringIO(data))\n expected = DataFrame(\n [[1, 2, 3, 4], [1, 3, 3, np.nan], [1, 4, 5, np.nan]],\n columns=["A", "B", "C", 
"D"],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_skip_initial_space(all_parsers):\n data = (\n '"09-Apr-2012", "01:10:18.300", 2456026.548822908, 12849, '\n "1.00361, 1.12551, 330.65659, 0355626618.16711, 73.48821, "\n "314.11625, 1917.09447, 179.71425, 80.000, 240.000, -350, "\n "70.06056, 344.98370, 1, 1, -0.689265, -0.692787, "\n "0.212036, 14.7674, 41.605, -9999.0, -9999.0, "\n "-9999.0, -9999.0, -9999.0, -9999.0, 000, 012, 128"\n )\n parser = all_parsers\n\n if parser.engine == "pyarrow":\n msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(data),\n names=list(range(33)),\n header=None,\n na_values=["-9999.0"],\n skipinitialspace=True,\n )\n return\n\n result = parser.read_csv(\n StringIO(data),\n names=list(range(33)),\n header=None,\n na_values=["-9999.0"],\n skipinitialspace=True,\n )\n expected = DataFrame(\n [\n [\n "09-Apr-2012",\n "01:10:18.300",\n 2456026.548822908,\n 12849,\n 1.00361,\n 1.12551,\n 330.65659,\n 355626618.16711,\n 73.48821,\n 314.11625,\n 1917.09447,\n 179.71425,\n 80.0,\n 240.0,\n -350,\n 70.06056,\n 344.9837,\n 1,\n 1,\n -0.689265,\n -0.692787,\n 0.212036,\n 14.7674,\n 41.605,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n np.nan,\n 0,\n 12,\n 128,\n ]\n ]\n )\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\ndef test_trailing_delimiters(all_parsers):\n # see gh-2442\n data = """A,B,C\n1,2,3,\n4,5,6,\n7,8,9,"""\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=False)\n\n expected = DataFrame({"A": [1, 4, 7], "B": [2, 5, 8], "C": [3, 6, 9]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_escapechar(all_parsers):\n # https://stackoverflow.com/questions/13824840/feature-request-for-\n # pandas-read-csv\n data = '''SEARCH_TERM,ACTUAL_URL\n"bra tv 
board","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"\n"tv p\xc3\xa5 hjul","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"\n"SLAGBORD, \\"Bergslagen\\", IKEA:s 1700-tals series","http://www.ikea.com/se/sv/catalog/categories/departments/living_room/10475/?se%7cps%7cnonbranded%7cvardagsrum%7cgoogle%7ctv_bord"'''\n\n parser = all_parsers\n result = parser.read_csv(\n StringIO(data), escapechar="\\", quotechar='"', encoding="utf-8"\n )\n\n assert result["SEARCH_TERM"][2] == 'SLAGBORD, "Bergslagen", IKEA:s 1700-tals series'\n\n tm.assert_index_equal(result.columns, Index(["SEARCH_TERM", "ACTUAL_URL"]))\n\n\ndef test_ignore_leading_whitespace(all_parsers):\n # see gh-3374, gh-6607\n parser = all_parsers\n data = " a b c\n 1 2 3\n 4 5 6\n 7 8 9"\n\n if parser.engine == "pyarrow":\n msg = "the 'pyarrow' engine does not support regex separators"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), sep=r"\s+")\n return\n result = parser.read_csv(StringIO(data), sep=r"\s+")\n\n expected = DataFrame({"a": [1, 4, 7], "b": [2, 5, 8], "c": [3, 6, 9]})\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\n@pytest.mark.parametrize("usecols", [None, [0, 1], ["a", "b"]])\ndef test_uneven_lines_with_usecols(all_parsers, usecols):\n # see gh-12203\n parser = all_parsers\n data = r"""a,b,c\n0,1,2\n3,4,5,6,7\n8,9,10"""\n\n if usecols is None:\n # Make sure that an error is still raised\n # when the "usecols" parameter is not provided.\n msg = r"Expected \d+ fields in line \d+, saw \d+"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data))\n else:\n expected = DataFrame({"a": [0, 3, 8], "b": [1, 4, 9]})\n\n result = parser.read_csv(StringIO(data), usecols=usecols)\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\n@pytest.mark.parametrize(\n 
"data,kwargs,expected",\n [\n # First, check to see that the response of parser when faced with no\n # provided columns raises the correct error, with or without usecols.\n ("", {}, None),\n ("", {"usecols": ["X"]}, None),\n (\n ",,",\n {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},\n DataFrame(columns=["X"], index=[0], dtype=np.float64),\n ),\n (\n "",\n {"names": ["Dummy", "X", "Dummy_2"], "usecols": ["X"]},\n DataFrame(columns=["X"]),\n ),\n ],\n)\ndef test_read_empty_with_usecols(all_parsers, data, kwargs, expected):\n # see gh-12493\n parser = all_parsers\n\n if expected is None:\n msg = "No columns to parse from file"\n with pytest.raises(EmptyDataError, match=msg):\n parser.read_csv(StringIO(data), **kwargs)\n else:\n result = parser.read_csv(StringIO(data), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "kwargs,expected",\n [\n # gh-8661, gh-8679: this should ignore six lines, including\n # lines with trailing whitespace and blank lines.\n (\n {\n "header": None,\n "delim_whitespace": True,\n "skiprows": [0, 1, 2, 3, 5, 6],\n "skip_blank_lines": True,\n },\n DataFrame([[1.0, 2.0, 4.0], [5.1, np.nan, 10.0]]),\n ),\n # gh-8983: test skipping set of rows after a row with trailing spaces.\n (\n {\n "delim_whitespace": True,\n "skiprows": [1, 2, 3, 5, 6],\n "skip_blank_lines": True,\n },\n DataFrame({"A": [1.0, 5.1], "B": [2.0, np.nan], "C": [4.0, 10]}),\n ),\n ],\n)\ndef test_trailing_spaces(all_parsers, kwargs, expected):\n data = "A B C \nrandom line with trailing spaces \nskip\n1,2,3\n1,2.,4.\nrandom line with trailing tabs\t\t\t\n \n5.1,NaN,10.0\n" # noqa: E501\n parser = all_parsers\n\n depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"\n\n if parser.engine == "pyarrow":\n msg = "The 'delim_whitespace' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n 
):\n parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)\n return\n\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(StringIO(data.replace(",", " ")), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_raise_on_sep_with_delim_whitespace(all_parsers):\n # see gh-6607\n data = "a b c\n1 2 3"\n parser = all_parsers\n\n depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"\n with pytest.raises(ValueError, match="you can only specify one"):\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n parser.read_csv(StringIO(data), sep=r"\s", delim_whitespace=True)\n\n\ndef test_read_filepath_or_buffer(all_parsers):\n # see gh-43366\n parser = all_parsers\n\n with pytest.raises(TypeError, match="Expected file path name or file-like"):\n parser.read_csv(filepath_or_buffer=b"input")\n\n\n@pytest.mark.parametrize("delim_whitespace", [True, False])\ndef test_single_char_leading_whitespace(all_parsers, delim_whitespace):\n # see gh-9710\n parser = all_parsers\n data = """\\nMyColumn\na\nb\na\nb\n"""\n\n expected = DataFrame({"MyColumn": list("abab")})\n depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"\n\n if parser.engine == "pyarrow":\n msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n parser.read_csv(\n StringIO(data),\n skipinitialspace=True,\n delim_whitespace=delim_whitespace,\n )\n return\n\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(\n StringIO(data), skipinitialspace=True, delim_whitespace=delim_whitespace\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "sep,skip_blank_lines,exp_data",\n [\n (",", True, [[1.0, 2.0, 
4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),\n (r"\s+", True, [[1.0, 2.0, 4.0], [5.0, np.nan, 10.0], [-70.0, 0.4, 1.0]]),\n (\n ",",\n False,\n [\n [1.0, 2.0, 4.0],\n [np.nan, np.nan, np.nan],\n [np.nan, np.nan, np.nan],\n [5.0, np.nan, 10.0],\n [np.nan, np.nan, np.nan],\n [-70.0, 0.4, 1.0],\n ],\n ),\n ],\n)\ndef test_empty_lines(all_parsers, sep, skip_blank_lines, exp_data, request):\n parser = all_parsers\n data = """\\nA,B,C\n1,2.,4.\n\n\n5.,NaN,10.0\n\n-70,.4,1\n"""\n\n if sep == r"\s+":\n data = data.replace(",", " ")\n\n if parser.engine == "pyarrow":\n msg = "the 'pyarrow' engine does not support regex separators"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines\n )\n return\n\n result = parser.read_csv(StringIO(data), sep=sep, skip_blank_lines=skip_blank_lines)\n expected = DataFrame(exp_data, columns=["A", "B", "C"])\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\ndef test_whitespace_lines(all_parsers):\n parser = all_parsers\n data = """\n\n\t \t\t\n\t\nA,B,C\n\t 1,2.,4.\n5.,NaN,10.0\n"""\n expected = DataFrame([[1, 2.0, 4.0], [5.0, np.nan, 10.0]], columns=["A", "B", "C"])\n result = parser.read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data,expected",\n [\n (\n """ A B C D\na 1 2 3 4\nb 1 2 3 4\nc 1 2 3 4\n""",\n DataFrame(\n [[1, 2, 3, 4], [1, 2, 3, 4], [1, 2, 3, 4]],\n columns=["A", "B", "C", "D"],\n index=["a", "b", "c"],\n ),\n ),\n (\n " a b c\n1 2 3 \n4 5 6\n 7 8 9",\n DataFrame([[1, 2, 3], [4, 5, 6], [7, 8, 9]], columns=["a", "b", "c"]),\n ),\n ],\n)\ndef test_whitespace_regex_separator(all_parsers, data, expected):\n # see gh-6607\n parser = all_parsers\n if parser.engine == "pyarrow":\n msg = "the 'pyarrow' engine does not support regex separators"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), sep=r"\s+")\n return\n\n result = parser.read_csv(StringIO(data), 
sep=r"\s+")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_sub_character(all_parsers, csv_dir_path):\n # see gh-16893\n filename = os.path.join(csv_dir_path, "sub_char.csv")\n expected = DataFrame([[1, 2, 3]], columns=["a", "\x1ab", "c"])\n\n parser = all_parsers\n result = parser.read_csv(filename)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("filename", ["sé-es-vé.csv", "ru-sй.csv", "中文文件名.csv"])\ndef test_filename_with_special_chars(all_parsers, filename):\n # see gh-15086.\n parser = all_parsers\n df = DataFrame({"a": [1, 2, 3]})\n\n with tm.ensure_clean(filename) as path:\n df.to_csv(path, index=False)\n\n result = parser.read_csv(path)\n tm.assert_frame_equal(result, df)\n\n\ndef test_read_table_same_signature_as_read_csv(all_parsers):\n # GH-34976\n parser = all_parsers\n\n table_sign = signature(parser.read_table)\n csv_sign = signature(parser.read_csv)\n\n assert table_sign.parameters.keys() == csv_sign.parameters.keys()\n assert table_sign.return_annotation == csv_sign.return_annotation\n\n for key, csv_param in csv_sign.parameters.items():\n table_param = table_sign.parameters[key]\n if key == "sep":\n assert csv_param.default == ","\n assert table_param.default == "\t"\n assert table_param.annotation == csv_param.annotation\n assert table_param.kind == csv_param.kind\n continue\n\n assert table_param == csv_param\n\n\ndef test_read_table_equivalency_to_read_csv(all_parsers):\n # see gh-21948\n # As of 0.25.0, read_table is undeprecated\n parser = all_parsers\n data = "a\tb\n1\t2\n3\t4"\n expected = parser.read_csv(StringIO(data), sep="\t")\n result = parser.read_table(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("read_func", ["read_csv", "read_table"])\ndef test_read_csv_and_table_sys_setprofile(all_parsers, read_func):\n # GH#41069\n parser = all_parsers\n data = "a b\n0 1"\n\n sys.setprofile(lambda *a, **k: None)\n result = getattr(parser, read_func)(StringIO(data))\n 
sys.setprofile(None)\n\n expected = DataFrame({"a b": ["0 1"]})\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\ndef test_first_row_bom(all_parsers):\n # see gh-26545\n parser = all_parsers\n data = '''\ufeff"Head1"\t"Head2"\t"Head3"'''\n\n result = parser.read_csv(StringIO(data), delimiter="\t")\n expected = DataFrame(columns=["Head1", "Head2", "Head3"])\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\ndef test_first_row_bom_unquoted(all_parsers):\n # see gh-36343\n parser = all_parsers\n data = """\ufeffHead1\tHead2\tHead3"""\n\n result = parser.read_csv(StringIO(data), delimiter="\t")\n expected = DataFrame(columns=["Head1", "Head2", "Head3"])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("nrows", range(1, 6))\ndef test_blank_lines_between_header_and_data_rows(all_parsers, nrows):\n # GH 28071\n ref = DataFrame(\n [[np.nan, np.nan], [np.nan, np.nan], [1, 2], [np.nan, np.nan], [3, 4]],\n columns=list("ab"),\n )\n csv = "\nheader\n\na,b\n\n\n1,2\n\n3,4"\n parser = all_parsers\n\n if parser.engine == "pyarrow":\n msg = "The 'nrows' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False\n )\n return\n\n df = parser.read_csv(StringIO(csv), header=3, nrows=nrows, skip_blank_lines=False)\n tm.assert_frame_equal(df, ref[:nrows])\n\n\n@skip_pyarrow\ndef test_no_header_two_extra_columns(all_parsers):\n # GH 26218\n column_names = ["one", "two", "three"]\n ref = DataFrame([["foo", "bar", "baz"]], columns=column_names)\n stream = StringIO("foo,bar,baz,bam,blah")\n parser = all_parsers\n df = parser.read_csv_check_warnings(\n ParserWarning,\n "Length of header or names does not match length of data. 
"\n "This leads to a loss of data with index_col=False.",\n stream,\n header=None,\n names=column_names,\n index_col=False,\n )\n tm.assert_frame_equal(df, ref)\n\n\ndef test_read_csv_names_not_accepting_sets(all_parsers):\n # GH 34946\n data = """\\n 1,2,3\n 4,5,6\n"""\n parser = all_parsers\n with pytest.raises(ValueError, match="Names should be an ordered collection."):\n parser.read_csv(StringIO(data), names=set("QAZ"))\n\n\ndef test_read_table_delim_whitespace_default_sep(all_parsers):\n # GH: 35958\n f = StringIO("a b c\n1 -2 -3\n4 5 6")\n parser = all_parsers\n\n depr_msg = "The 'delim_whitespace' keyword in pd.read_table is deprecated"\n\n if parser.engine == "pyarrow":\n msg = "The 'delim_whitespace' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n parser.read_table(f, delim_whitespace=True)\n return\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_table(f, delim_whitespace=True)\n expected = DataFrame({"a": [1, 4], "b": [-2, 5], "c": [-3, 6]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("delimiter", [",", "\t"])\ndef test_read_csv_delim_whitespace_non_default_sep(all_parsers, delimiter):\n # GH: 35958\n f = StringIO("a b c\n1 -2 -3\n4 5 6")\n parser = all_parsers\n msg = (\n "Specified a delimiter with both sep and "\n "delim_whitespace=True; you can only specify one."\n )\n depr_msg = "The 'delim_whitespace' keyword in pd.read_csv is deprecated"\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(f, delim_whitespace=True, sep=delimiter)\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(f, delim_whitespace=True, delimiter=delimiter)\n\n\ndef 
test_read_csv_delimiter_and_sep_no_default(all_parsers):\n # GH#39823\n f = StringIO("a,b\n1,2")\n parser = all_parsers\n msg = "Specified a sep and a delimiter; you can only specify one."\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(f, sep=" ", delimiter=".")\n\n\n@pytest.mark.parametrize("kwargs", [{"delimiter": "\n"}, {"sep": "\n"}])\ndef test_read_csv_line_break_as_separator(kwargs, all_parsers):\n # GH#43528\n parser = all_parsers\n data = """a,b,c\n1,2,3\n """\n msg = (\n r"Specified \\n as separator or delimiter. This forces the python engine "\n r"which does not accept a line terminator. Hence it is not allowed to use "\n r"the line terminator as separator."\n )\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), **kwargs)\n\n\n@pytest.mark.parametrize("delimiter", [",", "\t"])\ndef test_read_table_delim_whitespace_non_default_sep(all_parsers, delimiter):\n # GH: 35958\n f = StringIO("a b c\n1 -2 -3\n4 5 6")\n parser = all_parsers\n msg = (\n "Specified a delimiter with both sep and "\n "delim_whitespace=True; you can only specify one."\n )\n depr_msg = "The 'delim_whitespace' keyword in pd.read_table is deprecated"\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n with pytest.raises(ValueError, match=msg):\n parser.read_table(f, delim_whitespace=True, sep=delimiter)\n\n with pytest.raises(ValueError, match=msg):\n parser.read_table(f, delim_whitespace=True, delimiter=delimiter)\n\n\n@skip_pyarrow\ndef test_dict_keys_as_names(all_parsers):\n # GH: 36928\n data = "1,2"\n\n keys = {"a": int, "b": int}.keys()\n parser = all_parsers\n\n result = parser.read_csv(StringIO(data), names=keys)\n expected = DataFrame({"a": [1], "b": [2]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.xfail(using_string_dtype() and HAS_PYARROW, reason="TODO(infer_string)")\n@xfail_pyarrow # UnicodeDecodeError: 'utf-8' codec can't decode byte 0xed in position 0\ndef 
test_encoding_surrogatepass(all_parsers):\n # GH39017\n parser = all_parsers\n content = b"\xed\xbd\xbf"\n decoded = content.decode("utf-8", errors="surrogatepass")\n expected = DataFrame({decoded: [decoded]}, index=[decoded * 2])\n expected.index.name = decoded * 2\n\n with tm.ensure_clean() as path:\n Path(path).write_bytes(\n content * 2 + b"," + content + b"\n" + content * 2 + b"," + content\n )\n df = parser.read_csv(path, encoding_errors="surrogatepass", index_col=0)\n tm.assert_frame_equal(df, expected)\n with pytest.raises(UnicodeDecodeError, match="'utf-8' codec can't decode byte"):\n parser.read_csv(path)\n\n\ndef test_malformed_second_line(all_parsers):\n # see GH14782\n parser = all_parsers\n data = "\na\nb\n"\n result = parser.read_csv(StringIO(data), skip_blank_lines=False, header=1)\n expected = DataFrame({"a": ["b"]})\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\ndef test_short_single_line(all_parsers):\n # GH 47566\n parser = all_parsers\n columns = ["a", "b", "c"]\n data = "1,2"\n result = parser.read_csv(StringIO(data), header=None, names=columns)\n expected = DataFrame({"a": [1], "b": [2], "c": [np.nan]})\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # ValueError: Length mismatch: Expected axis has 2 elements\ndef test_short_multi_line(all_parsers):\n # GH 47566\n parser = all_parsers\n columns = ["a", "b", "c"]\n data = "1,2\n1,2"\n result = parser.read_csv(StringIO(data), header=None, names=columns)\n expected = DataFrame({"a": [1, 1], "b": [2, 2], "c": [np.nan, np.nan]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_seek(all_parsers):\n # GH48646\n parser = all_parsers\n prefix = "### DATA\n"\n content = "nkey,value\ntables,rectangular\n"\n with tm.ensure_clean() as path:\n Path(path).write_text(prefix + content, encoding="utf-8")\n with open(path, encoding="utf-8") as file:\n file.readline()\n actual = parser.read_csv(file)\n expected = parser.read_csv(StringIO(content))\n 
tm.assert_frame_equal(actual, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\test_common_basic.py | test_common_basic.py | Python | 31,043 | 0.95 | 0.073245 | 0.064477 | awesome-app | 25 | 2025-01-31T11:13:55.806015 | Apache-2.0 | true | 2a45dcc6379ac1548dc6b0bb3d7f17df |
"""\nTests that work on both the Python and C engines but do not have a\nspecific classification into the other test modules.\n"""\nimport csv\nfrom io import StringIO\n\nimport pytest\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\nfrom pandas.io.parsers import TextParser\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\n\n\n@xfail_pyarrow\ndef test_read_data_list(all_parsers):\n parser = all_parsers\n kwargs = {"index_col": 0}\n data = "A,B,C\nfoo,1,2,3\nbar,4,5,6"\n\n data_list = [["A", "B", "C"], ["foo", "1", "2", "3"], ["bar", "4", "5", "6"]]\n expected = parser.read_csv(StringIO(data), **kwargs)\n\n with TextParser(data_list, chunksize=2, **kwargs) as parser:\n result = parser.read()\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_reader_list(all_parsers):\n data = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n"""\n parser = all_parsers\n kwargs = {"index_col": 0}\n\n lines = list(csv.reader(StringIO(data)))\n with TextParser(lines, chunksize=2, **kwargs) as reader:\n chunks = list(reader)\n\n expected = parser.read_csv(StringIO(data), **kwargs)\n\n tm.assert_frame_equal(chunks[0], expected[:2])\n tm.assert_frame_equal(chunks[1], expected[2:4])\n tm.assert_frame_equal(chunks[2], expected[4:])\n\n\ndef test_reader_list_skiprows(all_parsers):\n data = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n"""\n parser = all_parsers\n kwargs = {"index_col": 0}\n\n lines = list(csv.reader(StringIO(data)))\n with TextParser(lines, chunksize=2, skiprows=[1], **kwargs) as reader:\n chunks = list(reader)\n\n expected = parser.read_csv(StringIO(data), **kwargs)\n\n tm.assert_frame_equal(chunks[0], expected[1:3])\n\n\ndef test_read_csv_parse_simple_list(all_parsers):\n parser = all_parsers\n 
data = """foo\nbar baz\nqux foo\nfoo\nbar"""\n\n result = parser.read_csv(StringIO(data), header=None)\n expected = DataFrame(["foo", "bar baz", "qux foo", "foo", "bar"])\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\test_data_list.py | test_data_list.py | Python | 2,228 | 0.85 | 0.043956 | 0 | vue-tools | 855 | 2023-08-26T13:16:10.196670 | BSD-3-Clause | true | 9db0037547ae8082a43edcd38feba9c8 |
"""\nTests that work on both the Python and C engines but do not have a\nspecific classification into the other test modules.\n"""\nfrom io import StringIO\n\nimport pytest\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\n\n@pytest.mark.parametrize(\n "data,thousands,decimal",\n [\n (\n """A|B|C\n1|2,334.01|5\n10|13|10.\n""",\n ",",\n ".",\n ),\n (\n """A|B|C\n1|2.334,01|5\n10|13|10,\n""",\n ".",\n ",",\n ),\n ],\n)\ndef test_1000_sep_with_decimal(all_parsers, data, thousands, decimal):\n parser = all_parsers\n expected = DataFrame({"A": [1, 10], "B": [2334.01, 13], "C": [5, 10.0]})\n\n if parser.engine == "pyarrow":\n msg = "The 'thousands' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(data), sep="|", thousands=thousands, decimal=decimal\n )\n return\n\n result = parser.read_csv(\n StringIO(data), sep="|", thousands=thousands, decimal=decimal\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_euro_decimal_format(all_parsers):\n parser = all_parsers\n data = """Id;Number1;Number2;Text1;Text2;Number3\n1;1521,1541;187101,9543;ABC;poi;4,738797819\n2;121,12;14897,76;DEF;uyt;0,377320872\n3;878,158;108013,434;GHI;rez;2,735694704"""\n\n result = parser.read_csv(StringIO(data), sep=";", decimal=",")\n expected = DataFrame(\n [\n [1, 1521.1541, 187101.9543, "ABC", "poi", 4.738797819],\n [2, 121.12, 14897.76, "DEF", "uyt", 0.377320872],\n [3, 878.158, 108013.434, "GHI", "rez", 2.735694704],\n ],\n columns=["Id", "Number1", "Number2", "Text1", "Text2", "Number3"],\n )\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\test_decimal.py | test_decimal.py | Python | 1,932 | 0.85 | 0.041667 | 0 | react-lib | 222 | 2025-02-15T09:33:01.308527 | MIT | true | 8bdfc37618d1e4555ba6760e485fd4d3 |
"""\nTests that work on both the Python and C engines but do not have a\nspecific classification into the other test modules.\n"""\nfrom io import (\n BytesIO,\n StringIO,\n)\nimport os\nimport platform\nfrom urllib.error import URLError\nimport uuid\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import (\n EmptyDataError,\n ParserError,\n)\nimport pandas.util._test_decorators as td\n\nfrom pandas import (\n DataFrame,\n Index,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n\n@pytest.mark.network\n@pytest.mark.single_cpu\ndef test_url(all_parsers, csv_dir_path, httpserver):\n parser = all_parsers\n kwargs = {"sep": "\t"}\n\n local_path = os.path.join(csv_dir_path, "salaries.csv")\n with open(local_path, encoding="utf-8") as f:\n httpserver.serve_content(content=f.read())\n\n url_result = parser.read_csv(httpserver.url, **kwargs)\n\n local_result = parser.read_csv(local_path, **kwargs)\n tm.assert_frame_equal(url_result, local_result)\n\n\n@pytest.mark.slow\ndef test_local_file(all_parsers, csv_dir_path):\n parser = all_parsers\n kwargs = {"sep": "\t"}\n\n local_path = os.path.join(csv_dir_path, "salaries.csv")\n local_result = parser.read_csv(local_path, **kwargs)\n url = "file://localhost/" + local_path\n\n try:\n url_result = parser.read_csv(url, **kwargs)\n tm.assert_frame_equal(url_result, local_result)\n except URLError:\n # Fails on some systems.\n pytest.skip("Failing on: " + " ".join(platform.uname()))\n\n\n@xfail_pyarrow # AssertionError: DataFrame.index are different\ndef test_path_path_lib(all_parsers):\n parser = all_parsers\n df = DataFrame(\n 1.1 * np.arange(120).reshape((30, 4)),\n columns=Index(list("ABCD")),\n index=Index([f"i-{i}" for i in range(30)]),\n )\n result = tm.round_trip_pathlib(df.to_csv, lambda p: 
parser.read_csv(p, index_col=0))\n tm.assert_frame_equal(df, result)\n\n\n@xfail_pyarrow # AssertionError: DataFrame.index are different\ndef test_path_local_path(all_parsers):\n parser = all_parsers\n df = DataFrame(\n 1.1 * np.arange(120).reshape((30, 4)),\n columns=Index(list("ABCD")),\n index=Index([f"i-{i}" for i in range(30)]),\n )\n result = tm.round_trip_localpath(\n df.to_csv, lambda p: parser.read_csv(p, index_col=0)\n )\n tm.assert_frame_equal(df, result)\n\n\ndef test_nonexistent_path(all_parsers):\n # gh-2428: pls no segfault\n # gh-14086: raise more helpful FileNotFoundError\n # GH#29233 "File foo" instead of "File b'foo'"\n parser = all_parsers\n path = f"{uuid.uuid4()}.csv"\n\n msg = r"\[Errno 2\]"\n with pytest.raises(FileNotFoundError, match=msg) as e:\n parser.read_csv(path)\n assert path == e.value.filename\n\n\n@td.skip_if_windows # os.chmod does not work in windows\ndef test_no_permission(all_parsers):\n # GH 23784\n parser = all_parsers\n\n msg = r"\[Errno 13\]"\n with tm.ensure_clean() as path:\n os.chmod(path, 0) # make file unreadable\n\n # verify that this process cannot open the file (not running as sudo)\n try:\n with open(path, encoding="utf-8"):\n pass\n pytest.skip("Running as sudo.")\n except PermissionError:\n pass\n\n with pytest.raises(PermissionError, match=msg) as e:\n parser.read_csv(path)\n assert path == e.value.filename\n\n\n@pytest.mark.parametrize(\n "data,kwargs,expected,msg",\n [\n # gh-10728: WHITESPACE_LINE\n (\n "a,b,c\n4,5,6\n ",\n {},\n DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),\n None,\n ),\n # gh-10548: EAT_LINE_COMMENT\n (\n "a,b,c\n4,5,6\n#comment",\n {"comment": "#"},\n DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),\n None,\n ),\n # EAT_CRNL_NOP\n (\n "a,b,c\n4,5,6\n\r",\n {},\n DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),\n None,\n ),\n # EAT_COMMENT\n (\n "a,b,c\n4,5,6#comment",\n {"comment": "#"},\n DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),\n None,\n ),\n # SKIP_LINE\n (\n 
"a,b,c\n4,5,6\nskipme",\n {"skiprows": [2]},\n DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),\n None,\n ),\n # EAT_LINE_COMMENT\n (\n "a,b,c\n4,5,6\n#comment",\n {"comment": "#", "skip_blank_lines": False},\n DataFrame([[4, 5, 6]], columns=["a", "b", "c"]),\n None,\n ),\n # IN_FIELD\n (\n "a,b,c\n4,5,6\n ",\n {"skip_blank_lines": False},\n DataFrame([["4", 5, 6], [" ", None, None]], columns=["a", "b", "c"]),\n None,\n ),\n # EAT_CRNL\n (\n "a,b,c\n4,5,6\n\r",\n {"skip_blank_lines": False},\n DataFrame([[4, 5, 6], [None, None, None]], columns=["a", "b", "c"]),\n None,\n ),\n # ESCAPED_CHAR\n (\n "a,b,c\n4,5,6\n\\",\n {"escapechar": "\\"},\n None,\n "(EOF following escape character)|(unexpected end of data)",\n ),\n # ESCAPE_IN_QUOTED_FIELD\n (\n 'a,b,c\n4,5,6\n"\\',\n {"escapechar": "\\"},\n None,\n "(EOF inside string starting at row 2)|(unexpected end of data)",\n ),\n # IN_QUOTED_FIELD\n (\n 'a,b,c\n4,5,6\n"',\n {"escapechar": "\\"},\n None,\n "(EOF inside string starting at row 2)|(unexpected end of data)",\n ),\n ],\n ids=[\n "whitespace-line",\n "eat-line-comment",\n "eat-crnl-nop",\n "eat-comment",\n "skip-line",\n "eat-line-comment",\n "in-field",\n "eat-crnl",\n "escaped-char",\n "escape-in-quoted-field",\n "in-quoted-field",\n ],\n)\ndef test_eof_states(all_parsers, data, kwargs, expected, msg, request):\n # see gh-10728, gh-10548\n parser = all_parsers\n\n if parser.engine == "pyarrow" and "comment" in kwargs:\n msg = "The 'comment' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), **kwargs)\n return\n\n if parser.engine == "pyarrow" and "\r" not in data:\n # pandas.errors.ParserError: CSV parse error: Expected 3 columns, got 1:\n # ValueError: skiprows argument must be an integer when using engine='pyarrow'\n # AssertionError: Regex pattern did not match.\n pytest.skip(reason="https://github.com/apache/arrow/issues/38676")\n\n if expected is None:\n with 
pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), **kwargs)\n else:\n result = parser.read_csv(StringIO(data), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_temporary_file(all_parsers):\n # see gh-13398\n parser = all_parsers\n data = "0 0"\n\n with tm.ensure_clean(mode="w+", return_filelike=True) as new_file:\n new_file.write(data)\n new_file.flush()\n new_file.seek(0)\n\n if parser.engine == "pyarrow":\n msg = "the 'pyarrow' engine does not support regex separators"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(new_file, sep=r"\s+", header=None)\n return\n\n result = parser.read_csv(new_file, sep=r"\s+", header=None)\n\n expected = DataFrame([[0, 0]])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_internal_eof_byte(all_parsers):\n # see gh-5500\n parser = all_parsers\n data = "a,b\n1\x1a,2"\n\n expected = DataFrame([["1\x1a", 2]], columns=["a", "b"])\n result = parser.read_csv(StringIO(data))\n tm.assert_frame_equal(result, expected)\n\n\ndef test_internal_eof_byte_to_file(all_parsers):\n # see gh-16559\n parser = all_parsers\n data = b'c1,c2\r\n"test \x1a test", test\r\n'\n expected = DataFrame([["test \x1a test", " test"]], columns=["c1", "c2"])\n path = f"__{uuid.uuid4()}__.csv"\n\n with tm.ensure_clean(path) as path:\n with open(path, "wb") as f:\n f.write(data)\n\n result = parser.read_csv(path)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_file_handle_string_io(all_parsers):\n # gh-14418\n #\n # Don't close user provided file handles.\n parser = all_parsers\n data = "a,b\n1,2"\n\n fh = StringIO(data)\n parser.read_csv(fh)\n assert not fh.closed\n\n\ndef test_file_handles_with_open(all_parsers, csv1):\n # gh-14418\n #\n # Don't close user provided file handles.\n parser = all_parsers\n\n for mode in ["r", "rb"]:\n with open(csv1, mode, encoding="utf-8" if mode == "r" else None) as f:\n parser.read_csv(f)\n assert not f.closed\n\n\ndef test_invalid_file_buffer_class(all_parsers):\n 
# see gh-15337\n class InvalidBuffer:\n pass\n\n parser = all_parsers\n msg = "Invalid file path or buffer object type"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(InvalidBuffer())\n\n\ndef test_invalid_file_buffer_mock(all_parsers):\n # see gh-15337\n parser = all_parsers\n msg = "Invalid file path or buffer object type"\n\n class Foo:\n pass\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(Foo())\n\n\ndef test_valid_file_buffer_seems_invalid(all_parsers):\n # gh-16135: we want to ensure that "tell" and "seek"\n # aren't actually being used when we call `read_csv`\n #\n # Thus, while the object may look "invalid" (these\n # methods are attributes of the `StringIO` class),\n # it is still a valid file-object for our purposes.\n class NoSeekTellBuffer(StringIO):\n def tell(self):\n raise AttributeError("No tell method")\n\n def seek(self, pos, whence=0):\n raise AttributeError("No seek method")\n\n data = "a\n1"\n parser = all_parsers\n expected = DataFrame({"a": [1]})\n\n result = parser.read_csv(NoSeekTellBuffer(data))\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("io_class", [StringIO, BytesIO])\n@pytest.mark.parametrize("encoding", [None, "utf-8"])\ndef test_read_csv_file_handle(all_parsers, io_class, encoding):\n """\n Test whether read_csv does not close user-provided file handles.\n\n GH 36980\n """\n parser = all_parsers\n expected = DataFrame({"a": [1], "b": [2]})\n\n content = "a,b\n1,2"\n handle = io_class(content.encode("utf-8") if io_class == BytesIO else content)\n\n tm.assert_frame_equal(parser.read_csv(handle, encoding=encoding), expected)\n assert not handle.closed\n\n\ndef test_memory_map_compression(all_parsers, compression):\n """\n Support memory map for compressed files.\n\n GH 37621\n """\n parser = all_parsers\n expected = DataFrame({"a": [1], "b": [2]})\n\n with tm.ensure_clean() as path:\n expected.to_csv(path, index=False, compression=compression)\n\n if parser.engine == 
"pyarrow":\n msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(path, memory_map=True, compression=compression)\n return\n\n result = parser.read_csv(path, memory_map=True, compression=compression)\n\n tm.assert_frame_equal(\n result,\n expected,\n )\n\n\ndef test_context_manager(all_parsers, datapath):\n # make sure that opened files are closed\n parser = all_parsers\n\n path = datapath("io", "data", "csv", "iris.csv")\n\n if parser.engine == "pyarrow":\n msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(path, chunksize=1)\n return\n\n reader = parser.read_csv(path, chunksize=1)\n assert not reader.handles.handle.closed\n try:\n with reader:\n next(reader)\n assert False\n except AssertionError:\n assert reader.handles.handle.closed\n\n\ndef test_context_manageri_user_provided(all_parsers, datapath):\n # make sure that user-provided handles are not closed\n parser = all_parsers\n\n with open(datapath("io", "data", "csv", "iris.csv"), encoding="utf-8") as path:\n if parser.engine == "pyarrow":\n msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(path, chunksize=1)\n return\n\n reader = parser.read_csv(path, chunksize=1)\n assert not reader.handles.handle.closed\n try:\n with reader:\n next(reader)\n assert False\n except AssertionError:\n assert not reader.handles.handle.closed\n\n\n@skip_pyarrow # ParserError: Empty CSV file\ndef test_file_descriptor_leak(all_parsers, using_copy_on_write):\n # GH 31488\n parser = all_parsers\n with tm.ensure_clean() as path:\n with pytest.raises(EmptyDataError, match="No columns to parse from file"):\n parser.read_csv(path)\n\n\ndef test_memory_map(all_parsers, csv_dir_path):\n mmap_file = os.path.join(csv_dir_path, "test_mmap.csv")\n parser = all_parsers\n\n expected = 
DataFrame(\n {"a": [1, 2, 3], "b": ["one", "two", "three"], "c": ["I", "II", "III"]}\n )\n\n if parser.engine == "pyarrow":\n msg = "The 'memory_map' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(mmap_file, memory_map=True)\n return\n\n result = parser.read_csv(mmap_file, memory_map=True)\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\test_file_buffer_url.py | test_file_buffer_url.py | Python | 13,951 | 0.95 | 0.098326 | 0.105943 | react-lib | 278 | 2024-02-23T13:52:13.594234 | MIT | true | 93201c6f346a934743a8f04212fb9faa |
"""\nTests that work on both the Python and C engines but do not have a\nspecific classification into the other test modules.\n"""\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import is_platform_linux\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n\n@skip_pyarrow # ParserError: CSV parse error: Empty CSV file or block\ndef test_float_parser(all_parsers):\n # see gh-9565\n parser = all_parsers\n data = "45e-1,4.5,45.,inf,-inf"\n result = parser.read_csv(StringIO(data), header=None)\n\n expected = DataFrame([[float(s) for s in data.split(",")]])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_scientific_no_exponent(all_parsers_all_precisions):\n # see gh-12215\n df = DataFrame.from_dict({"w": ["2e"], "x": ["3E"], "y": ["42e"], "z": ["632E"]})\n data = df.to_csv(index=False)\n parser, precision = all_parsers_all_precisions\n\n df_roundtrip = parser.read_csv(StringIO(data), float_precision=precision)\n tm.assert_frame_equal(df_roundtrip, df)\n\n\n@pytest.mark.parametrize(\n "neg_exp",\n [\n -617,\n -100000,\n pytest.param(-99999999999999999, marks=pytest.mark.skip_ubsan),\n ],\n)\ndef test_very_negative_exponent(all_parsers_all_precisions, neg_exp):\n # GH#38753\n parser, precision = all_parsers_all_precisions\n\n data = f"data\n10E{neg_exp}"\n result = parser.read_csv(StringIO(data), float_precision=precision)\n expected = DataFrame({"data": [0.0]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.skip_ubsan\n@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different\n@pytest.mark.parametrize("exp", [999999999999999999, -999999999999999999])\ndef test_too_many_exponent_digits(all_parsers_all_precisions, exp, request):\n # GH#38753\n parser, 
precision = all_parsers_all_precisions\n data = f"data\n10E{exp}"\n result = parser.read_csv(StringIO(data), float_precision=precision)\n if precision == "round_trip":\n if exp == 999999999999999999 and is_platform_linux():\n mark = pytest.mark.xfail(reason="GH38794, on Linux gives object result")\n request.applymarker(mark)\n\n value = np.inf if exp > 0 else 0.0\n expected = DataFrame({"data": [value]})\n else:\n expected = DataFrame({"data": [f"10E{exp}"]})\n\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\test_float.py | test_float.py | Python | 2,582 | 0.95 | 0.101266 | 0.064516 | awesome-app | 757 | 2025-03-07T11:32:52.425197 | BSD-3-Clause | true | 1f46a86f1c8d5ee966bd0abd0e30d811 |
"""\nTests that work on both the Python and C engines but do not have a\nspecific classification into the other test modules.\n"""\nfrom datetime import datetime\nfrom io import StringIO\nimport os\n\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Index,\n MultiIndex,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n\n@pytest.mark.parametrize(\n "data,kwargs,expected",\n [\n (\n """foo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n""",\n {"index_col": 0, "names": ["index", "A", "B", "C", "D"]},\n DataFrame(\n [\n [2, 3, 4, 5],\n [7, 8, 9, 10],\n [12, 13, 14, 15],\n [12, 13, 14, 15],\n [12, 13, 14, 15],\n [12, 13, 14, 15],\n ],\n index=Index(["foo", "bar", "baz", "qux", "foo2", "bar2"], name="index"),\n columns=["A", "B", "C", "D"],\n ),\n ),\n (\n """foo,one,2,3,4,5\nfoo,two,7,8,9,10\nfoo,three,12,13,14,15\nbar,one,12,13,14,15\nbar,two,12,13,14,15\n""",\n {"index_col": [0, 1], "names": ["index1", "index2", "A", "B", "C", "D"]},\n DataFrame(\n [\n [2, 3, 4, 5],\n [7, 8, 9, 10],\n [12, 13, 14, 15],\n [12, 13, 14, 15],\n [12, 13, 14, 15],\n ],\n index=MultiIndex.from_tuples(\n [\n ("foo", "one"),\n ("foo", "two"),\n ("foo", "three"),\n ("bar", "one"),\n ("bar", "two"),\n ],\n names=["index1", "index2"],\n ),\n columns=["A", "B", "C", "D"],\n ),\n ),\n ],\n)\ndef test_pass_names_with_index(all_parsers, data, kwargs, expected):\n parser = all_parsers\n result = parser.read_csv(StringIO(data), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("index_col", [[0, 1], [1, 0]])\ndef test_multi_index_no_level_names(\n request, all_parsers, index_col, using_infer_string\n):\n data = 
"""index1,index2,A,B,C,D\nfoo,one,2,3,4,5\nfoo,two,7,8,9,10\nfoo,three,12,13,14,15\nbar,one,12,13,14,15\nbar,two,12,13,14,15\n"""\n headless_data = "\n".join(data.split("\n")[1:])\n\n names = ["A", "B", "C", "D"]\n parser = all_parsers\n\n result = parser.read_csv(\n StringIO(headless_data), index_col=index_col, header=None, names=names\n )\n expected = parser.read_csv(StringIO(data), index_col=index_col)\n\n # No index names in headless data.\n expected.index.names = [None] * 2\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\ndef test_multi_index_no_level_names_implicit(all_parsers):\n parser = all_parsers\n data = """A,B,C,D\nfoo,one,2,3,4,5\nfoo,two,7,8,9,10\nfoo,three,12,13,14,15\nbar,one,12,13,14,15\nbar,two,12,13,14,15\n"""\n\n result = parser.read_csv(StringIO(data))\n expected = DataFrame(\n [\n [2, 3, 4, 5],\n [7, 8, 9, 10],\n [12, 13, 14, 15],\n [12, 13, 14, 15],\n [12, 13, 14, 15],\n ],\n columns=["A", "B", "C", "D"],\n index=MultiIndex.from_tuples(\n [\n ("foo", "one"),\n ("foo", "two"),\n ("foo", "three"),\n ("bar", "one"),\n ("bar", "two"),\n ]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # TypeError: an integer is required\n@pytest.mark.parametrize(\n "data,expected,header",\n [\n ("a,b", DataFrame(columns=["a", "b"]), [0]),\n (\n "a,b\nc,d",\n DataFrame(columns=MultiIndex.from_tuples([("a", "c"), ("b", "d")])),\n [0, 1],\n ),\n ],\n)\n@pytest.mark.parametrize("round_trip", [True, False])\ndef test_multi_index_blank_df(all_parsers, data, expected, header, round_trip):\n # see gh-14545\n parser = all_parsers\n data = expected.to_csv(index=False) if round_trip else data\n\n result = parser.read_csv(StringIO(data), header=header)\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # AssertionError: DataFrame.columns are different\ndef test_no_unnamed_index(all_parsers):\n parser = all_parsers\n data = """ id c0 c1 c2\n0 1 0 a b\n1 2 0 c d\n2 2 2 e f\n"""\n result = parser.read_csv(StringIO(data), sep=" 
")\n expected = DataFrame(\n [[0, 1, 0, "a", "b"], [1, 2, 0, "c", "d"], [2, 2, 2, "e", "f"]],\n columns=["Unnamed: 0", "id", "c0", "c1", "c2"],\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_duplicate_index_explicit(all_parsers):\n data = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo,12,13,14,15\nbar,12,13,14,15\n"""\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=0)\n\n expected = DataFrame(\n [\n [2, 3, 4, 5],\n [7, 8, 9, 10],\n [12, 13, 14, 15],\n [12, 13, 14, 15],\n [12, 13, 14, 15],\n [12, 13, 14, 15],\n ],\n columns=["A", "B", "C", "D"],\n index=Index(["foo", "bar", "baz", "qux", "foo", "bar"], name="index"),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\ndef test_read_duplicate_index_implicit(all_parsers):\n data = """A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo,12,13,14,15\nbar,12,13,14,15\n"""\n parser = all_parsers\n result = parser.read_csv(StringIO(data))\n\n expected = DataFrame(\n [\n [2, 3, 4, 5],\n [7, 8, 9, 10],\n [12, 13, 14, 15],\n [12, 13, 14, 15],\n [12, 13, 14, 15],\n [12, 13, 14, 15],\n ],\n columns=["A", "B", "C", "D"],\n index=Index(["foo", "bar", "baz", "qux", "foo", "bar"]),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\ndef test_read_csv_no_index_name(all_parsers, csv_dir_path):\n parser = all_parsers\n csv2 = os.path.join(csv_dir_path, "test2.csv")\n result = parser.read_csv(csv2, index_col=0, parse_dates=True)\n\n expected = DataFrame(\n [\n [0.980269, 3.685731, -0.364216805298, -1.159738, "foo"],\n [1.047916, -0.041232, -0.16181208307, 0.212549, "bar"],\n [0.498581, 0.731168, -0.537677223318, 1.346270, "baz"],\n [1.120202, 1.567621, 0.00364077397681, 0.675253, "qux"],\n [-0.487094, 0.571455, -1.6116394093, 0.103469, "foo2"],\n ],\n columns=["A", "B", "C", "D", "E"],\n index=Index(\n [\n datetime(2000, 1, 3),\n datetime(2000, 1, 4),\n datetime(2000, 1, 5),\n datetime(2000, 1, 6),\n 
datetime(2000, 1, 7),\n ]\n ),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow\ndef test_empty_with_index(all_parsers):\n # see gh-10184\n data = "x,y"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=0)\n\n expected = DataFrame(columns=["y"], index=Index([], name="x"))\n tm.assert_frame_equal(result, expected)\n\n\n# CSV parse error: Empty CSV file or block: cannot infer number of columns\n@skip_pyarrow\ndef test_empty_with_multi_index(all_parsers):\n # see gh-10467\n data = "x,y,z"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=["x", "y"])\n\n expected = DataFrame(\n columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["x", "y"])\n )\n tm.assert_frame_equal(result, expected)\n\n\n# CSV parse error: Empty CSV file or block: cannot infer number of columns\n@skip_pyarrow\ndef test_empty_with_reversed_multi_index(all_parsers):\n data = "x,y,z"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), index_col=[1, 0])\n\n expected = DataFrame(\n columns=["z"], index=MultiIndex.from_arrays([[]] * 2, names=["y", "x"])\n )\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\test_index.py | test_index.py | Python | 8,269 | 0.95 | 0.039474 | 0.022472 | awesome-app | 39 | 2025-03-08T00:13:43.818065 | BSD-3-Clause | true | e2a6902c8be6cf26263cce5f39f4300a |
"""\nTests that work on both the Python and C engines but do not have a\nspecific classification into the other test modules.\n"""\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n option_context,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\n\n\n@xfail_pyarrow # AssertionError: DataFrame.index are different\n@pytest.mark.parametrize("na_filter", [True, False])\ndef test_inf_parsing(all_parsers, na_filter):\n parser = all_parsers\n data = """\\n,A\na,inf\nb,-inf\nc,+Inf\nd,-Inf\ne,INF\nf,-INF\ng,+INf\nh,-INf\ni,inF\nj,-inF"""\n expected = DataFrame(\n {"A": [float("inf"), float("-inf")] * 5},\n index=["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"],\n )\n result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # AssertionError: DataFrame.index are different\n@pytest.mark.parametrize("na_filter", [True, False])\ndef test_infinity_parsing(all_parsers, na_filter):\n parser = all_parsers\n data = """\\n,A\na,Infinity\nb,-Infinity\nc,+Infinity\n"""\n expected = DataFrame(\n {"A": [float("infinity"), float("-infinity"), float("+infinity")]},\n index=["a", "b", "c"],\n )\n result = parser.read_csv(StringIO(data), index_col=0, na_filter=na_filter)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_with_use_inf_as_na(all_parsers):\n # https://github.com/pandas-dev/pandas/issues/35493\n parser = all_parsers\n data = "1.0\nNaN\n3.0"\n msg = "use_inf_as_na option is deprecated"\n warn = FutureWarning\n if parser.engine == "pyarrow":\n warn = (FutureWarning, DeprecationWarning)\n\n with tm.assert_produces_warning(warn, match=msg, check_stacklevel=False):\n with option_context("use_inf_as_na", True):\n result = parser.read_csv(StringIO(data), header=None)\n expected = 
DataFrame([1.0, np.nan, 3.0])\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\test_inf.py | test_inf.py | Python | 2,114 | 0.95 | 0.051282 | 0.014925 | react-lib | 589 | 2024-08-12T19:48:13.430440 | MIT | true | a625ae9b5396eb1467cf8bfec9b4159b |
"""\nTests that work on both the Python and C engines but do not have a\nspecific classification into the other test modules.\n"""\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Series,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n\ndef test_int_conversion(all_parsers):\n data = """A,B\n1.0,1\n2.0,2\n3.0,3\n"""\n parser = all_parsers\n result = parser.read_csv(StringIO(data))\n\n expected = DataFrame([[1.0, 1], [2.0, 2], [3.0, 3]], columns=["A", "B"])\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data,kwargs,expected",\n [\n (\n "A,B\nTrue,1\nFalse,2\nTrue,3",\n {},\n DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),\n ),\n (\n "A,B\nYES,1\nno,2\nyes,3\nNo,3\nYes,3",\n {"true_values": ["yes", "Yes", "YES"], "false_values": ["no", "NO", "No"]},\n DataFrame(\n [[True, 1], [False, 2], [True, 3], [False, 3], [True, 3]],\n columns=["A", "B"],\n ),\n ),\n (\n "A,B\nTRUE,1\nFALSE,2\nTRUE,3",\n {},\n DataFrame([[True, 1], [False, 2], [True, 3]], columns=["A", "B"]),\n ),\n (\n "A,B\nfoo,bar\nbar,foo",\n {"true_values": ["foo"], "false_values": ["bar"]},\n DataFrame([[True, False], [False, True]], columns=["A", "B"]),\n ),\n ],\n)\ndef test_parse_bool(all_parsers, data, kwargs, expected):\n parser = all_parsers\n result = parser.read_csv(StringIO(data), **kwargs)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_parse_integers_above_fp_precision(all_parsers):\n data = """Numbers\n17007000002000191\n17007000002000191\n17007000002000191\n17007000002000191\n17007000002000192\n17007000002000192\n17007000002000192\n17007000002000192\n17007000002000192\n17007000002000194"""\n parser = all_parsers\n result = parser.read_csv(StringIO(data))\n expected = 
DataFrame(\n {\n "Numbers": [\n 17007000002000191,\n 17007000002000191,\n 17007000002000191,\n 17007000002000191,\n 17007000002000192,\n 17007000002000192,\n 17007000002000192,\n 17007000002000192,\n 17007000002000192,\n 17007000002000194,\n ]\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("sep", [" ", r"\s+"])\ndef test_integer_overflow_bug(all_parsers, sep):\n # see gh-2601\n data = "65248E10 11\n55555E55 22\n"\n parser = all_parsers\n if parser.engine == "pyarrow" and sep != " ":\n msg = "the 'pyarrow' engine does not support regex separators"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), header=None, sep=sep)\n return\n\n result = parser.read_csv(StringIO(data), header=None, sep=sep)\n expected = DataFrame([[6.5248e14, 11], [5.5555e59, 22]])\n tm.assert_frame_equal(result, expected)\n\n\ndef test_int64_min_issues(all_parsers):\n # see gh-2599\n parser = all_parsers\n data = "A,B\n0,0\n0,"\n result = parser.read_csv(StringIO(data))\n\n expected = DataFrame({"A": [0, 0], "B": [0, np.nan]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("conv", [None, np.int64, np.uint64])\ndef test_int64_overflow(all_parsers, conv, request):\n data = """ID\n00013007854817840016671868\n00013007854817840016749251\n00013007854817840016754630\n00013007854817840016781876\n00013007854817840017028824\n00013007854817840017963235\n00013007854817840018860166"""\n parser = all_parsers\n\n if conv is None:\n # 13007854817840016671868 > UINT64_MAX, so this\n # will overflow and return object as the dtype.\n if parser.engine == "pyarrow":\n mark = pytest.mark.xfail(reason="parses to float64")\n request.applymarker(mark)\n\n result = parser.read_csv(StringIO(data))\n expected = DataFrame(\n [\n "00013007854817840016671868",\n "00013007854817840016749251",\n "00013007854817840016754630",\n "00013007854817840016781876",\n "00013007854817840017028824",\n "00013007854817840017963235",\n 
"00013007854817840018860166",\n ],\n columns=["ID"],\n )\n tm.assert_frame_equal(result, expected)\n else:\n # 13007854817840016671868 > UINT64_MAX, so attempts\n # to cast to either int64 or uint64 will result in\n # an OverflowError being raised.\n msg = "|".join(\n [\n "Python int too large to convert to C long",\n "long too big to convert",\n "int too big to convert",\n ]\n )\n err = OverflowError\n if parser.engine == "pyarrow":\n err = ValueError\n msg = "The 'converters' option is not supported with the 'pyarrow' engine"\n\n with pytest.raises(err, match=msg):\n parser.read_csv(StringIO(data), converters={"ID": conv})\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\n@pytest.mark.parametrize(\n "val", [np.iinfo(np.uint64).max, np.iinfo(np.int64).max, np.iinfo(np.int64).min]\n)\ndef test_int64_uint64_range(all_parsers, val):\n # These numbers fall right inside the int64-uint64\n # range, so they should be parsed as string.\n parser = all_parsers\n result = parser.read_csv(StringIO(str(val)), header=None)\n\n expected = DataFrame([val])\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\n@pytest.mark.parametrize(\n "val", [np.iinfo(np.uint64).max + 1, np.iinfo(np.int64).min - 1]\n)\ndef test_outside_int64_uint64_range(all_parsers, val):\n # These numbers fall just outside the int64-uint64\n # range, so they should be parsed as string.\n parser = all_parsers\n result = parser.read_csv(StringIO(str(val)), header=None)\n\n expected = DataFrame([str(val)])\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow # gets float64 dtype instead of object\n@pytest.mark.parametrize("exp_data", [[str(-1), str(2**63)], [str(2**63), str(-1)]])\ndef test_numeric_range_too_wide(all_parsers, exp_data):\n # No numerical dtype can hold both negative and uint64\n # values, so they should be cast as string.\n parser = all_parsers\n data = "\n".join(exp_data)\n expected = DataFrame(exp_data)\n\n result = 
parser.read_csv(StringIO(data), header=None)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_integer_precision(all_parsers):\n # Gh 7072\n s = """1,1;0;0;0;1;1;3844;3844;3844;1;1;1;1;1;1;0;0;1;1;0;0,,,4321583677327450765\n5,1;0;0;0;1;1;843;843;843;1;1;1;1;1;1;0;0;1;1;0;0,64.0,;,4321113141090630389"""\n parser = all_parsers\n result = parser.read_csv(StringIO(s), header=None)[4]\n expected = Series([4321583677327450765, 4321113141090630389], name=4)\n tm.assert_series_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\test_ints.py | test_ints.py | Python | 7,243 | 0.95 | 0.060606 | 0.070707 | python-kit | 387 | 2025-04-07T07:23:57.438416 | Apache-2.0 | true | 2dedef71903460eb18d872602d05ca71 |
"""\nTests that work on both the Python and C engines but do not have a\nspecific classification into the other test modules.\n"""\nfrom io import StringIO\n\nimport pytest\n\nfrom pandas import (\n DataFrame,\n concat,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\n\ndef test_iterator(all_parsers):\n # see gh-6607\n data = """index,A,B,C,D\nfoo,2,3,4,5\nbar,7,8,9,10\nbaz,12,13,14,15\nqux,12,13,14,15\nfoo2,12,13,14,15\nbar2,12,13,14,15\n"""\n parser = all_parsers\n kwargs = {"index_col": 0}\n\n expected = parser.read_csv(StringIO(data), **kwargs)\n\n if parser.engine == "pyarrow":\n msg = "The 'iterator' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), iterator=True, **kwargs)\n return\n\n with parser.read_csv(StringIO(data), iterator=True, **kwargs) as reader:\n first_chunk = reader.read(3)\n tm.assert_frame_equal(first_chunk, expected[:3])\n\n last_chunk = reader.read(5)\n tm.assert_frame_equal(last_chunk, expected[3:])\n\n\ndef test_iterator2(all_parsers):\n parser = all_parsers\n data = """A,B,C\nfoo,1,2,3\nbar,4,5,6\nbaz,7,8,9\n"""\n\n if parser.engine == "pyarrow":\n msg = "The 'iterator' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), iterator=True)\n return\n\n with parser.read_csv(StringIO(data), iterator=True) as reader:\n result = list(reader)\n\n expected = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n index=["foo", "bar", "baz"],\n columns=["A", "B", "C"],\n )\n tm.assert_frame_equal(result[0], expected)\n\n\ndef test_iterator_stop_on_chunksize(all_parsers):\n # gh-3967: stopping iteration when chunksize is specified\n parser = all_parsers\n data = """A,B,C\nfoo,1,2,3\nbar,4,5,6\nbaz,7,8,9\n"""\n if parser.engine == "pyarrow":\n msg = "The 'chunksize' option is not supported with the 
'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), chunksize=1)\n return\n\n with parser.read_csv(StringIO(data), chunksize=1) as reader:\n result = list(reader)\n\n assert len(result) == 3\n expected = DataFrame(\n [[1, 2, 3], [4, 5, 6], [7, 8, 9]],\n index=["foo", "bar", "baz"],\n columns=["A", "B", "C"],\n )\n tm.assert_frame_equal(concat(result), expected)\n\n\n@pytest.mark.parametrize(\n "kwargs", [{"iterator": True, "chunksize": 1}, {"iterator": True}, {"chunksize": 1}]\n)\ndef test_iterator_skipfooter_errors(all_parsers, kwargs):\n msg = "'skipfooter' not supported for iteration"\n parser = all_parsers\n data = "a\n1\n2"\n\n if parser.engine == "pyarrow":\n msg = (\n "The '(chunksize|iterator)' option is not supported with the "\n "'pyarrow' engine"\n )\n\n with pytest.raises(ValueError, match=msg):\n with parser.read_csv(StringIO(data), skipfooter=1, **kwargs) as _:\n pass\n\n\ndef test_iteration_open_handle(all_parsers):\n parser = all_parsers\n kwargs = {"header": None}\n\n with tm.ensure_clean() as path:\n with open(path, "w", encoding="utf-8") as f:\n f.write("AAA\nBBB\nCCC\nDDD\nEEE\nFFF\nGGG")\n\n with open(path, encoding="utf-8") as f:\n for line in f:\n if "CCC" in line:\n break\n\n result = parser.read_csv(f, **kwargs)\n expected = DataFrame({0: ["DDD", "EEE", "FFF", "GGG"]})\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\test_iterator.py | test_iterator.py | Python | 3,702 | 0.95 | 0.089552 | 0.018692 | python-kit | 814 | 2024-10-09T02:26:02.401531 | BSD-3-Clause | true | 575a0f9120caca665278d9fdb7bd5605 |
"""\nTests that work on the Python, C and PyArrow engines but do not have a\nspecific classification into the other test modules.\n"""\nimport codecs\nimport csv\nfrom io import StringIO\nimport os\nfrom pathlib import Path\n\nimport numpy as np\nimport pytest\n\nfrom pandas.compat import PY311\nfrom pandas.errors import (\n EmptyDataError,\n ParserError,\n ParserWarning,\n)\n\nfrom pandas import DataFrame\nimport pandas._testing as tm\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n\ndef test_empty_decimal_marker(all_parsers):\n data = """A|B|C\n1|2,334|5\n10|13|10.\n"""\n # Parsers support only length-1 decimals\n msg = "Only length-1 decimal markers supported"\n parser = all_parsers\n\n if parser.engine == "pyarrow":\n msg = (\n "only single character unicode strings can be "\n "converted to Py_UCS4, got length 0"\n )\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), decimal="")\n\n\ndef test_bad_stream_exception(all_parsers, csv_dir_path):\n # see gh-13652\n #\n # This test validates that both the Python engine and C engine will\n # raise UnicodeDecodeError instead of C engine raising ParserError\n # and swallowing the exception that caused read to fail.\n path = os.path.join(csv_dir_path, "sauron.SHIFT_JIS.csv")\n codec = codecs.lookup("utf-8")\n utf8 = codecs.lookup("utf-8")\n parser = all_parsers\n msg = "'utf-8' codec can't decode byte"\n\n # Stream must be binary UTF8.\n with open(path, "rb") as handle, codecs.StreamRecoder(\n handle, utf8.encode, utf8.decode, codec.streamreader, codec.streamwriter\n ) as stream:\n with pytest.raises(UnicodeDecodeError, match=msg):\n parser.read_csv(stream)\n\n\ndef test_malformed(all_parsers):\n # see gh-6607\n parser = all_parsers\n data = """ignore\nA,B,C\n1,2,3 # comment\n1,2,3,4,5\n2,3,4\n"""\n msg = "Expected 3 fields in line 4, saw 5"\n err = ParserError\n if parser.engine == "pyarrow":\n msg = "The 'comment' option 
is not supported with the 'pyarrow' engine"\n err = ValueError\n with pytest.raises(err, match=msg):\n parser.read_csv(StringIO(data), header=1, comment="#")\n\n\n@pytest.mark.parametrize("nrows", [5, 3, None])\ndef test_malformed_chunks(all_parsers, nrows):\n data = """ignore\nA,B,C\nskip\n1,2,3\n3,5,10 # comment\n1,2,3,4,5\n2,3,4\n"""\n parser = all_parsers\n\n if parser.engine == "pyarrow":\n msg = "The 'iterator' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(data),\n header=1,\n comment="#",\n iterator=True,\n chunksize=1,\n skiprows=[2],\n )\n return\n\n msg = "Expected 3 fields in line 6, saw 5"\n with parser.read_csv(\n StringIO(data), header=1, comment="#", iterator=True, chunksize=1, skiprows=[2]\n ) as reader:\n with pytest.raises(ParserError, match=msg):\n reader.read(nrows)\n\n\n@xfail_pyarrow # does not raise\ndef test_catch_too_many_names(all_parsers):\n # see gh-5156\n data = """\\n1,2,3\n4,,6\n7,8,9\n10,11,12\n"""\n parser = all_parsers\n msg = (\n "Too many columns specified: expected 4 and found 3"\n if parser.engine == "c"\n else "Number of passed names did not match "\n "number of header fields in the file"\n )\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), header=0, names=["a", "b", "c", "d"])\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\n@pytest.mark.parametrize("nrows", [0, 1, 2, 3, 4, 5])\ndef test_raise_on_no_columns(all_parsers, nrows):\n parser = all_parsers\n data = "\n" * nrows\n\n msg = "No columns to parse from file"\n with pytest.raises(EmptyDataError, match=msg):\n parser.read_csv(StringIO(data))\n\n\ndef test_unexpected_keyword_parameter_exception(all_parsers):\n # GH-34976\n parser = all_parsers\n\n msg = "{}\\(\\) got an unexpected keyword argument 'foo'"\n with pytest.raises(TypeError, match=msg.format("read_csv")):\n parser.read_csv("foo.csv", foo=1)\n with pytest.raises(TypeError, 
match=msg.format("read_table")):\n parser.read_table("foo.tsv", foo=1)\n\n\ndef test_suppress_error_output(all_parsers):\n # see gh-15925\n parser = all_parsers\n data = "a\n1\n1,2,3\n4\n5,6,7"\n expected = DataFrame({"a": [1, 4]})\n\n result = parser.read_csv(StringIO(data), on_bad_lines="skip")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_error_bad_lines(all_parsers):\n # see gh-15925\n parser = all_parsers\n data = "a\n1\n1,2,3\n4\n5,6,7"\n\n msg = "Expected 1 fields in line 3, saw 3"\n\n if parser.engine == "pyarrow":\n # "CSV parse error: Expected 1 columns, got 3: 1,2,3"\n pytest.skip(reason="https://github.com/apache/arrow/issues/38676")\n\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), on_bad_lines="error")\n\n\ndef test_warn_bad_lines(all_parsers):\n # see gh-15925\n parser = all_parsers\n data = "a\n1\n1,2,3\n4\n5,6,7"\n expected = DataFrame({"a": [1, 4]})\n match_msg = "Skipping line"\n\n expected_warning = ParserWarning\n if parser.engine == "pyarrow":\n match_msg = "Expected 1 columns, but found 3: 1,2,3"\n expected_warning = (ParserWarning, DeprecationWarning)\n\n with tm.assert_produces_warning(\n expected_warning, match=match_msg, check_stacklevel=False\n ):\n result = parser.read_csv(StringIO(data), on_bad_lines="warn")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_read_csv_wrong_num_columns(all_parsers):\n # Too few columns.\n data = """A,B,C,D,E,F\n1,2,3,4,5,6\n6,7,8,9,10,11,12\n11,12,13,14,15,16\n"""\n parser = all_parsers\n msg = "Expected 6 fields in line 3, saw 7"\n\n if parser.engine == "pyarrow":\n # Expected 6 columns, got 7: 6,7,8,9,10,11,12\n pytest.skip(reason="https://github.com/apache/arrow/issues/38676")\n\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data))\n\n\ndef test_null_byte_char(request, all_parsers):\n # see gh-2741\n data = "\x00,foo"\n names = ["a", "b"]\n parser = all_parsers\n\n if parser.engine == "c" or (parser.engine == "python" and 
PY311):\n if parser.engine == "python" and PY311:\n request.applymarker(\n pytest.mark.xfail(\n reason="In Python 3.11, this is read as an empty character not null"\n )\n )\n expected = DataFrame([[np.nan, "foo"]], columns=names)\n out = parser.read_csv(StringIO(data), names=names)\n tm.assert_frame_equal(out, expected)\n else:\n if parser.engine == "pyarrow":\n # CSV parse error: Empty CSV file or block: "\n # cannot infer number of columns"\n pytest.skip(reason="https://github.com/apache/arrow/issues/38676")\n else:\n msg = "NULL byte detected"\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), names=names)\n\n\n@pytest.mark.filterwarnings("always::ResourceWarning")\ndef test_open_file(request, all_parsers):\n # GH 39024\n parser = all_parsers\n\n msg = "Could not determine delimiter"\n err = csv.Error\n if parser.engine == "c":\n msg = "the 'c' engine does not support sep=None with delim_whitespace=False"\n err = ValueError\n elif parser.engine == "pyarrow":\n msg = (\n "the 'pyarrow' engine does not support sep=None with delim_whitespace=False"\n )\n err = ValueError\n\n with tm.ensure_clean() as path:\n file = Path(path)\n file.write_bytes(b"\xe4\na\n1")\n\n with tm.assert_produces_warning(None):\n # should not trigger a ResourceWarning\n with pytest.raises(err, match=msg):\n parser.read_csv(file, sep=None, encoding_errors="replace")\n\n\ndef test_invalid_on_bad_line(all_parsers):\n parser = all_parsers\n data = "a\n1\n1,2,3\n4\n5,6,7"\n with pytest.raises(ValueError, match="Argument abc is invalid for on_bad_lines"):\n parser.read_csv(StringIO(data), on_bad_lines="abc")\n\n\ndef test_bad_header_uniform_error(all_parsers):\n parser = all_parsers\n data = "+++123456789...\ncol1,col2,col3,col4\n1,2,3,4\n"\n msg = "Expected 2 fields in line 2, saw 4"\n if parser.engine == "c":\n msg = (\n "Could not construct index. 
Requested to use 1 "\n "number of columns, but 3 left to parse."\n )\n elif parser.engine == "pyarrow":\n # "CSV parse error: Expected 1 columns, got 4: col1,col2,col3,col4"\n pytest.skip(reason="https://github.com/apache/arrow/issues/38676")\n\n with pytest.raises(ParserError, match=msg):\n parser.read_csv(StringIO(data), index_col=0, on_bad_lines="error")\n\n\ndef test_on_bad_lines_warn_correct_formatting(all_parsers):\n # see gh-15925\n parser = all_parsers\n data = """1,2\na,b\na,b,c\na,b,d\na,b\n"""\n expected = DataFrame({"1": "a", "2": ["b"] * 2})\n match_msg = "Skipping line"\n\n expected_warning = ParserWarning\n if parser.engine == "pyarrow":\n match_msg = "Expected 2 columns, but found 3: a,b,c"\n expected_warning = (ParserWarning, DeprecationWarning)\n\n with tm.assert_produces_warning(\n expected_warning, match=match_msg, check_stacklevel=False\n ):\n result = parser.read_csv(StringIO(data), on_bad_lines="warn")\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\test_read_errors.py | test_read_errors.py | Python | 9,592 | 0.95 | 0.09375 | 0.088123 | react-lib | 488 | 2025-02-21T20:49:34.459166 | GPL-3.0 | true | 50484c2611c0a5add05784c48ecefc73 |
"""\nTests that work on both the Python and C engines but do not have a\nspecific classification into the other test modules.\n"""\nfrom io import StringIO\n\nimport pytest\n\nimport pandas._testing as tm\n\ndepr_msg = "The 'verbose' keyword in pd.read_csv is deprecated"\n\n\ndef test_verbose_read(all_parsers, capsys):\n parser = all_parsers\n data = """a,b,c,d\none,1,2,3\none,1,2,3\n,1,2,3\none,1,2,3\n,1,2,3\n,1,2,3\none,1,2,3\ntwo,1,2,3"""\n\n if parser.engine == "pyarrow":\n msg = "The 'verbose' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n parser.read_csv(StringIO(data), verbose=True)\n return\n\n # Engines are verbose in different ways.\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n parser.read_csv(StringIO(data), verbose=True)\n captured = capsys.readouterr()\n\n if parser.engine == "c":\n assert "Tokenization took:" in captured.out\n assert "Parser memory cleanup took:" in captured.out\n else: # Python engine\n assert captured.out == "Filled 3 NA values in column a\n"\n\n\ndef test_verbose_read2(all_parsers, capsys):\n parser = all_parsers\n data = """a,b,c,d\none,1,2,3\ntwo,1,2,3\nthree,1,2,3\nfour,1,2,3\nfive,1,2,3\n,1,2,3\nseven,1,2,3\neight,1,2,3"""\n\n if parser.engine == "pyarrow":\n msg = "The 'verbose' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n parser.read_csv(StringIO(data), verbose=True, index_col=0)\n return\n\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n parser.read_csv(StringIO(data), verbose=True, index_col=0)\n captured = capsys.readouterr()\n\n # Engines are verbose in different ways.\n if parser.engine == "c":\n assert "Tokenization took:" in 
captured.out\n assert "Parser memory cleanup took:" in captured.out\n else: # Python engine\n assert captured.out == "Filled 1 NA values in column a\n"\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\test_verbose.py | test_verbose.py | Python | 2,339 | 0.95 | 0.074074 | 0.029412 | react-lib | 176 | 2024-03-01T21:31:34.382792 | Apache-2.0 | true | 468bafcfeb58158fcecb5dc9f2251b33 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\__pycache__\test_chunksize.cpython-313.pyc | test_chunksize.cpython-313.pyc | Other | 16,949 | 0.95 | 0 | 0.003876 | awesome-app | 231 | 2025-01-13T15:12:23.350456 | GPL-3.0 | true | a5760c6b69b3c9607aa237639fd5c012 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\__pycache__\test_common_basic.cpython-313.pyc | test_common_basic.cpython-313.pyc | Other | 40,980 | 0.8 | 0 | 0.001686 | react-lib | 11 | 2024-12-13T15:47:22.503186 | GPL-3.0 | true | 64c93f4bbce457ae96d735059bb6e62d |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\__pycache__\test_data_list.cpython-313.pyc | test_data_list.cpython-313.pyc | Other | 3,646 | 0.7 | 0 | 0 | vue-tools | 616 | 2025-04-18T09:58:16.170813 | BSD-3-Clause | true | dd79b73222b802924e001804cc9d9928 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\__pycache__\test_decimal.cpython-313.pyc | test_decimal.cpython-313.pyc | Other | 2,603 | 0.8 | 0 | 0 | react-lib | 734 | 2024-09-22T19:04:02.762640 | BSD-3-Clause | true | 8e30655f0d12df1de5a547100de0c9e5 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\__pycache__\test_file_buffer_url.cpython-313.pyc | test_file_buffer_url.cpython-313.pyc | Other | 20,152 | 0.8 | 0.003597 | 0.015267 | vue-tools | 87 | 2025-04-26T06:26:39.967507 | BSD-3-Clause | true | 77942e02fe6833f68bdfca58cfbc9b1c |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\__pycache__\test_float.cpython-313.pyc | test_float.cpython-313.pyc | Other | 3,961 | 0.8 | 0 | 0 | python-kit | 594 | 2025-06-08T11:33:15.247144 | BSD-3-Clause | true | 6d23be902698861ff007e3be47c53b3f |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\__pycache__\test_index.cpython-313.pyc | test_index.cpython-313.pyc | Other | 8,990 | 0.8 | 0 | 0 | node-utils | 413 | 2023-12-22T22:04:46.895140 | BSD-3-Clause | true | 3620eee5aa3643e454131ea09a4206aa |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\__pycache__\test_inf.cpython-313.pyc | test_inf.cpython-313.pyc | Other | 3,253 | 0.8 | 0 | 0 | node-utils | 63 | 2024-12-26T10:51:53.345989 | BSD-3-Clause | true | 43c909980c8136686ab32d51375105a6 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\__pycache__\test_ints.cpython-313.pyc | test_ints.cpython-313.pyc | Other | 9,033 | 0.8 | 0 | 0.021583 | python-kit | 609 | 2023-10-04T10:19:16.930437 | MIT | true | 7b45abc6b8fa9a6ff0ece5beea8495ef |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\__pycache__\test_iterator.cpython-313.pyc | test_iterator.cpython-313.pyc | Other | 5,904 | 0.8 | 0.008547 | 0 | awesome-app | 689 | 2025-06-08T15:35:12.589224 | Apache-2.0 | true | c006931f6a46823df963c20245da6b6f |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\__pycache__\test_read_errors.cpython-313.pyc | test_read_errors.cpython-313.pyc | Other | 13,855 | 0.8 | 0.004525 | 0 | awesome-app | 888 | 2024-06-03T05:53:35.417270 | MIT | true | ff1e4d637b7f5df46e89077c9866d6ee |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\__pycache__\test_verbose.cpython-313.pyc | test_verbose.cpython-313.pyc | Other | 3,329 | 0.8 | 0 | 0.015385 | node-utils | 562 | 2025-02-13T17:46:43.077157 | MIT | true | eaf4c53babce91c22cbd3476b4490285 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\common\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 204 | 0.7 | 0 | 0 | python-kit | 204 | 2023-09-28T16:38:02.327165 | Apache-2.0 | true | 0366ee9cfe5a4f878b9f93f853dd1547 |
"""\nTests dtype specification during parsing\nfor all of the parsers defined in parsers.py\n"""\nfrom io import StringIO\nimport os\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs import parsers as libparsers\n\nfrom pandas.core.dtypes.dtypes import CategoricalDtype\n\nimport pandas as pd\nfrom pandas import (\n Categorical,\n DataFrame,\n Timestamp,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\n\n\n@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different\n@pytest.mark.parametrize(\n "dtype",\n [\n "category",\n CategoricalDtype(),\n {"a": "category", "b": "category", "c": CategoricalDtype()},\n ],\n)\ndef test_categorical_dtype(all_parsers, dtype):\n # see gh-10153\n parser = all_parsers\n data = """a,b,c\n1,a,3.4\n1,a,3.4\n2,b,4.5"""\n expected = DataFrame(\n {\n "a": Categorical(["1", "1", "2"]),\n "b": Categorical(["a", "a", "b"]),\n "c": Categorical(["3.4", "3.4", "4.5"]),\n }\n )\n actual = parser.read_csv(StringIO(data), dtype=dtype)\n tm.assert_frame_equal(actual, expected)\n\n\n@pytest.mark.parametrize("dtype", [{"b": "category"}, {1: "category"}])\ndef test_categorical_dtype_single(all_parsers, dtype, request):\n # see gh-10153\n parser = all_parsers\n data = """a,b,c\n1,a,3.4\n1,a,3.4\n2,b,4.5"""\n expected = DataFrame(\n {"a": [1, 1, 2], "b": Categorical(["a", "a", "b"]), "c": [3.4, 3.4, 4.5]}\n )\n if parser.engine == "pyarrow":\n mark = pytest.mark.xfail(\n strict=False,\n reason="Flaky test sometimes gives object dtype instead of Categorical",\n )\n request.applymarker(mark)\n\n actual = parser.read_csv(StringIO(data), dtype=dtype)\n tm.assert_frame_equal(actual, expected)\n\n\n@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different\ndef test_categorical_dtype_unsorted(all_parsers):\n # see gh-10153\n parser = all_parsers\n data = 
"""a,b,c\n1,b,3.4\n1,b,3.4\n2,a,4.5"""\n expected = DataFrame(\n {\n "a": Categorical(["1", "1", "2"]),\n "b": Categorical(["b", "b", "a"]),\n "c": Categorical(["3.4", "3.4", "4.5"]),\n }\n )\n actual = parser.read_csv(StringIO(data), dtype="category")\n tm.assert_frame_equal(actual, expected)\n\n\n@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different\ndef test_categorical_dtype_missing(all_parsers):\n # see gh-10153\n parser = all_parsers\n data = """a,b,c\n1,b,3.4\n1,nan,3.4\n2,a,4.5"""\n expected = DataFrame(\n {\n "a": Categorical(["1", "1", "2"]),\n "b": Categorical(["b", np.nan, "a"]),\n "c": Categorical(["3.4", "3.4", "4.5"]),\n }\n )\n actual = parser.read_csv(StringIO(data), dtype="category")\n tm.assert_frame_equal(actual, expected)\n\n\n@xfail_pyarrow # AssertionError: Attributes of DataFrame.iloc[:, 0] are different\n@pytest.mark.slow\ndef test_categorical_dtype_high_cardinality_numeric(all_parsers, monkeypatch):\n # see gh-18186\n # was an issue with C parser, due to DEFAULT_BUFFER_HEURISTIC\n parser = all_parsers\n heuristic = 2**5\n data = np.sort([str(i) for i in range(heuristic + 1)])\n expected = DataFrame({"a": Categorical(data, ordered=True)})\n with monkeypatch.context() as m:\n m.setattr(libparsers, "DEFAULT_BUFFER_HEURISTIC", heuristic)\n actual = parser.read_csv(StringIO("a\n" + "\n".join(data)), dtype="category")\n actual["a"] = actual["a"].cat.reorder_categories(\n np.sort(actual.a.cat.categories), ordered=True\n )\n tm.assert_frame_equal(actual, expected)\n\n\ndef test_categorical_dtype_utf16(all_parsers, csv_dir_path):\n # see gh-10153\n pth = os.path.join(csv_dir_path, "utf16_ex.txt")\n parser = all_parsers\n encoding = "utf-16"\n sep = "\t"\n\n expected = parser.read_csv(pth, sep=sep, encoding=encoding)\n expected = expected.apply(Categorical)\n\n actual = parser.read_csv(pth, sep=sep, encoding=encoding, dtype="category")\n tm.assert_frame_equal(actual, expected)\n\n\ndef 
test_categorical_dtype_chunksize_infer_categories(all_parsers):\n # see gh-10153\n parser = all_parsers\n data = """a,b\n1,a\n1,b\n1,b\n2,c"""\n expecteds = [\n DataFrame({"a": [1, 1], "b": Categorical(["a", "b"])}),\n DataFrame({"a": [1, 2], "b": Categorical(["b", "c"])}, index=[2, 3]),\n ]\n\n if parser.engine == "pyarrow":\n msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), dtype={"b": "category"}, chunksize=2)\n return\n\n with parser.read_csv(\n StringIO(data), dtype={"b": "category"}, chunksize=2\n ) as actuals:\n for actual, expected in zip(actuals, expecteds):\n tm.assert_frame_equal(actual, expected)\n\n\ndef test_categorical_dtype_chunksize_explicit_categories(all_parsers):\n # see gh-10153\n parser = all_parsers\n data = """a,b\n1,a\n1,b\n1,b\n2,c"""\n cats = ["a", "b", "c"]\n expecteds = [\n DataFrame({"a": [1, 1], "b": Categorical(["a", "b"], categories=cats)}),\n DataFrame(\n {"a": [1, 2], "b": Categorical(["b", "c"], categories=cats)},\n index=[2, 3],\n ),\n ]\n dtype = CategoricalDtype(cats)\n\n if parser.engine == "pyarrow":\n msg = "The 'chunksize' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2)\n return\n\n with parser.read_csv(StringIO(data), dtype={"b": dtype}, chunksize=2) as actuals:\n for actual, expected in zip(actuals, expecteds):\n tm.assert_frame_equal(actual, expected)\n\n\ndef test_categorical_dtype_latin1(all_parsers, csv_dir_path):\n # see gh-10153\n pth = os.path.join(csv_dir_path, "unicode_series.csv")\n parser = all_parsers\n encoding = "latin-1"\n\n expected = parser.read_csv(pth, header=None, encoding=encoding)\n expected[1] = Categorical(expected[1])\n\n actual = parser.read_csv(pth, header=None, encoding=encoding, dtype={1: "category"})\n tm.assert_frame_equal(actual, 
expected)\n\n\n@pytest.mark.parametrize("ordered", [False, True])\n@pytest.mark.parametrize(\n "categories",\n [["a", "b", "c"], ["a", "c", "b"], ["a", "b", "c", "d"], ["c", "b", "a"]],\n)\ndef test_categorical_category_dtype(all_parsers, categories, ordered):\n parser = all_parsers\n data = """a,b\n1,a\n1,b\n1,b\n2,c"""\n expected = DataFrame(\n {\n "a": [1, 1, 1, 2],\n "b": Categorical(\n ["a", "b", "b", "c"], categories=categories, ordered=ordered\n ),\n }\n )\n\n dtype = {"b": CategoricalDtype(categories=categories, ordered=ordered)}\n result = parser.read_csv(StringIO(data), dtype=dtype)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_categorical_category_dtype_unsorted(all_parsers):\n parser = all_parsers\n data = """a,b\n1,a\n1,b\n1,b\n2,c"""\n dtype = CategoricalDtype(["c", "b", "a"])\n expected = DataFrame(\n {\n "a": [1, 1, 1, 2],\n "b": Categorical(["a", "b", "b", "c"], categories=["c", "b", "a"]),\n }\n )\n\n result = parser.read_csv(StringIO(data), dtype={"b": dtype})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_categorical_coerces_numeric(all_parsers):\n parser = all_parsers\n dtype = {"b": CategoricalDtype([1, 2, 3])}\n\n data = "b\n1\n1\n2\n3"\n expected = DataFrame({"b": Categorical([1, 1, 2, 3])})\n\n result = parser.read_csv(StringIO(data), dtype=dtype)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_categorical_coerces_datetime(all_parsers):\n parser = all_parsers\n dti = pd.DatetimeIndex(["2017-01-01", "2018-01-01", "2019-01-01"], freq=None)\n dtype = {"b": CategoricalDtype(dti)}\n\n data = "b\n2017-01-01\n2018-01-01\n2019-01-01"\n expected = DataFrame({"b": Categorical(dtype["b"].categories)})\n\n result = parser.read_csv(StringIO(data), dtype=dtype)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_categorical_coerces_timestamp(all_parsers):\n parser = all_parsers\n dtype = {"b": CategoricalDtype([Timestamp("2014")])}\n\n data = "b\n2014-01-01\n2014-01-01"\n expected = DataFrame({"b": 
Categorical([Timestamp("2014")] * 2)})\n\n result = parser.read_csv(StringIO(data), dtype=dtype)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_categorical_coerces_timedelta(all_parsers):\n parser = all_parsers\n dtype = {"b": CategoricalDtype(pd.to_timedelta(["1h", "2h", "3h"]))}\n\n data = "b\n1h\n2h\n3h"\n expected = DataFrame({"b": Categorical(dtype["b"].categories)})\n\n result = parser.read_csv(StringIO(data), dtype=dtype)\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "data",\n [\n "b\nTrue\nFalse\nNA\nFalse",\n "b\ntrue\nfalse\nNA\nfalse",\n "b\nTRUE\nFALSE\nNA\nFALSE",\n "b\nTrue\nFalse\nNA\nFALSE",\n ],\n)\ndef test_categorical_dtype_coerces_boolean(all_parsers, data):\n # see gh-20498\n parser = all_parsers\n dtype = {"b": CategoricalDtype([False, True])}\n expected = DataFrame({"b": Categorical([True, False, None, False])})\n\n result = parser.read_csv(StringIO(data), dtype=dtype)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_categorical_unexpected_categories(all_parsers):\n parser = all_parsers\n dtype = {"b": CategoricalDtype(["a", "b", "d", "e"])}\n\n data = "b\nd\na\nc\nd" # Unexpected c\n expected = DataFrame({"b": Categorical(list("dacd"), dtype=dtype["b"])})\n\n result = parser.read_csv(StringIO(data), dtype=dtype)\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\dtypes\test_categorical.py | test_categorical.py | Python | 9,836 | 0.95 | 0.071856 | 0.040441 | awesome-app | 442 | 2024-10-21T00:53:48.517516 | Apache-2.0 | true | ef56c7c4a2fa4cb18b13a60ce220d1bd |
"""\nTests dtype specification during parsing\nfor all of the parsers defined in parsers.py\n"""\nfrom collections import defaultdict\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas.errors import ParserWarning\n\nimport pandas as pd\nfrom pandas import (\n DataFrame,\n Timestamp,\n)\nimport pandas._testing as tm\nfrom pandas.core.arrays import IntegerArray\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\n\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\n\n\n@pytest.mark.parametrize("dtype", [str, object])\n@pytest.mark.parametrize("check_orig", [True, False])\n@pytest.mark.usefixtures("pyarrow_xfail")\ndef test_dtype_all_columns(all_parsers, dtype, check_orig, using_infer_string):\n # see gh-3795, gh-6607\n parser = all_parsers\n\n df = DataFrame(\n np.random.default_rng(2).random((5, 2)).round(4),\n columns=list("AB"),\n index=["1A", "1B", "1C", "1D", "1E"],\n )\n\n with tm.ensure_clean("__passing_str_as_dtype__.csv") as path:\n df.to_csv(path)\n\n result = parser.read_csv(path, dtype=dtype, index_col=0)\n\n if check_orig:\n expected = df.copy()\n result = result.astype(float)\n elif using_infer_string and dtype is str:\n expected = df.astype(str)\n else:\n expected = df.astype(str).astype(object)\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.usefixtures("pyarrow_xfail")\ndef test_dtype_per_column(all_parsers):\n parser = all_parsers\n data = """\\none,two\n1,2.5\n2,3.5\n3,4.5\n4,5.5"""\n expected = DataFrame(\n [[1, "2.5"], [2, "3.5"], [3, "4.5"], [4, "5.5"]], columns=["one", "two"]\n )\n expected["one"] = expected["one"].astype(np.float64)\n\n result = parser.read_csv(StringIO(data), dtype={"one": np.float64, 1: str})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_invalid_dtype_per_column(all_parsers):\n parser = all_parsers\n data = """\\none,two\n1,2.5\n2,3.5\n3,4.5\n4,5.5"""\n\n with pytest.raises(TypeError, match="data type 
[\"']foo[\"'] not understood"):\n parser.read_csv(StringIO(data), dtype={"one": "foo", 1: "int"})\n\n\ndef test_raise_on_passed_int_dtype_with_nas(all_parsers):\n # see gh-2631\n parser = all_parsers\n data = """YEAR, DOY, a\n2001,106380451,10\n2001,,11\n2001,106380451,67"""\n\n if parser.engine == "c":\n msg = "Integer column has NA values"\n elif parser.engine == "pyarrow":\n msg = "The 'skipinitialspace' option is not supported with the 'pyarrow' engine"\n else:\n msg = "Unable to convert column DOY"\n\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(StringIO(data), dtype={"DOY": np.int64}, skipinitialspace=True)\n\n\ndef test_dtype_with_converters(all_parsers):\n parser = all_parsers\n data = """a,b\n1.1,2.2\n1.2,2.3"""\n\n if parser.engine == "pyarrow":\n msg = "The 'converters' option is not supported with the 'pyarrow' engine"\n with pytest.raises(ValueError, match=msg):\n parser.read_csv(\n StringIO(data), dtype={"a": "i8"}, converters={"a": lambda x: str(x)}\n )\n return\n\n # Dtype spec ignored if converted specified.\n result = parser.read_csv_check_warnings(\n ParserWarning,\n "Both a converter and dtype were specified for column a "\n "- only the converter will be used.",\n StringIO(data),\n dtype={"a": "i8"},\n converters={"a": lambda x: str(x)},\n )\n expected = DataFrame({"a": ["1.1", "1.2"], "b": [2.2, 2.3]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize(\n "dtype", list(np.typecodes["AllInteger"] + np.typecodes["Float"])\n)\ndef test_numeric_dtype(all_parsers, dtype):\n data = "0\n1"\n parser = all_parsers\n expected = DataFrame([0, 1], dtype=dtype)\n\n result = parser.read_csv(StringIO(data), header=None, dtype=dtype)\n tm.assert_frame_equal(expected, result)\n\n\n@pytest.mark.usefixtures("pyarrow_xfail")\ndef test_boolean_dtype(all_parsers):\n parser = all_parsers\n data = "\n".join(\n [\n "a",\n "True",\n "TRUE",\n "true",\n "1",\n "1.0",\n "False",\n "FALSE",\n "false",\n "0",\n "0.0",\n "NaN",\n 
"nan",\n "NA",\n "null",\n "NULL",\n ]\n )\n\n result = parser.read_csv(StringIO(data), dtype="boolean")\n expected = DataFrame(\n {\n "a": pd.array(\n [\n True,\n True,\n True,\n True,\n True,\n False,\n False,\n False,\n False,\n False,\n None,\n None,\n None,\n None,\n None,\n ],\n dtype="boolean",\n )\n }\n )\n\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.usefixtures("pyarrow_xfail")\ndef test_delimiter_with_usecols_and_parse_dates(all_parsers):\n # GH#35873\n result = all_parsers.read_csv(\n StringIO('"dump","-9,1","-9,1",20101010'),\n engine="python",\n names=["col", "col1", "col2", "col3"],\n usecols=["col1", "col2", "col3"],\n parse_dates=["col3"],\n decimal=",",\n )\n expected = DataFrame(\n {"col1": [-9.1], "col2": [-9.1], "col3": [Timestamp("2010-10-10")]}\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("thousands", ["_", None])\ndef test_decimal_and_exponential(\n request, python_parser_only, numeric_decimal, thousands\n):\n # GH#31920\n decimal_number_check(request, python_parser_only, numeric_decimal, thousands, None)\n\n\n@pytest.mark.parametrize("thousands", ["_", None])\n@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])\ndef test_1000_sep_decimal_float_precision(\n request, c_parser_only, numeric_decimal, float_precision, thousands\n):\n # test decimal and thousand sep handling in across 'float_precision'\n # parsers\n decimal_number_check(\n request, c_parser_only, numeric_decimal, thousands, float_precision\n )\n text, value = numeric_decimal\n text = " " + text + " "\n if isinstance(value, str): # the negative cases (parse as text)\n value = " " + value + " "\n decimal_number_check(\n request, c_parser_only, (text, value), thousands, float_precision\n )\n\n\ndef decimal_number_check(request, parser, numeric_decimal, thousands, float_precision):\n # GH#31920\n value = numeric_decimal[0]\n if thousands is None and value in ("1_,", "1_234,56", "1_234,56e0"):\n 
request.applymarker(\n pytest.mark.xfail(reason=f"thousands={thousands} and sep is in {value}")\n )\n df = parser.read_csv(\n StringIO(value),\n float_precision=float_precision,\n sep="|",\n thousands=thousands,\n decimal=",",\n header=None,\n )\n val = df.iloc[0, 0]\n assert val == numeric_decimal[1]\n\n\n@pytest.mark.parametrize("float_precision", [None, "legacy", "high", "round_trip"])\ndef test_skip_whitespace(c_parser_only, float_precision):\n DATA = """id\tnum\t\n1\t1.2 \t\n1\t 2.1\t\n2\t 1\t\n2\t 1.2 \t\n"""\n df = c_parser_only.read_csv(\n StringIO(DATA),\n float_precision=float_precision,\n sep="\t",\n header=0,\n dtype={1: np.float64},\n )\n tm.assert_series_equal(df.iloc[:, 1], pd.Series([1.2, 2.1, 1.0, 1.2], name="num"))\n\n\n@pytest.mark.usefixtures("pyarrow_xfail")\ndef test_true_values_cast_to_bool(all_parsers):\n # GH#34655\n text = """a,b\nyes,xxx\nno,yyy\n1,zzz\n0,aaa\n """\n parser = all_parsers\n result = parser.read_csv(\n StringIO(text),\n true_values=["yes"],\n false_values=["no"],\n dtype={"a": "boolean"},\n )\n expected = DataFrame(\n {"a": [True, False, True, False], "b": ["xxx", "yyy", "zzz", "aaa"]}\n )\n expected["a"] = expected["a"].astype("boolean")\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.usefixtures("pyarrow_xfail")\n@pytest.mark.parametrize("dtypes, exp_value", [({}, "1"), ({"a.1": "int64"}, 1)])\ndef test_dtype_mangle_dup_cols(all_parsers, dtypes, exp_value):\n # GH#35211\n parser = all_parsers\n data = """a,a\n1,1"""\n dtype_dict = {"a": str, **dtypes}\n # GH#42462\n dtype_dict_copy = dtype_dict.copy()\n result = parser.read_csv(StringIO(data), dtype=dtype_dict)\n expected = DataFrame({"a": ["1"], "a.1": [exp_value]})\n assert dtype_dict == dtype_dict_copy, "dtype dict changed"\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.usefixtures("pyarrow_xfail")\ndef test_dtype_mangle_dup_cols_single_dtype(all_parsers):\n # GH#42022\n parser = all_parsers\n data = """a,a\n1,1"""\n result = 
parser.read_csv(StringIO(data), dtype=str)\n expected = DataFrame({"a": ["1"], "a.1": ["1"]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.usefixtures("pyarrow_xfail")\ndef test_dtype_multi_index(all_parsers):\n # GH 42446\n parser = all_parsers\n data = "A,B,B\nX,Y,Z\n1,2,3"\n\n result = parser.read_csv(\n StringIO(data),\n header=list(range(2)),\n dtype={\n ("A", "X"): np.int32,\n ("B", "Y"): np.int32,\n ("B", "Z"): np.float32,\n },\n )\n\n expected = DataFrame(\n {\n ("A", "X"): np.int32([1]),\n ("B", "Y"): np.int32([2]),\n ("B", "Z"): np.float32([3]),\n }\n )\n\n tm.assert_frame_equal(result, expected)\n\n\ndef test_nullable_int_dtype(all_parsers, any_int_ea_dtype):\n # GH 25472\n parser = all_parsers\n dtype = any_int_ea_dtype\n\n data = """a,b,c\n,3,5\n1,,6\n2,4,"""\n expected = DataFrame(\n {\n "a": pd.array([pd.NA, 1, 2], dtype=dtype),\n "b": pd.array([3, pd.NA, 4], dtype=dtype),\n "c": pd.array([5, 6, pd.NA], dtype=dtype),\n }\n )\n actual = parser.read_csv(StringIO(data), dtype=dtype)\n tm.assert_frame_equal(actual, expected)\n\n\n@pytest.mark.usefixtures("pyarrow_xfail")\n@pytest.mark.parametrize("default", ["float", "float64"])\ndef test_dtypes_defaultdict(all_parsers, default):\n # GH#41574\n data = """a,b\n1,2\n"""\n dtype = defaultdict(lambda: default, a="int64")\n parser = all_parsers\n result = parser.read_csv(StringIO(data), dtype=dtype)\n expected = DataFrame({"a": [1], "b": 2.0})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.usefixtures("pyarrow_xfail")\ndef test_dtypes_defaultdict_mangle_dup_cols(all_parsers):\n # GH#41574\n data = """a,b,a,b,b.1\n1,2,3,4,5\n"""\n dtype = defaultdict(lambda: "float64", a="int64")\n dtype["b.1"] = "int64"\n parser = all_parsers\n result = parser.read_csv(StringIO(data), dtype=dtype)\n expected = DataFrame({"a": [1], "b": [2.0], "a.1": [3], "b.2": [4.0], "b.1": [5]})\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.usefixtures("pyarrow_xfail")\ndef 
test_dtypes_defaultdict_invalid(all_parsers):\n # GH#41574\n data = """a,b\n1,2\n"""\n dtype = defaultdict(lambda: "invalid_dtype", a="int64")\n parser = all_parsers\n with pytest.raises(TypeError, match="not understood"):\n parser.read_csv(StringIO(data), dtype=dtype)\n\n\ndef test_dtype_backend(all_parsers):\n # GH#36712\n\n parser = all_parsers\n\n data = """a,b,c,d,e,f,g,h,i,j\n1,2.5,True,a,,,,,12-31-2019,\n3,4.5,False,b,6,7.5,True,a,12-31-2019,\n"""\n result = parser.read_csv(\n StringIO(data), dtype_backend="numpy_nullable", parse_dates=["i"]\n )\n expected = DataFrame(\n {\n "a": pd.Series([1, 3], dtype="Int64"),\n "b": pd.Series([2.5, 4.5], dtype="Float64"),\n "c": pd.Series([True, False], dtype="boolean"),\n "d": pd.Series(["a", "b"], dtype="string"),\n "e": pd.Series([pd.NA, 6], dtype="Int64"),\n "f": pd.Series([pd.NA, 7.5], dtype="Float64"),\n "g": pd.Series([pd.NA, True], dtype="boolean"),\n "h": pd.Series([pd.NA, "a"], dtype="string"),\n "i": pd.Series([Timestamp("2019-12-31")] * 2),\n "j": pd.Series([pd.NA, pd.NA], dtype="Int64"),\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_dtype_backend_and_dtype(all_parsers):\n # GH#36712\n\n parser = all_parsers\n\n data = """a,b\n1,2.5\n,\n"""\n result = parser.read_csv(\n StringIO(data), dtype_backend="numpy_nullable", dtype="float64"\n )\n expected = DataFrame({"a": [1.0, np.nan], "b": [2.5, np.nan]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_dtype_backend_string(all_parsers, string_storage):\n # GH#36712\n with pd.option_context("mode.string_storage", string_storage):\n parser = all_parsers\n\n data = """a,b\na,x\nb,\n"""\n result = parser.read_csv(StringIO(data), dtype_backend="numpy_nullable")\n\n expected = DataFrame(\n {\n "a": pd.array(["a", "b"], dtype=pd.StringDtype(string_storage)),\n "b": pd.array(["x", pd.NA], dtype=pd.StringDtype(string_storage)),\n },\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_dtype_backend_ea_dtype_specified(all_parsers):\n # 
GH#491496\n data = """a,b\n1,2\n"""\n parser = all_parsers\n result = parser.read_csv(\n StringIO(data), dtype="Int64", dtype_backend="numpy_nullable"\n )\n expected = DataFrame({"a": [1], "b": 2}, dtype="Int64")\n tm.assert_frame_equal(result, expected)\n\n\ndef test_dtype_backend_pyarrow(all_parsers, request):\n # GH#36712\n pa = pytest.importorskip("pyarrow")\n parser = all_parsers\n\n data = """a,b,c,d,e,f,g,h,i,j\n1,2.5,True,a,,,,,12-31-2019,\n3,4.5,False,b,6,7.5,True,a,12-31-2019,\n"""\n result = parser.read_csv(StringIO(data), dtype_backend="pyarrow", parse_dates=["i"])\n expected = DataFrame(\n {\n "a": pd.Series([1, 3], dtype="int64[pyarrow]"),\n "b": pd.Series([2.5, 4.5], dtype="float64[pyarrow]"),\n "c": pd.Series([True, False], dtype="bool[pyarrow]"),\n "d": pd.Series(["a", "b"], dtype=pd.ArrowDtype(pa.string())),\n "e": pd.Series([pd.NA, 6], dtype="int64[pyarrow]"),\n "f": pd.Series([pd.NA, 7.5], dtype="float64[pyarrow]"),\n "g": pd.Series([pd.NA, True], dtype="bool[pyarrow]"),\n "h": pd.Series(\n [pd.NA, "a"],\n dtype=pd.ArrowDtype(pa.string()),\n ),\n "i": pd.Series([Timestamp("2019-12-31")] * 2),\n "j": pd.Series([pd.NA, pd.NA], dtype="null[pyarrow]"),\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\n# pyarrow engine failing:\n# https://github.com/pandas-dev/pandas/issues/56136\n@pytest.mark.usefixtures("pyarrow_xfail")\ndef test_ea_int_avoid_overflow(all_parsers):\n # GH#32134\n parser = all_parsers\n data = """a,b\n1,1\n,1\n1582218195625938945,1\n"""\n result = parser.read_csv(StringIO(data), dtype={"a": "Int64"})\n expected = DataFrame(\n {\n "a": IntegerArray(\n np.array([1, 1, 1582218195625938945]), np.array([False, True, False])\n ),\n "b": 1,\n }\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_string_inference(all_parsers):\n # GH#54430\n dtype = pd.StringDtype(na_value=np.nan)\n\n data = """a,b\nx,1\ny,2\n,3"""\n parser = all_parsers\n with pd.option_context("future.infer_string", True):\n result = 
parser.read_csv(StringIO(data))\n\n expected = DataFrame(\n {"a": pd.Series(["x", "y", None], dtype=dtype), "b": [1, 2, 3]},\n columns=pd.Index(["a", "b"], dtype=dtype),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("dtype", ["O", object, "object", np.object_, str, np.str_])\ndef test_string_inference_object_dtype(all_parsers, dtype, using_infer_string):\n # GH#56047\n data = """a,b\nx,a\ny,a\nz,a"""\n parser = all_parsers\n with pd.option_context("future.infer_string", True):\n result = parser.read_csv(StringIO(data), dtype=dtype)\n\n expected_dtype = pd.StringDtype(na_value=np.nan) if dtype is str else object\n expected = DataFrame(\n {\n "a": pd.Series(["x", "y", "z"], dtype=expected_dtype),\n "b": pd.Series(["a", "a", "a"], dtype=expected_dtype),\n },\n columns=pd.Index(["a", "b"], dtype=pd.StringDtype(na_value=np.nan)),\n )\n tm.assert_frame_equal(result, expected)\n\n with pd.option_context("future.infer_string", True):\n result = parser.read_csv(StringIO(data), dtype={"a": dtype})\n\n expected = DataFrame(\n {\n "a": pd.Series(["x", "y", "z"], dtype=expected_dtype),\n "b": pd.Series(["a", "a", "a"], dtype=pd.StringDtype(na_value=np.nan)),\n },\n columns=pd.Index(["a", "b"], dtype=pd.StringDtype(na_value=np.nan)),\n )\n tm.assert_frame_equal(result, expected)\n\n\n@xfail_pyarrow\ndef test_accurate_parsing_of_large_integers(all_parsers):\n # GH#52505\n data = """SYMBOL,MOMENT,ID,ID_DEAL\nAAPL,20230301181139587,1925036343869802844,\nAAPL,20230301181139587,2023552585717889863,2023552585717263358\nNVDA,20230301181139587,2023552585717889863,2023552585717263359\nAMC,20230301181139587,2023552585717889863,2023552585717263360\nAMZN,20230301181139587,2023552585717889759,2023552585717263360\nMSFT,20230301181139587,2023552585717889863,2023552585717263361\nNVDA,20230301181139587,2023552585717889827,2023552585717263361"""\n orders = all_parsers.read_csv(StringIO(data), dtype={"ID_DEAL": pd.Int64Dtype()})\n assert 
len(orders.loc[orders["ID_DEAL"] == 2023552585717263358, "ID_DEAL"]) == 1\n assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263359, "ID_DEAL"]) == 1\n assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263360, "ID_DEAL"]) == 2\n assert len(orders.loc[orders["ID_DEAL"] == 2023552585717263361, "ID_DEAL"]) == 2\n\n\ndef test_dtypes_with_usecols(all_parsers):\n # GH#54868\n\n parser = all_parsers\n data = """a,b,c\n1,2,3\n4,5,6"""\n\n result = parser.read_csv(StringIO(data), usecols=["a", "c"], dtype={"a": object})\n if parser.engine == "pyarrow":\n values = [1, 4]\n else:\n values = ["1", "4"]\n expected = DataFrame({"a": pd.Series(values, dtype=object), "c": [3, 6]})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_index_col_with_dtype_no_rangeindex(all_parsers):\n data = StringIO("345.5,519.5,0\n519.5,726.5,1")\n result = all_parsers.read_csv(\n data,\n header=None,\n names=["start", "stop", "bin_id"],\n dtype={"start": np.float32, "stop": np.float32, "bin_id": np.uint32},\n index_col="bin_id",\n ).index\n expected = pd.Index([0, 1], dtype=np.uint32, name="bin_id")\n tm.assert_index_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\dtypes\test_dtypes_basic.py | test_dtypes_basic.py | Python | 18,821 | 0.95 | 0.063665 | 0.053211 | vue-tools | 191 | 2023-09-08T23:12:10.499917 | GPL-3.0 | true | 68b4e211aa0e654b80288f7b9933aed1 |
"""\nTests dtype specification during parsing\nfor all of the parsers defined in parsers.py\n"""\nfrom io import StringIO\n\nimport numpy as np\nimport pytest\n\nfrom pandas import (\n Categorical,\n DataFrame,\n Index,\n MultiIndex,\n Series,\n concat,\n)\nimport pandas._testing as tm\n\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\ndef test_dtype_all_columns_empty(all_parsers):\n # see gh-12048\n parser = all_parsers\n result = parser.read_csv(StringIO("A,B"), dtype=str)\n\n expected = DataFrame({"A": [], "B": []}, dtype=str)\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\ndef test_empty_pass_dtype(all_parsers):\n parser = all_parsers\n\n data = "one,two"\n result = parser.read_csv(StringIO(data), dtype={"one": "u1"})\n\n expected = DataFrame(\n {"one": np.empty(0, dtype="u1"), "two": np.empty(0, dtype=object)},\n )\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\ndef test_empty_with_index_pass_dtype(all_parsers):\n parser = all_parsers\n\n data = "one,two"\n result = parser.read_csv(\n StringIO(data), index_col=["one"], dtype={"one": "u1", 1: "f"}\n )\n\n expected = DataFrame(\n {"two": np.empty(0, dtype="f")}, index=Index([], dtype="u1", name="one")\n )\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\ndef test_empty_with_multi_index_pass_dtype(all_parsers):\n parser = all_parsers\n\n data = "one,two,three"\n result = parser.read_csv(\n StringIO(data), index_col=["one", "two"], dtype={"one": "u1", 1: "f8"}\n )\n\n exp_idx = MultiIndex.from_arrays(\n [np.empty(0, dtype="u1"), np.empty(0, dtype=np.float64)],\n names=["one", "two"],\n )\n expected = DataFrame({"three": np.empty(0, dtype=object)}, index=exp_idx)\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\ndef 
test_empty_with_mangled_column_pass_dtype_by_names(all_parsers):\n parser = all_parsers\n\n data = "one,one"\n result = parser.read_csv(StringIO(data), dtype={"one": "u1", "one.1": "f"})\n\n expected = DataFrame(\n {"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},\n )\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\ndef test_empty_with_mangled_column_pass_dtype_by_indexes(all_parsers):\n parser = all_parsers\n\n data = "one,one"\n result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})\n\n expected = DataFrame(\n {"one": np.empty(0, dtype="u1"), "one.1": np.empty(0, dtype="f")},\n )\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # CSV parse error: Empty CSV file or block\ndef test_empty_with_dup_column_pass_dtype_by_indexes(all_parsers):\n # see gh-9424\n parser = all_parsers\n expected = concat(\n [Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],\n axis=1,\n )\n\n data = "one,one"\n result = parser.read_csv(StringIO(data), dtype={0: "u1", 1: "f"})\n tm.assert_frame_equal(result, expected)\n\n\ndef test_empty_with_dup_column_pass_dtype_by_indexes_raises(all_parsers):\n # see gh-9424\n parser = all_parsers\n expected = concat(\n [Series([], name="one", dtype="u1"), Series([], name="one.1", dtype="f")],\n axis=1,\n )\n expected.index = expected.index.astype(object)\n\n with pytest.raises(ValueError, match="Duplicate names"):\n data = ""\n parser.read_csv(StringIO(data), names=["one", "one"], dtype={0: "u1", 1: "f"})\n\n\n@pytest.mark.parametrize(\n "dtype,expected",\n [\n (np.float64, DataFrame(columns=["a", "b"], dtype=np.float64)),\n (\n "category",\n DataFrame({"a": Categorical([]), "b": Categorical([])}),\n ),\n (\n {"a": "category", "b": "category"},\n DataFrame({"a": Categorical([]), "b": Categorical([])}),\n ),\n ("datetime64[ns]", DataFrame(columns=["a", "b"], dtype="datetime64[ns]")),\n (\n "timedelta64[ns]",\n DataFrame(\n {\n "a": 
Series([], dtype="timedelta64[ns]"),\n "b": Series([], dtype="timedelta64[ns]"),\n },\n ),\n ),\n (\n {"a": np.int64, "b": np.int32},\n DataFrame(\n {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},\n ),\n ),\n (\n {0: np.int64, 1: np.int32},\n DataFrame(\n {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},\n ),\n ),\n (\n {"a": np.int64, 1: np.int32},\n DataFrame(\n {"a": Series([], dtype=np.int64), "b": Series([], dtype=np.int32)},\n ),\n ),\n ],\n)\n@skip_pyarrow # CSV parse error: Empty CSV file or block\ndef test_empty_dtype(all_parsers, dtype, expected):\n # see gh-14712\n parser = all_parsers\n data = "a,b"\n\n result = parser.read_csv(StringIO(data), header=0, dtype=dtype)\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\dtypes\test_empty.py | test_empty.py | Python | 5,258 | 0.95 | 0.055249 | 0.027397 | python-kit | 535 | 2023-07-14T06:09:25.535702 | BSD-3-Clause | true | 2a232b4f6ab7c20f895f9685065e1768 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\dtypes\__pycache__\test_categorical.cpython-313.pyc | test_categorical.cpython-313.pyc | Other | 13,566 | 0.8 | 0.004444 | 0 | node-utils | 392 | 2024-07-17T15:29:11.171882 | MIT | true | 895429c2da75a7286362d5009c66c6db |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\dtypes\__pycache__\test_dtypes_basic.cpython-313.pyc | test_dtypes_basic.cpython-313.pyc | Other | 27,633 | 0.95 | 0.005666 | 0 | node-utils | 313 | 2024-06-04T18:02:45.299174 | BSD-3-Clause | true | bf56c9d0169ea5986bf29ece684f87a9 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\dtypes\__pycache__\test_empty.cpython-313.pyc | test_empty.cpython-313.pyc | Other | 7,046 | 0.8 | 0.012821 | 0 | python-kit | 375 | 2024-01-14T20:48:50.968631 | GPL-3.0 | true | 35e9cc108317f21f9ef3b8cd93047969 |
\n\n | .venv\Lib\site-packages\pandas\tests\io\parser\dtypes\__pycache__\__init__.cpython-313.pyc | __init__.cpython-313.pyc | Other | 204 | 0.7 | 0 | 0 | python-kit | 341 | 2024-07-25T02:23:37.479325 | BSD-3-Clause | true | 95d825ee4eeca3857123544ebd76c545 |
"""\nTests the usecols functionality during parsing\nfor all of the parsers defined in parsers.py\n"""\nfrom io import StringIO\n\nimport pytest\n\nfrom pandas import (\n DataFrame,\n Index,\n Timestamp,\n)\nimport pandas._testing as tm\n\npytestmark = pytest.mark.filterwarnings(\n "ignore:Passing a BlockManager to DataFrame:DeprecationWarning"\n)\nxfail_pyarrow = pytest.mark.usefixtures("pyarrow_xfail")\nskip_pyarrow = pytest.mark.usefixtures("pyarrow_skip")\n\n_msg_pyarrow_requires_names = (\n "The pyarrow engine does not allow 'usecols' to be integer column "\n "positions. Pass a list of string column names instead."\n)\n\n\n@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]])\ndef test_usecols_with_parse_dates(all_parsers, usecols):\n # see gh-9755\n data = """a,b,c,d,e\n0,1,2014-01-01,09:00,4\n0,1,2014-01-02,10:00,4"""\n parser = all_parsers\n parse_dates = [[1, 2]]\n\n depr_msg = (\n "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated"\n )\n\n cols = {\n "a": [0, 0],\n "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],\n }\n expected = DataFrame(cols, columns=["c_d", "a"])\n if parser.engine == "pyarrow":\n with pytest.raises(ValueError, match=_msg_pyarrow_requires_names):\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n parser.read_csv(\n StringIO(data), usecols=usecols, parse_dates=parse_dates\n )\n return\n with tm.assert_produces_warning(\n FutureWarning, match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(\n StringIO(data), usecols=usecols, parse_dates=parse_dates\n )\n tm.assert_frame_equal(result, expected)\n\n\n@skip_pyarrow # pyarrow.lib.ArrowKeyError: Column 'fdate' in include_columns\ndef test_usecols_with_parse_dates2(all_parsers):\n # see gh-13604\n parser = all_parsers\n data = """2008-02-07 09:40,1032.43\n2008-02-07 09:50,1042.54\n2008-02-07 10:00,1051.65"""\n\n names = ["date", "values"]\n usecols = names[:]\n 
parse_dates = [0]\n\n index = Index(\n [\n Timestamp("2008-02-07 09:40"),\n Timestamp("2008-02-07 09:50"),\n Timestamp("2008-02-07 10:00"),\n ],\n name="date",\n )\n cols = {"values": [1032.43, 1042.54, 1051.65]}\n expected = DataFrame(cols, index=index)\n\n result = parser.read_csv(\n StringIO(data),\n parse_dates=parse_dates,\n index_col=0,\n usecols=usecols,\n header=None,\n names=names,\n )\n tm.assert_frame_equal(result, expected)\n\n\ndef test_usecols_with_parse_dates3(all_parsers):\n # see gh-14792\n parser = all_parsers\n data = """a,b,c,d,e,f,g,h,i,j\n2016/09/21,1,1,2,3,4,5,6,7,8"""\n\n usecols = list("abcdefghij")\n parse_dates = [0]\n\n cols = {\n "a": Timestamp("2016-09-21").as_unit("ns"),\n "b": [1],\n "c": [1],\n "d": [2],\n "e": [3],\n "f": [4],\n "g": [5],\n "h": [6],\n "i": [7],\n "j": [8],\n }\n expected = DataFrame(cols, columns=usecols)\n\n result = parser.read_csv(StringIO(data), usecols=usecols, parse_dates=parse_dates)\n tm.assert_frame_equal(result, expected)\n\n\ndef test_usecols_with_parse_dates4(all_parsers):\n data = "a,b,c,d,e,f,g,h,i,j\n2016/09/21,1,1,2,3,4,5,6,7,8"\n usecols = list("abcdefghij")\n parse_dates = [[0, 1]]\n parser = all_parsers\n\n cols = {\n "a_b": "2016/09/21 1",\n "c": [1],\n "d": [2],\n "e": [3],\n "f": [4],\n "g": [5],\n "h": [6],\n "i": [7],\n "j": [8],\n }\n expected = DataFrame(cols, columns=["a_b"] + list("cdefghij"))\n\n depr_msg = (\n "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated"\n )\n with tm.assert_produces_warning(\n (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(\n StringIO(data),\n usecols=usecols,\n parse_dates=parse_dates,\n )\n tm.assert_frame_equal(result, expected)\n\n\n@pytest.mark.parametrize("usecols", [[0, 2, 3], [3, 0, 2]])\n@pytest.mark.parametrize(\n "names",\n [\n list("abcde"), # Names span all columns in original data.\n list("acd"), # Names span only the selected columns.\n ],\n)\ndef 
test_usecols_with_parse_dates_and_names(all_parsers, usecols, names, request):\n # see gh-9755\n s = """0,1,2014-01-01,09:00,4\n0,1,2014-01-02,10:00,4"""\n parse_dates = [[1, 2]]\n parser = all_parsers\n\n if parser.engine == "pyarrow" and not (len(names) == 3 and usecols[0] == 0):\n mark = pytest.mark.xfail(\n reason="Length mismatch in some cases, UserWarning in other"\n )\n request.applymarker(mark)\n\n cols = {\n "a": [0, 0],\n "c_d": [Timestamp("2014-01-01 09:00:00"), Timestamp("2014-01-02 10:00:00")],\n }\n expected = DataFrame(cols, columns=["c_d", "a"])\n\n depr_msg = (\n "Support for nested sequences for 'parse_dates' in pd.read_csv is deprecated"\n )\n with tm.assert_produces_warning(\n (FutureWarning, DeprecationWarning), match=depr_msg, check_stacklevel=False\n ):\n result = parser.read_csv(\n StringIO(s), names=names, parse_dates=parse_dates, usecols=usecols\n )\n tm.assert_frame_equal(result, expected)\n | .venv\Lib\site-packages\pandas\tests\io\parser\usecols\test_parse_dates.py | test_parse_dates.py | Python | 5,469 | 0.95 | 0.072165 | 0.023952 | node-utils | 422 | 2024-06-26T05:07:29.960830 | GPL-3.0 | true | 78286b83b527a3abfe43f8470db32c89 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.