diff --git a/.gitattributes b/.gitattributes
index ae9738f52e2d86e1bbd2bfc2e3bdb33a55dfe3ee..d0bc835f5fbe3e04ea7baf880d7af02264ed5b39 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -550,3 +550,5 @@ mantis_evalkit/lib/python3.10/site-packages/nvidia/cublas/lib/libnvblas.so.12 fi
 moondream/lib/python3.10/site-packages/fontTools/subset/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 moondream/lib/python3.10/site-packages/pygments/lexers/__pycache__/lisp.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 parrot/lib/python3.10/site-packages/nvidia/cuda_cupti/lib/libcupti.so.12 filter=lfs diff=lfs merge=lfs -text
+moondream/lib/python3.10/site-packages/altair/vegalite/v5/__pycache__/api.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+mantis_evalkit/bin/python3.10 filter=lfs diff=lfs merge=lfs -text
diff --git a/mantis_evalkit/bin/python3.10 b/mantis_evalkit/bin/python3.10
new file mode 100644
index 0000000000000000000000000000000000000000..976904bfe553ba959ea37b69dae6788f90406444
--- /dev/null
+++ b/mantis_evalkit/bin/python3.10
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:942d139b2c77dad76f23ac64382f5e2714b5b2f0b1be3e76a8f79e1214cf0e6d
+size 17225608
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_asof.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_asof.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..a539bc44abb5ac54646db9f154c47cc89a9e9632
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_asof.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_assign.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_assign.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..252cbcb513b458d612e6f132d83882135bfb5c63
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_assign.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_at_time.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_at_time.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..460610956a18a92c411916c07747d8b4bab1e46a
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_at_time.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_between_time.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_between_time.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c01fe55ddc66d75801ec25db403dfc3fac2bc776
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_between_time.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_combine.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_combine.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f599834c47b1d033858303ed0a16ebcd9645db91
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_combine.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_combine_first.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_combine_first.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..97ae7774a7c70e45c082270c453356b600ec34fd
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_combine_first.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_compare.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_compare.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..690f010ece652527ebe1fb4d9446f39096e9222b
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_compare.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_convert_dtypes.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_convert_dtypes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..45439bdc464c395a06d4d7bfed79a2d25dc2f84b
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_convert_dtypes.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_cov_corr.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_cov_corr.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..59a115f5e37dc44fa5d11659d2dd25af1a6e28c4
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_cov_corr.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_describe.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_describe.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7ec1fe099811e777d44d3236f6d113760d579c3f
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_describe.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_dropna.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_dropna.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..666ba981c38ca9931d525b60cac98e000ce4c167
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_dropna.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_dtypes.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_dtypes.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da8282ee2d409ee9ebb9c7d2e07f81a97a050c72
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_dtypes.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_duplicated.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_duplicated.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..fce5423c1bf8a81af914484ec6543167929d9ac6
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_duplicated.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_filter.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_filter.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c978b1dc209a9035dd37c9cbc417f6f1fbe0c544
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_filter.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_first_valid_index.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_first_valid_index.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..291fde6995d6367126cca37bf1ec84e47b403cd8
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_first_valid_index.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_infer_objects.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_infer_objects.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1c41ddc63299855ffec953735f7b50b05f632ea8
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_infer_objects.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_interpolate.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_interpolate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..0b5d87cfb2135e631a4fc7e3fabab5ffa1c33588
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_interpolate.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_iterrows.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_iterrows.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..78ab0f7637426ee90b89ad028dce40ad6ab9a84c
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_iterrows.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_matmul.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_matmul.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3d0bfce9c17739199ea5ea5b8ccfaeeab0d82be0
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_matmul.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_pct_change.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_pct_change.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..37413f854384d0b4e3dab6850bb2d6fc35b740ce
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_pct_change.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_quantile.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_quantile.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da2467d53af9c9be42680b86374bbca06e03f6c7
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_quantile.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_reindex_like.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_reindex_like.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..1847bf6094420ef5e9f25d239ce51ce27e6ac3d5
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_reindex_like.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_rename.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_rename.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..25e625b16740c45d97474a64ce67afa1dff913e1
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_rename.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_reorder_levels.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_reorder_levels.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..13354d42dfeebfe8b75c448828b8ecf3d2b70d73
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_reorder_levels.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_replace.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_replace.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..cf59b60ca83b86f6dce34de20a75001eecdf4800
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_replace.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_round.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_round.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..caffdfd62588fe62b175abaf4fa732ca78bc75cf
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_round.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_set_axis.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_set_axis.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..62f89774cc47f5a623997e23ef78ad016e7749b0
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_set_axis.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_shift.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_shift.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4c50707b6d16c547477cad59272e3e9926025de5
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_shift.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_sort_index.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_sort_index.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..40ced8d35d421ae576e1838fbb8d599fc94e47cc
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_sort_index.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_sort_values.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_sort_values.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d62040e744e10f8950cce643c2f3c318ce2df836
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_sort_values.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_to_numpy.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_to_numpy.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..caa05d562e43b1a5156d379aade884938b9292b2
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_to_numpy.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_to_period.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_to_period.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..88ea28ba502c1616f5a1e90587b2a35ebd704ba6
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_to_period.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_to_records.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_to_records.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c1958237e96de896e95ecd7e1887753258d2c84c
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_to_records.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_truncate.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_truncate.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..2d21f47e2bdbed632b27ade432046e82a99a9386
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_truncate.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_tz_localize.cpython-310.pyc b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_tz_localize.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6fcb86a00cca64295f53c03c0e37b49b0d9ab1b7
Binary files /dev/null and b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/__pycache__/test_tz_localize.cpython-310.pyc differ
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_at_time.py b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_at_time.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c1434bd66aff127e07c4ff3fce90a22721b2035
--- /dev/null
+++ b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_at_time.py
@@ -0,0 +1,132 @@
+from datetime import time
+
+import numpy as np
+import pytest
+import pytz
+
+from pandas._libs.tslibs import timezones
+
+from pandas import (
+    DataFrame,
+    date_range,
+)
+import pandas._testing as tm
+
+
+class TestAtTime:
+    @pytest.mark.parametrize("tzstr", ["US/Eastern", "dateutil/US/Eastern"])
+    def test_localized_at_time(self, tzstr, frame_or_series):
+        tz = timezones.maybe_get_tz(tzstr)
+
+        rng = date_range("4/16/2012", "5/1/2012", freq="h")
+        ts = frame_or_series(
+            np.random.default_rng(2).standard_normal(len(rng)), index=rng
+        )
+
+        ts_local = ts.tz_localize(tzstr)
+
+        result = ts_local.at_time(time(10, 0))
+        expected = ts.at_time(time(10, 0)).tz_localize(tzstr)
+        tm.assert_equal(result, expected)
+        assert timezones.tz_compare(result.index.tz, tz)
+
+    def test_at_time(self, frame_or_series):
+        rng = date_range("1/1/2000", "1/5/2000", freq="5min")
+        ts = DataFrame(
+            np.random.default_rng(2).standard_normal((len(rng), 2)), index=rng
+        )
+        ts = tm.get_obj(ts, frame_or_series)
+        rs = ts.at_time(rng[1])
+        assert (rs.index.hour == rng[1].hour).all()
+        assert (rs.index.minute == rng[1].minute).all()
+        assert (rs.index.second == rng[1].second).all()
+
+        result = ts.at_time("9:30")
+        expected = ts.at_time(time(9, 30))
+        tm.assert_equal(result, expected)
+
+    def test_at_time_midnight(self, frame_or_series):
+        # midnight, everything
+        rng = date_range("1/1/2000", "1/31/2000")
+        ts = DataFrame(
+            np.random.default_rng(2).standard_normal((len(rng), 3)), index=rng
+        )
+        ts = tm.get_obj(ts, frame_or_series)
+
+        result = ts.at_time(time(0, 0))
+        tm.assert_equal(result, ts)
+
+    def test_at_time_nonexistent(self, frame_or_series):
+        # time doesn't exist
+        rng = date_range("1/1/2012", freq="23Min", periods=384)
+        ts = DataFrame(np.random.default_rng(2).standard_normal(len(rng)), rng)
+        ts = tm.get_obj(ts, frame_or_series)
+        rs = ts.at_time("16:00")
+        assert len(rs) == 0
+
+    @pytest.mark.parametrize(
+        "hour", ["1:00", "1:00AM", time(1), time(1, tzinfo=pytz.UTC)]
+    )
+    def test_at_time_errors(self, hour):
+        # GH#24043
+        dti = date_range("2018", periods=3, freq="h")
+        df = DataFrame(list(range(len(dti))), index=dti)
+        if getattr(hour, "tzinfo", None) is None:
+            result = df.at_time(hour)
+            expected = df.iloc[1:2]
+            tm.assert_frame_equal(result, expected)
+        else:
+            with pytest.raises(ValueError, match="Index must be timezone"):
+                df.at_time(hour)
+
+    def test_at_time_tz(self):
+        # GH#24043
+        dti = date_range("2018", periods=3, freq="h", tz="US/Pacific")
+        df = DataFrame(list(range(len(dti))), index=dti)
+        result = df.at_time(time(4, tzinfo=pytz.timezone("US/Eastern")))
+        expected = df.iloc[1:2]
+        tm.assert_frame_equal(result, expected)
+
+    def test_at_time_raises(self, frame_or_series):
+        # GH#20725
+        obj = DataFrame([[1, 2, 3], [4, 5, 6]])
+        obj = tm.get_obj(obj, frame_or_series)
+        msg = "Index must be DatetimeIndex"
+        with pytest.raises(TypeError, match=msg):  # index is not a DatetimeIndex
+            obj.at_time("00:00")
+
+    @pytest.mark.parametrize("axis", ["index", "columns", 0, 1])
+    def test_at_time_axis(self, axis):
+        # issue 8839
+        rng = date_range("1/1/2000", "1/5/2000", freq="5min")
+        ts = DataFrame(np.random.default_rng(2).standard_normal((len(rng), len(rng))))
+        ts.index, ts.columns = rng, rng
+
+        indices = rng[(rng.hour == 9) & (rng.minute == 30) & (rng.second == 0)]
+
+        if axis in ["index", 0]:
+            expected = ts.loc[indices, :]
+        elif axis in ["columns", 1]:
+            expected = ts.loc[:, indices]
+
+        result = ts.at_time("9:30", axis=axis)
+
+        # Without clearing freq, result has freq 1440T and expected 5T
+        result.index = result.index._with_freq(None)
+        expected.index = expected.index._with_freq(None)
+        tm.assert_frame_equal(result, expected)
+
+    def test_at_time_datetimeindex(self):
+        index = date_range("2012-01-01", "2012-01-05", freq="30min")
+        df = DataFrame(
+            np.random.default_rng(2).standard_normal((len(index), 5)), index=index
+        )
+        akey = time(12, 0, 0)
+        ainds = [24, 72, 120, 168]
+
+        result = df.at_time(akey)
+        expected = df.loc[akey]
+        expected2 = df.iloc[ainds]
+        tm.assert_frame_equal(result, expected)
+        tm.assert_frame_equal(result, expected2)
+        assert len(result) == 4
diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_interpolate.py b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_interpolate.py
new file mode 100644
index 0000000000000000000000000000000000000000..252b950004bea6494b686ba409c5b5563a456a67
--- /dev/null
+++ b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_interpolate.py
@@ -0,0 +1,548 @@
+import numpy as np
+import pytest
+
+from pandas._config import using_pyarrow_string_dtype
+
+from pandas.errors import ChainedAssignmentError
+import pandas.util._test_decorators as td
+
+from pandas import (
+    DataFrame,
+    NaT,
+    Series,
+    date_range,
+)
+import pandas._testing as tm
+
+
+class TestDataFrameInterpolate:
+    def test_interpolate_complex(self):
+        # GH#53635
+        ser = Series([complex("1+1j"), float("nan"), complex("2+2j")])
+        assert ser.dtype.kind == "c"
+
+        res = ser.interpolate()
+        expected = Series([ser[0], ser[0] * 1.5, ser[2]])
+        tm.assert_series_equal(res, expected)
+
+        df = ser.to_frame()
+        res = df.interpolate()
+        expected = expected.to_frame()
+        tm.assert_frame_equal(res, expected)
+
+    def test_interpolate_datetimelike_values(self, frame_or_series):
+        # GH#11312, GH#51005
+        orig = Series(date_range("2012-01-01", periods=5))
+ ser = orig.copy() + ser[2] = NaT + + res = frame_or_series(ser).interpolate() + expected = frame_or_series(orig) + tm.assert_equal(res, expected) + + # datetime64tz cast + ser_tz = ser.dt.tz_localize("US/Pacific") + res_tz = frame_or_series(ser_tz).interpolate() + expected_tz = frame_or_series(orig.dt.tz_localize("US/Pacific")) + tm.assert_equal(res_tz, expected_tz) + + # timedelta64 cast + ser_td = ser - ser[0] + res_td = frame_or_series(ser_td).interpolate() + expected_td = frame_or_series(orig - orig[0]) + tm.assert_equal(res_td, expected_td) + + def test_interpolate_inplace(self, frame_or_series, using_array_manager, request): + # GH#44749 + if using_array_manager and frame_or_series is DataFrame: + mark = pytest.mark.xfail(reason=".values-based in-place check is invalid") + request.applymarker(mark) + + obj = frame_or_series([1, np.nan, 2]) + orig = obj.values + + obj.interpolate(inplace=True) + expected = frame_or_series([1, 1.5, 2]) + tm.assert_equal(obj, expected) + + # check we operated *actually* inplace + assert np.shares_memory(orig, obj.values) + assert orig.squeeze()[1] == 1.5 + + @pytest.mark.xfail( + using_pyarrow_string_dtype(), reason="interpolate doesn't work for string" + ) + def test_interp_basic(self, using_copy_on_write): + df = DataFrame( + { + "A": [1, 2, np.nan, 4], + "B": [1, 4, 9, np.nan], + "C": [1, 2, 3, 5], + "D": list("abcd"), + } + ) + expected = DataFrame( + { + "A": [1.0, 2.0, 3.0, 4.0], + "B": [1.0, 4.0, 9.0, 9.0], + "C": [1, 2, 3, 5], + "D": list("abcd"), + } + ) + msg = "DataFrame.interpolate with object dtype" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.interpolate() + tm.assert_frame_equal(result, expected) + + # check we didn't operate inplace GH#45791 + cvalues = df["C"]._values + dvalues = df["D"].values + if using_copy_on_write: + assert np.shares_memory(cvalues, result["C"]._values) + assert np.shares_memory(dvalues, result["D"]._values) + else: + assert not np.shares_memory(cvalues, result["C"]._values) + assert not np.shares_memory(dvalues, result["D"]._values) + + with tm.assert_produces_warning(FutureWarning, match=msg): + res = df.interpolate(inplace=True) + assert res is None + tm.assert_frame_equal(df, expected) + + # check we DID operate inplace + assert np.shares_memory(df["C"]._values, cvalues) + assert np.shares_memory(df["D"]._values, dvalues) + + @pytest.mark.xfail( + using_pyarrow_string_dtype(), reason="interpolate doesn't work for string" + ) + def test_interp_basic_with_non_range_index(self, using_infer_string): + df = DataFrame( + { + "A": [1, 2, np.nan, 4], + "B": [1, 4, 9, np.nan], + "C": [1, 2, 3, 5], + "D": list("abcd"), + } + ) + + msg = "DataFrame.interpolate with object dtype" + warning = FutureWarning if not using_infer_string else None + with tm.assert_produces_warning(warning, match=msg): + result = df.set_index("C").interpolate() + expected = df.set_index("C") + expected.loc[3, "A"] = 3 + expected.loc[5, "B"] = 9 + tm.assert_frame_equal(result, expected) + + def test_interp_empty(self): + # https://github.com/pandas-dev/pandas/issues/35598 + df = DataFrame() + result = df.interpolate() + assert result is not df + expected = df + tm.assert_frame_equal(result, expected) + + def test_interp_bad_method(self): + df = DataFrame( + { + "A": [1, 2, np.nan, 4], + "B": [1, 4, 9, np.nan], + "C": [1, 2, 3, 5], + } + ) + msg = ( + r"method must be one of \['linear', 'time', 'index', 'values', " + r"'nearest', 'zero', 'slinear', 'quadratic', 'cubic', " + r"'barycentric', 'krogh', 'spline', 
'polynomial', " + r"'from_derivatives', 'piecewise_polynomial', 'pchip', 'akima', " + r"'cubicspline'\]. Got 'not_a_method' instead." + ) + with pytest.raises(ValueError, match=msg): + df.interpolate(method="not_a_method") + + def test_interp_combo(self): + df = DataFrame( + { + "A": [1.0, 2.0, np.nan, 4.0], + "B": [1, 4, 9, np.nan], + "C": [1, 2, 3, 5], + "D": list("abcd"), + } + ) + + result = df["A"].interpolate() + expected = Series([1.0, 2.0, 3.0, 4.0], name="A") + tm.assert_series_equal(result, expected) + + msg = "The 'downcast' keyword in Series.interpolate is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df["A"].interpolate(downcast="infer") + expected = Series([1, 2, 3, 4], name="A") + tm.assert_series_equal(result, expected) + + def test_inerpolate_invalid_downcast(self): + # GH#53103 + df = DataFrame( + { + "A": [1.0, 2.0, np.nan, 4.0], + "B": [1, 4, 9, np.nan], + "C": [1, 2, 3, 5], + "D": list("abcd"), + } + ) + + msg = "downcast must be either None or 'infer'" + msg2 = "The 'downcast' keyword in DataFrame.interpolate is deprecated" + msg3 = "The 'downcast' keyword in Series.interpolate is deprecated" + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=msg2): + df.interpolate(downcast="int64") + with pytest.raises(ValueError, match=msg): + with tm.assert_produces_warning(FutureWarning, match=msg3): + df["A"].interpolate(downcast="int64") + + def test_interp_nan_idx(self): + df = DataFrame({"A": [1, 2, np.nan, 4], "B": [np.nan, 2, 3, 4]}) + df = df.set_index("A") + msg = ( + "Interpolation with NaNs in the index has not been implemented. " + "Try filling those NaNs before interpolating." + ) + with pytest.raises(NotImplementedError, match=msg): + df.interpolate(method="values") + + def test_interp_various(self): + pytest.importorskip("scipy") + df = DataFrame( + {"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]} + ) + df = df.set_index("C") + expected = df.copy() + result = df.interpolate(method="polynomial", order=1) + + expected.loc[3, "A"] = 2.66666667 + expected.loc[13, "A"] = 5.76923076 + tm.assert_frame_equal(result, expected) + + result = df.interpolate(method="cubic") + # GH #15662. 
+ expected.loc[3, "A"] = 2.81547781 + expected.loc[13, "A"] = 5.52964175 + tm.assert_frame_equal(result, expected) + + result = df.interpolate(method="nearest") + expected.loc[3, "A"] = 2 + expected.loc[13, "A"] = 5 + tm.assert_frame_equal(result, expected, check_dtype=False) + + result = df.interpolate(method="quadratic") + expected.loc[3, "A"] = 2.82150771 + expected.loc[13, "A"] = 6.12648668 + tm.assert_frame_equal(result, expected) + + result = df.interpolate(method="slinear") + expected.loc[3, "A"] = 2.66666667 + expected.loc[13, "A"] = 5.76923077 + tm.assert_frame_equal(result, expected) + + result = df.interpolate(method="zero") + expected.loc[3, "A"] = 2.0 + expected.loc[13, "A"] = 5 + tm.assert_frame_equal(result, expected, check_dtype=False) + + def test_interp_alt_scipy(self): + pytest.importorskip("scipy") + df = DataFrame( + {"A": [1, 2, np.nan, 4, 5, np.nan, 7], "C": [1, 2, 3, 5, 8, 13, 21]} + ) + result = df.interpolate(method="barycentric") + expected = df.copy() + expected.loc[2, "A"] = 3 + expected.loc[5, "A"] = 6 + tm.assert_frame_equal(result, expected) + + msg = "The 'downcast' keyword in DataFrame.interpolate is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.interpolate(method="barycentric", downcast="infer") + tm.assert_frame_equal(result, expected.astype(np.int64)) + + result = df.interpolate(method="krogh") + expectedk = df.copy() + expectedk["A"] = expected["A"] + tm.assert_frame_equal(result, expectedk) + + result = df.interpolate(method="pchip") + expected.loc[2, "A"] = 3 + expected.loc[5, "A"] = 6.0 + + tm.assert_frame_equal(result, expected) + + def test_interp_rowwise(self): + df = DataFrame( + { + 0: [1, 2, np.nan, 4], + 1: [2, 3, 4, np.nan], + 2: [np.nan, 4, 5, 6], + 3: [4, np.nan, 6, 7], + 4: [1, 2, 3, 4], + } + ) + result = df.interpolate(axis=1) + expected = df.copy() + expected.loc[3, 1] = 5 + expected.loc[0, 2] = 3 + expected.loc[1, 3] = 3 + expected[4] = expected[4].astype(np.float64) + tm.assert_frame_equal(result, expected) + + result = df.interpolate(axis=1, method="values") + tm.assert_frame_equal(result, expected) + + result = df.interpolate(axis=0) + expected = df.interpolate() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "axis_name, axis_number", + [ + pytest.param("rows", 0, id="rows_0"), + pytest.param("index", 0, id="index_0"), + pytest.param("columns", 1, id="columns_1"), + ], + ) + def test_interp_axis_names(self, axis_name, axis_number): + # GH 29132: test axis names + data = {0: [0, np.nan, 6], 1: [1, np.nan, 7], 2: [2, 5, 8]} + + df = DataFrame(data, dtype=np.float64) + result = df.interpolate(axis=axis_name, method="linear") + expected = df.interpolate(axis=axis_number, method="linear") + tm.assert_frame_equal(result, expected) + + def test_rowwise_alt(self): + df = DataFrame( + { + 0: [0, 0.5, 1.0, np.nan, 4, 8, np.nan, np.nan, 64], + 1: [1, 2, 3, 4, 3, 2, 1, 0, -1], + } + ) + df.interpolate(axis=0) + # TODO: assert something? 
+ + @pytest.mark.parametrize( + "check_scipy", [False, pytest.param(True, marks=td.skip_if_no("scipy"))] + ) + def test_interp_leading_nans(self, check_scipy): + df = DataFrame( + {"A": [np.nan, np.nan, 0.5, 0.25, 0], "B": [np.nan, -3, -3.5, np.nan, -4]} + ) + result = df.interpolate() + expected = df.copy() + expected.loc[3, "B"] = -3.75 + tm.assert_frame_equal(result, expected) + + if check_scipy: + result = df.interpolate(method="polynomial", order=1) + tm.assert_frame_equal(result, expected) + + def test_interp_raise_on_only_mixed(self, axis): + df = DataFrame( + { + "A": [1, 2, np.nan, 4], + "B": ["a", "b", "c", "d"], + "C": [np.nan, 2, 5, 7], + "D": [np.nan, np.nan, 9, 9], + "E": [1, 2, 3, 4], + } + ) + msg = ( + "Cannot interpolate with all object-dtype columns " + "in the DataFrame. Try setting at least one " + "column to a numeric dtype." + ) + with pytest.raises(TypeError, match=msg): + df.astype("object").interpolate(axis=axis) + + def test_interp_raise_on_all_object_dtype(self): + # GH 22985 + df = DataFrame({"A": [1, 2, 3], "B": [4, 5, 6]}, dtype="object") + msg = ( + "Cannot interpolate with all object-dtype columns " + "in the DataFrame. Try setting at least one " + "column to a numeric dtype." + ) + with pytest.raises(TypeError, match=msg): + df.interpolate() + + def test_interp_inplace(self, using_copy_on_write): + df = DataFrame({"a": [1.0, 2.0, np.nan, 4.0]}) + expected = DataFrame({"a": [1.0, 2.0, 3.0, 4.0]}) + expected_cow = df.copy() + result = df.copy() + + if using_copy_on_write: + with tm.raises_chained_assignment_error(): + return_value = result["a"].interpolate(inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected_cow) + else: + with tm.assert_produces_warning(FutureWarning, match="inplace method"): + return_value = result["a"].interpolate(inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + result = df.copy() + msg = "The 'downcast' keyword in Series.interpolate is deprecated" + + if using_copy_on_write: + with tm.assert_produces_warning( + (FutureWarning, ChainedAssignmentError), match=msg + ): + return_value = result["a"].interpolate(inplace=True, downcast="infer") + assert return_value is None + tm.assert_frame_equal(result, expected_cow) + else: + with tm.assert_produces_warning(FutureWarning, match=msg): + return_value = result["a"].interpolate(inplace=True, downcast="infer") + assert return_value is None + tm.assert_frame_equal(result, expected.astype("int64")) + + def test_interp_inplace_row(self): + # GH 10395 + result = DataFrame( + {"a": [1.0, 2.0, 3.0, 4.0], "b": [np.nan, 2.0, 3.0, 4.0], "c": [3, 2, 2, 2]} + ) + expected = result.interpolate(method="linear", axis=1, inplace=False) + return_value = result.interpolate(method="linear", axis=1, inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + def test_interp_ignore_all_good(self): + # GH + df = DataFrame( + { + "A": [1, 2, np.nan, 4], + "B": [1, 2, 3, 4], + "C": [1.0, 2.0, np.nan, 4.0], + "D": [1.0, 2.0, 3.0, 4.0], + } + ) + expected = DataFrame( + { + "A": np.array([1, 2, 3, 4], dtype="float64"), + "B": np.array([1, 2, 3, 4], dtype="int64"), + "C": np.array([1.0, 2.0, 3, 4.0], dtype="float64"), + "D": np.array([1.0, 2.0, 3.0, 4.0], dtype="float64"), + } + ) + + msg = "The 'downcast' keyword in DataFrame.interpolate is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.interpolate(downcast=None) + tm.assert_frame_equal(result, expected) + + # all good + with 
tm.assert_produces_warning(FutureWarning, match=msg): + result = df[["B", "D"]].interpolate(downcast=None) + tm.assert_frame_equal(result, df[["B", "D"]]) + + def test_interp_time_inplace_axis(self): + # GH 9687 + periods = 5 + idx = date_range(start="2014-01-01", periods=periods) + data = np.random.default_rng(2).random((periods, periods)) + data[data < 0.5] = np.nan + expected = DataFrame(index=idx, columns=idx, data=data) + + result = expected.interpolate(axis=0, method="time") + return_value = expected.interpolate(axis=0, method="time", inplace=True) + assert return_value is None + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("axis_name, axis_number", [("index", 0), ("columns", 1)]) + def test_interp_string_axis(self, axis_name, axis_number): + # https://github.com/pandas-dev/pandas/issues/25190 + x = np.linspace(0, 100, 1000) + y = np.sin(x) + df = DataFrame( + data=np.tile(y, (10, 1)), index=np.arange(10), columns=x + ).reindex(columns=x * 1.005) + result = df.interpolate(method="linear", axis=axis_name) + expected = df.interpolate(method="linear", axis=axis_number) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("multiblock", [True, False]) + @pytest.mark.parametrize("method", ["ffill", "bfill", "pad"]) + def test_interp_fillna_methods( + self, request, axis, multiblock, method, using_array_manager + ): + # GH 12918 + if using_array_manager and axis in (1, "columns"): + # TODO(ArrayManager) support axis=1 + td.mark_array_manager_not_yet_implemented(request) + + df = DataFrame( + { + "A": [1.0, 2.0, 3.0, 4.0, np.nan, 5.0], + "B": [2.0, 4.0, 6.0, np.nan, 8.0, 10.0], + "C": [3.0, 6.0, 9.0, np.nan, np.nan, 30.0], + } + ) + if multiblock: + df["D"] = np.nan + df["E"] = 1.0 + + method2 = method if method != "pad" else "ffill" + expected = getattr(df, method2)(axis=axis) + msg = f"DataFrame.interpolate with method={method} is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + result = df.interpolate(method=method, axis=axis) + tm.assert_frame_equal(result, expected) + + def test_interpolate_empty_df(self): + # GH#53199 + df = DataFrame() + expected = df.copy() + result = df.interpolate(inplace=True) + assert result is None + tm.assert_frame_equal(df, expected) + + def test_interpolate_ea(self, any_int_ea_dtype): + # GH#55347 + df = DataFrame({"a": [1, None, None, None, 3]}, dtype=any_int_ea_dtype) + orig = df.copy() + result = df.interpolate(limit=2) + expected = DataFrame({"a": [1, 1.5, 2.0, None, 3]}, dtype="Float64") + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(df, orig) + + @pytest.mark.parametrize( + "dtype", + [ + "Float64", + "Float32", + pytest.param("float32[pyarrow]", marks=td.skip_if_no("pyarrow")), + pytest.param("float64[pyarrow]", marks=td.skip_if_no("pyarrow")), + ], + ) + def test_interpolate_ea_float(self, dtype): + # GH#55347 + df = DataFrame({"a": [1, None, None, None, 3]}, dtype=dtype) + orig = df.copy() + result = df.interpolate(limit=2) + expected = DataFrame({"a": [1, 1.5, 2.0, None, 3]}, dtype=dtype) + tm.assert_frame_equal(result, expected) + tm.assert_frame_equal(df, orig) + + @pytest.mark.parametrize( + "dtype", + ["int64", "uint64", "int32", "int16", "int8", "uint32", "uint16", "uint8"], + ) + def test_interpolate_arrow(self, dtype): + # GH#55347 + pytest.importorskip("pyarrow") + df = DataFrame({"a": [1, None, None, None, 3]}, dtype=dtype + "[pyarrow]") + result = df.interpolate(limit=2) + expected = DataFrame({"a": [1, 1.5, 2.0, None, 3]}, dtype="float64[pyarrow]") + 
tm.assert_frame_equal(result, expected) diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_quantile.py b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_quantile.py new file mode 100644 index 0000000000000000000000000000000000000000..0f27eae1a3bfcb963bd3e5ba33056a6635a2d2bc --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_quantile.py @@ -0,0 +1,972 @@ +import numpy as np +import pytest + +import pandas as pd +from pandas import ( + DataFrame, + Index, + Series, + Timestamp, +) +import pandas._testing as tm + + +@pytest.fixture( + params=[["linear", "single"], ["nearest", "table"]], ids=lambda x: "-".join(x) +) +def interp_method(request): + """(interpolation, method) arguments for quantile""" + return request.param + + +class TestDataFrameQuantile: + @pytest.mark.parametrize( + "df,expected", + [ + [ + DataFrame( + { + 0: Series(pd.arrays.SparseArray([1, 2])), + 1: Series(pd.arrays.SparseArray([3, 4])), + } + ), + Series([1.5, 3.5], name=0.5), + ], + [ + DataFrame(Series([0.0, None, 1.0, 2.0], dtype="Sparse[float]")), + Series([1.0], name=0.5), + ], + ], + ) + def test_quantile_sparse(self, df, expected): + # GH#17198 + # GH#24600 + result = df.quantile() + expected = expected.astype("Sparse[float]") + tm.assert_series_equal(result, expected) + + def test_quantile( + self, datetime_frame, interp_method, using_array_manager, request + ): + interpolation, method = interp_method + df = datetime_frame + result = df.quantile( + 0.1, axis=0, numeric_only=True, interpolation=interpolation, method=method + ) + expected = Series( + [np.percentile(df[col], 10) for col in df.columns], + index=df.columns, + name=0.1, + ) + if interpolation == "linear": + # np.percentile values only comparable to linear interpolation + tm.assert_series_equal(result, expected) + else: + tm.assert_index_equal(result.index, expected.index) + request.applymarker( + pytest.mark.xfail( + using_array_manager, reason="Name set incorrectly for arraymanager" + ) + ) + assert result.name == expected.name + + result = df.quantile( + 0.9, axis=1, numeric_only=True, interpolation=interpolation, method=method + ) + expected = Series( + [np.percentile(df.loc[date], 90) for date in df.index], + index=df.index, + name=0.9, + ) + if interpolation == "linear": + # np.percentile values only comparable to linear interpolation + tm.assert_series_equal(result, expected) + else: + tm.assert_index_equal(result.index, expected.index) + request.applymarker( + pytest.mark.xfail( + using_array_manager, reason="Name set incorrectly for arraymanager" + ) + ) + assert result.name == expected.name + + def test_empty(self, interp_method): + interpolation, method = interp_method + q = DataFrame({"x": [], "y": []}).quantile( + 0.1, axis=0, numeric_only=True, interpolation=interpolation, method=method + ) + assert np.isnan(q["x"]) and np.isnan(q["y"]) + + def test_non_numeric_exclusion(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + df = DataFrame({"col1": ["A", "A", "B", "B"], "col2": [1, 2, 3, 4]}) + rs = df.quantile( + 0.5, numeric_only=True, interpolation=interpolation, method=method + ) + xp = df.median(numeric_only=True).rename(0.5) + if interpolation == "nearest": + xp = (xp + 0.5).astype(np.int64) + if method == "table" and using_array_manager: + request.applymarker(pytest.mark.xfail(reason="Axis name incorrectly set.")) + tm.assert_series_equal(rs, xp) + + def test_axis(self, interp_method, 
request, using_array_manager): + # axis + interpolation, method = interp_method + df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3]) + result = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) + expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5) + if interpolation == "nearest": + expected = expected.astype(np.int64) + if method == "table" and using_array_manager: + request.applymarker(pytest.mark.xfail(reason="Axis name incorrectly set.")) + tm.assert_series_equal(result, expected) + + result = df.quantile( + [0.5, 0.75], axis=1, interpolation=interpolation, method=method + ) + expected = DataFrame( + {1: [1.5, 1.75], 2: [2.5, 2.75], 3: [3.5, 3.75]}, index=[0.5, 0.75] + ) + if interpolation == "nearest": + expected.iloc[0, :] -= 0.5 + expected.iloc[1, :] += 0.25 + expected = expected.astype(np.int64) + tm.assert_frame_equal(result, expected, check_index_type=True) + + def test_axis_numeric_only_true(self, interp_method, request, using_array_manager): + # We may want to break API in the future to change this + # so that we exclude non-numeric along the same axis + # See GH #7312 + interpolation, method = interp_method + df = DataFrame([[1, 2, 3], ["a", "b", 4]]) + result = df.quantile( + 0.5, axis=1, numeric_only=True, interpolation=interpolation, method=method + ) + expected = Series([3.0, 4.0], index=[0, 1], name=0.5) + if interpolation == "nearest": + expected = expected.astype(np.int64) + if method == "table" and using_array_manager: + request.applymarker(pytest.mark.xfail(reason="Axis name incorrectly set.")) + tm.assert_series_equal(result, expected) + + def test_quantile_date_range(self, interp_method, request, using_array_manager): + # GH 2460 + interpolation, method = interp_method + dti = pd.date_range("2016-01-01", periods=3, tz="US/Pacific") + ser = Series(dti) + df = DataFrame(ser) + + result = df.quantile( + numeric_only=False, interpolation=interpolation, method=method + ) + expected = Series( + ["2016-01-02 00:00:00"], name=0.5, dtype="datetime64[ns, US/Pacific]" + ) + if method == "table" and using_array_manager: + request.applymarker(pytest.mark.xfail(reason="Axis name incorrectly set.")) + + tm.assert_series_equal(result, expected) + + def test_quantile_axis_mixed(self, interp_method, request, using_array_manager): + # mixed on axis=1 + interpolation, method = interp_method + df = DataFrame( + { + "A": [1, 2, 3], + "B": [2.0, 3.0, 4.0], + "C": pd.date_range("20130101", periods=3), + "D": ["foo", "bar", "baz"], + } + ) + result = df.quantile( + 0.5, axis=1, numeric_only=True, interpolation=interpolation, method=method + ) + expected = Series([1.5, 2.5, 3.5], name=0.5) + if interpolation == "nearest": + expected -= 0.5 + if method == "table" and using_array_manager: + request.applymarker(pytest.mark.xfail(reason="Axis name incorrectly set.")) + tm.assert_series_equal(result, expected) + + # must raise + msg = "'<' not supported between instances of 'Timestamp' and 'float'" + with pytest.raises(TypeError, match=msg): + df.quantile(0.5, axis=1, numeric_only=False) + + def test_quantile_axis_parameter(self, interp_method, request, using_array_manager): + # GH 9543/9544 + interpolation, method = interp_method + if method == "table" and using_array_manager: + request.applymarker(pytest.mark.xfail(reason="Axis name incorrectly set.")) + df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3]) + + result = df.quantile(0.5, axis=0, interpolation=interpolation, method=method) + + expected = Series([2.0, 3.0], index=["A", "B"], 
name=0.5) + if interpolation == "nearest": + expected = expected.astype(np.int64) + tm.assert_series_equal(result, expected) + + expected = df.quantile( + 0.5, axis="index", interpolation=interpolation, method=method + ) + if interpolation == "nearest": + expected = expected.astype(np.int64) + tm.assert_series_equal(result, expected) + + result = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) + + expected = Series([1.5, 2.5, 3.5], index=[1, 2, 3], name=0.5) + if interpolation == "nearest": + expected = expected.astype(np.int64) + tm.assert_series_equal(result, expected) + + result = df.quantile( + 0.5, axis="columns", interpolation=interpolation, method=method + ) + tm.assert_series_equal(result, expected) + + msg = "No axis named -1 for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.quantile(0.1, axis=-1, interpolation=interpolation, method=method) + msg = "No axis named column for object type DataFrame" + with pytest.raises(ValueError, match=msg): + df.quantile(0.1, axis="column") + + def test_quantile_interpolation(self): + # see gh-10174 + + # interpolation method other than default linear + df = DataFrame({"A": [1, 2, 3], "B": [2, 3, 4]}, index=[1, 2, 3]) + result = df.quantile(0.5, axis=1, interpolation="nearest") + expected = Series([1, 2, 3], index=[1, 2, 3], name=0.5) + tm.assert_series_equal(result, expected) + + # cross-check interpolation=nearest results in original dtype + exp = np.percentile( + np.array([[1, 2, 3], [2, 3, 4]]), + 0.5, + axis=0, + method="nearest", + ) + expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="int64") + tm.assert_series_equal(result, expected) + + # float + df = DataFrame({"A": [1.0, 2.0, 3.0], "B": [2.0, 3.0, 4.0]}, index=[1, 2, 3]) + result = df.quantile(0.5, axis=1, interpolation="nearest") + expected = Series([1.0, 2.0, 3.0], index=[1, 2, 3], name=0.5) + tm.assert_series_equal(result, expected) + exp = np.percentile( + np.array([[1.0, 2.0, 3.0], [2.0, 3.0, 4.0]]), + 0.5, + axis=0, + method="nearest", + ) + expected = Series(exp, index=[1, 2, 3], name=0.5, dtype="float64") + tm.assert_series_equal(result, expected) + + # axis + result = df.quantile([0.5, 0.75], axis=1, interpolation="lower") + expected = DataFrame( + {1: [1.0, 1.0], 2: [2.0, 2.0], 3: [3.0, 3.0]}, index=[0.5, 0.75] + ) + tm.assert_frame_equal(result, expected) + + # test degenerate case + df = DataFrame({"x": [], "y": []}) + q = df.quantile(0.1, axis=0, interpolation="higher") + assert np.isnan(q["x"]) and np.isnan(q["y"]) + + # multi + df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"]) + result = df.quantile([0.25, 0.5], interpolation="midpoint") + + # https://github.com/numpy/numpy/issues/7163 + expected = DataFrame( + [[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]], + index=[0.25, 0.5], + columns=["a", "b", "c"], + ) + tm.assert_frame_equal(result, expected) + + def test_quantile_interpolation_datetime(self, datetime_frame): + # see gh-10174 + + # interpolation = linear (default case) + df = datetime_frame + q = df.quantile(0.1, axis=0, numeric_only=True, interpolation="linear") + assert q["A"] == np.percentile(df["A"], 10) + + def test_quantile_interpolation_int(self, int_frame): + # see gh-10174 + + df = int_frame + # interpolation = linear (default case) + q = df.quantile(0.1) + assert q["A"] == np.percentile(df["A"], 10) + + # test with and without interpolation keyword + q1 = df.quantile(0.1, axis=0, interpolation="linear") + assert q1["A"] == np.percentile(df["A"], 10) + tm.assert_series_equal(q, q1) + + def 
test_quantile_multi(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"]) + result = df.quantile([0.25, 0.5], interpolation=interpolation, method=method) + expected = DataFrame( + [[1.5, 1.5, 1.5], [2.0, 2.0, 2.0]], + index=[0.25, 0.5], + columns=["a", "b", "c"], + ) + if interpolation == "nearest": + expected = expected.astype(np.int64) + if method == "table" and using_array_manager: + request.applymarker(pytest.mark.xfail(reason="Axis name incorrectly set.")) + tm.assert_frame_equal(result, expected) + + def test_quantile_multi_axis_1(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + df = DataFrame([[1, 1, 1], [2, 2, 2], [3, 3, 3]], columns=["a", "b", "c"]) + result = df.quantile( + [0.25, 0.5], axis=1, interpolation=interpolation, method=method + ) + expected = DataFrame( + [[1.0, 2.0, 3.0]] * 2, index=[0.25, 0.5], columns=[0, 1, 2] + ) + if interpolation == "nearest": + expected = expected.astype(np.int64) + if method == "table" and using_array_manager: + request.applymarker(pytest.mark.xfail(reason="Axis name incorrectly set.")) + tm.assert_frame_equal(result, expected) + + def test_quantile_multi_empty(self, interp_method): + interpolation, method = interp_method + result = DataFrame({"x": [], "y": []}).quantile( + [0.1, 0.9], axis=0, interpolation=interpolation, method=method + ) + expected = DataFrame( + {"x": [np.nan, np.nan], "y": [np.nan, np.nan]}, index=[0.1, 0.9] + ) + tm.assert_frame_equal(result, expected) + + def test_quantile_datetime(self, unit): + dti = pd.to_datetime(["2010", "2011"]).as_unit(unit) + df = DataFrame({"a": dti, "b": [0, 5]}) + + # exclude datetime + result = df.quantile(0.5, numeric_only=True) + expected = Series([2.5], index=["b"], name=0.5) + tm.assert_series_equal(result, expected) + + # datetime + result = df.quantile(0.5, numeric_only=False) + expected = Series( + [Timestamp("2010-07-02 12:00:00"), 2.5], index=["a", "b"], name=0.5 + ) + tm.assert_series_equal(result, expected) + + # datetime w/ multi + result = df.quantile([0.5], numeric_only=False) + expected = DataFrame( + {"a": Timestamp("2010-07-02 12:00:00").as_unit(unit), "b": 2.5}, + index=[0.5], + ) + tm.assert_frame_equal(result, expected) + + # axis = 1 + df["c"] = pd.to_datetime(["2011", "2012"]).as_unit(unit) + result = df[["a", "c"]].quantile(0.5, axis=1, numeric_only=False) + expected = Series( + [Timestamp("2010-07-02 12:00:00"), Timestamp("2011-07-02 12:00:00")], + index=[0, 1], + name=0.5, + dtype=f"M8[{unit}]", + ) + tm.assert_series_equal(result, expected) + + result = df[["a", "c"]].quantile([0.5], axis=1, numeric_only=False) + expected = DataFrame( + [[Timestamp("2010-07-02 12:00:00"), Timestamp("2011-07-02 12:00:00")]], + index=[0.5], + columns=[0, 1], + dtype=f"M8[{unit}]", + ) + tm.assert_frame_equal(result, expected) + + # empty when numeric_only=True + result = df[["a", "c"]].quantile(0.5, numeric_only=True) + expected = Series([], index=[], dtype=np.float64, name=0.5) + tm.assert_series_equal(result, expected) + + result = df[["a", "c"]].quantile([0.5], numeric_only=True) + expected = DataFrame(index=[0.5], columns=[]) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "dtype", + [ + "datetime64[ns]", + "datetime64[ns, US/Pacific]", + "timedelta64[ns]", + "Period[D]", + ], + ) + def test_quantile_dt64_empty(self, dtype, interp_method): + # GH#41544 + interpolation, method = interp_method + df = 
DataFrame(columns=["a", "b"], dtype=dtype) + + res = df.quantile( + 0.5, axis=1, numeric_only=False, interpolation=interpolation, method=method + ) + expected = Series([], index=[], name=0.5, dtype=dtype) + tm.assert_series_equal(res, expected) + + # no columns in result, so no dtype preservation + res = df.quantile( + [0.5], + axis=1, + numeric_only=False, + interpolation=interpolation, + method=method, + ) + expected = DataFrame(index=[0.5], columns=[]) + tm.assert_frame_equal(res, expected) + + @pytest.mark.parametrize("invalid", [-1, 2, [0.5, -1], [0.5, 2]]) + def test_quantile_invalid(self, invalid, datetime_frame, interp_method): + msg = "percentiles should all be in the interval \\[0, 1\\]" + interpolation, method = interp_method + with pytest.raises(ValueError, match=msg): + datetime_frame.quantile(invalid, interpolation=interpolation, method=method) + + def test_quantile_box(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + if method == "table" and using_array_manager: + request.applymarker(pytest.mark.xfail(reason="Axis name incorrectly set.")) + df = DataFrame( + { + "A": [ + Timestamp("2011-01-01"), + Timestamp("2011-01-02"), + Timestamp("2011-01-03"), + ], + "B": [ + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-03", tz="US/Eastern"), + ], + "C": [ + pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + pd.Timedelta("3 days"), + ], + } + ) + + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) + + exp = Series( + [ + Timestamp("2011-01-02"), + Timestamp("2011-01-02", tz="US/Eastern"), + pd.Timedelta("2 days"), + ], + name=0.5, + index=["A", "B", "C"], + ) + tm.assert_series_equal(res, exp) + + res = df.quantile( + [0.5], numeric_only=False, interpolation=interpolation, method=method + ) + exp = DataFrame( + [ + [ + Timestamp("2011-01-02"), + Timestamp("2011-01-02", tz="US/Eastern"), + pd.Timedelta("2 days"), + ] + ], + index=[0.5], + columns=["A", "B", "C"], + ) + tm.assert_frame_equal(res, exp) + + def test_quantile_box_nat(self): + # DatetimeLikeBlock may be consolidated and contain NaT in different loc + df = DataFrame( + { + "A": [ + Timestamp("2011-01-01"), + pd.NaT, + Timestamp("2011-01-02"), + Timestamp("2011-01-03"), + ], + "a": [ + Timestamp("2011-01-01"), + Timestamp("2011-01-02"), + pd.NaT, + Timestamp("2011-01-03"), + ], + "B": [ + Timestamp("2011-01-01", tz="US/Eastern"), + pd.NaT, + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-03", tz="US/Eastern"), + ], + "b": [ + Timestamp("2011-01-01", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + pd.NaT, + Timestamp("2011-01-03", tz="US/Eastern"), + ], + "C": [ + pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + pd.Timedelta("3 days"), + pd.NaT, + ], + "c": [ + pd.NaT, + pd.Timedelta("1 days"), + pd.Timedelta("2 days"), + pd.Timedelta("3 days"), + ], + }, + columns=list("AaBbCc"), + ) + + res = df.quantile(0.5, numeric_only=False) + exp = Series( + [ + Timestamp("2011-01-02"), + Timestamp("2011-01-02"), + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + pd.Timedelta("2 days"), + pd.Timedelta("2 days"), + ], + name=0.5, + index=list("AaBbCc"), + ) + tm.assert_series_equal(res, exp) + + res = df.quantile([0.5], numeric_only=False) + exp = DataFrame( + [ + [ + Timestamp("2011-01-02"), + Timestamp("2011-01-02"), + Timestamp("2011-01-02", tz="US/Eastern"), + Timestamp("2011-01-02", tz="US/Eastern"), + 
pd.Timedelta("2 days"), + pd.Timedelta("2 days"), + ] + ], + index=[0.5], + columns=list("AaBbCc"), + ) + tm.assert_frame_equal(res, exp) + + def test_quantile_nan(self, interp_method, request, using_array_manager): + interpolation, method = interp_method + if method == "table" and using_array_manager: + request.applymarker(pytest.mark.xfail(reason="Axis name incorrectly set.")) + # GH 14357 - float block where some cols have missing values + df = DataFrame({"a": np.arange(1, 6.0), "b": np.arange(1, 6.0)}) + df.iloc[-1, 1] = np.nan + + res = df.quantile(0.5, interpolation=interpolation, method=method) + exp = Series( + [3.0, 2.5 if interpolation == "linear" else 3.0], index=["a", "b"], name=0.5 + ) + tm.assert_series_equal(res, exp) + + res = df.quantile([0.5, 0.75], interpolation=interpolation, method=method) + exp = DataFrame( + { + "a": [3.0, 4.0], + "b": [2.5, 3.25] if interpolation == "linear" else [3.0, 4.0], + }, + index=[0.5, 0.75], + ) + tm.assert_frame_equal(res, exp) + + res = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) + exp = Series(np.arange(1.0, 6.0), name=0.5) + tm.assert_series_equal(res, exp) + + res = df.quantile( + [0.5, 0.75], axis=1, interpolation=interpolation, method=method + ) + exp = DataFrame([np.arange(1.0, 6.0)] * 2, index=[0.5, 0.75]) + if interpolation == "nearest": + exp.iloc[1, -1] = np.nan + tm.assert_frame_equal(res, exp) + + # full-nan column + df["b"] = np.nan + + res = df.quantile(0.5, interpolation=interpolation, method=method) + exp = Series([3.0, np.nan], index=["a", "b"], name=0.5) + tm.assert_series_equal(res, exp) + + res = df.quantile([0.5, 0.75], interpolation=interpolation, method=method) + exp = DataFrame({"a": [3.0, 4.0], "b": [np.nan, np.nan]}, index=[0.5, 0.75]) + tm.assert_frame_equal(res, exp) + + def test_quantile_nat(self, interp_method, request, using_array_manager, unit): + interpolation, method = interp_method + if method == "table" and using_array_manager: + request.applymarker(pytest.mark.xfail(reason="Axis name incorrectly set.")) + # full NaT column + df = DataFrame({"a": [pd.NaT, pd.NaT, pd.NaT]}, dtype=f"M8[{unit}]") + + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) + exp = Series([pd.NaT], index=["a"], name=0.5, dtype=f"M8[{unit}]") + tm.assert_series_equal(res, exp) + + res = df.quantile( + [0.5], numeric_only=False, interpolation=interpolation, method=method + ) + exp = DataFrame({"a": [pd.NaT]}, index=[0.5], dtype=f"M8[{unit}]") + tm.assert_frame_equal(res, exp) + + # mixed non-null / full null column + df = DataFrame( + { + "a": [ + Timestamp("2012-01-01"), + Timestamp("2012-01-02"), + Timestamp("2012-01-03"), + ], + "b": [pd.NaT, pd.NaT, pd.NaT], + }, + dtype=f"M8[{unit}]", + ) + + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) + exp = Series( + [Timestamp("2012-01-02"), pd.NaT], + index=["a", "b"], + name=0.5, + dtype=f"M8[{unit}]", + ) + tm.assert_series_equal(res, exp) + + res = df.quantile( + [0.5], numeric_only=False, interpolation=interpolation, method=method + ) + exp = DataFrame( + [[Timestamp("2012-01-02"), pd.NaT]], + index=[0.5], + columns=["a", "b"], + dtype=f"M8[{unit}]", + ) + tm.assert_frame_equal(res, exp) + + def test_quantile_empty_no_rows_floats(self, interp_method): + interpolation, method = interp_method + + df = DataFrame(columns=["a", "b"], dtype="float64") + + res = df.quantile(0.5, interpolation=interpolation, method=method) + exp = Series([np.nan, np.nan], index=["a", "b"], 
name=0.5) + tm.assert_series_equal(res, exp) + + res = df.quantile([0.5], interpolation=interpolation, method=method) + exp = DataFrame([[np.nan, np.nan]], columns=["a", "b"], index=[0.5]) + tm.assert_frame_equal(res, exp) + + res = df.quantile(0.5, axis=1, interpolation=interpolation, method=method) + exp = Series([], index=[], dtype="float64", name=0.5) + tm.assert_series_equal(res, exp) + + res = df.quantile([0.5], axis=1, interpolation=interpolation, method=method) + exp = DataFrame(columns=[], index=[0.5]) + tm.assert_frame_equal(res, exp) + + def test_quantile_empty_no_rows_ints(self, interp_method): + interpolation, method = interp_method + df = DataFrame(columns=["a", "b"], dtype="int64") + + res = df.quantile(0.5, interpolation=interpolation, method=method) + exp = Series([np.nan, np.nan], index=["a", "b"], name=0.5) + tm.assert_series_equal(res, exp) + + def test_quantile_empty_no_rows_dt64(self, interp_method): + interpolation, method = interp_method + # datetimes + df = DataFrame(columns=["a", "b"], dtype="datetime64[ns]") + + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) + exp = Series( + [pd.NaT, pd.NaT], index=["a", "b"], dtype="datetime64[ns]", name=0.5 + ) + tm.assert_series_equal(res, exp) + + # Mixed dt64/dt64tz + df["a"] = df["a"].dt.tz_localize("US/Central") + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) + exp = exp.astype(object) + if interpolation == "nearest": + # GH#18463 TODO: would we prefer NaTs here? + msg = "The 'downcast' keyword in fillna is deprecated" + with tm.assert_produces_warning(FutureWarning, match=msg): + exp = exp.fillna(np.nan, downcast=False) + tm.assert_series_equal(res, exp) + + # both dt64tz + df["b"] = df["b"].dt.tz_localize("US/Central") + res = df.quantile( + 0.5, numeric_only=False, interpolation=interpolation, method=method + ) + exp = exp.astype(df["b"].dtype) + tm.assert_series_equal(res, exp) + + def test_quantile_empty_no_columns(self, interp_method): + # GH#23925 _get_numeric_data may drop all columns + interpolation, method = interp_method + df = DataFrame(pd.date_range("1/1/18", periods=5)) + df.columns.name = "captain tightpants" + result = df.quantile( + 0.5, numeric_only=True, interpolation=interpolation, method=method + ) + expected = Series([], index=[], name=0.5, dtype=np.float64) + expected.index.name = "captain tightpants" + tm.assert_series_equal(result, expected) + + result = df.quantile( + [0.5], numeric_only=True, interpolation=interpolation, method=method + ) + expected = DataFrame([], index=[0.5], columns=[]) + expected.columns.name = "captain tightpants" + tm.assert_frame_equal(result, expected) + + def test_quantile_item_cache( + self, using_array_manager, interp_method, using_copy_on_write + ): + # previous behavior incorrect retained an invalid _item_cache entry + interpolation, method = interp_method + df = DataFrame( + np.random.default_rng(2).standard_normal((4, 3)), columns=["A", "B", "C"] + ) + df["D"] = df["A"] * 2 + ser = df["A"] + if not using_array_manager: + assert len(df._mgr.blocks) == 2 + + df.quantile(numeric_only=False, interpolation=interpolation, method=method) + + if using_copy_on_write: + ser.iloc[0] = 99 + assert df.iloc[0, 0] == df["A"][0] + assert df.iloc[0, 0] != 99 + else: + ser.values[0] = 99 + assert df.iloc[0, 0] == df["A"][0] + assert df.iloc[0, 0] == 99 + + def test_invalid_method(self): + with pytest.raises(ValueError, match="Invalid method: foo"): + DataFrame(range(1)).quantile(0.5, 
method="foo") + + def test_table_invalid_interpolation(self): + with pytest.raises(ValueError, match="Invalid interpolation: foo"): + DataFrame(range(1)).quantile(0.5, method="table", interpolation="foo") + + +class TestQuantileExtensionDtype: + # TODO: tests for axis=1? + # TODO: empty case? + + @pytest.fixture( + params=[ + pytest.param( + pd.IntervalIndex.from_breaks(range(10)), + marks=pytest.mark.xfail(reason="raises when trying to add Intervals"), + ), + pd.period_range("2016-01-01", periods=9, freq="D"), + pd.date_range("2016-01-01", periods=9, tz="US/Pacific"), + pd.timedelta_range("1 Day", periods=9), + pd.array(np.arange(9), dtype="Int64"), + pd.array(np.arange(9), dtype="Float64"), + ], + ids=lambda x: str(x.dtype), + ) + def index(self, request): + # NB: not actually an Index object + idx = request.param + idx.name = "A" + return idx + + @pytest.fixture + def obj(self, index, frame_or_series): + # bc index is not always an Index (yet), we need to re-patch .name + obj = frame_or_series(index).copy() + + if frame_or_series is Series: + obj.name = "A" + else: + obj.columns = ["A"] + return obj + + def compute_quantile(self, obj, qs): + if isinstance(obj, Series): + result = obj.quantile(qs) + else: + result = obj.quantile(qs, numeric_only=False) + return result + + def test_quantile_ea(self, request, obj, index): + # result should be invariant to shuffling + indexer = np.arange(len(index), dtype=np.intp) + np.random.default_rng(2).shuffle(indexer) + obj = obj.iloc[indexer] + + qs = [0.5, 0, 1] + result = self.compute_quantile(obj, qs) + + exp_dtype = index.dtype + if index.dtype == "Int64": + # match non-nullable casting behavior + exp_dtype = "Float64" + + # expected here assumes len(index) == 9 + expected = Series( + [index[4], index[0], index[-1]], dtype=exp_dtype, index=qs, name="A" + ) + expected = type(obj)(expected) + + tm.assert_equal(result, expected) + + def test_quantile_ea_with_na(self, obj, index): + obj.iloc[0] = index._na_value + obj.iloc[-1] = index._na_value + + # result should be invariant to shuffling + indexer = np.arange(len(index), dtype=np.intp) + np.random.default_rng(2).shuffle(indexer) + obj = obj.iloc[indexer] + + qs = [0.5, 0, 1] + result = self.compute_quantile(obj, qs) + + # expected here assumes len(index) == 9 + expected = Series( + [index[4], index[1], index[-2]], dtype=index.dtype, index=qs, name="A" + ) + expected = type(obj)(expected) + tm.assert_equal(result, expected) + + def test_quantile_ea_all_na(self, request, obj, index): + obj.iloc[:] = index._na_value + # Check dtypes were preserved; this was once a problem see GH#39763 + assert np.all(obj.dtypes == index.dtype) + + # result should be invariant to shuffling + indexer = np.arange(len(index), dtype=np.intp) + np.random.default_rng(2).shuffle(indexer) + obj = obj.iloc[indexer] + + qs = [0.5, 0, 1] + result = self.compute_quantile(obj, qs) + + expected = index.take([-1, -1, -1], allow_fill=True, fill_value=index._na_value) + expected = Series(expected, index=qs, name="A") + expected = type(obj)(expected) + tm.assert_equal(result, expected) + + def test_quantile_ea_scalar(self, request, obj, index): + # scalar qs + + # result should be invariant to shuffling + indexer = np.arange(len(index), dtype=np.intp) + np.random.default_rng(2).shuffle(indexer) + obj = obj.iloc[indexer] + + qs = 0.5 + result = self.compute_quantile(obj, qs) + + exp_dtype = index.dtype + if index.dtype == "Int64": + exp_dtype = "Float64" + + expected = Series({"A": index[4]}, dtype=exp_dtype, name=0.5) + if 
isinstance(obj, Series): + expected = expected["A"] + assert result == expected + else: + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "dtype, expected_data, expected_index, axis", + [ + ["float64", [], [], 1], + ["int64", [], [], 1], + ["float64", [np.nan, np.nan], ["a", "b"], 0], + ["int64", [np.nan, np.nan], ["a", "b"], 0], + ], + ) + def test_empty_numeric(self, dtype, expected_data, expected_index, axis): + # GH 14564 + df = DataFrame(columns=["a", "b"], dtype=dtype) + result = df.quantile(0.5, axis=axis) + expected = Series( + expected_data, name=0.5, index=Index(expected_index), dtype="float64" + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "dtype, expected_data, expected_index, axis, expected_dtype", + [ + ["datetime64[ns]", [], [], 1, "datetime64[ns]"], + ["datetime64[ns]", [pd.NaT, pd.NaT], ["a", "b"], 0, "datetime64[ns]"], + ], + ) + def test_empty_datelike( + self, dtype, expected_data, expected_index, axis, expected_dtype + ): + # GH 14564 + df = DataFrame(columns=["a", "b"], dtype=dtype) + result = df.quantile(0.5, axis=axis, numeric_only=False) + expected = Series( + expected_data, name=0.5, index=Index(expected_index), dtype=expected_dtype + ) + tm.assert_series_equal(result, expected) + + @pytest.mark.parametrize( + "expected_data, expected_index, axis", + [ + [[np.nan, np.nan], range(2), 1], + [[], [], 0], + ], + ) + def test_datelike_numeric_only(self, expected_data, expected_index, axis): + # GH 14564 + df = DataFrame( + { + "a": pd.to_datetime(["2010", "2011"]), + "b": [0, 5], + "c": pd.to_datetime(["2011", "2012"]), + } + ) + result = df[["a", "c"]].quantile(0.5, axis=axis, numeric_only=True) + expected = Series( + expected_data, name=0.5, index=Index(expected_index), dtype=np.float64 + ) + tm.assert_series_equal(result, expected) diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_reset_index.py b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_reset_index.py new file mode 100644 index 0000000000000000000000000000000000000000..fbf36dbc4fb023364a1def844463679fa7174757 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_reset_index.py @@ -0,0 +1,782 @@ +from datetime import datetime +from itertools import product + +import numpy as np +import pytest + +from pandas.core.dtypes.common import ( + is_float_dtype, + is_integer_dtype, +) + +import pandas as pd +from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + Index, + Interval, + IntervalIndex, + MultiIndex, + RangeIndex, + Series, + Timestamp, + cut, + date_range, +) +import pandas._testing as tm + + +@pytest.fixture() +def multiindex_df(): + levels = [["A", ""], ["B", "b"]] + return DataFrame([[0, 2], [1, 3]], columns=MultiIndex.from_tuples(levels)) + + +class TestResetIndex: + def test_reset_index_empty_rangeindex(self): + # GH#45230 + df = DataFrame( + columns=["brand"], dtype=np.int64, index=RangeIndex(0, 0, 1, name="foo") + ) + + df2 = df.set_index([df.index, "brand"]) + + result = df2.reset_index([1], drop=True) + tm.assert_frame_equal(result, df[[]], check_index_type=True) + + def test_set_reset(self): + idx = Index([2**63, 2**63 + 5, 2**63 + 10], name="foo") + + # set/reset + df = DataFrame({"A": [0, 1, 2]}, index=idx) + result = df.reset_index() + assert result["foo"].dtype == np.dtype("uint64") + + df = result.set_index("foo") + tm.assert_index_equal(df.index, idx) + + def test_set_index_reset_index_dt64tz(self): + idx = 
Index(date_range("20130101", periods=3, tz="US/Eastern"), name="foo") + + # set/reset + df = DataFrame({"A": [0, 1, 2]}, index=idx) + result = df.reset_index() + assert result["foo"].dtype == "datetime64[ns, US/Eastern]" + + df = result.set_index("foo") + tm.assert_index_equal(df.index, idx) + + def test_reset_index_tz(self, tz_aware_fixture): + # GH 3950 + # reset_index with single level + tz = tz_aware_fixture + idx = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx") + df = DataFrame({"a": range(5), "b": ["A", "B", "C", "D", "E"]}, index=idx) + + expected = DataFrame( + { + "idx": idx, + "a": range(5), + "b": ["A", "B", "C", "D", "E"], + }, + columns=["idx", "a", "b"], + ) + result = df.reset_index() + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("tz", ["US/Eastern", "dateutil/US/Eastern"]) + def test_frame_reset_index_tzaware_index(self, tz): + dr = date_range("2012-06-02", periods=10, tz=tz) + df = DataFrame(np.random.default_rng(2).standard_normal(len(dr)), dr) + roundtripped = df.reset_index().set_index("index") + xp = df.index.tz + rs = roundtripped.index.tz + assert xp == rs + + def test_reset_index_with_intervals(self): + idx = IntervalIndex.from_breaks(np.arange(11), name="x") + original = DataFrame({"x": idx, "y": np.arange(10)})[["x", "y"]] + + result = original.set_index("x") + expected = DataFrame({"y": np.arange(10)}, index=idx) + tm.assert_frame_equal(result, expected) + + result2 = result.reset_index() + tm.assert_frame_equal(result2, original) + + def test_reset_index(self, float_frame): + stacked = float_frame.stack(future_stack=True)[::2] + stacked = DataFrame({"foo": stacked, "bar": stacked}) + + names = ["first", "second"] + stacked.index.names = names + deleveled = stacked.reset_index() + for i, (lev, level_codes) in enumerate( + zip(stacked.index.levels, stacked.index.codes) + ): + values = lev.take(level_codes) + name = names[i] + tm.assert_index_equal(values, Index(deleveled[name])) + + stacked.index.names = [None, None] + deleveled2 = stacked.reset_index() + tm.assert_series_equal( + deleveled["first"], deleveled2["level_0"], check_names=False + ) + tm.assert_series_equal( + deleveled["second"], deleveled2["level_1"], check_names=False + ) + + # default name assigned + rdf = float_frame.reset_index() + exp = Series(float_frame.index.values, name="index") + tm.assert_series_equal(rdf["index"], exp) + + # default name assigned, corner case + df = float_frame.copy() + df["index"] = "foo" + rdf = df.reset_index() + exp = Series(float_frame.index.values, name="level_0") + tm.assert_series_equal(rdf["level_0"], exp) + + # but this is ok + float_frame.index.name = "index" + deleveled = float_frame.reset_index() + tm.assert_series_equal(deleveled["index"], Series(float_frame.index)) + tm.assert_index_equal(deleveled.index, Index(range(len(deleveled))), exact=True) + + # preserve column names + float_frame.columns.name = "columns" + reset = float_frame.reset_index() + assert reset.columns.name == "columns" + + # only remove certain columns + df = float_frame.reset_index().set_index(["index", "A", "B"]) + rs = df.reset_index(["A", "B"]) + + tm.assert_frame_equal(rs, float_frame) + + rs = df.reset_index(["index", "A", "B"]) + tm.assert_frame_equal(rs, float_frame.reset_index()) + + rs = df.reset_index(["index", "A", "B"]) + tm.assert_frame_equal(rs, float_frame.reset_index()) + + rs = df.reset_index("A") + xp = float_frame.reset_index().set_index(["index", "B"]) + tm.assert_frame_equal(rs, xp) + + # test resetting in place + df = 
float_frame.copy() + reset = float_frame.reset_index() + return_value = df.reset_index(inplace=True) + assert return_value is None + tm.assert_frame_equal(df, reset) + + df = float_frame.reset_index().set_index(["index", "A", "B"]) + rs = df.reset_index("A", drop=True) + xp = float_frame.copy() + del xp["A"] + xp = xp.set_index(["B"], append=True) + tm.assert_frame_equal(rs, xp) + + def test_reset_index_name(self): + df = DataFrame( + [[1, 2, 3, 4], [5, 6, 7, 8]], + columns=["A", "B", "C", "D"], + index=Index(range(2), name="x"), + ) + assert df.reset_index().index.name is None + assert df.reset_index(drop=True).index.name is None + return_value = df.reset_index(inplace=True) + assert return_value is None + assert df.index.name is None + + @pytest.mark.parametrize("levels", [["A", "B"], [0, 1]]) + def test_reset_index_level(self, levels): + df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"]) + + # With MultiIndex + result = df.set_index(["A", "B"]).reset_index(level=levels[0]) + tm.assert_frame_equal(result, df.set_index("B")) + + result = df.set_index(["A", "B"]).reset_index(level=levels[:1]) + tm.assert_frame_equal(result, df.set_index("B")) + + result = df.set_index(["A", "B"]).reset_index(level=levels) + tm.assert_frame_equal(result, df) + + result = df.set_index(["A", "B"]).reset_index(level=levels, drop=True) + tm.assert_frame_equal(result, df[["C", "D"]]) + + # With single-level Index (GH 16263) + result = df.set_index("A").reset_index(level=levels[0]) + tm.assert_frame_equal(result, df) + + result = df.set_index("A").reset_index(level=levels[:1]) + tm.assert_frame_equal(result, df) + + result = df.set_index(["A"]).reset_index(level=levels[0], drop=True) + tm.assert_frame_equal(result, df[["B", "C", "D"]]) + + @pytest.mark.parametrize("idx_lev", [["A", "B"], ["A"]]) + def test_reset_index_level_missing(self, idx_lev): + # Missing levels - for both MultiIndex and single-level Index: + df = DataFrame([[1, 2, 3, 4], [5, 6, 7, 8]], columns=["A", "B", "C", "D"]) + + with pytest.raises(KeyError, match=r"(L|l)evel \(?E\)?"): + df.set_index(idx_lev).reset_index(level=["A", "E"]) + with pytest.raises(IndexError, match="Too many levels"): + df.set_index(idx_lev).reset_index(level=[0, 1, 2]) + + def test_reset_index_right_dtype(self): + time = np.arange(0.0, 10, np.sqrt(2) / 2) + s1 = Series( + (9.81 * time**2) / 2, index=Index(time, name="time"), name="speed" + ) + df = DataFrame(s1) + + reset = s1.reset_index() + assert reset["time"].dtype == np.float64 + + reset = df.reset_index() + assert reset["time"].dtype == np.float64 + + def test_reset_index_multiindex_col(self): + vals = np.random.default_rng(2).standard_normal((3, 3)).astype(object) + idx = ["x", "y", "z"] + full = np.hstack(([[x] for x in idx], vals)) + df = DataFrame( + vals, + Index(idx, name="a"), + columns=[["b", "b", "c"], ["mean", "median", "mean"]], + ) + rs = df.reset_index() + xp = DataFrame( + full, columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]] + ) + tm.assert_frame_equal(rs, xp) + + rs = df.reset_index(col_fill=None) + xp = DataFrame( + full, columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]] + ) + tm.assert_frame_equal(rs, xp) + + rs = df.reset_index(col_level=1, col_fill="blah") + xp = DataFrame( + full, columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]] + ) + tm.assert_frame_equal(rs, xp) + + df = DataFrame( + vals, + MultiIndex.from_arrays([[0, 1, 2], ["x", "y", "z"]], names=["d", "a"]), + columns=[["b", "b", "c"], ["mean", "median", "mean"]], + ) + 
rs = df.reset_index("a") + xp = DataFrame( + full, + Index([0, 1, 2], name="d"), + columns=[["a", "b", "b", "c"], ["", "mean", "median", "mean"]], + ) + tm.assert_frame_equal(rs, xp) + + rs = df.reset_index("a", col_fill=None) + xp = DataFrame( + full, + Index(range(3), name="d"), + columns=[["a", "b", "b", "c"], ["a", "mean", "median", "mean"]], + ) + tm.assert_frame_equal(rs, xp) + + rs = df.reset_index("a", col_fill="blah", col_level=1) + xp = DataFrame( + full, + Index(range(3), name="d"), + columns=[["blah", "b", "b", "c"], ["a", "mean", "median", "mean"]], + ) + tm.assert_frame_equal(rs, xp) + + def test_reset_index_multiindex_nan(self): + # GH#6322, testing reset_index on MultiIndexes + # when we have a nan or all nan + df = DataFrame( + { + "A": ["a", "b", "c"], + "B": [0, 1, np.nan], + "C": np.random.default_rng(2).random(3), + } + ) + rs = df.set_index(["A", "B"]).reset_index() + tm.assert_frame_equal(rs, df) + + df = DataFrame( + { + "A": [np.nan, "b", "c"], + "B": [0, 1, 2], + "C": np.random.default_rng(2).random(3), + } + ) + rs = df.set_index(["A", "B"]).reset_index() + tm.assert_frame_equal(rs, df) + + df = DataFrame({"A": ["a", "b", "c"], "B": [0, 1, 2], "C": [np.nan, 1.1, 2.2]}) + rs = df.set_index(["A", "B"]).reset_index() + tm.assert_frame_equal(rs, df) + + df = DataFrame( + { + "A": ["a", "b", "c"], + "B": [np.nan, np.nan, np.nan], + "C": np.random.default_rng(2).random(3), + } + ) + rs = df.set_index(["A", "B"]).reset_index() + tm.assert_frame_equal(rs, df) + + @pytest.mark.parametrize( + "name", + [ + None, + "foo", + 2, + 3.0, + pd.Timedelta(6), + Timestamp("2012-12-30", tz="UTC"), + "2012-12-31", + ], + ) + def test_reset_index_with_datetimeindex_cols(self, name): + # GH#5818 + df = DataFrame( + [[1, 2], [3, 4]], + columns=date_range("1/1/2013", "1/2/2013"), + index=["A", "B"], + ) + df.index.name = name + + result = df.reset_index() + + item = name if name is not None else "index" + columns = Index([item, datetime(2013, 1, 1), datetime(2013, 1, 2)]) + if isinstance(item, str) and item == "2012-12-31": + columns = columns.astype("datetime64[ns]") + else: + assert columns.dtype == object + + expected = DataFrame( + [["A", 1, 2], ["B", 3, 4]], + columns=columns, + ) + tm.assert_frame_equal(result, expected) + + def test_reset_index_range(self): + # GH#12071 + df = DataFrame([[0, 0], [1, 1]], columns=["A", "B"], index=RangeIndex(stop=2)) + result = df.reset_index() + assert isinstance(result.index, RangeIndex) + expected = DataFrame( + [[0, 0, 0], [1, 1, 1]], + columns=["index", "A", "B"], + index=RangeIndex(stop=2), + ) + tm.assert_frame_equal(result, expected) + + def test_reset_index_multiindex_columns(self, multiindex_df): + result = multiindex_df[["B"]].rename_axis("A").reset_index() + tm.assert_frame_equal(result, multiindex_df) + + # GH#16120: already existing column + msg = r"cannot insert \('A', ''\), already exists" + with pytest.raises(ValueError, match=msg): + multiindex_df.rename_axis("A").reset_index() + + # GH#16164: multiindex (tuple) full key + result = multiindex_df.set_index([("A", "")]).reset_index() + tm.assert_frame_equal(result, multiindex_df) + + # with additional (unnamed) index level + idx_col = DataFrame( + [[0], [1]], columns=MultiIndex.from_tuples([("level_0", "")]) + ) + expected = pd.concat([idx_col, multiindex_df[[("B", "b"), ("A", "")]]], axis=1) + result = multiindex_df.set_index([("B", "b")], append=True).reset_index() + tm.assert_frame_equal(result, expected) + + # with index name which is a too long tuple... 
+ msg = "Item must have length equal to number of levels." + with pytest.raises(ValueError, match=msg): + multiindex_df.rename_axis([("C", "c", "i")]).reset_index() + + # or too short... + levels = [["A", "a", ""], ["B", "b", "i"]] + df2 = DataFrame([[0, 2], [1, 3]], columns=MultiIndex.from_tuples(levels)) + idx_col = DataFrame( + [[0], [1]], columns=MultiIndex.from_tuples([("C", "c", "ii")]) + ) + expected = pd.concat([idx_col, df2], axis=1) + result = df2.rename_axis([("C", "c")]).reset_index(col_fill="ii") + tm.assert_frame_equal(result, expected) + + # ... which is incompatible with col_fill=None + with pytest.raises( + ValueError, + match=( + "col_fill=None is incompatible with " + r"incomplete column name \('C', 'c'\)" + ), + ): + df2.rename_axis([("C", "c")]).reset_index(col_fill=None) + + # with col_level != 0 + result = df2.rename_axis([("c", "ii")]).reset_index(col_level=1, col_fill="C") + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize("flag", [False, True]) + @pytest.mark.parametrize("allow_duplicates", [False, True]) + def test_reset_index_duplicate_columns_allow( + self, multiindex_df, flag, allow_duplicates + ): + # GH#44755 reset_index with duplicate column labels + df = multiindex_df.rename_axis("A") + df = df.set_flags(allows_duplicate_labels=flag) + + if flag and allow_duplicates: + result = df.reset_index(allow_duplicates=allow_duplicates) + levels = [["A", ""], ["A", ""], ["B", "b"]] + expected = DataFrame( + [[0, 0, 2], [1, 1, 3]], columns=MultiIndex.from_tuples(levels) + ) + tm.assert_frame_equal(result, expected) + else: + if not flag and allow_duplicates: + msg = ( + "Cannot specify 'allow_duplicates=True' when " + "'self.flags.allows_duplicate_labels' is False" + ) + else: + msg = r"cannot insert \('A', ''\), already exists" + with pytest.raises(ValueError, match=msg): + df.reset_index(allow_duplicates=allow_duplicates) + + @pytest.mark.parametrize("flag", [False, True]) + def test_reset_index_duplicate_columns_default(self, multiindex_df, flag): + df = multiindex_df.rename_axis("A") + df = df.set_flags(allows_duplicate_labels=flag) + + msg = r"cannot insert \('A', ''\), already exists" + with pytest.raises(ValueError, match=msg): + df.reset_index() + + @pytest.mark.parametrize("allow_duplicates", ["bad value"]) + def test_reset_index_allow_duplicates_check(self, multiindex_df, allow_duplicates): + with pytest.raises(ValueError, match="expected type bool"): + multiindex_df.reset_index(allow_duplicates=allow_duplicates) + + def test_reset_index_datetime(self, tz_naive_fixture): + # GH#3950 + tz = tz_naive_fixture + idx1 = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx1") + idx2 = Index(range(5), name="idx2", dtype="int64") + idx = MultiIndex.from_arrays([idx1, idx2]) + df = DataFrame( + {"a": np.arange(5, dtype="int64"), "b": ["A", "B", "C", "D", "E"]}, + index=idx, + ) + + expected = DataFrame( + { + "idx1": idx1, + "idx2": np.arange(5, dtype="int64"), + "a": np.arange(5, dtype="int64"), + "b": ["A", "B", "C", "D", "E"], + }, + columns=["idx1", "idx2", "a", "b"], + ) + + tm.assert_frame_equal(df.reset_index(), expected) + + def test_reset_index_datetime2(self, tz_naive_fixture): + tz = tz_naive_fixture + idx1 = date_range("1/1/2011", periods=5, freq="D", tz=tz, name="idx1") + idx2 = Index(range(5), name="idx2", dtype="int64") + idx3 = date_range( + "1/1/2012", periods=5, freq="MS", tz="Europe/Paris", name="idx3" + ) + idx = MultiIndex.from_arrays([idx1, idx2, idx3]) + df = DataFrame( + {"a": np.arange(5, dtype="int64"), "b": 
["A", "B", "C", "D", "E"]}, + index=idx, + ) + + expected = DataFrame( + { + "idx1": idx1, + "idx2": np.arange(5, dtype="int64"), + "idx3": idx3, + "a": np.arange(5, dtype="int64"), + "b": ["A", "B", "C", "D", "E"], + }, + columns=["idx1", "idx2", "idx3", "a", "b"], + ) + result = df.reset_index() + tm.assert_frame_equal(result, expected) + + def test_reset_index_datetime3(self, tz_naive_fixture): + # GH#7793 + tz = tz_naive_fixture + dti = date_range("20130101", periods=3, tz=tz) + idx = MultiIndex.from_product([["a", "b"], dti]) + df = DataFrame( + np.arange(6, dtype="int64").reshape(6, 1), columns=["a"], index=idx + ) + + expected = DataFrame( + { + "level_0": "a a a b b b".split(), + "level_1": dti.append(dti), + "a": np.arange(6, dtype="int64"), + }, + columns=["level_0", "level_1", "a"], + ) + result = df.reset_index() + tm.assert_frame_equal(result, expected) + + def test_reset_index_period(self): + # GH#7746 + idx = MultiIndex.from_product( + [pd.period_range("20130101", periods=3, freq="M"), list("abc")], + names=["month", "feature"], + ) + + df = DataFrame( + np.arange(9, dtype="int64").reshape(-1, 1), index=idx, columns=["a"] + ) + expected = DataFrame( + { + "month": ( + [pd.Period("2013-01", freq="M")] * 3 + + [pd.Period("2013-02", freq="M")] * 3 + + [pd.Period("2013-03", freq="M")] * 3 + ), + "feature": ["a", "b", "c"] * 3, + "a": np.arange(9, dtype="int64"), + }, + columns=["month", "feature", "a"], + ) + result = df.reset_index() + tm.assert_frame_equal(result, expected) + + def test_reset_index_delevel_infer_dtype(self): + tuples = list(product(["foo", "bar"], [10, 20], [1.0, 1.1])) + index = MultiIndex.from_tuples(tuples, names=["prm0", "prm1", "prm2"]) + df = DataFrame( + np.random.default_rng(2).standard_normal((8, 3)), + columns=["A", "B", "C"], + index=index, + ) + deleveled = df.reset_index() + assert is_integer_dtype(deleveled["prm1"]) + assert is_float_dtype(deleveled["prm2"]) + + def test_reset_index_with_drop( + self, multiindex_year_month_day_dataframe_random_data + ): + ymd = multiindex_year_month_day_dataframe_random_data + + deleveled = ymd.reset_index(drop=True) + assert len(deleveled.columns) == len(ymd.columns) + assert deleveled.index.name == ymd.index.name + + @pytest.mark.parametrize( + "ix_data, exp_data", + [ + ( + [(pd.NaT, 1), (pd.NaT, 2)], + {"a": [pd.NaT, pd.NaT], "b": [1, 2], "x": [11, 12]}, + ), + ( + [(pd.NaT, 1), (Timestamp("2020-01-01"), 2)], + {"a": [pd.NaT, Timestamp("2020-01-01")], "b": [1, 2], "x": [11, 12]}, + ), + ( + [(pd.NaT, 1), (pd.Timedelta(123, "d"), 2)], + {"a": [pd.NaT, pd.Timedelta(123, "d")], "b": [1, 2], "x": [11, 12]}, + ), + ], + ) + def test_reset_index_nat_multiindex(self, ix_data, exp_data): + # GH#36541: that reset_index() does not raise ValueError + ix = MultiIndex.from_tuples(ix_data, names=["a", "b"]) + result = DataFrame({"x": [11, 12]}, index=ix) + result = result.reset_index() + + expected = DataFrame(exp_data) + tm.assert_frame_equal(result, expected) + + @pytest.mark.parametrize( + "codes", ([[0, 0, 1, 1], [0, 1, 0, 1]], [[0, 0, -1, 1], [0, 1, 0, 1]]) + ) + def test_rest_index_multiindex_categorical_with_missing_values(self, codes): + # GH#24206 + + index = MultiIndex( + [CategoricalIndex(["A", "B"]), CategoricalIndex(["a", "b"])], codes + ) + data = {"col": range(len(index))} + df = DataFrame(data=data, index=index) + + expected = DataFrame( + { + "level_0": Categorical.from_codes(codes[0], categories=["A", "B"]), + "level_1": Categorical.from_codes(codes[1], categories=["a", "b"]), + "col": range(4), + } + ) + 
+ res = df.reset_index() + tm.assert_frame_equal(res, expected) + + # roundtrip + res = expected.set_index(["level_0", "level_1"]).reset_index() + tm.assert_frame_equal(res, expected) + + +@pytest.mark.parametrize( + "array, dtype", + [ + (["a", "b"], object), + ( + pd.period_range("12-1-2000", periods=2, freq="Q-DEC"), + pd.PeriodDtype(freq="Q-DEC"), + ), + ], +) +def test_reset_index_dtypes_on_empty_frame_with_multiindex( + array, dtype, using_infer_string +): + # GH 19602 - Preserve dtype on empty DataFrame with MultiIndex + idx = MultiIndex.from_product([[0, 1], [0.5, 1.0], array]) + result = DataFrame(index=idx)[:0].reset_index().dtypes + if using_infer_string and dtype == object: + dtype = "string" + expected = Series({"level_0": np.int64, "level_1": np.float64, "level_2": dtype}) + tm.assert_series_equal(result, expected) + + +def test_reset_index_empty_frame_with_datetime64_multiindex(): + # https://github.com/pandas-dev/pandas/issues/35606 + dti = pd.DatetimeIndex(["2020-07-20 00:00:00"], dtype="M8[ns]") + idx = MultiIndex.from_product([dti, [3, 4]], names=["a", "b"])[:0] + df = DataFrame(index=idx, columns=["c", "d"]) + result = df.reset_index() + expected = DataFrame( + columns=list("abcd"), index=RangeIndex(start=0, stop=0, step=1) + ) + expected["a"] = expected["a"].astype("datetime64[ns]") + expected["b"] = expected["b"].astype("int64") + tm.assert_frame_equal(result, expected) + + +def test_reset_index_empty_frame_with_datetime64_multiindex_from_groupby( + using_infer_string, +): + # https://github.com/pandas-dev/pandas/issues/35657 + dti = pd.DatetimeIndex(["2020-01-01"], dtype="M8[ns]") + df = DataFrame({"c1": [10.0], "c2": ["a"], "c3": dti}) + df = df.head(0).groupby(["c2", "c3"])[["c1"]].sum() + result = df.reset_index() + expected = DataFrame( + columns=["c2", "c3", "c1"], index=RangeIndex(start=0, stop=0, step=1) + ) + expected["c3"] = expected["c3"].astype("datetime64[ns]") + expected["c1"] = expected["c1"].astype("float64") + if using_infer_string: + expected["c2"] = expected["c2"].astype("string[pyarrow_numpy]") + tm.assert_frame_equal(result, expected) + + +def test_reset_index_multiindex_nat(): + # GH 11479 + idx = range(3) + tstamp = date_range("2015-07-01", freq="D", periods=3) + df = DataFrame({"id": idx, "tstamp": tstamp, "a": list("abc")}) + df.loc[2, "tstamp"] = pd.NaT + result = df.set_index(["id", "tstamp"]).reset_index("id") + exp_dti = pd.DatetimeIndex( + ["2015-07-01", "2015-07-02", "NaT"], dtype="M8[ns]", name="tstamp" + ) + expected = DataFrame( + {"id": range(3), "a": list("abc")}, + index=exp_dti, + ) + tm.assert_frame_equal(result, expected) + + +def test_reset_index_interval_columns_object_cast(): + # GH 19136 + df = DataFrame( + np.eye(2), index=Index([1, 2], name="Year"), columns=cut([1, 2], [0, 1, 2]) + ) + result = df.reset_index() + expected = DataFrame( + [[1, 1.0, 0.0], [2, 0.0, 1.0]], + columns=Index(["Year", Interval(0, 1), Interval(1, 2)]), + ) + tm.assert_frame_equal(result, expected) + + +def test_reset_index_rename(float_frame): + # GH 6878 + result = float_frame.reset_index(names="new_name") + expected = Series(float_frame.index.values, name="new_name") + tm.assert_series_equal(result["new_name"], expected) + + result = float_frame.reset_index(names=123) + expected = Series(float_frame.index.values, name=123) + tm.assert_series_equal(result[123], expected) + + +def test_reset_index_rename_multiindex(float_frame): + # GH 6878 + stacked_df = float_frame.stack(future_stack=True)[::2] + stacked_df = DataFrame({"foo": stacked_df, "bar": 
stacked_df}) + + names = ["first", "second"] + stacked_df.index.names = names + + result = stacked_df.reset_index() + expected = stacked_df.reset_index(names=["new_first", "new_second"]) + tm.assert_series_equal(result["first"], expected["new_first"], check_names=False) + tm.assert_series_equal(result["second"], expected["new_second"], check_names=False) + + +def test_errorreset_index_rename(float_frame): + # GH 6878 + stacked_df = float_frame.stack(future_stack=True)[::2] + stacked_df = DataFrame({"first": stacked_df, "second": stacked_df}) + + with pytest.raises( + ValueError, match="Index names must be str or 1-dimensional list" + ): + stacked_df.reset_index(names={"first": "new_first", "second": "new_second"}) + + with pytest.raises(IndexError, match="list index out of range"): + stacked_df.reset_index(names=["new_first"]) + + +def test_reset_index_false_index_name(): + result_series = Series(data=range(5, 10), index=range(5)) + result_series.index.name = False + result_series.reset_index() + expected_series = Series(range(5, 10), RangeIndex(range(5), name=False)) + tm.assert_series_equal(result_series, expected_series) + + # GH 38147 + result_frame = DataFrame(data=range(5, 10), index=range(5)) + result_frame.index.name = False + result_frame.reset_index() + expected_frame = DataFrame(range(5, 10), RangeIndex(range(5), name=False)) + tm.assert_frame_equal(result_frame, expected_frame) diff --git a/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_set_axis.py b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_set_axis.py new file mode 100644 index 0000000000000000000000000000000000000000..8d249bc7b7fa471db401ed44e50fdb514cf85a51 --- /dev/null +++ b/mantis_evalkit/lib/python3.10/site-packages/pandas/tests/frame/methods/test_set_axis.py @@ -0,0 +1,143 @@ +import numpy as np +import pytest + +from pandas import ( + DataFrame, + Series, +) +import pandas._testing as tm + + +class SharedSetAxisTests: + @pytest.fixture + def obj(self): + raise NotImplementedError("Implemented by subclasses") + + def test_set_axis(self, obj): + # GH14636; this tests setting index for both Series and DataFrame + new_index = list("abcd")[: len(obj)] + expected = obj.copy() + expected.index = new_index + result = obj.set_axis(new_index, axis=0) + tm.assert_equal(expected, result) + + def test_set_axis_copy(self, obj, using_copy_on_write): + # Test copy keyword GH#47932 + new_index = list("abcd")[: len(obj)] + + orig = obj.iloc[:] + expected = obj.copy() + expected.index = new_index + + result = obj.set_axis(new_index, axis=0, copy=True) + tm.assert_equal(expected, result) + assert result is not obj + # check we DID make a copy + if not using_copy_on_write: + if obj.ndim == 1: + assert not tm.shares_memory(result, obj) + else: + assert not any( + tm.shares_memory(result.iloc[:, i], obj.iloc[:, i]) + for i in range(obj.shape[1]) + ) + + result = obj.set_axis(new_index, axis=0, copy=False) + tm.assert_equal(expected, result) + assert result is not obj + # check we did NOT make a copy + if obj.ndim == 1: + assert tm.shares_memory(result, obj) + else: + assert all( + tm.shares_memory(result.iloc[:, i], obj.iloc[:, i]) + for i in range(obj.shape[1]) + ) + + # copy defaults to True + result = obj.set_axis(new_index, axis=0) + tm.assert_equal(expected, result) + assert result is not obj + if using_copy_on_write: + # check we DID NOT make a copy + if obj.ndim == 1: + assert tm.shares_memory(result, obj) + else: + assert any( + tm.shares_memory(result.iloc[:, i], 
obj.iloc[:, i]) + for i in range(obj.shape[1]) + ) + # check we DID make a copy + elif obj.ndim == 1: + assert not tm.shares_memory(result, obj) + else: + assert not any( + tm.shares_memory(result.iloc[:, i], obj.iloc[:, i]) + for i in range(obj.shape[1]) + ) + + res = obj.set_axis(new_index, copy=False) + tm.assert_equal(expected, res) + # check we did NOT make a copy + if res.ndim == 1: + assert tm.shares_memory(res, orig) + else: + assert all( + tm.shares_memory(res.iloc[:, i], orig.iloc[:, i]) + for i in range(res.shape[1]) + ) + + def test_set_axis_unnamed_kwarg_warns(self, obj): + # omitting the "axis" parameter + new_index = list("abcd")[: len(obj)] + + expected = obj.copy() + expected.index = new_index + + result = obj.set_axis(new_index) + tm.assert_equal(result, expected) + + @pytest.mark.parametrize("axis", [3, "foo"]) + def test_set_axis_invalid_axis_name(self, axis, obj): + # wrong values for the "axis" parameter + with pytest.raises(ValueError, match="No axis named"): + obj.set_axis(list("abc"), axis=axis) + + def test_set_axis_setattr_index_not_collection(self, obj): + # wrong type + msg = ( + r"Index\(\.\.\.\) must be called with a collection of some " + r"kind, None was passed" + ) + with pytest.raises(TypeError, match=msg): + obj.index = None + + def test_set_axis_setattr_index_wrong_length(self, obj): + # wrong length + msg = ( + f"Length mismatch: Expected axis has {len(obj)} elements, " + f"new values have {len(obj)-1} elements" + ) + with pytest.raises(ValueError, match=msg): + obj.index = np.arange(len(obj) - 1) + + if obj.ndim == 2: + with pytest.raises(ValueError, match="Length mismatch"): + obj.columns = obj.columns[::2] + + +class TestDataFrameSetAxis(SharedSetAxisTests): + @pytest.fixture + def obj(self): + df = DataFrame( + {"A": [1.1, 2.2, 3.3], "B": [5.0, 6.1, 7.2], "C": [4.4, 5.5, 6.6]}, + index=[2010, 2011, 2012], + ) + return df + + +class TestSeriesSetAxis(SharedSetAxisTests): + @pytest.fixture + def obj(self): + ser = Series(np.arange(4), index=[1, 3, 5, 7], dtype="int64") + return ser diff --git a/moondream/lib/python3.10/site-packages/altair/vegalite/v5/__pycache__/api.cpython-310.pyc b/moondream/lib/python3.10/site-packages/altair/vegalite/v5/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3987df5b14d5ae2beced8da69fda875bf1a149de --- /dev/null +++ b/moondream/lib/python3.10/site-packages/altair/vegalite/v5/__pycache__/api.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f22b5f7666123e57a16b8fcd0ee830797e1dcba7a2df4aa2866e2cfdee2f6a01 +size 162833 diff --git a/moondream/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py b/moondream/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6491849925e863c35a98390a31729cb13e28ca19 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/io/clipboard/__init__.py @@ -0,0 +1,747 @@ +""" +Pyperclip + +A cross-platform clipboard module for Python, +with copy & paste functions for plain text. +By Al Sweigart al@inventwithpython.com +Licence at LICENSES/PYPERCLIP_LICENSE + +Usage: + import pyperclip + pyperclip.copy('The text to be copied to the clipboard.') + spam = pyperclip.paste() + + if not pyperclip.is_available(): + print("Copy functionality unavailable!") + +On Windows, no additional modules are needed. +On Mac, the pyobjc module is used, falling back to the pbcopy and pbpaste cli + commands. 
(These commands should come with OS X.). +On Linux, install xclip, xsel, or wl-clipboard (for "wayland" sessions) via +package manager. +For example, in Debian: + sudo apt-get install xclip + sudo apt-get install xsel + sudo apt-get install wl-clipboard + +Otherwise on Linux, you will need the PyQt5 modules installed. + +This module does not work with PyGObject yet. + +Cygwin is currently not supported. + +Security Note: This module runs programs with these names: + - pbcopy + - pbpaste + - xclip + - xsel + - wl-copy/wl-paste + - klipper + - qdbus +A malicious user could rename or add programs with these names, tricking +Pyperclip into running them with whatever permissions the Python process has. + +""" + +__version__ = "1.8.2" + + +import contextlib +import ctypes +from ctypes import ( + c_size_t, + c_wchar, + c_wchar_p, + get_errno, + sizeof, +) +import os +import platform +from shutil import which as _executable_exists +import subprocess +import time +import warnings + +from pandas.errors import ( + PyperclipException, + PyperclipWindowsException, +) +from pandas.util._exceptions import find_stack_level + +# `import PyQt4` sys.exit()s if DISPLAY is not in the environment. +# Thus, we need to detect the presence of $DISPLAY manually +# and not load PyQt4 if it is absent. +HAS_DISPLAY = os.getenv("DISPLAY") + +EXCEPT_MSG = """ + Pyperclip could not find a copy/paste mechanism for your system. + For more information, please visit + https://pyperclip.readthedocs.io/en/latest/index.html#not-implemented-error + """ + +ENCODING = "utf-8" + + +class PyperclipTimeoutException(PyperclipException): + pass + + +def _stringifyText(text) -> str: + acceptedTypes = (str, int, float, bool) + if not isinstance(text, acceptedTypes): + raise PyperclipException( + f"only str, int, float, and bool values " + f"can be copied to the clipboard, not {type(text).__name__}" + ) + return str(text) + + +def init_osx_pbcopy_clipboard(): + def copy_osx_pbcopy(text): + text = _stringifyText(text) # Converts non-str values to str. + with subprocess.Popen( + ["pbcopy", "w"], stdin=subprocess.PIPE, close_fds=True + ) as p: + p.communicate(input=text.encode(ENCODING)) + + def paste_osx_pbcopy(): + with subprocess.Popen( + ["pbpaste", "r"], stdout=subprocess.PIPE, close_fds=True + ) as p: + stdout = p.communicate()[0] + return stdout.decode(ENCODING) + + return copy_osx_pbcopy, paste_osx_pbcopy + + +def init_osx_pyobjc_clipboard(): + def copy_osx_pyobjc(text): + """Copy string argument to clipboard""" + text = _stringifyText(text) # Converts non-str values to str. 
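+ # wrap the text in an NSString, encode it as UTF-8 data, and place it on the general pasteboard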
+ newStr = Foundation.NSString.stringWithString_(text).nsstring() + newData = newStr.dataUsingEncoding_(Foundation.NSUTF8StringEncoding) + board = AppKit.NSPasteboard.generalPasteboard() + board.declareTypes_owner_([AppKit.NSStringPboardType], None) + board.setData_forType_(newData, AppKit.NSStringPboardType) + + def paste_osx_pyobjc(): + """Returns contents of clipboard""" + board = AppKit.NSPasteboard.generalPasteboard() + content = board.stringForType_(AppKit.NSStringPboardType) + return content + + return copy_osx_pyobjc, paste_osx_pyobjc + + +def init_qt_clipboard(): + global QApplication + # $DISPLAY should exist + + # Try to import from qtpy, but if that fails try PyQt5 then PyQt4 + try: + from qtpy.QtWidgets import QApplication + except ImportError: + try: + from PyQt5.QtWidgets import QApplication + except ImportError: + from PyQt4.QtGui import QApplication + + app = QApplication.instance() + if app is None: + app = QApplication([]) + + def copy_qt(text): + text = _stringifyText(text) # Converts non-str values to str. + cb = app.clipboard() + cb.setText(text) + + def paste_qt() -> str: + cb = app.clipboard() + return str(cb.text()) + + return copy_qt, paste_qt + + +def init_xclip_clipboard(): + DEFAULT_SELECTION = "c" + PRIMARY_SELECTION = "p" + + def copy_xclip(text, primary=False): + text = _stringifyText(text) # Converts non-str values to str. + selection = DEFAULT_SELECTION + if primary: + selection = PRIMARY_SELECTION + with subprocess.Popen( + ["xclip", "-selection", selection], stdin=subprocess.PIPE, close_fds=True + ) as p: + p.communicate(input=text.encode(ENCODING)) + + def paste_xclip(primary=False): + selection = DEFAULT_SELECTION + if primary: + selection = PRIMARY_SELECTION + with subprocess.Popen( + ["xclip", "-selection", selection, "-o"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True, + ) as p: + stdout = p.communicate()[0] + # Intentionally ignore extraneous output on stderr when clipboard is empty + return stdout.decode(ENCODING) + + return copy_xclip, paste_xclip + + +def init_xsel_clipboard(): + DEFAULT_SELECTION = "-b" + PRIMARY_SELECTION = "-p" + + def copy_xsel(text, primary=False): + text = _stringifyText(text) # Converts non-str values to str. + selection_flag = DEFAULT_SELECTION + if primary: + selection_flag = PRIMARY_SELECTION + with subprocess.Popen( + ["xsel", selection_flag, "-i"], stdin=subprocess.PIPE, close_fds=True + ) as p: + p.communicate(input=text.encode(ENCODING)) + + def paste_xsel(primary=False): + selection_flag = DEFAULT_SELECTION + if primary: + selection_flag = PRIMARY_SELECTION + with subprocess.Popen( + ["xsel", selection_flag, "-o"], stdout=subprocess.PIPE, close_fds=True + ) as p: + stdout = p.communicate()[0] + return stdout.decode(ENCODING) + + return copy_xsel, paste_xsel + + +def init_wl_clipboard(): + PRIMARY_SELECTION = "-p" + + def copy_wl(text, primary=False): + text = _stringifyText(text) # Converts non-str values to str. 
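+ # wl-copy reads the text from stdin; a blank string is handled with --clear instead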
+ args = ["wl-copy"] + if primary: + args.append(PRIMARY_SELECTION) + if not text: + args.append("--clear") + subprocess.check_call(args, close_fds=True) + else: + p = subprocess.Popen(args, stdin=subprocess.PIPE, close_fds=True) + p.communicate(input=text.encode(ENCODING)) + + def paste_wl(primary=False): + args = ["wl-paste", "-n"] + if primary: + args.append(PRIMARY_SELECTION) + p = subprocess.Popen(args, stdout=subprocess.PIPE, close_fds=True) + stdout, _stderr = p.communicate() + return stdout.decode(ENCODING) + + return copy_wl, paste_wl + + +def init_klipper_clipboard(): + def copy_klipper(text): + text = _stringifyText(text) # Converts non-str values to str. + with subprocess.Popen( + [ + "qdbus", + "org.kde.klipper", + "/klipper", + "setClipboardContents", + text.encode(ENCODING), + ], + stdin=subprocess.PIPE, + close_fds=True, + ) as p: + p.communicate(input=None) + + def paste_klipper(): + with subprocess.Popen( + ["qdbus", "org.kde.klipper", "/klipper", "getClipboardContents"], + stdout=subprocess.PIPE, + close_fds=True, + ) as p: + stdout = p.communicate()[0] + + # Workaround for https://bugs.kde.org/show_bug.cgi?id=342874 + # TODO: https://github.com/asweigart/pyperclip/issues/43 + clipboardContents = stdout.decode(ENCODING) + # even if blank, Klipper will append a newline at the end + assert len(clipboardContents) > 0 + # make sure that newline is there + assert clipboardContents.endswith("\n") + if clipboardContents.endswith("\n"): + clipboardContents = clipboardContents[:-1] + return clipboardContents + + return copy_klipper, paste_klipper + + +def init_dev_clipboard_clipboard(): + def copy_dev_clipboard(text): + text = _stringifyText(text) # Converts non-str values to str. + if text == "": + warnings.warn( + "Pyperclip cannot copy a blank string to the clipboard on Cygwin. 
" + "This is effectively a no-op.", + stacklevel=find_stack_level(), + ) + if "\r" in text: + warnings.warn( + "Pyperclip cannot handle \\r characters on Cygwin.", + stacklevel=find_stack_level(), + ) + + with open("/dev/clipboard", "w", encoding="utf-8") as fd: + fd.write(text) + + def paste_dev_clipboard() -> str: + with open("/dev/clipboard", encoding="utf-8") as fd: + content = fd.read() + return content + + return copy_dev_clipboard, paste_dev_clipboard + + +def init_no_clipboard(): + class ClipboardUnavailable: + def __call__(self, *args, **kwargs): + raise PyperclipException(EXCEPT_MSG) + + def __bool__(self) -> bool: + return False + + return ClipboardUnavailable(), ClipboardUnavailable() + + +# Windows-related clipboard functions: +class CheckedCall: + def __init__(self, f) -> None: + super().__setattr__("f", f) + + def __call__(self, *args): + ret = self.f(*args) + if not ret and get_errno(): + raise PyperclipWindowsException("Error calling " + self.f.__name__) + return ret + + def __setattr__(self, key, value): + setattr(self.f, key, value) + + +def init_windows_clipboard(): + global HGLOBAL, LPVOID, DWORD, LPCSTR, INT + global HWND, HINSTANCE, HMENU, BOOL, UINT, HANDLE + from ctypes.wintypes import ( + BOOL, + DWORD, + HANDLE, + HGLOBAL, + HINSTANCE, + HMENU, + HWND, + INT, + LPCSTR, + LPVOID, + UINT, + ) + + windll = ctypes.windll + msvcrt = ctypes.CDLL("msvcrt") + + safeCreateWindowExA = CheckedCall(windll.user32.CreateWindowExA) + safeCreateWindowExA.argtypes = [ + DWORD, + LPCSTR, + LPCSTR, + DWORD, + INT, + INT, + INT, + INT, + HWND, + HMENU, + HINSTANCE, + LPVOID, + ] + safeCreateWindowExA.restype = HWND + + safeDestroyWindow = CheckedCall(windll.user32.DestroyWindow) + safeDestroyWindow.argtypes = [HWND] + safeDestroyWindow.restype = BOOL + + OpenClipboard = windll.user32.OpenClipboard + OpenClipboard.argtypes = [HWND] + OpenClipboard.restype = BOOL + + safeCloseClipboard = CheckedCall(windll.user32.CloseClipboard) + safeCloseClipboard.argtypes = [] + safeCloseClipboard.restype = BOOL + + safeEmptyClipboard = CheckedCall(windll.user32.EmptyClipboard) + safeEmptyClipboard.argtypes = [] + safeEmptyClipboard.restype = BOOL + + safeGetClipboardData = CheckedCall(windll.user32.GetClipboardData) + safeGetClipboardData.argtypes = [UINT] + safeGetClipboardData.restype = HANDLE + + safeSetClipboardData = CheckedCall(windll.user32.SetClipboardData) + safeSetClipboardData.argtypes = [UINT, HANDLE] + safeSetClipboardData.restype = HANDLE + + safeGlobalAlloc = CheckedCall(windll.kernel32.GlobalAlloc) + safeGlobalAlloc.argtypes = [UINT, c_size_t] + safeGlobalAlloc.restype = HGLOBAL + + safeGlobalLock = CheckedCall(windll.kernel32.GlobalLock) + safeGlobalLock.argtypes = [HGLOBAL] + safeGlobalLock.restype = LPVOID + + safeGlobalUnlock = CheckedCall(windll.kernel32.GlobalUnlock) + safeGlobalUnlock.argtypes = [HGLOBAL] + safeGlobalUnlock.restype = BOOL + + wcslen = CheckedCall(msvcrt.wcslen) + wcslen.argtypes = [c_wchar_p] + wcslen.restype = UINT + + GMEM_MOVEABLE = 0x0002 + CF_UNICODETEXT = 13 + + @contextlib.contextmanager + def window(): + """ + Context that provides a valid Windows hwnd. + """ + # we really just need the hwnd, so setting "STATIC" + # as predefined lpClass is just fine. 
+ hwnd = safeCreateWindowExA( + 0, b"STATIC", None, 0, 0, 0, 0, 0, None, None, None, None + ) + try: + yield hwnd + finally: + safeDestroyWindow(hwnd) + + @contextlib.contextmanager + def clipboard(hwnd): + """ + Context manager that opens the clipboard and prevents + other applications from modifying the clipboard content. + """ + # We may not get the clipboard handle immediately because + # some other application is accessing it (?) + # We try for at least 500ms to get the clipboard. + t = time.time() + 0.5 + success = False + while time.time() < t: + success = OpenClipboard(hwnd) + if success: + break + time.sleep(0.01) + if not success: + raise PyperclipWindowsException("Error calling OpenClipboard") + + try: + yield + finally: + safeCloseClipboard() + + def copy_windows(text): + # This function is heavily based on + # http://msdn.com/ms649016#_win32_Copying_Information_to_the_Clipboard + + text = _stringifyText(text) # Converts non-str values to str. + + with window() as hwnd: + # http://msdn.com/ms649048 + # If an application calls OpenClipboard with hwnd set to NULL, + # EmptyClipboard sets the clipboard owner to NULL; + # this causes SetClipboardData to fail. + # => We need a valid hwnd to copy something. + with clipboard(hwnd): + safeEmptyClipboard() + + if text: + # http://msdn.com/ms649051 + # If the hMem parameter identifies a memory object, + # the object must have been allocated using the + # function with the GMEM_MOVEABLE flag. + count = wcslen(text) + 1 + handle = safeGlobalAlloc(GMEM_MOVEABLE, count * sizeof(c_wchar)) + locked_handle = safeGlobalLock(handle) + + ctypes.memmove( + c_wchar_p(locked_handle), + c_wchar_p(text), + count * sizeof(c_wchar), + ) + + safeGlobalUnlock(handle) + safeSetClipboardData(CF_UNICODETEXT, handle) + + def paste_windows(): + with clipboard(None): + handle = safeGetClipboardData(CF_UNICODETEXT) + if not handle: + # GetClipboardData may return NULL with errno == NO_ERROR + # if the clipboard is empty. + # (Also, it may return a handle to an empty buffer, + # but technically that's not empty) + return "" + return c_wchar_p(handle).value + + return copy_windows, paste_windows + + +def init_wsl_clipboard(): + def copy_wsl(text): + text = _stringifyText(text) # Converts non-str values to str. + with subprocess.Popen(["clip.exe"], stdin=subprocess.PIPE, close_fds=True) as p: + p.communicate(input=text.encode(ENCODING)) + + def paste_wsl(): + with subprocess.Popen( + ["powershell.exe", "-command", "Get-Clipboard"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + close_fds=True, + ) as p: + stdout = p.communicate()[0] + # WSL appends "\r\n" to the contents. + return stdout[:-2].decode(ENCODING) + + return copy_wsl, paste_wsl + + +# Automatic detection of clipboard mechanisms +# and importing is done in determine_clipboard(): +def determine_clipboard(): + """ + Determine the OS/platform and set the copy() and paste() functions + accordingly. 
+ """ + global Foundation, AppKit, qtpy, PyQt4, PyQt5 + + # Setup for the CYGWIN platform: + if ( + "cygwin" in platform.system().lower() + ): # Cygwin has a variety of values returned by platform.system(), + # such as 'CYGWIN_NT-6.1' + # FIXME(pyperclip#55): pyperclip currently does not support Cygwin, + # see https://github.com/asweigart/pyperclip/issues/55 + if os.path.exists("/dev/clipboard"): + warnings.warn( + "Pyperclip's support for Cygwin is not perfect, " + "see https://github.com/asweigart/pyperclip/issues/55", + stacklevel=find_stack_level(), + ) + return init_dev_clipboard_clipboard() + + # Setup for the WINDOWS platform: + elif os.name == "nt" or platform.system() == "Windows": + return init_windows_clipboard() + + if platform.system() == "Linux": + if _executable_exists("wslconfig.exe"): + return init_wsl_clipboard() + + # Setup for the macOS platform: + if os.name == "mac" or platform.system() == "Darwin": + try: + import AppKit + import Foundation # check if pyobjc is installed + except ImportError: + return init_osx_pbcopy_clipboard() + else: + return init_osx_pyobjc_clipboard() + + # Setup for the LINUX platform: + if HAS_DISPLAY: + if os.environ.get("WAYLAND_DISPLAY") and _executable_exists("wl-copy"): + return init_wl_clipboard() + if _executable_exists("xsel"): + return init_xsel_clipboard() + if _executable_exists("xclip"): + return init_xclip_clipboard() + if _executable_exists("klipper") and _executable_exists("qdbus"): + return init_klipper_clipboard() + + try: + # qtpy is a small abstraction layer that lets you write applications + # using a single api call to either PyQt or PySide. + # https://pypi.python.org/project/QtPy + import qtpy # check if qtpy is installed + except ImportError: + # If qtpy isn't installed, fall back on importing PyQt4. + try: + import PyQt5 # check if PyQt5 is installed + except ImportError: + try: + import PyQt4 # check if PyQt4 is installed + except ImportError: + pass # We want to fail fast for all non-ImportError exceptions. + else: + return init_qt_clipboard() + else: + return init_qt_clipboard() + else: + return init_qt_clipboard() + + return init_no_clipboard() + + +def set_clipboard(clipboard): + """ + Explicitly sets the clipboard mechanism. The "clipboard mechanism" is how + the copy() and paste() functions interact with the operating system to + implement the copy/paste feature. The clipboard parameter must be one of: + - pbcopy + - pyobjc (default on macOS) + - qt + - xclip + - xsel + - klipper + - windows (default on Windows) + - no (this is what is set when no clipboard mechanism can be found) + """ + global copy, paste + + clipboard_types = { + "pbcopy": init_osx_pbcopy_clipboard, + "pyobjc": init_osx_pyobjc_clipboard, + "qt": init_qt_clipboard, # TODO - split this into 'qtpy', 'pyqt4', and 'pyqt5' + "xclip": init_xclip_clipboard, + "xsel": init_xsel_clipboard, + "wl-clipboard": init_wl_clipboard, + "klipper": init_klipper_clipboard, + "windows": init_windows_clipboard, + "no": init_no_clipboard, + } + + if clipboard not in clipboard_types: + allowed_clipboard_types = [repr(_) for _ in clipboard_types] + raise ValueError( + f"Argument must be one of {', '.join(allowed_clipboard_types)}" + ) + + # Sets pyperclip's copy() and paste() functions: + copy, paste = clipboard_types[clipboard]() + + +def lazy_load_stub_copy(text): + """ + A stub function for copy(), which will load the real copy() function when + called so that the real copy() function is used for later calls. 
+
+    This allows users to import pyperclip without having determine_clipboard()
+    automatically run and select a clipboard mechanism.
+    This could be a problem if it selects, say, the memory-heavy PyQt4 module
+    but the user was just going to immediately call set_clipboard() to use a
+    different clipboard mechanism.
+
+    The lazy loading this stub function implements gives the user a chance to
+    call set_clipboard() to pick another clipboard mechanism. Or, if the user
+    simply calls copy() or paste() without calling set_clipboard() first,
+    pyperclip will fall back on whatever clipboard mechanism
+    determine_clipboard() automatically chooses.
+    """
+    global copy, paste
+    copy, paste = determine_clipboard()
+    return copy(text)
+
+
+def lazy_load_stub_paste():
+    """
+    A stub function for paste(), which will load the real paste() function when
+    called so that the real paste() function is used for later calls.
+
+    This allows users to import pyperclip without having determine_clipboard()
+    automatically run and select a clipboard mechanism.
+    This could be a problem if it selects, say, the memory-heavy PyQt4 module
+    but the user was just going to immediately call set_clipboard() to use a
+    different clipboard mechanism.
+
+    The lazy loading this stub function implements gives the user a chance to
+    call set_clipboard() to pick another clipboard mechanism. Or, if the user
+    simply calls copy() or paste() without calling set_clipboard() first,
+    pyperclip will fall back on whatever clipboard mechanism
+    determine_clipboard() automatically chooses.
+    """
+    global copy, paste
+    copy, paste = determine_clipboard()
+    return paste()
+
+
+def is_available() -> bool:
+    return copy != lazy_load_stub_copy and paste != lazy_load_stub_paste
+
+
+# Initially, copy() and paste() are set to lazy loading wrappers which will
+# set `copy` and `paste` to real functions the first time they're used, unless
+# set_clipboard() or determine_clipboard() is called first.
+copy, paste = lazy_load_stub_copy, lazy_load_stub_paste
+
+
+def waitForPaste(timeout=None):
+    """This function call blocks until a non-empty text string exists on the
+    clipboard. It returns this text.
+
+    This function raises PyperclipTimeoutException if timeout is set and that
+    many seconds elapse without non-empty text being put on the clipboard."""
+    startTime = time.time()
+    while True:
+        clipboardText = paste()
+        if clipboardText != "":
+            return clipboardText
+        time.sleep(0.01)
+
+        if timeout is not None and time.time() > startTime + timeout:
+            raise PyperclipTimeoutException(
+                "waitForPaste() timed out after " + str(timeout) + " seconds."
+            )
+
+
+def waitForNewPaste(timeout=None):
+    """This function call blocks until a new text string exists on the
+    clipboard that is different from the text that was there when the function
+    was first called. It returns this text.
+
+    This function raises PyperclipTimeoutException if timeout is set and that
+    many seconds elapse without new text being put on the clipboard."""
+    startTime = time.time()
+    originalText = paste()
+    while True:
+        currentText = paste()
+        if currentText != originalText:
+            return currentText
+        time.sleep(0.01)
+
+        if timeout is not None and time.time() > startTime + timeout:
+            raise PyperclipTimeoutException(
+                "waitForNewPaste() timed out after " + str(timeout) + " seconds."
+ ) + + +__all__ = [ + "copy", + "paste", + "waitForPaste", + "waitForNewPaste", + "set_clipboard", + "determine_clipboard", +] + +# pandas aliases +clipboard_get = paste +clipboard_set = copy diff --git a/moondream/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d54ecff7c24689ffa7f7ca55de400d00368299ef Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/io/clipboard/__pycache__/__init__.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/io/parsers/__init__.py b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ff11968db15f0f7c6057a46c252a91daee7b9cd9 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__init__.py @@ -0,0 +1,9 @@ +from pandas.io.parsers.readers import ( + TextFileReader, + TextParser, + read_csv, + read_fwf, + read_table, +) + +__all__ = ["TextFileReader", "TextParser", "read_csv", "read_fwf", "read_table"] diff --git a/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..185889d11632841a5da33ffed39c5ad9ab81b599 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/__init__.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..deb4fcd84dfdaad8852251e783fa2a73b59c21a3 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/arrow_parser_wrapper.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e06785f3cb9439c2ddba86b4dcefbd87a7f8d97 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/base_parser.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..54489db96786bf06b0b1dee327c9310dfc964fe7 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/c_parser_wrapper.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0d1b22a60c58ce9f806e1d3e304bf6b11a88e153 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/python_parser.cpython-310.pyc differ diff --git 
a/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d678b7839f69ded4a737fb58dc56c9878e4c55f7 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/io/parsers/__pycache__/readers.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py b/moondream/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..890b22154648e6b12d636c5df3595d105ff02ac9 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/io/parsers/arrow_parser_wrapper.py @@ -0,0 +1,303 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING +import warnings + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib +from pandas.compat._optional import import_optional_dependency +from pandas.errors import ( + ParserError, + ParserWarning, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.inference import is_integer + +import pandas as pd +from pandas import DataFrame + +from pandas.io._util import ( + _arrow_dtype_mapping, + arrow_string_types_mapper, +) +from pandas.io.parsers.base_parser import ParserBase + +if TYPE_CHECKING: + from pandas._typing import ReadBuffer + + +class ArrowParserWrapper(ParserBase): + """ + Wrapper for the pyarrow engine for read_csv() + """ + + def __init__(self, src: ReadBuffer[bytes], **kwds) -> None: + super().__init__(kwds) + self.kwds = kwds + self.src = src + + self._parse_kwds() + + def _parse_kwds(self) -> None: + """ + Validates keywords before passing to pyarrow. + """ + encoding: str | None = self.kwds.get("encoding") + self.encoding = "utf-8" if encoding is None else encoding + + na_values = self.kwds["na_values"] + if isinstance(na_values, dict): + raise ValueError( + "The pyarrow engine doesn't support passing a dict for na_values" + ) + self.na_values = list(self.kwds["na_values"]) + + def _get_pyarrow_options(self) -> None: + """ + Rename some arguments to pass to pyarrow + """ + mapping = { + "usecols": "include_columns", + "na_values": "null_values", + "escapechar": "escape_char", + "skip_blank_lines": "ignore_empty_lines", + "decimal": "decimal_point", + "quotechar": "quote_char", + } + for pandas_name, pyarrow_name in mapping.items(): + if pandas_name in self.kwds and self.kwds.get(pandas_name) is not None: + self.kwds[pyarrow_name] = self.kwds.pop(pandas_name) + + # Date format handling + # If we get a string, we need to convert it into a list for pyarrow + # If we get a dict, we want to parse those separately + date_format = self.date_format + if isinstance(date_format, str): + date_format = [date_format] + else: + # In case of dict, we don't want to propagate through, so + # just set to pyarrow default of None + + # Ideally, in future we disable pyarrow dtype inference (read in as string) + # to prevent misreads. 
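+            # (Editorial note, not upstream code) e.g. date_format="%Y-%m-%d"
+            # becomes timestamp_parsers=["%Y-%m-%d"] via the list-wrapping
+            # above, while date_format={"col": "%Y-%m-%d"} is dropped here and
+            # applied per-column later through _do_date_conversions in
+            # _finalize_pandas_output.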
+ date_format = None + self.kwds["timestamp_parsers"] = date_format + + self.parse_options = { + option_name: option_value + for option_name, option_value in self.kwds.items() + if option_value is not None + and option_name + in ("delimiter", "quote_char", "escape_char", "ignore_empty_lines") + } + + on_bad_lines = self.kwds.get("on_bad_lines") + if on_bad_lines is not None: + if callable(on_bad_lines): + self.parse_options["invalid_row_handler"] = on_bad_lines + elif on_bad_lines == ParserBase.BadLineHandleMethod.ERROR: + self.parse_options[ + "invalid_row_handler" + ] = None # PyArrow raises an exception by default + elif on_bad_lines == ParserBase.BadLineHandleMethod.WARN: + + def handle_warning(invalid_row) -> str: + warnings.warn( + f"Expected {invalid_row.expected_columns} columns, but found " + f"{invalid_row.actual_columns}: {invalid_row.text}", + ParserWarning, + stacklevel=find_stack_level(), + ) + return "skip" + + self.parse_options["invalid_row_handler"] = handle_warning + elif on_bad_lines == ParserBase.BadLineHandleMethod.SKIP: + self.parse_options["invalid_row_handler"] = lambda _: "skip" + + self.convert_options = { + option_name: option_value + for option_name, option_value in self.kwds.items() + if option_value is not None + and option_name + in ( + "include_columns", + "null_values", + "true_values", + "false_values", + "decimal_point", + "timestamp_parsers", + ) + } + self.convert_options["strings_can_be_null"] = "" in self.kwds["null_values"] + # autogenerated column names are prefixed with 'f' in pyarrow.csv + if self.header is None and "include_columns" in self.convert_options: + self.convert_options["include_columns"] = [ + f"f{n}" for n in self.convert_options["include_columns"] + ] + + self.read_options = { + "autogenerate_column_names": self.header is None, + "skip_rows": self.header + if self.header is not None + else self.kwds["skiprows"], + "encoding": self.encoding, + } + + def _finalize_pandas_output(self, frame: DataFrame) -> DataFrame: + """ + Processes data read in based on kwargs. + + Parameters + ---------- + frame: DataFrame + The DataFrame to process. + + Returns + ------- + DataFrame + The processed DataFrame. + """ + num_cols = len(frame.columns) + multi_index_named = True + if self.header is None: + if self.names is None: + if self.header is None: + self.names = range(num_cols) + if len(self.names) != num_cols: + # usecols is passed through to pyarrow, we only handle index col here + # The only way self.names is not the same length as number of cols is + # if we have int index_col. We should just pad the names(they will get + # removed anyways) to expected length then. 
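+                # (Editorial note, not upstream code) e.g. with num_cols=4 and
+                # names=["a", "b", "c"] plus an integer index_col, this pads to
+                # [0, "a", "b", "c"]; the padded 0 is consumed again when the
+                # index column is set and dropped further down.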
+ self.names = list(range(num_cols - len(self.names))) + self.names + multi_index_named = False + frame.columns = self.names + # we only need the frame not the names + _, frame = self._do_date_conversions(frame.columns, frame) + if self.index_col is not None: + index_to_set = self.index_col.copy() + for i, item in enumerate(self.index_col): + if is_integer(item): + index_to_set[i] = frame.columns[item] + # String case + elif item not in frame.columns: + raise ValueError(f"Index {item} invalid") + + # Process dtype for index_col and drop from dtypes + if self.dtype is not None: + key, new_dtype = ( + (item, self.dtype.get(item)) + if self.dtype.get(item) is not None + else (frame.columns[item], self.dtype.get(frame.columns[item])) + ) + if new_dtype is not None: + frame[key] = frame[key].astype(new_dtype) + del self.dtype[key] + + frame.set_index(index_to_set, drop=True, inplace=True) + # Clear names if headerless and no name given + if self.header is None and not multi_index_named: + frame.index.names = [None] * len(frame.index.names) + + if self.dtype is not None: + # Ignore non-existent columns from dtype mapping + # like other parsers do + if isinstance(self.dtype, dict): + self.dtype = { + k: pandas_dtype(v) + for k, v in self.dtype.items() + if k in frame.columns + } + else: + self.dtype = pandas_dtype(self.dtype) + try: + frame = frame.astype(self.dtype) + except TypeError as e: + # GH#44901 reraise to keep api consistent + raise ValueError(e) + return frame + + def _validate_usecols(self, usecols) -> None: + if lib.is_list_like(usecols) and not all(isinstance(x, str) for x in usecols): + raise ValueError( + "The pyarrow engine does not allow 'usecols' to be integer " + "column positions. Pass a list of string column names instead." + ) + elif callable(usecols): + raise ValueError( + "The pyarrow engine does not allow 'usecols' to be a callable." + ) + + def read(self) -> DataFrame: + """ + Reads the contents of a CSV file into a DataFrame and + processes it according to the kwargs passed in the + constructor. + + Returns + ------- + DataFrame + The DataFrame created from the CSV file. 
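+
+        Notes
+        -----
+        (Editorial note) If pyarrow.csv.ConvertOptions rejects the assembled
+        options with a TypeError, usecols and na_values are re-validated below
+        to raise a more specific error where possible; otherwise the original
+        exception is re-raised.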
+ """ + pa = import_optional_dependency("pyarrow") + pyarrow_csv = import_optional_dependency("pyarrow.csv") + self._get_pyarrow_options() + + try: + convert_options = pyarrow_csv.ConvertOptions(**self.convert_options) + except TypeError: + include = self.convert_options.get("include_columns", None) + if include is not None: + self._validate_usecols(include) + + nulls = self.convert_options.get("null_values", set()) + if not lib.is_list_like(nulls) or not all( + isinstance(x, str) for x in nulls + ): + raise TypeError( + "The 'pyarrow' engine requires all na_values to be strings" + ) + + raise + + try: + table = pyarrow_csv.read_csv( + self.src, + read_options=pyarrow_csv.ReadOptions(**self.read_options), + parse_options=pyarrow_csv.ParseOptions(**self.parse_options), + convert_options=convert_options, + ) + except pa.ArrowInvalid as e: + raise ParserError(e) from e + + dtype_backend = self.kwds["dtype_backend"] + + # Convert all pa.null() cols -> float64 (non nullable) + # else Int64 (nullable case, see below) + if dtype_backend is lib.no_default: + new_schema = table.schema + new_type = pa.float64() + for i, arrow_type in enumerate(table.schema.types): + if pa.types.is_null(arrow_type): + new_schema = new_schema.set( + i, new_schema.field(i).with_type(new_type) + ) + + table = table.cast(new_schema) + + if dtype_backend == "pyarrow": + frame = table.to_pandas(types_mapper=pd.ArrowDtype) + elif dtype_backend == "numpy_nullable": + # Modify the default mapping to also + # map null to Int64 (to match other engines) + dtype_mapping = _arrow_dtype_mapping() + dtype_mapping[pa.null()] = pd.Int64Dtype() + frame = table.to_pandas(types_mapper=dtype_mapping.get) + elif using_pyarrow_string_dtype(): + frame = table.to_pandas(types_mapper=arrow_string_types_mapper()) + + else: + frame = table.to_pandas() + return self._finalize_pandas_output(frame) diff --git a/moondream/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py b/moondream/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..09f0f2af8e5c6b55bff173ff74cd290fdf61cbae --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/io/parsers/base_parser.py @@ -0,0 +1,1448 @@ +from __future__ import annotations + +from collections import defaultdict +from copy import copy +import csv +import datetime +from enum import Enum +import itertools +from typing import ( + TYPE_CHECKING, + Any, + Callable, + cast, + final, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + parsers, +) +import pandas._libs.ops as libops +from pandas._libs.parsers import STR_NA_VALUES +from pandas._libs.tslibs import parsing +from pandas.compat._optional import import_optional_dependency +from pandas.errors import ( + ParserError, + ParserWarning, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.astype import astype_array +from pandas.core.dtypes.common import ( + ensure_object, + is_bool_dtype, + is_dict_like, + is_extension_array_dtype, + is_float_dtype, + is_integer, + is_integer_dtype, + is_list_like, + is_object_dtype, + is_scalar, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + CategoricalDtype, + ExtensionDtype, +) +from pandas.core.dtypes.missing import isna + +from pandas import ( + ArrowDtype, + DataFrame, + DatetimeIndex, + StringDtype, + concat, +) +from pandas.core import algorithms +from pandas.core.arrays import ( + ArrowExtensionArray, + BaseMaskedArray, + 
BooleanArray, + Categorical, + ExtensionArray, + FloatingArray, + IntegerArray, +) +from pandas.core.arrays.boolean import BooleanDtype +from pandas.core.indexes.api import ( + Index, + MultiIndex, + default_index, + ensure_index_from_sequences, +) +from pandas.core.series import Series +from pandas.core.tools import datetimes as tools + +from pandas.io.common import is_potential_multi_index + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterable, + Mapping, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + DtypeArg, + DtypeObj, + Scalar, + ) + + +class ParserBase: + class BadLineHandleMethod(Enum): + ERROR = 0 + WARN = 1 + SKIP = 2 + + _implicit_index: bool + _first_chunk: bool + keep_default_na: bool + dayfirst: bool + cache_dates: bool + keep_date_col: bool + usecols_dtype: str | None + + def __init__(self, kwds) -> None: + self._implicit_index = False + + self.names = kwds.get("names") + self.orig_names: Sequence[Hashable] | None = None + + self.index_col = kwds.get("index_col", None) + self.unnamed_cols: set = set() + self.index_names: Sequence[Hashable] | None = None + self.col_names: Sequence[Hashable] | None = None + + self.parse_dates = _validate_parse_dates_arg(kwds.pop("parse_dates", False)) + self._parse_date_cols: Iterable = [] + self.date_parser = kwds.pop("date_parser", lib.no_default) + self.date_format = kwds.pop("date_format", None) + self.dayfirst = kwds.pop("dayfirst", False) + self.keep_date_col = kwds.pop("keep_date_col", False) + + self.na_values = kwds.get("na_values") + self.na_fvalues = kwds.get("na_fvalues") + self.na_filter = kwds.get("na_filter", False) + self.keep_default_na = kwds.get("keep_default_na", True) + + self.dtype = copy(kwds.get("dtype", None)) + self.converters = kwds.get("converters") + self.dtype_backend = kwds.get("dtype_backend") + + self.true_values = kwds.get("true_values") + self.false_values = kwds.get("false_values") + self.cache_dates = kwds.pop("cache_dates", True) + + self._date_conv = _make_date_converter( + date_parser=self.date_parser, + date_format=self.date_format, + dayfirst=self.dayfirst, + cache_dates=self.cache_dates, + ) + + # validate header options for mi + self.header = kwds.get("header") + if is_list_like(self.header, allow_sets=False): + if kwds.get("usecols"): + raise ValueError( + "cannot specify usecols when specifying a multi-index header" + ) + if kwds.get("names"): + raise ValueError( + "cannot specify names when specifying a multi-index header" + ) + + # validate index_col that only contains integers + if self.index_col is not None: + # In this case we can pin down index_col as list[int] + if is_integer(self.index_col): + self.index_col = [self.index_col] + elif not ( + is_list_like(self.index_col, allow_sets=False) + and all(map(is_integer, self.index_col)) + ): + raise ValueError( + "index_col must only contain row numbers " + "when specifying a multi-index header" + ) + else: + self.index_col = list(self.index_col) + + self._name_processed = False + + self._first_chunk = True + + self.usecols, self.usecols_dtype = self._validate_usecols_arg(kwds["usecols"]) + + # Fallback to error to pass a sketchy test(test_override_set_noconvert_columns) + # Normally, this arg would get pre-processed earlier on + self.on_bad_lines = kwds.get("on_bad_lines", self.BadLineHandleMethod.ERROR) + + def _validate_parse_dates_presence(self, columns: Sequence[Hashable]) -> Iterable: + """ + Check if parse_dates are in columns. 
+ + If user has provided names for parse_dates, check if those columns + are available. + + Parameters + ---------- + columns : list + List of names of the dataframe. + + Returns + ------- + The names of the columns which will get parsed later if a dict or list + is given as specification. + + Raises + ------ + ValueError + If column to parse_date is not in dataframe. + + """ + cols_needed: Iterable + if is_dict_like(self.parse_dates): + cols_needed = itertools.chain(*self.parse_dates.values()) + elif is_list_like(self.parse_dates): + # a column in parse_dates could be represented + # ColReference = Union[int, str] + # DateGroups = List[ColReference] + # ParseDates = Union[DateGroups, List[DateGroups], + # Dict[ColReference, DateGroups]] + cols_needed = itertools.chain.from_iterable( + col if is_list_like(col) and not isinstance(col, tuple) else [col] + for col in self.parse_dates + ) + else: + cols_needed = [] + + cols_needed = list(cols_needed) + + # get only columns that are references using names (str), not by index + missing_cols = ", ".join( + sorted( + { + col + for col in cols_needed + if isinstance(col, str) and col not in columns + } + ) + ) + if missing_cols: + raise ValueError( + f"Missing column provided to 'parse_dates': '{missing_cols}'" + ) + # Convert positions to actual column names + return [ + col if (isinstance(col, str) or col in columns) else columns[col] + for col in cols_needed + ] + + def close(self) -> None: + pass + + @final + @property + def _has_complex_date_col(self) -> bool: + return isinstance(self.parse_dates, dict) or ( + isinstance(self.parse_dates, list) + and len(self.parse_dates) > 0 + and isinstance(self.parse_dates[0], list) + ) + + @final + def _should_parse_dates(self, i: int) -> bool: + if lib.is_bool(self.parse_dates): + return bool(self.parse_dates) + else: + if self.index_names is not None: + name = self.index_names[i] + else: + name = None + j = i if self.index_col is None else self.index_col[i] + + return (j in self.parse_dates) or ( + name is not None and name in self.parse_dates + ) + + @final + def _extract_multi_indexer_columns( + self, + header, + index_names: Sequence[Hashable] | None, + passed_names: bool = False, + ) -> tuple[ + Sequence[Hashable], Sequence[Hashable] | None, Sequence[Hashable] | None, bool + ]: + """ + Extract and return the names, index_names, col_names if the column + names are a MultiIndex. 
+
+        Parameters
+        ----------
+        header: list of lists
+            The header rows
+        index_names: list, optional
+            The names of the future index
+        passed_names: bool, default False
+            A flag specifying if names were passed
+
+        """
+        if len(header) < 2:
+            return header[0], index_names, None, passed_names
+
+        # the names are the tuples of the header that are not the index cols
+        # 0 is the name of the index, assuming index_col is a list of column
+        # numbers
+        ic = self.index_col
+        if ic is None:
+            ic = []
+
+        if not isinstance(ic, (list, tuple, np.ndarray)):
+            ic = [ic]
+        sic = set(ic)
+
+        # clean the index_names
+        index_names = header.pop(-1)
+        index_names, _, _ = self._clean_index_names(index_names, self.index_col)
+
+        # extract the columns
+        field_count = len(header[0])
+
+        # check if header lengths are equal
+        if not all(len(header_iter) == field_count for header_iter in header[1:]):
+            raise ParserError("Header rows must have an equal number of columns.")
+
+        def extract(r):
+            return tuple(r[i] for i in range(field_count) if i not in sic)
+
+        columns = list(zip(*(extract(r) for r in header)))
+        names = columns.copy()
+        for single_ic in sorted(ic):
+            names.insert(single_ic, single_ic)
+
+        # Clean the column names (if we have an index_col).
+        if len(ic):
+            col_names = [
+                r[ic[0]]
+                if ((r[ic[0]] is not None) and r[ic[0]] not in self.unnamed_cols)
+                else None
+                for r in header
+            ]
+        else:
+            col_names = [None] * len(header)
+
+        passed_names = True
+
+        return names, index_names, col_names, passed_names
+
+    @final
+    def _maybe_make_multi_index_columns(
+        self,
+        columns: Sequence[Hashable],
+        col_names: Sequence[Hashable] | None = None,
+    ) -> Sequence[Hashable] | MultiIndex:
+        # possibly create a column mi here
+        if is_potential_multi_index(columns):
+            list_columns = cast(list[tuple], columns)
+            return MultiIndex.from_tuples(list_columns, names=col_names)
+        return columns
+
+    @final
+    def _make_index(
+        self, data, alldata, columns, indexnamerow: list[Scalar] | None = None
+    ) -> tuple[Index | None, Sequence[Hashable] | MultiIndex]:
+        index: Index | None
+        if not is_index_col(self.index_col) or not self.index_col:
+            index = None
+
+        elif not self._has_complex_date_col:
+            simple_index = self._get_simple_index(alldata, columns)
+            index = self._agg_index(simple_index)
+        elif self._has_complex_date_col:
+            if not self._name_processed:
+                (self.index_names, _, self.index_col) = self._clean_index_names(
+                    list(columns), self.index_col
+                )
+                self._name_processed = True
+            date_index = self._get_complex_date_index(data, columns)
+            index = self._agg_index(date_index, try_parse_dates=False)
+
+        # add names for the index
+        if indexnamerow:
+            coffset = len(indexnamerow) - len(columns)
+            assert index is not None
+            index = index.set_names(indexnamerow[:coffset])
+
+        # maybe create a mi on the columns
+        columns = self._maybe_make_multi_index_columns(columns, self.col_names)
+
+        return index, columns
+
+    @final
+    def _get_simple_index(self, data, columns):
+        def ix(col):
+            if not isinstance(col, str):
+                return col
+            raise ValueError(f"Index {col} invalid")
+
+        to_remove = []
+        index = []
+        for idx in self.index_col:
+            i = ix(idx)
+            to_remove.append(i)
+            index.append(data[i])
+
+        # remove index items from content and columns, don't pop in
+        # loop
+        for i in sorted(to_remove, reverse=True):
+            data.pop(i)
+            if not self._implicit_index:
+                columns.pop(i)
+
+        return index
+
+    @final
+    def _get_complex_date_index(self, data, col_names):
+        def _get_name(icol):
+            if isinstance(icol, str):
+                return icol
+
+            if col_names is None:
raise ValueError(f"Must supply column order to use {icol!s} as index") + + for i, c in enumerate(col_names): + if i == icol: + return c + + to_remove = [] + index = [] + for idx in self.index_col: + name = _get_name(idx) + to_remove.append(name) + index.append(data[name]) + + # remove index items from content and columns, don't pop in + # loop + for c in sorted(to_remove, reverse=True): + data.pop(c) + col_names.remove(c) + + return index + + @final + def _clean_mapping(self, mapping): + """converts col numbers to names""" + if not isinstance(mapping, dict): + return mapping + clean = {} + # for mypy + assert self.orig_names is not None + + for col, v in mapping.items(): + if isinstance(col, int) and col not in self.orig_names: + col = self.orig_names[col] + clean[col] = v + if isinstance(mapping, defaultdict): + remaining_cols = set(self.orig_names) - set(clean.keys()) + clean.update({col: mapping[col] for col in remaining_cols}) + return clean + + @final + def _agg_index(self, index, try_parse_dates: bool = True) -> Index: + arrays = [] + converters = self._clean_mapping(self.converters) + + for i, arr in enumerate(index): + if try_parse_dates and self._should_parse_dates(i): + arr = self._date_conv( + arr, + col=self.index_names[i] if self.index_names is not None else None, + ) + + if self.na_filter: + col_na_values = self.na_values + col_na_fvalues = self.na_fvalues + else: + col_na_values = set() + col_na_fvalues = set() + + if isinstance(self.na_values, dict): + assert self.index_names is not None + col_name = self.index_names[i] + if col_name is not None: + col_na_values, col_na_fvalues = _get_na_values( + col_name, self.na_values, self.na_fvalues, self.keep_default_na + ) + + clean_dtypes = self._clean_mapping(self.dtype) + + cast_type = None + index_converter = False + if self.index_names is not None: + if isinstance(clean_dtypes, dict): + cast_type = clean_dtypes.get(self.index_names[i], None) + + if isinstance(converters, dict): + index_converter = converters.get(self.index_names[i]) is not None + + try_num_bool = not ( + cast_type and is_string_dtype(cast_type) or index_converter + ) + + arr, _ = self._infer_types( + arr, col_na_values | col_na_fvalues, cast_type is None, try_num_bool + ) + arrays.append(arr) + + names = self.index_names + index = ensure_index_from_sequences(arrays, names) + + return index + + @final + def _convert_to_ndarrays( + self, + dct: Mapping, + na_values, + na_fvalues, + verbose: bool = False, + converters=None, + dtypes=None, + ): + result = {} + for c, values in dct.items(): + conv_f = None if converters is None else converters.get(c, None) + if isinstance(dtypes, dict): + cast_type = dtypes.get(c, None) + else: + # single dtype or None + cast_type = dtypes + + if self.na_filter: + col_na_values, col_na_fvalues = _get_na_values( + c, na_values, na_fvalues, self.keep_default_na + ) + else: + col_na_values, col_na_fvalues = set(), set() + + if c in self._parse_date_cols: + # GH#26203 Do not convert columns which get converted to dates + # but replace nans to ensure to_datetime works + mask = algorithms.isin(values, set(col_na_values) | col_na_fvalues) + np.putmask(values, mask, np.nan) + result[c] = values + continue + + if conv_f is not None: + # conv_f applied to data before inference + if cast_type is not None: + warnings.warn( + ( + "Both a converter and dtype were specified " + f"for column {c} - only the converter will be used." 
+                    ),
+                    ParserWarning,
+                    stacklevel=find_stack_level(),
+                )
+
+                try:
+                    values = lib.map_infer(values, conv_f)
+                except ValueError:
+                    mask = algorithms.isin(values, list(na_values)).view(np.uint8)
+                    values = lib.map_infer_mask(values, conv_f, mask)
+
+                cvals, na_count = self._infer_types(
+                    values,
+                    set(col_na_values) | col_na_fvalues,
+                    cast_type is None,
+                    try_num_bool=False,
+                )
+            else:
+                is_ea = is_extension_array_dtype(cast_type)
+                is_str_or_ea_dtype = is_ea or is_string_dtype(cast_type)
+                # skip inference if specified dtype is object
+                # or casting to an EA
+                try_num_bool = not (cast_type and is_str_or_ea_dtype)
+
+                # general type inference and conversion
+                cvals, na_count = self._infer_types(
+                    values,
+                    set(col_na_values) | col_na_fvalues,
+                    cast_type is None,
+                    try_num_bool,
+                )
+
+                # type specified in dtype param or cast_type is an EA
+                if cast_type is not None:
+                    cast_type = pandas_dtype(cast_type)
+                    if cast_type and (cvals.dtype != cast_type or is_ea):
+                        if not is_ea and na_count > 0:
+                            if is_bool_dtype(cast_type):
+                                raise ValueError(f"Bool column has NA values in column {c}")
+                        cvals = self._cast_types(cvals, cast_type, c)
+
+            result[c] = cvals
+            if verbose and na_count:
+                print(f"Filled {na_count} NA values in column {c!s}")
+        return result
+
+    @final
+    def _set_noconvert_dtype_columns(
+        self, col_indices: list[int], names: Sequence[Hashable]
+    ) -> set[int]:
+        """
+        Set the columns that should not undergo dtype conversions.
+
+        Currently, any column that is involved with date parsing will not
+        undergo such conversions. If usecols is specified, the positions of
+        the columns not to cast are relative to usecols, not to all columns.
+
+        Parameters
+        ----------
+        col_indices: The indices specifying order and positions of the columns
+        names: The column names whose order corresponds to the order of
+            col_indices
+
+        Returns
+        -------
+        A set of integers containing the positions of the columns not to convert.
+        """
+        usecols: list[int] | list[str] | None
+        noconvert_columns = set()
+        if self.usecols_dtype == "integer":
+            # A set of integers will be converted to a list in
+            # the correct order every single time.
+            usecols = sorted(self.usecols)
+        elif callable(self.usecols) or self.usecols_dtype not in ("empty", None):
+            # The names attribute should have the correct columns
+            # in the proper order for indexing with parse_dates.
+            usecols = col_indices
+        else:
+            # Usecols is empty.
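+            # (Editorial note, not upstream code) example of the remapping done
+            # by _set() below: with integer usecols {5, 0, 2} (sorted to
+            # [0, 2, 5]), a parse_dates entry of 1 means "the second selected
+            # column", so _set(1) returns usecols[1] == 2, the absolute column
+            # position.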
+            usecols = None
+
+        def _set(x) -> int:
+            if usecols is not None and is_integer(x):
+                x = usecols[x]
+
+            if not is_integer(x):
+                x = col_indices[names.index(x)]
+
+            return x
+
+        if isinstance(self.parse_dates, list):
+            for val in self.parse_dates:
+                if isinstance(val, list):
+                    for k in val:
+                        noconvert_columns.add(_set(k))
+                else:
+                    noconvert_columns.add(_set(val))
+
+        elif isinstance(self.parse_dates, dict):
+            for val in self.parse_dates.values():
+                if isinstance(val, list):
+                    for k in val:
+                        noconvert_columns.add(_set(k))
+                else:
+                    noconvert_columns.add(_set(val))
+
+        elif self.parse_dates:
+            if isinstance(self.index_col, list):
+                for k in self.index_col:
+                    noconvert_columns.add(_set(k))
+            elif self.index_col is not None:
+                noconvert_columns.add(_set(self.index_col))
+
+        return noconvert_columns
+
+    @final
+    def _infer_types(
+        self, values, na_values, no_dtype_specified, try_num_bool: bool = True
+    ) -> tuple[ArrayLike, int]:
+        """
+        Infer types of values, possibly casting
+
+        Parameters
+        ----------
+        values : ndarray
+        na_values : set
+        no_dtype_specified: bool
+            True if no dtype was explicitly specified for the values, in which
+            case inference is allowed to pick a (possibly masked) result type.
+        try_num_bool : bool, default True
+            try to cast values to numeric (first preference) or boolean
+
+        Returns
+        -------
+        converted : ndarray or ExtensionArray
+        na_count : int
+        """
+        na_count = 0
+        if issubclass(values.dtype.type, (np.number, np.bool_)):
+            # If our array has numeric dtype, we don't have to check for strings in isin
+            na_values = np.array([val for val in na_values if not isinstance(val, str)])
+            mask = algorithms.isin(values, na_values)
+            na_count = mask.astype("uint8", copy=False).sum()
+            if na_count > 0:
+                if is_integer_dtype(values):
+                    values = values.astype(np.float64)
+                np.putmask(values, mask, np.nan)
+            return values, na_count
+
+        dtype_backend = self.dtype_backend
+        non_default_dtype_backend = (
+            no_dtype_specified and dtype_backend is not lib.no_default
+        )
+        result: ArrayLike
+
+        if try_num_bool and is_object_dtype(values.dtype):
+            # exclude e.g. DatetimeIndex here
+            try:
+                result, result_mask = lib.maybe_convert_numeric(
+                    values,
+                    na_values,
+                    False,
+                    convert_to_masked_nullable=non_default_dtype_backend,  # type: ignore[arg-type]
+                )
+            except (ValueError, TypeError):
+                # e.g.
encountering datetime string gets ValueError + # TypeError can be raised in floatify + na_count = parsers.sanitize_objects(values, na_values) + result = values + else: + if non_default_dtype_backend: + if result_mask is None: + result_mask = np.zeros(result.shape, dtype=np.bool_) + + if result_mask.all(): + result = IntegerArray( + np.ones(result_mask.shape, dtype=np.int64), result_mask + ) + elif is_integer_dtype(result): + result = IntegerArray(result, result_mask) + elif is_bool_dtype(result): + result = BooleanArray(result, result_mask) + elif is_float_dtype(result): + result = FloatingArray(result, result_mask) + + na_count = result_mask.sum() + else: + na_count = isna(result).sum() + else: + result = values + if values.dtype == np.object_: + na_count = parsers.sanitize_objects(values, na_values) + + if result.dtype == np.object_ and try_num_bool: + result, bool_mask = libops.maybe_convert_bool( + np.asarray(values), + true_values=self.true_values, + false_values=self.false_values, + convert_to_masked_nullable=non_default_dtype_backend, # type: ignore[arg-type] + ) + if result.dtype == np.bool_ and non_default_dtype_backend: + if bool_mask is None: + bool_mask = np.zeros(result.shape, dtype=np.bool_) + result = BooleanArray(result, bool_mask) + elif result.dtype == np.object_ and non_default_dtype_backend: + # read_excel sends array of datetime objects + if not lib.is_datetime_array(result, skipna=True): + dtype = StringDtype() + cls = dtype.construct_array_type() + result = cls._from_sequence(values, dtype=dtype) + + if dtype_backend == "pyarrow": + pa = import_optional_dependency("pyarrow") + if isinstance(result, np.ndarray): + result = ArrowExtensionArray(pa.array(result, from_pandas=True)) + elif isinstance(result, BaseMaskedArray): + if result._mask.all(): + # We want an arrow null array here + result = ArrowExtensionArray(pa.array([None] * len(result))) + else: + result = ArrowExtensionArray( + pa.array(result._data, mask=result._mask) + ) + else: + result = ArrowExtensionArray( + pa.array(result.to_numpy(), from_pandas=True) + ) + + return result, na_count + + @final + def _cast_types(self, values: ArrayLike, cast_type: DtypeObj, column) -> ArrayLike: + """ + Cast values to specified type + + Parameters + ---------- + values : ndarray or ExtensionArray + cast_type : np.dtype or ExtensionDtype + dtype to cast values to + column : string + column name - used only for error reporting + + Returns + ------- + converted : ndarray or ExtensionArray + """ + if isinstance(cast_type, CategoricalDtype): + known_cats = cast_type.categories is not None + + if not is_object_dtype(values.dtype) and not known_cats: + # TODO: this is for consistency with + # c-parser which parses all categories + # as strings + values = lib.ensure_string_array( + values, skipna=False, convert_na_value=False + ) + + cats = Index(values).unique().dropna() + values = Categorical._from_inferred_categories( + cats, cats.get_indexer(values), cast_type, true_values=self.true_values + ) + + # use the EA's implementation of casting + elif isinstance(cast_type, ExtensionDtype): + array_type = cast_type.construct_array_type() + try: + if isinstance(cast_type, BooleanDtype): + # error: Unexpected keyword argument "true_values" for + # "_from_sequence_of_strings" of "ExtensionArray" + return array_type._from_sequence_of_strings( # type: ignore[call-arg] + values, + dtype=cast_type, + true_values=self.true_values, + false_values=self.false_values, + ) + else: + return array_type._from_sequence_of_strings(values, 
dtype=cast_type) + except NotImplementedError as err: + raise NotImplementedError( + f"Extension Array: {array_type} must implement " + "_from_sequence_of_strings in order to be used in parser methods" + ) from err + + elif isinstance(values, ExtensionArray): + values = values.astype(cast_type, copy=False) + elif issubclass(cast_type.type, str): + # TODO: why skipna=True here and False above? some tests depend + # on it here, but nothing fails if we change it above + # (as no tests get there as of 2022-12-06) + values = lib.ensure_string_array( + values, skipna=True, convert_na_value=False + ) + else: + try: + values = astype_array(values, cast_type, copy=True) + except ValueError as err: + raise ValueError( + f"Unable to convert column {column} to type {cast_type}" + ) from err + return values + + @overload + def _do_date_conversions( + self, + names: Index, + data: DataFrame, + ) -> tuple[Sequence[Hashable] | Index, DataFrame]: + ... + + @overload + def _do_date_conversions( + self, + names: Sequence[Hashable], + data: Mapping[Hashable, ArrayLike], + ) -> tuple[Sequence[Hashable], Mapping[Hashable, ArrayLike]]: + ... + + @final + def _do_date_conversions( + self, + names: Sequence[Hashable] | Index, + data: Mapping[Hashable, ArrayLike] | DataFrame, + ) -> tuple[Sequence[Hashable] | Index, Mapping[Hashable, ArrayLike] | DataFrame]: + # returns data, columns + + if self.parse_dates is not None: + data, names = _process_date_conversion( + data, + self._date_conv, + self.parse_dates, + self.index_col, + self.index_names, + names, + keep_date_col=self.keep_date_col, + dtype_backend=self.dtype_backend, + ) + + return names, data + + @final + def _check_data_length( + self, + columns: Sequence[Hashable], + data: Sequence[ArrayLike], + ) -> None: + """Checks if length of data is equal to length of column names. + + One set of trailing commas is allowed. self.index_col not False + results in a ParserError previously when lengths do not match. + + Parameters + ---------- + columns: list of column names + data: list of array-likes containing the data column-wise. + """ + if not self.index_col and len(columns) != len(data) and columns: + empty_str = is_object_dtype(data[-1]) and data[-1] == "" + # error: No overload variant of "__ror__" of "ndarray" matches + # argument type "ExtensionArray" + empty_str_or_na = empty_str | isna(data[-1]) # type: ignore[operator] + if len(columns) == len(data) - 1 and np.all(empty_str_or_na): + return + warnings.warn( + "Length of header or names does not match length of data. This leads " + "to a loss of data with index_col=False.", + ParserWarning, + stacklevel=find_stack_level(), + ) + + @overload + def _evaluate_usecols( + self, + usecols: set[int] | Callable[[Hashable], object], + names: Sequence[Hashable], + ) -> set[int]: + ... + + @overload + def _evaluate_usecols( + self, usecols: set[str], names: Sequence[Hashable] + ) -> set[str]: + ... + + @final + def _evaluate_usecols( + self, + usecols: Callable[[Hashable], object] | set[str] | set[int], + names: Sequence[Hashable], + ) -> set[str] | set[int]: + """ + Check whether or not the 'usecols' parameter + is a callable. If so, enumerates the 'names' + parameter and returns a set of indices for + each entry in 'names' that evaluates to True. + If not a callable, returns 'usecols'. 
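+
+        For example, usecols=lambda c: c.startswith("a") with
+        names=["ab", "b", "ax"] evaluates to {0, 2}.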
+ """ + if callable(usecols): + return {i for i, name in enumerate(names) if usecols(name)} + return usecols + + @final + def _validate_usecols_names(self, usecols, names: Sequence): + """ + Validates that all usecols are present in a given + list of names. If not, raise a ValueError that + shows what usecols are missing. + + Parameters + ---------- + usecols : iterable of usecols + The columns to validate are present in names. + names : iterable of names + The column names to check against. + + Returns + ------- + usecols : iterable of usecols + The `usecols` parameter if the validation succeeds. + + Raises + ------ + ValueError : Columns were missing. Error message will list them. + """ + missing = [c for c in usecols if c not in names] + if len(missing) > 0: + raise ValueError( + f"Usecols do not match columns, columns expected but not found: " + f"{missing}" + ) + + return usecols + + @final + def _validate_usecols_arg(self, usecols): + """ + Validate the 'usecols' parameter. + + Checks whether or not the 'usecols' parameter contains all integers + (column selection by index), strings (column by name) or is a callable. + Raises a ValueError if that is not the case. + + Parameters + ---------- + usecols : list-like, callable, or None + List of columns to use when parsing or a callable that can be used + to filter a list of table columns. + + Returns + ------- + usecols_tuple : tuple + A tuple of (verified_usecols, usecols_dtype). + + 'verified_usecols' is either a set if an array-like is passed in or + 'usecols' if a callable or None is passed in. + + 'usecols_dtype` is the inferred dtype of 'usecols' if an array-like + is passed in or None if a callable or None is passed in. + """ + msg = ( + "'usecols' must either be list-like of all strings, all unicode, " + "all integers or a callable." + ) + if usecols is not None: + if callable(usecols): + return usecols, None + + if not is_list_like(usecols): + # see gh-20529 + # + # Ensure it is iterable container but not string. + raise ValueError(msg) + + usecols_dtype = lib.infer_dtype(usecols, skipna=False) + + if usecols_dtype not in ("empty", "integer", "string"): + raise ValueError(msg) + + usecols = set(usecols) + + return usecols, usecols_dtype + return usecols, None + + @final + def _clean_index_names(self, columns, index_col) -> tuple[list | None, list, list]: + if not is_index_col(index_col): + return None, columns, index_col + + columns = list(columns) + + # In case of no rows and multiindex columns we have to set index_names to + # list of Nones GH#38292 + if not columns: + return [None] * len(index_col), columns, index_col + + cp_cols = list(columns) + index_names: list[str | int | None] = [] + + # don't mutate + index_col = list(index_col) + + for i, c in enumerate(index_col): + if isinstance(c, str): + index_names.append(c) + for j, name in enumerate(cp_cols): + if name == c: + index_col[i] = j + columns.remove(name) + break + else: + name = cp_cols[c] + columns.remove(name) + index_names.append(name) + + # Only clean index names that were placeholders. + for i, name in enumerate(index_names): + if isinstance(name, str) and name in self.unnamed_cols: + index_names[i] = None + + return index_names, columns, index_col + + @final + def _get_empty_meta(self, columns, dtype: DtypeArg | None = None): + columns = list(columns) + + index_col = self.index_col + index_names = self.index_names + + # Convert `dtype` to a defaultdict of some kind. 
+ # This will enable us to write `dtype[col_name]` + # without worrying about KeyError issues later on. + dtype_dict: defaultdict[Hashable, Any] + if not is_dict_like(dtype): + # if dtype == None, default will be object. + default_dtype = dtype or object + dtype_dict = defaultdict(lambda: default_dtype) + else: + dtype = cast(dict, dtype) + dtype_dict = defaultdict( + lambda: object, + {columns[k] if is_integer(k) else k: v for k, v in dtype.items()}, + ) + + # Even though we have no data, the "index" of the empty DataFrame + # could for example still be an empty MultiIndex. Thus, we need to + # check whether we have any index columns specified, via either: + # + # 1) index_col (column indices) + # 2) index_names (column names) + # + # Both must be non-null to ensure a successful construction. Otherwise, + # we have to create a generic empty Index. + index: Index + if (index_col is None or index_col is False) or index_names is None: + index = default_index(0) + else: + data = [Series([], dtype=dtype_dict[name]) for name in index_names] + index = ensure_index_from_sequences(data, names=index_names) + index_col.sort() + + for i, n in enumerate(index_col): + columns.pop(n - i) + + col_dict = { + col_name: Series([], dtype=dtype_dict[col_name]) for col_name in columns + } + + return index, columns, col_dict + + +def _make_date_converter( + date_parser=lib.no_default, + dayfirst: bool = False, + cache_dates: bool = True, + date_format: dict[Hashable, str] | str | None = None, +): + if date_parser is not lib.no_default: + warnings.warn( + "The argument 'date_parser' is deprecated and will " + "be removed in a future version. " + "Please use 'date_format' instead, or read your data in as 'object' dtype " + "and then call 'to_datetime'.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if date_parser is not lib.no_default and date_format is not None: + raise TypeError("Cannot use both 'date_parser' and 'date_format'") + + def unpack_if_single_element(arg): + # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615 + if isinstance(arg, np.ndarray) and arg.ndim == 1 and len(arg) == 1: + return arg[0] + return arg + + def converter(*date_cols, col: Hashable): + if len(date_cols) == 1 and date_cols[0].dtype.kind in "Mm": + return date_cols[0] + + if date_parser is lib.no_default: + strs = parsing.concat_date_cols(date_cols) + date_fmt = ( + date_format.get(col) if isinstance(date_format, dict) else date_format + ) + + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + ".*parsing datetimes with mixed time zones will raise an error", + category=FutureWarning, + ) + str_objs = ensure_object(strs) + try: + result = tools.to_datetime( + str_objs, + format=date_fmt, + utc=False, + dayfirst=dayfirst, + cache=cache_dates, + ) + except (ValueError, TypeError): + # test_usecols_with_parse_dates4 + return str_objs + + if isinstance(result, DatetimeIndex): + arr = result.to_numpy() + arr.flags.writeable = True + return arr + return result._values + else: + try: + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + ".*parsing datetimes with mixed time zones " + "will raise an error", + category=FutureWarning, + ) + pre_parsed = date_parser( + *(unpack_if_single_element(arg) for arg in date_cols) + ) + try: + result = tools.to_datetime( + pre_parsed, + cache=cache_dates, + ) + except (ValueError, TypeError): + # test_read_csv_with_custom_date_parser + result = pre_parsed + if isinstance(result, datetime.datetime): + raise Exception("scalar parser") + 
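+                # (Editorial note, not upstream code) a date_parser returning a
+                # scalar datetime rather than an array is rejected on purpose;
+                # the broad `except Exception` below then falls back to
+                # element-wise parsing via parsing.try_parse_dates.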
return result + except Exception: + # e.g. test_datetime_fractional_seconds + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", + ".*parsing datetimes with mixed time zones " + "will raise an error", + category=FutureWarning, + ) + pre_parsed = parsing.try_parse_dates( + parsing.concat_date_cols(date_cols), + parser=date_parser, + ) + try: + return tools.to_datetime(pre_parsed) + except (ValueError, TypeError): + # TODO: not reached in tests 2023-10-27; needed? + return pre_parsed + + return converter + + +parser_defaults = { + "delimiter": None, + "escapechar": None, + "quotechar": '"', + "quoting": csv.QUOTE_MINIMAL, + "doublequote": True, + "skipinitialspace": False, + "lineterminator": None, + "header": "infer", + "index_col": None, + "names": None, + "skiprows": None, + "skipfooter": 0, + "nrows": None, + "na_values": None, + "keep_default_na": True, + "true_values": None, + "false_values": None, + "converters": None, + "dtype": None, + "cache_dates": True, + "thousands": None, + "comment": None, + "decimal": ".", + # 'engine': 'c', + "parse_dates": False, + "keep_date_col": False, + "dayfirst": False, + "date_parser": lib.no_default, + "date_format": None, + "usecols": None, + # 'iterator': False, + "chunksize": None, + "verbose": False, + "encoding": None, + "compression": None, + "skip_blank_lines": True, + "encoding_errors": "strict", + "on_bad_lines": ParserBase.BadLineHandleMethod.ERROR, + "dtype_backend": lib.no_default, +} + + +def _process_date_conversion( + data_dict, + converter: Callable, + parse_spec, + index_col, + index_names, + columns, + keep_date_col: bool = False, + dtype_backend=lib.no_default, +): + def _isindex(colspec): + return (isinstance(index_col, list) and colspec in index_col) or ( + isinstance(index_names, list) and colspec in index_names + ) + + new_cols = [] + new_data = {} + + orig_names = columns + columns = list(columns) + + date_cols = set() + + if parse_spec is None or isinstance(parse_spec, bool): + return data_dict, columns + + if isinstance(parse_spec, list): + # list of column lists + for colspec in parse_spec: + if is_scalar(colspec) or isinstance(colspec, tuple): + if isinstance(colspec, int) and colspec not in data_dict: + colspec = orig_names[colspec] + if _isindex(colspec): + continue + elif dtype_backend == "pyarrow": + import pyarrow as pa + + dtype = data_dict[colspec].dtype + if isinstance(dtype, ArrowDtype) and ( + pa.types.is_timestamp(dtype.pyarrow_dtype) + or pa.types.is_date(dtype.pyarrow_dtype) + ): + continue + + # Pyarrow engine returns Series which we need to convert to + # numpy array before converter, its a no-op for other parsers + data_dict[colspec] = converter( + np.asarray(data_dict[colspec]), col=colspec + ) + else: + new_name, col, old_names = _try_convert_dates( + converter, colspec, data_dict, orig_names + ) + if new_name in data_dict: + raise ValueError(f"New date column already in dict {new_name}") + new_data[new_name] = col + new_cols.append(new_name) + date_cols.update(old_names) + + elif isinstance(parse_spec, dict): + # dict of new name to column list + for new_name, colspec in parse_spec.items(): + if new_name in data_dict: + raise ValueError(f"Date column {new_name} already in dict") + + _, col, old_names = _try_convert_dates( + converter, + colspec, + data_dict, + orig_names, + target_name=new_name, + ) + + new_data[new_name] = col + + # If original column can be converted to date we keep the converted values + # This can only happen if values are from single column + if len(colspec) == 
1:
+                    new_data[colspec[0]] = col
+
+                new_cols.append(new_name)
+                date_cols.update(old_names)
+
+    if isinstance(data_dict, DataFrame):
+        data_dict = concat([DataFrame(new_data), data_dict], axis=1, copy=False)
+    else:
+        data_dict.update(new_data)
+    new_cols.extend(columns)
+
+    if not keep_date_col:
+        for c in list(date_cols):
+            data_dict.pop(c)
+            new_cols.remove(c)
+
+    return data_dict, new_cols
+
+
+def _try_convert_dates(
+    parser: Callable, colspec, data_dict, columns, target_name: str | None = None
+):
+    colset = set(columns)
+    colnames = []
+
+    for c in colspec:
+        if c in colset:
+            colnames.append(c)
+        elif isinstance(c, int) and c not in columns:
+            colnames.append(columns[c])
+        else:
+            colnames.append(c)
+
+    new_name: tuple | str
+    if all(isinstance(x, tuple) for x in colnames):
+        new_name = tuple(map("_".join, zip(*colnames)))
+    else:
+        new_name = "_".join([str(x) for x in colnames])
+    to_parse = [np.asarray(data_dict[c]) for c in colnames if c in data_dict]
+
+    new_col = parser(*to_parse, col=new_name if target_name is None else target_name)
+    return new_name, new_col, colnames
+
+
+def _get_na_values(col, na_values, na_fvalues, keep_default_na: bool):
+    """
+    Get the NaN values for a given column.
+
+    Parameters
+    ----------
+    col : str
+        The name of the column.
+    na_values : array-like, dict
+        The object listing the NaN values as strings.
+    na_fvalues : array-like, dict
+        The object listing the NaN values as floats.
+    keep_default_na : bool
+        If `na_values` is a dict, and the column is not mapped in the
+        dictionary, whether to return the default NaN values or the empty set.
+
+    Returns
+    -------
+    nan_tuple : A length-two tuple composed of
+
+        1) na_values : the string NaN values for that column.
+        2) na_fvalues : the float NaN values for that column.
+    """
+    if isinstance(na_values, dict):
+        if col in na_values:
+            return na_values[col], na_fvalues[col]
+        else:
+            if keep_default_na:
+                return STR_NA_VALUES, set()
+
+            return set(), set()
+    else:
+        return na_values, na_fvalues
+
+
+def _validate_parse_dates_arg(parse_dates):
+    """
+    Check whether or not the 'parse_dates' parameter
+    is a non-boolean scalar. Raises a TypeError if
+    that is the case.
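+
+    For example, parse_dates=5 or parse_dates="a" raise TypeError, while
+    parse_dates=True, parse_dates=["a", "b"], and
+    parse_dates={"dt": ["a", "b"]} pass validation.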
+ """ + msg = ( + "Only booleans, lists, and dictionaries are accepted " + "for the 'parse_dates' parameter" + ) + + if not ( + parse_dates is None + or lib.is_bool(parse_dates) + or isinstance(parse_dates, (list, dict)) + ): + raise TypeError(msg) + + return parse_dates + + +def is_index_col(col) -> bool: + return col is not None and col is not False diff --git a/moondream/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py b/moondream/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py new file mode 100644 index 0000000000000000000000000000000000000000..0cd788c5e57399597e3fe4ee1b1bf2af4bffd74b --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/io/parsers/c_parser_wrapper.py @@ -0,0 +1,410 @@ +from __future__ import annotations + +from collections import defaultdict +from typing import TYPE_CHECKING +import warnings + +import numpy as np + +from pandas._libs import ( + lib, + parsers, +) +from pandas.compat._optional import import_optional_dependency +from pandas.errors import DtypeWarning +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.concat import ( + concat_compat, + union_categoricals, +) +from pandas.core.dtypes.dtypes import CategoricalDtype + +from pandas.core.indexes.api import ensure_index_from_sequences + +from pandas.io.common import ( + dedup_names, + is_potential_multi_index, +) +from pandas.io.parsers.base_parser import ( + ParserBase, + ParserError, + is_index_col, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Mapping, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + DtypeArg, + DtypeObj, + ReadCsvBuffer, + ) + + from pandas import ( + Index, + MultiIndex, + ) + + +class CParserWrapper(ParserBase): + low_memory: bool + _reader: parsers.TextReader + + def __init__(self, src: ReadCsvBuffer[str], **kwds) -> None: + super().__init__(kwds) + self.kwds = kwds + kwds = kwds.copy() + + self.low_memory = kwds.pop("low_memory", False) + + # #2442 + # error: Cannot determine type of 'index_col' + kwds["allow_leading_cols"] = ( + self.index_col is not False # type: ignore[has-type] + ) + + # GH20529, validate usecol arg before TextReader + kwds["usecols"] = self.usecols + + # Have to pass int, would break tests using TextReader directly otherwise :( + kwds["on_bad_lines"] = self.on_bad_lines.value + + for key in ( + "storage_options", + "encoding", + "memory_map", + "compression", + ): + kwds.pop(key, None) + + kwds["dtype"] = ensure_dtype_objs(kwds.get("dtype", None)) + if "dtype_backend" not in kwds or kwds["dtype_backend"] is lib.no_default: + kwds["dtype_backend"] = "numpy" + if kwds["dtype_backend"] == "pyarrow": + # Fail here loudly instead of in cython after reading + import_optional_dependency("pyarrow") + self._reader = parsers.TextReader(src, **kwds) + + self.unnamed_cols = self._reader.unnamed_cols + + # error: Cannot determine type of 'names' + passed_names = self.names is None # type: ignore[has-type] + + if self._reader.header is None: + self.names = None + else: + # error: Cannot determine type of 'names' + # error: Cannot determine type of 'index_names' + ( + self.names, # type: ignore[has-type] + self.index_names, + self.col_names, + passed_names, + ) = self._extract_multi_indexer_columns( + self._reader.header, + self.index_names, # type: ignore[has-type] + passed_names, + ) + + # error: Cannot determine type of 'names' + if self.names is None: # type: ignore[has-type] + self.names = 
list(range(self._reader.table_width)) + + # gh-9755 + # + # need to set orig_names here first + # so that proper indexing can be done + # with _set_noconvert_columns + # + # once names has been filtered, we will + # then set orig_names again to names + # error: Cannot determine type of 'names' + self.orig_names = self.names[:] # type: ignore[has-type] + + if self.usecols: + usecols = self._evaluate_usecols(self.usecols, self.orig_names) + + # GH 14671 + # assert for mypy, orig_names is List or None, None would error in issubset + assert self.orig_names is not None + if self.usecols_dtype == "string" and not set(usecols).issubset( + self.orig_names + ): + self._validate_usecols_names(usecols, self.orig_names) + + # error: Cannot determine type of 'names' + if len(self.names) > len(usecols): # type: ignore[has-type] + # error: Cannot determine type of 'names' + self.names = [ # type: ignore[has-type] + n + # error: Cannot determine type of 'names' + for i, n in enumerate(self.names) # type: ignore[has-type] + if (i in usecols or n in usecols) + ] + + # error: Cannot determine type of 'names' + if len(self.names) < len(usecols): # type: ignore[has-type] + # error: Cannot determine type of 'names' + self._validate_usecols_names( + usecols, + self.names, # type: ignore[has-type] + ) + + # error: Cannot determine type of 'names' + self._validate_parse_dates_presence(self.names) # type: ignore[has-type] + self._set_noconvert_columns() + + # error: Cannot determine type of 'names' + self.orig_names = self.names # type: ignore[has-type] + + if not self._has_complex_date_col: + # error: Cannot determine type of 'index_col' + if self._reader.leading_cols == 0 and is_index_col( + self.index_col # type: ignore[has-type] + ): + self._name_processed = True + ( + index_names, + # error: Cannot determine type of 'names' + self.names, # type: ignore[has-type] + self.index_col, + ) = self._clean_index_names( + # error: Cannot determine type of 'names' + self.names, # type: ignore[has-type] + # error: Cannot determine type of 'index_col' + self.index_col, # type: ignore[has-type] + ) + + if self.index_names is None: + self.index_names = index_names + + if self._reader.header is None and not passed_names: + assert self.index_names is not None + self.index_names = [None] * len(self.index_names) + + self._implicit_index = self._reader.leading_cols > 0 + + def close(self) -> None: + # close handles opened by C parser + try: + self._reader.close() + except ValueError: + pass + + def _set_noconvert_columns(self) -> None: + """ + Set the columns that should not undergo dtype conversions. + + Currently, any column that is involved with date parsing will not + undergo such conversions. 
+ """ + assert self.orig_names is not None + # error: Cannot determine type of 'names' + + # much faster than using orig_names.index(x) xref GH#44106 + names_dict = {x: i for i, x in enumerate(self.orig_names)} + col_indices = [names_dict[x] for x in self.names] # type: ignore[has-type] + # error: Cannot determine type of 'names' + noconvert_columns = self._set_noconvert_dtype_columns( + col_indices, + self.names, # type: ignore[has-type] + ) + for col in noconvert_columns: + self._reader.set_noconvert(col) + + def read( + self, + nrows: int | None = None, + ) -> tuple[ + Index | MultiIndex | None, + Sequence[Hashable] | MultiIndex, + Mapping[Hashable, ArrayLike], + ]: + index: Index | MultiIndex | None + column_names: Sequence[Hashable] | MultiIndex + try: + if self.low_memory: + chunks = self._reader.read_low_memory(nrows) + # destructive to chunks + data = _concatenate_chunks(chunks) + + else: + data = self._reader.read(nrows) + except StopIteration: + if self._first_chunk: + self._first_chunk = False + names = dedup_names( + self.orig_names, + is_potential_multi_index(self.orig_names, self.index_col), + ) + index, columns, col_dict = self._get_empty_meta( + names, + dtype=self.dtype, + ) + columns = self._maybe_make_multi_index_columns(columns, self.col_names) + + if self.usecols is not None: + columns = self._filter_usecols(columns) + + col_dict = {k: v for k, v in col_dict.items() if k in columns} + + return index, columns, col_dict + + else: + self.close() + raise + + # Done with first read, next time raise StopIteration + self._first_chunk = False + + # error: Cannot determine type of 'names' + names = self.names # type: ignore[has-type] + + if self._reader.leading_cols: + if self._has_complex_date_col: + raise NotImplementedError("file structure not yet supported") + + # implicit index, no index names + arrays = [] + + if self.index_col and self._reader.leading_cols != len(self.index_col): + raise ParserError( + "Could not construct index. Requested to use " + f"{len(self.index_col)} number of columns, but " + f"{self._reader.leading_cols} left to parse." + ) + + for i in range(self._reader.leading_cols): + if self.index_col is None: + values = data.pop(i) + else: + values = data.pop(self.index_col[i]) + + values = self._maybe_parse_dates(values, i, try_parse_dates=True) + arrays.append(values) + + index = ensure_index_from_sequences(arrays) + + if self.usecols is not None: + names = self._filter_usecols(names) + + names = dedup_names(names, is_potential_multi_index(names, self.index_col)) + + # rename dict keys + data_tups = sorted(data.items()) + data = {k: v for k, (i, v) in zip(names, data_tups)} + + column_names, date_data = self._do_date_conversions(names, data) + + # maybe create a mi on the columns + column_names = self._maybe_make_multi_index_columns( + column_names, self.col_names + ) + + else: + # rename dict keys + data_tups = sorted(data.items()) + + # ugh, mutation + + # assert for mypy, orig_names is List or None, None would error in list(...) 
+ assert self.orig_names is not None + names = list(self.orig_names) + names = dedup_names(names, is_potential_multi_index(names, self.index_col)) + + if self.usecols is not None: + names = self._filter_usecols(names) + + # columns as list + alldata = [x[1] for x in data_tups] + if self.usecols is None: + self._check_data_length(names, alldata) + + data = {k: v for k, (i, v) in zip(names, data_tups)} + + names, date_data = self._do_date_conversions(names, data) + index, column_names = self._make_index(date_data, alldata, names) + + return index, column_names, date_data + + def _filter_usecols(self, names: Sequence[Hashable]) -> Sequence[Hashable]: + # hackish + usecols = self._evaluate_usecols(self.usecols, names) + if usecols is not None and len(names) != len(usecols): + names = [ + name for i, name in enumerate(names) if i in usecols or name in usecols + ] + return names + + def _maybe_parse_dates(self, values, index: int, try_parse_dates: bool = True): + if try_parse_dates and self._should_parse_dates(index): + values = self._date_conv( + values, + col=self.index_names[index] if self.index_names is not None else None, + ) + return values + + +def _concatenate_chunks(chunks: list[dict[int, ArrayLike]]) -> dict: + """ + Concatenate chunks of data read with low_memory=True. + + The tricky part is handling Categoricals, where different chunks + may have different inferred categories. + """ + names = list(chunks[0].keys()) + warning_columns = [] + + result: dict = {} + for name in names: + arrs = [chunk.pop(name) for chunk in chunks] + # Check each arr for consistent types. + dtypes = {a.dtype for a in arrs} + non_cat_dtypes = {x for x in dtypes if not isinstance(x, CategoricalDtype)} + + dtype = dtypes.pop() + if isinstance(dtype, CategoricalDtype): + result[name] = union_categoricals(arrs, sort_categories=False) + else: + result[name] = concat_compat(arrs) + if len(non_cat_dtypes) > 1 and result[name].dtype == np.dtype(object): + warning_columns.append(str(name)) + + if warning_columns: + warning_names = ",".join(warning_columns) + warning_message = " ".join( + [ + f"Columns ({warning_names}) have mixed types. " + f"Specify dtype option on import or set low_memory=False." + ] + ) + warnings.warn(warning_message, DtypeWarning, stacklevel=find_stack_level()) + return result + + +def ensure_dtype_objs( + dtype: DtypeArg | dict[Hashable, DtypeArg] | None +) -> DtypeObj | dict[Hashable, DtypeObj] | None: + """ + Ensure we have either None, a dtype object, or a dictionary mapping to + dtype objects. 
+ """ + if isinstance(dtype, defaultdict): + # "None" not callable [misc] + default_dtype = pandas_dtype(dtype.default_factory()) # type: ignore[misc] + dtype_converted: defaultdict = defaultdict(lambda: default_dtype) + for key in dtype.keys(): + dtype_converted[key] = pandas_dtype(dtype[key]) + return dtype_converted + elif isinstance(dtype, dict): + return {k: pandas_dtype(dtype[k]) for k in dtype} + elif dtype is not None: + return pandas_dtype(dtype) + return dtype diff --git a/moondream/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py b/moondream/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..79e7554a5744cf439a65e9fd1e18782a0fa71548 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/io/parsers/python_parser.py @@ -0,0 +1,1387 @@ +from __future__ import annotations + +from collections import ( + abc, + defaultdict, +) +from collections.abc import ( + Hashable, + Iterator, + Mapping, + Sequence, +) +import csv +from io import StringIO +import re +from typing import ( + IO, + TYPE_CHECKING, + DefaultDict, + Literal, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas.errors import ( + EmptyDataError, + ParserError, + ParserWarning, +) +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.common import ( + is_bool_dtype, + is_integer, + is_numeric_dtype, +) +from pandas.core.dtypes.inference import is_dict_like + +from pandas.io.common import ( + dedup_names, + is_potential_multi_index, +) +from pandas.io.parsers.base_parser import ( + ParserBase, + parser_defaults, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + ReadCsvBuffer, + Scalar, + ) + + from pandas import ( + Index, + MultiIndex, + ) + +# BOM character (byte order mark) +# This exists at the beginning of a file to indicate endianness +# of a file (stream). Unfortunately, this marker screws up parsing, +# so we need to remove it if we see it. +_BOM = "\ufeff" + + +class PythonParser(ParserBase): + _no_thousands_columns: set[int] + + def __init__(self, f: ReadCsvBuffer[str] | list, **kwds) -> None: + """ + Workhorse function for processing nested list into DataFrame + """ + super().__init__(kwds) + + self.data: Iterator[str] | None = None + self.buf: list = [] + self.pos = 0 + self.line_pos = 0 + + self.skiprows = kwds["skiprows"] + + if callable(self.skiprows): + self.skipfunc = self.skiprows + else: + self.skipfunc = lambda x: x in self.skiprows + + self.skipfooter = _validate_skipfooter_arg(kwds["skipfooter"]) + self.delimiter = kwds["delimiter"] + + self.quotechar = kwds["quotechar"] + if isinstance(self.quotechar, str): + self.quotechar = str(self.quotechar) + + self.escapechar = kwds["escapechar"] + self.doublequote = kwds["doublequote"] + self.skipinitialspace = kwds["skipinitialspace"] + self.lineterminator = kwds["lineterminator"] + self.quoting = kwds["quoting"] + self.skip_blank_lines = kwds["skip_blank_lines"] + + self.has_index_names = False + if "has_index_names" in kwds: + self.has_index_names = kwds["has_index_names"] + + self.verbose = kwds["verbose"] + + self.thousands = kwds["thousands"] + self.decimal = kwds["decimal"] + + self.comment = kwds["comment"] + + # Set self.data to something that can read lines. 
+ if isinstance(f, list): + # read_excel: f is a list + self.data = cast(Iterator[str], f) + else: + assert hasattr(f, "readline") + self.data = self._make_reader(f) + + # Get columns in two steps: infer from data, then + # infer column indices from self.usecols if it is specified. + self._col_indices: list[int] | None = None + columns: list[list[Scalar | None]] + ( + columns, + self.num_original_columns, + self.unnamed_cols, + ) = self._infer_columns() + + # Now self.columns has the set of columns that we will process. + # The original set is stored in self.original_columns. + # error: Cannot determine type of 'index_names' + ( + self.columns, + self.index_names, + self.col_names, + _, + ) = self._extract_multi_indexer_columns( + columns, + self.index_names, # type: ignore[has-type] + ) + + # get popped off for index + self.orig_names: list[Hashable] = list(self.columns) + + # needs to be cleaned/refactored + # multiple date column thing turning into a real spaghetti factory + + if not self._has_complex_date_col: + (index_names, self.orig_names, self.columns) = self._get_index_name() + self._name_processed = True + if self.index_names is None: + self.index_names = index_names + + if self._col_indices is None: + self._col_indices = list(range(len(self.columns))) + + self._parse_date_cols = self._validate_parse_dates_presence(self.columns) + self._no_thousands_columns = self._set_no_thousand_columns() + + if len(self.decimal) != 1: + raise ValueError("Only length-1 decimal markers supported") + + @cache_readonly + def num(self) -> re.Pattern: + decimal = re.escape(self.decimal) + if self.thousands is None: + regex = rf"^[\-\+]?[0-9]*({decimal}[0-9]*)?([0-9]?(E|e)\-?[0-9]+)?$" + else: + thousands = re.escape(self.thousands) + regex = ( + rf"^[\-\+]?([0-9]+{thousands}|[0-9])*({decimal}[0-9]*)?" + rf"([0-9]?(E|e)\-?[0-9]+)?$" + ) + return re.compile(regex) + + def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]): + sep = self.delimiter + + if sep is None or len(sep) == 1: + if self.lineterminator: + raise ValueError( + "Custom line terminators not supported in python parser (yet)" + ) + + class MyDialect(csv.Dialect): + delimiter = self.delimiter + quotechar = self.quotechar + escapechar = self.escapechar + doublequote = self.doublequote + skipinitialspace = self.skipinitialspace + quoting = self.quoting + lineterminator = "\n" + + dia = MyDialect + + if sep is not None: + dia.delimiter = sep + else: + # attempt to sniff the delimiter from the first valid line, + # i.e. 
no comment line and not in skiprows + line = f.readline() + lines = self._check_comments([[line]])[0] + while self.skipfunc(self.pos) or not lines: + self.pos += 1 + line = f.readline() + lines = self._check_comments([[line]])[0] + lines_str = cast(list[str], lines) + + # since `line` was a string, lines will be a list containing + # only a single string + line = lines_str[0] + + self.pos += 1 + self.line_pos += 1 + sniffed = csv.Sniffer().sniff(line) + dia.delimiter = sniffed.delimiter + + # Note: encoding is irrelevant here + line_rdr = csv.reader(StringIO(line), dialect=dia) + self.buf.extend(list(line_rdr)) + + # Note: encoding is irrelevant here + reader = csv.reader(f, dialect=dia, strict=True) + + else: + + def _read(): + line = f.readline() + pat = re.compile(sep) + + yield pat.split(line.strip()) + + for line in f: + yield pat.split(line.strip()) + + reader = _read() + + return reader + + def read( + self, rows: int | None = None + ) -> tuple[ + Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike] + ]: + try: + content = self._get_lines(rows) + except StopIteration: + if self._first_chunk: + content = [] + else: + self.close() + raise + + # done with first read, next time raise StopIteration + self._first_chunk = False + + columns: Sequence[Hashable] = list(self.orig_names) + if not len(content): # pragma: no cover + # DataFrame with the right metadata, even though it's length 0 + # error: Cannot determine type of 'index_col' + names = dedup_names( + self.orig_names, + is_potential_multi_index( + self.orig_names, + self.index_col, # type: ignore[has-type] + ), + ) + index, columns, col_dict = self._get_empty_meta( + names, + self.dtype, + ) + conv_columns = self._maybe_make_multi_index_columns(columns, self.col_names) + return index, conv_columns, col_dict + + # handle new style for names in index + count_empty_content_vals = count_empty_vals(content[0]) + indexnamerow = None + if self.has_index_names and count_empty_content_vals == len(columns): + indexnamerow = content[0] + content = content[1:] + + alldata = self._rows_to_cols(content) + data, columns = self._exclude_implicit_index(alldata) + + conv_data = self._convert_data(data) + columns, conv_data = self._do_date_conversions(columns, conv_data) + + index, result_columns = self._make_index( + conv_data, alldata, columns, indexnamerow + ) + + return index, result_columns, conv_data + + def _exclude_implicit_index( + self, + alldata: list[np.ndarray], + ) -> tuple[Mapping[Hashable, np.ndarray], Sequence[Hashable]]: + # error: Cannot determine type of 'index_col' + names = dedup_names( + self.orig_names, + is_potential_multi_index( + self.orig_names, + self.index_col, # type: ignore[has-type] + ), + ) + + offset = 0 + if self._implicit_index: + # error: Cannot determine type of 'index_col' + offset = len(self.index_col) # type: ignore[has-type] + + len_alldata = len(alldata) + self._check_data_length(names, alldata) + + return { + name: alldata[i + offset] for i, name in enumerate(names) if i < len_alldata + }, names + + # legacy + def get_chunk( + self, size: int | None = None + ) -> tuple[ + Index | None, Sequence[Hashable] | MultiIndex, Mapping[Hashable, ArrayLike] + ]: + if size is None: + # error: "PythonParser" has no attribute "chunksize" + size = self.chunksize # type: ignore[attr-defined] + return self.read(rows=size) + + def _convert_data( + self, + data: Mapping[Hashable, np.ndarray], + ) -> Mapping[Hashable, ArrayLike]: + # apply converters + clean_conv = self._clean_mapping(self.converters) + 
clean_dtypes = self._clean_mapping(self.dtype) + + # Apply NA values. + clean_na_values = {} + clean_na_fvalues = {} + + if isinstance(self.na_values, dict): + for col in self.na_values: + na_value = self.na_values[col] + na_fvalue = self.na_fvalues[col] + + if isinstance(col, int) and col not in self.orig_names: + col = self.orig_names[col] + + clean_na_values[col] = na_value + clean_na_fvalues[col] = na_fvalue + else: + clean_na_values = self.na_values + clean_na_fvalues = self.na_fvalues + + return self._convert_to_ndarrays( + data, + clean_na_values, + clean_na_fvalues, + self.verbose, + clean_conv, + clean_dtypes, + ) + + @cache_readonly + def _have_mi_columns(self) -> bool: + if self.header is None: + return False + + header = self.header + if isinstance(header, (list, tuple, np.ndarray)): + return len(header) > 1 + else: + return False + + def _infer_columns( + self, + ) -> tuple[list[list[Scalar | None]], int, set[Scalar | None]]: + names = self.names + num_original_columns = 0 + clear_buffer = True + unnamed_cols: set[Scalar | None] = set() + + if self.header is not None: + header = self.header + have_mi_columns = self._have_mi_columns + + if isinstance(header, (list, tuple, np.ndarray)): + # we have a mi columns, so read an extra line + if have_mi_columns: + header = list(header) + [header[-1] + 1] + else: + header = [header] + + columns: list[list[Scalar | None]] = [] + for level, hr in enumerate(header): + try: + line = self._buffered_line() + + while self.line_pos <= hr: + line = self._next_line() + + except StopIteration as err: + if 0 < self.line_pos <= hr and ( + not have_mi_columns or hr != header[-1] + ): + # If no rows we want to raise a different message and if + # we have mi columns, the last line is not part of the header + joi = list(map(str, header[:-1] if have_mi_columns else header)) + msg = f"[{','.join(joi)}], len of {len(joi)}, " + raise ValueError( + f"Passed header={msg}" + f"but only {self.line_pos} lines in file" + ) from err + + # We have an empty file, so check + # if columns are provided. 
That will + # serve as the 'line' for parsing + if have_mi_columns and hr > 0: + if clear_buffer: + self._clear_buffer() + columns.append([None] * len(columns[-1])) + return columns, num_original_columns, unnamed_cols + + if not self.names: + raise EmptyDataError("No columns to parse from file") from err + + line = self.names[:] + + this_columns: list[Scalar | None] = [] + this_unnamed_cols = [] + + for i, c in enumerate(line): + if c == "": + if have_mi_columns: + col_name = f"Unnamed: {i}_level_{level}" + else: + col_name = f"Unnamed: {i}" + + this_unnamed_cols.append(i) + this_columns.append(col_name) + else: + this_columns.append(c) + + if not have_mi_columns: + counts: DefaultDict = defaultdict(int) + # Ensure that regular columns are used before unnamed ones + # to keep given names and mangle unnamed columns + col_loop_order = [ + i + for i in range(len(this_columns)) + if i not in this_unnamed_cols + ] + this_unnamed_cols + + # TODO: Use pandas.io.common.dedup_names instead (see #50371) + for i in col_loop_order: + col = this_columns[i] + old_col = col + cur_count = counts[col] + + if cur_count > 0: + while cur_count > 0: + counts[old_col] = cur_count + 1 + col = f"{old_col}.{cur_count}" + if col in this_columns: + cur_count += 1 + else: + cur_count = counts[col] + + if ( + self.dtype is not None + and is_dict_like(self.dtype) + and self.dtype.get(old_col) is not None + and self.dtype.get(col) is None + ): + self.dtype.update({col: self.dtype.get(old_col)}) + this_columns[i] = col + counts[col] = cur_count + 1 + elif have_mi_columns: + # if we have grabbed an extra line, but its not in our + # format so save in the buffer, and create an blank extra + # line for the rest of the parsing code + if hr == header[-1]: + lc = len(this_columns) + # error: Cannot determine type of 'index_col' + sic = self.index_col # type: ignore[has-type] + ic = len(sic) if sic is not None else 0 + unnamed_count = len(this_unnamed_cols) + + # if wrong number of blanks or no index, not our format + if (lc != unnamed_count and lc - ic > unnamed_count) or ic == 0: + clear_buffer = False + this_columns = [None] * lc + self.buf = [self.buf[-1]] + + columns.append(this_columns) + unnamed_cols.update({this_columns[i] for i in this_unnamed_cols}) + + if len(columns) == 1: + num_original_columns = len(this_columns) + + if clear_buffer: + self._clear_buffer() + + first_line: list[Scalar] | None + if names is not None: + # Read first row after header to check if data are longer + try: + first_line = self._next_line() + except StopIteration: + first_line = None + + len_first_data_row = 0 if first_line is None else len(first_line) + + if len(names) > len(columns[0]) and len(names) > len_first_data_row: + raise ValueError( + "Number of passed names did not match " + "number of header fields in the file" + ) + if len(columns) > 1: + raise TypeError("Cannot pass names with multi-index columns") + + if self.usecols is not None: + # Set _use_cols. We don't store columns because they are + # overwritten. 
+ self._handle_usecols(columns, names, num_original_columns) + else: + num_original_columns = len(names) + if self._col_indices is not None and len(names) != len( + self._col_indices + ): + columns = [[names[i] for i in sorted(self._col_indices)]] + else: + columns = [names] + else: + columns = self._handle_usecols( + columns, columns[0], num_original_columns + ) + else: + ncols = len(self._header_line) + num_original_columns = ncols + + if not names: + columns = [list(range(ncols))] + columns = self._handle_usecols(columns, columns[0], ncols) + elif self.usecols is None or len(names) >= ncols: + columns = self._handle_usecols([names], names, ncols) + num_original_columns = len(names) + elif not callable(self.usecols) and len(names) != len(self.usecols): + raise ValueError( + "Number of passed names did not match number of " + "header fields in the file" + ) + else: + # Ignore output but set used columns. + columns = [names] + self._handle_usecols(columns, columns[0], ncols) + + return columns, num_original_columns, unnamed_cols + + @cache_readonly + def _header_line(self): + # Store line for reuse in _get_index_name + if self.header is not None: + return None + + try: + line = self._buffered_line() + except StopIteration as err: + if not self.names: + raise EmptyDataError("No columns to parse from file") from err + + line = self.names[:] + return line + + def _handle_usecols( + self, + columns: list[list[Scalar | None]], + usecols_key: list[Scalar | None], + num_original_columns: int, + ) -> list[list[Scalar | None]]: + """ + Sets self._col_indices + + usecols_key is used if there are string usecols. + """ + col_indices: set[int] | list[int] + if self.usecols is not None: + if callable(self.usecols): + col_indices = self._evaluate_usecols(self.usecols, usecols_key) + elif any(isinstance(u, str) for u in self.usecols): + if len(columns) > 1: + raise ValueError( + "If using multiple headers, usecols must be integers." + ) + col_indices = [] + + for col in self.usecols: + if isinstance(col, str): + try: + col_indices.append(usecols_key.index(col)) + except ValueError: + self._validate_usecols_names(self.usecols, usecols_key) + else: + col_indices.append(col) + else: + missing_usecols = [ + col for col in self.usecols if col >= num_original_columns + ] + if missing_usecols: + raise ParserError( + "Defining usecols with out-of-bounds indices is not allowed. " + f"{missing_usecols} are out-of-bounds.", + ) + col_indices = self.usecols + + columns = [ + [n for i, n in enumerate(column) if i in col_indices] + for column in columns + ] + self._col_indices = sorted(col_indices) + return columns + + def _buffered_line(self) -> list[Scalar]: + """ + Return a line from buffer, filling buffer if required. + """ + if len(self.buf) > 0: + return self.buf[0] + else: + return self._next_line() + + def _check_for_bom(self, first_row: list[Scalar]) -> list[Scalar]: + """ + Checks whether the file begins with the BOM character. + If it does, remove it. In addition, if there is quoting + in the field subsequent to the BOM, remove it as well + because it technically takes place at the beginning of + the name, not the middle of it. + """ + # first_row will be a list, so we need to check + # that that list is not empty before proceeding. + if not first_row: + return first_row + + # The first element of this row is the one that could have the + # BOM that we want to remove. Check that the first element is a + # string before proceeding. 
+ if not isinstance(first_row[0], str): + return first_row + + # Check that the string is not empty, as that would + # obviously not have a BOM at the start of it. + if not first_row[0]: + return first_row + + # Since the string is non-empty, check that it does + # in fact begin with a BOM. + first_elt = first_row[0][0] + if first_elt != _BOM: + return first_row + + first_row_bom = first_row[0] + new_row: str + + if len(first_row_bom) > 1 and first_row_bom[1] == self.quotechar: + start = 2 + quote = first_row_bom[1] + end = first_row_bom[2:].index(quote) + 2 + + # Extract the data between the quotation marks + new_row = first_row_bom[start:end] + + # Extract any remaining data after the second + # quotation mark. + if len(first_row_bom) > end + 1: + new_row += first_row_bom[end + 1 :] + + else: + # No quotation so just remove BOM from first element + new_row = first_row_bom[1:] + + new_row_list: list[Scalar] = [new_row] + return new_row_list + first_row[1:] + + def _is_line_empty(self, line: list[Scalar]) -> bool: + """ + Check if a line is empty or not. + + Parameters + ---------- + line : str, array-like + The line of data to check. + + Returns + ------- + boolean : Whether or not the line is empty. + """ + return not line or all(not x for x in line) + + def _next_line(self) -> list[Scalar]: + if isinstance(self.data, list): + while self.skipfunc(self.pos): + if self.pos >= len(self.data): + break + self.pos += 1 + + while True: + try: + line = self._check_comments([self.data[self.pos]])[0] + self.pos += 1 + # either uncommented or blank to begin with + if not self.skip_blank_lines and ( + self._is_line_empty(self.data[self.pos - 1]) or line + ): + break + if self.skip_blank_lines: + ret = self._remove_empty_lines([line]) + if ret: + line = ret[0] + break + except IndexError: + raise StopIteration + else: + while self.skipfunc(self.pos): + self.pos += 1 + # assert for mypy, data is Iterator[str] or None, would error in next + assert self.data is not None + next(self.data) + + while True: + orig_line = self._next_iter_line(row_num=self.pos + 1) + self.pos += 1 + + if orig_line is not None: + line = self._check_comments([orig_line])[0] + + if self.skip_blank_lines: + ret = self._remove_empty_lines([line]) + + if ret: + line = ret[0] + break + elif self._is_line_empty(orig_line) or line: + break + + # This was the first line of the file, + # which could contain the BOM at the + # beginning of it. + if self.pos == 1: + line = self._check_for_bom(line) + + self.line_pos += 1 + self.buf.append(line) + return line + + def _alert_malformed(self, msg: str, row_num: int) -> None: + """ + Alert a user about a malformed row, depending on value of + `self.on_bad_lines` enum. + + If `self.on_bad_lines` is ERROR, the alert will be `ParserError`. + If `self.on_bad_lines` is WARN, the alert will be printed out. + + Parameters + ---------- + msg: str + The error message to display. + row_num: int + The row number where the parsing error occurred. + Because this row number is displayed, we 1-index, + even though we 0-index internally. + """ + if self.on_bad_lines == self.BadLineHandleMethod.ERROR: + raise ParserError(msg) + if self.on_bad_lines == self.BadLineHandleMethod.WARN: + warnings.warn( + f"Skipping line {row_num}: {msg}\n", + ParserWarning, + stacklevel=find_stack_level(), + ) + + def _next_iter_line(self, row_num: int) -> list[Scalar] | None: + """ + Wrapper around iterating through `self.data` (CSV source). 
+ + When a CSV error is raised, we check for specific + error messages that allow us to customize the + error message displayed to the user. + + Parameters + ---------- + row_num: int + The row number of the line being parsed. + """ + try: + # assert for mypy, data is Iterator[str] or None, would error in next + assert self.data is not None + line = next(self.data) + # for mypy + assert isinstance(line, list) + return line + except csv.Error as e: + if self.on_bad_lines in ( + self.BadLineHandleMethod.ERROR, + self.BadLineHandleMethod.WARN, + ): + msg = str(e) + + if "NULL byte" in msg or "line contains NUL" in msg: + msg = ( + "NULL byte detected. This byte " + "cannot be processed in Python's " + "native csv library at the moment, " + "so please pass in engine='c' instead" + ) + + if self.skipfooter > 0: + reason = ( + "Error could possibly be due to " + "parsing errors in the skipped footer rows " + "(the skipfooter keyword is only applied " + "after Python's csv library has parsed " + "all rows)." + ) + msg += ". " + reason + + self._alert_malformed(msg, row_num) + return None + + def _check_comments(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + if self.comment is None: + return lines + ret = [] + for line in lines: + rl = [] + for x in line: + if ( + not isinstance(x, str) + or self.comment not in x + or x in self.na_values + ): + rl.append(x) + else: + x = x[: x.find(self.comment)] + if len(x) > 0: + rl.append(x) + break + ret.append(rl) + return ret + + def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + """ + Iterate through the lines and remove any that are + either empty or contain only one whitespace value + + Parameters + ---------- + lines : list of list of Scalars + The array of lines that we are to filter. + + Returns + ------- + filtered_lines : list of list of Scalars + The same array of lines with the "empty" ones removed. + """ + # Remove empty lines and lines with only one whitespace value + ret = [ + line + for line in lines + if ( + len(line) > 1 + or len(line) == 1 + and (not isinstance(line[0], str) or line[0].strip()) + ) + ] + return ret + + def _check_thousands(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + if self.thousands is None: + return lines + + return self._search_replace_num_columns( + lines=lines, search=self.thousands, replace="" + ) + + def _search_replace_num_columns( + self, lines: list[list[Scalar]], search: str, replace: str + ) -> list[list[Scalar]]: + ret = [] + for line in lines: + rl = [] + for i, x in enumerate(line): + if ( + not isinstance(x, str) + or search not in x + or i in self._no_thousands_columns + or not self.num.search(x.strip()) + ): + rl.append(x) + else: + rl.append(x.replace(search, replace)) + ret.append(rl) + return ret + + def _check_decimal(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + if self.decimal == parser_defaults["decimal"]: + return lines + + return self._search_replace_num_columns( + lines=lines, search=self.decimal, replace="." + ) + + def _clear_buffer(self) -> None: + self.buf = [] + + def _get_index_name( + self, + ) -> tuple[Sequence[Hashable] | None, list[Hashable], list[Hashable]]: + """ + Try several cases to get lines: + + 0) There are headers on row 0 and row 1 and their + total summed lengths equals the length of the next line. + Treat row 0 as columns and row 1 as indices + 1) Look for implicit index: there are more columns + on row 1 than row 0. If this is true, assume that row + 1 lists index columns and row 0 lists normal columns. 
+ 2) Get index from the columns if it was listed. + """ + columns: Sequence[Hashable] = self.orig_names + orig_names = list(columns) + columns = list(columns) + + line: list[Scalar] | None + if self._header_line is not None: + line = self._header_line + else: + try: + line = self._next_line() + except StopIteration: + line = None + + next_line: list[Scalar] | None + try: + next_line = self._next_line() + except StopIteration: + next_line = None + + # implicitly index_col=0 b/c 1 fewer column names + implicit_first_cols = 0 + if line is not None: + # leave it 0, #2442 + # Case 1 + # error: Cannot determine type of 'index_col' + index_col = self.index_col # type: ignore[has-type] + if index_col is not False: + implicit_first_cols = len(line) - self.num_original_columns + + # Case 0 + if ( + next_line is not None + and self.header is not None + and index_col is not False + ): + if len(next_line) == len(line) + self.num_original_columns: + # column and index names on diff rows + self.index_col = list(range(len(line))) + self.buf = self.buf[1:] + + for c in reversed(line): + columns.insert(0, c) + + # Update list of original names to include all indices. + orig_names = list(columns) + self.num_original_columns = len(columns) + return line, orig_names, columns + + if implicit_first_cols > 0: + # Case 1 + self._implicit_index = True + if self.index_col is None: + self.index_col = list(range(implicit_first_cols)) + + index_name = None + + else: + # Case 2 + (index_name, _, self.index_col) = self._clean_index_names( + columns, self.index_col + ) + + return index_name, orig_names, columns + + def _rows_to_cols(self, content: list[list[Scalar]]) -> list[np.ndarray]: + col_len = self.num_original_columns + + if self._implicit_index: + col_len += len(self.index_col) + + max_len = max(len(row) for row in content) + + # Check that there are no rows with too many + # elements in their row (rows with too few + # elements are padded with NaN). + # error: Non-overlapping identity check (left operand type: "List[int]", + # right operand type: "Literal[False]") + if ( + max_len > col_len + and self.index_col is not False # type: ignore[comparison-overlap] + and self.usecols is None + ): + footers = self.skipfooter if self.skipfooter else 0 + bad_lines = [] + + iter_content = enumerate(content) + content_len = len(content) + content = [] + + for i, _content in iter_content: + actual_len = len(_content) + + if actual_len > col_len: + if callable(self.on_bad_lines): + new_l = self.on_bad_lines(_content) + if new_l is not None: + content.append(new_l) + elif self.on_bad_lines in ( + self.BadLineHandleMethod.ERROR, + self.BadLineHandleMethod.WARN, + ): + row_num = self.pos - (content_len - i + footers) + bad_lines.append((row_num, actual_len)) + + if self.on_bad_lines == self.BadLineHandleMethod.ERROR: + break + else: + content.append(_content) + + for row_num, actual_len in bad_lines: + msg = ( + f"Expected {col_len} fields in line {row_num + 1}, saw " + f"{actual_len}" + ) + if ( + self.delimiter + and len(self.delimiter) > 1 + and self.quoting != csv.QUOTE_NONE + ): + # see gh-13374 + reason = ( + "Error could possibly be due to quotes being " + "ignored when a multi-char delimiter is used." + ) + msg += ". 
" + reason + + self._alert_malformed(msg, row_num + 1) + + # see gh-13320 + zipped_content = list(lib.to_object_array(content, min_width=col_len).T) + + if self.usecols: + assert self._col_indices is not None + col_indices = self._col_indices + + if self._implicit_index: + zipped_content = [ + a + for i, a in enumerate(zipped_content) + if ( + i < len(self.index_col) + or i - len(self.index_col) in col_indices + ) + ] + else: + zipped_content = [ + a for i, a in enumerate(zipped_content) if i in col_indices + ] + return zipped_content + + def _get_lines(self, rows: int | None = None) -> list[list[Scalar]]: + lines = self.buf + new_rows = None + + # already fetched some number + if rows is not None: + # we already have the lines in the buffer + if len(self.buf) >= rows: + new_rows, self.buf = self.buf[:rows], self.buf[rows:] + + # need some lines + else: + rows -= len(self.buf) + + if new_rows is None: + if isinstance(self.data, list): + if self.pos > len(self.data): + raise StopIteration + if rows is None: + new_rows = self.data[self.pos :] + new_pos = len(self.data) + else: + new_rows = self.data[self.pos : self.pos + rows] + new_pos = self.pos + rows + + new_rows = self._remove_skipped_rows(new_rows) + lines.extend(new_rows) + self.pos = new_pos + + else: + new_rows = [] + try: + if rows is not None: + row_index = 0 + row_ct = 0 + offset = self.pos if self.pos is not None else 0 + while row_ct < rows: + # assert for mypy, data is Iterator[str] or None, would + # error in next + assert self.data is not None + new_row = next(self.data) + if not self.skipfunc(offset + row_index): + row_ct += 1 + row_index += 1 + new_rows.append(new_row) + + len_new_rows = len(new_rows) + new_rows = self._remove_skipped_rows(new_rows) + lines.extend(new_rows) + else: + rows = 0 + + while True: + next_row = self._next_iter_line(row_num=self.pos + rows + 1) + rows += 1 + + if next_row is not None: + new_rows.append(next_row) + len_new_rows = len(new_rows) + + except StopIteration: + len_new_rows = len(new_rows) + new_rows = self._remove_skipped_rows(new_rows) + lines.extend(new_rows) + if len(lines) == 0: + raise + self.pos += len_new_rows + + self.buf = [] + else: + lines = new_rows + + if self.skipfooter: + lines = lines[: -self.skipfooter] + + lines = self._check_comments(lines) + if self.skip_blank_lines: + lines = self._remove_empty_lines(lines) + lines = self._check_thousands(lines) + return self._check_decimal(lines) + + def _remove_skipped_rows(self, new_rows: list[list[Scalar]]) -> list[list[Scalar]]: + if self.skiprows: + return [ + row for i, row in enumerate(new_rows) if not self.skipfunc(i + self.pos) + ] + return new_rows + + def _set_no_thousand_columns(self) -> set[int]: + no_thousands_columns: set[int] = set() + if self.columns and self.parse_dates: + assert self._col_indices is not None + no_thousands_columns = self._set_noconvert_dtype_columns( + self._col_indices, self.columns + ) + if self.columns and self.dtype: + assert self._col_indices is not None + for i, col in zip(self._col_indices, self.columns): + if not isinstance(self.dtype, dict) and not is_numeric_dtype( + self.dtype + ): + no_thousands_columns.add(i) + if ( + isinstance(self.dtype, dict) + and col in self.dtype + and ( + not is_numeric_dtype(self.dtype[col]) + or is_bool_dtype(self.dtype[col]) + ) + ): + no_thousands_columns.add(i) + return no_thousands_columns + + +class FixedWidthReader(abc.Iterator): + """ + A reader of fixed-width lines. 
+ """ + + def __init__( + self, + f: IO[str] | ReadCsvBuffer[str], + colspecs: list[tuple[int, int]] | Literal["infer"], + delimiter: str | None, + comment: str | None, + skiprows: set[int] | None = None, + infer_nrows: int = 100, + ) -> None: + self.f = f + self.buffer: Iterator | None = None + self.delimiter = "\r\n" + delimiter if delimiter else "\n\r\t " + self.comment = comment + if colspecs == "infer": + self.colspecs = self.detect_colspecs( + infer_nrows=infer_nrows, skiprows=skiprows + ) + else: + self.colspecs = colspecs + + if not isinstance(self.colspecs, (tuple, list)): + raise TypeError( + "column specifications must be a list or tuple, " + f"input was a {type(colspecs).__name__}" + ) + + for colspec in self.colspecs: + if not ( + isinstance(colspec, (tuple, list)) + and len(colspec) == 2 + and isinstance(colspec[0], (int, np.integer, type(None))) + and isinstance(colspec[1], (int, np.integer, type(None))) + ): + raise TypeError( + "Each column specification must be " + "2 element tuple or list of integers" + ) + + def get_rows(self, infer_nrows: int, skiprows: set[int] | None = None) -> list[str]: + """ + Read rows from self.f, skipping as specified. + + We distinguish buffer_rows (the first <= infer_nrows + lines) from the rows returned to detect_colspecs + because it's simpler to leave the other locations + with skiprows logic alone than to modify them to + deal with the fact we skipped some rows here as + well. + + Parameters + ---------- + infer_nrows : int + Number of rows to read from self.f, not counting + rows that are skipped. + skiprows: set, optional + Indices of rows to skip. + + Returns + ------- + detect_rows : list of str + A list containing the rows to read. + + """ + if skiprows is None: + skiprows = set() + buffer_rows = [] + detect_rows = [] + for i, row in enumerate(self.f): + if i not in skiprows: + detect_rows.append(row) + buffer_rows.append(row) + if len(detect_rows) >= infer_nrows: + break + self.buffer = iter(buffer_rows) + return detect_rows + + def detect_colspecs( + self, infer_nrows: int = 100, skiprows: set[int] | None = None + ) -> list[tuple[int, int]]: + # Regex escape the delimiters + delimiters = "".join([rf"\{x}" for x in self.delimiter]) + pattern = re.compile(f"([^{delimiters}]+)") + rows = self.get_rows(infer_nrows, skiprows) + if not rows: + raise EmptyDataError("No rows from which to infer column width") + max_len = max(map(len, rows)) + mask = np.zeros(max_len + 1, dtype=int) + if self.comment is not None: + rows = [row.partition(self.comment)[0] for row in rows] + for row in rows: + for m in pattern.finditer(row): + mask[m.start() : m.end()] = 1 + shifted = np.roll(mask, 1) + shifted[0] = 0 + edges = np.where((mask ^ shifted) == 1)[0] + edge_pairs = list(zip(edges[::2], edges[1::2])) + return edge_pairs + + def __next__(self) -> list[str]: + # Argument 1 to "next" has incompatible type "Union[IO[str], + # ReadCsvBuffer[str]]"; expected "SupportsNext[str]" + if self.buffer is not None: + try: + line = next(self.buffer) + except StopIteration: + self.buffer = None + line = next(self.f) # type: ignore[arg-type] + else: + line = next(self.f) # type: ignore[arg-type] + # Note: 'colspecs' is a sequence of half-open intervals. + return [line[from_:to].strip(self.delimiter) for (from_, to) in self.colspecs] + + +class FixedWidthFieldParser(PythonParser): + """ + Specialization that Converts fixed-width fields into DataFrames. + See PythonParser for details. 
+ """ + + def __init__(self, f: ReadCsvBuffer[str], **kwds) -> None: + # Support iterators, convert to a list. + self.colspecs = kwds.pop("colspecs") + self.infer_nrows = kwds.pop("infer_nrows") + PythonParser.__init__(self, f, **kwds) + + def _make_reader(self, f: IO[str] | ReadCsvBuffer[str]) -> FixedWidthReader: + return FixedWidthReader( + f, + self.colspecs, + self.delimiter, + self.comment, + self.skiprows, + self.infer_nrows, + ) + + def _remove_empty_lines(self, lines: list[list[Scalar]]) -> list[list[Scalar]]: + """ + Returns the list of lines without the empty ones. With fixed-width + fields, empty lines become arrays of empty strings. + + See PythonParser._remove_empty_lines. + """ + return [ + line + for line in lines + if any(not isinstance(e, str) or e.strip() for e in line) + ] + + +def count_empty_vals(vals) -> int: + return sum(1 for v in vals if v == "" or v is None) + + +def _validate_skipfooter_arg(skipfooter: int) -> int: + """ + Validate the 'skipfooter' parameter. + + Checks whether 'skipfooter' is a non-negative integer. + Raises a ValueError if that is not the case. + + Parameters + ---------- + skipfooter : non-negative integer + The number of rows to skip at the end of the file. + + Returns + ------- + validated_skipfooter : non-negative integer + The original input if the validation succeeds. + + Raises + ------ + ValueError : 'skipfooter' was not a non-negative integer. + """ + if not is_integer(skipfooter): + raise ValueError("skipfooter must be an integer") + + if skipfooter < 0: + raise ValueError("skipfooter cannot be negative") + + # Incompatible return value type (got "Union[int, integer[Any]]", expected "int") + return skipfooter # type: ignore[return-value] diff --git a/moondream/lib/python3.10/site-packages/pandas/io/parsers/readers.py b/moondream/lib/python3.10/site-packages/pandas/io/parsers/readers.py new file mode 100644 index 0000000000000000000000000000000000000000..e04f27b56061030d19081d87439f0461fa53cc76 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/io/parsers/readers.py @@ -0,0 +1,2383 @@ +""" +Module contains tools for processing files into DataFrames or other objects + +GH#48849 provides a convenient way of deprecating keyword arguments +""" +from __future__ import annotations + +from collections import ( + abc, + defaultdict, +) +import csv +import sys +from textwrap import fill +from typing import ( + IO, + TYPE_CHECKING, + Any, + Callable, + Literal, + NamedTuple, + TypedDict, + overload, +) +import warnings + +import numpy as np + +from pandas._config import using_copy_on_write + +from pandas._libs import lib +from pandas._libs.parsers import STR_NA_VALUES +from pandas.errors import ( + AbstractMethodError, + ParserWarning, +) +from pandas.util._decorators import Appender +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import check_dtype_backend + +from pandas.core.dtypes.common import ( + is_file_like, + is_float, + is_hashable, + is_integer, + is_list_like, + pandas_dtype, +) + +from pandas import Series +from pandas.core.frame import DataFrame +from pandas.core.indexes.api import RangeIndex +from pandas.core.shared_docs import _shared_docs + +from pandas.io.common import ( + IOHandles, + get_handle, + stringify_path, + validate_header_arg, +) +from pandas.io.parsers.arrow_parser_wrapper import ArrowParserWrapper +from pandas.io.parsers.base_parser import ( + ParserBase, + is_index_col, + parser_defaults, +) +from pandas.io.parsers.c_parser_wrapper import CParserWrapper +from 
pandas.io.parsers.python_parser import ( + FixedWidthFieldParser, + PythonParser, +) + + if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Iterable, + Mapping, + Sequence, + ) + from types import TracebackType + + from pandas._typing import ( + CompressionOptions, + CSVEngine, + DtypeArg, + DtypeBackend, + FilePath, + IndexLabel, + ReadCsvBuffer, + Self, + StorageOptions, + UsecolsArgType, + ) +_doc_read_csv_and_table = ( + r""" +{summary} + +Also supports optionally iterating or breaking of the file +into chunks. + +Additional help can be found in the online docs for +`IO Tools <https://pandas.pydata.org/pandas-docs/stable/user_guide/io.html>`_. + +Parameters +---------- +filepath_or_buffer : str, path object or file-like object + Any valid string path is acceptable. The string could be a URL. Valid + URL schemes include http, ftp, s3, gs, and file. For file URLs, a host is + expected. A local file could be: file://localhost/path/to/table.csv. + + If you want to pass in a path object, pandas accepts any ``os.PathLike``. + + By file-like object, we refer to objects with a ``read()`` method, such as + a file handle (e.g. via builtin ``open`` function) or ``StringIO``. +sep : str, default {_default_sep} + Character or regex pattern to treat as the delimiter. If ``sep=None``, the + C engine cannot automatically detect + the separator, but the Python parsing engine can, meaning the latter will + be used and automatically detect the separator from only the first valid + row of the file by Python's builtin sniffer tool, ``csv.Sniffer``. + In addition, separators longer than 1 character and different from + ``'\s+'`` will be interpreted as regular expressions and will also force + the use of the Python parsing engine. Note that regex delimiters are prone + to ignoring quoted data. Regex example: ``'\r\t'``. +delimiter : str, optional + Alias for ``sep``. +header : int, Sequence of int, 'infer' or None, default 'infer' + Row number(s) containing column labels and marking the start of the + data (zero-indexed). Default behavior is to infer the column names: if no ``names`` + are passed the behavior is identical to ``header=0`` and column + names are inferred from the first line of the file, if column + names are passed explicitly to ``names`` then the behavior is identical to + ``header=None``. Explicitly pass ``header=0`` to be able to + replace existing names. The header can be a list of integers that + specify row locations for a :class:`~pandas.MultiIndex` on the columns + e.g. ``[0, 1, 3]``. Intervening rows that are not specified will be + skipped (e.g. 2 in this example is skipped). Note that this + parameter ignores commented lines and empty lines if + ``skip_blank_lines=True``, so ``header=0`` denotes the first line of + data rather than the first line of the file. +names : Sequence of Hashable, optional + Sequence of column labels to apply. If the file contains a header row, + then you should explicitly pass ``header=0`` to override the column names. + Duplicates in this list are not allowed. +index_col : Hashable, Sequence of Hashable or False, optional + Column(s) to use as row label(s), denoted either by column labels or column + indices. If a sequence of labels or indices is given, :class:`~pandas.MultiIndex` + will be formed for the row labels. + + Note: ``index_col=False`` can be used to force pandas to *not* use the first + column as the index, e.g., when you have a malformed file with delimiters at + the end of each line.
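A minimal illustration of the ``index_col=False`` case described above (a sketch for this note only, not part of the vendored file; the sample data is invented):

from io import StringIO

import pandas as pd

# Each data row ends with a stray delimiter, so pandas sees one more field
# per row than there are header names and, by default, falls back to
# treating the first column as an implicit row index.
data = "a,b,c\n4,apple,bat,\n8,orange,cow,"

implicit = pd.read_csv(StringIO(data))                # row index is 4, 8
fixed = pd.read_csv(StringIO(data), index_col=False)  # RangeIndex; a, b, c kept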
+usecols : Sequence of Hashable or Callable, optional + Subset of columns to select, denoted either by column labels or column indices. + If list-like, all elements must either + be positional (i.e. integer indices into the document columns) or strings + that correspond to column names provided either by the user in ``names`` or + inferred from the document header row(s). If ``names`` are given, the document + header row(s) are not taken into account. For example, a valid list-like + ``usecols`` parameter would be ``[0, 1, 2]`` or ``['foo', 'bar', 'baz']``. + Element order is ignored, so ``usecols=[0, 1]`` is the same as ``[1, 0]``. + To instantiate a :class:`~pandas.DataFrame` from ``data`` with element order + preserved use ``pd.read_csv(data, usecols=['foo', 'bar'])[['foo', 'bar']]`` + for columns in ``['foo', 'bar']`` order or + ``pd.read_csv(data, usecols=['foo', 'bar'])[['bar', 'foo']]`` + for ``['bar', 'foo']`` order. + + If callable, the callable function will be evaluated against the column + names, returning names where the callable function evaluates to ``True``. An + example of a valid callable argument would be ``lambda x: x.upper() in + ['AAA', 'BBB', 'DDD']``. Using this parameter results in much faster + parsing time and lower memory usage. +dtype : dtype or dict of {{Hashable : dtype}}, optional + Data type(s) to apply to either the whole dataset or individual columns. + E.g., ``{{'a': np.float64, 'b': np.int32, 'c': 'Int64'}}`` + Use ``str`` or ``object`` together with suitable ``na_values`` settings + to preserve and not interpret ``dtype``. + If ``converters`` are specified, they will be applied INSTEAD + of ``dtype`` conversion. + + .. versionadded:: 1.5.0 + + Support for ``defaultdict`` was added. Specify a ``defaultdict`` as input where + the default determines the ``dtype`` of the columns which are not explicitly + listed. +engine : {{'c', 'python', 'pyarrow'}}, optional + Parser engine to use. The C and pyarrow engines are faster, while the python engine + is currently more feature-complete. Multithreading is currently only supported by + the pyarrow engine. + + .. versionadded:: 1.4.0 + + The 'pyarrow' engine was added as an *experimental* engine, and some features + are unsupported, or may not work correctly, with this engine. +converters : dict of {{Hashable : Callable}}, optional + Functions for converting values in specified columns. Keys can either + be column labels or column indices. +true_values : list, optional + Values to consider as ``True`` in addition to case-insensitive variants of 'True'. +false_values : list, optional + Values to consider as ``False`` in addition to case-insensitive variants of 'False'. +skipinitialspace : bool, default False + Skip spaces after delimiter. +skiprows : int, list of int or Callable, optional + Line numbers to skip (0-indexed) or number of lines to skip (``int``) + at the start of the file. + + If callable, the callable function will be evaluated against the row + indices, returning ``True`` if the row should be skipped and ``False`` otherwise. + An example of a valid callable argument would be ``lambda x: x in [0, 2]``. +skipfooter : int, default 0 + Number of lines at bottom of file to skip (Unsupported with ``engine='c'``). +nrows : int, optional + Number of rows of file to read. Useful for reading pieces of large files. +na_values : Hashable, Iterable of Hashable or dict of {{Hashable : Iterable}}, optional + Additional strings to recognize as ``NA``/``NaN``. 
If ``dict`` passed, specific + per-column ``NA`` values. By default the following values are interpreted as + ``NaN``: " """ + + fill('", "'.join(sorted(STR_NA_VALUES)), 70, subsequent_indent=" ") + + """ ". + +keep_default_na : bool, default True + Whether or not to include the default ``NaN`` values when parsing the data. + Depending on whether ``na_values`` is passed in, the behavior is as follows: + + * If ``keep_default_na`` is ``True``, and ``na_values`` are specified, ``na_values`` + is appended to the default ``NaN`` values used for parsing. + * If ``keep_default_na`` is ``True``, and ``na_values`` are not specified, only + the default ``NaN`` values are used for parsing. + * If ``keep_default_na`` is ``False``, and ``na_values`` are specified, only + the ``NaN`` values specified in ``na_values`` are used for parsing. + * If ``keep_default_na`` is ``False``, and ``na_values`` are not specified, no + strings will be parsed as ``NaN``. + + Note that if ``na_filter`` is passed in as ``False``, the ``keep_default_na`` and + ``na_values`` parameters will be ignored. +na_filter : bool, default True + Detect missing value markers (empty strings and the value of ``na_values``). In + data without any ``NA`` values, passing ``na_filter=False`` can improve the + performance of reading a large file. +verbose : bool, default False + Indicate number of ``NA`` values placed in non-numeric columns. + + .. deprecated:: 2.2.0 +skip_blank_lines : bool, default True + If ``True``, skip over blank lines rather than interpreting as ``NaN`` values. +parse_dates : bool, list of Hashable, list of lists or dict of {{Hashable : list}}, \ +default False + The behavior is as follows: + + * ``bool``. If ``True`` -> try parsing the index. Note: Automatically set to + ``True`` if ``date_format`` or ``date_parser`` arguments have been passed. + * ``list`` of ``int`` or names. e.g. If ``[1, 2, 3]`` -> try parsing columns 1, 2, 3 + each as a separate date column. + * ``list`` of ``list``. e.g. If ``[[1, 3]]`` -> combine columns 1 and 3 and parse + as a single date column. Values are joined with a space before parsing. + * ``dict``, e.g. ``{{'foo' : [1, 3]}}`` -> parse columns 1, 3 as date and call + result 'foo'. Values are joined with a space before parsing. + + If a column or index cannot be represented as an array of ``datetime``, + say because of an unparsable value or a mixture of timezones, the column + or index will be returned unaltered as an ``object`` data type. For + non-standard ``datetime`` parsing, use :func:`~pandas.to_datetime` after + :func:`~pandas.read_csv`. + + Note: A fast-path exists for iso8601-formatted dates. +infer_datetime_format : bool, default False + If ``True`` and ``parse_dates`` is enabled, pandas will attempt to infer the + format of the ``datetime`` strings in the columns, and if it can be inferred, + switch to a faster method of parsing them. In some cases this can increase + the parsing speed by 5-10x. + + .. deprecated:: 2.0.0 + A strict version of this argument is now the default, passing it has no effect. + +keep_date_col : bool, default False + If ``True`` and ``parse_dates`` specifies combining multiple columns then + keep the original columns. +date_parser : Callable, optional + Function to use for converting a sequence of string columns to an array of + ``datetime`` instances. The default uses ``dateutil.parser.parser`` to do the + conversion.
pandas will try to call ``date_parser`` in three different ways, + advancing to the next if an exception occurs: 1) Pass one or more arrays + (as defined by ``parse_dates``) as arguments; 2) concatenate (row-wise) the + string values from the columns defined by ``parse_dates`` into a single array + and pass that; and 3) call ``date_parser`` once for each row using one or + more strings (corresponding to the columns defined by ``parse_dates``) as + arguments. + + .. deprecated:: 2.0.0 + Use ``date_format`` instead, or read in as ``object`` and then apply + :func:`~pandas.to_datetime` as-needed. +date_format : str or dict of column -> format, optional + Format to use for parsing dates when used in conjunction with ``parse_dates``. + The strftime to parse time, e.g. :const:`"%d/%m/%Y"`. See + `strftime documentation + `_ for more information on choices, though + note that :const:`"%f"` will parse all the way up to nanoseconds. + You can also pass: + + - "ISO8601", to parse any `ISO8601 `_ + time string (not necessarily in exactly the same format); + - "mixed", to infer the format for each element individually. This is risky, + and you should probably use it along with `dayfirst`. + + .. versionadded:: 2.0.0 +dayfirst : bool, default False + DD/MM format dates, international and European format. +cache_dates : bool, default True + If ``True``, use a cache of unique, converted dates to apply the ``datetime`` + conversion. May produce significant speed-up when parsing duplicate + date strings, especially ones with timezone offsets. + +iterator : bool, default False + Return ``TextFileReader`` object for iteration or getting chunks with + ``get_chunk()``. +chunksize : int, optional + Number of lines to read from the file per chunk. Passing a value will cause the + function to return a ``TextFileReader`` object for iteration. + See the `IO Tools docs + `_ + for more information on ``iterator`` and ``chunksize``. + +{decompression_options} + + .. versionchanged:: 1.4.0 Zstandard support. + +thousands : str (length 1), optional + Character acting as the thousands separator in numerical values. +decimal : str (length 1), default '.' + Character to recognize as decimal point (e.g., use ',' for European data). +lineterminator : str (length 1), optional + Character used to denote a line break. Only valid with C parser. +quotechar : str (length 1), optional + Character used to denote the start and end of a quoted item. Quoted + items can include the ``delimiter`` and it will be ignored. +quoting : {{0 or csv.QUOTE_MINIMAL, 1 or csv.QUOTE_ALL, 2 or csv.QUOTE_NONNUMERIC, \ +3 or csv.QUOTE_NONE}}, default csv.QUOTE_MINIMAL + Control field quoting behavior per ``csv.QUOTE_*`` constants. Default is + ``csv.QUOTE_MINIMAL`` (i.e., 0) which implies that only fields containing special + characters are quoted (e.g., characters defined in ``quotechar``, ``delimiter``, + or ``lineterminator``. +doublequote : bool, default True + When ``quotechar`` is specified and ``quoting`` is not ``QUOTE_NONE``, indicate + whether or not to interpret two consecutive ``quotechar`` elements INSIDE a + field as a single ``quotechar`` element. +escapechar : str (length 1), optional + Character used to escape other characters. +comment : str (length 1), optional + Character indicating that the remainder of line should not be parsed. + If found at the beginning + of a line, the line will be ignored altogether. This parameter must be a + single character. 
Like empty lines (as long as ``skip_blank_lines=True``), + fully commented lines are ignored by the parameter ``header`` but not by + ``skiprows``. For example, if ``comment='#'``, parsing + ``#empty\\na,b,c\\n1,2,3`` with ``header=0`` will result in ``'a,b,c'`` being + treated as the header. +encoding : str, optional, default 'utf-8' + Encoding to use for UTF when reading/writing (ex. ``'utf-8'``). `List of Python + standard encodings + `_ . + +encoding_errors : str, optional, default 'strict' + How encoding errors are treated. `List of possible values + `_ . + + .. versionadded:: 1.3.0 + +dialect : str or csv.Dialect, optional + If provided, this parameter will override values (default or not) for the + following parameters: ``delimiter``, ``doublequote``, ``escapechar``, + ``skipinitialspace``, ``quotechar``, and ``quoting``. If it is necessary to + override values, a ``ParserWarning`` will be issued. See ``csv.Dialect`` + documentation for more details. +on_bad_lines : {{'error', 'warn', 'skip'}} or Callable, default 'error' + Specifies what to do upon encountering a bad line (a line with too many fields). + Allowed values are : + + - ``'error'``, raise an Exception when a bad line is encountered. + - ``'warn'``, raise a warning when a bad line is encountered and skip that line. + - ``'skip'``, skip bad lines without raising or warning when they are encountered. + + .. versionadded:: 1.3.0 + + .. versionadded:: 1.4.0 + + - Callable, function with signature + ``(bad_line: list[str]) -> list[str] | None`` that will process a single + bad line. ``bad_line`` is a list of strings split by the ``sep``. + If the function returns ``None``, the bad line will be ignored. + If the function returns a new ``list`` of strings with more elements than + expected, a ``ParserWarning`` will be emitted while dropping extra elements. + Only supported when ``engine='python'`` + + .. versionchanged:: 2.2.0 + + - Callable, function with signature + as described in `pyarrow documentation + `_ when ``engine='pyarrow'`` + +delim_whitespace : bool, default False + Specifies whether or not whitespace (e.g. ``' '`` or ``'\\t'``) will be + used as the ``sep`` delimiter. Equivalent to setting ``sep='\\s+'``. If this option + is set to ``True``, nothing should be passed in for the ``delimiter`` + parameter. + + .. deprecated:: 2.2.0 + Use ``sep="\\s+"`` instead. +low_memory : bool, default True + Internally process the file in chunks, resulting in lower memory use + while parsing, but possibly mixed type inference. To ensure no mixed + types either set ``False``, or specify the type with the ``dtype`` parameter. + Note that the entire file is read into a single :class:`~pandas.DataFrame` + regardless, use the ``chunksize`` or ``iterator`` parameter to return the data in + chunks. (Only valid with C parser). +memory_map : bool, default False + If a filepath is provided for ``filepath_or_buffer``, map the file object + directly onto memory and access the data directly from there. Using this + option can improve performance because there is no longer any I/O overhead. +float_precision : {{'high', 'legacy', 'round_trip'}}, optional + Specifies which converter the C engine should use for floating-point + values. The options are ``None`` or ``'high'`` for the ordinary converter, + ``'legacy'`` for the original lower precision pandas converter, and + ``'round_trip'`` for the round-trip converter. 
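The interaction between ``na_values``, ``keep_default_na``, and ``na_filter`` described above is easier to see in a runnable sketch. The inline CSV data and the ``"missing"`` sentinel below are illustrative, not part of this module:

```python
import io
import pandas as pd

csv_data = "a,b\nNA,missing\n1,2\n"

# Default sentinel set: "NA" becomes NaN, "missing" stays a plain string.
print(pd.read_csv(io.StringIO(csv_data)).isna().sum().to_dict())
# {'a': 1, 'b': 0}

# na_values appends to the defaults when keep_default_na=True (the default).
print(pd.read_csv(io.StringIO(csv_data), na_values=["missing"]).isna().sum().to_dict())
# {'a': 1, 'b': 1}

# With keep_default_na=False only the user-supplied sentinels apply, so "NA"
# survives as a string.
print(
    pd.read_csv(
        io.StringIO(csv_data), keep_default_na=False, na_values=["missing"]
    ).isna().sum().to_dict()
)
# {'a': 0, 'b': 1}
```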
+ +{storage_options} + +dtype_backend : {{'numpy_nullable', 'pyarrow'}}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + +Returns +------- +DataFrame or TextFileReader + A comma-separated values (csv) file is returned as two-dimensional + data structure with labeled axes. + +See Also +-------- +DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file. +{see_also_func_name} : {see_also_func_summary} +read_fwf : Read a table of fixed-width formatted lines into DataFrame. + +Examples +-------- +>>> pd.{func_name}('data.csv') # doctest: +SKIP +""" +) + + +class _C_Parser_Defaults(TypedDict): + delim_whitespace: Literal[False] + na_filter: Literal[True] + low_memory: Literal[True] + memory_map: Literal[False] + float_precision: None + + +_c_parser_defaults: _C_Parser_Defaults = { + "delim_whitespace": False, + "na_filter": True, + "low_memory": True, + "memory_map": False, + "float_precision": None, +} + + +class _Fwf_Defaults(TypedDict): + colspecs: Literal["infer"] + infer_nrows: Literal[100] + widths: None + + +_fwf_defaults: _Fwf_Defaults = {"colspecs": "infer", "infer_nrows": 100, "widths": None} +_c_unsupported = {"skipfooter"} +_python_unsupported = {"low_memory", "float_precision"} +_pyarrow_unsupported = { + "skipfooter", + "float_precision", + "chunksize", + "comment", + "nrows", + "thousands", + "memory_map", + "dialect", + "delim_whitespace", + "quoting", + "lineterminator", + "converters", + "iterator", + "dayfirst", + "verbose", + "skipinitialspace", + "low_memory", +} + + +class _DeprecationConfig(NamedTuple): + default_value: Any + msg: str | None + + +@overload +def validate_integer(name: str, val: None, min_val: int = ...) -> None: + ... + + +@overload +def validate_integer(name: str, val: float, min_val: int = ...) -> int: + ... + + +@overload +def validate_integer(name: str, val: int | None, min_val: int = ...) -> int | None: + ... + + +def validate_integer( + name: str, val: int | float | None, min_val: int = 0 +) -> int | None: + """ + Checks whether the 'name' parameter for parsing is either + an integer OR float that can SAFELY be cast to an integer + without losing accuracy. Raises a ValueError if that is + not the case. + + Parameters + ---------- + name : str + Parameter name (used for error reporting) + val : int or float + The value to check + min_val : int + Minimum allowed value (val < min_val will result in a ValueError) + """ + if val is None: + return val + + msg = f"'{name:s}' must be an integer >={min_val:d}" + if is_float(val): + if int(val) != val: + raise ValueError(msg) + val = int(val) + elif not (is_integer(val) and val >= min_val): + raise ValueError(msg) + + return int(val) + + +def _validate_names(names: Sequence[Hashable] | None) -> None: + """ + Raise ValueError if the `names` parameter contains duplicates or has an + invalid data type. + + Parameters + ---------- + names : array-like or None + An array containing a list of the names used for the output DataFrame. + + Raises + ------ + ValueError + If names are not unique or are not ordered (e.g. set). 
+    """
+    if names is not None:
+        if len(names) != len(set(names)):
+            raise ValueError("Duplicate names are not allowed.")
+        if not (
+            is_list_like(names, allow_sets=False) or isinstance(names, abc.KeysView)
+        ):
+            raise ValueError("Names should be an ordered collection.")
+
+
+def _read(
+    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], kwds
+) -> DataFrame | TextFileReader:
+    """Generic reader of line files."""
+    # if we pass a date_parser and parse_dates=False, we should not parse the
+    # dates GH#44366
+    if kwds.get("parse_dates", None) is None:
+        if (
+            kwds.get("date_parser", lib.no_default) is lib.no_default
+            and kwds.get("date_format", None) is None
+        ):
+            kwds["parse_dates"] = False
+        else:
+            kwds["parse_dates"] = True
+
+    # Extract some of the arguments (pass chunksize on).
+    iterator = kwds.get("iterator", False)
+    chunksize = kwds.get("chunksize", None)
+    if kwds.get("engine") == "pyarrow":
+        if iterator:
+            raise ValueError(
+                "The 'iterator' option is not supported with the 'pyarrow' engine"
+            )
+
+        if chunksize is not None:
+            raise ValueError(
+                "The 'chunksize' option is not supported with the 'pyarrow' engine"
+            )
+    else:
+        chunksize = validate_integer("chunksize", chunksize, 1)
+
+    nrows = kwds.get("nrows", None)
+
+    # Check for duplicates in names.
+    _validate_names(kwds.get("names", None))
+
+    # Create the parser.
+    parser = TextFileReader(filepath_or_buffer, **kwds)
+
+    if chunksize or iterator:
+        return parser
+
+    with parser:
+        return parser.read(nrows)
+
+
+# iterator=True -> TextFileReader
+@overload
+def read_csv(
+    filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str],
+    *,
+    sep: str | None | lib.NoDefault = ...,
+    delimiter: str | None | lib.NoDefault = ...,
+    header: int | Sequence[int] | None | Literal["infer"] = ...,
+    names: Sequence[Hashable] | None | lib.NoDefault = ...,
+    index_col: IndexLabel | Literal[False] | None = ...,
+    usecols: UsecolsArgType = ...,
+    dtype: DtypeArg | None = ...,
+    engine: CSVEngine | None = ...,
+    converters: Mapping[Hashable, Callable] | None = ...,
+    true_values: list | None = ...,
+    false_values: list | None = ...,
+    skipinitialspace: bool = ...,
+    skiprows: list[int] | int | Callable[[Hashable], bool] | None = ...,
+    skipfooter: int = ...,
+    nrows: int | None = ...,
+    na_values: Hashable
+    | Iterable[Hashable]
+    | Mapping[Hashable, Iterable[Hashable]]
+    | None = ...,
+    keep_default_na: bool = ...,
+    na_filter: bool = ...,
+    verbose: bool | lib.NoDefault = ...,
+    skip_blank_lines: bool = ...,
+    parse_dates: bool | Sequence[Hashable] | None = ...,
+    infer_datetime_format: bool | lib.NoDefault = ...,
+    keep_date_col: bool | lib.NoDefault = ...,
+    date_parser: Callable | lib.NoDefault = ...,
+    date_format: str | dict[Hashable, str] | None = ...,
+    dayfirst: bool = ...,
+    cache_dates: bool = ...,
+    iterator: Literal[True],
+    chunksize: int | None = ...,
+    compression: CompressionOptions = ...,
+    thousands: str | None = ...,
+    decimal: str = ...,
+    lineterminator: str | None = ...,
+    quotechar: str = ...,
+    quoting: int = ...,
+    doublequote: bool = ...,
+    escapechar: str | None = ...,
+    comment: str | None = ...,
+    encoding: str | None = ...,
+    encoding_errors: str | None = ...,
+    dialect: str | csv.Dialect | None = ...,
+    on_bad_lines=...,
+    delim_whitespace: bool | lib.NoDefault = ...,
+    low_memory: bool = ...,
+    memory_map: bool = ...,
+    float_precision: Literal["high", "legacy"] | None = ...,
+    storage_options: StorageOptions = ...,
+    dtype_backend: DtypeBackend | lib.NoDefault = ...,
+) -> TextFileReader:
+    ...
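The overload above encodes the contract that ``iterator=True`` always returns a ``TextFileReader`` rather than a ``DataFrame``, and ``TextFileReader`` is itself a context manager. A minimal sketch of that call pattern, with illustrative inline data:

```python
import io
import pandas as pd

buf = io.StringIO("x,y\n1,2\n3,4\n5,6\n")

# iterator=True returns a TextFileReader; get_chunk() pulls rows on demand.
with pd.read_csv(buf, iterator=True) as reader:
    first = reader.get_chunk(2)   # first two rows
    rest = reader.get_chunk()     # the remainder
    print(len(first), len(rest))  # 2 1
```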
+ + +# chunksize=int -> TextFileReader +@overload +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Hashable + | Iterable[Hashable] + | Mapping[Hashable, Iterable[Hashable]] + | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] | None = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: bool = ..., + chunksize: int, + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool | lib.NoDefault = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: Literal["high", "legacy"] | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> TextFileReader: + ... 
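Likewise, a concrete ``chunksize`` (the case this second overload covers) makes the returned reader iterable, which supports simple out-of-core aggregation. A sketch with an assumed ``value`` column:

```python
import io
import pandas as pd

buf = io.StringIO("value\n1\n2\n3\n4\n5\n")

total = 0
# Each iteration yields a DataFrame of at most `chunksize` rows.
with pd.read_csv(buf, chunksize=2) as reader:
    for chunk in reader:
        total += int(chunk["value"].sum())
print(total)  # 15
```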
+ + +# default case -> DataFrame +@overload +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Hashable + | Iterable[Hashable] + | Mapping[Hashable, Iterable[Hashable]] + | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] | None = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: Literal[False] = ..., + chunksize: None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool | lib.NoDefault = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: Literal["high", "legacy"] | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame: + ... 
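Note that both streaming options are engine-dependent: as ``_read`` enforces earlier in this module, the pyarrow engine rejects ``iterator=True`` and ``chunksize`` with a ``ValueError``, and the check fires before any parser engine is constructed. A sketch of the failure mode:

```python
import io
import pandas as pd

buf = io.StringIO("a,b\n1,2\n")

try:
    pd.read_csv(buf, engine="pyarrow", chunksize=1)
except ValueError as err:
    # "The 'chunksize' option is not supported with the 'pyarrow' engine"
    print(err)
```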
+ + +# Unions -> DataFrame | TextFileReader +@overload +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Hashable + | Iterable[Hashable] + | Mapping[Hashable, Iterable[Hashable]] + | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] | None = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: bool = ..., + chunksize: int | None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool | lib.NoDefault = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: Literal["high", "legacy"] | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame | TextFileReader: + ... 
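The implementation that follows spends most of its preamble on deprecation shims (``keep_date_col``, ``delim_whitespace``, ``verbose``, ``infer_datetime_format``), each emitting a ``FutureWarning`` with ``find_stack_level()`` so the warning points at user code rather than pandas internals. A sketch of observing one of them, assuming a pandas version (such as 2.2) where the deprecation is active:

```python
import io
import warnings
import pandas as pd

buf = io.StringIO("a b\n1 2\n")

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    df = pd.read_csv(buf, delim_whitespace=True)  # deprecated spelling of sep="\s+"

print(df.columns.tolist())                               # ['a', 'b']
print(any(w.category is FutureWarning for w in caught))  # True on pandas 2.2
```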
+ + +@Appender( + _doc_read_csv_and_table.format( + func_name="read_csv", + summary="Read a comma-separated values (csv) file into DataFrame.", + see_also_func_name="read_table", + see_also_func_summary="Read general delimited file into DataFrame.", + _default_sep="','", + storage_options=_shared_docs["storage_options"], + decompression_options=_shared_docs["decompression_options"] + % "filepath_or_buffer", + ) +) +def read_csv( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = lib.no_default, + delimiter: str | None | lib.NoDefault = None, + # Column and Index Locations and Names + header: int | Sequence[int] | None | Literal["infer"] = "infer", + names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default, + index_col: IndexLabel | Literal[False] | None = None, + usecols: UsecolsArgType = None, + # General Parsing Configuration + dtype: DtypeArg | None = None, + engine: CSVEngine | None = None, + converters: Mapping[Hashable, Callable] | None = None, + true_values: list | None = None, + false_values: list | None = None, + skipinitialspace: bool = False, + skiprows: list[int] | int | Callable[[Hashable], bool] | None = None, + skipfooter: int = 0, + nrows: int | None = None, + # NA and Missing Data Handling + na_values: Hashable + | Iterable[Hashable] + | Mapping[Hashable, Iterable[Hashable]] + | None = None, + keep_default_na: bool = True, + na_filter: bool = True, + verbose: bool | lib.NoDefault = lib.no_default, + skip_blank_lines: bool = True, + # Datetime Handling + parse_dates: bool | Sequence[Hashable] | None = None, + infer_datetime_format: bool | lib.NoDefault = lib.no_default, + keep_date_col: bool | lib.NoDefault = lib.no_default, + date_parser: Callable | lib.NoDefault = lib.no_default, + date_format: str | dict[Hashable, str] | None = None, + dayfirst: bool = False, + cache_dates: bool = True, + # Iteration + iterator: bool = False, + chunksize: int | None = None, + # Quoting, Compression, and File Format + compression: CompressionOptions = "infer", + thousands: str | None = None, + decimal: str = ".", + lineterminator: str | None = None, + quotechar: str = '"', + quoting: int = csv.QUOTE_MINIMAL, + doublequote: bool = True, + escapechar: str | None = None, + comment: str | None = None, + encoding: str | None = None, + encoding_errors: str | None = "strict", + dialect: str | csv.Dialect | None = None, + # Error Handling + on_bad_lines: str = "error", + # Internal + delim_whitespace: bool | lib.NoDefault = lib.no_default, + low_memory: bool = _c_parser_defaults["low_memory"], + memory_map: bool = False, + float_precision: Literal["high", "legacy"] | None = None, + storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame | TextFileReader: + if keep_date_col is not lib.no_default: + # GH#55569 + warnings.warn( + "The 'keep_date_col' keyword in pd.read_csv is deprecated and " + "will be removed in a future version. 
Explicitly remove unwanted " + "columns after parsing instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + keep_date_col = False + + if lib.is_list_like(parse_dates): + # GH#55569 + depr = False + # error: Item "bool" of "bool | Sequence[Hashable] | None" has no + # attribute "__iter__" (not iterable) + if not all(is_hashable(x) for x in parse_dates): # type: ignore[union-attr] + depr = True + elif isinstance(parse_dates, dict) and any( + lib.is_list_like(x) for x in parse_dates.values() + ): + depr = True + if depr: + warnings.warn( + "Support for nested sequences for 'parse_dates' in pd.read_csv " + "is deprecated. Combine the desired columns with pd.to_datetime " + "after parsing instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if infer_datetime_format is not lib.no_default: + warnings.warn( + "The argument 'infer_datetime_format' is deprecated and will " + "be removed in a future version. " + "A strict version of it is now the default, see " + "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. " + "You can safely remove this argument.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if delim_whitespace is not lib.no_default: + # GH#55569 + warnings.warn( + "The 'delim_whitespace' keyword in pd.read_csv is deprecated and " + "will be removed in a future version. Use ``sep='\\s+'`` instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + delim_whitespace = False + + if verbose is not lib.no_default: + # GH#55569 + warnings.warn( + "The 'verbose' keyword in pd.read_csv is deprecated and " + "will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + verbose = False + + # locals() should never be modified + kwds = locals().copy() + del kwds["filepath_or_buffer"] + del kwds["sep"] + + kwds_defaults = _refine_defaults_read( + dialect, + delimiter, + delim_whitespace, + engine, + sep, + on_bad_lines, + names, + defaults={"delimiter": ","}, + dtype_backend=dtype_backend, + ) + kwds.update(kwds_defaults) + + return _read(filepath_or_buffer, kwds) + + +# iterator=True -> TextFileReader +@overload +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: Literal[True], + chunksize: int | None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., 
+ lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: str | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> TextFileReader: + ... + + +# chunksize=int -> TextFileReader +@overload +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: bool = ..., + chunksize: int, + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: str | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> TextFileReader: + ... 
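``read_table`` shares this entire overload surface with ``read_csv`` and differs only in its default delimiter, the ``'\t'`` that ``_refine_defaults_read`` injects in the implementation below. A sketch with illustrative tab-separated data:

```python
import io
import pandas as pd

tsv = io.StringIO("name\tscore\nana\t10\nbo\t7\n")

# Equivalent to pd.read_csv(..., sep="\t") once defaults are refined.
df = pd.read_table(tsv)
print(df.shape)  # (2, 2)
```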
+ + +# default -> DataFrame +@overload +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: Literal[False] = ..., + chunksize: None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: str | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame: + ... 
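``read_fwf``, defined after the remaining overloads below, reuses the same ``_read`` plumbing with ``engine='python-fwf'``; its ``widths`` argument is sugar that the function expands into half-open ``colspecs`` intervals. A sketch with illustrative fixed-width data:

```python
import io
import pandas as pd

fwf = io.StringIO(
    "id  name\n"
    "01  ana \n"
    "02  bo  \n"
)

# widths=[4, 4] is expanded internally to colspecs=[(0, 4), (4, 8)].
df = pd.read_fwf(fwf, widths=[4, 4])
print(df.columns.tolist())  # ['id', 'name']
```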
+ + +# Unions -> DataFrame | TextFileReader +@overload +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = ..., + delimiter: str | None | lib.NoDefault = ..., + header: int | Sequence[int] | None | Literal["infer"] = ..., + names: Sequence[Hashable] | None | lib.NoDefault = ..., + index_col: IndexLabel | Literal[False] | None = ..., + usecols: UsecolsArgType = ..., + dtype: DtypeArg | None = ..., + engine: CSVEngine | None = ..., + converters: Mapping[Hashable, Callable] | None = ..., + true_values: list | None = ..., + false_values: list | None = ..., + skipinitialspace: bool = ..., + skiprows: list[int] | int | Callable[[Hashable], bool] | None = ..., + skipfooter: int = ..., + nrows: int | None = ..., + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = ..., + keep_default_na: bool = ..., + na_filter: bool = ..., + verbose: bool | lib.NoDefault = ..., + skip_blank_lines: bool = ..., + parse_dates: bool | Sequence[Hashable] = ..., + infer_datetime_format: bool | lib.NoDefault = ..., + keep_date_col: bool | lib.NoDefault = ..., + date_parser: Callable | lib.NoDefault = ..., + date_format: str | dict[Hashable, str] | None = ..., + dayfirst: bool = ..., + cache_dates: bool = ..., + iterator: bool = ..., + chunksize: int | None = ..., + compression: CompressionOptions = ..., + thousands: str | None = ..., + decimal: str = ..., + lineterminator: str | None = ..., + quotechar: str = ..., + quoting: int = ..., + doublequote: bool = ..., + escapechar: str | None = ..., + comment: str | None = ..., + encoding: str | None = ..., + encoding_errors: str | None = ..., + dialect: str | csv.Dialect | None = ..., + on_bad_lines=..., + delim_whitespace: bool = ..., + low_memory: bool = ..., + memory_map: bool = ..., + float_precision: str | None = ..., + storage_options: StorageOptions = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., +) -> DataFrame | TextFileReader: + ... + + +@Appender( + _doc_read_csv_and_table.format( + func_name="read_table", + summary="Read general delimited file into DataFrame.", + see_also_func_name="read_csv", + see_also_func_summary=( + "Read a comma-separated values (csv) file into DataFrame." 
+ ), + _default_sep=r"'\\t' (tab-stop)", + storage_options=_shared_docs["storage_options"], + decompression_options=_shared_docs["decompression_options"] + % "filepath_or_buffer", + ) +) +def read_table( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + sep: str | None | lib.NoDefault = lib.no_default, + delimiter: str | None | lib.NoDefault = None, + # Column and Index Locations and Names + header: int | Sequence[int] | None | Literal["infer"] = "infer", + names: Sequence[Hashable] | None | lib.NoDefault = lib.no_default, + index_col: IndexLabel | Literal[False] | None = None, + usecols: UsecolsArgType = None, + # General Parsing Configuration + dtype: DtypeArg | None = None, + engine: CSVEngine | None = None, + converters: Mapping[Hashable, Callable] | None = None, + true_values: list | None = None, + false_values: list | None = None, + skipinitialspace: bool = False, + skiprows: list[int] | int | Callable[[Hashable], bool] | None = None, + skipfooter: int = 0, + nrows: int | None = None, + # NA and Missing Data Handling + na_values: Sequence[str] | Mapping[str, Sequence[str]] | None = None, + keep_default_na: bool = True, + na_filter: bool = True, + verbose: bool | lib.NoDefault = lib.no_default, + skip_blank_lines: bool = True, + # Datetime Handling + parse_dates: bool | Sequence[Hashable] = False, + infer_datetime_format: bool | lib.NoDefault = lib.no_default, + keep_date_col: bool | lib.NoDefault = lib.no_default, + date_parser: Callable | lib.NoDefault = lib.no_default, + date_format: str | dict[Hashable, str] | None = None, + dayfirst: bool = False, + cache_dates: bool = True, + # Iteration + iterator: bool = False, + chunksize: int | None = None, + # Quoting, Compression, and File Format + compression: CompressionOptions = "infer", + thousands: str | None = None, + decimal: str = ".", + lineterminator: str | None = None, + quotechar: str = '"', + quoting: int = csv.QUOTE_MINIMAL, + doublequote: bool = True, + escapechar: str | None = None, + comment: str | None = None, + encoding: str | None = None, + encoding_errors: str | None = "strict", + dialect: str | csv.Dialect | None = None, + # Error Handling + on_bad_lines: str = "error", + # Internal + delim_whitespace: bool | lib.NoDefault = lib.no_default, + low_memory: bool = _c_parser_defaults["low_memory"], + memory_map: bool = False, + float_precision: str | None = None, + storage_options: StorageOptions | None = None, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, +) -> DataFrame | TextFileReader: + if keep_date_col is not lib.no_default: + # GH#55569 + warnings.warn( + "The 'keep_date_col' keyword in pd.read_table is deprecated and " + "will be removed in a future version. Explicitly remove unwanted " + "columns after parsing instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + keep_date_col = False + + # error: Item "bool" of "bool | Sequence[Hashable]" has no attribute "__iter__" + if lib.is_list_like(parse_dates) and not all(is_hashable(x) for x in parse_dates): # type: ignore[union-attr] + # GH#55569 + warnings.warn( + "Support for nested sequences for 'parse_dates' in pd.read_table " + "is deprecated. Combine the desired columns with pd.to_datetime " + "after parsing instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if infer_datetime_format is not lib.no_default: + warnings.warn( + "The argument 'infer_datetime_format' is deprecated and will " + "be removed in a future version. 
" + "A strict version of it is now the default, see " + "https://pandas.pydata.org/pdeps/0004-consistent-to-datetime-parsing.html. " + "You can safely remove this argument.", + FutureWarning, + stacklevel=find_stack_level(), + ) + + if delim_whitespace is not lib.no_default: + # GH#55569 + warnings.warn( + "The 'delim_whitespace' keyword in pd.read_table is deprecated and " + "will be removed in a future version. Use ``sep='\\s+'`` instead", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + delim_whitespace = False + + if verbose is not lib.no_default: + # GH#55569 + warnings.warn( + "The 'verbose' keyword in pd.read_table is deprecated and " + "will be removed in a future version.", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + verbose = False + + # locals() should never be modified + kwds = locals().copy() + del kwds["filepath_or_buffer"] + del kwds["sep"] + + kwds_defaults = _refine_defaults_read( + dialect, + delimiter, + delim_whitespace, + engine, + sep, + on_bad_lines, + names, + defaults={"delimiter": "\t"}, + dtype_backend=dtype_backend, + ) + kwds.update(kwds_defaults) + + return _read(filepath_or_buffer, kwds) + + +@overload +def read_fwf( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + colspecs: Sequence[tuple[int, int]] | str | None = ..., + widths: Sequence[int] | None = ..., + infer_nrows: int = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + iterator: Literal[True], + chunksize: int | None = ..., + **kwds, +) -> TextFileReader: + ... + + +@overload +def read_fwf( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + colspecs: Sequence[tuple[int, int]] | str | None = ..., + widths: Sequence[int] | None = ..., + infer_nrows: int = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + iterator: bool = ..., + chunksize: int, + **kwds, +) -> TextFileReader: + ... + + +@overload +def read_fwf( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + colspecs: Sequence[tuple[int, int]] | str | None = ..., + widths: Sequence[int] | None = ..., + infer_nrows: int = ..., + dtype_backend: DtypeBackend | lib.NoDefault = ..., + iterator: Literal[False] = ..., + chunksize: None = ..., + **kwds, +) -> DataFrame: + ... + + +def read_fwf( + filepath_or_buffer: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str], + *, + colspecs: Sequence[tuple[int, int]] | str | None = "infer", + widths: Sequence[int] | None = None, + infer_nrows: int = 100, + dtype_backend: DtypeBackend | lib.NoDefault = lib.no_default, + iterator: bool = False, + chunksize: int | None = None, + **kwds, +) -> DataFrame | TextFileReader: + r""" + Read a table of fixed-width formatted lines into DataFrame. + + Also supports optionally iterating or breaking of the file + into chunks. + + Additional help can be found in the `online docs for IO Tools + `_. + + Parameters + ---------- + filepath_or_buffer : str, path object, or file-like object + String, path object (implementing ``os.PathLike[str]``), or file-like + object implementing a text ``read()`` function.The string could be a URL. + Valid URL schemes include http, ftp, s3, and file. For file URLs, a host is + expected. A local file could be: + ``file://localhost/path/to/table.csv``. + colspecs : list of tuple (int, int) or 'infer'. optional + A list of tuples giving the extents of the fixed-width + fields of each line as half-open intervals (i.e., [from, to[ ). 
+ String value 'infer' can be used to instruct the parser to try + detecting the column specifications from the first 100 rows of + the data which are not being skipped via skiprows (default='infer'). + widths : list of int, optional + A list of field widths which can be used instead of 'colspecs' if + the intervals are contiguous. + infer_nrows : int, default 100 + The number of rows to consider when letting the parser determine the + `colspecs`. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + .. versionadded:: 2.0 + + **kwds : optional + Optional keyword arguments can be passed to ``TextFileReader``. + + Returns + ------- + DataFrame or TextFileReader + A comma-separated values (csv) file is returned as two-dimensional + data structure with labeled axes. + + See Also + -------- + DataFrame.to_csv : Write DataFrame to a comma-separated values (csv) file. + read_csv : Read a comma-separated values (csv) file into DataFrame. + + Examples + -------- + >>> pd.read_fwf('data.csv') # doctest: +SKIP + """ + # Check input arguments. + if colspecs is None and widths is None: + raise ValueError("Must specify either colspecs or widths") + if colspecs not in (None, "infer") and widths is not None: + raise ValueError("You must specify only one of 'widths' and 'colspecs'") + + # Compute 'colspecs' from 'widths', if specified. + if widths is not None: + colspecs, col = [], 0 + for w in widths: + colspecs.append((col, col + w)) + col += w + + # for mypy + assert colspecs is not None + + # GH#40830 + # Ensure length of `colspecs` matches length of `names` + names = kwds.get("names") + if names is not None: + if len(names) != len(colspecs) and colspecs != "infer": + # need to check len(index_col) as it might contain + # unnamed indices, in which case it's name is not required + len_index = 0 + if kwds.get("index_col") is not None: + index_col: Any = kwds.get("index_col") + if index_col is not False: + if not is_list_like(index_col): + len_index = 1 + else: + len_index = len(index_col) + if kwds.get("usecols") is None and len(names) + len_index != len(colspecs): + # If usecols is used colspec may be longer than names + raise ValueError("Length of colspecs must match length of names") + + kwds["colspecs"] = colspecs + kwds["infer_nrows"] = infer_nrows + kwds["engine"] = "python-fwf" + kwds["iterator"] = iterator + kwds["chunksize"] = chunksize + + check_dtype_backend(dtype_backend) + kwds["dtype_backend"] = dtype_backend + return _read(filepath_or_buffer, kwds) + + +class TextFileReader(abc.Iterator): + """ + + Passed dialect overrides any of the related parser options + + """ + + def __init__( + self, + f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list, + engine: CSVEngine | None = None, + **kwds, + ) -> None: + if engine is not None: + engine_specified = True + else: + engine = "python" + engine_specified = False + self.engine = engine + self._engine_specified = kwds.get("engine_specified", engine_specified) + + _validate_skipfooter(kwds) + + dialect = _extract_dialect(kwds) + if dialect is not None: + if engine == "pyarrow": + raise ValueError( + "The 'dialect' option is not supported with the 'pyarrow' engine" + ) + kwds = _merge_with_dialect_properties(dialect, kwds) + + if 
kwds.get("header", "infer") == "infer": + kwds["header"] = 0 if kwds.get("names") is None else None + + self.orig_options = kwds + + # miscellanea + self._currow = 0 + + options = self._get_options_with_defaults(engine) + options["storage_options"] = kwds.get("storage_options", None) + + self.chunksize = options.pop("chunksize", None) + self.nrows = options.pop("nrows", None) + + self._check_file_or_buffer(f, engine) + self.options, self.engine = self._clean_options(options, engine) + + if "has_index_names" in kwds: + self.options["has_index_names"] = kwds["has_index_names"] + + self.handles: IOHandles | None = None + self._engine = self._make_engine(f, self.engine) + + def close(self) -> None: + if self.handles is not None: + self.handles.close() + self._engine.close() + + def _get_options_with_defaults(self, engine: CSVEngine) -> dict[str, Any]: + kwds = self.orig_options + + options = {} + default: object | None + + for argname, default in parser_defaults.items(): + value = kwds.get(argname, default) + + # see gh-12935 + if ( + engine == "pyarrow" + and argname in _pyarrow_unsupported + and value != default + and value != getattr(value, "value", default) + ): + raise ValueError( + f"The {repr(argname)} option is not supported with the " + f"'pyarrow' engine" + ) + options[argname] = value + + for argname, default in _c_parser_defaults.items(): + if argname in kwds: + value = kwds[argname] + + if engine != "c" and value != default: + # TODO: Refactor this logic, its pretty convoluted + if "python" in engine and argname not in _python_unsupported: + pass + elif "pyarrow" in engine and argname not in _pyarrow_unsupported: + pass + else: + raise ValueError( + f"The {repr(argname)} option is not supported with the " + f"{repr(engine)} engine" + ) + else: + value = default + options[argname] = value + + if engine == "python-fwf": + for argname, default in _fwf_defaults.items(): + options[argname] = kwds.get(argname, default) + + return options + + def _check_file_or_buffer(self, f, engine: CSVEngine) -> None: + # see gh-16530 + if is_file_like(f) and engine != "c" and not hasattr(f, "__iter__"): + # The C engine doesn't need the file-like to have the "__iter__" + # attribute. However, the Python engine needs "__iter__(...)" + # when iterating through such an object, meaning it + # needs to have that attribute + raise ValueError( + "The 'python' engine cannot iterate through this file buffer." 
+ ) + + def _clean_options( + self, options: dict[str, Any], engine: CSVEngine + ) -> tuple[dict[str, Any], CSVEngine]: + result = options.copy() + + fallback_reason = None + + # C engine not supported yet + if engine == "c": + if options["skipfooter"] > 0: + fallback_reason = "the 'c' engine does not support skipfooter" + engine = "python" + + sep = options["delimiter"] + delim_whitespace = options["delim_whitespace"] + + if sep is None and not delim_whitespace: + if engine in ("c", "pyarrow"): + fallback_reason = ( + f"the '{engine}' engine does not support " + "sep=None with delim_whitespace=False" + ) + engine = "python" + elif sep is not None and len(sep) > 1: + if engine == "c" and sep == r"\s+": + result["delim_whitespace"] = True + del result["delimiter"] + elif engine not in ("python", "python-fwf"): + # wait until regex engine integrated + fallback_reason = ( + f"the '{engine}' engine does not support " + "regex separators (separators > 1 char and " + r"different from '\s+' are interpreted as regex)" + ) + engine = "python" + elif delim_whitespace: + if "python" in engine: + result["delimiter"] = r"\s+" + elif sep is not None: + encodeable = True + encoding = sys.getfilesystemencoding() or "utf-8" + try: + if len(sep.encode(encoding)) > 1: + encodeable = False + except UnicodeDecodeError: + encodeable = False + if not encodeable and engine not in ("python", "python-fwf"): + fallback_reason = ( + f"the separator encoded in {encoding} " + f"is > 1 char long, and the '{engine}' engine " + "does not support such separators" + ) + engine = "python" + + quotechar = options["quotechar"] + if quotechar is not None and isinstance(quotechar, (str, bytes)): + if ( + len(quotechar) == 1 + and ord(quotechar) > 127 + and engine not in ("python", "python-fwf") + ): + fallback_reason = ( + "ord(quotechar) > 127, meaning the " + "quotechar is larger than one byte, " + f"and the '{engine}' engine does not support such quotechars" + ) + engine = "python" + + if fallback_reason and self._engine_specified: + raise ValueError(fallback_reason) + + if engine == "c": + for arg in _c_unsupported: + del result[arg] + + if "python" in engine: + for arg in _python_unsupported: + if fallback_reason and result[arg] != _c_parser_defaults.get(arg): + raise ValueError( + "Falling back to the 'python' engine because " + f"{fallback_reason}, but this causes {repr(arg)} to be " + "ignored as it is not supported by the 'python' engine." + ) + del result[arg] + + if fallback_reason: + warnings.warn( + ( + "Falling back to the 'python' engine because " + f"{fallback_reason}; you can avoid this warning by specifying " + "engine='python'." 
+ ), + ParserWarning, + stacklevel=find_stack_level(), + ) + + index_col = options["index_col"] + names = options["names"] + converters = options["converters"] + na_values = options["na_values"] + skiprows = options["skiprows"] + + validate_header_arg(options["header"]) + + if index_col is True: + raise ValueError("The value of index_col couldn't be 'True'") + if is_index_col(index_col): + if not isinstance(index_col, (list, tuple, np.ndarray)): + index_col = [index_col] + result["index_col"] = index_col + + names = list(names) if names is not None else names + + # type conversion-related + if converters is not None: + if not isinstance(converters, dict): + raise TypeError( + "Type converters must be a dict or subclass, " + f"input was a {type(converters).__name__}" + ) + else: + converters = {} + + # Converting values to NA + keep_default_na = options["keep_default_na"] + floatify = engine != "pyarrow" + na_values, na_fvalues = _clean_na_values( + na_values, keep_default_na, floatify=floatify + ) + + # handle skiprows; this is internally handled by the + # c-engine, so only need for python and pyarrow parsers + if engine == "pyarrow": + if not is_integer(skiprows) and skiprows is not None: + # pyarrow expects skiprows to be passed as an integer + raise ValueError( + "skiprows argument must be an integer when using " + "engine='pyarrow'" + ) + else: + if is_integer(skiprows): + skiprows = list(range(skiprows)) + if skiprows is None: + skiprows = set() + elif not callable(skiprows): + skiprows = set(skiprows) + + # put stuff back + result["names"] = names + result["converters"] = converters + result["na_values"] = na_values + result["na_fvalues"] = na_fvalues + result["skiprows"] = skiprows + + return result, engine + + def __next__(self) -> DataFrame: + try: + return self.get_chunk() + except StopIteration: + self.close() + raise + + def _make_engine( + self, + f: FilePath | ReadCsvBuffer[bytes] | ReadCsvBuffer[str] | list | IO, + engine: CSVEngine = "c", + ) -> ParserBase: + mapping: dict[str, type[ParserBase]] = { + "c": CParserWrapper, + "python": PythonParser, + "pyarrow": ArrowParserWrapper, + "python-fwf": FixedWidthFieldParser, + } + if engine not in mapping: + raise ValueError( + f"Unknown engine: {engine} (valid options are {mapping.keys()})" + ) + if not isinstance(f, list): + # open file here + is_text = True + mode = "r" + if engine == "pyarrow": + is_text = False + mode = "rb" + elif ( + engine == "c" + and self.options.get("encoding", "utf-8") == "utf-8" + and isinstance(stringify_path(f), str) + ): + # c engine can decode utf-8 bytes, adding TextIOWrapper makes + # the c-engine especially for memory_map=True far slower + is_text = False + if "b" not in mode: + mode += "b" + self.handles = get_handle( + f, + mode, + encoding=self.options.get("encoding", None), + compression=self.options.get("compression", None), + memory_map=self.options.get("memory_map", False), + is_text=is_text, + errors=self.options.get("encoding_errors", "strict"), + storage_options=self.options.get("storage_options", None), + ) + assert self.handles is not None + f = self.handles.handle + + elif engine != "python": + msg = f"Invalid file path or buffer object type: {type(f)}" + raise ValueError(msg) + + try: + return mapping[engine](f, **self.options) + except Exception: + if self.handles is not None: + self.handles.close() + raise + + def _failover_to_python(self) -> None: + raise AbstractMethodError(self) + + def read(self, nrows: int | None = None) -> DataFrame: + if self.engine == "pyarrow": + try: 
+ # error: "ParserBase" has no attribute "read" + df = self._engine.read() # type: ignore[attr-defined] + except Exception: + self.close() + raise + else: + nrows = validate_integer("nrows", nrows) + try: + # error: "ParserBase" has no attribute "read" + ( + index, + columns, + col_dict, + ) = self._engine.read( # type: ignore[attr-defined] + nrows + ) + except Exception: + self.close() + raise + + if index is None: + if col_dict: + # Any column is actually fine: + new_rows = len(next(iter(col_dict.values()))) + index = RangeIndex(self._currow, self._currow + new_rows) + else: + new_rows = 0 + else: + new_rows = len(index) + + if hasattr(self, "orig_options"): + dtype_arg = self.orig_options.get("dtype", None) + else: + dtype_arg = None + + if isinstance(dtype_arg, dict): + dtype = defaultdict(lambda: None) # type: ignore[var-annotated] + dtype.update(dtype_arg) + elif dtype_arg is not None and pandas_dtype(dtype_arg) in ( + np.str_, + np.object_, + ): + dtype = defaultdict(lambda: dtype_arg) + else: + dtype = None + + if dtype is not None: + new_col_dict = {} + for k, v in col_dict.items(): + d = ( + dtype[k] + if pandas_dtype(dtype[k]) in (np.str_, np.object_) + else None + ) + new_col_dict[k] = Series(v, index=index, dtype=d, copy=False) + else: + new_col_dict = col_dict + + df = DataFrame( + new_col_dict, + columns=columns, + index=index, + copy=not using_copy_on_write(), + ) + + self._currow += new_rows + return df + + def get_chunk(self, size: int | None = None) -> DataFrame: + if size is None: + size = self.chunksize + if self.nrows is not None: + if self._currow >= self.nrows: + raise StopIteration + size = min(size, self.nrows - self._currow) + return self.read(nrows=size) + + def __enter__(self) -> Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: TracebackType | None, + ) -> None: + self.close() + + +def TextParser(*args, **kwds) -> TextFileReader: + """ + Converts lists of lists/tuples into DataFrames with proper type inference + and optional (e.g. string to datetime) conversion. Also enables iterating + lazily over chunks of large files + + Parameters + ---------- + data : file-like object or list + delimiter : separator character to use + dialect : str or csv.Dialect instance, optional + Ignored if delimiter is longer than 1 character + names : sequence, default + header : int, default 0 + Row to use to parse column labels. Defaults to the first row. Prior + rows will be discarded + index_col : int or list, optional + Column or columns to use as the (possibly hierarchical) index + has_index_names: bool, default False + True if the cols defined in index_col have an index name and are + not in the header. + na_values : scalar, str, list-like, or dict, optional + Additional strings to recognize as NA/NaN. + keep_default_na : bool, default True + thousands : str, optional + Thousands separator + comment : str, optional + Comment out remainder of line + parse_dates : bool, default False + keep_date_col : bool, default False + date_parser : function, optional + + .. deprecated:: 2.0.0 + date_format : str or dict of column -> format, default ``None`` + + .. versionadded:: 2.0.0 + skiprows : list of integers + Row numbers to skip + skipfooter : int + Number of line at bottom of file to skip + converters : dict, optional + Dict of functions for converting values in certain columns. 
Keys can + either be integers or column labels, values are functions that take one + input argument, the cell (not column) content, and return the + transformed content. + encoding : str, optional + Encoding to use for UTF when reading/writing (ex. 'utf-8') + float_precision : str, optional + Specifies which converter the C engine should use for floating-point + values. The options are `None` or `high` for the ordinary converter, + `legacy` for the original lower precision pandas converter, and + `round_trip` for the round-trip converter. + """ + kwds["engine"] = "python" + return TextFileReader(*args, **kwds) + + +def _clean_na_values(na_values, keep_default_na: bool = True, floatify: bool = True): + na_fvalues: set | dict + if na_values is None: + if keep_default_na: + na_values = STR_NA_VALUES + else: + na_values = set() + na_fvalues = set() + elif isinstance(na_values, dict): + old_na_values = na_values.copy() + na_values = {} # Prevent aliasing. + + # Convert the values in the na_values dictionary + # into array-likes for further use. This is also + # where we append the default NaN values, provided + # that `keep_default_na=True`. + for k, v in old_na_values.items(): + if not is_list_like(v): + v = [v] + + if keep_default_na: + v = set(v) | STR_NA_VALUES + + na_values[k] = v + na_fvalues = {k: _floatify_na_values(v) for k, v in na_values.items()} + else: + if not is_list_like(na_values): + na_values = [na_values] + na_values = _stringify_na_values(na_values, floatify) + if keep_default_na: + na_values = na_values | STR_NA_VALUES + + na_fvalues = _floatify_na_values(na_values) + + return na_values, na_fvalues + + +def _floatify_na_values(na_values): + # create float versions of the na_values + result = set() + for v in na_values: + try: + v = float(v) + if not np.isnan(v): + result.add(v) + except (TypeError, ValueError, OverflowError): + pass + return result + + +def _stringify_na_values(na_values, floatify: bool): + """return a stringified and numeric for these values""" + result: list[str | float] = [] + for x in na_values: + result.append(str(x)) + result.append(x) + try: + v = float(x) + + # we are like 999 here + if v == int(v): + v = int(v) + result.append(f"{v}.0") + result.append(str(v)) + + if floatify: + result.append(v) + except (TypeError, ValueError, OverflowError): + pass + if floatify: + try: + result.append(int(x)) + except (TypeError, ValueError, OverflowError): + pass + return set(result) + + +def _refine_defaults_read( + dialect: str | csv.Dialect | None, + delimiter: str | None | lib.NoDefault, + delim_whitespace: bool, + engine: CSVEngine | None, + sep: str | None | lib.NoDefault, + on_bad_lines: str | Callable, + names: Sequence[Hashable] | None | lib.NoDefault, + defaults: dict[str, Any], + dtype_backend: DtypeBackend | lib.NoDefault, +): + """Validate/refine default values of input parameters of read_csv, read_table. + + Parameters + ---------- + dialect : str or csv.Dialect + If provided, this parameter will override values (default or not) for the + following parameters: `delimiter`, `doublequote`, `escapechar`, + `skipinitialspace`, `quotechar`, and `quoting`. If it is necessary to + override values, a ParserWarning will be issued. See csv.Dialect + documentation for more details. + delimiter : str or object + Alias for sep. + delim_whitespace : bool + Specifies whether or not whitespace (e.g. ``' '`` or ``'\t'``) will be + used as the sep. Equivalent to setting ``sep='\\s+'``. 
If this option + is set to True, nothing should be passed in for the ``delimiter`` + parameter. + + .. deprecated:: 2.2.0 + Use ``sep="\\s+"`` instead. + engine : {{'c', 'python'}} + Parser engine to use. The C engine is faster while the python engine is + currently more feature-complete. + sep : str or object + A delimiter provided by the user (str) or a sentinel value, i.e. + pandas._libs.lib.no_default. + on_bad_lines : str, callable + An option for handling bad lines or a sentinel value(None). + names : array-like, optional + List of column names to use. If the file contains a header row, + then you should explicitly pass ``header=0`` to override the column names. + Duplicates in this list are not allowed. + defaults: dict + Default values of input parameters. + + Returns + ------- + kwds : dict + Input parameters with correct values. + + Raises + ------ + ValueError : + If a delimiter was specified with ``sep`` (or ``delimiter``) and + ``delim_whitespace=True``. + """ + # fix types for sep, delimiter to Union(str, Any) + delim_default = defaults["delimiter"] + kwds: dict[str, Any] = {} + # gh-23761 + # + # When a dialect is passed, it overrides any of the overlapping + # parameters passed in directly. We don't want to warn if the + # default parameters were passed in (since it probably means + # that the user didn't pass them in explicitly in the first place). + # + # "delimiter" is the annoying corner case because we alias it to + # "sep" before doing comparison to the dialect values later on. + # Thus, we need a flag to indicate that we need to "override" + # the comparison to dialect values by checking if default values + # for BOTH "delimiter" and "sep" were provided. + if dialect is not None: + kwds["sep_override"] = delimiter is None and ( + sep is lib.no_default or sep == delim_default + ) + + if delimiter and (sep is not lib.no_default): + raise ValueError("Specified a sep and a delimiter; you can only specify one.") + + kwds["names"] = None if names is lib.no_default else names + + # Alias sep -> delimiter. + if delimiter is None: + delimiter = sep + + if delim_whitespace and (delimiter is not lib.no_default): + raise ValueError( + "Specified a delimiter with both sep and " + "delim_whitespace=True; you can only specify one." + ) + + if delimiter == "\n": + raise ValueError( + r"Specified \n as separator or delimiter. This forces the python engine " + "which does not accept a line terminator. 
Hence it is not allowed to use " + "the line terminator as separator.", + ) + + if delimiter is lib.no_default: + # assign default separator value + kwds["delimiter"] = delim_default + else: + kwds["delimiter"] = delimiter + + if engine is not None: + kwds["engine_specified"] = True + else: + kwds["engine"] = "c" + kwds["engine_specified"] = False + + if on_bad_lines == "error": + kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.ERROR + elif on_bad_lines == "warn": + kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.WARN + elif on_bad_lines == "skip": + kwds["on_bad_lines"] = ParserBase.BadLineHandleMethod.SKIP + elif callable(on_bad_lines): + if engine not in ["python", "pyarrow"]: + raise ValueError( + "on_bad_line can only be a callable function " + "if engine='python' or 'pyarrow'" + ) + kwds["on_bad_lines"] = on_bad_lines + else: + raise ValueError(f"Argument {on_bad_lines} is invalid for on_bad_lines") + + check_dtype_backend(dtype_backend) + + kwds["dtype_backend"] = dtype_backend + + return kwds + + +def _extract_dialect(kwds: dict[str, Any]) -> csv.Dialect | None: + """ + Extract concrete csv dialect instance. + + Returns + ------- + csv.Dialect or None + """ + if kwds.get("dialect") is None: + return None + + dialect = kwds["dialect"] + if dialect in csv.list_dialects(): + dialect = csv.get_dialect(dialect) + + _validate_dialect(dialect) + + return dialect + + +MANDATORY_DIALECT_ATTRS = ( + "delimiter", + "doublequote", + "escapechar", + "skipinitialspace", + "quotechar", + "quoting", +) + + +def _validate_dialect(dialect: csv.Dialect) -> None: + """ + Validate csv dialect instance. + + Raises + ------ + ValueError + If incorrect dialect is provided. + """ + for param in MANDATORY_DIALECT_ATTRS: + if not hasattr(dialect, param): + raise ValueError(f"Invalid dialect {dialect} provided") + + +def _merge_with_dialect_properties( + dialect: csv.Dialect, + defaults: dict[str, Any], +) -> dict[str, Any]: + """ + Merge default kwargs in TextFileReader with dialect parameters. + + Parameters + ---------- + dialect : csv.Dialect + Concrete csv dialect. See csv.Dialect documentation for more details. + defaults : dict + Keyword arguments passed to TextFileReader. + + Returns + ------- + kwds : dict + Updated keyword arguments, merged with dialect parameters. + """ + kwds = defaults.copy() + + for param in MANDATORY_DIALECT_ATTRS: + dialect_val = getattr(dialect, param) + + parser_default = parser_defaults[param] + provided = kwds.get(param, parser_default) + + # Messages for conflicting values between the dialect + # instance and the actual parameters provided. + conflict_msgs = [] + + # Don't warn if the default parameter was passed in, + # even if it conflicts with the dialect (gh-23761). + if provided not in (parser_default, dialect_val): + msg = ( + f"Conflicting values for '{param}': '{provided}' was " + f"provided, but the dialect specifies '{dialect_val}'. " + "Using the dialect-specified value." + ) + + # Annoying corner case for not warning about + # conflicts between dialect and delimiter parameter. + # Refer to the outer "_read_" function for more info. + if not (param == "delimiter" and kwds.pop("sep_override", False)): + conflict_msgs.append(msg) + + if conflict_msgs: + warnings.warn( + "\n\n".join(conflict_msgs), ParserWarning, stacklevel=find_stack_level() + ) + kwds[param] = dialect_val + return kwds + + +def _validate_skipfooter(kwds: dict[str, Any]) -> None: + """ + Check whether skipfooter is compatible with other kwargs in TextFileReader. 
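
+
+    (Illustration added in review, not pandas documentation: for example,
+    ``read_csv(buf, skipfooter=1, chunksize=10)`` must raise, since the footer
+    rows can only be located once the entire file has been read, which is
+    incompatible with lazy, chunked iteration.)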
+
+    Parameters
+    ----------
+    kwds : dict
+        Keyword arguments passed to TextFileReader.
+
+    Raises
+    ------
+    ValueError
+        If skipfooter is not compatible with other parameters.
+    """
+    if kwds.get("skipfooter"):
+        if kwds.get("iterator") or kwds.get("chunksize"):
+            raise ValueError("'skipfooter' not supported for iteration")
+        if kwds.get("nrows"):
+            raise ValueError("'skipfooter' not supported with 'nrows'")
diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/test_aggregation.py b/moondream/lib/python3.10/site-packages/pandas/tests/test_aggregation.py
new file mode 100644
index 0000000000000000000000000000000000000000..7695c953712ed9925e4e804d0db1e8cf606a97eb
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/pandas/tests/test_aggregation.py
@@ -0,0 +1,93 @@
+import numpy as np
+import pytest
+
+from pandas.core.apply import (
+    _make_unique_kwarg_list,
+    maybe_mangle_lambdas,
+)
+
+
+def test_maybe_mangle_lambdas_passthrough():
+    assert maybe_mangle_lambdas("mean") == "mean"
+    assert maybe_mangle_lambdas(lambda x: x).__name__ == "<lambda>"
+    # don't mangle a single lambda.
+    assert maybe_mangle_lambdas([lambda x: x])[0].__name__ == "<lambda>"
+
+
+def test_maybe_mangle_lambdas_listlike():
+    aggfuncs = [lambda x: 1, lambda x: 2]
+    result = maybe_mangle_lambdas(aggfuncs)
+    assert result[0].__name__ == "<lambda_0>"
+    assert result[1].__name__ == "<lambda_1>"
+    assert aggfuncs[0](None) == result[0](None)
+    assert aggfuncs[1](None) == result[1](None)
+
+
+def test_maybe_mangle_lambdas():
+    func = {"A": [lambda x: 0, lambda x: 1]}
+    result = maybe_mangle_lambdas(func)
+    assert result["A"][0].__name__ == "<lambda_0>"
+    assert result["A"][1].__name__ == "<lambda_1>"
+
+
+def test_maybe_mangle_lambdas_args():
+    func = {"A": [lambda x, a, b=1: (0, a, b), lambda x: 1]}
+    result = maybe_mangle_lambdas(func)
+    assert result["A"][0].__name__ == "<lambda_0>"
+    assert result["A"][1].__name__ == "<lambda_1>"
+
+    assert func["A"][0](0, 1) == (0, 1, 1)
+    assert func["A"][0](0, 1, 2) == (0, 1, 2)
+    assert func["A"][0](0, 2, b=3) == (0, 2, 3)
+
+
+def test_maybe_mangle_lambdas_named():
+    func = {"C": np.mean, "D": {"foo": np.mean, "bar": np.mean}}
+    result = maybe_mangle_lambdas(func)
+    assert result == func
+
+
+@pytest.mark.parametrize(
+    "order, expected_reorder",
+    [
+        (
+            [
+                ("height", "<lambda>"),
+                ("height", "max"),
+                ("weight", "max"),
+                ("height", "<lambda>"),
+                ("weight", "<lambda>"),
+            ],
+            [
+                ("height", "<lambda>_0"),
+                ("height", "max"),
+                ("weight", "max"),
+                ("height", "<lambda>_1"),
+                ("weight", "<lambda>"),
+            ],
+        ),
+        (
+            [
+                ("col2", "min"),
+                ("col1", "<lambda>"),
+                ("col1", "<lambda>"),
+                ("col1", "<lambda>"),
+            ],
+            [
+                ("col2", "min"),
+                ("col1", "<lambda>_0"),
+                ("col1", "<lambda>_1"),
+                ("col1", "<lambda>_2"),
+            ],
+        ),
+        (
+            [("col", "<lambda>"), ("col", "<lambda>"), ("col", "<lambda>")],
+            [("col", "<lambda>_0"), ("col", "<lambda>_1"), ("col", "<lambda>_2")],
+        ),
+    ],
+)
+def test_make_unique(order, expected_reorder):
+    # GH 27519, test if make_unique function reorders correctly
+    result = _make_unique_kwarg_list(order)
+
+    assert result == expected_reorder
diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/test_common.py b/moondream/lib/python3.10/site-packages/pandas/tests/test_common.py
new file mode 100644
index 0000000000000000000000000000000000000000..e8a1c961c8cb6e5b1014f6baa193d4593d85d981
--- /dev/null
+++ b/moondream/lib/python3.10/site-packages/pandas/tests/test_common.py
@@ -0,0 +1,267 @@
+import collections
+from functools import partial
+import string
+import subprocess
+import sys
+import textwrap
+
+import numpy as np
+import pytest
+
+import pandas as pd
+from pandas import Series
+import pandas._testing as tm
+from pandas.core import ops
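+# Review note (not part of pandas): pandas.core.common is imported as `com`
+# for the grab-bag helpers exercised below, e.g.
+#   >>> com.get_callable_name(lambda x: x)
+#   '<lambda>'
+#   >>> com.any_none(1, 2, None)
+#   True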
+import pandas.core.common as com
+from pandas.util.version import Version
+
+
+def test_get_callable_name():
+    getname = com.get_callable_name
+
+    def fn(x):
+        return x
+
+    lambda_ = lambda x: x
+    part1 = partial(fn)
+    part2 = partial(part1)
+
+    class somecall:
+        def __call__(self):
+            # This shouldn't actually get called below; somecall.__init__
+            # should.
+            raise NotImplementedError
+
+    assert getname(fn) == "fn"
+    assert getname(lambda_) == "<lambda>"
+    assert getname(part1) == "fn"
+    assert getname(part2) == "fn"
+    assert getname(somecall()) == "somecall"
+    assert getname(1) is None
+
+
+def test_any_none():
+    assert com.any_none(1, 2, 3, None)
+    assert not com.any_none(1, 2, 3, 4)
+
+
+def test_all_not_none():
+    assert com.all_not_none(1, 2, 3, 4)
+    assert not com.all_not_none(1, 2, 3, None)
+    assert not com.all_not_none(None, None, None, None)
+
+
+def test_random_state():
+    # Check with seed
+    state = com.random_state(5)
+    assert state.uniform() == np.random.RandomState(5).uniform()
+
+    # Check with random state object
+    state2 = np.random.RandomState(10)
+    assert com.random_state(state2).uniform() == np.random.RandomState(10).uniform()
+
+    # check with no arg random state
+    assert com.random_state() is np.random
+
+    # check array-like
+    # GH32503
+    state_arr_like = np.random.default_rng(None).integers(
+        0, 2**31, size=624, dtype="uint32"
+    )
+    assert (
+        com.random_state(state_arr_like).uniform()
+        == np.random.RandomState(state_arr_like).uniform()
+    )
+
+    # Check BitGenerators
+    # GH32503
+    assert (
+        com.random_state(np.random.MT19937(3)).uniform()
+        == np.random.RandomState(np.random.MT19937(3)).uniform()
+    )
+    assert (
+        com.random_state(np.random.PCG64(11)).uniform()
+        == np.random.RandomState(np.random.PCG64(11)).uniform()
+    )
+
+    # Error for floats or strings
+    msg = (
+        "random_state must be an integer, array-like, a BitGenerator, Generator, "
+        "a numpy RandomState, or None"
+    )
+    with pytest.raises(ValueError, match=msg):
+        com.random_state("test")
+
+    with pytest.raises(ValueError, match=msg):
+        com.random_state(5.5)
+
+
+@pytest.mark.parametrize(
+    "left, right, expected",
+    [
+        (Series([1], name="x"), Series([2], name="x"), "x"),
+        (Series([1], name="x"), Series([2], name="y"), None),
+        (Series([1]), Series([2], name="x"), None),
+        (Series([1], name="x"), Series([2]), None),
+        (Series([1], name="x"), [2], "x"),
+        ([1], Series([2], name="y"), "y"),
+        # matching NAs
+        (Series([1], name=np.nan), pd.Index([], name=np.nan), np.nan),
+        (Series([1], name=np.nan), pd.Index([], name=pd.NaT), None),
+        (Series([1], name=pd.NA), pd.Index([], name=pd.NA), pd.NA),
+        # tuple name GH#39757
+        (
+            Series([1], name=np.int64(1)),
+            pd.Index([], name=(np.int64(1), np.int64(2))),
+            None,
+        ),
+        (
+            Series([1], name=(np.int64(1), np.int64(2))),
+            pd.Index([], name=(np.int64(1), np.int64(2))),
+            (np.int64(1), np.int64(2)),
+        ),
+        pytest.param(
+            Series([1], name=(np.float64("nan"), np.int64(2))),
+            pd.Index([], name=(np.float64("nan"), np.int64(2))),
+            (np.float64("nan"), np.int64(2)),
+            marks=pytest.mark.xfail(
+                reason="Not checking for matching NAs inside tuples."
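+                # Review note: the scalar-name cases above treat identical NAs
+                # as matching, but that check does not recurse into tuple
+                # names, so a NaN component leaves two otherwise-equal tuples
+                # unequal (NaN != NaN), hence the xfail.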
+            ),
+        ),
+    ],
+)
+def test_maybe_match_name(left, right, expected):
+    res = ops.common._maybe_match_name(left, right)
+    assert res is expected or res == expected
+
+
+def test_standardize_mapping():
+    # No uninitialized defaultdicts
+    msg = r"to_dict\(\) only accepts initialized defaultdicts"
+    with pytest.raises(TypeError, match=msg):
+        com.standardize_mapping(collections.defaultdict)
+
+    # No non-mapping subtypes, instance
+    msg = "unsupported type: <class 'list'>"
+    with pytest.raises(TypeError, match=msg):
+        com.standardize_mapping([])
+
+    # No non-mapping subtypes, class
+    with pytest.raises(TypeError, match=msg):
+        com.standardize_mapping(list)
+
+    fill = {"bad": "data"}
+    assert com.standardize_mapping(fill) == dict
+
+    # Convert instance to type
+    assert com.standardize_mapping({}) == dict
+
+    dd = collections.defaultdict(list)
+    assert isinstance(com.standardize_mapping(dd), partial)
+
+
+def test_git_version():
+    # GH 21295
+    git_version = pd.__git_version__
+    assert len(git_version) == 40
+    assert all(c in string.hexdigits for c in git_version)
+
+
+def test_version_tag():
+    version = Version(pd.__version__)
+    try:
+        version > Version("0.0.1")
+    except TypeError:
+        raise ValueError(
+            "No git tags exist, please sync tags between upstream and your repo"
+        )
+
+
+@pytest.mark.parametrize(
+    "obj", [(obj,) for obj in pd.__dict__.values() if callable(obj)]
+)
+def test_serializable(obj):
+    # GH 35611
+    unpickled = tm.round_trip_pickle(obj)
+    assert type(obj) == type(unpickled)
+
+
+class TestIsBoolIndexer:
+    def test_non_bool_array_with_na(self):
+        # in particular, this should not raise
+        arr = np.array(["A", "B", np.nan], dtype=object)
+        assert not com.is_bool_indexer(arr)
+
+    def test_list_subclass(self):
+        # GH#42433
+
+        class MyList(list):
+            pass
+
+        val = MyList(["a"])
+
+        assert not com.is_bool_indexer(val)
+
+        val = MyList([True])
+        assert com.is_bool_indexer(val)
+
+    def test_frozenlist(self):
+        # GH#42461
+        data = {"col1": [1, 2], "col2": [3, 4]}
+        df = pd.DataFrame(data=data)
+
+        frozen = df.index.names[1:]
+        assert not com.is_bool_indexer(frozen)
+
+        result = df[frozen]
+        expected = df[[]]
+        tm.assert_frame_equal(result, expected)
+
+
+@pytest.mark.parametrize("with_exception", [True, False])
+def test_temp_setattr(with_exception):
+    # GH#45954
+    ser = Series(dtype=object)
+    ser.name = "first"
+    # Raise a ValueError in either case to satisfy pytest.raises
+    match = "Inside exception raised" if with_exception else "Outside exception raised"
+    with pytest.raises(ValueError, match=match):
+        with com.temp_setattr(ser, "name", "second"):
+            assert ser.name == "second"
+            if with_exception:
+                raise ValueError("Inside exception raised")
+        raise ValueError("Outside exception raised")
+    assert ser.name == "first"
+
+
+@pytest.mark.single_cpu
+def test_str_size():
+    # GH#21758
+    a = "a"
+    expected = sys.getsizeof(a)
+    pyexe = sys.executable.replace("\\", "/")
+    call = [
+        pyexe,
+        "-c",
+        "a='a';import sys;sys.getsizeof(a);import pandas;print(sys.getsizeof(a));",
+    ]
+    result = subprocess.check_output(call).decode()[-4:-1].strip("\n")
+    assert int(result) == int(expected)
+
+
+@pytest.mark.single_cpu
+def test_bz2_missing_import():
+    # Check whether bz2 missing import is handled correctly (issue #53857)
+    code = """
+        import sys
+        sys.modules['bz2'] = None
+        import pytest
+        import pandas as pd
+        from pandas.compat import get_bz2_file
+        msg = 'bz2 module not available.'
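+        # (review note) setting sys.modules['bz2'] = None makes any later
+        # ``import bz2`` raise ImportError, so get_bz2_file() is expected to
+        # surface the friendlier RuntimeError matched below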
+ with pytest.raises(RuntimeError, match=msg): + get_bz2_file() + """ + code = textwrap.dedent(code) + call = [sys.executable, "-c", code] + subprocess.check_output(call) diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/test_nanops.py b/moondream/lib/python3.10/site-packages/pandas/tests/test_nanops.py new file mode 100644 index 0000000000000000000000000000000000000000..a50054f33f382ed913261e0cafd944c2fd86aaa3 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/test_nanops.py @@ -0,0 +1,1274 @@ +from functools import partial + +import numpy as np +import pytest + +import pandas.util._test_decorators as td + +from pandas.core.dtypes.common import is_integer_dtype + +import pandas as pd +from pandas import ( + Series, + isna, +) +import pandas._testing as tm +from pandas.core import nanops + +use_bn = nanops._USE_BOTTLENECK + + +@pytest.fixture +def disable_bottleneck(monkeypatch): + with monkeypatch.context() as m: + m.setattr(nanops, "_USE_BOTTLENECK", False) + yield + + +@pytest.fixture +def arr_shape(): + return 11, 7 + + +@pytest.fixture +def arr_float(arr_shape): + return np.random.default_rng(2).standard_normal(arr_shape) + + +@pytest.fixture +def arr_complex(arr_float): + return arr_float + arr_float * 1j + + +@pytest.fixture +def arr_int(arr_shape): + return np.random.default_rng(2).integers(-10, 10, arr_shape) + + +@pytest.fixture +def arr_bool(arr_shape): + return np.random.default_rng(2).integers(0, 2, arr_shape) == 0 + + +@pytest.fixture +def arr_str(arr_float): + return np.abs(arr_float).astype("S") + + +@pytest.fixture +def arr_utf(arr_float): + return np.abs(arr_float).astype("U") + + +@pytest.fixture +def arr_date(arr_shape): + return np.random.default_rng(2).integers(0, 20000, arr_shape).astype("M8[ns]") + + +@pytest.fixture +def arr_tdelta(arr_shape): + return np.random.default_rng(2).integers(0, 20000, arr_shape).astype("m8[ns]") + + +@pytest.fixture +def arr_nan(arr_shape): + return np.tile(np.nan, arr_shape) + + +@pytest.fixture +def arr_float_nan(arr_float, arr_nan): + return np.vstack([arr_float, arr_nan]) + + +@pytest.fixture +def arr_nan_float1(arr_nan, arr_float): + return np.vstack([arr_nan, arr_float]) + + +@pytest.fixture +def arr_nan_nan(arr_nan): + return np.vstack([arr_nan, arr_nan]) + + +@pytest.fixture +def arr_inf(arr_float): + return arr_float * np.inf + + +@pytest.fixture +def arr_float_inf(arr_float, arr_inf): + return np.vstack([arr_float, arr_inf]) + + +@pytest.fixture +def arr_nan_inf(arr_nan, arr_inf): + return np.vstack([arr_nan, arr_inf]) + + +@pytest.fixture +def arr_float_nan_inf(arr_float, arr_nan, arr_inf): + return np.vstack([arr_float, arr_nan, arr_inf]) + + +@pytest.fixture +def arr_nan_nan_inf(arr_nan, arr_inf): + return np.vstack([arr_nan, arr_nan, arr_inf]) + + +@pytest.fixture +def arr_obj( + arr_float, arr_int, arr_bool, arr_complex, arr_str, arr_utf, arr_date, arr_tdelta +): + return np.vstack( + [ + arr_float.astype("O"), + arr_int.astype("O"), + arr_bool.astype("O"), + arr_complex.astype("O"), + arr_str.astype("O"), + arr_utf.astype("O"), + arr_date.astype("O"), + arr_tdelta.astype("O"), + ] + ) + + +@pytest.fixture +def arr_nan_nanj(arr_nan): + with np.errstate(invalid="ignore"): + return arr_nan + arr_nan * 1j + + +@pytest.fixture +def arr_complex_nan(arr_complex, arr_nan_nanj): + with np.errstate(invalid="ignore"): + return np.vstack([arr_complex, arr_nan_nanj]) + + +@pytest.fixture +def arr_nan_infj(arr_inf): + with np.errstate(invalid="ignore"): + return arr_inf * 1j + + +@pytest.fixture +def 
arr_complex_nan_infj(arr_complex, arr_nan_infj): + with np.errstate(invalid="ignore"): + return np.vstack([arr_complex, arr_nan_infj]) + + +@pytest.fixture +def arr_float_1d(arr_float): + return arr_float[:, 0] + + +@pytest.fixture +def arr_nan_1d(arr_nan): + return arr_nan[:, 0] + + +@pytest.fixture +def arr_float_nan_1d(arr_float_nan): + return arr_float_nan[:, 0] + + +@pytest.fixture +def arr_float1_nan_1d(arr_float1_nan): + return arr_float1_nan[:, 0] + + +@pytest.fixture +def arr_nan_float1_1d(arr_nan_float1): + return arr_nan_float1[:, 0] + + +class TestnanopsDataFrame: + def setup_method(self): + nanops._USE_BOTTLENECK = False + + arr_shape = (11, 7) + + self.arr_float = np.random.default_rng(2).standard_normal(arr_shape) + self.arr_float1 = np.random.default_rng(2).standard_normal(arr_shape) + self.arr_complex = self.arr_float + self.arr_float1 * 1j + self.arr_int = np.random.default_rng(2).integers(-10, 10, arr_shape) + self.arr_bool = np.random.default_rng(2).integers(0, 2, arr_shape) == 0 + self.arr_str = np.abs(self.arr_float).astype("S") + self.arr_utf = np.abs(self.arr_float).astype("U") + self.arr_date = ( + np.random.default_rng(2).integers(0, 20000, arr_shape).astype("M8[ns]") + ) + self.arr_tdelta = ( + np.random.default_rng(2).integers(0, 20000, arr_shape).astype("m8[ns]") + ) + + self.arr_nan = np.tile(np.nan, arr_shape) + self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan]) + self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan]) + self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1]) + self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan]) + + self.arr_inf = self.arr_float * np.inf + self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf]) + + self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf]) + self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan, self.arr_inf]) + self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan, self.arr_inf]) + self.arr_obj = np.vstack( + [ + self.arr_float.astype("O"), + self.arr_int.astype("O"), + self.arr_bool.astype("O"), + self.arr_complex.astype("O"), + self.arr_str.astype("O"), + self.arr_utf.astype("O"), + self.arr_date.astype("O"), + self.arr_tdelta.astype("O"), + ] + ) + + with np.errstate(invalid="ignore"): + self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j + self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj]) + + self.arr_nan_infj = self.arr_inf * 1j + self.arr_complex_nan_infj = np.vstack([self.arr_complex, self.arr_nan_infj]) + + self.arr_float_2d = self.arr_float + self.arr_float1_2d = self.arr_float1 + + self.arr_nan_2d = self.arr_nan + self.arr_float_nan_2d = self.arr_float_nan + self.arr_float1_nan_2d = self.arr_float1_nan + self.arr_nan_float1_2d = self.arr_nan_float1 + + self.arr_float_1d = self.arr_float[:, 0] + self.arr_float1_1d = self.arr_float1[:, 0] + + self.arr_nan_1d = self.arr_nan[:, 0] + self.arr_float_nan_1d = self.arr_float_nan[:, 0] + self.arr_float1_nan_1d = self.arr_float1_nan[:, 0] + self.arr_nan_float1_1d = self.arr_nan_float1[:, 0] + + def teardown_method(self): + nanops._USE_BOTTLENECK = use_bn + + def check_results(self, targ, res, axis, check_dtype=True): + res = getattr(res, "asm8", res) + + if ( + axis != 0 + and hasattr(targ, "shape") + and targ.ndim + and targ.shape != res.shape + ): + res = np.split(res, [targ.shape[0]], axis=0)[0] + + try: + tm.assert_almost_equal(targ, res, check_dtype=check_dtype) + except AssertionError: + # handle timedelta dtypes + if hasattr(targ, "dtype") and targ.dtype == "m8[ns]": 
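+                # (review note) timedelta64 results have no complex/float
+                # fallback representation, so a comparison failure here is
+                # genuine and is re-raised as-is.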
+ raise + + # There are sometimes rounding errors with + # complex and object dtypes. + # If it isn't one of those, re-raise the error. + if not hasattr(res, "dtype") or res.dtype.kind not in ["c", "O"]: + raise + # convert object dtypes to something that can be split into + # real and imaginary parts + if res.dtype.kind == "O": + if targ.dtype.kind != "O": + res = res.astype(targ.dtype) + else: + cast_dtype = "c16" if hasattr(np, "complex128") else "f8" + res = res.astype(cast_dtype) + targ = targ.astype(cast_dtype) + # there should never be a case where numpy returns an object + # but nanops doesn't, so make that an exception + elif targ.dtype.kind == "O": + raise + tm.assert_almost_equal(np.real(targ), np.real(res), check_dtype=check_dtype) + tm.assert_almost_equal(np.imag(targ), np.imag(res), check_dtype=check_dtype) + + def check_fun_data( + self, + testfunc, + targfunc, + testarval, + targarval, + skipna, + check_dtype=True, + empty_targfunc=None, + **kwargs, + ): + for axis in list(range(targarval.ndim)) + [None]: + targartempval = targarval if skipna else testarval + if skipna and empty_targfunc and isna(targartempval).all(): + targ = empty_targfunc(targartempval, axis=axis, **kwargs) + else: + targ = targfunc(targartempval, axis=axis, **kwargs) + + if targartempval.dtype == object and ( + targfunc is np.any or targfunc is np.all + ): + # GH#12863 the numpy functions will retain e.g. floatiness + if isinstance(targ, np.ndarray): + targ = targ.astype(bool) + else: + targ = bool(targ) + + res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs) + + if ( + isinstance(targ, np.complex128) + and isinstance(res, float) + and np.isnan(targ) + and np.isnan(res) + ): + # GH#18463 + targ = res + + self.check_results(targ, res, axis, check_dtype=check_dtype) + if skipna: + res = testfunc(testarval, axis=axis, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + if axis is None: + res = testfunc(testarval, skipna=skipna, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + if skipna and axis is None: + res = testfunc(testarval, **kwargs) + self.check_results(targ, res, axis, check_dtype=check_dtype) + + if testarval.ndim <= 1: + return + + # Recurse on lower-dimension + testarval2 = np.take(testarval, 0, axis=-1) + targarval2 = np.take(targarval, 0, axis=-1) + self.check_fun_data( + testfunc, + targfunc, + testarval2, + targarval2, + skipna=skipna, + check_dtype=check_dtype, + empty_targfunc=empty_targfunc, + **kwargs, + ) + + def check_fun( + self, testfunc, targfunc, testar, skipna, empty_targfunc=None, **kwargs + ): + targar = testar + if testar.endswith("_nan") and hasattr(self, testar[:-4]): + targar = testar[:-4] + + testarval = getattr(self, testar) + targarval = getattr(self, targar) + self.check_fun_data( + testfunc, + targfunc, + testarval, + targarval, + skipna=skipna, + empty_targfunc=empty_targfunc, + **kwargs, + ) + + def check_funs( + self, + testfunc, + targfunc, + skipna, + allow_complex=True, + allow_all_nan=True, + allow_date=True, + allow_tdelta=True, + allow_obj=True, + **kwargs, + ): + self.check_fun(testfunc, targfunc, "arr_float", skipna, **kwargs) + self.check_fun(testfunc, targfunc, "arr_float_nan", skipna, **kwargs) + self.check_fun(testfunc, targfunc, "arr_int", skipna, **kwargs) + self.check_fun(testfunc, targfunc, "arr_bool", skipna, **kwargs) + objs = [ + self.arr_float.astype("O"), + self.arr_int.astype("O"), + self.arr_bool.astype("O"), + ] + + if allow_all_nan: + self.check_fun(testfunc, targfunc, "arr_nan", 
skipna, **kwargs) + + if allow_complex: + self.check_fun(testfunc, targfunc, "arr_complex", skipna, **kwargs) + self.check_fun(testfunc, targfunc, "arr_complex_nan", skipna, **kwargs) + if allow_all_nan: + self.check_fun(testfunc, targfunc, "arr_nan_nanj", skipna, **kwargs) + objs += [self.arr_complex.astype("O")] + + if allow_date: + targfunc(self.arr_date) + self.check_fun(testfunc, targfunc, "arr_date", skipna, **kwargs) + objs += [self.arr_date.astype("O")] + + if allow_tdelta: + try: + targfunc(self.arr_tdelta) + except TypeError: + pass + else: + self.check_fun(testfunc, targfunc, "arr_tdelta", skipna, **kwargs) + objs += [self.arr_tdelta.astype("O")] + + if allow_obj: + self.arr_obj = np.vstack(objs) + # some nanops handle object dtypes better than their numpy + # counterparts, so the numpy functions need to be given something + # else + if allow_obj == "convert": + targfunc = partial( + self._badobj_wrap, func=targfunc, allow_complex=allow_complex + ) + self.check_fun(testfunc, targfunc, "arr_obj", skipna, **kwargs) + + def _badobj_wrap(self, value, func, allow_complex=True, **kwargs): + if value.dtype.kind == "O": + if allow_complex: + value = value.astype("c16") + else: + value = value.astype("f8") + return func(value, **kwargs) + + @pytest.mark.parametrize( + "nan_op,np_op", [(nanops.nanany, np.any), (nanops.nanall, np.all)] + ) + def test_nan_funcs(self, nan_op, np_op, skipna): + self.check_funs(nan_op, np_op, skipna, allow_all_nan=False, allow_date=False) + + def test_nansum(self, skipna): + self.check_funs( + nanops.nansum, + np.sum, + skipna, + allow_date=False, + check_dtype=False, + empty_targfunc=np.nansum, + ) + + def test_nanmean(self, skipna): + self.check_funs( + nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False + ) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_nanmedian(self, skipna): + self.check_funs( + nanops.nanmedian, + np.median, + skipna, + allow_complex=False, + allow_date=False, + allow_obj="convert", + ) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nanvar(self, ddof, skipna): + self.check_funs( + nanops.nanvar, + np.var, + skipna, + allow_complex=False, + allow_date=False, + allow_obj="convert", + ddof=ddof, + ) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nanstd(self, ddof, skipna): + self.check_funs( + nanops.nanstd, + np.std, + skipna, + allow_complex=False, + allow_date=False, + allow_obj="convert", + ddof=ddof, + ) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nansem(self, ddof, skipna): + sp_stats = pytest.importorskip("scipy.stats") + + with np.errstate(invalid="ignore"): + self.check_funs( + nanops.nansem, + sp_stats.sem, + skipna, + allow_complex=False, + allow_date=False, + allow_tdelta=False, + allow_obj="convert", + ddof=ddof, + ) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + @pytest.mark.parametrize( + "nan_op,np_op", [(nanops.nanmin, np.min), (nanops.nanmax, np.max)] + ) + def test_nanops_with_warnings(self, nan_op, np_op, skipna): + self.check_funs(nan_op, np_op, skipna, allow_obj=False) + + def _argminmax_wrap(self, value, axis=None, func=None): + res = func(value, axis) + nans = np.min(value, axis) + nullnan = isna(nans) + if res.ndim: + res[nullnan] = -1 + elif ( + hasattr(nullnan, "all") + and nullnan.all() + or not hasattr(nullnan, "all") + and nullnan + ): + res = -1 + return res + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_nanargmax(self, skipna): + func = partial(self._argminmax_wrap, func=np.argmax) + 
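+        # (review note) _argminmax_wrap above patches the numpy reference so
+        # that all-NaN slices come back as -1, the placeholder nanargmax uses
+        # for fully-missing data.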
self.check_funs(nanops.nanargmax, func, skipna, allow_obj=False) + + @pytest.mark.filterwarnings("ignore::RuntimeWarning") + def test_nanargmin(self, skipna): + func = partial(self._argminmax_wrap, func=np.argmin) + self.check_funs(nanops.nanargmin, func, skipna, allow_obj=False) + + def _skew_kurt_wrap(self, values, axis=None, func=None): + if not isinstance(values.dtype.type, np.floating): + values = values.astype("f8") + result = func(values, axis=axis, bias=False) + # fix for handling cases where all elements in an axis are the same + if isinstance(result, np.ndarray): + result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0 + return result + elif np.max(values) == np.min(values): + return 0.0 + return result + + def test_nanskew(self, skipna): + sp_stats = pytest.importorskip("scipy.stats") + + func = partial(self._skew_kurt_wrap, func=sp_stats.skew) + with np.errstate(invalid="ignore"): + self.check_funs( + nanops.nanskew, + func, + skipna, + allow_complex=False, + allow_date=False, + allow_tdelta=False, + ) + + def test_nankurt(self, skipna): + sp_stats = pytest.importorskip("scipy.stats") + + func1 = partial(sp_stats.kurtosis, fisher=True) + func = partial(self._skew_kurt_wrap, func=func1) + with np.errstate(invalid="ignore"): + self.check_funs( + nanops.nankurt, + func, + skipna, + allow_complex=False, + allow_date=False, + allow_tdelta=False, + ) + + def test_nanprod(self, skipna): + self.check_funs( + nanops.nanprod, + np.prod, + skipna, + allow_date=False, + allow_tdelta=False, + empty_targfunc=np.nanprod, + ) + + def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs): + res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs) + res01 = checkfun( + self.arr_float_2d, + self.arr_float1_2d, + min_periods=len(self.arr_float_2d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ0, res00) + tm.assert_almost_equal(targ0, res01) + + res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, **kwargs) + res11 = checkfun( + self.arr_float_nan_2d, + self.arr_float1_nan_2d, + min_periods=len(self.arr_float_2d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ1, res10) + tm.assert_almost_equal(targ1, res11) + + targ2 = np.nan + res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs) + res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs) + res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs) + res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, **kwargs) + res24 = checkfun( + self.arr_float_nan_2d, + self.arr_nan_float1_2d, + min_periods=len(self.arr_float_2d) - 1, + **kwargs, + ) + res25 = checkfun( + self.arr_float_2d, + self.arr_float1_2d, + min_periods=len(self.arr_float_2d) + 1, + **kwargs, + ) + tm.assert_almost_equal(targ2, res20) + tm.assert_almost_equal(targ2, res21) + tm.assert_almost_equal(targ2, res22) + tm.assert_almost_equal(targ2, res23) + tm.assert_almost_equal(targ2, res24) + tm.assert_almost_equal(targ2, res25) + + def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs): + res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs) + res01 = checkfun( + self.arr_float_1d, + self.arr_float1_1d, + min_periods=len(self.arr_float_1d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ0, res00) + tm.assert_almost_equal(targ0, res01) + + res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d, **kwargs) + res11 = checkfun( + self.arr_float_nan_1d, + self.arr_float1_nan_1d, + min_periods=len(self.arr_float_1d) - 1, + **kwargs, + ) + tm.assert_almost_equal(targ1, res10) + 
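+        # (review note) min_periods is len - 1 here, still at or below the
+        # count of valid pairs left after NaN removal, so res10/res11 agree
+        # with targ1; res25 below demands more pairs than exist and so must
+        # come back as NaN.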
tm.assert_almost_equal(targ1, res11) + + targ2 = np.nan + res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs) + res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs) + res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs) + res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d, **kwargs) + res24 = checkfun( + self.arr_float_nan_1d, + self.arr_nan_float1_1d, + min_periods=len(self.arr_float_1d) - 1, + **kwargs, + ) + res25 = checkfun( + self.arr_float_1d, + self.arr_float1_1d, + min_periods=len(self.arr_float_1d) + 1, + **kwargs, + ) + tm.assert_almost_equal(targ2, res20) + tm.assert_almost_equal(targ2, res21) + tm.assert_almost_equal(targ2, res22) + tm.assert_almost_equal(targ2, res23) + tm.assert_almost_equal(targ2, res24) + tm.assert_almost_equal(targ2, res25) + + def test_nancorr(self): + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1) + targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson") + + def test_nancorr_pearson(self): + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="pearson") + targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson") + + def test_nancorr_kendall(self): + sp_stats = pytest.importorskip("scipy.stats") + + targ0 = sp_stats.kendalltau(self.arr_float_2d, self.arr_float1_2d)[0] + targ1 = sp_stats.kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="kendall") + targ0 = sp_stats.kendalltau(self.arr_float_1d, self.arr_float1_1d)[0] + targ1 = sp_stats.kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="kendall") + + def test_nancorr_spearman(self): + sp_stats = pytest.importorskip("scipy.stats") + + targ0 = sp_stats.spearmanr(self.arr_float_2d, self.arr_float1_2d)[0] + targ1 = sp_stats.spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0] + self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="spearman") + targ0 = sp_stats.spearmanr(self.arr_float_1d, self.arr_float1_1d)[0] + targ1 = sp_stats.spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0] + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman") + + def test_invalid_method(self): + pytest.importorskip("scipy") + targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + msg = "Unknown method 'foo', expected one of 'kendall', 'spearman'" + with pytest.raises(ValueError, match=msg): + self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="foo") + + def test_nancov(self): + targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1] + targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1] + self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1) + targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1] + targ1 = 
np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1] + self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1) + + +@pytest.mark.parametrize( + "arr, correct", + [ + ("arr_complex", False), + ("arr_int", False), + ("arr_bool", False), + ("arr_str", False), + ("arr_utf", False), + ("arr_complex", False), + ("arr_complex_nan", False), + ("arr_nan_nanj", False), + ("arr_nan_infj", True), + ("arr_complex_nan_infj", True), + ], +) +def test_has_infs_non_float(request, arr, correct, disable_bottleneck): + val = request.getfixturevalue(arr) + while getattr(val, "ndim", True): + res0 = nanops._has_infs(val) + if correct: + assert res0 + else: + assert not res0 + + if not hasattr(val, "ndim"): + break + + # Reduce dimension for next step in the loop + val = np.take(val, 0, axis=-1) + + +@pytest.mark.parametrize( + "arr, correct", + [ + ("arr_float", False), + ("arr_nan", False), + ("arr_float_nan", False), + ("arr_nan_nan", False), + ("arr_float_inf", True), + ("arr_inf", True), + ("arr_nan_inf", True), + ("arr_float_nan_inf", True), + ("arr_nan_nan_inf", True), + ], +) +@pytest.mark.parametrize("astype", [None, "f4", "f2"]) +def test_has_infs_floats(request, arr, correct, astype, disable_bottleneck): + val = request.getfixturevalue(arr) + if astype is not None: + val = val.astype(astype) + while getattr(val, "ndim", True): + res0 = nanops._has_infs(val) + if correct: + assert res0 + else: + assert not res0 + + if not hasattr(val, "ndim"): + break + + # Reduce dimension for next step in the loop + val = np.take(val, 0, axis=-1) + + +@pytest.mark.parametrize( + "fixture", ["arr_float", "arr_complex", "arr_int", "arr_bool", "arr_str", "arr_utf"] +) +def test_bn_ok_dtype(fixture, request, disable_bottleneck): + obj = request.getfixturevalue(fixture) + assert nanops._bn_ok_dtype(obj.dtype, "test") + + +@pytest.mark.parametrize( + "fixture", + [ + "arr_date", + "arr_tdelta", + "arr_obj", + ], +) +def test_bn_not_ok_dtype(fixture, request, disable_bottleneck): + obj = request.getfixturevalue(fixture) + assert not nanops._bn_ok_dtype(obj.dtype, "test") + + +class TestEnsureNumeric: + def test_numeric_values(self): + # Test integer + assert nanops._ensure_numeric(1) == 1 + + # Test float + assert nanops._ensure_numeric(1.1) == 1.1 + + # Test complex + assert nanops._ensure_numeric(1 + 2j) == 1 + 2j + + def test_ndarray(self): + # Test numeric ndarray + values = np.array([1, 2, 3]) + assert np.allclose(nanops._ensure_numeric(values), values) + + # Test object ndarray + o_values = values.astype(object) + assert np.allclose(nanops._ensure_numeric(o_values), values) + + # Test convertible string ndarray + s_values = np.array(["1", "2", "3"], dtype=object) + msg = r"Could not convert \['1' '2' '3'\] to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric(s_values) + + # Test non-convertible string ndarray + s_values = np.array(["foo", "bar", "baz"], dtype=object) + msg = r"Could not convert .* to numeric" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric(s_values) + + def test_convertable_values(self): + with pytest.raises(TypeError, match="Could not convert string '1' to numeric"): + nanops._ensure_numeric("1") + with pytest.raises( + TypeError, match="Could not convert string '1.1' to numeric" + ): + nanops._ensure_numeric("1.1") + with pytest.raises( + TypeError, match=r"Could not convert string '1\+1j' to numeric" + ): + nanops._ensure_numeric("1+1j") + + def test_non_convertable_values(self): + msg = "Could not convert string 'foo' to numeric" + with 
pytest.raises(TypeError, match=msg): + nanops._ensure_numeric("foo") + + # with the wrong type, python raises TypeError for us + msg = "argument must be a string or a number" + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric({}) + with pytest.raises(TypeError, match=msg): + nanops._ensure_numeric([]) + + +class TestNanvarFixedValues: + # xref GH10242 + # Samples from a normal distribution. + @pytest.fixture + def variance(self): + return 3.0 + + @pytest.fixture + def samples(self, variance): + return self.prng.normal(scale=variance**0.5, size=100000) + + def test_nanvar_all_finite(self, samples, variance): + actual_variance = nanops.nanvar(samples) + tm.assert_almost_equal(actual_variance, variance, rtol=1e-2) + + def test_nanvar_nans(self, samples, variance): + samples_test = np.nan * np.ones(2 * samples.shape[0]) + samples_test[::2] = samples + + actual_variance = nanops.nanvar(samples_test, skipna=True) + tm.assert_almost_equal(actual_variance, variance, rtol=1e-2) + + actual_variance = nanops.nanvar(samples_test, skipna=False) + tm.assert_almost_equal(actual_variance, np.nan, rtol=1e-2) + + def test_nanstd_nans(self, samples, variance): + samples_test = np.nan * np.ones(2 * samples.shape[0]) + samples_test[::2] = samples + + actual_std = nanops.nanstd(samples_test, skipna=True) + tm.assert_almost_equal(actual_std, variance**0.5, rtol=1e-2) + + actual_std = nanops.nanvar(samples_test, skipna=False) + tm.assert_almost_equal(actual_std, np.nan, rtol=1e-2) + + def test_nanvar_axis(self, samples, variance): + # Generate some sample data. + samples_unif = self.prng.uniform(size=samples.shape[0]) + samples = np.vstack([samples, samples_unif]) + + actual_variance = nanops.nanvar(samples, axis=1) + tm.assert_almost_equal( + actual_variance, np.array([variance, 1.0 / 12]), rtol=1e-2 + ) + + def test_nanvar_ddof(self): + n = 5 + samples = self.prng.uniform(size=(10000, n + 1)) + samples[:, -1] = np.nan # Force use of our own algorithm. + + variance_0 = nanops.nanvar(samples, axis=1, skipna=True, ddof=0).mean() + variance_1 = nanops.nanvar(samples, axis=1, skipna=True, ddof=1).mean() + variance_2 = nanops.nanvar(samples, axis=1, skipna=True, ddof=2).mean() + + # The unbiased estimate. + var = 1.0 / 12 + tm.assert_almost_equal(variance_1, var, rtol=1e-2) + + # The underestimated variance. + tm.assert_almost_equal(variance_0, (n - 1.0) / n * var, rtol=1e-2) + + # The overestimated variance. + tm.assert_almost_equal(variance_2, (n - 1.0) / (n - 2.0) * var, rtol=1e-2) + + @pytest.mark.parametrize("axis", range(2)) + @pytest.mark.parametrize("ddof", range(3)) + def test_ground_truth(self, axis, ddof): + # Test against values that were precomputed with Numpy. + samples = np.empty((4, 4)) + samples[:3, :3] = np.array( + [ + [0.97303362, 0.21869576, 0.55560287], + [0.72980153, 0.03109364, 0.99155171], + [0.09317602, 0.60078248, 0.15871292], + ] + ) + samples[3] = samples[:, 3] = np.nan + + # Actual variances along axis=0, 1 for ddof=0, 1, 2 + variance = np.array( + [ + [ + [0.13762259, 0.05619224, 0.11568816], + [0.20643388, 0.08428837, 0.17353224], + [0.41286776, 0.16857673, 0.34706449], + ], + [ + [0.09519783, 0.16435395, 0.05082054], + [0.14279674, 0.24653093, 0.07623082], + [0.28559348, 0.49306186, 0.15246163], + ], + ] + ) + + # Test nanvar. + var = nanops.nanvar(samples, skipna=True, axis=axis, ddof=ddof) + tm.assert_almost_equal(var[:3], variance[axis, ddof]) + assert np.isnan(var[3]) + + # Test nanstd. 
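+        # (review note) with one NaN per row/column there are n = 3 valid
+        # samples, so the table above scales as 1 / (n - ddof): ddof=1 rows
+        # are 1.5x and ddof=2 rows 3x the ddof=0 values (0.13762259 * 3 is
+        # approximately 0.41286776); nanstd is simply the square root of the
+        # matching nanvar entry.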
+ std = nanops.nanstd(samples, skipna=True, axis=axis, ddof=ddof) + tm.assert_almost_equal(std[:3], variance[axis, ddof] ** 0.5) + assert np.isnan(std[3]) + + @pytest.mark.parametrize("ddof", range(3)) + def test_nanstd_roundoff(self, ddof): + # Regression test for GH 10242 (test data taken from GH 10489). Ensure + # that variance is stable. + data = Series(766897346 * np.ones(10)) + result = data.std(ddof=ddof) + assert result == 0.0 + + @property + def prng(self): + return np.random.default_rng(2) + + +class TestNanskewFixedValues: + # xref GH 11974 + # Test data + skewness value (computed with scipy.stats.skew) + @pytest.fixture + def samples(self): + return np.sin(np.linspace(0, 1, 200)) + + @pytest.fixture + def actual_skew(self): + return -0.1875895205961754 + + @pytest.mark.parametrize("val", [3075.2, 3075.3, 3075.5]) + def test_constant_series(self, val): + # xref GH 11974 + data = val * np.ones(300) + skew = nanops.nanskew(data) + assert skew == 0.0 + + def test_all_finite(self): + alpha, beta = 0.3, 0.1 + left_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nanskew(left_tailed) < 0 + + alpha, beta = 0.1, 0.3 + right_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nanskew(right_tailed) > 0 + + def test_ground_truth(self, samples, actual_skew): + skew = nanops.nanskew(samples) + tm.assert_almost_equal(skew, actual_skew) + + def test_axis(self, samples, actual_skew): + samples = np.vstack([samples, np.nan * np.ones(len(samples))]) + skew = nanops.nanskew(samples, axis=1) + tm.assert_almost_equal(skew, np.array([actual_skew, np.nan])) + + def test_nans(self, samples): + samples = np.hstack([samples, np.nan]) + skew = nanops.nanskew(samples, skipna=False) + assert np.isnan(skew) + + def test_nans_skipna(self, samples, actual_skew): + samples = np.hstack([samples, np.nan]) + skew = nanops.nanskew(samples, skipna=True) + tm.assert_almost_equal(skew, actual_skew) + + @property + def prng(self): + return np.random.default_rng(2) + + +class TestNankurtFixedValues: + # xref GH 11974 + # Test data + kurtosis value (computed with scipy.stats.kurtosis) + @pytest.fixture + def samples(self): + return np.sin(np.linspace(0, 1, 200)) + + @pytest.fixture + def actual_kurt(self): + return -1.2058303433799713 + + @pytest.mark.parametrize("val", [3075.2, 3075.3, 3075.5]) + def test_constant_series(self, val): + # xref GH 11974 + data = val * np.ones(300) + kurt = nanops.nankurt(data) + assert kurt == 0.0 + + def test_all_finite(self): + alpha, beta = 0.3, 0.1 + left_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nankurt(left_tailed) < 2 + + alpha, beta = 0.1, 0.3 + right_tailed = self.prng.beta(alpha, beta, size=100) + assert nanops.nankurt(right_tailed) < 0 + + def test_ground_truth(self, samples, actual_kurt): + kurt = nanops.nankurt(samples) + tm.assert_almost_equal(kurt, actual_kurt) + + def test_axis(self, samples, actual_kurt): + samples = np.vstack([samples, np.nan * np.ones(len(samples))]) + kurt = nanops.nankurt(samples, axis=1) + tm.assert_almost_equal(kurt, np.array([actual_kurt, np.nan])) + + def test_nans(self, samples): + samples = np.hstack([samples, np.nan]) + kurt = nanops.nankurt(samples, skipna=False) + assert np.isnan(kurt) + + def test_nans_skipna(self, samples, actual_kurt): + samples = np.hstack([samples, np.nan]) + kurt = nanops.nankurt(samples, skipna=True) + tm.assert_almost_equal(kurt, actual_kurt) + + @property + def prng(self): + return np.random.default_rng(2) + + +class TestDatetime64NaNOps: + @pytest.fixture(params=["s", 
"ms", "us", "ns"]) + def unit(self, request): + return request.param + + # Enabling mean changes the behavior of DataFrame.mean + # See https://github.com/pandas-dev/pandas/issues/24752 + def test_nanmean(self, unit): + dti = pd.date_range("2016-01-01", periods=3).as_unit(unit) + expected = dti[1] + + for obj in [dti, dti._data]: + result = nanops.nanmean(obj) + assert result == expected + + dti2 = dti.insert(1, pd.NaT) + + for obj in [dti2, dti2._data]: + result = nanops.nanmean(obj) + assert result == expected + + @pytest.mark.parametrize("constructor", ["M8", "m8"]) + def test_nanmean_skipna_false(self, constructor, unit): + dtype = f"{constructor}[{unit}]" + arr = np.arange(12).astype(np.int64).view(dtype).reshape(4, 3) + + arr[-1, -1] = "NaT" + + result = nanops.nanmean(arr, skipna=False) + assert np.isnat(result) + assert result.dtype == dtype + + result = nanops.nanmean(arr, axis=0, skipna=False) + expected = np.array([4, 5, "NaT"], dtype=arr.dtype) + tm.assert_numpy_array_equal(result, expected) + + result = nanops.nanmean(arr, axis=1, skipna=False) + expected = np.array([arr[0, 1], arr[1, 1], arr[2, 1], arr[-1, -1]]) + tm.assert_numpy_array_equal(result, expected) + + +def test_use_bottleneck(): + if nanops._BOTTLENECK_INSTALLED: + with pd.option_context("use_bottleneck", True): + assert pd.get_option("use_bottleneck") + + with pd.option_context("use_bottleneck", False): + assert not pd.get_option("use_bottleneck") + + +@pytest.mark.parametrize( + "numpy_op, expected", + [ + (np.sum, 10), + (np.nansum, 10), + (np.mean, 2.5), + (np.nanmean, 2.5), + (np.median, 2.5), + (np.nanmedian, 2.5), + (np.min, 1), + (np.max, 4), + (np.nanmin, 1), + (np.nanmax, 4), + ], +) +def test_numpy_ops(numpy_op, expected): + # GH8383 + result = numpy_op(Series([1, 2, 3, 4])) + assert result == expected + + +@pytest.mark.parametrize( + "operation", + [ + nanops.nanany, + nanops.nanall, + nanops.nansum, + nanops.nanmean, + nanops.nanmedian, + nanops.nanstd, + nanops.nanvar, + nanops.nansem, + nanops.nanargmax, + nanops.nanargmin, + nanops.nanmax, + nanops.nanmin, + nanops.nanskew, + nanops.nankurt, + nanops.nanprod, + ], +) +def test_nanops_independent_of_mask_param(operation): + # GH22764 + ser = Series([1, 2, np.nan, 3, np.nan, 4]) + mask = ser.isna() + median_expected = operation(ser._values) + median_result = operation(ser._values, mask=mask) + assert median_expected == median_result + + +@pytest.mark.parametrize("min_count", [-1, 0]) +def test_check_below_min_count_negative_or_zero_min_count(min_count): + # GH35227 + result = nanops.check_below_min_count((21, 37), None, min_count) + expected_result = False + assert result == expected_result + + +@pytest.mark.parametrize( + "mask", [None, np.array([False, False, True]), np.array([True] + 9 * [False])] +) +@pytest.mark.parametrize("min_count, expected_result", [(1, False), (101, True)]) +def test_check_below_min_count_positive_min_count(mask, min_count, expected_result): + # GH35227 + shape = (10, 10) + result = nanops.check_below_min_count(shape, mask, min_count) + assert result == expected_result + + +@td.skip_if_windows +@td.skip_if_32bit +@pytest.mark.parametrize("min_count, expected_result", [(1, False), (2812191852, True)]) +def test_check_below_min_count_large_shape(min_count, expected_result): + # GH35227 large shape used to show that the issue is fixed + shape = (2244367, 1253) + result = nanops.check_below_min_count(shape, mask=None, min_count=min_count) + assert result == expected_result + + +@pytest.mark.parametrize("func", ["nanmean", 
"nansum"]) +def test_check_bottleneck_disallow(any_real_numpy_dtype, func): + # GH 42878 bottleneck sometimes produces unreliable results for mean and sum + assert not nanops._bn_ok_dtype(np.dtype(any_real_numpy_dtype).type, func) + + +@pytest.mark.parametrize("val", [2**55, -(2**55), 20150515061816532]) +def test_nanmean_overflow(disable_bottleneck, val): + # GH 10155 + # In the previous implementation mean can overflow for int dtypes, it + # is now consistent with numpy + + ser = Series(val, index=range(500), dtype=np.int64) + result = ser.mean() + np_result = ser.values.mean() + assert result == val + assert result == np_result + assert result.dtype == np.float64 + + +@pytest.mark.parametrize( + "dtype", + [ + np.int16, + np.int32, + np.int64, + np.float32, + np.float64, + getattr(np, "float128", None), + ], +) +@pytest.mark.parametrize("method", ["mean", "std", "var", "skew", "kurt", "min", "max"]) +def test_returned_dtype(disable_bottleneck, dtype, method): + if dtype is None: + pytest.skip("np.float128 not available") + + ser = Series(range(10), dtype=dtype) + result = getattr(ser, method)() + if is_integer_dtype(dtype) and method not in ["min", "max"]: + assert result.dtype == np.float64 + else: + assert result.dtype == dtype diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py b/moondream/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py new file mode 100644 index 0000000000000000000000000000000000000000..52b5f636b1254ceddf869704a6378f4ea5012b8c --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/test_optional_dependency.py @@ -0,0 +1,100 @@ +import sys +import types + +import pytest + +from pandas.compat._optional import ( + VERSIONS, + import_optional_dependency, +) + +import pandas._testing as tm + + +def test_import_optional(): + match = "Missing .*notapackage.* pip .* conda .* notapackage" + with pytest.raises(ImportError, match=match) as exc_info: + import_optional_dependency("notapackage") + # The original exception should be there as context: + assert isinstance(exc_info.value.__context__, ImportError) + + result = import_optional_dependency("notapackage", errors="ignore") + assert result is None + + +def test_xlrd_version_fallback(): + pytest.importorskip("xlrd") + import_optional_dependency("xlrd") + + +def test_bad_version(monkeypatch): + name = "fakemodule" + module = types.ModuleType(name) + module.__version__ = "0.9.0" + sys.modules[name] = module + monkeypatch.setitem(VERSIONS, name, "1.0.0") + + match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'" + with pytest.raises(ImportError, match=match): + import_optional_dependency("fakemodule") + + # Test min_version parameter + result = import_optional_dependency("fakemodule", min_version="0.8") + assert result is module + + with tm.assert_produces_warning(UserWarning): + result = import_optional_dependency("fakemodule", errors="warn") + assert result is None + + module.__version__ = "1.0.0" # exact match is OK + result = import_optional_dependency("fakemodule") + assert result is module + + with pytest.raises(ImportError, match="Pandas requires version '1.1.0'"): + import_optional_dependency("fakemodule", min_version="1.1.0") + + with tm.assert_produces_warning(UserWarning): + result = import_optional_dependency( + "fakemodule", errors="warn", min_version="1.1.0" + ) + assert result is None + + result = import_optional_dependency( + "fakemodule", errors="ignore", min_version="1.1.0" + ) + assert result is None + + +def 
test_submodule(monkeypatch): + # Create a fake module with a submodule + name = "fakemodule" + module = types.ModuleType(name) + module.__version__ = "0.9.0" + sys.modules[name] = module + sub_name = "submodule" + submodule = types.ModuleType(sub_name) + setattr(module, sub_name, submodule) + sys.modules[f"{name}.{sub_name}"] = submodule + monkeypatch.setitem(VERSIONS, name, "1.0.0") + + match = "Pandas requires .*1.0.0.* of .fakemodule.*'0.9.0'" + with pytest.raises(ImportError, match=match): + import_optional_dependency("fakemodule.submodule") + + with tm.assert_produces_warning(UserWarning): + result = import_optional_dependency("fakemodule.submodule", errors="warn") + assert result is None + + module.__version__ = "1.0.0" # exact match is OK + result = import_optional_dependency("fakemodule.submodule") + assert result is submodule + + +def test_no_version_raises(monkeypatch): + name = "fakemodule" + module = types.ModuleType(name) + sys.modules[name] = module + monkeypatch.setitem(VERSIONS, name, "1.0.0") + + with pytest.raises(ImportError, match="Can't determine .* fakemodule"): + import_optional_dependency(name) diff --git a/moondream/lib/python3.10/site-packages/pandas/tests/test_sorting.py b/moondream/lib/python3.10/site-packages/pandas/tests/test_sorting.py new file mode 100644 index 0000000000000000000000000000000000000000..285f240028152072ed52b3657113d30a7fd63fea --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/tests/test_sorting.py @@ -0,0 +1,487 @@ +from collections import defaultdict +from datetime import datetime +from itertools import product + +import numpy as np +import pytest + +from pandas import ( + NA, + DataFrame, + MultiIndex, + Series, + array, + concat, + merge, +) +import pandas._testing as tm +from pandas.core.algorithms import safe_sort +import pandas.core.common as com +from pandas.core.sorting import ( + _decons_group_index, + get_group_index, + is_int64_overflow_possible, + lexsort_indexer, + nargsort, +) + + +@pytest.fixture +def left_right(): + low, high, n = -1 << 10, 1 << 10, 1 << 20 + left = DataFrame( + np.random.default_rng(2).integers(low, high, (n, 7)), columns=list("ABCDEFG") + ) + left["left"] = left.sum(axis=1) + + # one-2-one match + i = np.random.default_rng(2).permutation(len(left)) + right = left.iloc[i].copy() + right.columns = right.columns[:-1].tolist() + ["right"] + right.index = np.arange(len(right)) + right["right"] *= -1 + return left, right + + +class TestSorting: + @pytest.mark.slow + def test_int64_overflow(self): + B = np.concatenate((np.arange(1000), np.arange(1000), np.arange(500))) + A = np.arange(2500) + df = DataFrame( + { + "A": A, + "B": B, + "C": A, + "D": B, + "E": A, + "F": B, + "G": A, + "H": B, + "values": np.random.default_rng(2).standard_normal(2500), + } + ) + + lg = df.groupby(["A", "B", "C", "D", "E", "F", "G", "H"]) + rg = df.groupby(["H", "G", "F", "E", "D", "C", "B", "A"]) + + left = lg.sum()["values"] + right = rg.sum()["values"] + + exp_index, _ = left.index.sortlevel() + tm.assert_index_equal(left.index, exp_index) + + exp_index, _ = right.index.sortlevel(0) + tm.assert_index_equal(right.index, exp_index) + + tups = list(map(tuple, df[["A", "B", "C", "D", "E", "F", "G", "H"]].values)) + tups = com.asarray_tuplesafe(tups) + + expected = df.groupby(tups).sum()["values"] + + for k, v in expected.items(): + assert left[k] == right[k[::-1]] + assert left[k] == v + assert len(left) == len(right) + + def test_int64_overflow_groupby_large_range(self): + # GH9096 + values = range(55109) + data = 
DataFrame.from_dict({"a": values, "b": values, "c": values, "d": values}) + grouped = data.groupby(["a", "b", "c", "d"]) + assert len(grouped) == len(values) + + @pytest.mark.parametrize("agg", ["mean", "median"]) + def test_int64_overflow_groupby_large_df_shuffled(self, agg): + rs = np.random.default_rng(2) + arr = rs.integers(-1 << 12, 1 << 12, (1 << 15, 5)) + i = rs.choice(len(arr), len(arr) * 4) + arr = np.vstack((arr, arr[i])) # add some duplicate rows + + i = rs.permutation(len(arr)) + arr = arr[i] # shuffle rows + + df = DataFrame(arr, columns=list("abcde")) + df["jim"], df["joe"] = np.zeros((2, len(df))) + gr = df.groupby(list("abcde")) + + # verify this is testing what it is supposed to test! + assert is_int64_overflow_possible(gr._grouper.shape) + + mi = MultiIndex.from_arrays( + [ar.ravel() for ar in np.array_split(np.unique(arr, axis=0), 5, axis=1)], + names=list("abcde"), + ) + + res = DataFrame( + np.zeros((len(mi), 2)), columns=["jim", "joe"], index=mi + ).sort_index() + + tm.assert_frame_equal(getattr(gr, agg)(), res) + + @pytest.mark.parametrize( + "order, na_position, exp", + [ + [ + True, + "last", + list(range(5, 105)) + list(range(5)) + list(range(105, 110)), + ], + [ + True, + "first", + list(range(5)) + list(range(105, 110)) + list(range(5, 105)), + ], + [ + False, + "last", + list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)), + ], + [ + False, + "first", + list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)), + ], + ], + ) + def test_lexsort_indexer(self, order, na_position, exp): + keys = [[np.nan] * 5 + list(range(100)) + [np.nan] * 5] + result = lexsort_indexer(keys, orders=order, na_position=na_position) + tm.assert_numpy_array_equal(result, np.array(exp, dtype=np.intp)) + + @pytest.mark.parametrize( + "ascending, na_position, exp", + [ + [ + True, + "last", + list(range(5, 105)) + list(range(5)) + list(range(105, 110)), + ], + [ + True, + "first", + list(range(5)) + list(range(105, 110)) + list(range(5, 105)), + ], + [ + False, + "last", + list(range(104, 4, -1)) + list(range(5)) + list(range(105, 110)), + ], + [ + False, + "first", + list(range(5)) + list(range(105, 110)) + list(range(104, 4, -1)), + ], + ], + ) + def test_nargsort(self, ascending, na_position, exp): + # list places NaNs last, np.array(..., dtype="O") may not place NaNs first + items = np.array([np.nan] * 5 + list(range(100)) + [np.nan] * 5, dtype="O") + + # mergesort is the most difficult to get right because we want it to be + # stable. 
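+        # (A stable sort keeps equal keys in their original relative
+        # order; for the repeated NaN keys here that is what makes the
+        # expected positions deterministic.)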
+ + # According to numpy/core/tests/test_multiarray, """The number of + # sorted items must be greater than ~50 to check the actual algorithm + # because quick and merge sort fall over to insertion sort for small + # arrays.""" + + result = nargsort( + items, kind="mergesort", ascending=ascending, na_position=na_position + ) + tm.assert_numpy_array_equal(result, np.array(exp), check_dtype=False) + + +class TestMerge: + def test_int64_overflow_outer_merge(self): + # #2690, combinatorial explosion + df1 = DataFrame( + np.random.default_rng(2).standard_normal((1000, 7)), + columns=list("ABCDEF") + ["G1"], + ) + df2 = DataFrame( + np.random.default_rng(3).standard_normal((1000, 7)), + columns=list("ABCDEF") + ["G2"], + ) + result = merge(df1, df2, how="outer") + assert len(result) == 2000 + + @pytest.mark.slow + def test_int64_overflow_check_sum_col(self, left_right): + left, right = left_right + + out = merge(left, right, how="outer") + assert len(out) == len(left) + tm.assert_series_equal(out["left"], -out["right"], check_names=False) + result = out.iloc[:, :-2].sum(axis=1) + tm.assert_series_equal(out["left"], result, check_names=False) + assert result.name is None + + @pytest.mark.slow + @pytest.mark.parametrize("how", ["left", "right", "outer", "inner"]) + def test_int64_overflow_how_merge(self, left_right, how): + left, right = left_right + + out = merge(left, right, how="outer") + out.sort_values(out.columns.tolist(), inplace=True) + out.index = np.arange(len(out)) + tm.assert_frame_equal(out, merge(left, right, how=how, sort=True)) + + @pytest.mark.slow + def test_int64_overflow_sort_false_order(self, left_right): + left, right = left_right + + # check that left merge w/ sort=False maintains left frame order + out = merge(left, right, how="left", sort=False) + tm.assert_frame_equal(left, out[left.columns.tolist()]) + + out = merge(right, left, how="left", sort=False) + tm.assert_frame_equal(right, out[right.columns.tolist()]) + + @pytest.mark.slow + @pytest.mark.parametrize("how", ["left", "right", "outer", "inner"]) + @pytest.mark.parametrize("sort", [True, False]) + def test_int64_overflow_one_to_many_none_match(self, how, sort): + # one-2-many/none match + low, high, n = -1 << 10, 1 << 10, 1 << 11 + left = DataFrame( + np.random.default_rng(2).integers(low, high, (n, 7)).astype("int64"), + columns=list("ABCDEFG"), + ) + + # confirm that this is checking what it is supposed to check + shape = left.apply(Series.nunique).values + assert is_int64_overflow_possible(shape) + + # add duplicates to left frame + left = concat([left, left], ignore_index=True) + + right = DataFrame( + np.random.default_rng(3).integers(low, high, (n // 2, 7)).astype("int64"), + columns=list("ABCDEFG"), + ) + + # add duplicates & overlap with left to the right frame + i = np.random.default_rng(4).choice(len(left), n) + right = concat([right, right, left.iloc[i]], ignore_index=True) + + left["left"] = np.random.default_rng(2).standard_normal(len(left)) + right["right"] = np.random.default_rng(2).standard_normal(len(right)) + + # shuffle left & right frames + i = np.random.default_rng(5).permutation(len(left)) + left = left.iloc[i].copy() + left.index = np.arange(len(left)) + + i = np.random.default_rng(6).permutation(len(right)) + right = right.iloc[i].copy() + right.index = np.arange(len(right)) + + # manually compute outer merge + ldict, rdict = defaultdict(list), defaultdict(list) + + for idx, row in left.set_index(list("ABCDEFG")).iterrows(): + ldict[idx].append(row["left"]) + + for idx, row in 
right.set_index(list("ABCDEFG")).iterrows(): + rdict[idx].append(row["right"]) + + vals = [] + for k, lval in ldict.items(): + rval = rdict.get(k, [np.nan]) + for lv, rv in product(lval, rval): + vals.append( + k + + ( + lv, + rv, + ) + ) + + for k, rval in rdict.items(): + if k not in ldict: + vals.extend( + k + + ( + np.nan, + rv, + ) + for rv in rval + ) + + def align(df): + df = df.sort_values(df.columns.tolist()) + df.index = np.arange(len(df)) + return df + + out = DataFrame(vals, columns=list("ABCDEFG") + ["left", "right"]) + out = align(out) + + jmask = { + "left": out["left"].notna(), + "right": out["right"].notna(), + "inner": out["left"].notna() & out["right"].notna(), + "outer": np.ones(len(out), dtype="bool"), + } + + mask = jmask[how] + frame = align(out[mask].copy()) + assert mask.all() ^ mask.any() or how == "outer" + + res = merge(left, right, how=how, sort=sort) + if sort: + kcols = list("ABCDEFG") + tm.assert_frame_equal( + res[kcols].copy(), res[kcols].sort_values(kcols, kind="mergesort") + ) + + # as in GH9092 dtypes break with outer/right join + # 2021-12-18: dtype does not break anymore + tm.assert_frame_equal(frame, align(res)) + + +@pytest.mark.parametrize( + "codes_list, shape", + [ + [ + [ + np.tile([0, 1, 2, 3, 0, 1, 2, 3], 100).astype(np.int64), + np.tile([0, 2, 4, 3, 0, 1, 2, 3], 100).astype(np.int64), + np.tile([5, 1, 0, 2, 3, 0, 5, 4], 100).astype(np.int64), + ], + (4, 5, 6), + ], + [ + [ + np.tile(np.arange(10000, dtype=np.int64), 5), + np.tile(np.arange(10000, dtype=np.int64), 5), + ], + (10000, 10000), + ], + ], +) +def test_decons(codes_list, shape): + group_index = get_group_index(codes_list, shape, sort=True, xnull=True) + codes_list2 = _decons_group_index(group_index, shape) + + for a, b in zip(codes_list, codes_list2): + tm.assert_numpy_array_equal(a, b) + + +class TestSafeSort: + @pytest.mark.parametrize( + "arg, exp", + [ + [[3, 1, 2, 0, 4], [0, 1, 2, 3, 4]], + [ + np.array(list("baaacb"), dtype=object), + np.array(list("aaabbc"), dtype=object), + ], + [[], []], + ], + ) + def test_basic_sort(self, arg, exp): + result = safe_sort(np.array(arg)) + expected = np.array(exp) + tm.assert_numpy_array_equal(result, expected) + + @pytest.mark.parametrize("verify", [True, False]) + @pytest.mark.parametrize( + "codes, exp_codes", + [ + [[0, 1, 1, 2, 3, 0, -1, 4], [3, 1, 1, 2, 0, 3, -1, 4]], + [[], []], + ], + ) + def test_codes(self, verify, codes, exp_codes): + values = np.array([3, 1, 2, 0, 4]) + expected = np.array([0, 1, 2, 3, 4]) + + result, result_codes = safe_sort( + values, codes, use_na_sentinel=True, verify=verify + ) + expected_codes = np.array(exp_codes, dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_codes, expected_codes) + + def test_codes_out_of_bound(self): + values = np.array([3, 1, 2, 0, 4]) + expected = np.array([0, 1, 2, 3, 4]) + + # out of bound indices + codes = [0, 101, 102, 2, 3, 0, 99, 4] + result, result_codes = safe_sort(values, codes, use_na_sentinel=True) + expected_codes = np.array([3, -1, -1, 2, 0, 3, -1, 4], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_codes, expected_codes) + + def test_mixed_integer(self): + values = np.array(["b", 1, 0, "a", 0, "b"], dtype=object) + result = safe_sort(values) + expected = np.array([0, 0, 1, "a", "b", "b"], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + def test_mixed_integer_with_codes(self): + values = np.array(["b", 1, 0, "a"], dtype=object) + codes = [0, 1, 2, 3, 
0, -1, 1] + result, result_codes = safe_sort(values, codes) + expected = np.array([0, 1, "a", "b"], dtype=object) + expected_codes = np.array([3, 1, 0, 2, 3, -1, 1], dtype=np.intp) + tm.assert_numpy_array_equal(result, expected) + tm.assert_numpy_array_equal(result_codes, expected_codes) + + def test_unsortable(self): + # GH 13714 + arr = np.array([1, 2, datetime.now(), 0, 3], dtype=object) + msg = "'[<>]' not supported between instances of .*" + with pytest.raises(TypeError, match=msg): + safe_sort(arr) + + @pytest.mark.parametrize( + "arg, codes, err, msg", + [ + [1, None, TypeError, "Only np.ndarray, ExtensionArray, and Index"], + [np.array([0, 1, 2]), 1, TypeError, "Only list-like objects or None"], + [np.array([0, 1, 2, 1]), [0, 1], ValueError, "values should be unique"], + ], + ) + def test_exceptions(self, arg, codes, err, msg): + with pytest.raises(err, match=msg): + safe_sort(values=arg, codes=codes) + + @pytest.mark.parametrize( + "arg, exp", [[[1, 3, 2], [1, 2, 3]], [[1, 3, np.nan, 2], [1, 2, 3, np.nan]]] + ) + def test_extension_array(self, arg, exp): + a = array(arg, dtype="Int64") + result = safe_sort(a) + expected = array(exp, dtype="Int64") + tm.assert_extension_array_equal(result, expected) + + @pytest.mark.parametrize("verify", [True, False]) + def test_extension_array_codes(self, verify): + a = array([1, 3, 2], dtype="Int64") + result, codes = safe_sort(a, [0, 1, -1, 2], use_na_sentinel=True, verify=verify) + expected_values = array([1, 2, 3], dtype="Int64") + expected_codes = np.array([0, 2, -1, 1], dtype=np.intp) + tm.assert_extension_array_equal(result, expected_values) + tm.assert_numpy_array_equal(codes, expected_codes) + + +def test_mixed_str_null(nulls_fixture): + values = np.array(["b", nulls_fixture, "a", "b"], dtype=object) + result = safe_sort(values) + expected = np.array(["a", "b", "b", nulls_fixture], dtype=object) + tm.assert_numpy_array_equal(result, expected) + + +def test_safe_sort_multiindex(): + # GH#48412 + arr1 = Series([2, 1, NA, NA], dtype="Int64") + arr2 = [2, 1, 3, 3] + midx = MultiIndex.from_arrays([arr1, arr2]) + result = safe_sort(midx) + expected = MultiIndex.from_arrays( + [Series([1, 2, NA, NA], dtype="Int64"), [1, 2, 3, 3]] + ) + tm.assert_index_equal(result, expected) diff --git a/moondream/lib/python3.10/site-packages/pandas/util/__init__.py b/moondream/lib/python3.10/site-packages/pandas/util/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..82b3aa56c653cd1241872c67e9d9016df04a6c5a --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/util/__init__.py @@ -0,0 +1,29 @@ +def __getattr__(key: str): + # These imports need to be lazy to avoid circular import errors + if key == "hash_array": + from pandas.core.util.hashing import hash_array + + return hash_array + if key == "hash_pandas_object": + from pandas.core.util.hashing import hash_pandas_object + + return hash_pandas_object + if key == "Appender": + from pandas.util._decorators import Appender + + return Appender + if key == "Substitution": + from pandas.util._decorators import Substitution + + return Substitution + + if key == "cache_readonly": + from pandas.util._decorators import cache_readonly + + return cache_readonly + + raise AttributeError(f"module 'pandas.util' has no attribute '{key}'") + + +def capitalize_first_letter(s): + return s[:1].upper() + s[1:] diff --git a/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_decorators.cpython-310.pyc 
b/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_decorators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18bf13cc868e46009b5a8382cbff6307cdebc679 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_decorators.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_doctools.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_doctools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfa5d9892f3bf099ed09608a1ad315e485cfed2a Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_doctools.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_exceptions.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_exceptions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b981816c3bd15320e04817527f5d4760ab4dc26a Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_exceptions.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_test_decorators.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_test_decorators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89770c5408b204e7708981919156ad4128d86fad Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_test_decorators.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_tester.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_tester.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f669893ad9648b967aabe0f980a1b63df098783d Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_tester.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_validators.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_validators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c811cdb911c09ab73bb40b3323a97465abd814f3 Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/util/__pycache__/_validators.cpython-310.pyc differ diff --git a/moondream/lib/python3.10/site-packages/pandas/util/_decorators.py b/moondream/lib/python3.10/site-packages/pandas/util/_decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..4e8189e72c427d75da0367532edfe04d0a7581e8 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/util/_decorators.py @@ -0,0 +1,508 @@ +from __future__ import annotations + +from functools import wraps +import inspect +from textwrap import dedent +from typing import ( + TYPE_CHECKING, + Any, + Callable, + cast, +) +import warnings + +from pandas._libs.properties import cache_readonly +from pandas._typing import ( + F, + T, +) +from pandas.util._exceptions import find_stack_level + +if TYPE_CHECKING: + from collections.abc import Mapping + + +def deprecate( + name: str, + alternative: Callable[..., Any], + version: str, + alt_name: str | None = None, + klass: type[Warning] | None = None, + stacklevel: int = 2, + msg: str | None = None, +) -> Callable[[F], F]: + """ + Return a new function that emits a deprecation warning 
on use.
+
+    To use this method for a deprecated function, another function
+    `alternative` with the same signature must exist. The deprecated
+    function will emit a deprecation warning, and in the docstring
+    it will contain the deprecation directive with the provided version
+    so it can be detected for future removal.
+
+    Parameters
+    ----------
+    name : str
+        Name of function to deprecate.
+    alternative : func
+        Function to use instead.
+    version : str
+        Version of pandas in which the method has been deprecated.
+    alt_name : str, optional
+        Name to use in preference to alternative.__name__.
+    klass : Warning, default FutureWarning
+        The warning class to emit.
+    stacklevel : int, default 2
+        The stacklevel to pass to the warning.
+    msg : str, optional
+        The message to display in the warning.
+        Default is '{name} is deprecated, use {alt_name} instead.'
+    """
+    alt_name = alt_name or alternative.__name__
+    klass = klass or FutureWarning
+    warning_msg = msg or f"{name} is deprecated, use {alt_name} instead."
+
+    @wraps(alternative)
+    def wrapper(*args, **kwargs) -> Callable[..., Any]:
+        warnings.warn(warning_msg, klass, stacklevel=stacklevel)
+        return alternative(*args, **kwargs)
+
+    # adding deprecated directive to the docstring
+    msg = msg or f"Use `{alt_name}` instead."
+    doc_error_msg = (
+        "deprecate needs a correctly formatted docstring in "
+        "the target function (should have a one-line short "
+        "summary, and opening quotes should be in their own "
+        f"line). Found:\n{alternative.__doc__}"
+    )
+
+    # when python is running in optimized mode (i.e. `-OO`), docstrings are
+    # removed, so we check that a docstring with correct formatting is used
+    # but we allow empty docstrings
+    if alternative.__doc__:
+        if alternative.__doc__.count("\n") < 3:
+            raise AssertionError(doc_error_msg)
+        empty1, summary, empty2, doc_string = alternative.__doc__.split("\n", 3)
+        if empty1 or empty2 and not summary:
+            raise AssertionError(doc_error_msg)
+        wrapper.__doc__ = dedent(
+            f"""
+        {summary.strip()}
+
+        .. deprecated:: {version}
+            {msg}
+
+        {dedent(doc_string)}"""
+        )
+    # error: Incompatible return value type (got "Callable[[VarArg(Any), KwArg(Any)],
+    # Callable[...,Any]]", expected "Callable[[F], F]")
+    return wrapper  # type: ignore[return-value]
+
+
+def deprecate_kwarg(
+    old_arg_name: str,
+    new_arg_name: str | None,
+    mapping: Mapping[Any, Any] | Callable[[Any], Any] | None = None,
+    stacklevel: int = 2,
+) -> Callable[[F], F]:
+    """
+    Decorator to deprecate a keyword argument of a function.
+
+    Parameters
+    ----------
+    old_arg_name : str
+        Name of argument in function to deprecate.
+    new_arg_name : str or None
+        Name of preferred argument in function. Use None to raise a warning
+        that the ``old_arg_name`` keyword is deprecated.
+    mapping : dict or callable
+        If mapping is present, use it to translate old arguments to
+        new arguments. A callable must do its own value checking;
+        values not found in a dict will be forwarded unchanged.
+
+    Examples
+    --------
+    The following deprecates 'cols', using 'columns' instead
+
+    >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name='columns')
+    ... def f(columns=''):
+    ...     print(columns)
+    ...
+    >>> f(columns='should work ok')
+    should work ok
+
+    >>> f(cols='should raise warning')  # doctest: +SKIP
+    FutureWarning: cols is deprecated, use columns instead
+      warnings.warn(msg, FutureWarning)
+    should raise warning
+
+    >>> f(cols='should error', columns="can't pass both")  # doctest: +SKIP
+    TypeError: Can only specify 'cols' or 'columns', not both
+
+    >>> @deprecate_kwarg('old', 'new', {'yes': True, 'no': False})
+    ... def f(new=False):
+    ...     print('yes!' if new else 'no!')
+    ...
+    >>> f(old='yes')  # doctest: +SKIP
+    FutureWarning: old='yes' is deprecated, use new=True instead
+      warnings.warn(msg, FutureWarning)
+    yes!
+
+    To raise a warning that a keyword will be removed entirely in the future
+
+    >>> @deprecate_kwarg(old_arg_name='cols', new_arg_name=None)
+    ... def f(cols='', another_param=''):
+    ...     print(cols)
+    ...
+    >>> f(cols='should raise warning')  # doctest: +SKIP
+    FutureWarning: the 'cols' keyword is deprecated and will be removed in a
+    future version. Please take steps to stop the use of 'cols'
+    should raise warning
+    >>> f(another_param='should not raise warning')  # doctest: +SKIP
+    should not raise warning
+
+    >>> f(cols='should raise warning', another_param='')  # doctest: +SKIP
+    FutureWarning: the 'cols' keyword is deprecated and will be removed in a
+    future version. Please take steps to stop the use of 'cols'
+    should raise warning
+    """
+    if mapping is not None and not hasattr(mapping, "get") and not callable(mapping):
+        raise TypeError(
+            "mapping from old to new argument values must be dict or callable!"
+        )
+
+    def _deprecate_kwarg(func: F) -> F:
+        @wraps(func)
+        def wrapper(*args, **kwargs) -> Callable[..., Any]:
+            old_arg_value = kwargs.pop(old_arg_name, None)
+
+            if old_arg_value is not None:
+                if new_arg_name is None:
+                    msg = (
+                        f"the {repr(old_arg_name)} keyword is deprecated and "
+                        "will be removed in a future version. Please take "
+                        f"steps to stop the use of {repr(old_arg_name)}"
+                    )
+                    warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
+                    kwargs[old_arg_name] = old_arg_value
+                    return func(*args, **kwargs)
+
+                elif mapping is not None:
+                    if callable(mapping):
+                        new_arg_value = mapping(old_arg_value)
+                    else:
+                        new_arg_value = mapping.get(old_arg_value, old_arg_value)
+                    msg = (
+                        f"the {old_arg_name}={repr(old_arg_value)} keyword is "
+                        "deprecated, use "
+                        f"{new_arg_name}={repr(new_arg_value)} instead."
+                    )
+                else:
+                    new_arg_value = old_arg_value
+                    msg = (
+                        f"the {repr(old_arg_name)} keyword is deprecated, "
+                        f"use {repr(new_arg_name)} instead."
+                    )
+
+                warnings.warn(msg, FutureWarning, stacklevel=stacklevel)
+                if kwargs.get(new_arg_name) is not None:
+                    msg = (
+                        f"Can only specify {repr(old_arg_name)} "
+                        f"or {repr(new_arg_name)}, not both."
+                    )
+                    raise TypeError(msg)
+                kwargs[new_arg_name] = new_arg_value
+            return func(*args, **kwargs)
+
+        return cast(F, wrapper)
+
+    return _deprecate_kwarg
+
+
+def _format_argument_list(allow_args: list[str]) -> str:
+    """
+    Convert the ``allow_args`` list of
+    `deprecate_nonkeyword_arguments` to a string describing
+    it, to be inserted into the warning message.
+
+    Parameters
+    ----------
+    allow_args : list of str
+        The resolved `allowed_args` argument of
+        `deprecate_nonkeyword_arguments`; None is not allowed here.
+
+    Returns
+    -------
+    str
+        The substring describing the argument list, formatted for
+        insertion into the warning message.
+
+    Examples
+    --------
+    `_format_argument_list([])` -> ''
+    `_format_argument_list(['a'])` -> "except for the argument 'a'"
+    `_format_argument_list(['a', 'b'])` -> "except for the arguments 'a' and 'b'"
+    `_format_argument_list(['a', 'b', 'c'])` ->
+    "except for the arguments 'a', 'b' and 'c'"
+    """
+    if "self" in allow_args:
+        allow_args.remove("self")
+    if not allow_args:
+        return ""
+    elif len(allow_args) == 1:
+        return f" except for the argument '{allow_args[0]}'"
+    else:
+        last = allow_args[-1]
+        args = ", ".join(["'" + x + "'" for x in allow_args[:-1]])
+        return f" except for the arguments {args} and '{last}'"
+
+
+def future_version_msg(version: str | None) -> str:
+    """Specify which version of pandas the deprecation will take place in."""
+    if version is None:
+        return "In a future version of pandas"
+    else:
+        return f"Starting with pandas version {version}"
+
+
+def deprecate_nonkeyword_arguments(
+    version: str | None,
+    allowed_args: list[str] | None = None,
+    name: str | None = None,
+) -> Callable[[F], F]:
+    """
+    Decorator to deprecate use of non-keyword arguments of a function.
+
+    Parameters
+    ----------
+    version : str, optional
+        The version in which positional arguments will become
+        keyword-only. If None, then the warning message won't
+        specify any particular version.
+
+    allowed_args : list, optional
+        If a list, the names of the leading arguments of the decorated
+        function that may still be given positionally. If None, defaults
+        to the list of all arguments that do not have a default value.
+
+    name : str, optional
+        The specific name of the function to show in the warning
+        message. If None, then the qualified name of the function
+        is used.
+    """
+
+    def decorate(func):
+        old_sig = inspect.signature(func)
+
+        if allowed_args is not None:
+            allow_args = allowed_args
+        else:
+            allow_args = [
+                p.name
+                for p in old_sig.parameters.values()
+                if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
+                and p.default is p.empty
+            ]
+
+        new_params = [
+            p.replace(kind=p.KEYWORD_ONLY)
+            if (
+                p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
+                and p.name not in allow_args
+            )
+            else p
+            for p in old_sig.parameters.values()
+        ]
+        new_params.sort(key=lambda p: p.kind)
+        new_sig = old_sig.replace(parameters=new_params)
+
+        num_allow_args = len(allow_args)
+        msg = (
+            f"{future_version_msg(version)} all arguments of "
+            f"{name or func.__qualname__}{{arguments}} will be keyword-only."
+        )
+
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            if len(args) > num_allow_args:
+                warnings.warn(
+                    msg.format(arguments=_format_argument_list(allow_args)),
+                    FutureWarning,
+                    stacklevel=find_stack_level(),
+                )
+            return func(*args, **kwargs)
+
+        # error: "Callable[[VarArg(Any), KwArg(Any)], Any]" has no
+        # attribute "__signature__"
+        wrapper.__signature__ = new_sig  # type: ignore[attr-defined]
+        return wrapper
+
+    return decorate
+
+
+def doc(*docstrings: None | str | Callable, **params) -> Callable[[F], F]:
+    """
+    A decorator to take docstring templates, concatenate them and perform string
+    substitution on them.
+
+    This decorator will add a variable "_docstring_components" to the wrapped
+    callable to keep track of the original docstring templates for potential
+    later use. If a component should be considered a template, it is saved as
+    a string. Otherwise, it is saved as a callable, and its ``__doc__`` is
+    dedented later to recover the docstring.
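+
+    For example (an illustrative sketch; ``klass`` is a made-up template
+    parameter)::
+
+        @doc(klass="Series")
+        def to_thing(self):
+            "Convert the {klass} to a thing."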
+ + Parameters + ---------- + *docstrings : None, str, or callable + The string / docstring / docstring template to be appended in order + after default docstring under callable. + **params + The string which would be used to format docstring template. + """ + + def decorator(decorated: F) -> F: + # collecting docstring and docstring templates + docstring_components: list[str | Callable] = [] + if decorated.__doc__: + docstring_components.append(dedent(decorated.__doc__)) + + for docstring in docstrings: + if docstring is None: + continue + if hasattr(docstring, "_docstring_components"): + docstring_components.extend( + docstring._docstring_components # pyright: ignore[reportGeneralTypeIssues] + ) + elif isinstance(docstring, str) or docstring.__doc__: + docstring_components.append(docstring) + + params_applied = [ + component.format(**params) + if isinstance(component, str) and len(params) > 0 + else component + for component in docstring_components + ] + + decorated.__doc__ = "".join( + [ + component + if isinstance(component, str) + else dedent(component.__doc__ or "") + for component in params_applied + ] + ) + + # error: "F" has no attribute "_docstring_components" + decorated._docstring_components = ( # type: ignore[attr-defined] + docstring_components + ) + return decorated + + return decorator + + +# Substitution and Appender are derived from matplotlib.docstring (1.1.0) +# module https://matplotlib.org/users/license.html + + +class Substitution: + """ + A decorator to take a function's docstring and perform string + substitution on it. + + This decorator should be robust even if func.__doc__ is None + (for example, if -OO was passed to the interpreter) + + Usage: construct a docstring.Substitution with a sequence or + dictionary suitable for performing substitution; then + decorate a suitable function with the constructed object. e.g. + + sub_author_name = Substitution(author='Jason') + + @sub_author_name + def some_function(x): + "%(author)s wrote this function" + + # note that some_function.__doc__ is now "Jason wrote this function" + + One can also use positional arguments. + + sub_first_last_names = Substitution('Edgar Allen', 'Poe') + + @sub_first_last_names + def some_function(x): + "%s %s wrote the Raven" + """ + + def __init__(self, *args, **kwargs) -> None: + if args and kwargs: + raise AssertionError("Only positional or keyword args are allowed") + + self.params = args or kwargs + + def __call__(self, func: F) -> F: + func.__doc__ = func.__doc__ and func.__doc__ % self.params + return func + + def update(self, *args, **kwargs) -> None: + """ + Update self.params with supplied args. + """ + if isinstance(self.params, dict): + self.params.update(*args, **kwargs) + + +class Appender: + """ + A function decorator that will append an addendum to the docstring + of the target function. + + This decorator should be robust even if func.__doc__ is None + (for example, if -OO was passed to the interpreter). + + Usage: construct a docstring.Appender with a string to be joined to + the original docstring. An optional 'join' parameter may be supplied + which will be used to join the docstring and addendum. e.g. 
+ + add_copyright = Appender("Copyright (c) 2009", join='\n') + + @add_copyright + def my_dog(has='fleas'): + "This docstring will have a copyright below" + pass + """ + + addendum: str | None + + def __init__(self, addendum: str | None, join: str = "", indents: int = 0) -> None: + if indents > 0: + self.addendum = indent(addendum, indents=indents) + else: + self.addendum = addendum + self.join = join + + def __call__(self, func: T) -> T: + func.__doc__ = func.__doc__ if func.__doc__ else "" + self.addendum = self.addendum if self.addendum else "" + docitems = [func.__doc__, self.addendum] + func.__doc__ = dedent(self.join.join(docitems)) + return func + + +def indent(text: str | None, indents: int = 1) -> str: + if not text or not isinstance(text, str): + return "" + jointext = "".join(["\n"] + [" "] * indents) + return jointext.join(text.split("\n")) + + +__all__ = [ + "Appender", + "cache_readonly", + "deprecate", + "deprecate_kwarg", + "deprecate_nonkeyword_arguments", + "doc", + "future_version_msg", + "Substitution", +] diff --git a/moondream/lib/python3.10/site-packages/pandas/util/_doctools.py b/moondream/lib/python3.10/site-packages/pandas/util/_doctools.py new file mode 100644 index 0000000000000000000000000000000000000000..12619abf4baaf336dfd3d5ae78a9bc2133f310c0 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/util/_doctools.py @@ -0,0 +1,202 @@ +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +import pandas as pd + +if TYPE_CHECKING: + from collections.abc import Iterable + + +class TablePlotter: + """ + Layout some DataFrames in vertical/horizontal layout for explanation. + Used in merging.rst + """ + + def __init__( + self, + cell_width: float = 0.37, + cell_height: float = 0.25, + font_size: float = 7.5, + ) -> None: + self.cell_width = cell_width + self.cell_height = cell_height + self.font_size = font_size + + def _shape(self, df: pd.DataFrame) -> tuple[int, int]: + """ + Calculate table shape considering index levels. + """ + row, col = df.shape + return row + df.columns.nlevels, col + df.index.nlevels + + def _get_cells(self, left, right, vertical) -> tuple[int, int]: + """ + Calculate appropriate figure size based on left and right data. + """ + if vertical: + # calculate required number of cells + vcells = max(sum(self._shape(df)[0] for df in left), self._shape(right)[0]) + hcells = max(self._shape(df)[1] for df in left) + self._shape(right)[1] + else: + vcells = max([self._shape(df)[0] for df in left] + [self._shape(right)[0]]) + hcells = sum([self._shape(df)[1] for df in left] + [self._shape(right)[1]]) + return hcells, vcells + + def plot(self, left, right, labels: Iterable[str] = (), vertical: bool = True): + """ + Plot left / right DataFrames in specified layout. + + Parameters + ---------- + left : list of DataFrames before operation is applied + right : DataFrame of operation result + labels : list of str to be drawn as titles of left DataFrames + vertical : bool, default True + If True, use vertical layout. If False, use horizontal layout. 
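+
+        Returns
+        -------
+        matplotlib.figure.Figure
+            The assembled figure; showing or saving it is left to the caller.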
+ """ + from matplotlib import gridspec + import matplotlib.pyplot as plt + + if not isinstance(left, list): + left = [left] + left = [self._conv(df) for df in left] + right = self._conv(right) + + hcells, vcells = self._get_cells(left, right, vertical) + + if vertical: + figsize = self.cell_width * hcells, self.cell_height * vcells + else: + # include margin for titles + figsize = self.cell_width * hcells, self.cell_height * vcells + fig = plt.figure(figsize=figsize) + + if vertical: + gs = gridspec.GridSpec(len(left), hcells) + # left + max_left_cols = max(self._shape(df)[1] for df in left) + max_left_rows = max(self._shape(df)[0] for df in left) + for i, (_left, _label) in enumerate(zip(left, labels)): + ax = fig.add_subplot(gs[i, 0:max_left_cols]) + self._make_table(ax, _left, title=_label, height=1.0 / max_left_rows) + # right + ax = plt.subplot(gs[:, max_left_cols:]) + self._make_table(ax, right, title="Result", height=1.05 / vcells) + fig.subplots_adjust(top=0.9, bottom=0.05, left=0.05, right=0.95) + else: + max_rows = max(self._shape(df)[0] for df in left + [right]) + height = 1.0 / np.max(max_rows) + gs = gridspec.GridSpec(1, hcells) + # left + i = 0 + for df, _label in zip(left, labels): + sp = self._shape(df) + ax = fig.add_subplot(gs[0, i : i + sp[1]]) + self._make_table(ax, df, title=_label, height=height) + i += sp[1] + # right + ax = plt.subplot(gs[0, i:]) + self._make_table(ax, right, title="Result", height=height) + fig.subplots_adjust(top=0.85, bottom=0.05, left=0.05, right=0.95) + + return fig + + def _conv(self, data): + """ + Convert each input to appropriate for table outplot. + """ + if isinstance(data, pd.Series): + if data.name is None: + data = data.to_frame(name="") + else: + data = data.to_frame() + data = data.fillna("NaN") + return data + + def _insert_index(self, data): + # insert is destructive + data = data.copy() + idx_nlevels = data.index.nlevels + if idx_nlevels == 1: + data.insert(0, "Index", data.index) + else: + for i in range(idx_nlevels): + data.insert(i, f"Index{i}", data.index._get_level_values(i)) + + col_nlevels = data.columns.nlevels + if col_nlevels > 1: + col = data.columns._get_level_values(0) + values = [ + data.columns._get_level_values(i)._values for i in range(1, col_nlevels) + ] + col_df = pd.DataFrame(values) + data.columns = col_df.columns + data = pd.concat([col_df, data]) + data.columns = col + return data + + def _make_table(self, ax, df, title: str, height: float | None = None) -> None: + if df is None: + ax.set_visible(False) + return + + from pandas import plotting + + idx_nlevels = df.index.nlevels + col_nlevels = df.columns.nlevels + # must be convert here to get index levels for colorization + df = self._insert_index(df) + tb = plotting.table(ax, df, loc=9) + tb.set_fontsize(self.font_size) + + if height is None: + height = 1.0 / (len(df) + 1) + + props = tb.properties() + for (r, c), cell in props["celld"].items(): + if c == -1: + cell.set_visible(False) + elif r < col_nlevels and c < idx_nlevels: + cell.set_visible(False) + elif r < col_nlevels or c < idx_nlevels: + cell.set_facecolor("#AAAAAA") + cell.set_height(height) + + ax.set_title(title, size=self.font_size) + ax.axis("off") + + +def main() -> None: + import matplotlib.pyplot as plt + + p = TablePlotter() + + df1 = pd.DataFrame({"A": [10, 11, 12], "B": [20, 21, 22], "C": [30, 31, 32]}) + df2 = pd.DataFrame({"A": [10, 12], "C": [30, 32]}) + + p.plot([df1, df2], pd.concat([df1, df2]), labels=["df1", "df2"], vertical=True) + plt.show() + + df3 = pd.DataFrame({"X": [10, 
12], "Z": [30, 32]}) + + p.plot( + [df1, df3], pd.concat([df1, df3], axis=1), labels=["df1", "df2"], vertical=False + ) + plt.show() + + idx = pd.MultiIndex.from_tuples( + [(1, "A"), (1, "B"), (1, "C"), (2, "A"), (2, "B"), (2, "C")] + ) + column = pd.MultiIndex.from_tuples([(1, "A"), (1, "B")]) + df3 = pd.DataFrame({"v1": [1, 2, 3, 4, 5, 6], "v2": [5, 6, 7, 8, 9, 10]}, index=idx) + df3.columns = column + p.plot(df3, df3, labels=["df3"]) + plt.show() + + +if __name__ == "__main__": + main() diff --git a/moondream/lib/python3.10/site-packages/pandas/util/_exceptions.py b/moondream/lib/python3.10/site-packages/pandas/util/_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..5f50838d373154868ff7414775763a1c66853c65 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/util/_exceptions.py @@ -0,0 +1,103 @@ +from __future__ import annotations + +import contextlib +import inspect +import os +import re +from typing import TYPE_CHECKING +import warnings + +if TYPE_CHECKING: + from collections.abc import Generator + from types import FrameType + + +@contextlib.contextmanager +def rewrite_exception(old_name: str, new_name: str) -> Generator[None, None, None]: + """ + Rewrite the message of an exception. + """ + try: + yield + except Exception as err: + if not err.args: + raise + msg = str(err.args[0]) + msg = msg.replace(old_name, new_name) + args: tuple[str, ...] = (msg,) + if len(err.args) > 1: + args = args + err.args[1:] + err.args = args + raise + + +def find_stack_level() -> int: + """ + Find the first place in the stack that is not inside pandas + (tests notwithstanding). + """ + + import pandas as pd + + pkg_dir = os.path.dirname(pd.__file__) + test_dir = os.path.join(pkg_dir, "tests") + + # https://stackoverflow.com/questions/17407119/python-inspect-stack-is-slow + frame: FrameType | None = inspect.currentframe() + try: + n = 0 + while frame: + filename = inspect.getfile(frame) + if filename.startswith(pkg_dir) and not filename.startswith(test_dir): + frame = frame.f_back + n += 1 + else: + break + finally: + # See note in + # https://docs.python.org/3/library/inspect.html#inspect.Traceback + del frame + return n + + +@contextlib.contextmanager +def rewrite_warning( + target_message: str, + target_category: type[Warning], + new_message: str, + new_category: type[Warning] | None = None, +) -> Generator[None, None, None]: + """ + Rewrite the message of a warning. + + Parameters + ---------- + target_message : str + Warning message to match. + target_category : Warning + Warning type to match. + new_message : str + New warning message to emit. + new_category : Warning or None, default None + New warning type to emit. When None, will be the same as target_category. 
+ """ + if new_category is None: + new_category = target_category + with warnings.catch_warnings(record=True) as record: + yield + if len(record) > 0: + match = re.compile(target_message) + for warning in record: + if warning.category is target_category and re.search( + match, str(warning.message) + ): + category = new_category + message: Warning | str = new_message + else: + category, message = warning.category, warning.message + warnings.warn_explicit( + message=message, + category=category, + filename=warning.filename, + lineno=warning.lineno, + ) diff --git a/moondream/lib/python3.10/site-packages/pandas/util/_print_versions.py b/moondream/lib/python3.10/site-packages/pandas/util/_print_versions.py new file mode 100644 index 0000000000000000000000000000000000000000..4ede5627c28b9a3eaf97f09f6a28642523ce5833 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/util/_print_versions.py @@ -0,0 +1,158 @@ +from __future__ import annotations + +import codecs +import json +import locale +import os +import platform +import struct +import sys +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from pandas._typing import JSONSerializable + +from pandas.compat._optional import ( + VERSIONS, + get_version, + import_optional_dependency, +) + + +def _get_commit_hash() -> str | None: + """ + Use vendored versioneer code to get git hash, which handles + git worktree correctly. + """ + try: + from pandas._version_meson import ( # pyright: ignore [reportMissingImports] + __git_version__, + ) + + return __git_version__ + except ImportError: + from pandas._version import get_versions + + versions = get_versions() + return versions["full-revisionid"] + + +def _get_sys_info() -> dict[str, JSONSerializable]: + """ + Returns system information as a JSON serializable dictionary. + """ + uname_result = platform.uname() + language_code, encoding = locale.getlocale() + return { + "commit": _get_commit_hash(), + "python": platform.python_version(), + "python-bits": struct.calcsize("P") * 8, + "OS": uname_result.system, + "OS-release": uname_result.release, + "Version": uname_result.version, + "machine": uname_result.machine, + "processor": uname_result.processor, + "byteorder": sys.byteorder, + "LC_ALL": os.environ.get("LC_ALL"), + "LANG": os.environ.get("LANG"), + "LOCALE": {"language-code": language_code, "encoding": encoding}, + } + + +def _get_dependency_info() -> dict[str, JSONSerializable]: + """ + Returns dependency information as a JSON serializable dictionary. + """ + deps = [ + "pandas", + # required + "numpy", + "pytz", + "dateutil", + # install / build, + "pip", + "Cython", + # docs + "sphinx", + # Other, not imported. + "IPython", + ] + # Optional dependencies + deps.extend(list(VERSIONS)) + + result: dict[str, JSONSerializable] = {} + for modname in deps: + try: + mod = import_optional_dependency(modname, errors="ignore") + except Exception: + # Dependency conflicts may cause a non ImportError + result[modname] = "N/A" + else: + result[modname] = get_version(mod) if mod else None + return result + + +def show_versions(as_json: str | bool = False) -> None: + """ + Provide useful information, important for bug reports. + + It comprises info about hosting operation system, pandas version, + and versions of other installed relative packages. + + Parameters + ---------- + as_json : str or bool, default False + * If False, outputs info in a human readable form to the console. + * If str, it will be considered as a path to a file. + Info will be written to that file in JSON format. 
+ * If True, outputs info in JSON format to the console. + + Examples + -------- + >>> pd.show_versions() # doctest: +SKIP + Your output may look something like this: + INSTALLED VERSIONS + ------------------ + commit : 37ea63d540fd27274cad6585082c91b1283f963d + python : 3.10.6.final.0 + python-bits : 64 + OS : Linux + OS-release : 5.10.102.1-microsoft-standard-WSL2 + Version : #1 SMP Wed Mar 2 00:30:59 UTC 2022 + machine : x86_64 + processor : x86_64 + byteorder : little + LC_ALL : None + LANG : en_GB.UTF-8 + LOCALE : en_GB.UTF-8 + pandas : 2.0.1 + numpy : 1.24.3 + ... + """ + sys_info = _get_sys_info() + deps = _get_dependency_info() + + if as_json: + j = {"system": sys_info, "dependencies": deps} + + if as_json is True: + sys.stdout.writelines(json.dumps(j, indent=2)) + else: + assert isinstance(as_json, str) # needed for mypy + with codecs.open(as_json, "wb", encoding="utf8") as f: + json.dump(j, f, indent=2) + + else: + assert isinstance(sys_info["LOCALE"], dict) # needed for mypy + language_code = sys_info["LOCALE"]["language-code"] + encoding = sys_info["LOCALE"]["encoding"] + sys_info["LOCALE"] = f"{language_code}.{encoding}" + + maxlen = max(len(x) for x in deps) + print("\nINSTALLED VERSIONS") + print("------------------") + for k, v in sys_info.items(): + print(f"{k:<{maxlen}}: {v}") + print("") + for k, v in deps.items(): + print(f"{k:<{maxlen}}: {v}") diff --git a/moondream/lib/python3.10/site-packages/pandas/util/_test_decorators.py b/moondream/lib/python3.10/site-packages/pandas/util/_test_decorators.py new file mode 100644 index 0000000000000000000000000000000000000000..2c1912bce856dd2694447d820ea2c5124be9c1a0 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/util/_test_decorators.py @@ -0,0 +1,173 @@ +""" +This module provides decorator functions which can be applied to test objects +in order to skip those objects when certain conditions occur. A sample use case +is to detect if the platform is missing ``matplotlib``. If so, any test objects +which require ``matplotlib`` and decorated with ``@td.skip_if_no("matplotlib")`` +will be skipped by ``pytest`` during the execution of the test suite. + +To illustrate, after importing this module: + +import pandas.util._test_decorators as td + +The decorators can be applied to classes: + +@td.skip_if_no("package") +class Foo: + ... + +Or individual functions: + +@td.skip_if_no("package") +def test_foo(): + ... + +For more information, refer to the ``pytest`` documentation on ``skipif``. +""" +from __future__ import annotations + +import locale +from typing import ( + TYPE_CHECKING, + Callable, +) + +import pytest + +from pandas._config import get_option + +if TYPE_CHECKING: + from pandas._typing import F + +from pandas._config.config import _get_option + +from pandas.compat import ( + IS64, + is_platform_windows, +) +from pandas.compat._optional import import_optional_dependency + + +def skip_if_installed(package: str) -> pytest.MarkDecorator: + """ + Skip a test if a package is installed. + + Parameters + ---------- + package : str + The name of the package. + + Returns + ------- + pytest.MarkDecorator + a pytest.mark.skipif to use as either a test decorator or a + parametrization mark. 
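+
+    Examples
+    --------
+    An illustrative sketch (package and test names are arbitrary)::
+
+        @td.skip_if_installed("matplotlib")
+        def test_fallback_without_matplotlib():
+            ...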
+ """ + return pytest.mark.skipif( + bool(import_optional_dependency(package, errors="ignore")), + reason=f"Skipping because {package} is installed.", + ) + + +def skip_if_no(package: str, min_version: str | None = None) -> pytest.MarkDecorator: + """ + Generic function to help skip tests when required packages are not + present on the testing system. + + This function returns a pytest mark with a skip condition that will be + evaluated during test collection. An attempt will be made to import the + specified ``package`` and optionally ensure it meets the ``min_version`` + + The mark can be used as either a decorator for a test class or to be + applied to parameters in pytest.mark.parametrize calls or parametrized + fixtures. Use pytest.importorskip if an imported moduled is later needed + or for test functions. + + If the import and version check are unsuccessful, then the test function + (or test case when used in conjunction with parametrization) will be + skipped. + + Parameters + ---------- + package: str + The name of the required package. + min_version: str or None, default None + Optional minimum version of the package. + + Returns + ------- + pytest.MarkDecorator + a pytest.mark.skipif to use as either a test decorator or a + parametrization mark. + """ + msg = f"Could not import '{package}'" + if min_version: + msg += f" satisfying a min_version of {min_version}" + return pytest.mark.skipif( + not bool( + import_optional_dependency( + package, errors="ignore", min_version=min_version + ) + ), + reason=msg, + ) + + +skip_if_32bit = pytest.mark.skipif(not IS64, reason="skipping for 32 bit") +skip_if_windows = pytest.mark.skipif(is_platform_windows(), reason="Running on Windows") +skip_if_not_us_locale = pytest.mark.skipif( + locale.getlocale()[0] != "en_US", + reason=f"Set local {locale.getlocale()[0]} is not en_US", +) + + +def parametrize_fixture_doc(*args) -> Callable[[F], F]: + """ + Intended for use as a decorator for parametrized fixture, + this function will wrap the decorated function with a pytest + ``parametrize_fixture_doc`` mark. That mark will format + initial fixture docstring by replacing placeholders {0}, {1} etc + with parameters passed as arguments. + + Parameters + ---------- + args: iterable + Positional arguments for docstring. 
+ + Returns + ------- + function + The decorated function wrapped within a pytest + ``parametrize_fixture_doc`` mark + """ + + def documented_fixture(fixture): + fixture.__doc__ = fixture.__doc__.format(*args) + return fixture + + return documented_fixture + + +def mark_array_manager_not_yet_implemented(request) -> None: + mark = pytest.mark.xfail(reason="Not yet implemented for ArrayManager") + request.applymarker(mark) + + +skip_array_manager_not_yet_implemented = pytest.mark.xfail( + _get_option("mode.data_manager", silent=True) == "array", + reason="Not yet implemented for ArrayManager", +) + +skip_array_manager_invalid_test = pytest.mark.skipif( + _get_option("mode.data_manager", silent=True) == "array", + reason="Test that relies on BlockManager internals or specific behaviour", +) + +skip_copy_on_write_not_yet_implemented = pytest.mark.xfail( + get_option("mode.copy_on_write") is True, + reason="Not yet implemented/adapted for Copy-on-Write mode", +) + +skip_copy_on_write_invalid_test = pytest.mark.skipif( + get_option("mode.copy_on_write") is True, + reason="Test not valid for Copy-on-Write mode", +) diff --git a/moondream/lib/python3.10/site-packages/pandas/util/_tester.py b/moondream/lib/python3.10/site-packages/pandas/util/_tester.py new file mode 100644 index 0000000000000000000000000000000000000000..7cfddef7ddff87275ebf31eb7ec10e65d26f8668 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/util/_tester.py @@ -0,0 +1,53 @@ +""" +Entrypoint for testing from the top-level namespace. +""" +from __future__ import annotations + +import os +import sys + +from pandas.compat._optional import import_optional_dependency + +PKG = os.path.dirname(os.path.dirname(__file__)) + + +def test(extra_args: list[str] | None = None, run_doctests: bool = False) -> None: + """ + Run the pandas test suite using pytest. + + By default, runs with the marks -m "not slow and not network and not db" + + Parameters + ---------- + extra_args : list[str], default None + Extra marks to run the tests. + run_doctests : bool, default False + Whether to only run the Python and Cython doctests. If you would like to run + both doctests/regular tests, just append "--doctest-modules"/"--doctest-cython" + to extra_args. + + Examples + -------- + >>> pd.test() # doctest: +SKIP + running: pytest... 
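+
+    Passing ``extra_args`` replaces the default marks (an illustrative
+    sketch; the printed path depends on the install location):
+
+    >>> pd.test(extra_args=["-m not slow"])  # doctest: +SKIP
+    running: pytest -m not slow ...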
+ """ + pytest = import_optional_dependency("pytest") + import_optional_dependency("hypothesis") + cmd = ["-m not slow and not network and not db"] + if extra_args: + if not isinstance(extra_args, list): + extra_args = [extra_args] + cmd = extra_args + if run_doctests: + cmd = [ + "--doctest-modules", + "--doctest-cython", + f"--ignore={os.path.join(PKG, 'tests')}", + ] + cmd += [PKG] + joined = " ".join(cmd) + print(f"running: pytest {joined}") + sys.exit(pytest.main(cmd)) + + +__all__ = ["test"] diff --git a/moondream/lib/python3.10/site-packages/pandas/util/_validators.py b/moondream/lib/python3.10/site-packages/pandas/util/_validators.py new file mode 100644 index 0000000000000000000000000000000000000000..cb0b4d549f49ea972d50c97986c60be64c021c3c --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/util/_validators.py @@ -0,0 +1,456 @@ +""" +Module that contains many useful utilities +for validating data or function arguments +""" +from __future__ import annotations + +from collections.abc import ( + Iterable, + Sequence, +) +from typing import ( + TypeVar, + overload, +) + +import numpy as np + +from pandas._libs import lib + +from pandas.core.dtypes.common import ( + is_bool, + is_integer, +) + +BoolishT = TypeVar("BoolishT", bool, int) +BoolishNoneT = TypeVar("BoolishNoneT", bool, int, None) + + +def _check_arg_length(fname, args, max_fname_arg_count, compat_args) -> None: + """ + Checks whether 'args' has length of at most 'compat_args'. Raises + a TypeError if that is not the case, similar to in Python when a + function is called with too many arguments. + """ + if max_fname_arg_count < 0: + raise ValueError("'max_fname_arg_count' must be non-negative") + + if len(args) > len(compat_args): + max_arg_count = len(compat_args) + max_fname_arg_count + actual_arg_count = len(args) + max_fname_arg_count + argument = "argument" if max_arg_count == 1 else "arguments" + + raise TypeError( + f"{fname}() takes at most {max_arg_count} {argument} " + f"({actual_arg_count} given)" + ) + + +def _check_for_default_values(fname, arg_val_dict, compat_args) -> None: + """ + Check that the keys in `arg_val_dict` are mapped to their + default values as specified in `compat_args`. + + Note that this function is to be called only when it has been + checked that arg_val_dict.keys() is a subset of compat_args + """ + for key in arg_val_dict: + # try checking equality directly with '=' operator, + # as comparison may have been overridden for the left + # hand object + try: + v1 = arg_val_dict[key] + v2 = compat_args[key] + + # check for None-ness otherwise we could end up + # comparing a numpy array vs None + if (v1 is not None and v2 is None) or (v1 is None and v2 is not None): + match = False + else: + match = v1 == v2 + + if not is_bool(match): + raise ValueError("'match' is not a boolean") + + # could not compare them directly, so try comparison + # using the 'is' operator + except ValueError: + match = arg_val_dict[key] is compat_args[key] + + if not match: + raise ValueError( + f"the '{key}' parameter is not supported in " + f"the pandas implementation of {fname}()" + ) + + +def validate_args(fname, args, max_fname_arg_count, compat_args) -> None: + """ + Checks whether the length of the `*args` argument passed into a function + has at most `len(compat_args)` arguments and whether or not all of these + elements in `args` are set to their default values. 
+
+    Parameters
+    ----------
+    fname : str
+        The name of the function being passed the `*args` parameter
+    args : tuple
+        The `*args` parameter passed into a function
+    max_fname_arg_count : int
+        The maximum number of arguments that the function `fname`
+        can accept, excluding those in `args`. Used for displaying
+        appropriate error messages. Must be non-negative.
+    compat_args : dict
+        A dictionary of keys and their associated default values.
+        In order to accommodate buggy behaviour in some versions of `numpy`,
+        where a signature displayed keyword arguments but then passed those
+        arguments **positionally** internally when calling downstream
+        implementations, a dict ensures that the original
+        order of the keyword arguments is enforced.
+
+    Raises
+    ------
+    TypeError
+        If `args` contains more values than there are `compat_args`
+    ValueError
+        If `args` contains values that do not correspond to those
+        of the default values specified in `compat_args`
+    """
+    _check_arg_length(fname, args, max_fname_arg_count, compat_args)
+
+    # We do this so that we can provide a more informative
+    # error message about the parameters that we are not
+    # supporting in the pandas implementation of 'fname'
+    kwargs = dict(zip(compat_args, args))
+    _check_for_default_values(fname, kwargs, compat_args)
+
+
+def _check_for_invalid_keys(fname, kwargs, compat_args) -> None:
+    """
+    Checks whether 'kwargs' contains any keys that are not
+    in 'compat_args' and raises a TypeError if there is one.
+    """
+    # set(dict) --> set of the dictionary's keys
+    diff = set(kwargs) - set(compat_args)
+
+    if diff:
+        bad_arg = next(iter(diff))
+        raise TypeError(f"{fname}() got an unexpected keyword argument '{bad_arg}'")
+
+
+def validate_kwargs(fname, kwargs, compat_args) -> None:
+    """
+    Checks whether the parameters passed to the **kwargs argument in a
+    function `fname` are valid parameters as specified in `compat_args`
+    and whether or not they are set to their default values.
+
+    Parameters
+    ----------
+    fname : str
+        The name of the function being passed the `**kwargs` parameter
+    kwargs : dict
+        The `**kwargs` parameter passed into `fname`
+    compat_args: dict
+        A dictionary of keys that `kwargs` is allowed to have and their
+        associated default values
+
+    Raises
+    ------
+    TypeError if `kwargs` contains keys not in `compat_args`
+    ValueError if `kwargs` contains keys in `compat_args` that do not
+    map to the default values specified in `compat_args`
+    """
+    kwds = kwargs.copy()
+    _check_for_invalid_keys(fname, kwargs, compat_args)
+    _check_for_default_values(fname, kwds, compat_args)
+
+
+def validate_args_and_kwargs(
+    fname, args, kwargs, max_fname_arg_count, compat_args
+) -> None:
+    """
+    Checks whether the parameters passed to the *args and **kwargs arguments
+    in a function `fname` are valid parameters as specified in `compat_args`
+    and whether or not they are set to their default values.
+
+    Parameters
+    ----------
+    fname: str
+        The name of the function being passed the `**kwargs` parameter
+    args: tuple
+        The `*args` parameter passed into a function
+    kwargs: dict
+        The `**kwargs` parameter passed into `fname`
+    max_fname_arg_count: int
+        The maximum number of arguments that the function `fname`
+        can accept, excluding those in `args`. Used for displaying
+        appropriate error messages. Must be non-negative.
+    compat_args: dict
+        A dictionary of keys that `kwargs` is allowed to
+        have and their associated default values.
+
+    Raises
+    ------
+    TypeError if `args` contains more values than there are
+    `compat_args` OR `kwargs` contains keys not in `compat_args`
+    ValueError if `args` contains values not at the default value (`None`)
+    `kwargs` contains keys in `compat_args` that do not map to the default
+    value as specified in `compat_args`
+
+    See Also
+    --------
+    validate_args : Purely args validation.
+    validate_kwargs : Purely kwargs validation.
+
+    """
+    # Check that the total number of arguments passed in (i.e.
+    # args and kwargs) does not exceed the length of compat_args
+    _check_arg_length(
+        fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args
+    )
+
+    # Check there is no overlap with the positional and keyword
+    # arguments, similar to what is done in actual Python functions
+    args_dict = dict(zip(compat_args, args))
+
+    for key in args_dict:
+        if key in kwargs:
+            raise TypeError(
+                f"{fname}() got multiple values for keyword argument '{key}'"
+            )
+
+    kwargs.update(args_dict)
+    validate_kwargs(fname, kwargs, compat_args)
+
+
+def validate_bool_kwarg(
+    value: BoolishNoneT,
+    arg_name: str,
+    none_allowed: bool = True,
+    int_allowed: bool = False,
+) -> BoolishNoneT:
+    """
+    Ensure that argument passed in arg_name can be interpreted as boolean.
+
+    Parameters
+    ----------
+    value : bool
+        Value to be validated.
+    arg_name : str
+        Name of the argument. To be reflected in the error message.
+    none_allowed : bool, default True
+        Whether to consider None to be a valid boolean.
+    int_allowed : bool, default False
+        Whether to consider integer value to be a valid boolean.
+
+    Returns
+    -------
+    value
+        The same value as input.
+
+    Raises
+    ------
+    ValueError
+        If the value is not a valid boolean.
+    """
+    good_value = is_bool(value)
+    if none_allowed:
+        good_value = good_value or (value is None)
+
+    if int_allowed:
+        good_value = good_value or isinstance(value, int)
+
+    if not good_value:
+        raise ValueError(
+            f'For argument "{arg_name}" expected type bool, received '
+            f"type {type(value).__name__}."
+        )
+    return value  # pyright: ignore[reportGeneralTypeIssues]
+
+
+def validate_fillna_kwargs(value, method, validate_scalar_dict_value: bool = True):
+    """
+    Validate the keyword arguments to 'fillna'.
+
+    This checks that exactly one of 'value' and 'method' is specified.
+    If 'method' is specified, this validates that it's a valid method.
+
+    Parameters
+    ----------
+    value, method : object
+        The 'value' and 'method' keyword arguments for 'fillna'.
+    validate_scalar_dict_value : bool, default True
+        Whether to validate that 'value' is a scalar or dict. Specifically,
+        validate that it is not a list or tuple.
+
+    Returns
+    -------
+    value, method : object
+    """
+    from pandas.core.missing import clean_fill_method
+
+    if value is None and method is None:
+        raise ValueError("Must specify a fill 'value' or 'method'.")
+    if value is None and method is not None:
+        method = clean_fill_method(method)
+
+    elif value is not None and method is None:
+        if validate_scalar_dict_value and isinstance(value, (list, tuple)):
+            raise TypeError(
+                '"value" parameter must be a scalar or dict, but '
+                f'you passed a "{type(value).__name__}"'
+            )
+
+    elif value is not None and method is not None:
+        raise ValueError("Cannot specify both 'value' and 'method'.")
+
+    return value, method
+
+
+def validate_percentile(q: float | Iterable[float]) -> np.ndarray:
+    """
+    Validate percentiles (used by describe and quantile).
+
+    This function checks if the given float or iterable of floats is a valid percentile
+    otherwise raises a ValueError.
+
+    Parameters
+    ----------
+    q: float or iterable of floats
+        A single percentile or an iterable of percentiles.
+
+    Returns
+    -------
+    ndarray
+        An ndarray of the percentiles if valid.
+
+    Raises
+    ------
+    ValueError if percentiles are not in given interval([0, 1]).
+    """
+    q_arr = np.asarray(q)
+    # Don't change this to an f-string. The string formatting
+    # is too expensive for cases where we don't need it.
+    msg = "percentiles should all be in the interval [0, 1]"
+    if q_arr.ndim == 0:
+        if not 0 <= q_arr <= 1:
+            raise ValueError(msg)
+    else:
+        if not all(0 <= qs <= 1 for qs in q_arr):
+            raise ValueError(msg)
+    return q_arr
+
+
+@overload
+def validate_ascending(ascending: BoolishT) -> BoolishT:
+    ...
+
+
+@overload
+def validate_ascending(ascending: Sequence[BoolishT]) -> list[BoolishT]:
+    ...
+
+
+def validate_ascending(
+    ascending: bool | int | Sequence[BoolishT],
+) -> bool | int | list[BoolishT]:
+    """Validate ``ascending`` kwargs for ``sort_index`` method."""
+    kwargs = {"none_allowed": False, "int_allowed": True}
+    if not isinstance(ascending, Sequence):
+        return validate_bool_kwarg(ascending, "ascending", **kwargs)
+
+    return [validate_bool_kwarg(item, "ascending", **kwargs) for item in ascending]
+
+
+def validate_endpoints(closed: str | None) -> tuple[bool, bool]:
+    """
+    Check that the `closed` argument is among [None, "left", "right"]
+
+    Parameters
+    ----------
+    closed : {None, "left", "right"}
+
+    Returns
+    -------
+    left_closed : bool
+    right_closed : bool
+
+    Raises
+    ------
+    ValueError : if argument is not among valid values
+    """
+    left_closed = False
+    right_closed = False
+
+    if closed is None:
+        left_closed = True
+        right_closed = True
+    elif closed == "left":
+        left_closed = True
+    elif closed == "right":
+        right_closed = True
+    else:
+        raise ValueError("Closed has to be either 'left', 'right' or None")
+
+    return left_closed, right_closed
+
+
+def validate_inclusive(inclusive: str | None) -> tuple[bool, bool]:
+    """
+    Check that the `inclusive` argument is among {"both", "neither", "left", "right"}.
+
+    Parameters
+    ----------
+    inclusive : {"both", "neither", "left", "right"}
+
+    Returns
+    -------
+    left_right_inclusive : tuple[bool, bool]
+
+    Raises
+    ------
+    ValueError : if argument is not among valid values
+    """
+    left_right_inclusive: tuple[bool, bool] | None = None
+
+    if isinstance(inclusive, str):
+        left_right_inclusive = {
+            "both": (True, True),
+            "left": (True, False),
+            "right": (False, True),
+            "neither": (False, False),
+        }.get(inclusive)
+
+    if left_right_inclusive is None:
+        raise ValueError(
+            "Inclusive has to be either 'both', 'neither', 'left' or 'right'"
+        )
+
+    return left_right_inclusive
+
+
+def validate_insert_loc(loc: int, length: int) -> int:
+    """
+    Check that we have an integer between -length and length, inclusive.
+
+    Standardize negative loc to within [0, length].
+
+    The exceptions we raise on failure match np.insert.
+ """ + if not is_integer(loc): + raise TypeError(f"loc must be an integer between -{length} and {length}") + + if loc < 0: + loc += length + if not 0 <= loc <= length: + raise IndexError(f"loc must be an integer between -{length} and {length}") + return loc # pyright: ignore[reportGeneralTypeIssues] + + +def check_dtype_backend(dtype_backend) -> None: + if dtype_backend is not lib.no_default: + if dtype_backend not in ["numpy_nullable", "pyarrow"]: + raise ValueError( + f"dtype_backend {dtype_backend} is invalid, only 'numpy_nullable' and " + f"'pyarrow' are allowed.", + ) diff --git a/moondream/lib/python3.10/site-packages/pandas/util/version/__init__.py b/moondream/lib/python3.10/site-packages/pandas/util/version/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..3a5efbbb09c1e5085bf0564d819be345edd8c827 --- /dev/null +++ b/moondream/lib/python3.10/site-packages/pandas/util/version/__init__.py @@ -0,0 +1,579 @@ +# Vendored from https://github.com/pypa/packaging/blob/main/packaging/_structures.py +# and https://github.com/pypa/packaging/blob/main/packaging/_structures.py +# changeset ae891fd74d6dd4c6063bb04f2faeadaac6fc6313 +# 04/30/2021 + +# This file is dual licensed under the terms of the Apache License, Version +# 2.0, and the BSD License. Licence at LICENSES/PACKAGING_LICENSE +from __future__ import annotations + +import collections +from collections.abc import Iterator +import itertools +import re +from typing import ( + Callable, + SupportsInt, + Tuple, + Union, +) +import warnings + +__all__ = ["parse", "Version", "LegacyVersion", "InvalidVersion", "VERSION_PATTERN"] + + +class InfinityType: + def __repr__(self) -> str: + return "Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return False + + def __le__(self, other: object) -> bool: + return False + + def __eq__(self, other: object) -> bool: + return isinstance(other, type(self)) + + def __ne__(self, other: object) -> bool: + return not isinstance(other, type(self)) + + def __gt__(self, other: object) -> bool: + return True + + def __ge__(self, other: object) -> bool: + return True + + def __neg__(self: object) -> NegativeInfinityType: + return NegativeInfinity + + +Infinity = InfinityType() + + +class NegativeInfinityType: + def __repr__(self) -> str: + return "-Infinity" + + def __hash__(self) -> int: + return hash(repr(self)) + + def __lt__(self, other: object) -> bool: + return True + + def __le__(self, other: object) -> bool: + return True + + def __eq__(self, other: object) -> bool: + return isinstance(other, type(self)) + + def __ne__(self, other: object) -> bool: + return not isinstance(other, type(self)) + + def __gt__(self, other: object) -> bool: + return False + + def __ge__(self, other: object) -> bool: + return False + + def __neg__(self: object) -> InfinityType: + return Infinity + + +NegativeInfinity = NegativeInfinityType() + + +InfiniteTypes = Union[InfinityType, NegativeInfinityType] +PrePostDevType = Union[InfiniteTypes, tuple[str, int]] +SubLocalType = Union[InfiniteTypes, int, str] +LocalType = Union[ + NegativeInfinityType, + tuple[ + Union[ + SubLocalType, + tuple[SubLocalType, str], + tuple[NegativeInfinityType, SubLocalType], + ], + ..., + ], +] +CmpKey = tuple[ + int, tuple[int, ...], PrePostDevType, PrePostDevType, PrePostDevType, LocalType +] +LegacyCmpKey = tuple[int, tuple[str, ...]] +VersionComparisonMethod = Callable[ + [Union[CmpKey, LegacyCmpKey], Union[CmpKey, LegacyCmpKey]], bool +] + +_Version = 
+_Version = collections.namedtuple(
+    "_Version", ["epoch", "release", "dev", "pre", "post", "local"]
+)
+
+
+def parse(version: str) -> LegacyVersion | Version:
+    """
+    Parse the given version string and return either a :class:`Version` object
+    or a :class:`LegacyVersion` object depending on if the given version is
+    a valid PEP 440 version or a legacy version.
+    """
+    try:
+        return Version(version)
+    except InvalidVersion:
+        return LegacyVersion(version)
+
+
+class InvalidVersion(ValueError):
+    """
+    An invalid version was found, users should refer to PEP 440.
+
+    Examples
+    --------
+    >>> pd.util.version.Version('1.')
+    Traceback (most recent call last):
+    InvalidVersion: Invalid version: '1.'
+    """
+
+
+class _BaseVersion:
+    _key: CmpKey | LegacyCmpKey
+
+    def __hash__(self) -> int:
+        return hash(self._key)
+
+    # Please keep the duplicated `isinstance` check
+    # in the six comparisons hereunder
+    # unless you find a way to avoid adding overhead function calls.
+    def __lt__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key < other._key
+
+    def __le__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key <= other._key
+
+    def __eq__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key == other._key
+
+    def __ge__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key >= other._key
+
+    def __gt__(self, other: _BaseVersion) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key > other._key
+
+    def __ne__(self, other: object) -> bool:
+        if not isinstance(other, _BaseVersion):
+            return NotImplemented
+
+        return self._key != other._key
+
+
+class LegacyVersion(_BaseVersion):
+    def __init__(self, version: str) -> None:
+        self._version = str(version)
+        self._key = _legacy_cmpkey(self._version)
+
+        warnings.warn(
+            "Creating a LegacyVersion has been deprecated and will be "
+            "removed in the next major release.",
+            DeprecationWarning,
+        )
+
+    def __str__(self) -> str:
+        return self._version
+
+    def __repr__(self) -> str:
+        return f"<LegacyVersion('{self}')>"
+
+    @property
+    def public(self) -> str:
+        return self._version
+
+    @property
+    def base_version(self) -> str:
+        return self._version
+
+    @property
+    def epoch(self) -> int:
+        return -1
+
+    @property
+    def release(self) -> None:
+        return None
+
+    @property
+    def pre(self) -> None:
+        return None
+
+    @property
+    def post(self) -> None:
+        return None
+
+    @property
+    def dev(self) -> None:
+        return None
+
+    @property
+    def local(self) -> None:
+        return None
+
+    @property
+    def is_prerelease(self) -> bool:
+        return False
+
+    @property
+    def is_postrelease(self) -> bool:
+        return False
+
+    @property
+    def is_devrelease(self) -> bool:
+        return False
+
+
+_legacy_version_component_re = re.compile(r"(\d+ | [a-z]+ | \.| -)", re.VERBOSE)
+
+_legacy_version_replacement_map = {
+    "pre": "c",
+    "preview": "c",
+    "-": "final-",
+    "rc": "c",
+    "dev": "@",
+}
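# A short usage sketch (same pandas.util.version import assumption as above):
# strings that are not valid PEP 440 fall back to LegacyVersion via parse(),
# and the -1 epoch in their comparison key makes them sort before every
# PEP 440 Version.
import warnings

from pandas.util.version import LegacyVersion, Version, parse

with warnings.catch_warnings():
    warnings.simplefilter("ignore", DeprecationWarning)
    legacy = parse("1.0-dog")  # "dog" is not a valid PEP 440 segment
assert isinstance(legacy, LegacyVersion)
assert legacy < Version("0.0.1")  # legacy versions order before any real release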
"*final" + + +def _legacy_cmpkey(version: str) -> LegacyCmpKey: + # We hardcode an epoch of -1 here. A PEP 440 version can only have a epoch + # greater than or equal to 0. This will effectively put the LegacyVersion, + # which uses the defacto standard originally implemented by setuptools, + # as before all PEP 440 versions. + epoch = -1 + + # This scheme is taken from pkg_resources.parse_version setuptools prior to + # it's adoption of the packaging library. + parts: list[str] = [] + for part in _parse_version_parts(version.lower()): + if part.startswith("*"): + # remove "-" before a prerelease tag + if part < "*final": + while parts and parts[-1] == "*final-": + parts.pop() + + # remove trailing zeros from each series of numeric parts + while parts and parts[-1] == "00000000": + parts.pop() + + parts.append(part) + + return epoch, tuple(parts) + + +# Deliberately not anchored to the start and end of the string, to make it +# easier for 3rd party code to reuse +VERSION_PATTERN = r""" + v? + (?: + (?:(?P[0-9]+)!)? # epoch + (?P[0-9]+(?:\.[0-9]+)*) # release segment + (?P
                                          # pre-release
+            [-_\.]?
+            (?P(a|b|c|rc|alpha|beta|pre|preview))
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+        (?P                                         # post release
+            (?:-(?P[0-9]+))
+            |
+            (?:
+                [-_\.]?
+                (?Ppost|rev|r)
+                [-_\.]?
+                (?P[0-9]+)?
+            )
+        )?
+        (?P                                          # dev release
+            [-_\.]?
+            (?Pdev)
+            [-_\.]?
+            (?P[0-9]+)?
+        )?
+    )
+    (?:\+(?P[a-z0-9]+(?:[-_\.][a-z0-9]+)*))?       # local version
+"""
+
+
+class Version(_BaseVersion):
+    _regex = re.compile(r"^\s*" + VERSION_PATTERN + r"\s*$", re.VERBOSE | re.IGNORECASE)
+
+    def __init__(self, version: str) -> None:
+        # Validate the version and parse it into pieces
+        match = self._regex.search(version)
+        if not match:
+            raise InvalidVersion(f"Invalid version: '{version}'")
+
+        # Store the parsed out pieces of the version
+        self._version = _Version(
+            epoch=int(match.group("epoch")) if match.group("epoch") else 0,
+            release=tuple(int(i) for i in match.group("release").split(".")),
+            pre=_parse_letter_version(match.group("pre_l"), match.group("pre_n")),
+            post=_parse_letter_version(
+                match.group("post_l"), match.group("post_n1") or match.group("post_n2")
+            ),
+            dev=_parse_letter_version(match.group("dev_l"), match.group("dev_n")),
+            local=_parse_local_version(match.group("local")),
+        )
+
+        # Generate a key which will be used for sorting
+        self._key = _cmpkey(
+            self._version.epoch,
+            self._version.release,
+            self._version.pre,
+            self._version.post,
+            self._version.dev,
+            self._version.local,
+        )
+
+    def __repr__(self) -> str:
+        return f""
+
+    def __str__(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join([str(x) for x in self.release]))
+
+        # Pre-release
+        if self.pre is not None:
+            parts.append("".join([str(x) for x in self.pre]))
+
+        # Post-release
+        if self.post is not None:
+            parts.append(f".post{self.post}")
+
+        # Development release
+        if self.dev is not None:
+            parts.append(f".dev{self.dev}")
+
+        # Local version segment
+        if self.local is not None:
+            parts.append(f"+{self.local}")
+
+        return "".join(parts)
+
+    @property
+    def epoch(self) -> int:
+        _epoch: int = self._version.epoch
+        return _epoch
+
+    @property
+    def release(self) -> tuple[int, ...]:
+        _release: tuple[int, ...] = self._version.release
+        return _release
+
+    @property
+    def pre(self) -> tuple[str, int] | None:
+        _pre: tuple[str, int] | None = self._version.pre
+        return _pre
+
+    @property
+    def post(self) -> int | None:
+        return self._version.post[1] if self._version.post else None
+
+    @property
+    def dev(self) -> int | None:
+        return self._version.dev[1] if self._version.dev else None
+
+    @property
+    def local(self) -> str | None:
+        if self._version.local:
+            return ".".join([str(x) for x in self._version.local])
+        else:
+            return None
+
+    @property
+    def public(self) -> str:
+        return str(self).split("+", 1)[0]
+
+    @property
+    def base_version(self) -> str:
+        parts = []
+
+        # Epoch
+        if self.epoch != 0:
+            parts.append(f"{self.epoch}!")
+
+        # Release segment
+        parts.append(".".join([str(x) for x in self.release]))
+
+        return "".join(parts)
+
+    @property
+    def is_prerelease(self) -> bool:
+        return self.dev is not None or self.pre is not None
+
+    @property
+    def is_postrelease(self) -> bool:
+        return self.post is not None
+
+    @property
+    def is_devrelease(self) -> bool:
+        return self.dev is not None
+
+    @property
+    def major(self) -> int:
+        return self.release[0] if len(self.release) >= 1 else 0
+
+    @property
+    def minor(self) -> int:
+        return self.release[1] if len(self.release) >= 2 else 0
+
+    @property
+    def micro(self) -> int:
+        return self.release[2] if len(self.release) >= 3 else 0
+
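# A brief sketch of the accessors on the class above (same import assumption
# as elsewhere: the vendored module lives at pandas.util.version):
from pandas.util.version import Version

v = Version("1!2.3.4rc1+local.5")
assert v.epoch == 1
assert v.release == (2, 3, 4)
assert (v.major, v.minor, v.micro) == (2, 3, 4)
assert v.pre == ("rc", 1) and v.is_prerelease
assert v.public == "1!2.3.4rc1"     # everything before the "+"
assert v.base_version == "1!2.3.4"  # epoch and release segment only
assert v.local == "local.5"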
+
+def _parse_letter_version(
+    letter: str, number: str | bytes | SupportsInt
+) -> tuple[str, int] | None:
+    if letter:
+        # We consider there to be an implicit 0 in a pre-release if there is
+        # not a numeral associated with it.
+        if number is None:
+            number = 0
+
+        # We normalize any letters to their lower case form
+        letter = letter.lower()
+
+        # We consider some words to be alternate spellings of other words and
+        # in those cases we want to normalize the spellings to our preferred
+        # spelling.
+        if letter == "alpha":
+            letter = "a"
+        elif letter == "beta":
+            letter = "b"
+        elif letter in ["c", "pre", "preview"]:
+            letter = "rc"
+        elif letter in ["rev", "r"]:
+            letter = "post"
+
+        return letter, int(number)
+    if not letter and number:
+        # We assume if we are given a number, but we are not given a letter
+        # then this is using the implicit post release syntax (e.g. 1.0-1)
+        letter = "post"
+
+        return letter, int(number)
+
+    return None
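# The normalization rules at a glance, shown as direct calls to the private
# helper above (illustration only; this is not part of the public API):
from pandas.util.version import _parse_letter_version

assert _parse_letter_version("ALPHA", None) == ("a", 0)    # implicit 0, lowercased
assert _parse_letter_version("preview", "7") == ("rc", 7)  # alternate rc spelling
assert _parse_letter_version("rev", "1") == ("post", 1)    # rev/r mean post
assert _parse_letter_version(None, "1") == ("post", 1)     # implicit post: "1.0-1"
assert _parse_letter_version(None, None) is None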
+
+
+_local_version_separators = re.compile(r"[\._-]")
+
+
+def _parse_local_version(local: str) -> LocalType | None:
+    """
+    Takes a string like abc.1.twelve and turns it into ("abc", 1, "twelve").
+    """
+    if local is not None:
+        return tuple(
+            part.lower() if not part.isdigit() else int(part)
+            for part in _local_version_separators.split(local)
+        )
+    return None
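# The docstring example, executed (again via the private helper, for
# illustration only):
from pandas.util.version import _parse_local_version

assert _parse_local_version("abc.1.twelve") == ("abc", 1, "twelve")
assert _parse_local_version("Ubuntu-20_04") == ("ubuntu", 20, 4)  # any of . _ - split
assert _parse_local_version(None) is None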
+
+
+def _cmpkey(
+    epoch: int,
+    release: tuple[int, ...],
+    pre: tuple[str, int] | None,
+    post: tuple[str, int] | None,
+    dev: tuple[str, int] | None,
+    local: tuple[SubLocalType] | None,
+) -> CmpKey:
+    # When we compare a release version, we want to compare it with all of the
+    # trailing zeros removed. So we'll reverse the list, drop all the now-
+    # leading zeros until we come to something non-zero, then re-reverse the
+    # rest back into the correct order and make it a tuple to use as our
+    # sorting key.
+    _release = tuple(
+        reversed(list(itertools.dropwhile(lambda x: x == 0, reversed(release))))
+    )
+
+    # We need to "trick" the sorting algorithm to put 1.0.dev0 before 1.0a0.
+    # We'll do this by abusing the pre segment, but we _only_ want to do this
+    # if there is not a pre or a post segment. If we have one of those then
+    # the normal sorting rules will handle this case correctly.
+    if pre is None and post is None and dev is not None:
+        _pre: PrePostDevType = NegativeInfinity
+    # Versions without a pre-release (except as noted above) should sort after
+    # those with one.
+    elif pre is None:
+        _pre = Infinity
+    else:
+        _pre = pre
+
+    # Versions without a post segment should sort before those with one.
+    if post is None:
+        _post: PrePostDevType = NegativeInfinity
+
+    else:
+        _post = post
+
+    # Versions without a development segment should sort after those with one.
+    if dev is None:
+        _dev: PrePostDevType = Infinity
+
+    else:
+        _dev = dev
+
+    if local is None:
+        # Versions without a local segment should sort before those with one.
+        _local: LocalType = NegativeInfinity
+    else:
+        # Versions with a local segment need that segment parsed to implement
+        # the sorting rules in PEP440.
+        # - Alpha numeric segments sort before numeric segments
+        # - Alpha numeric segments sort lexicographically
+        # - Numeric segments sort numerically
+        # - Shorter versions sort before longer versions when the prefixes
+        #   match exactly
+        _local = tuple(
+            (i, "") if isinstance(i, int) else (NegativeInfinity, i) for i in local
+        )
+
+    return epoch, _release, _pre, _post, _dev, _local
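# A closing sketch of the ordering _cmpkey produces, matching the PEP 440
# rules described in the comments above (import path assumed as before):
from pandas.util.version import Version

ordered = [
    Version("1.0.dev0"),   # dev releases first (the NegativeInfinity pre trick)
    Version("1.0a1"),      # then pre-releases
    Version("1.0"),        # then the final release
    Version("1.0+local"),  # locals sort after their public version
    Version("1.0.post1"),  # post releases last
]
assert ordered == sorted(ordered)
assert Version("1.0") == Version("1.0.0")  # trailing zeros are stripped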
diff --git a/moondream/lib/python3.10/site-packages/pandas/util/version/__pycache__/__init__.cpython-310.pyc b/moondream/lib/python3.10/site-packages/pandas/util/version/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e893d43de1012580e7da0767bd3f1a17e314a100
Binary files /dev/null and b/moondream/lib/python3.10/site-packages/pandas/util/version/__pycache__/__init__.cpython-310.pyc differ