diff --git a/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/INSTALLER b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/LICENSE b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..38438c121a8731fcf91c7cb6cb268baccf24fc4c --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2014-2022 Matthew Brennan Jones + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/METADATA b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..3f2fd71bbe71d450630269664cdb269151ad9b1a --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/METADATA @@ -0,0 +1,27 @@ +Metadata-Version: 2.1 +Name: py-cpuinfo +Version: 9.0.0 +Summary: Get CPU info with pure Python +Home-page: https://github.com/workhorsy/py-cpuinfo +Author: Matthew Brennan Jones +Author-email: matthew.brennan.jones@gmail.com +License: MIT +Platform: UNKNOWN +Classifier: Development Status :: 5 - Production/Stable +Classifier: Topic :: Utilities +Classifier: License :: OSI Approved :: MIT License +Classifier: Programming Language :: Python :: 3 +License-File: LICENSE + +py-cpuinfo +========== + + +Py-cpuinfo gets CPU info with pure Python. Py-cpuinfo should work +without any extra programs or libraries, beyond what your OS provides. +It does not require any compilation(C/C++, assembly, et cetera) to use. +It works with Python 3. 
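A minimal usage sketch of the py-cpuinfo API summarized above, for context on what this dist-info records (illustrative only, not part of the packaged METADATA; `get_cpu_info()` is the package's documented call, and the exact keys in the returned dict, such as "brand_raw" and "count", vary by platform):

    from cpuinfo import get_cpu_info

    # get_cpu_info() gathers details from OS-provided sources only (no compiled
    # extensions) and returns a plain dict; key availability varies by platform.
    info = get_cpu_info()
    print(info.get("brand_raw"), info.get("arch"), info.get("count"))

The same data is exposed on the command line through the `cpuinfo` console script registered in entry_points.txt below.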
+ +Documentation can be viewed here: https://github.com/workhorsy/py-cpuinfo + + diff --git a/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/RECORD b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..bdbdc50d7bc4439a1e19e4695373273ab181cd8e --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/RECORD @@ -0,0 +1,15 @@ +../../../bin/cpuinfo,sha256=0fOrVnZt9WoF2ubTTKZu5uP_4BTZTzW3UMEfI6lP8NQ,228 +cpuinfo/__init__.py,sha256=T6gndqGAggfJCu4_iOziTnomCN7KzaAK_OYTewE4FMA,44 +cpuinfo/__main__.py,sha256=nSxC6Hqhi-0lN7Z4WwtKdxQdf3cUJefb5hOahCzh4Yg,33 +cpuinfo/__pycache__/__init__.cpython-310.pyc,, +cpuinfo/__pycache__/__main__.cpython-310.pyc,, +cpuinfo/__pycache__/cpuinfo.cpython-310.pyc,, +cpuinfo/cpuinfo.py,sha256=HHyDlDUNovE3QzJ3hviiM1ngyOC4iD7i6oGiz2iTmVk,84388 +py_cpuinfo-9.0.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +py_cpuinfo-9.0.0.dist-info/LICENSE,sha256=3br3Y5a_XHqkWXWiHq_i4i7st9paoNt8sOYVL6r-800,1127 +py_cpuinfo-9.0.0.dist-info/METADATA,sha256=rRFelvhFdoYcXnXXYDAbgdIxQ8_iVUa5lUHgEmU3ncE,794 +py_cpuinfo-9.0.0.dist-info/RECORD,, +py_cpuinfo-9.0.0.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +py_cpuinfo-9.0.0.dist-info/WHEEL,sha256=G16H4A3IeoQmnOrYV4ueZGKSjhipXx8zc8nu9FGlvMA,92 +py_cpuinfo-9.0.0.dist-info/entry_points.txt,sha256=ZwrsclY_xUA0xJZK98bLxBdcowxnkK0ANYUT4FYcZJ8,42 +py_cpuinfo-9.0.0.dist-info/top_level.txt,sha256=XsjpunhkxD4hvznqQjrFNw0rtgizHEOGzewPZY3UEtU,8 diff --git a/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/REQUESTED b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/WHEEL b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..becc9a66ea739ba941d48a749e248761cc6e658a --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/WHEEL @@ -0,0 +1,5 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.37.1) +Root-Is-Purelib: true +Tag: py3-none-any + diff --git a/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/entry_points.txt b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..c10718f4d497f1e333eaec47651ab41f5d196efc --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/entry_points.txt @@ -0,0 +1,3 @@ +[console_scripts] +cpuinfo = cpuinfo:main + diff --git a/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/top_level.txt b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..b53b02d61061b32d70bf375f63e0e5d3ee8d4a1d --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/py_cpuinfo-9.0.0.dist-info/top_level.txt @@ -0,0 +1 @@ +cpuinfo diff --git a/evalkit_internvl/lib/python3.10/site-packages/pyasn1-0.6.1.dist-info/RECORD b/evalkit_internvl/lib/python3.10/site-packages/pyasn1-0.6.1.dist-info/RECORD new file mode 100644 index 
0000000000000000000000000000000000000000..94ef838a34d249f0f0f44f9b5524abae1fc4b99b --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/pyasn1-0.6.1.dist-info/RECORD @@ -0,0 +1,72 @@ +pyasn1-0.6.1.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +pyasn1-0.6.1.dist-info/LICENSE.rst,sha256=Kq1fwA9wXEoa3bg-7RCmp10oajd58M-FGdh-YrxHNf0,1334 +pyasn1-0.6.1.dist-info/METADATA,sha256=8e1KBL3kvp1MlLUqCM1uOCMaBKxwlo4N0xHXk-_sd2Y,8383 +pyasn1-0.6.1.dist-info/RECORD,, +pyasn1-0.6.1.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +pyasn1-0.6.1.dist-info/WHEEL,sha256=cVxcB9AmuTcXqmwrtPhNK88dr7IR_b6qagTj0UvIEbY,91 +pyasn1-0.6.1.dist-info/top_level.txt,sha256=dnNEQt3nIDIO5mSCCOB5obQHrjDOUsRycdBujc2vrWE,7 +pyasn1-0.6.1.dist-info/zip-safe,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +pyasn1/__init__.py,sha256=tc4WulUv4ZkpkmVtee9-Fsgc6gi9jZFH1VIbAvSWj3s,66 +pyasn1/__pycache__/__init__.cpython-310.pyc,, +pyasn1/__pycache__/debug.cpython-310.pyc,, +pyasn1/__pycache__/error.cpython-310.pyc,, +pyasn1/codec/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59 +pyasn1/codec/__pycache__/__init__.cpython-310.pyc,, +pyasn1/codec/__pycache__/streaming.cpython-310.pyc,, +pyasn1/codec/ber/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59 +pyasn1/codec/ber/__pycache__/__init__.cpython-310.pyc,, +pyasn1/codec/ber/__pycache__/decoder.cpython-310.pyc,, +pyasn1/codec/ber/__pycache__/encoder.cpython-310.pyc,, +pyasn1/codec/ber/__pycache__/eoo.cpython-310.pyc,, +pyasn1/codec/ber/decoder.py,sha256=HZWc3M9406bhApuJF-TAYpRfLWvQT54CrREDqDMyU0Y,79192 +pyasn1/codec/ber/encoder.py,sha256=eO_--5b-0HXmPpIW2JhYlejU6V7FwdORmXFyCfKHyzI,29796 +pyasn1/codec/ber/eoo.py,sha256=dspLKc2xr_W5Tbcr2WcfLd_bJLhOjotq1YxKn3DCQNI,639 +pyasn1/codec/cer/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59 +pyasn1/codec/cer/__pycache__/__init__.cpython-310.pyc,, +pyasn1/codec/cer/__pycache__/decoder.cpython-310.pyc,, +pyasn1/codec/cer/__pycache__/encoder.cpython-310.pyc,, +pyasn1/codec/cer/decoder.py,sha256=S279_LRjwHyTUBuv4LPYOpib1X4hLmBh_3et49ocm4A,4589 +pyasn1/codec/cer/encoder.py,sha256=vsGrgOHJokTeZqBJwNGokejvqH5EfTvy8hExd_j5bbY,9838 +pyasn1/codec/der/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59 +pyasn1/codec/der/__pycache__/__init__.cpython-310.pyc,, +pyasn1/codec/der/__pycache__/decoder.cpython-310.pyc,, +pyasn1/codec/der/__pycache__/encoder.cpython-310.pyc,, +pyasn1/codec/der/decoder.py,sha256=GOpKZ1wFRYU0EEF3kSmIaMfe1h2w17VdGu57AHUqQFw,3428 +pyasn1/codec/der/encoder.py,sha256=ldxrpvXDFsxLxtvN7aiR61JNNtainNagZCSpsZM9DZs,3479 +pyasn1/codec/native/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59 +pyasn1/codec/native/__pycache__/__init__.cpython-310.pyc,, +pyasn1/codec/native/__pycache__/decoder.cpython-310.pyc,, +pyasn1/codec/native/__pycache__/encoder.cpython-310.pyc,, +pyasn1/codec/native/decoder.py,sha256=2vK9B0AJzLT2exSNtlCUlYzZvm0E7IzUU8Ygg_lLxNo,9118 +pyasn1/codec/native/encoder.py,sha256=C24L5FkwhXPSRytaLlcL0uuYDTC2BXD75ZwH_bCqKX8,9184 +pyasn1/codec/streaming.py,sha256=Vp-VDh0SlA5h7T133rne9UNlJlqv2ohpUzVlSCGjq24,6377 +pyasn1/compat/__init__.py,sha256=-9FOJV1STFBatf2pVRiOYn14GmCKC8RY3TYCxOqfRXY,112 +pyasn1/compat/__pycache__/__init__.cpython-310.pyc,, +pyasn1/compat/__pycache__/integer.cpython-310.pyc,, +pyasn1/compat/integer.py,sha256=lMXqbJBTyjg34Rhx6JlFcXyoQxDaeXGxhaIIab86hX8,404 +pyasn1/debug.py,sha256=u-WmIFfewqp0041ezvtTjvhZcU9K14OI6p00ArXZ63g,3494 
+pyasn1/error.py,sha256=e352oqW33seeh2MbIF27sFSgpiegjstabCMFx2piR0M,3258 +pyasn1/type/__init__.py,sha256=EEDlJYS172EH39GUidN_8FbkNcWY9OVV8e30AV58pn0,59 +pyasn1/type/__pycache__/__init__.cpython-310.pyc,, +pyasn1/type/__pycache__/base.cpython-310.pyc,, +pyasn1/type/__pycache__/char.cpython-310.pyc,, +pyasn1/type/__pycache__/constraint.cpython-310.pyc,, +pyasn1/type/__pycache__/error.cpython-310.pyc,, +pyasn1/type/__pycache__/namedtype.cpython-310.pyc,, +pyasn1/type/__pycache__/namedval.cpython-310.pyc,, +pyasn1/type/__pycache__/opentype.cpython-310.pyc,, +pyasn1/type/__pycache__/tag.cpython-310.pyc,, +pyasn1/type/__pycache__/tagmap.cpython-310.pyc,, +pyasn1/type/__pycache__/univ.cpython-310.pyc,, +pyasn1/type/__pycache__/useful.cpython-310.pyc,, +pyasn1/type/base.py,sha256=tjBRvXIQSiHES5-e5rBbsnn5CtIvBgCuflujDbdrtkM,22050 +pyasn1/type/char.py,sha256=Rvj5ypQLPNXcdHkfUV8nul1XX66R_Akn0g2HUyLj1qY,9438 +pyasn1/type/constraint.py,sha256=jmrt5esLa095XdfS0beqaoRuUjnuHiTKdkTdCcKx1FI,21915 +pyasn1/type/error.py,sha256=2kwYYkbd2jXIVEE56ThLRmBEOGZfafwogEOo-9RV_GY,259 +pyasn1/type/namedtype.py,sha256=jnTClIUoRZi025GTY9GlMlMI-j5dqEcv_ilzZ7i0hUQ,16179 +pyasn1/type/namedval.py,sha256=84u6wKOfte7U47aWrFqIZRM3tO2ryivpsBqVblPezuc,4899 +pyasn1/type/opentype.py,sha256=jjqSbTgAaCxlSHSf66YcLbrxtfh_98nAx2v8wzW35MU,2861 +pyasn1/type/tag.py,sha256=hqIuspUhc5QwN182LeQMc23W_vFNTgASvnUUSX4SPHM,9497 +pyasn1/type/tagmap.py,sha256=alJ9ZfDGTAsPeygHT6yONTagUkCjlgij82YXpPaQ_-8,3000 +pyasn1/type/univ.py,sha256=Bnu2gHdA84UXMLtgb4LXbHI5TYw-kKljlsJ7dkJ8KfI,109212 +pyasn1/type/useful.py,sha256=-J7ej0hqdjF29h150dtNmIIcGcMBg_y-nKqcozvk-48,5284 diff --git a/evalkit_internvl/lib/python3.10/site-packages/pyasn1-0.6.1.dist-info/REQUESTED b/evalkit_internvl/lib/python3.10/site-packages/pyasn1-0.6.1.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__init__.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..865d72fb768b4cac4912efd3afc62af2c774dbab --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__init__.py @@ -0,0 +1,20 @@ +# ruff: noqa: F401 +import logging + +from .oauth1_auth import OAuth1 +from .oauth1_session import OAuth1Session +from .oauth2_auth import OAuth2 +from .oauth2_session import OAuth2Session, TokenUpdated + +__version__ = "2.0.0" + +import requests + +if requests.__version__ < "2.0.0": + msg = ( + "You are using requests version %s, which is older than " + "requests-oauthlib expects, please upgrade to 2.0.0 or later." 
+ ) + raise Warning(msg % requests.__version__) + +logging.getLogger("requests_oauthlib").addHandler(logging.NullHandler()) diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/__init__.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3739b459fee34f145eacc37fa9db3a2bd89fed43 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/oauth1_auth.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/oauth1_auth.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb9b167943b045bde33067338a16f9c68e8bae41 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/oauth1_auth.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/oauth1_session.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/oauth1_session.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a6d6b378cc7d3c46ccdf57c8c776c285e7cc72bf Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/oauth1_session.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/oauth2_auth.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/oauth2_auth.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2517a0cb574732879b580bf6a63161565953d009 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/oauth2_auth.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/oauth2_session.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/oauth2_session.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5f639035cc296d86ee5bbdf41c5a3618e83dd37c Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/__pycache__/oauth2_session.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__init__.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8815ea0b8eec8630d6cf957f91796df695e365bf --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__init__.py @@ -0,0 +1,9 @@ +# ruff: noqa: F401 +from .facebook import facebook_compliance_fix +from .fitbit import fitbit_compliance_fix +from .slack import slack_compliance_fix +from .instagram import instagram_compliance_fix +from .mailchimp import mailchimp_compliance_fix +from .weibo import weibo_compliance_fix +from .plentymarkets import plentymarkets_compliance_fix +from .ebay import ebay_compliance_fix diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/douban.cpython-310.pyc 
b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/douban.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..112514585689b78c53399a188efd3cfe18e27f2b Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/douban.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/fitbit.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/fitbit.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ba6af04e992000e2480bc75bc9a5d9d1efa6821 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/fitbit.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/instagram.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/instagram.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a785ba52e1b4e3ba49c302ad41c2e63d0e1acd08 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/instagram.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/mailchimp.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/mailchimp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3f3ef7f3e9510aa7683890a79610f0fac3ac1186 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/mailchimp.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/plentymarkets.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/plentymarkets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f1548ef043ae1d9ce8e9b6a453024346a0d2987 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/plentymarkets.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/slack.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/slack.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..23cfa67751687c877ce2caf06e4603b253b9b158 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/__pycache__/slack.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/douban.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/douban.py new file mode 100644 index 0000000000000000000000000000000000000000..c8b99c721e9bb09e3a0127bb90e09f53814d714c --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/douban.py @@ -0,0 +1,15 @@ +import json + + +def douban_compliance_fix(session): + def fix_token_type(r): + token = json.loads(r.text) + token.setdefault("token_type", "Bearer") + 
fixed_token = json.dumps(token) + r._content = fixed_token.encode() + return r + + session._client_default_token_placement = "query" + session.register_compliance_hook("access_token_response", fix_token_type) + + return session diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/ebay.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/ebay.py new file mode 100644 index 0000000000000000000000000000000000000000..ef33f39101bd97ab2926b13289f8ff3886265fb1 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/ebay.py @@ -0,0 +1,22 @@ +import json + + +def ebay_compliance_fix(session): + def _compliance_fix(response): + token = json.loads(response.text) + + # eBay responds with non-compliant token types. + # https://developer.ebay.com/api-docs/static/oauth-client-credentials-grant.html + # https://developer.ebay.com/api-docs/static/oauth-auth-code-grant-request.html + # Modify these to be "Bearer". + if token.get("token_type") in ["Application Access Token", "User Access Token"]: + token["token_type"] = "Bearer" + fixed_token = json.dumps(token) + response._content = fixed_token.encode() + + return response + + session.register_compliance_hook("access_token_response", _compliance_fix) + session.register_compliance_hook("refresh_token_response", _compliance_fix) + + return session diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/facebook.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/facebook.py new file mode 100644 index 0000000000000000000000000000000000000000..f44558a83902a76022c38f2310a7eddb39cee150 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/facebook.py @@ -0,0 +1,27 @@ +from json import dumps +from urllib.parse import parse_qsl + + +def facebook_compliance_fix(session): + def _compliance_fix(r): + # if Facebook claims to be sending us json, let's trust them. + if "application/json" in r.headers.get("content-type", {}): + return r + + # Facebook returns a content-type of text/plain when sending their + # x-www-form-urlencoded responses, along with a 200. If not, let's + # assume we're getting JSON and bail on the fix. + if "text/plain" in r.headers.get("content-type", {}) and r.status_code == 200: + token = dict(parse_qsl(r.text, keep_blank_values=True)) + else: + return r + + expires = token.get("expires") + if expires is not None: + token["expires_in"] = expires + token["token_type"] = "Bearer" + r._content = dumps(token).encode() + return r + + session.register_compliance_hook("access_token_response", _compliance_fix) + return session diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/fitbit.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/fitbit.py new file mode 100644 index 0000000000000000000000000000000000000000..aacc68bfbb1b80be304d21a0f4d4a57cbb7db54b --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/fitbit.py @@ -0,0 +1,23 @@ +""" +The Fitbit API breaks from the OAuth2 RFC standard by returning an "errors" +object list, rather than a single "error" string. This puts hooks in place so +that oauthlib can process an error in the results from access token and refresh +token responses. This is necessary to prevent getting the generic red herring +MissingTokenError. 
+""" + +from json import loads, dumps + + +def fitbit_compliance_fix(session): + def _missing_error(r): + token = loads(r.text) + if "errors" in token: + # Set the error to the first one we have + token["error"] = token["errors"][0]["errorType"] + r._content = dumps(token).encode() + return r + + session.register_compliance_hook("access_token_response", _missing_error) + session.register_compliance_hook("refresh_token_response", _missing_error) + return session diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/instagram.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/instagram.py new file mode 100644 index 0000000000000000000000000000000000000000..7d5a2ad447eda3bbcefe65f417e1f9d82848c27e --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/instagram.py @@ -0,0 +1,23 @@ +from urllib.parse import urlparse, parse_qs + +from oauthlib.common import add_params_to_uri + + +def instagram_compliance_fix(session): + def _non_compliant_param_name(url, headers, data): + # If the user has already specified the token in the URL + # then there's nothing to do. + # If the specified token is different from ``session.access_token``, + # we assume the user intends to override the access token. + url_query = dict(parse_qs(urlparse(url).query)) + token = url_query.get("access_token") + if token: + # Nothing to do, just return. + return url, headers, data + + token = [("access_token", session.access_token)] + url = add_params_to_uri(url, token) + return url, headers, data + + session.register_compliance_hook("protected_request", _non_compliant_param_name) + return session diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/mailchimp.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/mailchimp.py new file mode 100644 index 0000000000000000000000000000000000000000..0d602659c646203a012f107e5651087762b93d37 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/mailchimp.py @@ -0,0 +1,21 @@ +import json + + +def mailchimp_compliance_fix(session): + def _null_scope(r): + token = json.loads(r.text) + if "scope" in token and token["scope"] is None: + token.pop("scope") + r._content = json.dumps(token).encode() + return r + + def _non_zero_expiration(r): + token = json.loads(r.text) + if "expires_in" in token and token["expires_in"] == 0: + token["expires_in"] = 3600 + r._content = json.dumps(token).encode() + return r + + session.register_compliance_hook("access_token_response", _null_scope) + session.register_compliance_hook("access_token_response", _non_zero_expiration) + return session diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/plentymarkets.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/plentymarkets.py new file mode 100644 index 0000000000000000000000000000000000000000..859f0566a5be32fb1a8d2f013c8205800bb84e23 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/plentymarkets.py @@ -0,0 +1,27 @@ +from json import dumps, loads +import re + + +def plentymarkets_compliance_fix(session): + def _to_snake_case(n): + return re.sub("(.)([A-Z][a-z]+)", r"\1_\2", n).lower() + + def _compliance_fix(r): + # Plenty returns the Token in CamelCase instead of _ + if ( + "application/json" in r.headers.get("content-type", {}) + and r.status_code 
== 200 + ): + token = loads(r.text) + else: + return r + + fixed_token = {} + for k, v in token.items(): + fixed_token[_to_snake_case(k)] = v + + r._content = dumps(fixed_token).encode() + return r + + session.register_compliance_hook("access_token_response", _compliance_fix) + return session diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/slack.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/slack.py new file mode 100644 index 0000000000000000000000000000000000000000..9095a470cd1ccb7783bd3129286310920c1e2b62 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/slack.py @@ -0,0 +1,34 @@ +from urllib.parse import urlparse, parse_qs + +from oauthlib.common import add_params_to_uri + + +def slack_compliance_fix(session): + def _non_compliant_param_name(url, headers, data): + # If the user has already specified the token, either in the URL + # or in a data dictionary, then there's nothing to do. + # If the specified token is different from ``session.access_token``, + # we assume the user intends to override the access token. + url_query = dict(parse_qs(urlparse(url).query)) + token = url_query.get("token") + if not token and isinstance(data, dict): + token = data.get("token") + + if token: + # Nothing to do, just return. + return url, headers, data + + if not data: + data = {"token": session.access_token} + elif isinstance(data, dict): + data["token"] = session.access_token + else: + # ``data`` is something other than a dict: maybe a stream, + # maybe a file object, maybe something else. We can't easily + # modify it, so we'll set the token by modifying the URL instead. + token = [("token", session.access_token)] + url = add_params_to_uri(url, token) + return url, headers, data + + session.register_compliance_hook("protected_request", _non_compliant_param_name) + return session diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/weibo.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/weibo.py new file mode 100644 index 0000000000000000000000000000000000000000..f1623fd6d768e5c5cdb91485f01a58c184040833 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/compliance_fixes/weibo.py @@ -0,0 +1,13 @@ +from json import loads, dumps + + +def weibo_compliance_fix(session): + def _missing_token_type(r): + token = loads(r.text) + token["token_type"] = "Bearer" + r._content = dumps(token).encode() + return r + + session._client.default_token_placement = "query" + session.register_compliance_hook("access_token_response", _missing_token_type) + return session diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/oauth1_auth.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/oauth1_auth.py new file mode 100644 index 0000000000000000000000000000000000000000..f8c0bd6e74e7caffb99874b62d363df92cd8f1a5 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/oauth1_auth.py @@ -0,0 +1,112 @@ +# -*- coding: utf-8 -*- +import logging + +from oauthlib.common import extract_params +from oauthlib.oauth1 import Client, SIGNATURE_HMAC, SIGNATURE_TYPE_AUTH_HEADER +from oauthlib.oauth1 import SIGNATURE_TYPE_BODY +from requests.utils import to_native_string +from requests.auth import AuthBase + +CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded" +CONTENT_TYPE_MULTI_PART = "multipart/form-data" + + +log = 
logging.getLogger(__name__) + +# OBS!: Correct signing of requests are conditional on invoking OAuth1 +# as the last step of preparing a request, or at least having the +# content-type set properly. +class OAuth1(AuthBase): + """Signs the request using OAuth 1 (RFC5849)""" + + client_class = Client + + def __init__( + self, + client_key, + client_secret=None, + resource_owner_key=None, + resource_owner_secret=None, + callback_uri=None, + signature_method=SIGNATURE_HMAC, + signature_type=SIGNATURE_TYPE_AUTH_HEADER, + rsa_key=None, + verifier=None, + decoding="utf-8", + client_class=None, + force_include_body=False, + **kwargs + ): + + try: + signature_type = signature_type.upper() + except AttributeError: + pass + + client_class = client_class or self.client_class + + self.force_include_body = force_include_body + + self.client = client_class( + client_key, + client_secret, + resource_owner_key, + resource_owner_secret, + callback_uri, + signature_method, + signature_type, + rsa_key, + verifier, + decoding=decoding, + **kwargs + ) + + def __call__(self, r): + """Add OAuth parameters to the request. + + Parameters may be included from the body if the content-type is + urlencoded, if no content type is set a guess is made. + """ + # Overwriting url is safe here as request will not modify it past + # this point. + log.debug("Signing request %s using client %s", r, self.client) + + content_type = r.headers.get("Content-Type", "") + if ( + not content_type + and extract_params(r.body) + or self.client.signature_type == SIGNATURE_TYPE_BODY + ): + content_type = CONTENT_TYPE_FORM_URLENCODED + if not isinstance(content_type, str): + content_type = content_type.decode("utf-8") + + is_form_encoded = CONTENT_TYPE_FORM_URLENCODED in content_type + + log.debug( + "Including body in call to sign: %s", + is_form_encoded or self.force_include_body, + ) + + if is_form_encoded: + r.headers["Content-Type"] = CONTENT_TYPE_FORM_URLENCODED + r.url, headers, r.body = self.client.sign( + str(r.url), str(r.method), r.body or "", r.headers + ) + elif self.force_include_body: + # To allow custom clients to work on non form encoded bodies. + r.url, headers, r.body = self.client.sign( + str(r.url), str(r.method), r.body or "", r.headers + ) + else: + # Omit body data in the signing of non form-encoded requests + r.url, headers, _ = self.client.sign( + str(r.url), str(r.method), None, r.headers + ) + + r.prepare_headers(headers) + r.url = to_native_string(r.url) + log.debug("Updated url: %s", r.url) + log.debug("Updated headers: %s", headers) + log.debug("Updated body: %r", r.body) + return r diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/oauth1_session.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/oauth1_session.py new file mode 100644 index 0000000000000000000000000000000000000000..7625c8084eecd402705e4bb76ea0604dcaa20560 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/oauth1_session.py @@ -0,0 +1,395 @@ +from urllib.parse import urlparse + +import logging + +from oauthlib.common import add_params_to_uri +from oauthlib.common import urldecode as _urldecode +from oauthlib.oauth1 import SIGNATURE_HMAC, SIGNATURE_RSA, SIGNATURE_TYPE_AUTH_HEADER +import requests + +from . 
import OAuth1 + + +log = logging.getLogger(__name__) + + +def urldecode(body): + """Parse query or json to python dictionary""" + try: + return _urldecode(body) + except Exception: + import json + + return json.loads(body) + + +class TokenRequestDenied(ValueError): + def __init__(self, message, response): + super(TokenRequestDenied, self).__init__(message) + self.response = response + + @property + def status_code(self): + """For backwards-compatibility purposes""" + return self.response.status_code + + +class TokenMissing(ValueError): + def __init__(self, message, response): + super(TokenMissing, self).__init__(message) + self.response = response + + +class VerifierMissing(ValueError): + pass + + +class OAuth1Session(requests.Session): + """Request signing and convenience methods for the oauth dance. + + What is the difference between OAuth1Session and OAuth1? + + OAuth1Session actually uses OAuth1 internally and its purpose is to assist + in the OAuth workflow through convenience methods to prepare authorization + URLs and parse the various token and redirection responses. It also provide + rudimentary validation of responses. + + An example of the OAuth workflow using a basic CLI app and Twitter. + + >>> # Credentials obtained during the registration. + >>> client_key = 'client key' + >>> client_secret = 'secret' + >>> callback_uri = 'https://127.0.0.1/callback' + >>> + >>> # Endpoints found in the OAuth provider API documentation + >>> request_token_url = 'https://api.twitter.com/oauth/request_token' + >>> authorization_url = 'https://api.twitter.com/oauth/authorize' + >>> access_token_url = 'https://api.twitter.com/oauth/access_token' + >>> + >>> oauth_session = OAuth1Session(client_key,client_secret=client_secret, callback_uri=callback_uri) + >>> + >>> # First step, fetch the request token. + >>> oauth_session.fetch_request_token(request_token_url) + { + 'oauth_token': 'kjerht2309u', + 'oauth_token_secret': 'lsdajfh923874', + } + >>> + >>> # Second step. Follow this link and authorize + >>> oauth_session.authorization_url(authorization_url) + 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback' + >>> + >>> # Third step. Fetch the access token + >>> redirect_response = input('Paste the full redirect URL here.') + >>> oauth_session.parse_authorization_response(redirect_response) + { + 'oauth_token: 'kjerht2309u', + 'oauth_token_secret: 'lsdajfh923874', + 'oauth_verifier: 'w34o8967345', + } + >>> oauth_session.fetch_access_token(access_token_url) + { + 'oauth_token': 'sdf0o9823sjdfsdf', + 'oauth_token_secret': '2kjshdfp92i34asdasd', + } + >>> # Done. You can now make OAuth requests. + >>> status_url = 'http://api.twitter.com/1/statuses/update.json' + >>> new_status = {'status': 'hello world!'} + >>> oauth_session.post(status_url, data=new_status) + + """ + + def __init__( + self, + client_key, + client_secret=None, + resource_owner_key=None, + resource_owner_secret=None, + callback_uri=None, + signature_method=SIGNATURE_HMAC, + signature_type=SIGNATURE_TYPE_AUTH_HEADER, + rsa_key=None, + verifier=None, + client_class=None, + force_include_body=False, + **kwargs + ): + """Construct the OAuth 1 session. + + :param client_key: A client specific identifier. + :param client_secret: A client specific secret used to create HMAC and + plaintext signatures. + :param resource_owner_key: A resource owner key, also referred to as + request token or access token depending on + when in the workflow it is used. 
+ :param resource_owner_secret: A resource owner secret obtained with + either a request or access token. Often + referred to as token secret. + :param callback_uri: The URL the user is redirect back to after + authorization. + :param signature_method: Signature methods determine how the OAuth + signature is created. The three options are + oauthlib.oauth1.SIGNATURE_HMAC (default), + oauthlib.oauth1.SIGNATURE_RSA and + oauthlib.oauth1.SIGNATURE_PLAIN. + :param signature_type: Signature type decides where the OAuth + parameters are added. Either in the + Authorization header (default) or to the URL + query parameters or the request body. Defined as + oauthlib.oauth1.SIGNATURE_TYPE_AUTH_HEADER, + oauthlib.oauth1.SIGNATURE_TYPE_QUERY and + oauthlib.oauth1.SIGNATURE_TYPE_BODY + respectively. + :param rsa_key: The private RSA key as a string. Can only be used with + signature_method=oauthlib.oauth1.SIGNATURE_RSA. + :param verifier: A verifier string to prove authorization was granted. + :param client_class: A subclass of `oauthlib.oauth1.Client` to use with + `requests_oauthlib.OAuth1` instead of the default + :param force_include_body: Always include the request body in the + signature creation. + :param **kwargs: Additional keyword arguments passed to `OAuth1` + """ + super(OAuth1Session, self).__init__() + self._client = OAuth1( + client_key, + client_secret=client_secret, + resource_owner_key=resource_owner_key, + resource_owner_secret=resource_owner_secret, + callback_uri=callback_uri, + signature_method=signature_method, + signature_type=signature_type, + rsa_key=rsa_key, + verifier=verifier, + client_class=client_class, + force_include_body=force_include_body, + **kwargs + ) + self.auth = self._client + + @property + def token(self): + oauth_token = self._client.client.resource_owner_key + oauth_token_secret = self._client.client.resource_owner_secret + oauth_verifier = self._client.client.verifier + + token_dict = {} + if oauth_token: + token_dict["oauth_token"] = oauth_token + if oauth_token_secret: + token_dict["oauth_token_secret"] = oauth_token_secret + if oauth_verifier: + token_dict["oauth_verifier"] = oauth_verifier + + return token_dict + + @token.setter + def token(self, value): + self._populate_attributes(value) + + @property + def authorized(self): + """Boolean that indicates whether this session has an OAuth token + or not. If `self.authorized` is True, you can reasonably expect + OAuth-protected requests to the resource to succeed. If + `self.authorized` is False, you need the user to go through the OAuth + authentication dance before OAuth-protected requests to the resource + will succeed. + """ + if self._client.client.signature_method == SIGNATURE_RSA: + # RSA only uses resource_owner_key + return bool(self._client.client.resource_owner_key) + else: + # other methods of authentication use all three pieces + return ( + bool(self._client.client.client_secret) + and bool(self._client.client.resource_owner_key) + and bool(self._client.client.resource_owner_secret) + ) + + def authorization_url(self, url, request_token=None, **kwargs): + """Create an authorization URL by appending request_token and optional + kwargs to url. + + This is the second step in the OAuth 1 workflow. The user should be + redirected to this authorization URL, grant access to you, and then + be redirected back to you. The redirection back can either be specified + during client registration or by supplying a callback URI per request. + + :param url: The authorization endpoint URL. 
+ :param request_token: The previously obtained request token. + :param kwargs: Optional parameters to append to the URL. + :returns: The authorization URL with new parameters embedded. + + An example using a registered default callback URI. + + >>> request_token_url = 'https://api.twitter.com/oauth/request_token' + >>> authorization_url = 'https://api.twitter.com/oauth/authorize' + >>> oauth_session = OAuth1Session('client-key', client_secret='secret') + >>> oauth_session.fetch_request_token(request_token_url) + { + 'oauth_token': 'sdf0o9823sjdfsdf', + 'oauth_token_secret': '2kjshdfp92i34asdasd', + } + >>> oauth_session.authorization_url(authorization_url) + 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf' + >>> oauth_session.authorization_url(authorization_url, foo='bar') + 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&foo=bar' + + An example using an explicit callback URI. + + >>> request_token_url = 'https://api.twitter.com/oauth/request_token' + >>> authorization_url = 'https://api.twitter.com/oauth/authorize' + >>> oauth_session = OAuth1Session('client-key', client_secret='secret', callback_uri='https://127.0.0.1/callback') + >>> oauth_session.fetch_request_token(request_token_url) + { + 'oauth_token': 'sdf0o9823sjdfsdf', + 'oauth_token_secret': '2kjshdfp92i34asdasd', + } + >>> oauth_session.authorization_url(authorization_url) + 'https://api.twitter.com/oauth/authorize?oauth_token=sdf0o9823sjdfsdf&oauth_callback=https%3A%2F%2F127.0.0.1%2Fcallback' + """ + kwargs["oauth_token"] = request_token or self._client.client.resource_owner_key + log.debug("Adding parameters %s to url %s", kwargs, url) + return add_params_to_uri(url, kwargs.items()) + + def fetch_request_token(self, url, realm=None, **request_kwargs): + """Fetch a request token. + + This is the first step in the OAuth 1 workflow. A request token is + obtained by making a signed post request to url. The token is then + parsed from the application/x-www-form-urlencoded response and ready + to be used to construct an authorization url. + + :param url: The request token endpoint URL. + :param realm: A list of realms to request access to. + :param request_kwargs: Optional arguments passed to ''post'' + function in ''requests.Session'' + :returns: The response in dict format. + + Note that a previously set callback_uri will be reset for your + convenience, or else signature creation will be incorrect on + consecutive requests. + + >>> request_token_url = 'https://api.twitter.com/oauth/request_token' + >>> oauth_session = OAuth1Session('client-key', client_secret='secret') + >>> oauth_session.fetch_request_token(request_token_url) + { + 'oauth_token': 'sdf0o9823sjdfsdf', + 'oauth_token_secret': '2kjshdfp92i34asdasd', + } + """ + self._client.client.realm = " ".join(realm) if realm else None + token = self._fetch_token(url, **request_kwargs) + log.debug("Resetting callback_uri and realm (not needed in next phase).") + self._client.client.callback_uri = None + self._client.client.realm = None + return token + + def fetch_access_token(self, url, verifier=None, **request_kwargs): + """Fetch an access token. + + This is the final step in the OAuth 1 workflow. An access token is + obtained using all previously obtained credentials, including the + verifier from the authorization step. + + Note that a previously set verifier will be reset for your + convenience, or else signature creation will be incorrect on + consecutive requests. 
+ + >>> access_token_url = 'https://api.twitter.com/oauth/access_token' + >>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345' + >>> oauth_session = OAuth1Session('client-key', client_secret='secret') + >>> oauth_session.parse_authorization_response(redirect_response) + { + 'oauth_token: 'kjerht2309u', + 'oauth_token_secret: 'lsdajfh923874', + 'oauth_verifier: 'w34o8967345', + } + >>> oauth_session.fetch_access_token(access_token_url) + { + 'oauth_token': 'sdf0o9823sjdfsdf', + 'oauth_token_secret': '2kjshdfp92i34asdasd', + } + """ + if verifier: + self._client.client.verifier = verifier + if not getattr(self._client.client, "verifier", None): + raise VerifierMissing("No client verifier has been set.") + token = self._fetch_token(url, **request_kwargs) + log.debug("Resetting verifier attribute, should not be used anymore.") + self._client.client.verifier = None + return token + + def parse_authorization_response(self, url): + """Extract parameters from the post authorization redirect response URL. + + :param url: The full URL that resulted from the user being redirected + back from the OAuth provider to you, the client. + :returns: A dict of parameters extracted from the URL. + + >>> redirect_response = 'https://127.0.0.1/callback?oauth_token=kjerht2309uf&oauth_token_secret=lsdajfh923874&oauth_verifier=w34o8967345' + >>> oauth_session = OAuth1Session('client-key', client_secret='secret') + >>> oauth_session.parse_authorization_response(redirect_response) + { + 'oauth_token: 'kjerht2309u', + 'oauth_token_secret: 'lsdajfh923874', + 'oauth_verifier: 'w34o8967345', + } + """ + log.debug("Parsing token from query part of url %s", url) + token = dict(urldecode(urlparse(url).query)) + log.debug("Updating internal client token attribute.") + self._populate_attributes(token) + self.token = token + return token + + def _populate_attributes(self, token): + if "oauth_token" in token: + self._client.client.resource_owner_key = token["oauth_token"] + else: + raise TokenMissing( + "Response does not contain a token: {resp}".format(resp=token), token + ) + if "oauth_token_secret" in token: + self._client.client.resource_owner_secret = token["oauth_token_secret"] + if "oauth_verifier" in token: + self._client.client.verifier = token["oauth_verifier"] + + def _fetch_token(self, url, **request_kwargs): + log.debug("Fetching token from %s using client %s", url, self._client.client) + r = self.post(url, **request_kwargs) + + if r.status_code >= 400: + error = "Token request failed with code %s, response was '%s'." + raise TokenRequestDenied(error % (r.status_code, r.text), r) + + log.debug('Decoding token from response "%s"', r.text) + try: + token = dict(urldecode(r.text.strip())) + except ValueError as e: + error = ( + "Unable to decode token from token response. " + "This is commonly caused by an unsuccessful request where" + " a non urlencoded error message is returned. " + "The decoding error was %s" + "" % e + ) + raise ValueError(error) + + log.debug("Obtained token %s", token) + log.debug("Updating internal client attributes from token data.") + self._populate_attributes(token) + self.token = token + return token + + def rebuild_auth(self, prepared_request, response): + """ + When being redirected we should always strip Authorization + header, since nonce may not be reused as per OAuth spec. 
+ """ + if "Authorization" in prepared_request.headers: + # If we get redirected to a new host, we should strip out + # any authentication headers. + prepared_request.headers.pop("Authorization", True) + prepared_request.prepare_auth(self.auth) + return diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/oauth2_auth.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/oauth2_auth.py new file mode 100644 index 0000000000000000000000000000000000000000..f19f52ac900cd33c38500f184076c8422bcba81e --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/oauth2_auth.py @@ -0,0 +1,36 @@ +from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError +from oauthlib.oauth2 import is_secure_transport +from requests.auth import AuthBase + + +class OAuth2(AuthBase): + """Adds proof of authorization (OAuth2 token) to the request.""" + + def __init__(self, client_id=None, client=None, token=None): + """Construct a new OAuth 2 authorization object. + + :param client_id: Client id obtained during registration + :param client: :class:`oauthlib.oauth2.Client` to be used. Default is + WebApplicationClient which is useful for any + hosted application but not mobile or desktop. + :param token: Token dictionary, must include access_token + and token_type. + """ + self._client = client or WebApplicationClient(client_id, token=token) + if token: + for k, v in token.items(): + setattr(self._client, k, v) + + def __call__(self, r): + """Append an OAuth 2 token to the request. + + Note that currently HTTPS is required for all requests. There may be + a token type that allows for plain HTTP in the future and then this + should be updated to allow plain HTTP on a white list basis. + """ + if not is_secure_transport(r.url): + raise InsecureTransportError() + r.url, r.headers, r.body = self._client.add_token( + r.url, http_method=r.method, body=r.body, headers=r.headers + ) + return r diff --git a/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/oauth2_session.py b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/oauth2_session.py new file mode 100644 index 0000000000000000000000000000000000000000..93cc4d7bbd2af7e74e1c8d79a805d3d5540b1a5d --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/requests_oauthlib/oauth2_session.py @@ -0,0 +1,587 @@ +import logging + +from oauthlib.common import generate_token, urldecode +from oauthlib.oauth2 import WebApplicationClient, InsecureTransportError +from oauthlib.oauth2 import LegacyApplicationClient +from oauthlib.oauth2 import TokenExpiredError, is_secure_transport +import requests + +log = logging.getLogger(__name__) + + +class TokenUpdated(Warning): + def __init__(self, token): + super(TokenUpdated, self).__init__() + self.token = token + + +class OAuth2Session(requests.Session): + """Versatile OAuth 2 extension to :class:`requests.Session`. + + Supports any grant type adhering to :class:`oauthlib.oauth2.Client` spec + including the four core OAuth 2 grants. + + Can be used to create authorization urls, fetch tokens and access protected + resources using the :class:`requests.Session` interface you are used to. 
+ + - :class:`oauthlib.oauth2.WebApplicationClient` (default): Authorization Code Grant + - :class:`oauthlib.oauth2.MobileApplicationClient`: Implicit Grant + - :class:`oauthlib.oauth2.LegacyApplicationClient`: Password Credentials Grant + - :class:`oauthlib.oauth2.BackendApplicationClient`: Client Credentials Grant + + Note that the only time you will be using Implicit Grant from python is if + you are driving a user agent able to obtain URL fragments. + """ + + def __init__( + self, + client_id=None, + client=None, + auto_refresh_url=None, + auto_refresh_kwargs=None, + scope=None, + redirect_uri=None, + token=None, + state=None, + token_updater=None, + pkce=None, + **kwargs + ): + """Construct a new OAuth 2 client session. + + :param client_id: Client id obtained during registration + :param client: :class:`oauthlib.oauth2.Client` to be used. Default is + WebApplicationClient which is useful for any + hosted application but not mobile or desktop. + :param scope: List of scopes you wish to request access to + :param redirect_uri: Redirect URI you registered as callback + :param token: Token dictionary, must include access_token + and token_type. + :param state: State string used to prevent CSRF. This will be given + when creating the authorization url and must be supplied + when parsing the authorization response. + Can be either a string or a no argument callable. + :auto_refresh_url: Refresh token endpoint URL, must be HTTPS. Supply + this if you wish the client to automatically refresh + your access tokens. + :auto_refresh_kwargs: Extra arguments to pass to the refresh token + endpoint. + :token_updater: Method with one argument, token, to be used to update + your token database on automatic token refresh. If not + set a TokenUpdated warning will be raised when a token + has been refreshed. This warning will carry the token + in its token argument. + :param pkce: Set "S256" or "plain" to enable PKCE. Default is disabled. + :param kwargs: Arguments to pass to the Session constructor. + """ + super(OAuth2Session, self).__init__(**kwargs) + self._client = client or WebApplicationClient(client_id, token=token) + self.token = token or {} + self._scope = scope + self.redirect_uri = redirect_uri + self.state = state or generate_token + self._state = state + self.auto_refresh_url = auto_refresh_url + self.auto_refresh_kwargs = auto_refresh_kwargs or {} + self.token_updater = token_updater + self._pkce = pkce + + if self._pkce not in ["S256", "plain", None]: + raise AttributeError("Wrong value for {}(.., pkce={})".format(self.__class__, self._pkce)) + + # Ensure that requests doesn't do any automatic auth. See #278. + # The default behavior can be re-enabled by setting auth to None. + self.auth = lambda r: r + + # Allow customizations for non compliant providers through various + # hooks to adjust requests and responses. 
+ self.compliance_hook = { + "access_token_response": set(), + "refresh_token_response": set(), + "protected_request": set(), + "refresh_token_request": set(), + "access_token_request": set(), + } + + @property + def scope(self): + """By default the scope from the client is used, except if overridden""" + if self._scope is not None: + return self._scope + elif self._client is not None: + return self._client.scope + else: + return None + + @scope.setter + def scope(self, scope): + self._scope = scope + + def new_state(self): + """Generates a state string to be used in authorizations.""" + try: + self._state = self.state() + log.debug("Generated new state %s.", self._state) + except TypeError: + self._state = self.state + log.debug("Re-using previously supplied state %s.", self._state) + return self._state + + @property + def client_id(self): + return getattr(self._client, "client_id", None) + + @client_id.setter + def client_id(self, value): + self._client.client_id = value + + @client_id.deleter + def client_id(self): + del self._client.client_id + + @property + def token(self): + return getattr(self._client, "token", None) + + @token.setter + def token(self, value): + self._client.token = value + self._client.populate_token_attributes(value) + + @property + def access_token(self): + return getattr(self._client, "access_token", None) + + @access_token.setter + def access_token(self, value): + self._client.access_token = value + + @access_token.deleter + def access_token(self): + del self._client.access_token + + @property + def authorized(self): + """Boolean that indicates whether this session has an OAuth token + or not. If `self.authorized` is True, you can reasonably expect + OAuth-protected requests to the resource to succeed. If + `self.authorized` is False, you need the user to go through the OAuth + authentication dance before OAuth-protected requests to the resource + will succeed. + """ + return bool(self.access_token) + + def authorization_url(self, url, state=None, **kwargs): + """Form an authorization URL. + + :param url: Authorization endpoint url, must be HTTPS. + :param state: An optional state string for CSRF protection. If not + given it will be generated for you. + :param kwargs: Extra parameters to include. + :return: authorization_url, state + """ + state = state or self.new_state() + if self._pkce: + self._code_verifier = self._client.create_code_verifier(43) + kwargs["code_challenge_method"] = self._pkce + kwargs["code_challenge"] = self._client.create_code_challenge( + code_verifier=self._code_verifier, + code_challenge_method=self._pkce + ) + return ( + self._client.prepare_request_uri( + url, + redirect_uri=self.redirect_uri, + scope=self.scope, + state=state, + **kwargs + ), + state, + ) + + def fetch_token( + self, + token_url, + code=None, + authorization_response=None, + body="", + auth=None, + username=None, + password=None, + method="POST", + force_querystring=False, + timeout=None, + headers=None, + verify=None, + proxies=None, + include_client_id=None, + client_secret=None, + cert=None, + **kwargs + ): + """Generic method for fetching an access token from the token endpoint. + + If you are using the MobileApplicationClient you will want to use + `token_from_fragment` instead of `fetch_token`. + + The current implementation enforces the RFC guidelines. + + :param token_url: Token endpoint URL, must use HTTPS. + :param code: Authorization code (used by WebApplicationClients). 
+ :param authorization_response: Authorization response URL, the callback + URL of the request back to you. Used by + WebApplicationClients instead of code. + :param body: Optional application/x-www-form-urlencoded body to add the + include in the token request. Prefer kwargs over body. + :param auth: An auth tuple or method as accepted by `requests`. + :param username: Username required by LegacyApplicationClients to appear + in the request body. + :param password: Password required by LegacyApplicationClients to appear + in the request body. + :param method: The HTTP method used to make the request. Defaults + to POST, but may also be GET. Other methods should + be added as needed. + :param force_querystring: If True, force the request body to be sent + in the querystring instead. + :param timeout: Timeout of the request in seconds. + :param headers: Dict to default request headers with. + :param verify: Verify SSL certificate. + :param proxies: The `proxies` argument is passed onto `requests`. + :param include_client_id: Should the request body include the + `client_id` parameter. Default is `None`, + which will attempt to autodetect. This can be + forced to always include (True) or never + include (False). + :param client_secret: The `client_secret` paired to the `client_id`. + This is generally required unless provided in the + `auth` tuple. If the value is `None`, it will be + omitted from the request, however if the value is + an empty string, an empty string will be sent. + :param cert: Client certificate to send for OAuth 2.0 Mutual-TLS Client + Authentication (draft-ietf-oauth-mtls). Can either be the + path of a file containing the private key and certificate or + a tuple of two filenames for certificate and key. + :param kwargs: Extra parameters to include in the token request. + :return: A token dict + """ + if not is_secure_transport(token_url): + raise InsecureTransportError() + + if not code and authorization_response: + self._client.parse_request_uri_response( + authorization_response, state=self._state + ) + code = self._client.code + elif not code and isinstance(self._client, WebApplicationClient): + code = self._client.code + if not code: + raise ValueError( + "Please supply either code or " "authorization_response parameters." + ) + + if self._pkce: + if self._code_verifier is None: + raise ValueError( + "Code verifier is not found, authorization URL must be generated before" + ) + kwargs["code_verifier"] = self._code_verifier + + # Earlier versions of this library build an HTTPBasicAuth header out of + # `username` and `password`. The RFC states, however these attributes + # must be in the request body and not the header. + # If an upstream server is not spec compliant and requires them to + # appear as an Authorization header, supply an explicit `auth` header + # to this function. + # This check will allow for empty strings, but not `None`. + # + # References + # 4.3.2 - Resource Owner Password Credentials Grant + # https://tools.ietf.org/html/rfc6749#section-4.3.2 + + if isinstance(self._client, LegacyApplicationClient): + if username is None: + raise ValueError( + "`LegacyApplicationClient` requires both the " + "`username` and `password` parameters." + ) + if password is None: + raise ValueError( + "The required parameter `username` was supplied, " + "but `password` was not." 
+ ) + + # merge username and password into kwargs for `prepare_request_body` + if username is not None: + kwargs["username"] = username + if password is not None: + kwargs["password"] = password + + # is an auth explicitly supplied? + if auth is not None: + # if we're dealing with the default of `include_client_id` (None): + # we will assume the `auth` argument is for an RFC compliant server + # and we should not send the `client_id` in the body. + # This approach allows us to still force the client_id by submitting + # `include_client_id=True` along with an `auth` object. + if include_client_id is None: + include_client_id = False + + # otherwise we may need to create an auth header + else: + # since we don't have an auth header, we MAY need to create one + # it is possible that we want to send the `client_id` in the body + # if so, `include_client_id` should be set to True + # otherwise, we will generate an auth header + if include_client_id is not True: + client_id = self.client_id + if client_id: + log.debug( + 'Encoding `client_id` "%s" with `client_secret` ' + "as Basic auth credentials.", + client_id, + ) + client_secret = client_secret if client_secret is not None else "" + auth = requests.auth.HTTPBasicAuth(client_id, client_secret) + + if include_client_id: + # this was pulled out of the params + # it needs to be passed into prepare_request_body + if client_secret is not None: + kwargs["client_secret"] = client_secret + + body = self._client.prepare_request_body( + code=code, + body=body, + redirect_uri=self.redirect_uri, + include_client_id=include_client_id, + **kwargs + ) + + headers = headers or { + "Accept": "application/json", + "Content-Type": "application/x-www-form-urlencoded", + } + self.token = {} + request_kwargs = {} + if method.upper() == "POST": + request_kwargs["params" if force_querystring else "data"] = dict( + urldecode(body) + ) + elif method.upper() == "GET": + request_kwargs["params"] = dict(urldecode(body)) + else: + raise ValueError("The method kwarg must be POST or GET.") + + for hook in self.compliance_hook["access_token_request"]: + log.debug("Invoking access_token_request hook %s.", hook) + token_url, headers, request_kwargs = hook( + token_url, headers, request_kwargs + ) + + r = self.request( + method=method, + url=token_url, + timeout=timeout, + headers=headers, + auth=auth, + verify=verify, + proxies=proxies, + cert=cert, + **request_kwargs + ) + + log.debug("Request to fetch token completed with status %s.", r.status_code) + log.debug("Request url was %s", r.request.url) + log.debug("Request headers were %s", r.request.headers) + log.debug("Request body was %s", r.request.body) + log.debug("Response headers were %s and content %s.", r.headers, r.text) + log.debug( + "Invoking %d token response hooks.", + len(self.compliance_hook["access_token_response"]), + ) + for hook in self.compliance_hook["access_token_response"]: + log.debug("Invoking hook %s.", hook) + r = hook(r) + + self._client.parse_request_body_response(r.text, scope=self.scope) + self.token = self._client.token + log.debug("Obtained token %s.", self.token) + return self.token + + def token_from_fragment(self, authorization_response): + """Parse token from the URI fragment, used by MobileApplicationClients. 
+ + :param authorization_response: The full URL of the redirect back to you + :return: A token dict + """ + self._client.parse_request_uri_response( + authorization_response, state=self._state + ) + self.token = self._client.token + return self.token + + def refresh_token( + self, + token_url, + refresh_token=None, + body="", + auth=None, + timeout=None, + headers=None, + verify=None, + proxies=None, + **kwargs + ): + """Fetch a new access token using a refresh token. + + :param token_url: The token endpoint, must be HTTPS. + :param refresh_token: The refresh_token to use. + :param body: Optional application/x-www-form-urlencoded body to add the + include in the token request. Prefer kwargs over body. + :param auth: An auth tuple or method as accepted by `requests`. + :param timeout: Timeout of the request in seconds. + :param headers: A dict of headers to be used by `requests`. + :param verify: Verify SSL certificate. + :param proxies: The `proxies` argument will be passed to `requests`. + :param kwargs: Extra parameters to include in the token request. + :return: A token dict + """ + if not token_url: + raise ValueError("No token endpoint set for auto_refresh.") + + if not is_secure_transport(token_url): + raise InsecureTransportError() + + refresh_token = refresh_token or self.token.get("refresh_token") + + log.debug( + "Adding auto refresh key word arguments %s.", self.auto_refresh_kwargs + ) + kwargs.update(self.auto_refresh_kwargs) + body = self._client.prepare_refresh_body( + body=body, refresh_token=refresh_token, scope=self.scope, **kwargs + ) + log.debug("Prepared refresh token request body %s", body) + + if headers is None: + headers = { + "Accept": "application/json", + "Content-Type": ("application/x-www-form-urlencoded"), + } + + for hook in self.compliance_hook["refresh_token_request"]: + log.debug("Invoking refresh_token_request hook %s.", hook) + token_url, headers, body = hook(token_url, headers, body) + + r = self.post( + token_url, + data=dict(urldecode(body)), + auth=auth, + timeout=timeout, + headers=headers, + verify=verify, + withhold_token=True, + proxies=proxies, + ) + log.debug("Request to refresh token completed with status %s.", r.status_code) + log.debug("Response headers were %s and content %s.", r.headers, r.text) + log.debug( + "Invoking %d token response hooks.", + len(self.compliance_hook["refresh_token_response"]), + ) + for hook in self.compliance_hook["refresh_token_response"]: + log.debug("Invoking hook %s.", hook) + r = hook(r) + + self.token = self._client.parse_request_body_response(r.text, scope=self.scope) + if "refresh_token" not in self.token: + log.debug("No new refresh token given. 
Re-using old.") + self.token["refresh_token"] = refresh_token + return self.token + + def request( + self, + method, + url, + data=None, + headers=None, + withhold_token=False, + client_id=None, + client_secret=None, + files=None, + **kwargs + ): + """Intercept all requests and add the OAuth 2 token if present.""" + if not is_secure_transport(url): + raise InsecureTransportError() + if self.token and not withhold_token: + log.debug( + "Invoking %d protected resource request hooks.", + len(self.compliance_hook["protected_request"]), + ) + for hook in self.compliance_hook["protected_request"]: + log.debug("Invoking hook %s.", hook) + url, headers, data = hook(url, headers, data) + + log.debug("Adding token %s to request.", self.token) + try: + url, headers, data = self._client.add_token( + url, http_method=method, body=data, headers=headers + ) + # Attempt to retrieve and save new access token if expired + except TokenExpiredError: + if self.auto_refresh_url: + log.debug( + "Auto refresh is set, attempting to refresh at %s.", + self.auto_refresh_url, + ) + + # We mustn't pass auth twice. + auth = kwargs.pop("auth", None) + if client_id and client_secret and (auth is None): + log.debug( + 'Encoding client_id "%s" with client_secret as Basic auth credentials.', + client_id, + ) + auth = requests.auth.HTTPBasicAuth(client_id, client_secret) + token = self.refresh_token( + self.auto_refresh_url, auth=auth, **kwargs + ) + if self.token_updater: + log.debug( + "Updating token to %s using %s.", token, self.token_updater + ) + self.token_updater(token) + url, headers, data = self._client.add_token( + url, http_method=method, body=data, headers=headers + ) + else: + raise TokenUpdated(token) + else: + raise + + log.debug("Requesting url %s using method %s.", url, method) + log.debug("Supplying headers %s and data %s", headers, data) + log.debug("Passing through key word arguments %s.", kwargs) + return super(OAuth2Session, self).request( + method, url, headers=headers, data=data, files=files, **kwargs + ) + + def register_compliance_hook(self, hook_type, hook): + """Register a hook for request/response tweaking. + + Available hooks are: + access_token_response invoked before token parsing. + refresh_token_response invoked before refresh token parsing. + protected_request invoked before making a request. + access_token_request invoked before making a token fetch request. + refresh_token_request invoked before making a refresh request. + + If you find a new hook is needed please send a GitHub PR request + or open an issue. 
+ """ + if hook_type not in self.compliance_hook: + raise ValueError( + "Hook type %s is not in %s.", hook_type, self.compliance_hook + ) + self.compliance_hook[hook_type].add(hook) diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/__init__.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7f6bdacd7febc3801891efb4c1bd4ad471972819 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/__init__.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/assign.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/assign.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90fe02ce398860216ca10b9e13e40d563494eb85 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/assign.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/attrdict.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/attrdict.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2c3b626b470905fef1a2f42d925cb5b016201260 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/attrdict.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/base.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b886cb6e87f6fdc2aacf0c0cfc3f91dc1102830d Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/base.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/bitfield.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/bitfield.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..06f3361bfec7ca597bd44e3bc0a6e0bfc9ad05e3 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/bitfield.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/css.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/css.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d266d8467aec2b906e920a81fd5a50d7dcb7ced6 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/css.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/tspan.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/tspan.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a1235288251998e8de82c61a0632e0acac9159af Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/tspan.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/version.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/version.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14cd9c0b54896b1c8fd9f0ec0d41210a753d766c Binary files /dev/null and 
b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/version.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/waveform.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/waveform.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e88692c8974becb49f66688457f30d1634a2e00e Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/waveform.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/waveskin.cpython-310.pyc b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/waveskin.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ed8ff95131712953c297686d5c05d108a7bbcb91 Binary files /dev/null and b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/__pycache__/waveskin.cpython-310.pyc differ diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/assign.py b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/assign.py new file mode 100644 index 0000000000000000000000000000000000000000..8213cecc50e57def24f42458ee04500d23e97891 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/assign.py @@ -0,0 +1,174 @@ +# Copyright wavedrompy contributors. +# SPDX-License-Identifier: MIT + +# Translated to Python from original file: +# https://github.com/drom/wavedrom/blob/master/src/WaveDrom.js + +from collections import namedtuple +import svgwrite + +from .base import SVGBase + + +class RenderState: + def __init__(self, x=0, y=0, xmax=0): + self.x = x + self.y = y + self.xmax = xmax + + def __str__(self): + return "x={} y={}, xmax={}".format(self.x, self.y, self.xmax) + + +RenderObject = namedtuple("RenderObject", "name x y") + + +class Assign(SVGBase): + def render_tree(self, tree, state): + state.xmax = max(state.xmax, state.x) + y = state.y + for i in range(1, len(tree)): + if isinstance(tree[i], list): + state = self.render_tree(tree[i], RenderState(x=state.x+1, y=state.y, xmax=state.xmax)) + else: + tree[i] = RenderObject(name=tree[i], x=state.x+1, y=state.y) + state.y += 2 + tree[0] = RenderObject(name=tree[0], x=state.x, y=round((y+state.y-2)/2)) + state.x -= 1 + + return state + + def draw_body(self, type, ymin, ymax): + circle = ' M 4,0 C 4,1.1 3.1,2 2,2 0.9,2 0,1.1 0,0 c 0,-1.1 0.9,-2 2,-2 1.1,0 2,0.9 2,2 z' + gates = { + '~': 'M -11,-6 -11,6 0,0 z m -5,6 5,0' + circle, + '=': 'M -11,-6 -11,6 0,0 z m -5,6 5,0', + '&': 'm -16,-10 5,0 c 6,0 11,4 11,10 0,6 -5,10 -11,10 l -5,0 z', + '~&': 'm -16,-10 5,0 c 6,0 11,4 11,10 0,6 -5,10 -11,10 l -5,0 z' + circle, + '|': 'm -18,-10 4,0 c 6,0 12,5 14,10 -2,5 -8,10 -14,10 l -4,0 c 2.5,-5 2.5,-15 0,-20 z', + '~|': 'm -18,-10 4,0 c 6,0 12,5 14,10 -2,5 -8,10 -14,10 l -4,0 c 2.5,-5 2.5,-15 0,-20 z' + circle, + '^': 'm -21,-10 c 1,3 2,6 2,10 m 0,0 c 0,4 -1,7 -2,10 m 3,-20 4,0 c 6,0 12,5 14,10 -2,5 -8,10 -14,10 l -4,0 c 1,-3 2,-6 2,-10 0,-4 -1,-7 -2,-10 z', + '~^': 'm -21,-10 c 1,3 2,6 2,10 m 0,0 c 0,4 -1,7 -2,10 m 3,-20 4,0 c 6,0 12,5 14,10 -2,5 -8,10 -14,10 l -4,0 c 1,-3 2,-6 2,-10 0,-4 -1,-7 -2,-10 z' + circle, + '+': 'm -8,5 0,-10 m -5,5 10,0 m 3,0 c 0,4.418278 -3.581722,8 -8,8 -4.418278,0 -8,-3.581722 -8,-8 0,-4.418278 3.581722,-8 8,-8 4.418278,0 8,3.581722 8,8 z', + '*': 'm -4,4 -8,-8 m 0,8 8,-8 m 4,4 c 0,4.418278 -3.581722,8 -8,8 -4.418278,0 -8,-3.581722 -8,-8 0,-4.418278 3.581722,-8 8,-8 4.418278,0 8,3.581722 8,8 z' + } + iec = { + "BUF": 1, "INV": 
1, "AND": '&', "NAND": '&', + "OR": '\u22651', "NOR": '\u22651', "XOR": '=1', "XNOR": '=1', "box": '' + } + circled = { "INV", "NAND", "NOR", "XNOR" } + + if ymax == ymin: + ymax = 4 + ymin = -4 + + if type in gates: + return self.element.path(class_='gate', d=gates[type]) + elif type in iec: + g = self.container.g() + if type in circled: + path = self.element.path(class_="gate", d="m -16,{} 16,0 0,{} -16,0 z {}".format(ymin-3, ymax-ymin+6, circle)) + else: + path = self.element.path(class_="gate", d="m -16,{} 16,0 0,{} -16,0 z".format(ymin-3, ymax-ymin+6)) + g.add(path) + tspan = self.element.tspan(iec[type], x=[-14], y=[4], class_='wirename') + text = self.element.text('') + text.add(tspan) + g.add(text) + return g + else: + tspan = self.element.tspan(type, x=[-14], y=[4], class_='wirename') + text = self.element.text('') + text.add(tspan) + return text + + def draw_gate(self, spec): + ret = self.container.g() + + ys = [s[1] for s in spec[2:]] + + ymin = min(ys) + ymax = max(ys) + + g = self.container.g(transform="translate(16,0)") + g.add(self.element.path(d="M {},{} {},{}".format(spec[2][0], ymin, spec[2][0], ymax), class_='wire')) + ret.add(g) + + for s in spec[2:]: + path = self.element.path(d="m {},{} 16,0".format(s[0], s[1]), class_='wire') + ret.add(self.container.g().add(path)) + + g = self.container.g(transform="translate({},{})".format(spec[1][0], spec[1][1])) + g.add(self.element.title(spec[0])) + g.add(self.draw_body(spec[0], ymin - spec[1][1], ymax - spec[1][1])) + ret.add(g) + + return ret + + def draw_boxes(self, tree, xmax): + ret = self.container.g() + spec = [] + + if isinstance(tree, list): + spec.append(tree[0].name); + spec.append([32 * (xmax - tree[0].x), 8 * tree[0].y]); + for t in tree[1:]: + if isinstance(t, list): + spec.append([32 * (xmax - t[0].x), 8 * t[0].y]) + else: + spec.append([32 * (xmax - t.x), 8 * t.y]) + + ret.add(self.draw_gate(spec)) + + for t in tree[1:]: + ret.add(self.draw_boxes(t, xmax)) + else: + fname = tree.name + fx = 32 * (xmax - tree.x) + fy = 8 * tree.y + g = self.container.g(transform="translate({},{})".format(fx, fy)) + g.add(self.element.title(fname)) + g.add(self.element.path(d='M 2,0 a 2,2 0 1 1 -4,0 2,2 0 1 1 4,0 z')) + tspan = self.element.tspan(fname, x=[-4], y=[4], class_='pinname') + text = self.element.text('') + text.add(tspan) + g.add(text) + ret.add(g) + + return ret + + def render(self, index = 0, source = {}, output = []): + STYLE = ".pinname {font-size:12px; font-style:normal; font-variant:normal; font-weight:500; font-stretch:normal; text-align:center; text-anchor:end; font-family:Helvetica} .wirename {font-size:12px; font-style:normal; font-variant:normal; font-weight:500; font-stretch:normal; text-align:center; text-anchor:start; font-family:Helvetica} .wirename:hover {fill:blue} .gate {color:#000; fill:#ffc; fill-opacity: 1;stroke:#000; stroke-width:1; stroke-opacity:1} .gate:hover {fill:red !important; } .wire {fill:none; stroke:#000; stroke-width:1; stroke-opacity:1} .grid {fill:#fff; fill-opacity:1; stroke:none}" + + tree = source.get("assign") + state = RenderState(x=0, y=2, xmax=0) + + for t in tree: + state = self.render_tree(t, state) + state.x += 1 + + xmax = state.xmax + 3 + + width = 32 * (xmax + 1) + 1 + height = 8 * (state.y + 1) - 7 + ilen = 4 * (xmax + 1) + jlen = state.y + 1 + + grid = self.container.g() + + for i in range(ilen+1): + for j in range(jlen+1): + grid.add(self.element.rect(height=1, width=1, x=(i * 8 - 0.5), y=(j * 8 - 0.5), class_='grid')) + + for t in tree: + content = self.draw_boxes(t, 
xmax) + + + attr = { 'viewBox': "0 0 {} {}".format(width, height)} + template = svgwrite.Drawing(id="svgcontent_{index}".format(index=index), size=[width, height], **attr) + template.defs.add(svgwrite.container.Style(content=STYLE)) + g = self.container.g(transform="translate(0.5,0.5)") + g.add(grid) + g.add(content) + template.add(g) + return template diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/attrdict.py b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/attrdict.py new file mode 100644 index 0000000000000000000000000000000000000000..2692e56aba78c52bfa59536264206aa64ca9b3d2 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/attrdict.py @@ -0,0 +1,4 @@ +class AttrDict(dict): + def __init__(self, *args, **kwargs): + super(AttrDict, self).__init__(*args, **kwargs) + self.__dict__ = self \ No newline at end of file diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/base.py b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/base.py new file mode 100644 index 0000000000000000000000000000000000000000..25b2663cdab910a59a8212422e0c0b03443e3cd3 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/base.py @@ -0,0 +1,21 @@ +# Copyright wavedrompy contributors. +# SPDX-License-Identifier: MIT + +import svgwrite +from .attrdict import AttrDict + +class SVGBase(object): + container = AttrDict({ + "defs": svgwrite.container.Defs, + "g": svgwrite.container.Group, + "marker": svgwrite.container.Marker, + "use": svgwrite.container.Use, + }) + element = AttrDict({ + "rect": svgwrite.shapes.Rect, + "path": svgwrite.path.Path, + "text": svgwrite.text.Text, + "tspan": svgwrite.text.TSpan, + "title": svgwrite.base.Title, + "line": svgwrite.shapes.Line, + }) diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/bitfield.py b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/bitfield.py new file mode 100644 index 0000000000000000000000000000000000000000..3df13af15d82591418a2cf600ca123cd2fa8947a --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/bitfield.py @@ -0,0 +1,259 @@ +# Copyright wavedrompy contributors. 
+# SPDX-License-Identifier: MIT + +# Translated to Python from original file: +# https://github.com/drom/wavedrom/blob/master/src/WaveDrom.js + +from math import floor + +import svgwrite + +from .base import SVGBase +from .tspan import TspanParser + +class Options: + def __init__(self, vspace=80, hspace=800, lanes=1, bits=32, hflip=False, vflip=False, fontsize=14, fontfamily='sans-serif', fontweight='normal'): + self.vspace = vspace if vspace > 19 else 80 + self.hspace = hspace if hspace > 39 else 800 + self.lanes = lanes if lanes > 0 else 1 + self.bits = bits if bits > 4 else 32 + self.hflip = hflip + self.vflip = vflip + self.fontsize = fontsize if fontsize > 5 else 14 + self.fontfamily = fontfamily + self.fontweight = fontweight + +colors = {2: 0, 3: 80, 4: 170, 5: 45, 6: 126, 7: 215} + +def type_style(t): + if t in colors.keys(): + return ";fill:hsl({},100%,50%)".format(colors[t]) + else: + return '' + + +class BitField(SVGBase): + def tspan_parse(self, text): + parser = TspanParser() + parser.feed(text) + return parser.get_text() + + def hline(self, len, x=0, y=0): + return self.element.line(start=(x,y), end=(x+len,y)) + + def vline(self, len, x=0, y=0): + return self.element.line(start=(x,y), end=(x,y+len)) + + def get_text(self, body, x, y=None): + x_list = None + if x: + x_list = [x] + y_list = None + if y: + y_list = [y] + text = self.element.text('', x=x_list, y=y_list) + for t in self.tspan_parse(str(body)): + text.add(t) + return text + + def get_label(self, attr, x, y, step=0, length=0): + if isinstance(attr, int): + attr = int(attr) + res = [] + for i in range(length): + val = (attr >> i) & 1 + xi = x + step * (length / 2 - i - 0.5) + res.append(self.get_text(val, xi, y)) + return res + else: + if '\n' in attr: + names = attr.split('\n') + count = len(names) + return [ + self.get_text(name, x, y + (-(count - 1) / 2 + i) * self.opt.fontsize) + for (i, name) in enumerate(names) + ] + return [self.get_text(attr, x, y)] + + def get_attrs(self, e, step, lsbm, msbm): + if self.opt.vflip: + x = step * (msbm + lsbm) / 2 + else: + x = step * (self.mod - ((msbm + lsbm) / 2) - 1) + attr = e['attr'] + bits = e['bits'] + attrs = [attr] + # 'attr' supports both a scalar and a list. 
+ if isinstance(attr, list): + attrs = attr + return [self.get_label(a, x, 16 * i, step, bits) + for (i, a) in enumerate(attrs)] + + def labelArr(self, desc): + step = self.opt.hspace / self.mod + bits = self.container.g(transform="translate({},{})".format(step/2, self.opt.vspace/5)) + names = self.container.g(transform="translate({},{})".format(step/2, self.opt.vspace/2+4)) + attrs = self.container.g(transform="translate({},{})".format(step/2, self.opt.vspace)) + blanks = self.container.g(transform="translate(0,{})".format(self.opt.vspace/4)) + + for e in desc: + lsbm = 0 + msbm = self.mod - 1 + lsb = self.index * self.mod + msb = (self.index + 1) * self.mod - 1 + + if floor(e["lsb"] / self.mod) == self.index: + lsbm = e["lsbm"] + lsb = e["lsb"] + if floor(e["msb"] / self.mod) == self.index: + msb = e["msb"] + msbm = e["msbm"] + else: + if floor(e["msb"] / self.mod) == self.index: + msb = e["msb"] + msbm = e["msbm"] + else: + continue + + if self.opt.vflip: + bits.add(self.get_text(lsb, x=[step*lsbm])) + else: + bits.add(self.get_text(lsb, x=[step*(self.mod-lsbm - 1)])) + if lsbm != msbm: + if self.opt.vflip: + bits.add(self.get_text(msb, x=[step * msbm])) + else: + bits.add(self.get_text(msb, x=[step * (self.mod - msbm - 1)])) + if e.get('name'): + if self.opt.vflip: + x = step*(msbm+lsbm)/2 + else: + x = step*(self.mod-((msbm+lsbm)/2)-1) + for n in self.get_label(e['name'], x, 0): + names.add(n) + + if not e.get('name') or e.get('type'): + style = 'fill-opacity:0.1' + type_style(e.get('type', 0)) + if self.opt.vflip: + insert_x = lsbm + else: + insert_x = self.mod - msbm - 1 + insert = [step * insert_x, 0] + size = [step * (msbm - lsbm + 1), self.opt.vspace/2] + blanks.add(self.element.rect(insert=insert, size=size, style=style)) + if e.get('attr') is not None: + for attr in self.get_attrs(e, step, lsbm, msbm): + for a in attr: + attrs.add(a) + + g = self.container.g() + g.add(blanks) + g.add(bits) + g.add(names) + g.add(attrs) + return g + + def labels(self, desc): + g = self.container.g(text_anchor='middle') + g.add(self.labelArr(desc)) + return g + + def cage(self, desc): + hspace = self.opt.hspace + vspace = self.opt.vspace + mod = self.mod + + g = self.container.g(stroke='black', stroke_width=1, stroke_linecap='round', transform="translate(0,{})".format(vspace/4)) + + g.add(self.hline(hspace)); + if self.opt.vflip: + g.add(self.vline(0)); + else: + g.add(self.vline(vspace / 2)); + g.add(self.hline(hspace, 0, vspace / 2)); + + i = self.index * mod + if self.opt.vflip: + r = range(0, mod + 1) + else: + r = range(mod, 0, -1) + for j in r: + if j == mod or any([(e["lsb"] == i) for e in desc]): + g.add(self.vline((vspace / 2), j * (hspace / mod))); + else: + g.add(self.vline((vspace / 16), j * (hspace / mod))); + g.add(self.vline((vspace / 16), j * (hspace / mod), vspace * 7 / 16)); + i += 1 + + return g + + def lane(self, desc): + x = 4.5 + if self.opt.hflip: + i = self.index + else: + i = self.opt.lanes-self.index-1 + y = i * self.opt.vspace + 0.5 + g = self.container.g(transform = "translate({},{})".format(x, y), + text_anchor = "middle", + font_size = self.opt.fontsize, + font_family = self.opt.fontfamily, + font_weight = self.opt.fontweight) + + g.add(self.cage(desc)) + g.add(self.labels(desc)) + return g + + def get_max_attrs(self, desc): + max_count = 0 + for e in desc: + if 'attr' in e: + if isinstance(e['attr'], list): + max_count = max(max_count, len(e['attr'])) + else: + max_count = max(max_count, 1) + return max_count + + def render(self, desc, opt = Options()): + self.opt = 
opt + + # Compute extra per-lane space needed if there are more than one attr + # for any field. This spaces all lanes uniformly, matching the lane + # with the most attr's. + extra_attrs = 0 + max_attrs = self.get_max_attrs(desc) + if max_attrs > 1: + extra_attrs = max_attrs - 1 + self.extra_attr_space = extra_attrs * 16 + + width = opt.hspace + 9 + height = (opt.vspace + self.extra_attr_space) * opt.lanes + 5 + + template = svgwrite.Drawing() + template["width"] = width + template["height"] = height + template["class"] = "WaveDrom" + template.viewbox(0, 0, width, height) + + lsb = 0 + self.mod = int(opt.bits / opt.lanes) + + for e in desc: + e["lsb"] = lsb + e["lsbm"] = lsb % self.mod + lsb += e['bits'] + e['msb'] = lsb - 1 + e['msbm'] = e['msb'] % self.mod + + for i in range(opt.lanes): + self.index = i + template.add(self.lane(desc)) + + return template + + def renderJson(self, source): + opt = Options() + if source.get("config"): + opt = Options(**source['config']) + if source.get("reg"): + return self.render(source['reg'], opt) diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/css.py b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/css.py new file mode 100644 index 0000000000000000000000000000000000000000..bb76f58dbd4b0e66a4187275c14e5f94c5fbe1cc --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/css.py @@ -0,0 +1,421 @@ +# Copyright wavedrompy contributors. +# SPDX-License-Identifier: MIT + +# Translated to Python from original file: +# https://github.com/drom/wavedrom/blob/master/src/WaveDrom.js + +from .attrdict import AttrDict +css = AttrDict({}) +css.default = """ +text{font-size:11pt; + font-style:normal; + font-variant:normal; + font-weight:normal; + font-stretch:normal; + text-align:center; + fill-opacity:1; + font-family:Helvetica} +.h1{font-size:33pt; + font-weight:bold} +.h2{font-size:27pt; + font-weight:bold} +.h3{font-size:20pt; + font-weight:bold} +.h4{font-size:14pt; + font-weight:bold} +.h5{font-size:11pt; + font-weight:bold} +.h6{font-size:8pt; + font-weight:bold} +.muted{fill:#aaa} +.warning{fill:#f6b900} +.error{fill:#f60000} +.info{fill:#0041c4} +.success{fill:#00ab00} +.s1{fill:none; + stroke:#000; + stroke-width:1; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:none} +.s2{fill:none; + stroke:#000; + stroke-width:0.5; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:none} +.s3{color:#000; + fill:none; + stroke:#000; + stroke-width:1; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:1, 3; + stroke-dashoffset:0; + marker:none; + visibility:visible; + display:inline; + overflow:visible} +.s4{color:#000; + fill:none; + stroke:#000; + stroke-width:1; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:none; + stroke-dashoffset:0; + marker:none; + visibility:visible; + display:inline; + overflow:visible} +.s5{fill:#fff; + stroke:none} +.s6{fill:#000; + fill-opacity:1; + stroke:none} +.s7{color:#000; + fill:#fff; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:1px; + marker:none; + visibility:visible; + display:inline; + overflow:visible} +.s8{color:#000; + fill:#ffffb4; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:1px; + marker:none; + visibility:visible; + display:inline; + overflow:visible} +.s9{color:#000; + fill:#ffe0b9; + 
fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:1px; + marker:none; + visibility:visible; + display:inline; + overflow:visible} +.s10{color:#000; + fill:#b9e0ff; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:1px; + marker:none; + visibility:visible; + display:inline; + overflow:visible} +.s11{color:#000; + fill:#ccfdfe; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:1px; + marker:none; + visibility:visible; + display:inline; + overflow:visible} +.s12{color:#000; + fill:#cdfdc5; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:1px; + marker:none; + visibility:visible; + display:inline; + overflow:visible} +.s13{color:#000; + fill:#f0c1fb; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:1px; + marker:none; + visibility:visible; + display:inline; + overflow:visible} +.s14{color:#000; + fill:#f5c2c0; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:1px; + marker:none; + visibility:visible; + display:inline; + overflow:visible} +.s15{fill:#0041c4; + fill-opacity:1; + stroke:none} +.s16{fill:none; + stroke:#0041c4; + stroke-width:1; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:none} +""" + +css.narrow = """ +text{font-size:11pt; + font-style:normal; + font-variant:normal; + font-weight:normal; + font-stretch:normal; + text-align:center; + fill-opacity:1; + font-family:Helvetica} +.muted{fill:#aaa} +.warning{fill:#f6b900} +.error{fill:#f60000} +.info{fill:#0041c4} +.success{fill:#00ab00} +.h1{font-size:33pt;font-weight:bold} +.h2{font-size:27pt;font-weight:bold} +.h3{font-size:20pt;font-weight:bold} +.h4{font-size:14pt;font-weight:bold} +.h5{font-size:11pt;font-weight:bold} +.h6{font-size:8pt;font-weight:bold} +.s1{fill:none; + stroke:#000000; + stroke-width:1; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:none} +.s2{fill:none; + stroke:#000000; + stroke-width:0.5; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:none} +.s3{color:#000000; + fill:none; + stroke:#000000; + stroke-width:1; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:1, 3; + stroke-dashoffset:0; + marker:none; + visibility:visible; + display:inline; + overflow:visible; + enable-background:accumulate} +.s4{color:#000000; + fill:none; + stroke:#000000; + stroke-width:1; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:none; + stroke-dashoffset:0; + marker:none; + visibility:visible; + display:inline; + overflow:visible} +.s5{fill:#ffffff;stroke:none} +.s6{color:#000000; + fill:#ffffb4; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:1px; + marker:none; + visibility:visible; + display:inline; + overflow:visible; + enable-background:accumulate} +.s7{color:#000000; + fill:#ffe0b9; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:1px; + marker:none; + visibility:visible; + display:inline; + overflow:visible; + enable-background:accumulate} +.s8{color:#000000; + fill:#b9e0ff; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:1px; + marker:none; + visibility:visible; + display:inline; + overflow:visible; + enable-background:accumulate} +.s9{fill:#000000;fill-opacity:1;stroke:none} +.s10{color:#000000; + fill:#ffffff; + fill-opacity:1; + 
fill-rule:nonzero; + stroke:none; + stroke-width:1px; + marker:none; + visibility:visible; + display:inline; + overflow:visible; + enable-background:accumulate} +""" + +css.lowkey=""" +text{font-size:11pt; + font-style:normal; + font-variant:normal; + font-weight:normal; + font-stretch:normal; + text-align:center; + fill-opacity:1; + font-family:Helvetica} +.muted{fill:#aaa} +.warning{fill:#f6b900} +.error{fill:#f60000} +.info{fill:#0041c4} +.success{fill:#00ab00} +.h1{font-size:33pt; + font-weight:bold} +.h2{font-size:27pt; + font-weight:bold} +.h3{font-size:20pt; + font-weight:bold} +.h4{font-size:14pt; + font-weight:bold} +.h5{font-size:11pt; + font-weight:bold} +.h6{font-size:8pt; + font-weight:bold} +.s1{fill:none; + stroke:#606060; + stroke-width:0.75px; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:none} +.s2{fill:none; + stroke:#606060; + stroke-width:0.5; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:none} +.s3{color:#000; + fill:none; + stroke:#606060; + stroke-width:0.75px; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:1, 3; + stroke-dashoffset:0; + marker:none; + visibility:visible; + display:inline; + overflow:visible; + enable-background:accumulate} +.s4{color:#000; + fill:none; + stroke:#606060; + stroke-width:0.75px; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:none; + stroke-dashoffset:0; + marker:none; + visibility:visible; + display:inline; + overflow:visible} +.s5{fill:#ffffff; + stroke:none} +.s6{color:#000; + fill:#eaeaea; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:0.25px; + marker:none; + visibility:visible; + display:inline; + overflow:visible; + enable-background:accumulate} +.s7{color:#000; + fill:#d7d7d7; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:0.25px; + marker:none; + visibility:visible; + display:inline; + overflow:visible; + enable-background:accumulate} +.s8{color:#000; + fill:#c0c0c0; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:0.25px; + marker:none; + visibility:visible; + display:inline; + overflow:visible; + enable-background:accumulate} +.s9{fill:#606060; + fill-opacity:1; + stroke:none} +.s10{color:#000; + fill:#fff; + fill-opacity:1; + fill-rule:nonzero; + stroke:none; + stroke-width:0.25px; + marker:none; + visibility:visible; + display:inline; + overflow:visible; + enable-background:accumulate} +.s11{fill:#0041c4; + fill-opacity:1; + stroke:none} +.s12{fill:none; + stroke:#0041c4; + stroke-width:0.75px; + stroke-linecap:round; + stroke-linejoin:miter; + stroke-miterlimit:4; + stroke-opacity:1; + stroke-dasharray:none} +""" diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/tspan.py b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/tspan.py new file mode 100644 index 0000000000000000000000000000000000000000..f2037baffa565df66196757b45817cf6b63a0a4a --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/tspan.py @@ -0,0 +1,140 @@ +# Copyright wavedrompy contributors. 
+# SPDX-License-Identifier: MIT + +import sys + +import svgwrite +from six import string_types +from svgwrite.base import BaseElement +from svgwrite.etree import etree +from .attrdict import AttrDict + +if sys.version_info < (3, 0): + from HTMLParser import HTMLParser +else: + from html.parser import HTMLParser + + +class TspanParser(HTMLParser, object): + tags = { + 'o': {'text_decoration': 'overline'}, + 'ins': {'text_decoration': 'underline'}, + 'sub': {'baseline_shift': 'sub'}, + 'sup': {'baseline_shift': 'super'}, + 'b': {'font_weight': 'bold'}, + 'i': {'font_style': 'italic'}, + 's': {'text_decoration': 'line-through'}, + 'tt': {'font_family': 'monospace'}, + } + + def __init__(self): + super(TspanParser, self).__init__() + self.text = [] + self.state = [] + + def handle_starttag(self, tag, attrs): + self.state.append(tag) + + def handle_endtag(self, tag): + if self.state.pop() != tag: + raise RuntimeError("Unexpected closing tag: {}".format(tag)) + + def get_style(self): + return {k: v for d in [self.tags[t] for t in self.state] for k, v in d.items()} + + def handle_data(self, data): + if len(self.state) == 0: + self.text.append(svgwrite.text.TSpan(data)) + else: + self.text.append(svgwrite.text.TSpan(data, **self.get_style())) + + def get_text(self): + return self.text + + +class JsonMLElement(BaseElement): + """Class that generates xml elements from jsonml.""" + def __init__(self, source, **extra): + """Constructs from jsonml source.""" + self._jsonml = self.extract_element(source) + self.elementname = self._jsonml.tagname + self._jsonml.attributes.update(extra) + super(JsonMLElement, self).__init__(**extra) + + def extract_element(self, e): + """Extract AttrDict from jsonml + + This function non-recursively extracts an AttrDict from jsonml. + This AttrDict has the three elements tagname, attributes and + element_list according to the jsonml specification. + + :param e: element as jsonml list/tuple + :return: AttrDict + """ + if not isinstance(e, (list, tuple)): + raise ValueError("JsonML must be a list") + if len(e) == 0: + raise ValueError("JsonML cannot be an empty list") + if not isinstance(e[0], string_types): + raise ValueError("JsonML tagname must be string") + ret = AttrDict({"tagname": e[0], "attributes": {}, "element-list": []}) + if len(e) > 1: + if isinstance(e[1], dict): + ret.attributes = e[1] + if len(e) > 2: + ret.element_list = e[2:] + else: + ret.element_list = e[1:] + return ret + + def get_xml_element(self, e): + """Generate xml element from jsonml AttrDict + + Recursively generates xml element from jsonml AttrDict. + + :param e: jsonml AttrDict + :return: xml element + """ + + # create element + element = etree.Element(e.tagname) + # set element attributes + for attribute, value in sorted(e.attributes.items()): + # filter 'None' values + if value is not None: + value = self.value_to_string(value) + if value: # just add not empty attributes + element.set(attribute, value) + # store the last xml sibling, because we may need to add + # text to it's tail. 
This is to support the tagged text + # style ("abc") + last = None + for c in e.element_list: + if isinstance(c, string_types): + # Strings need special treatment for insertion + # as those are not elements + if last is None: + # No non-text element seen so far + if element.text is None: + # No other element seen so far + element.text = c + else: + # Append to other texts + element.text += c + else: + # There was an element already + if last.tail is None: + # No text after that so far + last.tail = c + else: + # Append to other text + last.tail += c + else: + # Recurse + last = self.get_xml_element(self.extract_element(c)) + element.append(last) + + return element + + def get_xml(self): + return self.get_xml_element(self._jsonml) \ No newline at end of file diff --git a/evalkit_internvl/lib/python3.10/site-packages/wavedrom/waveform.py b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/waveform.py new file mode 100644 index 0000000000000000000000000000000000000000..d53f7de31b88d7ffae018dde3e38e625c8623940 --- /dev/null +++ b/evalkit_internvl/lib/python3.10/site-packages/wavedrom/waveform.py @@ -0,0 +1,925 @@ +# Copyright wavedrompy contributors. +# SPDX-License-Identifier: MIT + +# Originally translated to Python from original file: +# https://github.com/drom/wavedrom/blob/master/src/WaveDrom.js +# Now many parts have been rewritten and diverged + +import sys +import math +import re +from itertools import chain +import svgwrite +from .attrdict import AttrDict +from collections import deque + +from six import string_types + +from wavedrom.tspan import JsonMLElement +from . import waveskin, css +from .base import SVGBase + + +class WaveDrom(SVGBase): + def __init__(self): + self.font_width = 7 + self.lane = AttrDict({ + "xs": 20, # tmpgraphlane0.width + "ys": 20, # tmpgraphlane0.height + "xg": 120, # tmpgraphlane0.x + "yg": 0, # head gap + "yh0": 0, # head gap title + "yh1": 0, # head gap + "yf0": 0, # foot gap + "yf1": 0, # foot gap + "y0": 5, # tmpgraphlane0.y + "yo": 30, # tmpgraphlane1.y - y0 + "tgo": -10, # tmptextlane0.x - xg + "ym": 15, # tmptextlane0.y - y0 + "xlabel": 6, # tmptextlabel.x - xg + "xmax": 1, + "scale": 1, + "head": {}, + "foot": {} + }) + + @staticmethod + def stretch_bricks(wave, stretch): + stretcher = { + "Pclk": "111", "Nclk": "000", + "pclk": "111", "nclk": "000", + "0": "000", "1": "111", "x": "xxx", "d": "ddd", "u": "uuu", "z": "zzz", + "2": "vvv-2", "3": "vvv-3", "4": "vvv-4", "5": "vvv-5", "6": "vvv-6", + "7": "vvv-7", "8": "vvv-8", "9": "vvv-9"} + + if stretch == -0.5: + # This is the only valid non-integer value, it essentially means halfing down. Further subsampling + # does not work I think.. 
+ return wave[0::2] + else: + stretch = int(stretch) + + def getBrick(w): + if w in stretcher: + return stretcher[w] + elif w[2] in stretcher: + return stretcher[w[2]] + else: + return stretcher[w[-1]] + + if stretch > 0: + return list(chain.from_iterable(([w] + [getBrick(w)]*stretch for w in wave))) + else: + return wave + + def gen_wave_brick(self, prev=None, this=None, stretch=0, repeat=0, subcycle=False): + sharpedge_clk = { "p": "pclk", "n": "nclk", "P": "Pclk", "N": "Nclk" } + sharpedge_sig = { "h": "pclk", "l": "nclk", "H": "Pclk", "L": "Nclk" } + sharpedge = sharpedge_clk.copy() + sharpedge.update(sharpedge_sig) + + # level: logical levels of symbols at wave + level = {"=": "v", "2": "v", "3": "v", "4": "v", "5": "v", "6": "v", + "7": "v", "8": "v", "9": "v", "h": "1", "H": "1", "l": "0", "L": "0"} + # translevel: Those are the levels at the end of a cycle (special for clocks) + translevel = level.copy() + translevel.update({"p": "0", "P": "0", "n": "1", "N": "1"}) + # data: Modifiers of wavebricks that add data + data = {"=": "-2", "2": "-2", "3": "-3", "4": "-4", "5": "-5", "6": + "-6", "7": "-7", "8": "-8", "9": "-9"} + # clkinvert: The inverse brick to clock symbols + clkinvert = {"p": "nclk", "n": "pclk", "P": "nclk", "N": "pclk"} + # xclude: Those are actually identical levels, no transition + xclude = {"hp": "111", "Hp": "111", "ln": "000", "Ln": "000", + "nh": "111", "Nh": "111", "pl": "000", "Pl": "000"} + + if this in sharpedge.keys(): + if prev is None: + if this in sharpedge_clk.keys(): + first = sharpedge[this] + else: + first = level.get(this, this)*3 + else: + first = xclude.get(prev+this, sharpedge[this]) + + if this in sharpedge_clk.keys(): + wave = [first, clkinvert[this]] * (1 + repeat) + else: + wave = [first] + [level.get(this, this)*3]*(2 * repeat + 1) + else: + if prev is None: + transition = level.get(this, this)*3 + data.get(this, "") + else: + transition = translevel.get(prev, prev) + 'm' + level.get(this, this) + data.get(prev, "") + data.get(this, "") + value = level.get(this, this)*3 + data.get(this, "") + wave = [transition, value] + [value, value] * repeat + + if subcycle: + wave = wave[0:repeat+1] + + if not (stretch == -0.5 and this in sharpedge_clk.keys()): + wave = self.stretch_bricks(wave, stretch) + + return wave + + def parse_wave_lane(self, text, stretch=0): + R = [] + + Stack = deque(text) + + This = None + subCycle = False + + while len(Stack) > 0: + Top = This + This = Stack.popleft() + repeat = 0 + if This == '|': + This = 'x' + if This == '<': + subCycle = True + This = Top + Top = None + if Stack[0] in ['.', '|']: + Stack.popleft() + else: + continue + if This == '>': + subCycle = False + This = Top + Top = None + if Stack and Stack[0] in ['.', '|']: + Stack.popleft() + else: + continue + while Stack and Stack[0] in ['.', '|']: + Stack.popleft() + repeat += 1 + R.extend(self.gen_wave_brick(Top, This, stretch, repeat, subCycle)) + + for i in range(int(math.ceil(self.lane.phase))): + R = R[1:] + + return R + + def parse_wave_lanes(self, sig=""): + def data_extract(e): + tmp = e.get("data") + if tmp is not None: + tmp = tmp.split() if isinstance(tmp, string_types) else tmp + return tmp + + content = [] + for sigx in sig: + self.lane.period = sigx.get("period", 1) + self.lane.phase = sigx.get("phase", 0) * 2 + sub_content = [] + sub_content.append([sigx.get("name", " "), sigx.get("phase", 0)]) + if sigx.get("wave"): + sub_content.append(self.parse_wave_lane(sigx["wave"], self.lane.period * self.lane.hscale - 1)) + else: + 
sub_content.append(None) + sub_content.append(data_extract(sigx)) + content.append(sub_content) + + return content + + def find_lane_markers(self, lanetext=""): + + lcount = 0 + gcount = 0 + ret = [] + for idx, val in enumerate(lanetext): + if val in ["vvv-2", "vvv-3", "vvv-4", "vvv-5", "vvv-6", "vvv-7", "vvv-8", "vvv-9"]: + lcount += 1 + else: + if lcount != 0: + ret.append(gcount - ((lcount + 1) / 2)) + lcount = 0 + + gcount += 1 + + if lcount != 0: + ret.append(gcount - ((lcount + 1) / 2)) + + return ret + + def render_lane_uses(self, val, g): + if val[1]: + for i in range(len(val[1])): + b = self.container.use(href="#{}".format(val[1][i])) + b.translate(i * self.lane.xs) + g.add(b) + + if val[2] and len(val[2]): + labels = self.find_lane_markers(val[1]) + if len(labels) != 0: + for k in range(len(labels)): + if val[2] and k < len(val[2]): + tx = int(labels[k]) * self.lane.xs + self.lane.xlabel + title = self.element.text("", x=[tx], y=[self.lane.ym], text_anchor="middle") + title.add(self.element.tspan(val[2][k])) + title["xml:space"] = "preserve" + g.add(title) + + def text_width(self, string, size=11): + chars = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,34,47,74,74,118,89,25,44,44,52,78,37, + 44,37,37,74,74,74,74,74,74,74,74,74,74,37,37,78,78,78,74,135,89,89,96,96,89,81,103,96,37,67,89,74,109, + 96,103,89,103,96,89,81,96,89,127,89,87,81,37,37,37,61,74,44,74,74,67,74,74,37,74,74,30,30,67,30,112,74, + 74,74,74,44,67,37,74,67,95,66,65,67,44,34,44,78,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, + 0,0,0,0,0,0,37,43,74,74,74,74,34,74,44,98,49,74,78,0,98,73,53,73,44,44,44,77,71,37,44,44,49,74,111,111, + 111,81,89,89,89,89,89,89,133,96,89,89,89,89,37,37,37,37,96,96,103,103,103,103,103,78,103,96,96,96,96, + 87,89,81,74,74,74,74,74,74,118,67,74,74,74,74,36,36,36,36,74,74,74,74,74,74,74,73,81,74,74,74,74,65,74, + 65,89,74,89,74,89,74,96,67,96,67,96,67,96,67,96,82,96,74,89,74,89,74,89,74,89,74,89,74,103,74,103,74, + 103,74,103,74,96,74,96,74,37,36,37,36,37,36,37,30,37,36,98,59,67,30,89,67,67,74,30,74,30,74,39,74,44, + 74,30,96,74,96,74,96,74,80,96,74,103,74,103,74,103,74,133,126,96,44,96,44,96,44,89,67,89,67,89,67,89, + 67,81,38,81,50,81,37,96,74,96,74,96,74,96,74,96,74,96,74,127,95,87,65,87,81,67,81,67,81,67,30,84,97,91, + 84,91,84,94,92,73,104,109,91,84,81,84,100,82,76,74,103,91,131,47,40,99,77,37,79,130,100,84,104,114,87, + 126,101,87,84,93,84,69,84,46,52,82,52,82,114,89,102,96,100,98,91,70,88,88,77,70,85,89,77,67,84,39,65, + 61,39,189,173,153,111,105,61,123,123,106,89,74,37,30,103,74,96,74,96,74,96,74,96,74,96,74,81,91,81,91, + 81,130,131,102,84,103,84,87,78,104,81,104,81,88,76,37,189,173,153,103,84,148,90,100,84,89,74,133,118, + 103,81] + + return sum([(chars[ord(c)] if ord(c) <= len(chars) else 114) for c in string])*size/100 + + def render_wave_lane(self, content="", index=0): + xmax = 0 + xgmax = 0 + glengths = [] + groups = [] + + for j, val in enumerate(content): + name = val[0][0].strip() + if name is not None: + dy = self.lane.y0 + j * self.lane.yo + g = self.container.g(id="wavelane_{j}_{index}".format(j=j, index=index)) + g.translate(0, dy) + title = self.element.text("", x=[self.lane.tgo], y=[self.lane.ym], text_anchor="end") + title.add(self.element.tspan(name)) + title["xml:space"] = "preserve" + title["class"] = "info" + g.add(title) + + glengths.append(self.text_width(name)) + + xoffset = val[0][1] + xoffset = math.ceil(2 * xoffset) - 2 * xoffset if xoffset > 0 else -2 * xoffset + gg = 
self.container.g(id="wavelane_draw_{j}_{index}".format(j=j, index=index)) + gg.translate(xoffset * self.lane.xs, 0) + + self.render_lane_uses(val, gg) + + if val[1] and len(val[1]) > xmax: + xmax = len(val[1]) + + g.add(gg) + groups.append(g) + self.lane.xmax = xmax + self.lane.xg = xgmax + 20 + return (glengths, groups) + + def captext(self, g, cxt, anchor, y): + if cxt.get(anchor) and cxt[anchor].get("text"): + tmark = self.element.text("", x=[float(cxt.xmax)*float(cxt.xs)/2], y=[y], text_anchor="middle", fill="#000") + tmark["xml:space"] = "preserve" + if isinstance(cxt[anchor]["text"], string_types): + tmark.add(self.element.tspan(cxt[anchor]["text"])) + else: + tmark.add(JsonMLElement(cxt[anchor]["text"])) + g.add(tmark) + + def ticktock(self, g, cxt, ref1, ref2, x, dx, y, length): + L = [] + + if cxt.get(ref1) is None or cxt[ref1].get(ref2) is None: + return + + val = cxt[ref1][ref2] + + if isinstance(val, string_types): + val = val.split() + elif isinstance(val, (int, float, bool)): + offset = int(val) + val = [] + for i in range(length): + val.append(i + offset) + + if type(val) is list: + if len(val) == 0: + return + elif len(val) == 1: + offset = val[0] + if isinstance(offset, string_types): + L = val + else: + for i in range(length): + L[i] = i + offset + elif len(val) == 2: + offset = int(val[0]) + step = int(val[1]) + tmp = val[1].split(".") + if len(tmp) == 2: + dp = len(tmp[1]) + + if isinstance(offset, string_types) or isinstance(step, string_types): + L = val + else: + offset = step * offset + for i in range(length): + L[i] = "{0:.", dp, "f}".format(step * i + offset) + else: + L = val + + else: + return + + for i in range(length): + tmp = L[i] + tmark = self.element.text(tmp, x=[i * dx + x], y=[y], text_anchor="middle") + tmark["class"] = "muted" + tmark["xml:space"] = "preserve" + g.add(tmark) + + def render_marks(self, content="", index=0): + def get_elem(e): + if len(e) == 3: + ret = self.element[e[0]](e[2]) + ret.attribs = e[1] + elif len(e) == 2: + ret = self.element[e[0]](e[1]) + else: + ret = self.element.tspan(e) + return ret + + mstep = 2 * int(self.lane.hscale) + mmstep = mstep * self.lane.xs + marks = int(self.lane.xmax / mstep) + gy = len(content) * int(self.lane.yo) + + g = self.container.g(id="gmarks_{}".format(index)) + + for i in range(marks + 1): + gg = self.element.path(id="gmark_{i}_{index}".format(i=i, index=index), + d="m {dx},0 0,{gy}".format(dx=i * mmstep, gy=gy), + style="stroke:#888;stroke-width:0.5;stroke-dasharray:1,3") + g.add(gg) + + self.captext(g, self.lane, "head", -33 if (self.lane.yh0 > 0) else -13) + self.captext(g, self.lane, "foot", gy + (45 if (self.lane.yf0 > 0) else 25)) + self.ticktock(g, self.lane, "head", "tick", 0, mmstep, -5, marks + 1) + self.ticktock(g, self.lane, "head", "tock", mmstep / 2, mmstep, -5, marks) + self.ticktock(g, self.lane, "foot", "tick", 0, mmstep, gy + 15, marks + 1) + self.ticktock(g, self.lane, "foot", "tock", mmstep / 2, mmstep, gy + 15, marks) + + return g + + def render_labels(self, root, source, index): + if source: + gg = self.container.g(id="labels_{index}".format(index=index)) + + for idx, val in enumerate(source): + self.lane.period = val.get("period", 1) + self.lane.phase = val.get("phase", 0) * 2 + + dy = self.lane.y0 + idx * self.lane.yo + g = self.container.g(id="labels_{i}_{index}".format(i=idx, index=index)) + g.translate(0, dy) + + label = val.get("label") + if label: + pos = 0 + for l in re.findall(r"([\.\w]|(?:\{\w+\}))(?:\((\d*\.?\d+)\))?", label): + if l[0] == ".": + pos += 1 + continue 
+ + text = l[0] + try: + offset = float(l[1]) + except ValueError: + offset = 0 + + m = re.match(r"\{(\w+)\}", l[0]) + if m: + text = m.group(1) + x = int(float(self.lane.xs) * (2 * (pos + offset) * self.lane.period * + self.lane.hscale - self.lane.phase) + float(self.lane.xlabel)) + y = int(idx * self.lane.yo + self.lane.y0 + float(self.lane.ys) * 0.5) - dy + + lwidth = len(text) * self.font_width + lx = float(x) - float(lwidth) / 2 + ly = int(y) - 5 + underlabel = self.element.rect(insert=(lx, ly), + size=(lwidth, 8), style="fill:#FFF;") + g.add(underlabel) + lx = float(x) + ly = int(y) + 2 + label = self.element.text(text, style="font-size:8px;", text_anchor="middle", + x=[lx], y=[ly]) + g.add(label) + pos += 1 + gg.add(g) + root.add(gg) + + def arc_shape(self, Edge, frm, to): + dx = float(to.x) - float(frm.x) + dy = float(to.y) - float(frm.y) + lx = (float(frm.x) + float(to.x)) / 2 + ly = (float(frm.y) + float(to.y)) / 2 + + const_style = AttrDict({ + "a": "marker-end:url(#arrowhead);stroke:#0041c4;stroke-width:1;fill:none", + "b": "marker-end:url(#arrowhead);marker-start:url(#arrowtail);stroke:#0041c4;stroke-width:1;fill:none" + }) + + pattern = { + "-": { }, + "~": {"d": "M {fx},{fy} c {dx},{dy} {dxx},{dyy} {dxxx},{dyyy}".format(fx=frm.x, fy=frm.y, + dx=(0.7 * dx), dy=0, + dxx=(0.3 * dx), dyy=dy, + dxxx=dx, dyyy=dy)}, + "-~": {"d": "M {fx},{fy} c {dx},{dy} {dxx},{dyy} {dxxx},{dyyy}".format(fx=frm.x, fy=frm.y, + dx=(0.7 * dx), dy=0, + dxx=dx, dyy=dy, + dxxx=dx, dyyy=dy)}, + "~-": {"d": "M {fx},{fy} c {dx},{dy} {dxx},{dyy} {dxxx},{dyyy}".format(fx=frm.x, fy=frm.y, + dx=0, dy=0, + dxx=(0.3 * dx), dyy=dy, + dxxx=dx, dyyy=dy)}, + "-|": {"d": "m {fx},{fy} {dx},{dy} {dxx},{dyy}".format(fx=frm.x, fy=frm.y, + dx=dx, dy=0, + dxx=0, dyy=dy)}, + "|-": {"d": "m {fx},{fy} {dx},{dy} {dxx},{dyy}".format(fx=frm.x, fy=frm.y, + dx=0, dy=dy, + dxx=dx, dyy=0)}, + "-|-": {"d": "m {fx},{fy} {dx},{dy} {dxx},{dyy} {dxxx},{dyyy}".format(fx=frm.x, fy=frm.y, + dx=(dx / 2), dy=0, + dxx=0, dyy=dy, + dxxx=(dx / 2), dyyy=0)}, + "->": {"style": const_style.a}, + "~>": {"style": const_style.a, + "d": "M {fx},{fy} c {dx},{dy} {dxx},{dyy} {dxxx},{dyyy}".format(fx=frm.x, fy=frm.y, + dx=(0.7 * dx), dy=0, + dxx=(0.3 * dx), dyy=dy, + dxxx=dx, dyyy=dy)}, + "-~>": {"style": const_style.a, + "d": "M {fx},{fy} c {dx},{dy} {dxx},{dyy} {dxxx},{dyyy}".format(fx=frm.x, fy=frm.y, + dx=(0.7 * dx), dy=0, + dxx=dx, dyy=dy, + dxxx=dx, dyyy=dy)}, + "~->": {"style": const_style.a, + "d": "M {fx},{fy} c {dx},{dy} {dxx},{dyy} {dxxx},{dyyy}".format(fx=frm.x, fy=frm.y, + dx=0, dy=0, + dxx=(0.3 * dx), dyy=dy, + dxxx=dx, dyyy=dy)}, + "-|>": {"style": const_style.a, + "d": "m {fx},{fy} {dx},{dy} {dxx},{dyy}".format(fx=frm.x, fy=frm.y, + dx=dx, dy=0, + dxx=0, dyy=dy)}, + "|->": {"style": const_style.a, + "d": "m {fx},{fy} {dx},{dy} {dxx},{dyy}".format(fx=frm.x, fy=frm.y, + dx=0, dy=dy, + dxx=dx, dyy=0 + )}, + "-|->": {"style": const_style.a, + "d": "m {fx},{fy} {dx},{dy} {dxx},{dyy} {dxxx},{dyyy}".format(fx=frm.x, fy=frm.y, + dx=(dx / 2), dy=0, + dxx=0, dyy=dy, + dxxx=(dx / 2), dyyy=0 + )}, + "<->": {"style": const_style.b}, + "<~>": {"style": const_style.b, + "d": "M {fx},{fy} c {dx},{dy} {dxx},{dyy} {dxxx},{dyyy}".format(fx=frm.x, fy=frm.y, + dx=(0.7 * dx), dy=0, + dxx=(0.3 * dx), dyy=dy, + dxxx=dx, dyyy=dy + )}, + "<-~>": {"style": const_style.b, + "d": "M {fx},{fy} c {dx},{dy} {dxx},{dyy} {dxxx},{dyyy}".format(fx=frm.x, fy=frm.y, + dx=(0.7 * dx), dy=0, + dxx=dx, dyy=dy, + dxxx=dx, dyyy=dy + )}, + "<-|>": {"style": const_style.b, + "d": "m 
{fx},{fy} {dx},{dy} {dxx},{dyy}".format(fx=frm.x, fy=frm.y, + dx=dx, dy=0, + dxx=0, dyy=dy + )}, + "<-|->": {"style": const_style.b, + "d": "m {fx},{fy} {dx},{dy} {dxx},{dyy} {dxxx},{dyyy}".format(fx=frm.x, fy=frm.y, + dx=(dx / 2), dy=0, + dxx=0, dyy=dy, + dxxx=(dx / 2), dyyy=0, + )} + } + + props = AttrDict({"lx": lx, "ly": ly, "style": "fill:none;stroke:#00F;stroke-width:1", + "d": "M {fx},{fy} {tx},{ty}".format(fx=frm.x, fy=frm.y, tx=to.x, ty=to.y)}) + + if Edge.shape in pattern: + props.d = pattern[Edge.shape].get("d", props.d) + props.style = pattern[Edge.shape].get("style", props.style) + + if Edge.label: + if Edge.shape in ["-~", "-~>", "<-~>"]: + props.lx = float(frm.x) + (float(to.x) - float(frm.x)) * 0.75 + elif Edge.shape in ["~-", "~->"]: + props.lx = float(frm.x) + (float(to.x) - float(frm.x)) * 0.25 + elif Edge.shape in ["-|", "-|>", "<-|>"]: + props.lx = float(to.x) + elif Edge.shape in ["|-", "|->"]: + props.lx = float(frm.x) + + return props + + def render_arc(self, Edge, frm, to, shapeProps): + return self.element.path(id="gmark_{frm}_{to}".format(frm=Edge.frm, to=Edge.to), + d=shapeProps.d, style=shapeProps.style) + + def render_label(self, p, text): + w = self.text_width(text,8) + 2 + g = self.container.g(transform = "translate({},{})".format(p.x, p.y)) + # todo: I don't think this is correct. reported: + # https://github.com/wavedrom/wavedrom/issues/252 + rect = self.element.rect(insert=(int(0-w/2), -5), size=(w, 10), style="fill:#FFF;") + label = self.element.text("", style="font-size:8px;", text_anchor="middle", y=[3]) + label.add(self.element.tspan(text)) + g.add(rect) + g.add(label) + return g + + def render_arcs(self, source, index, top): + Edge = AttrDict({"words": [], "frm": 0, "shape": "", "to": 0, "label": ""}) + Events = AttrDict({}) + + if source: + for idx, val in enumerate(source): + self.lane.period = val.get("period", 1) + self.lane.phase = val.get("phase", 0) * 2 + text = val.get("node") + if text: + Stack = list(text) + Stack.reverse() + pos = 0 + step = 1 + while len(Stack) > 0: + eventname = Stack.pop() + if eventname == "<": + step = 0.25 + continue + elif eventname == ">": + step = 1 + continue + x = int(float(self.lane.xs) * (2 * pos * self.lane.period * + self.lane.hscale - self.lane.phase) + float(self.lane.xlabel)) + y = int(idx * self.lane.yo + self.lane.y0 + float(self.lane.ys) * 0.5) + if eventname != ".": + Events[eventname] = AttrDict({"x": str(x), "y": str(y)}) + pos += step + + gg = self.container.g(id="wavearcs_{index}".format(index=index)) + + if top.get("edge"): + for i, val in enumerate(top["edge"]): + Edge.words = val.split() + Edge.label = val[len(Edge.words[0]):] + Edge.label = Edge.label[1:] + Edge.frm = Edge.words[0][0] + Edge.to = Edge.words[0][-1] + Edge.shape = Edge.words[0][1:-1] + frm = AttrDict(Events[Edge.frm]) + to = AttrDict(Events[Edge.to]) + + shapeProps = self.arc_shape(Edge, frm, to) + gg.add(self.render_arc(Edge, frm, to, shapeProps)) + + if Edge.label: + gg.add(self.render_label(AttrDict({"x": shapeProps.lx, "y": shapeProps.ly}), Edge.label)) + + + for k in Events: + if k.islower() or k.isdigit(): + if int(Events[k].x) > 0: + gg.add(self.render_label(AttrDict({"x": Events[k].x, "y": Events[k].y}), k)) + + return gg + + def parse_config(self, source={}): + self.lane.hscale = 1 + if self.lane.get("hscale0"): + self.lane.hscale = self.lane.hscale0 + + if source and source.get("config") and source.get("config").get("hscale"): + hscale = round(source.get("config").get("hscale")) + if hscale > 0: + if hscale > 100: + 
hscale = 100 + self.lane.hscale = hscale + + self.lane.xmin_cfg = 0 + self.lane.xmax_cfg = sys.maxsize + if source and "config" in source and "hbounds" in source["config"]: + if len(source["config"]["hbounds"]) == 2: + source["config"]["hbounds"][0] = math.floor(source["config"]["hbounds"][0]) + source["config"]["hbounds"][1] = math.ceil(source["config"]["hbounds"][0]) + if source["config"]["hbounds"][0] < source["config"]["hbounds"][1]: + self.lane.xmin_cfg = 2 * source["config"]["hbounds"][0] + self.lane.xmax_cfg = 2 * source["config"]["hbounds"][1] + + self.lane.yh0 = 0 + self.lane.yh1 = 0 + if source and source.get("head"): + self.lane.head = source["head"] + if "tick" in source["head"] or "tock" in source["head"]: + self.lane.yh0 = 20 + if "tick" in source["head"]: + source["head"]["tick"] += self.lane.xmin_cfg/2 + if "tock" in source["head"]: + source["head"]["tock"] += self.lane.xmin_cfg/2 + if source.get("head").get("text"): + self.lane.yh1 = 46 + self.lane.head["text"] = source["head"]["text"] + + self.lane.yf0 = 0 + self.lane.yf1 = 0 + if source and source.get("foot"): + self.lane.foot = source["foot"] + if "tick" in source["foot"] or "tock" in source["foot"]: + self.lane.yf0 = 20 + if "tick" in source["foot"]: + source["foot"]["tick"] += self.lane.xmin_cfg/2 + if "tock" in source["foot"]: + source["foot"]["tock"] += self.lane.xmin_cfg/2 + if source.get("foot").get("text"): + self.lane.yf1 = 46 + self.lane.foot["text"] = source["foot"]["text"] + + def rec(self, tmp=[], state={}): + name = None + delta = AttrDict({"x": 10}) + if isinstance(tmp[0], str) or isinstance(tmp[0], int): + name = str(tmp[0]) + delta.x = 25 + + state.x += delta.x + for idx, val in enumerate(tmp): + if isinstance(val, list): + old_y = state.y + self.rec(val, state) + state["groups"].append({"x": state.xx, + "y": old_y, + "height": state.y - old_y, + "name": state.name}) + elif isinstance(val, dict): + state["lanes"].append(val) + state["width"].append(state.x) + state.y += 1 + + state.xx = state.x + state.x -= delta.x + state.name = name + + def another_template(self, index, source): + def get_container(elem): + ctype = elem[0] + ret = self.container[ctype]() + ret.attribs = elem[1] + + def gen_elem(e): + if e[0] == "path": + attr = e[1] + elem = self.element.path(d=attr["d"]) + elem.attribs = attr + elif e[0] == "rect": + attr = e[1] + x = attr["x"] + y = attr["y"] + w = attr["width"] + h = attr["height"] + elem = self.element.rect(insert=(x, y), size=(w, h)) + elem.attribs = attr + + return elem + [ret.add(gen_elem(e)) for e in elem[2:]] + + return ret + + skinname = source.get("config", {"skin" : "default"}).get("skin", "default") + skin = waveskin.WaveSkin.get(skinname, waveskin.WaveSkin["default"]) + + template = svgwrite.Drawing(id="svgcontent_{index}".format(index=index)) + if index == 0: + template.add(template.style(skin[2][2])) + [template.defs.add(get_container(e)) for e in skin[3][1:]] + self.lane.xs = int(skin[3][1][2][1]["width"]) + self.lane.ys = int(skin[3][1][2][1]["height"]) + self.lane.xlabel = int(skin[3][1][2][1]["x"]) + self.lane.ym = int(skin[3][1][2][1]["y"]) + + template["class"] = "WaveDrom" + template["overflow"] = "hidden" + + return template + + def insert_svg_template(self, index=0, parent=[], source={}): + e = waveskin.WaveSkin["default"] + + if source.get("config") and source.get("config").get("skin"): + if waveskin.WaveSkin.get(source.get("config").get("skin")): + e = waveskin.WaveSkin[source.get("config").get("skin")] + + if index == 0: + self.lane.xs = 
int(e[3][1][2][1]["width"]) + self.lane.ys = int(e[3][1][2][1]["height"]) + self.lane.xlabel = int(e[3][1][2][1]["x"]) + self.lane.ym = int(e[3][1][2][1]["y"]) + + else: + e = ["svg", + {"id": "svg", + "xmlns": "http://www.w3.org/2000/svg", + "xmlns:xlink": "http://www.w3.org/1999/xlink", + "height": "0"}, + [ # e[-1] + "g", # e[-1][0] + {"id": "waves"}, # e[-1][1] + [ # e[-1][2] + "g", # e[-1][2][0] + {"id": "lanes"} # e[-1][2][1] + ], + [ # e[-1][3] + "g", # e[-1][3][0] + {"id": "groups"} # e[-1][3][1] + ] + ] + ] + + e[-1][1]["id"] = "waves_{index}".format(index=index) + e[-1][2][1]["id"] = "lanes_{index}".format(index=index) + e[-1][3][1]["id"] = "groups_{index}".format(index=index) + e[1]["id"] = "svgcontent_{index}".format(index=index) + e[1]["height"] = 0 + + parent.extend(e) + + def render_waveform(self, index=0, source={}, output=[], strict_js_features=False): + xmax = 0 + + if source.get("signal"): + template = self.another_template(index, source) + waves = template.g(id="waves_{index}".format(index=index)) + lanes = template.g(id="lanes_{index}".format(index=index)) + groups = template.g(id="groups_{index}".format(index=index)) + self.parse_config(source) + ret = AttrDict({"x": 0, "y": 0, "xmax": 0, "width": [], "lanes": [], "groups": []}) + self.rec(source["signal"], ret) # parse lanes + content = self.parse_wave_lanes(ret.lanes) + (glengths, lanegroups) = self.render_wave_lane(content, index) + for i, val in enumerate(glengths): + xmax = max(xmax, (val + ret.width[i])) + marks = self.render_marks(content, index) + gaps = self.render_gaps(ret.lanes, index) + if not strict_js_features: + self.render_labels(lanes, ret.lanes, index) + arcs = self.render_arcs(ret.lanes, index, source) + + # Render + lanes.add(marks) + [lanes.add(l) for l in lanegroups] + lanes.add(arcs) + lanes.add(gaps) + + self.render_groups(groups, ret.groups, index) + self.lane.xg = int(math.ceil(float(xmax - self.lane.tgo) / float(self.lane.xs))) * self.lane.xs + width = self.lane.xg + self.lane.xs * (self.lane.xmax + 1) + height = len(content) * self.lane.yo + self.lane.yh0 + self.lane.yh1 + self.lane.yf0 + self.lane.yf1 + template["width"] = width + template["height"] = height + template.viewbox(0, 0, width, height) + dx = self.lane.xg + 0.5 + dy = float(self.lane.yh0) + float(self.lane.yh1) + 0.5 + lanes.translate(dx, dy) + + waves.add(lanes) + waves.add(groups) + template.add(waves) + return template + + def render_groups(self, root=[], groups=[], index=0): + for i, val in enumerate(groups): + dx = groups[i]["x"] + 0.5 + dy = groups[i]["y"] * self.lane.yo + 3.5 + self.lane.yh0 + self.lane.yh1 + h = int(groups[i]["height"] * self.lane.yo - 16) + group = self.element.path(id="group_{i}_{index}".format(i=i, index=index), + d="m {dx},{dy} c -3,0 -5,2 -5,5 l 0,{h} c 0,3 2,5 5,5".format(dx=dx, dy=dy, h=h), + style="stroke:#0041c4;stroke-width:1;fill:none") + + root.add(group) + + name = groups[i]["name"] + x = int(groups[i]["x"] - 10) + y = int(self.lane.yo * (groups[i]["y"] + (float(groups[i]["height"]) / 2)) + + self.lane.yh0 + self.lane.yh1) + label = self.container.g() + label.translate(x, y) + gg = self.container.g() + gg.rotate(270) + t = self.element.text("", text_anchor="middle") + t["class"] = "info" + t["xml:space"] = "preserve" + t.add(self.element.tspan(name)) + gg.add(t) + label.add(gg) + root.add(label) + + def render_gap_uses(self, wave, g): + subCycle = False + + if wave: + Stack = deque(wave) + pos = 0 + while len(Stack): + next = Stack.popleft() + if next == '<': + subCycle = True + continue + 
if next == '>': + subCycle = False + continue + if subCycle: + pos += self.lane.period + else: + pos += 2 * self.lane.period + if next == "|": + if subCycle: + dx = float(self.lane.xs) * (pos * float(self.lane.hscale) - float(self.lane.phase)) + else: + dx = float(self.lane.xs) * ((pos - self.lane.period) * float(self.lane.hscale) - float(self.lane.phase)) + b = self.container.use(href="#gap") + b.translate(dx) + g.add(b) + + def render_gaps(self, source, index): + if source: + gg = self.container.g(id="wavegaps_{index}".format(index=index)) + + for idx, val in enumerate(source): + self.lane.period = val.get("period", 1) + self.lane.phase = int(val.get("phase", 0) * 2) + self.lane.xmin_cfg + + dy = self.lane.y0 + idx * self.lane.yo + g = self.container.g(id="wavegap_{i}_{index}".format(i=idx, index=index)) + g.translate(0, dy) + + if "wave" in val: + self.render_gap_uses(val["wave"], g) + + gg.add(g) + + return gg + + def convert_to_svg(self, root): + svg_output = "" + + if type(root) is list: + if len(root) >= 2 and type(root[1]) is dict: + if len(root) == 2: + svg_output += "<{}{}/>\n".format(root[0], self.convert_to_svg(root[1])) + elif len(root) >= 3: + svg_output += "<{}{}/>\n".format(root[0], self.convert_to_svg(root[1])) + if len(root) == 3: + svg_output += self.convert_to_svg(root[2]) + else: + svg_output += self.convert_to_svg(root[2:]) + svg_output += "\n".format(root[0]) + elif type(root[0]) is list: + for eleml in root: + svg_output += self.convert_to_svg(eleml) + else: + svg_output += "<{}>\n".format(root[0]) + for eleml in root[1:]: + svg_output += self.convert_to_svg(eleml) + svg_output += "\n".format(root[0]) + elif type(root) is dict: + for elemd in root: + svg_output += " {}=\"{}\"".format(elemd, root[elemd]) + else: + svg_output += root + + return svg_output + + # Backward compatibility + genWaveBrick = gen_wave_brick + parseWaveLane = parse_wave_lane + parseWaveLanes = parse_wave_lanes + findLaneMarkers = find_lane_markers + renderWaveLane = render_wave_lane + renderMarks = render_marks + renderLabels = render_labels + renderArcs = render_arcs + parseConfig = parse_config + anotherTemplate = another_template + insertSVGTemplate = insert_svg_template + renderWaveForm = render_waveform + renderGroups = render_groups + renderGaps = render_gaps diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1ff28642bfb81a343ba071373d28cea8d80886e5 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/__init__.py @@ -0,0 +1,95 @@ +"""A variety of linear models.""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +# See http://scikit-learn.sourceforge.net/modules/sgd.html and +# http://scikit-learn.sourceforge.net/modules/linear_model.html for +# complete documentation. 
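# A minimal usage sketch (editorial illustration, not part of the original
# module): every estimator re-exported below follows the same fit/predict
# protocol. The names X_train, y_train and X_test are placeholders assumed
# to be NumPy arrays supplied by the caller.
#
#     from sklearn.linear_model import Ridge
#     model = Ridge(alpha=1.0)          # alpha=1.0 is the default regularization
#     model.fit(X_train, y_train)       # estimate the coefficients
#     y_pred = model.predict(X_test)    # predict on held-out data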
+ +from ._base import LinearRegression +from ._bayes import ARDRegression, BayesianRidge +from ._coordinate_descent import ( + ElasticNet, + ElasticNetCV, + Lasso, + LassoCV, + MultiTaskElasticNet, + MultiTaskElasticNetCV, + MultiTaskLasso, + MultiTaskLassoCV, + enet_path, + lasso_path, +) +from ._glm import GammaRegressor, PoissonRegressor, TweedieRegressor +from ._huber import HuberRegressor +from ._least_angle import ( + Lars, + LarsCV, + LassoLars, + LassoLarsCV, + LassoLarsIC, + lars_path, + lars_path_gram, +) +from ._logistic import LogisticRegression, LogisticRegressionCV +from ._omp import ( + OrthogonalMatchingPursuit, + OrthogonalMatchingPursuitCV, + orthogonal_mp, + orthogonal_mp_gram, +) +from ._passive_aggressive import PassiveAggressiveClassifier, PassiveAggressiveRegressor +from ._perceptron import Perceptron +from ._quantile import QuantileRegressor +from ._ransac import RANSACRegressor +from ._ridge import Ridge, RidgeClassifier, RidgeClassifierCV, RidgeCV, ridge_regression +from ._stochastic_gradient import SGDClassifier, SGDOneClassSVM, SGDRegressor +from ._theil_sen import TheilSenRegressor + +__all__ = [ + "ARDRegression", + "BayesianRidge", + "ElasticNet", + "ElasticNetCV", + "HuberRegressor", + "Lars", + "LarsCV", + "Lasso", + "LassoCV", + "LassoLars", + "LassoLarsCV", + "LassoLarsIC", + "LinearRegression", + "LogisticRegression", + "LogisticRegressionCV", + "MultiTaskElasticNet", + "MultiTaskElasticNetCV", + "MultiTaskLasso", + "MultiTaskLassoCV", + "OrthogonalMatchingPursuit", + "OrthogonalMatchingPursuitCV", + "PassiveAggressiveClassifier", + "PassiveAggressiveRegressor", + "Perceptron", + "QuantileRegressor", + "Ridge", + "RidgeCV", + "RidgeClassifier", + "RidgeClassifierCV", + "SGDClassifier", + "SGDRegressor", + "SGDOneClassSVM", + "TheilSenRegressor", + "enet_path", + "lars_path", + "lars_path_gram", + "lasso_path", + "orthogonal_mp", + "orthogonal_mp_gram", + "ridge_regression", + "RANSACRegressor", + "PoissonRegressor", + "GammaRegressor", + "TweedieRegressor", +] diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_bayes.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_bayes.py new file mode 100644 index 0000000000000000000000000000000000000000..b6527d4f22b1fb862427ad0a30f4b29b72e431bb --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_bayes.py @@ -0,0 +1,797 @@ +""" +Various bayesian regression +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +from math import log +from numbers import Integral, Real + +import numpy as np +from scipy import linalg +from scipy.linalg import pinvh + +from ..base import RegressorMixin, _fit_context +from ..utils import _safe_indexing +from ..utils._param_validation import Interval +from ..utils.extmath import fast_logdet +from ..utils.validation import _check_sample_weight, validate_data +from ._base import LinearModel, _preprocess_data, _rescale_data + +############################################################################### +# BayesianRidge regression + + +class BayesianRidge(RegressorMixin, LinearModel): + """Bayesian ridge regression. + + Fit a Bayesian ridge model. See the Notes section for details on this + implementation and the optimization of the regularization parameters + lambda (precision of the weights) and alpha (precision of the noise). + + Read more in the :ref:`User Guide `. 
+ For an intuitive visualization of how the sinusoid is approximated by + a polynomial using different pairs of initial values, see + :ref:`sphx_glr_auto_examples_linear_model_plot_bayesian_ridge_curvefit.py`. + + Parameters + ---------- + max_iter : int, default=300 + Maximum number of iterations over the complete dataset before + stopping independently of any early stopping criterion. + + .. versionchanged:: 1.3 + + tol : float, default=1e-3 + Stop the algorithm if w has converged. + + alpha_1 : float, default=1e-6 + Hyper-parameter : shape parameter for the Gamma distribution prior + over the alpha parameter. + + alpha_2 : float, default=1e-6 + Hyper-parameter : inverse scale parameter (rate parameter) for the + Gamma distribution prior over the alpha parameter. + + lambda_1 : float, default=1e-6 + Hyper-parameter : shape parameter for the Gamma distribution prior + over the lambda parameter. + + lambda_2 : float, default=1e-6 + Hyper-parameter : inverse scale parameter (rate parameter) for the + Gamma distribution prior over the lambda parameter. + + alpha_init : float, default=None + Initial value for alpha (precision of the noise). + If not set, alpha_init is 1/Var(y). + + .. versionadded:: 0.22 + + lambda_init : float, default=None + Initial value for lambda (precision of the weights). + If not set, lambda_init is 1. + + .. versionadded:: 0.22 + + compute_score : bool, default=False + If True, compute the log marginal likelihood at each iteration of the + optimization. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. + The intercept is not treated as a probabilistic parameter + and thus has no associated variance. If set + to False, no intercept will be used in calculations + (i.e. data is expected to be centered). + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + verbose : bool, default=False + Verbose mode when fitting the model. + + Attributes + ---------- + coef_ : array-like of shape (n_features,) + Coefficients of the regression model (mean of distribution) + + intercept_ : float + Independent term in decision function. Set to 0.0 if + `fit_intercept = False`. + + alpha_ : float + Estimated precision of the noise. + + lambda_ : float + Estimated precision of the weights. + + sigma_ : array-like of shape (n_features, n_features) + Estimated variance-covariance matrix of the weights + + scores_ : array-like of shape (n_iter_+1,) + If computed_score is True, value of the log marginal likelihood (to be + maximized) at each iteration of the optimization. The array starts + with the value of the log marginal likelihood obtained for the initial + values of alpha and lambda and ends with the value obtained for the + estimated alpha and lambda. + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + + X_offset_ : ndarray of shape (n_features,) + If `fit_intercept=True`, offset subtracted for centering data to a + zero mean. Set to np.zeros(n_features) otherwise. + + X_scale_ : ndarray of shape (n_features,) + Set to np.ones(n_features). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + ARDRegression : Bayesian ARD regression. 
+ + Notes + ----- + There exist several strategies to perform Bayesian ridge regression. This + implementation is based on the algorithm described in Appendix A of + (Tipping, 2001) where updates of the regularization parameters are done as + suggested in (MacKay, 1992). Note that according to A New + View of Automatic Relevance Determination (Wipf and Nagarajan, 2008) these + update rules do not guarantee that the marginal likelihood is increasing + between two consecutive iterations of the optimization. + + References + ---------- + D. J. C. MacKay, Bayesian Interpolation, Computation and Neural Systems, + Vol. 4, No. 3, 1992. + + M. E. Tipping, Sparse Bayesian Learning and the Relevance Vector Machine, + Journal of Machine Learning Research, Vol. 1, 2001. + + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.BayesianRidge() + >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) + BayesianRidge() + >>> clf.predict([[1, 1]]) + array([1.]) + """ + + _parameter_constraints: dict = { + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="neither")], + "alpha_1": [Interval(Real, 0, None, closed="left")], + "alpha_2": [Interval(Real, 0, None, closed="left")], + "lambda_1": [Interval(Real, 0, None, closed="left")], + "lambda_2": [Interval(Real, 0, None, closed="left")], + "alpha_init": [None, Interval(Real, 0, None, closed="left")], + "lambda_init": [None, Interval(Real, 0, None, closed="left")], + "compute_score": ["boolean"], + "fit_intercept": ["boolean"], + "copy_X": ["boolean"], + "verbose": ["verbose"], + } + + def __init__( + self, + *, + max_iter=300, + tol=1.0e-3, + alpha_1=1.0e-6, + alpha_2=1.0e-6, + lambda_1=1.0e-6, + lambda_2=1.0e-6, + alpha_init=None, + lambda_init=None, + compute_score=False, + fit_intercept=True, + copy_X=True, + verbose=False, + ): + self.max_iter = max_iter + self.tol = tol + self.alpha_1 = alpha_1 + self.alpha_2 = alpha_2 + self.lambda_1 = lambda_1 + self.lambda_2 = lambda_2 + self.alpha_init = alpha_init + self.lambda_init = lambda_init + self.compute_score = compute_score + self.fit_intercept = fit_intercept + self.copy_X = copy_X + self.verbose = verbose + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """Fit the model. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Training data. + y : ndarray of shape (n_samples,) + Target values. Will be cast to X's dtype if necessary. + + sample_weight : ndarray of shape (n_samples,), default=None + Individual weights for each sample. + + .. versionadded:: 0.20 + parameter *sample_weight* support to BayesianRidge. + + Returns + ------- + self : object + Returns the instance itself. + """ + X, y = validate_data( + self, + X, + y, + dtype=[np.float64, np.float32], + force_writeable=True, + y_numeric=True, + ) + dtype = X.dtype + + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=dtype) + + X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data( + X, + y, + fit_intercept=self.fit_intercept, + copy=self.copy_X, + sample_weight=sample_weight, + ) + + if sample_weight is not None: + # Sample weight can be implemented via a simple rescaling. 
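            # Concretely, minimizing sum_i w_i * (y_i - x_i @ coef)^2 is equivalent
            # to an unweighted least-squares fit on sqrt(w_i) * x_i and sqrt(w_i) * y_i;
            # _rescale_data below applies exactly this square-root scaling to X and y.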
+ X, y, _ = _rescale_data(X, y, sample_weight) + + self.X_offset_ = X_offset_ + self.X_scale_ = X_scale_ + n_samples, n_features = X.shape + + # Initialization of the values of the parameters + eps = np.finfo(np.float64).eps + # Add `eps` in the denominator to omit division by zero if `np.var(y)` + # is zero + alpha_ = self.alpha_init + lambda_ = self.lambda_init + if alpha_ is None: + alpha_ = 1.0 / (np.var(y) + eps) + if lambda_ is None: + lambda_ = 1.0 + + # Avoid unintended type promotion to float64 with numpy 2 + alpha_ = np.asarray(alpha_, dtype=dtype) + lambda_ = np.asarray(lambda_, dtype=dtype) + + verbose = self.verbose + lambda_1 = self.lambda_1 + lambda_2 = self.lambda_2 + alpha_1 = self.alpha_1 + alpha_2 = self.alpha_2 + + self.scores_ = list() + coef_old_ = None + + XT_y = np.dot(X.T, y) + U, S, Vh = linalg.svd(X, full_matrices=False) + eigen_vals_ = S**2 + + # Convergence loop of the bayesian ridge regression + for iter_ in range(self.max_iter): + # update posterior mean coef_ based on alpha_ and lambda_ and + # compute corresponding rmse + coef_, rmse_ = self._update_coef_( + X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_ + ) + if self.compute_score: + # compute the log marginal likelihood + s = self._log_marginal_likelihood( + n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_ + ) + self.scores_.append(s) + + # Update alpha and lambda according to (MacKay, 1992) + gamma_ = np.sum((alpha_ * eigen_vals_) / (lambda_ + alpha_ * eigen_vals_)) + lambda_ = (gamma_ + 2 * lambda_1) / (np.sum(coef_**2) + 2 * lambda_2) + alpha_ = (n_samples - gamma_ + 2 * alpha_1) / (rmse_ + 2 * alpha_2) + + # Check for convergence + if iter_ != 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol: + if verbose: + print("Convergence after ", str(iter_), " iterations") + break + coef_old_ = np.copy(coef_) + + self.n_iter_ = iter_ + 1 + + # return regularization parameters and corresponding posterior mean, + # log marginal likelihood and posterior covariance + self.alpha_ = alpha_ + self.lambda_ = lambda_ + self.coef_, rmse_ = self._update_coef_( + X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_ + ) + if self.compute_score: + # compute the log marginal likelihood + s = self._log_marginal_likelihood( + n_samples, n_features, eigen_vals_, alpha_, lambda_, coef_, rmse_ + ) + self.scores_.append(s) + self.scores_ = np.array(self.scores_) + + # posterior covariance is given by 1/alpha_ * scaled_sigma_ + scaled_sigma_ = np.dot( + Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis] + ) + self.sigma_ = (1.0 / alpha_) * scaled_sigma_ + + self._set_intercept(X_offset_, y_offset_, X_scale_) + + return self + + def predict(self, X, return_std=False): + """Predict using the linear model. + + In addition to the mean of the predictive distribution, also its + standard deviation can be returned. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + return_std : bool, default=False + Whether to return the standard deviation of posterior prediction. + + Returns + ------- + y_mean : array-like of shape (n_samples,) + Mean of predictive distribution of query points. + + y_std : array-like of shape (n_samples,) + Standard deviation of predictive distribution of query points. 
+ """ + y_mean = self._decision_function(X) + if not return_std: + return y_mean + else: + sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1) + y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_)) + return y_mean, y_std + + def _update_coef_( + self, X, y, n_samples, n_features, XT_y, U, Vh, eigen_vals_, alpha_, lambda_ + ): + """Update posterior mean and compute corresponding rmse. + + Posterior mean is given by coef_ = scaled_sigma_ * X.T * y where + scaled_sigma_ = (lambda_/alpha_ * np.eye(n_features) + + np.dot(X.T, X))^-1 + """ + + if n_samples > n_features: + coef_ = np.linalg.multi_dot( + [Vh.T, Vh / (eigen_vals_ + lambda_ / alpha_)[:, np.newaxis], XT_y] + ) + else: + coef_ = np.linalg.multi_dot( + [X.T, U / (eigen_vals_ + lambda_ / alpha_)[None, :], U.T, y] + ) + + rmse_ = np.sum((y - np.dot(X, coef_)) ** 2) + + return coef_, rmse_ + + def _log_marginal_likelihood( + self, n_samples, n_features, eigen_vals, alpha_, lambda_, coef, rmse + ): + """Log marginal likelihood.""" + alpha_1 = self.alpha_1 + alpha_2 = self.alpha_2 + lambda_1 = self.lambda_1 + lambda_2 = self.lambda_2 + + # compute the log of the determinant of the posterior covariance. + # posterior covariance is given by + # sigma = (lambda_ * np.eye(n_features) + alpha_ * np.dot(X.T, X))^-1 + if n_samples > n_features: + logdet_sigma = -np.sum(np.log(lambda_ + alpha_ * eigen_vals)) + else: + logdet_sigma = np.full(n_features, lambda_, dtype=np.array(lambda_).dtype) + logdet_sigma[:n_samples] += alpha_ * eigen_vals + logdet_sigma = -np.sum(np.log(logdet_sigma)) + + score = lambda_1 * log(lambda_) - lambda_2 * lambda_ + score += alpha_1 * log(alpha_) - alpha_2 * alpha_ + score += 0.5 * ( + n_features * log(lambda_) + + n_samples * log(alpha_) + - alpha_ * rmse + - lambda_ * np.sum(coef**2) + + logdet_sigma + - n_samples * log(2 * np.pi) + ) + + return score + + +############################################################################### +# ARD (Automatic Relevance Determination) regression + + +class ARDRegression(RegressorMixin, LinearModel): + """Bayesian ARD regression. + + Fit the weights of a regression model, using an ARD prior. The weights of + the regression model are assumed to be in Gaussian distributions. + Also estimate the parameters lambda (precisions of the distributions of the + weights) and alpha (precision of the distribution of the noise). + The estimation is done by an iterative procedures (Evidence Maximization) + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + max_iter : int, default=300 + Maximum number of iterations. + + .. versionchanged:: 1.3 + + tol : float, default=1e-3 + Stop the algorithm if w has converged. + + alpha_1 : float, default=1e-6 + Hyper-parameter : shape parameter for the Gamma distribution prior + over the alpha parameter. + + alpha_2 : float, default=1e-6 + Hyper-parameter : inverse scale parameter (rate parameter) for the + Gamma distribution prior over the alpha parameter. + + lambda_1 : float, default=1e-6 + Hyper-parameter : shape parameter for the Gamma distribution prior + over the lambda parameter. + + lambda_2 : float, default=1e-6 + Hyper-parameter : inverse scale parameter (rate parameter) for the + Gamma distribution prior over the lambda parameter. + + compute_score : bool, default=False + If True, compute the objective function at each step of the model. + + threshold_lambda : float, default=10 000 + Threshold for removing (pruning) weights with high precision from + the computation. 
+ + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + verbose : bool, default=False + Verbose mode when fitting the model. + + Attributes + ---------- + coef_ : array-like of shape (n_features,) + Coefficients of the regression model (mean of distribution) + + alpha_ : float + estimated precision of the noise. + + lambda_ : array-like of shape (n_features,) + estimated precisions of the weights. + + sigma_ : array-like of shape (n_features, n_features) + estimated variance-covariance matrix of the weights + + scores_ : float + if computed, value of the objective function (to be maximized) + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + + .. versionadded:: 1.3 + + intercept_ : float + Independent term in decision function. Set to 0.0 if + ``fit_intercept = False``. + + X_offset_ : float + If `fit_intercept=True`, offset subtracted for centering data to a + zero mean. Set to np.zeros(n_features) otherwise. + + X_scale_ : float + Set to np.ones(n_features). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + BayesianRidge : Bayesian ridge regression. + + Notes + ----- + For an example, see :ref:`examples/linear_model/plot_ard.py + `. + + References + ---------- + D. J. C. MacKay, Bayesian nonlinear modeling for the prediction + competition, ASHRAE Transactions, 1994. + + R. Salakhutdinov, Lecture notes on Statistical Machine Learning, + http://www.utstat.toronto.edu/~rsalakhu/sta4273/notes/Lecture2.pdf#page=15 + Their beta is our ``self.alpha_`` + Their alpha is our ``self.lambda_`` + ARD is a little different than the slide: only dimensions/features for + which ``self.lambda_ < self.threshold_lambda`` are kept and the rest are + discarded. 
+ + Examples + -------- + >>> from sklearn import linear_model + >>> clf = linear_model.ARDRegression() + >>> clf.fit([[0,0], [1, 1], [2, 2]], [0, 1, 2]) + ARDRegression() + >>> clf.predict([[1, 1]]) + array([1.]) + """ + + _parameter_constraints: dict = { + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left")], + "alpha_1": [Interval(Real, 0, None, closed="left")], + "alpha_2": [Interval(Real, 0, None, closed="left")], + "lambda_1": [Interval(Real, 0, None, closed="left")], + "lambda_2": [Interval(Real, 0, None, closed="left")], + "compute_score": ["boolean"], + "threshold_lambda": [Interval(Real, 0, None, closed="left")], + "fit_intercept": ["boolean"], + "copy_X": ["boolean"], + "verbose": ["verbose"], + } + + def __init__( + self, + *, + max_iter=300, + tol=1.0e-3, + alpha_1=1.0e-6, + alpha_2=1.0e-6, + lambda_1=1.0e-6, + lambda_2=1.0e-6, + compute_score=False, + threshold_lambda=1.0e4, + fit_intercept=True, + copy_X=True, + verbose=False, + ): + self.max_iter = max_iter + self.tol = tol + self.fit_intercept = fit_intercept + self.alpha_1 = alpha_1 + self.alpha_2 = alpha_2 + self.lambda_1 = lambda_1 + self.lambda_2 = lambda_2 + self.compute_score = compute_score + self.threshold_lambda = threshold_lambda + self.copy_X = copy_X + self.verbose = verbose + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y): + """Fit the model according to the given training data and parameters. + + Iterative procedure to maximize the evidence + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + y : array-like of shape (n_samples,) + Target values (integers). Will be cast to X's dtype if necessary. + + Returns + ------- + self : object + Fitted estimator. + """ + X, y = validate_data( + self, + X, + y, + dtype=[np.float64, np.float32], + force_writeable=True, + y_numeric=True, + ensure_min_samples=2, + ) + dtype = X.dtype + + n_samples, n_features = X.shape + coef_ = np.zeros(n_features, dtype=dtype) + + X, y, X_offset_, y_offset_, X_scale_ = _preprocess_data( + X, y, fit_intercept=self.fit_intercept, copy=self.copy_X + ) + + self.X_offset_ = X_offset_ + self.X_scale_ = X_scale_ + + # Launch the convergence loop + keep_lambda = np.ones(n_features, dtype=bool) + + lambda_1 = self.lambda_1 + lambda_2 = self.lambda_2 + alpha_1 = self.alpha_1 + alpha_2 = self.alpha_2 + verbose = self.verbose + + # Initialization of the values of the parameters + eps = np.finfo(np.float64).eps + # Add `eps` in the denominator to omit division by zero if `np.var(y)` + # is zero. + # Explicitly set dtype to avoid unintended type promotion with numpy 2. 
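        # alpha_ (the noise precision) starts at 1 / Var(y), while lambda_ starts as
        # a vector of ones with one entry per feature: unlike BayesianRidge, which
        # uses a single scalar lambda_, ARD learns an individual precision per weight.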
+ alpha_ = np.asarray(1.0 / (np.var(y) + eps), dtype=dtype) + lambda_ = np.ones(n_features, dtype=dtype) + + self.scores_ = list() + coef_old_ = None + + def update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_): + coef_[keep_lambda] = alpha_ * np.linalg.multi_dot( + [sigma_, X[:, keep_lambda].T, y] + ) + return coef_ + + update_sigma = ( + self._update_sigma + if n_samples >= n_features + else self._update_sigma_woodbury + ) + # Iterative procedure of ARDRegression + for iter_ in range(self.max_iter): + sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda) + coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_) + + # Update alpha and lambda + rmse_ = np.sum((y - np.dot(X, coef_)) ** 2) + gamma_ = 1.0 - lambda_[keep_lambda] * np.diag(sigma_) + lambda_[keep_lambda] = (gamma_ + 2.0 * lambda_1) / ( + (coef_[keep_lambda]) ** 2 + 2.0 * lambda_2 + ) + alpha_ = (n_samples - gamma_.sum() + 2.0 * alpha_1) / ( + rmse_ + 2.0 * alpha_2 + ) + + # Prune the weights with a precision over a threshold + keep_lambda = lambda_ < self.threshold_lambda + coef_[~keep_lambda] = 0 + + # Compute the objective function + if self.compute_score: + s = (lambda_1 * np.log(lambda_) - lambda_2 * lambda_).sum() + s += alpha_1 * log(alpha_) - alpha_2 * alpha_ + s += 0.5 * ( + fast_logdet(sigma_) + + n_samples * log(alpha_) + + np.sum(np.log(lambda_)) + ) + s -= 0.5 * (alpha_ * rmse_ + (lambda_ * coef_**2).sum()) + self.scores_.append(s) + + # Check for convergence + if iter_ > 0 and np.sum(np.abs(coef_old_ - coef_)) < self.tol: + if verbose: + print("Converged after %s iterations" % iter_) + break + coef_old_ = np.copy(coef_) + + if not keep_lambda.any(): + break + + self.n_iter_ = iter_ + 1 + + if keep_lambda.any(): + # update sigma and mu using updated params from the last iteration + sigma_ = update_sigma(X, alpha_, lambda_, keep_lambda) + coef_ = update_coeff(X, y, coef_, alpha_, keep_lambda, sigma_) + else: + sigma_ = np.array([]).reshape(0, 0) + + self.coef_ = coef_ + self.alpha_ = alpha_ + self.sigma_ = sigma_ + self.lambda_ = lambda_ + self._set_intercept(X_offset_, y_offset_, X_scale_) + return self + + def _update_sigma_woodbury(self, X, alpha_, lambda_, keep_lambda): + # See slides as referenced in the docstring note + # this function is used when n_samples < n_features and will invert + # a matrix of shape (n_samples, n_samples) making use of the + # woodbury formula: + # https://en.wikipedia.org/wiki/Woodbury_matrix_identity + n_samples = X.shape[0] + X_keep = X[:, keep_lambda] + inv_lambda = 1 / lambda_[keep_lambda].reshape(1, -1) + sigma_ = pinvh( + np.eye(n_samples, dtype=X.dtype) / alpha_ + + np.dot(X_keep * inv_lambda, X_keep.T) + ) + sigma_ = np.dot(sigma_, X_keep * inv_lambda) + sigma_ = -np.dot(inv_lambda.reshape(-1, 1) * X_keep.T, sigma_) + sigma_[np.diag_indices(sigma_.shape[1])] += 1.0 / lambda_[keep_lambda] + return sigma_ + + def _update_sigma(self, X, alpha_, lambda_, keep_lambda): + # See slides as referenced in the docstring note + # this function is used when n_samples >= n_features and will + # invert a matrix of shape (n_features, n_features) + X_keep = X[:, keep_lambda] + gram = np.dot(X_keep.T, X_keep) + eye = np.eye(gram.shape[0], dtype=X.dtype) + sigma_inv = lambda_[keep_lambda] * eye + alpha_ * gram + sigma_ = pinvh(sigma_inv) + return sigma_ + + def predict(self, X, return_std=False): + """Predict using the linear model. + + In addition to the mean of the predictive distribution, also its + standard deviation can be returned. 
+ + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Samples. + + return_std : bool, default=False + Whether to return the standard deviation of posterior prediction. + + Returns + ------- + y_mean : array-like of shape (n_samples,) + Mean of predictive distribution of query points. + + y_std : array-like of shape (n_samples,) + Standard deviation of predictive distribution of query points. + """ + y_mean = self._decision_function(X) + if return_std is False: + return y_mean + else: + col_index = self.lambda_ < self.threshold_lambda + X = _safe_indexing(X, indices=col_index, axis=1) + sigmas_squared_data = (np.dot(X, self.sigma_) * X).sum(axis=1) + y_std = np.sqrt(sigmas_squared_data + (1.0 / self.alpha_)) + return y_mean, y_std diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_least_angle.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_least_angle.py new file mode 100644 index 0000000000000000000000000000000000000000..25f956e5fadda1591b4edc1ab3dc23adb9ae0789 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_least_angle.py @@ -0,0 +1,2346 @@ +""" +Least Angle Regression algorithm. See the documentation on the +Generalized Linear Model for a complete discussion. +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import sys +import warnings +from math import log +from numbers import Integral, Real + +import numpy as np +from scipy import interpolate, linalg +from scipy.linalg.lapack import get_lapack_funcs + +from ..base import MultiOutputMixin, RegressorMixin, _fit_context +from ..exceptions import ConvergenceWarning +from ..model_selection import check_cv + +# mypy error: Module 'sklearn.utils' has no attribute 'arrayfuncs' +from ..utils import ( # type: ignore + Bunch, + arrayfuncs, + as_float_array, + check_random_state, +) +from ..utils._metadata_requests import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils._param_validation import Hidden, Interval, StrOptions, validate_params +from ..utils.parallel import Parallel, delayed +from ..utils.validation import validate_data +from ._base import LinearModel, LinearRegression, _preprocess_data + +SOLVE_TRIANGULAR_ARGS = {"check_finite": False} + + +@validate_params( + { + "X": [np.ndarray, None], + "y": [np.ndarray, None], + "Xy": [np.ndarray, None], + "Gram": [StrOptions({"auto"}), "boolean", np.ndarray, None], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "alpha_min": [Interval(Real, 0, None, closed="left")], + "method": [StrOptions({"lar", "lasso"})], + "copy_X": ["boolean"], + "eps": [Interval(Real, 0, None, closed="neither"), None], + "copy_Gram": ["boolean"], + "verbose": ["verbose"], + "return_path": ["boolean"], + "return_n_iter": ["boolean"], + "positive": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def lars_path( + X, + y, + Xy=None, + *, + Gram=None, + max_iter=500, + alpha_min=0, + method="lar", + copy_X=True, + eps=np.finfo(float).eps, + copy_Gram=True, + verbose=0, + return_path=True, + return_n_iter=False, + positive=False, +): + """Compute Least Angle Regression or Lasso path using the LARS algorithm. + + The optimization objective for the case method='lasso' is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + in the case of method='lar', the objective function is only known in + the form of an implicit equation (see discussion in [1]_). 
+ + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : None or ndarray of shape (n_samples, n_features) + Input data. If X is `None`, Gram must also be `None`. + If only the Gram matrix is available, use `lars_path_gram` instead. + + y : None or ndarray of shape (n_samples,) + Input targets. + + Xy : array-like of shape (n_features,), default=None + `Xy = X.T @ y` that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + Gram : None, 'auto', bool, ndarray of shape (n_features, n_features), \ + default=None + Precomputed Gram matrix `X.T @ X`, if `'auto'`, the Gram + matrix is precomputed from the given X, if there are more samples + than features. + + max_iter : int, default=500 + Maximum number of iterations to perform, set to infinity for no limit. + + alpha_min : float, default=0 + Minimum correlation along the path. It corresponds to the + regularization parameter `alpha` in the Lasso. + + method : {'lar', 'lasso'}, default='lar' + Specifies the returned model. Select `'lar'` for Least Angle + Regression, `'lasso'` for the Lasso. + + copy_X : bool, default=True + If `False`, `X` is overwritten. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the `tol` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_Gram : bool, default=True + If `False`, `Gram` is overwritten. + + verbose : int, default=0 + Controls output verbosity. + + return_path : bool, default=True + If `True`, returns the entire path, else returns only the + last point of the path. + + return_n_iter : bool, default=False + Whether to return the number of iterations. + + positive : bool, default=False + Restrict coefficients to be >= 0. + This option is only allowed with method 'lasso'. Note that the model + coefficients will not converge to the ordinary-least-squares solution + for small values of alpha. Only coefficients up to the smallest alpha + value (`alphas_[alphas_ > 0.].min()` when fit_path=True) reached by + the stepwise Lars-Lasso algorithm are typically in congruence with the + solution of the coordinate descent `lasso_path` function. + + Returns + ------- + alphas : ndarray of shape (n_alphas + 1,) + Maximum of covariances (in absolute value) at each iteration. + `n_alphas` is either `max_iter`, `n_features`, or the + number of nodes in the path with `alpha >= alpha_min`, whichever + is smaller. + + active : ndarray of shape (n_alphas,) + Indices of active variables at the end of the path. + + coefs : ndarray of shape (n_features, n_alphas + 1) + Coefficients along the path. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is set + to True. + + See Also + -------- + lars_path_gram : Compute LARS path in the sufficient stats mode. + lasso_path : Compute Lasso path with coordinate descent. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + Lars : Least Angle Regression model a.k.a. LAR. + LassoLarsCV : Cross-validated Lasso, using the LARS algorithm. + LarsCV : Cross-validated Least Angle Regression model. + sklearn.decomposition.sparse_encode : Sparse coding. + + References + ---------- + .. [1] "Least Angle Regression", Efron et al. + http://statweb.stanford.edu/~tibs/ftp/lars.pdf + + .. [2] `Wikipedia entry on the Least-angle regression + `_ + + .. 
[3] `Wikipedia entry on the Lasso + `_ + + Examples + -------- + >>> from sklearn.linear_model import lars_path + >>> from sklearn.datasets import make_regression + >>> X, y, true_coef = make_regression( + ... n_samples=100, n_features=5, n_informative=2, coef=True, random_state=0 + ... ) + >>> true_coef + array([ 0. , 0. , 0. , 97.9..., 45.7...]) + >>> alphas, _, estimated_coef = lars_path(X, y) + >>> alphas.shape + (3,) + >>> estimated_coef + array([[ 0. , 0. , 0. ], + [ 0. , 0. , 0. ], + [ 0. , 0. , 0. ], + [ 0. , 46.96..., 97.99...], + [ 0. , 0. , 45.70...]]) + """ + if X is None and Gram is not None: + raise ValueError( + "X cannot be None if Gram is not None" + "Use lars_path_gram to avoid passing X and y." + ) + return _lars_path_solver( + X=X, + y=y, + Xy=Xy, + Gram=Gram, + n_samples=None, + max_iter=max_iter, + alpha_min=alpha_min, + method=method, + copy_X=copy_X, + eps=eps, + copy_Gram=copy_Gram, + verbose=verbose, + return_path=return_path, + return_n_iter=return_n_iter, + positive=positive, + ) + + +@validate_params( + { + "Xy": [np.ndarray], + "Gram": [np.ndarray], + "n_samples": [Interval(Integral, 0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "alpha_min": [Interval(Real, 0, None, closed="left")], + "method": [StrOptions({"lar", "lasso"})], + "copy_X": ["boolean"], + "eps": [Interval(Real, 0, None, closed="neither"), None], + "copy_Gram": ["boolean"], + "verbose": ["verbose"], + "return_path": ["boolean"], + "return_n_iter": ["boolean"], + "positive": ["boolean"], + }, + prefer_skip_nested_validation=True, +) +def lars_path_gram( + Xy, + Gram, + *, + n_samples, + max_iter=500, + alpha_min=0, + method="lar", + copy_X=True, + eps=np.finfo(float).eps, + copy_Gram=True, + verbose=0, + return_path=True, + return_n_iter=False, + positive=False, +): + """The lars_path in the sufficient stats mode. + + The optimization objective for the case method='lasso' is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + in the case of method='lar', the objective function is only known in + the form of an implicit equation (see discussion in [1]_). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + Xy : ndarray of shape (n_features,) + `Xy = X.T @ y`. + + Gram : ndarray of shape (n_features, n_features) + `Gram = X.T @ X`. + + n_samples : int + Equivalent size of sample. + + max_iter : int, default=500 + Maximum number of iterations to perform, set to infinity for no limit. + + alpha_min : float, default=0 + Minimum correlation along the path. It corresponds to the + regularization parameter alpha parameter in the Lasso. + + method : {'lar', 'lasso'}, default='lar' + Specifies the returned model. Select `'lar'` for Least Angle + Regression, ``'lasso'`` for the Lasso. + + copy_X : bool, default=True + If `False`, `X` is overwritten. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the `tol` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_Gram : bool, default=True + If `False`, `Gram` is overwritten. + + verbose : int, default=0 + Controls output verbosity. + + return_path : bool, default=True + If `return_path==True` returns the entire path, else returns only the + last point of the path. + + return_n_iter : bool, default=False + Whether to return the number of iterations. 
+ + positive : bool, default=False + Restrict coefficients to be >= 0. + This option is only allowed with method 'lasso'. Note that the model + coefficients will not converge to the ordinary-least-squares solution + for small values of alpha. Only coefficients up to the smallest alpha + value (`alphas_[alphas_ > 0.].min()` when `fit_path=True`) reached by + the stepwise Lars-Lasso algorithm are typically in congruence with the + solution of the coordinate descent lasso_path function. + + Returns + ------- + alphas : ndarray of shape (n_alphas + 1,) + Maximum of covariances (in absolute value) at each iteration. + `n_alphas` is either `max_iter`, `n_features` or the + number of nodes in the path with `alpha >= alpha_min`, whichever + is smaller. + + active : ndarray of shape (n_alphas,) + Indices of active variables at the end of the path. + + coefs : ndarray of shape (n_features, n_alphas + 1) + Coefficients along the path. + + n_iter : int + Number of iterations run. Returned only if `return_n_iter` is set + to True. + + See Also + -------- + lars_path_gram : Compute LARS path. + lasso_path : Compute Lasso path with coordinate descent. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + Lars : Least Angle Regression model a.k.a. LAR. + LassoLarsCV : Cross-validated Lasso, using the LARS algorithm. + LarsCV : Cross-validated Least Angle Regression model. + sklearn.decomposition.sparse_encode : Sparse coding. + + References + ---------- + .. [1] "Least Angle Regression", Efron et al. + http://statweb.stanford.edu/~tibs/ftp/lars.pdf + + .. [2] `Wikipedia entry on the Least-angle regression + `_ + + .. [3] `Wikipedia entry on the Lasso + `_ + + Examples + -------- + >>> from sklearn.linear_model import lars_path_gram + >>> from sklearn.datasets import make_regression + >>> X, y, true_coef = make_regression( + ... n_samples=100, n_features=5, n_informative=2, coef=True, random_state=0 + ... ) + >>> true_coef + array([ 0. , 0. , 0. , 97.9..., 45.7...]) + >>> alphas, _, estimated_coef = lars_path_gram(X.T @ y, X.T @ X, n_samples=100) + >>> alphas.shape + (3,) + >>> estimated_coef + array([[ 0. , 0. , 0. ], + [ 0. , 0. , 0. ], + [ 0. , 0. , 0. ], + [ 0. , 46.96..., 97.99...], + [ 0. , 0. , 45.70...]]) + """ + return _lars_path_solver( + X=None, + y=None, + Xy=Xy, + Gram=Gram, + n_samples=n_samples, + max_iter=max_iter, + alpha_min=alpha_min, + method=method, + copy_X=copy_X, + eps=eps, + copy_Gram=copy_Gram, + verbose=verbose, + return_path=return_path, + return_n_iter=return_n_iter, + positive=positive, + ) + + +def _lars_path_solver( + X, + y, + Xy=None, + Gram=None, + n_samples=None, + max_iter=500, + alpha_min=0, + method="lar", + copy_X=True, + eps=np.finfo(float).eps, + copy_Gram=True, + verbose=0, + return_path=True, + return_n_iter=False, + positive=False, +): + """Compute Least Angle Regression or Lasso path using LARS algorithm [1] + + The optimization objective for the case method='lasso' is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + in the case of method='lar', the objective function is only known in + the form of an implicit equation (see discussion in [1]) + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : None or ndarray of shape (n_samples, n_features) + Input data. Note that if X is None then Gram must be specified, + i.e., cannot be None or False. + + y : None or ndarray of shape (n_samples,) + Input targets. 
+ + Xy : array-like of shape (n_features,), default=None + `Xy = np.dot(X.T, y)` that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + Gram : None, 'auto' or array-like of shape (n_features, n_features), \ + default=None + Precomputed Gram matrix `(X' * X)`, if ``'auto'``, the Gram + matrix is precomputed from the given X, if there are more samples + than features. + + n_samples : int or float, default=None + Equivalent size of sample. If `None`, it will be `n_samples`. + + max_iter : int, default=500 + Maximum number of iterations to perform, set to infinity for no limit. + + alpha_min : float, default=0 + Minimum correlation along the path. It corresponds to the + regularization parameter alpha parameter in the Lasso. + + method : {'lar', 'lasso'}, default='lar' + Specifies the returned model. Select ``'lar'`` for Least Angle + Regression, ``'lasso'`` for the Lasso. + + copy_X : bool, default=True + If ``False``, ``X`` is overwritten. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_Gram : bool, default=True + If ``False``, ``Gram`` is overwritten. + + verbose : int, default=0 + Controls output verbosity. + + return_path : bool, default=True + If ``return_path==True`` returns the entire path, else returns only the + last point of the path. + + return_n_iter : bool, default=False + Whether to return the number of iterations. + + positive : bool, default=False + Restrict coefficients to be >= 0. + This option is only allowed with method 'lasso'. Note that the model + coefficients will not converge to the ordinary-least-squares solution + for small values of alpha. Only coefficients up to the smallest alpha + value (``alphas_[alphas_ > 0.].min()`` when fit_path=True) reached by + the stepwise Lars-Lasso algorithm are typically in congruence with the + solution of the coordinate descent lasso_path function. + + Returns + ------- + alphas : array-like of shape (n_alphas + 1,) + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter``, ``n_features`` or the + number of nodes in the path with ``alpha >= alpha_min``, whichever + is smaller. + + active : array-like of shape (n_alphas,) + Indices of active variables at the end of the path. + + coefs : array-like of shape (n_features, n_alphas + 1) + Coefficients along the path + + n_iter : int + Number of iterations run. Returned only if return_n_iter is set + to True. + + See Also + -------- + lasso_path + LassoLars + Lars + LassoLarsCV + LarsCV + sklearn.decomposition.sparse_encode + + References + ---------- + .. [1] "Least Angle Regression", Efron et al. + http://statweb.stanford.edu/~tibs/ftp/lars.pdf + + .. [2] `Wikipedia entry on the Least-angle regression + `_ + + .. 
[3] `Wikipedia entry on the Lasso + `_ + + """ + if method == "lar" and positive: + raise ValueError("Positive constraint not supported for 'lar' coding method.") + + n_samples = n_samples if n_samples is not None else y.size + + if Xy is None: + Cov = np.dot(X.T, y) + else: + Cov = Xy.copy() + + if Gram is None or Gram is False: + Gram = None + if X is None: + raise ValueError("X and Gram cannot both be unspecified.") + elif isinstance(Gram, str) and Gram == "auto" or Gram is True: + if Gram is True or X.shape[0] > X.shape[1]: + Gram = np.dot(X.T, X) + else: + Gram = None + elif copy_Gram: + Gram = Gram.copy() + + if Gram is None: + n_features = X.shape[1] + else: + n_features = Cov.shape[0] + if Gram.shape != (n_features, n_features): + raise ValueError("The shapes of the inputs Gram and Xy do not match.") + + if copy_X and X is not None and Gram is None: + # force copy. setting the array to be fortran-ordered + # speeds up the calculation of the (partial) Gram matrix + # and allows to easily swap columns + X = X.copy("F") + + max_features = min(max_iter, n_features) + + dtypes = set(a.dtype for a in (X, y, Xy, Gram) if a is not None) + if len(dtypes) == 1: + # use the precision level of input data if it is consistent + return_dtype = next(iter(dtypes)) + else: + # fallback to double precision otherwise + return_dtype = np.float64 + + if return_path: + coefs = np.zeros((max_features + 1, n_features), dtype=return_dtype) + alphas = np.zeros(max_features + 1, dtype=return_dtype) + else: + coef, prev_coef = ( + np.zeros(n_features, dtype=return_dtype), + np.zeros(n_features, dtype=return_dtype), + ) + alpha, prev_alpha = ( + np.array([0.0], dtype=return_dtype), + np.array([0.0], dtype=return_dtype), + ) + # above better ideas? + + n_iter, n_active = 0, 0 + active, indices = list(), np.arange(n_features) + # holds the sign of covariance + sign_active = np.empty(max_features, dtype=np.int8) + drop = False + + # will hold the cholesky factorization. Only lower part is + # referenced. 
+ if Gram is None: + L = np.empty((max_features, max_features), dtype=X.dtype) + swap, nrm2 = linalg.get_blas_funcs(("swap", "nrm2"), (X,)) + else: + L = np.empty((max_features, max_features), dtype=Gram.dtype) + swap, nrm2 = linalg.get_blas_funcs(("swap", "nrm2"), (Cov,)) + (solve_cholesky,) = get_lapack_funcs(("potrs",), (L,)) + + if verbose: + if verbose > 1: + print("Step\t\tAdded\t\tDropped\t\tActive set size\t\tC") + else: + sys.stdout.write(".") + sys.stdout.flush() + + tiny32 = np.finfo(np.float32).tiny # to avoid division by 0 warning + cov_precision = np.finfo(Cov.dtype).precision + equality_tolerance = np.finfo(np.float32).eps + + if Gram is not None: + Gram_copy = Gram.copy() + Cov_copy = Cov.copy() + + while True: + if Cov.size: + if positive: + C_idx = np.argmax(Cov) + else: + C_idx = np.argmax(np.abs(Cov)) + + C_ = Cov[C_idx] + + if positive: + C = C_ + else: + C = np.fabs(C_) + else: + C = 0.0 + + if return_path: + alpha = alphas[n_iter, np.newaxis] + coef = coefs[n_iter] + prev_alpha = alphas[n_iter - 1, np.newaxis] + prev_coef = coefs[n_iter - 1] + + alpha[0] = C / n_samples + if alpha[0] <= alpha_min + equality_tolerance: # early stopping + if abs(alpha[0] - alpha_min) > equality_tolerance: + # interpolation factor 0 <= ss < 1 + if n_iter > 0: + # In the first iteration, all alphas are zero, the formula + # below would make ss a NaN + ss = (prev_alpha[0] - alpha_min) / (prev_alpha[0] - alpha[0]) + coef[:] = prev_coef + ss * (coef - prev_coef) + alpha[0] = alpha_min + if return_path: + coefs[n_iter] = coef + break + + if n_iter >= max_iter or n_active >= n_features: + break + if not drop: + ########################################################## + # Append x_j to the Cholesky factorization of (Xa * Xa') # + # # + # ( L 0 ) # + # L -> ( ) , where L * w = Xa' x_j # + # ( w z ) and z = ||x_j|| # + # # + ########################################################## + + if positive: + sign_active[n_active] = np.ones_like(C_) + else: + sign_active[n_active] = np.sign(C_) + m, n = n_active, C_idx + n_active + + Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0]) + indices[n], indices[m] = indices[m], indices[n] + Cov_not_shortened = Cov + Cov = Cov[1:] # remove Cov[0] + + if Gram is None: + X.T[n], X.T[m] = swap(X.T[n], X.T[m]) + c = nrm2(X.T[n_active]) ** 2 + L[n_active, :n_active] = np.dot(X.T[n_active], X.T[:n_active].T) + else: + # swap does only work inplace if matrix is fortran + # contiguous ... + Gram[m], Gram[n] = swap(Gram[m], Gram[n]) + Gram[:, m], Gram[:, n] = swap(Gram[:, m], Gram[:, n]) + c = Gram[n_active, n_active] + L[n_active, :n_active] = Gram[n_active, :n_active] + + # Update the cholesky decomposition for the Gram matrix + if n_active: + linalg.solve_triangular( + L[:n_active, :n_active], + L[n_active, :n_active], + trans=0, + lower=1, + overwrite_b=True, + **SOLVE_TRIANGULAR_ARGS, + ) + + v = np.dot(L[n_active, :n_active], L[n_active, :n_active]) + diag = max(np.sqrt(np.abs(c - v)), eps) + L[n_active, n_active] = diag + + if diag < 1e-7: + # The system is becoming too ill-conditioned. + # We have degenerate vectors in our active set. + # We'll 'drop for good' the last regressor added. + warnings.warn( + "Regressors in active set degenerate. " + "Dropping a regressor, after %i iterations, " + "i.e. alpha=%.3e, " + "with an active set of %i regressors, and " + "the smallest cholesky pivot element being %.3e." + " Reduce max_iter or increase eps parameters." 
+ % (n_iter, alpha.item(), n_active, diag), + ConvergenceWarning, + ) + + # XXX: need to figure a 'drop for good' way + Cov = Cov_not_shortened + Cov[0] = 0 + Cov[C_idx], Cov[0] = swap(Cov[C_idx], Cov[0]) + continue + + active.append(indices[n_active]) + n_active += 1 + + if verbose > 1: + print( + "%s\t\t%s\t\t%s\t\t%s\t\t%s" % (n_iter, active[-1], "", n_active, C) + ) + + if method == "lasso" and n_iter > 0 and prev_alpha[0] < alpha[0]: + # alpha is increasing. This is because the updates of Cov are + # bringing in too much numerical error that is greater than + # than the remaining correlation with the + # regressors. Time to bail out + warnings.warn( + "Early stopping the lars path, as the residues " + "are small and the current value of alpha is no " + "longer well controlled. %i iterations, alpha=%.3e, " + "previous alpha=%.3e, with an active set of %i " + "regressors." % (n_iter, alpha.item(), prev_alpha.item(), n_active), + ConvergenceWarning, + ) + break + + # least squares solution + least_squares, _ = solve_cholesky( + L[:n_active, :n_active], sign_active[:n_active], lower=True + ) + + if least_squares.size == 1 and least_squares == 0: + # This happens because sign_active[:n_active] = 0 + least_squares[...] = 1 + AA = 1.0 + else: + # is this really needed ? + AA = 1.0 / np.sqrt(np.sum(least_squares * sign_active[:n_active])) + + if not np.isfinite(AA): + # L is too ill-conditioned + i = 0 + L_ = L[:n_active, :n_active].copy() + while not np.isfinite(AA): + L_.flat[:: n_active + 1] += (2**i) * eps + least_squares, _ = solve_cholesky( + L_, sign_active[:n_active], lower=True + ) + tmp = max(np.sum(least_squares * sign_active[:n_active]), eps) + AA = 1.0 / np.sqrt(tmp) + i += 1 + least_squares *= AA + + if Gram is None: + # equiangular direction of variables in the active set + eq_dir = np.dot(X.T[:n_active].T, least_squares) + # correlation between each unactive variables and + # eqiangular vector + corr_eq_dir = np.dot(X.T[n_active:], eq_dir) + else: + # if huge number of features, this takes 50% of time, I + # think could be avoided if we just update it using an + # orthogonal (QR) decomposition of X + corr_eq_dir = np.dot(Gram[:n_active, n_active:].T, least_squares) + + # Explicit rounding can be necessary to avoid `np.argmax(Cov)` yielding + # unstable results because of rounding errors. 
+ np.around(corr_eq_dir, decimals=cov_precision, out=corr_eq_dir) + + g1 = arrayfuncs.min_pos((C - Cov) / (AA - corr_eq_dir + tiny32)) + if positive: + gamma_ = min(g1, C / AA) + else: + g2 = arrayfuncs.min_pos((C + Cov) / (AA + corr_eq_dir + tiny32)) + gamma_ = min(g1, g2, C / AA) + + # TODO: better names for these variables: z + drop = False + z = -coef[active] / (least_squares + tiny32) + z_pos = arrayfuncs.min_pos(z) + if z_pos < gamma_: + # some coefficients have changed sign + idx = np.where(z == z_pos)[0][::-1] + + # update the sign, important for LAR + sign_active[idx] = -sign_active[idx] + + if method == "lasso": + gamma_ = z_pos + drop = True + + n_iter += 1 + + if return_path: + if n_iter >= coefs.shape[0]: + del coef, alpha, prev_alpha, prev_coef + # resize the coefs and alphas array + add_features = 2 * max(1, (max_features - n_active)) + coefs = np.resize(coefs, (n_iter + add_features, n_features)) + coefs[-add_features:] = 0 + alphas = np.resize(alphas, n_iter + add_features) + alphas[-add_features:] = 0 + coef = coefs[n_iter] + prev_coef = coefs[n_iter - 1] + else: + # mimic the effect of incrementing n_iter on the array references + prev_coef = coef + prev_alpha[0] = alpha[0] + coef = np.zeros_like(coef) + + coef[active] = prev_coef[active] + gamma_ * least_squares + + # update correlations + Cov -= gamma_ * corr_eq_dir + + # See if any coefficient has changed sign + if drop and method == "lasso": + # handle the case when idx is not length of 1 + for ii in idx: + arrayfuncs.cholesky_delete(L[:n_active, :n_active], ii) + + n_active -= 1 + # handle the case when idx is not length of 1 + drop_idx = [active.pop(ii) for ii in idx] + + if Gram is None: + # propagate dropped variable + for ii in idx: + for i in range(ii, n_active): + X.T[i], X.T[i + 1] = swap(X.T[i], X.T[i + 1]) + # yeah this is stupid + indices[i], indices[i + 1] = indices[i + 1], indices[i] + + # TODO: this could be updated + residual = y - np.dot(X[:, :n_active], coef[active]) + temp = np.dot(X.T[n_active], residual) + + Cov = np.r_[temp, Cov] + else: + for ii in idx: + for i in range(ii, n_active): + indices[i], indices[i + 1] = indices[i + 1], indices[i] + Gram[i], Gram[i + 1] = swap(Gram[i], Gram[i + 1]) + Gram[:, i], Gram[:, i + 1] = swap(Gram[:, i], Gram[:, i + 1]) + + # Cov_n = Cov_j + x_j * X + increment(betas) TODO: + # will this still work with multiple drops ? + + # recompute covariance. Probably could be done better + # wrong as Xy is not swapped with the rest of variables + + # TODO: this could be updated + temp = Cov_copy[drop_idx] - np.dot(Gram_copy[drop_idx], coef) + Cov = np.r_[temp, Cov] + + sign_active = np.delete(sign_active, idx) + sign_active = np.append(sign_active, 0.0) # just to maintain size + if verbose > 1: + print( + "%s\t\t%s\t\t%s\t\t%s\t\t%s" + % (n_iter, "", drop_idx, n_active, abs(temp)) + ) + + if return_path: + # resize coefs in case of early stop + alphas = alphas[: n_iter + 1] + coefs = coefs[: n_iter + 1] + + if return_n_iter: + return alphas, active, coefs.T, n_iter + else: + return alphas, active, coefs.T + else: + if return_n_iter: + return alpha, active, coef, n_iter + else: + return alpha, active, coef + + +############################################################################### +# Estimator classes + + +class Lars(MultiOutputMixin, RegressorMixin, LinearModel): + """Least Angle Regression model a.k.a. LAR. + + Read more in the :ref:`User Guide `. 
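+
+    The underlying solver adds at most one variable to the active set per
+    iteration, so capping ``n_nonzero_coefs`` bounds the number of non-zero
+    coefficients. A rough sketch (illustrative values only):
+
+    >>> import numpy as np
+    >>> from sklearn.datasets import make_regression
+    >>> from sklearn.linear_model import Lars
+    >>> X, y = make_regression(n_samples=50, n_features=10, n_informative=3,
+    ...                        random_state=0)
+    >>> for k in (1, 2, 3):
+    ...     n_active = int(np.sum(Lars(n_nonzero_coefs=k).fit(X, y).coef_ != 0))
+    ...     assert n_active <= k
+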
+ + Parameters + ---------- + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + precompute : bool, 'auto' or array-like , default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + n_nonzero_coefs : int, default=500 + Target number of non-zero coefficients. Use ``np.inf`` for no limit. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + fit_path : bool, default=True + If True the full path is stored in the ``coef_path_`` attribute. + If you compute the solution for a large problem or many targets, + setting ``fit_path`` to ``False`` will lead to a speedup, especially + with a small alpha. + + jitter : float, default=None + Upper bound on a uniform noise parameter to be added to the + `y` values, to satisfy the model's assumption of + one-at-a-time computations. Might help with stability. + + .. versionadded:: 0.23 + + random_state : int, RandomState instance or None, default=None + Determines random number generation for jittering. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. Ignored if `jitter` is None. + + .. versionadded:: 0.23 + + Attributes + ---------- + alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter``, ``n_features`` or the + number of nodes in the path with ``alpha >= alpha_min``, whichever + is smaller. If this is a list of array-like, the length of the outer + list is `n_targets`. + + active_ : list of shape (n_alphas,) or list of such lists + Indices of active variables at the end of the path. + If this is a list of list, the length of the outer list is `n_targets`. + + coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \ + of such arrays + The varying values of the coefficients along the path. It is not + present if the ``fit_path`` parameter is ``False``. If this is a list + of array-like, the length of the outer list is `n_targets`. + + coef_ : array-like of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the formulation formula). + + intercept_ : float or array-like of shape (n_targets,) + Independent term in decision function. + + n_iter_ : array-like or int + The number of iterations taken by lars_path to find the + grid of alphas for each target. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path: Compute Least Angle Regression or Lasso + path using LARS algorithm. + LarsCV : Cross-validated Least Angle Regression model. + sklearn.decomposition.sparse_encode : Sparse coding. 
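+
+    The ``precompute='auto'`` heuristic documented above roughly amounts to
+    the following rule (a plain NumPy sketch, not the internal helper itself):
+
+    >>> import numpy as np
+    >>> X = np.random.RandomState(0).randn(100, 5)
+    >>> # with more samples than features (or several targets) the Gram
+    >>> # matrix X.T @ X is precomputed once and reused along the path
+    >>> gram = X.T @ X if X.shape[0] > X.shape[1] else None
+    >>> gram.shape
+    (5, 5)
+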
+ + Examples + -------- + >>> from sklearn import linear_model + >>> reg = linear_model.Lars(n_nonzero_coefs=1) + >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1.1111, 0, -1.1111]) + Lars(n_nonzero_coefs=1) + >>> print(reg.coef_) + [ 0. -1.11...] + """ + + _parameter_constraints: dict = { + "fit_intercept": ["boolean"], + "verbose": ["verbose"], + "precompute": ["boolean", StrOptions({"auto"}), np.ndarray, Hidden(None)], + "n_nonzero_coefs": [Interval(Integral, 1, None, closed="left")], + "eps": [Interval(Real, 0, None, closed="left")], + "copy_X": ["boolean"], + "fit_path": ["boolean"], + "jitter": [Interval(Real, 0, None, closed="left"), None], + "random_state": ["random_state"], + } + + method = "lar" + positive = False + + def __init__( + self, + *, + fit_intercept=True, + verbose=False, + precompute="auto", + n_nonzero_coefs=500, + eps=np.finfo(float).eps, + copy_X=True, + fit_path=True, + jitter=None, + random_state=None, + ): + self.fit_intercept = fit_intercept + self.verbose = verbose + self.precompute = precompute + self.n_nonzero_coefs = n_nonzero_coefs + self.eps = eps + self.copy_X = copy_X + self.fit_path = fit_path + self.jitter = jitter + self.random_state = random_state + + @staticmethod + def _get_gram(precompute, X, y): + if (not hasattr(precompute, "__array__")) and ( + (precompute is True) + or (precompute == "auto" and X.shape[0] > X.shape[1]) + or (precompute == "auto" and y.shape[1] > 1) + ): + precompute = np.dot(X.T, X) + + return precompute + + def _fit(self, X, y, max_iter, alpha, fit_path, Xy=None): + """Auxiliary method to fit the model using X, y as training data""" + n_features = X.shape[1] + + X, y, X_offset, y_offset, X_scale = _preprocess_data( + X, y, fit_intercept=self.fit_intercept, copy=self.copy_X + ) + + if y.ndim == 1: + y = y[:, np.newaxis] + + n_targets = y.shape[1] + + Gram = self._get_gram(self.precompute, X, y) + + self.alphas_ = [] + self.n_iter_ = [] + self.coef_ = np.empty((n_targets, n_features), dtype=X.dtype) + + if fit_path: + self.active_ = [] + self.coef_path_ = [] + for k in range(n_targets): + this_Xy = None if Xy is None else Xy[:, k] + alphas, active, coef_path, n_iter_ = lars_path( + X, + y[:, k], + Gram=Gram, + Xy=this_Xy, + copy_X=self.copy_X, + copy_Gram=True, + alpha_min=alpha, + method=self.method, + verbose=max(0, self.verbose - 1), + max_iter=max_iter, + eps=self.eps, + return_path=True, + return_n_iter=True, + positive=self.positive, + ) + self.alphas_.append(alphas) + self.active_.append(active) + self.n_iter_.append(n_iter_) + self.coef_path_.append(coef_path) + self.coef_[k] = coef_path[:, -1] + + if n_targets == 1: + self.alphas_, self.active_, self.coef_path_, self.coef_ = [ + a[0] + for a in (self.alphas_, self.active_, self.coef_path_, self.coef_) + ] + self.n_iter_ = self.n_iter_[0] + else: + for k in range(n_targets): + this_Xy = None if Xy is None else Xy[:, k] + alphas, _, self.coef_[k], n_iter_ = lars_path( + X, + y[:, k], + Gram=Gram, + Xy=this_Xy, + copy_X=self.copy_X, + copy_Gram=True, + alpha_min=alpha, + method=self.method, + verbose=max(0, self.verbose - 1), + max_iter=max_iter, + eps=self.eps, + return_path=False, + return_n_iter=True, + positive=self.positive, + ) + self.alphas_.append(alphas) + self.n_iter_.append(n_iter_) + if n_targets == 1: + self.alphas_ = self.alphas_[0] + self.n_iter_ = self.n_iter_[0] + + self._set_intercept(X_offset, y_offset, X_scale) + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, Xy=None): + """Fit the model using X, y as training data. 
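+
+        A minimal illustration (sketch only) of fitting and then inspecting
+        the path attributes documented on the class:
+
+        >>> from sklearn.datasets import make_regression
+        >>> from sklearn.linear_model import Lars
+        >>> X, y = make_regression(n_samples=50, n_features=8, random_state=0)
+        >>> est = Lars(n_nonzero_coefs=3).fit(X, y)
+        >>> est.coef_path_.shape[0] == X.shape[1]
+        True
+        >>> len(est.alphas_) == est.coef_path_.shape[1]
+        True
+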
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target values. + + Xy : array-like of shape (n_features,) or (n_features, n_targets), \ + default=None + Xy = np.dot(X.T, y) that can be precomputed. It is useful + only when the Gram matrix is precomputed. + + Returns + ------- + self : object + Returns an instance of self. + """ + X, y = validate_data( + self, X, y, force_writeable=True, y_numeric=True, multi_output=True + ) + + alpha = getattr(self, "alpha", 0.0) + if hasattr(self, "n_nonzero_coefs"): + alpha = 0.0 # n_nonzero_coefs parametrization takes priority + max_iter = self.n_nonzero_coefs + else: + max_iter = self.max_iter + + if self.jitter is not None: + rng = check_random_state(self.random_state) + + noise = rng.uniform(high=self.jitter, size=len(y)) + y = y + noise + + self._fit( + X, + y, + max_iter=max_iter, + alpha=alpha, + fit_path=self.fit_path, + Xy=Xy, + ) + + return self + + +class LassoLars(Lars): + """Lasso model fit with Least Angle Regression a.k.a. Lars. + + It is a Linear Model trained with an L1 prior as regularizer. + + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + alpha : float, default=1.0 + Constant that multiplies the penalty term. Defaults to 1.0. + ``alpha = 0`` is equivalent to an ordinary least square, solved + by :class:`LinearRegression`. For numerical reasons, using + ``alpha = 0`` with the LassoLars object is not advised and you + should prefer the LinearRegression object. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + precompute : bool, 'auto' or array-like, default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + max_iter : int, default=500 + Maximum number of iterations to perform. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + fit_path : bool, default=True + If ``True`` the full path is stored in the ``coef_path_`` attribute. + If you compute the solution for a large problem or many targets, + setting ``fit_path`` to ``False`` will lead to a speedup, especially + with a small alpha. + + positive : bool, default=False + Restrict coefficients to be >= 0. Be aware that you might want to + remove fit_intercept which is set True by default. + Under the positive restriction the model coefficients will not converge + to the ordinary-least-squares solution for small values of alpha. + Only coefficients up to the smallest alpha value (``alphas_[alphas_ > + 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso + algorithm are typically in congruence with the solution of the + coordinate descent Lasso estimator. 
+ + jitter : float, default=None + Upper bound on a uniform noise parameter to be added to the + `y` values, to satisfy the model's assumption of + one-at-a-time computations. Might help with stability. + + .. versionadded:: 0.23 + + random_state : int, RandomState instance or None, default=None + Determines random number generation for jittering. Pass an int + for reproducible output across multiple function calls. + See :term:`Glossary `. Ignored if `jitter` is None. + + .. versionadded:: 0.23 + + Attributes + ---------- + alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter``, ``n_features`` or the + number of nodes in the path with ``alpha >= alpha_min``, whichever + is smaller. If this is a list of array-like, the length of the outer + list is `n_targets`. + + active_ : list of length n_alphas or list of such lists + Indices of active variables at the end of the path. + If this is a list of list, the length of the outer list is `n_targets`. + + coef_path_ : array-like of shape (n_features, n_alphas + 1) or list \ + of such arrays + If a list is passed it's expected to be one of n_targets such arrays. + The varying values of the coefficients along the path. It is not + present if the ``fit_path`` parameter is ``False``. If this is a list + of array-like, the length of the outer list is `n_targets`. + + coef_ : array-like of shape (n_features,) or (n_targets, n_features) + Parameter vector (w in the formulation formula). + + intercept_ : float or array-like of shape (n_targets,) + Independent term in decision function. + + n_iter_ : array-like or int + The number of iterations taken by lars_path to find the + grid of alphas for each target. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : Linear Model trained with L1 prior as + regularizer (aka the Lasso). + LassoCV : Lasso linear model with iterative fitting + along a regularization path. + LassoLarsCV: Cross-validated Lasso, using the LARS algorithm. + LassoLarsIC : Lasso model fit with Lars using BIC + or AIC for model selection. + sklearn.decomposition.sparse_encode : Sparse coding. + + Examples + -------- + >>> from sklearn import linear_model + >>> reg = linear_model.LassoLars(alpha=0.01) + >>> reg.fit([[-1, 1], [0, 0], [1, 1]], [-1, 0, -1]) + LassoLars(alpha=0.01) + >>> print(reg.coef_) + [ 0. -0.955...] 
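+
+    Because the objective above is the same one minimized by the
+    coordinate-descent :class:`Lasso`, the two estimators are expected to
+    agree closely on well-conditioned data. A sketch of that check (the
+    tolerance is deliberately loose, since the two solvers only agree up to
+    convergence error):
+
+    >>> import numpy as np
+    >>> from sklearn.datasets import make_regression
+    >>> from sklearn.linear_model import Lasso, LassoLars
+    >>> X, y = make_regression(n_samples=100, n_features=10, random_state=0)
+    >>> coef_lars = LassoLars(alpha=0.1).fit(X, y).coef_
+    >>> coef_cd = Lasso(alpha=0.1).fit(X, y).coef_
+    >>> bool(np.allclose(coef_lars, coef_cd, atol=0.1))
+    True
+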
+ """ + + _parameter_constraints: dict = { + **Lars._parameter_constraints, + "alpha": [Interval(Real, 0, None, closed="left")], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "positive": ["boolean"], + } + _parameter_constraints.pop("n_nonzero_coefs") + + method = "lasso" + + def __init__( + self, + alpha=1.0, + *, + fit_intercept=True, + verbose=False, + precompute="auto", + max_iter=500, + eps=np.finfo(float).eps, + copy_X=True, + fit_path=True, + positive=False, + jitter=None, + random_state=None, + ): + self.alpha = alpha + self.fit_intercept = fit_intercept + self.max_iter = max_iter + self.verbose = verbose + self.positive = positive + self.precompute = precompute + self.copy_X = copy_X + self.eps = eps + self.fit_path = fit_path + self.jitter = jitter + self.random_state = random_state + + +############################################################################### +# Cross-validated estimator classes + + +def _check_copy_and_writeable(array, copy=False): + if copy or not array.flags.writeable: + return array.copy() + return array + + +def _lars_path_residues( + X_train, + y_train, + X_test, + y_test, + Gram=None, + copy=True, + method="lar", + verbose=False, + fit_intercept=True, + max_iter=500, + eps=np.finfo(float).eps, + positive=False, +): + """Compute the residues on left-out data for a full LARS path + + Parameters + ----------- + X_train : array-like of shape (n_samples, n_features) + The data to fit the LARS on + + y_train : array-like of shape (n_samples,) + The target variable to fit LARS on + + X_test : array-like of shape (n_samples, n_features) + The data to compute the residues on + + y_test : array-like of shape (n_samples,) + The target variable to compute the residues on + + Gram : None, 'auto' or array-like of shape (n_features, n_features), \ + default=None + Precomputed Gram matrix (X' * X), if ``'auto'``, the Gram + matrix is precomputed from the given X, if there are more samples + than features + + copy : bool, default=True + Whether X_train, X_test, y_train and y_test should be copied; + if False, they may be overwritten. + + method : {'lar' , 'lasso'}, default='lar' + Specifies the returned model. Select ``'lar'`` for Least Angle + Regression, ``'lasso'`` for the Lasso. + + verbose : bool or int, default=False + Sets the amount of verbosity + + fit_intercept : bool, default=True + whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + positive : bool, default=False + Restrict coefficients to be >= 0. Be aware that you might want to + remove fit_intercept which is set True by default. + See reservations for using this option in combination with method + 'lasso' for expected small values of alpha in the doc of LassoLarsCV + and LassoLarsIC. + + max_iter : int, default=500 + Maximum number of iterations to perform. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + Returns + -------- + alphas : array-like of shape (n_alphas,) + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter`` or ``n_features``, whichever + is smaller. + + active : list + Indices of active variables at the end of the path. 
+ + coefs : array-like of shape (n_features, n_alphas) + Coefficients along the path + + residues : array-like of shape (n_alphas, n_samples) + Residues of the prediction on the test data + """ + X_train = _check_copy_and_writeable(X_train, copy) + y_train = _check_copy_and_writeable(y_train, copy) + X_test = _check_copy_and_writeable(X_test, copy) + y_test = _check_copy_and_writeable(y_test, copy) + + if fit_intercept: + X_mean = X_train.mean(axis=0) + X_train -= X_mean + X_test -= X_mean + y_mean = y_train.mean(axis=0) + y_train = as_float_array(y_train, copy=False) + y_train -= y_mean + y_test = as_float_array(y_test, copy=False) + y_test -= y_mean + + alphas, active, coefs = lars_path( + X_train, + y_train, + Gram=Gram, + copy_X=False, + copy_Gram=False, + method=method, + verbose=max(0, verbose - 1), + max_iter=max_iter, + eps=eps, + positive=positive, + ) + residues = np.dot(X_test, coefs) - y_test[:, np.newaxis] + return alphas, active, coefs, residues.T + + +class LarsCV(Lars): + """Cross-validated Least Angle Regression model. + + See glossary entry for :term:`cross-validation estimator`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + max_iter : int, default=500 + Maximum number of iterations to perform. + + precompute : bool, 'auto' or array-like , default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram matrix + cannot be passed as argument since we will use only subsets of X. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + max_n_alphas : int, default=1000 + The maximum number of points on the path used to compute the + residuals in the cross-validation. + + n_jobs : int or None, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If ``True``, X will be copied; else, it may be overwritten. + + Attributes + ---------- + active_ : list of length n_alphas or list of such lists + Indices of active variables at the end of the path. + If this is a list of lists, the outer list length is `n_targets`. 
+ + coef_ : array-like of shape (n_features,) + parameter vector (w in the formulation formula) + + intercept_ : float + independent term in decision function + + coef_path_ : array-like of shape (n_features, n_alphas) + the varying values of the coefficients along the path + + alpha_ : float + the estimated regularization parameter alpha + + alphas_ : array-like of shape (n_alphas,) + the different values of alpha along the path + + cv_alphas_ : array-like of shape (n_cv_alphas,) + all the values of alpha along the path for the different folds + + mse_path_ : array-like of shape (n_folds, n_cv_alphas) + the mean square error on left-out for each fold along the path + (alpha values given by ``cv_alphas``) + + n_iter_ : array-like or int + the number of iterations run by Lars with the optimal alpha. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : Linear Model trained with L1 prior as + regularizer (aka the Lasso). + LassoCV : Lasso linear model with iterative fitting + along a regularization path. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoLarsIC : Lasso model fit with Lars using BIC + or AIC for model selection. + sklearn.decomposition.sparse_encode : Sparse coding. + + Notes + ----- + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + Examples + -------- + >>> from sklearn.linear_model import LarsCV + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(n_samples=200, noise=4.0, random_state=0) + >>> reg = LarsCV(cv=5).fit(X, y) + >>> reg.score(X, y) + 0.9996... + >>> reg.alpha_ + np.float64(0.2961...) + >>> reg.predict(X[:1,]) + array([154.3996...]) + """ + + _parameter_constraints: dict = { + **Lars._parameter_constraints, + "max_iter": [Interval(Integral, 0, None, closed="left")], + "cv": ["cv_object"], + "max_n_alphas": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + } + + for parameter in ["n_nonzero_coefs", "jitter", "fit_path", "random_state"]: + _parameter_constraints.pop(parameter) + + method = "lar" + + def __init__( + self, + *, + fit_intercept=True, + verbose=False, + max_iter=500, + precompute="auto", + cv=None, + max_n_alphas=1000, + n_jobs=None, + eps=np.finfo(float).eps, + copy_X=True, + ): + self.max_iter = max_iter + self.cv = cv + self.max_n_alphas = max_n_alphas + self.n_jobs = n_jobs + super().__init__( + fit_intercept=fit_intercept, + verbose=verbose, + precompute=precompute, + n_nonzero_coefs=500, + eps=eps, + copy_X=copy_X, + fit_path=True, + ) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.target_tags.multi_output = False + return tags + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, **params): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. + + **params : dict, default=None + Parameters to be passed to the CV splitter. + + .. 
versionadded:: 1.4 + Only available if `enable_metadata_routing=True`, + which can be set by using + ``sklearn.set_config(enable_metadata_routing=True)``. + See :ref:`Metadata Routing User Guide ` for + more details. + + Returns + ------- + self : object + Returns an instance of self. + """ + _raise_for_params(params, self, "fit") + + X, y = validate_data(self, X, y, force_writeable=True, y_numeric=True) + X = as_float_array(X, copy=self.copy_X) + y = as_float_array(y, copy=self.copy_X) + + # init cross-validation generator + cv = check_cv(self.cv, classifier=False) + + if _routing_enabled(): + routed_params = process_routing(self, "fit", **params) + else: + routed_params = Bunch(splitter=Bunch(split={})) + + # As we use cross-validation, the Gram matrix is not precomputed here + Gram = self.precompute + if hasattr(Gram, "__array__"): + warnings.warn( + 'Parameter "precompute" cannot be an array in ' + '%s. Automatically switch to "auto" instead.' % self.__class__.__name__ + ) + Gram = "auto" + + cv_paths = Parallel(n_jobs=self.n_jobs, verbose=self.verbose)( + delayed(_lars_path_residues)( + X[train], + y[train], + X[test], + y[test], + Gram=Gram, + copy=False, + method=self.method, + verbose=max(0, self.verbose - 1), + fit_intercept=self.fit_intercept, + max_iter=self.max_iter, + eps=self.eps, + positive=self.positive, + ) + for train, test in cv.split(X, y, **routed_params.splitter.split) + ) + all_alphas = np.concatenate(list(zip(*cv_paths))[0]) + # Unique also sorts + all_alphas = np.unique(all_alphas) + # Take at most max_n_alphas values + stride = int(max(1, int(len(all_alphas) / float(self.max_n_alphas)))) + all_alphas = all_alphas[::stride] + + mse_path = np.empty((len(all_alphas), len(cv_paths))) + for index, (alphas, _, _, residues) in enumerate(cv_paths): + alphas = alphas[::-1] + residues = residues[::-1] + if alphas[0] != 0: + alphas = np.r_[0, alphas] + residues = np.r_[residues[0, np.newaxis], residues] + if alphas[-1] != all_alphas[-1]: + alphas = np.r_[alphas, all_alphas[-1]] + residues = np.r_[residues, residues[-1, np.newaxis]] + this_residues = interpolate.interp1d(alphas, residues, axis=0)(all_alphas) + this_residues **= 2 + mse_path[:, index] = np.mean(this_residues, axis=-1) + + mask = np.all(np.isfinite(mse_path), axis=-1) + all_alphas = all_alphas[mask] + mse_path = mse_path[mask] + # Select the alpha that minimizes left-out error + i_best_alpha = np.argmin(mse_path.mean(axis=-1)) + best_alpha = all_alphas[i_best_alpha] + + # Store our parameters + self.alpha_ = best_alpha + self.cv_alphas_ = all_alphas + self.mse_path_ = mse_path + + # Now compute the full model using best_alpha + # it will call a lasso internally when self if LassoLarsCV + # as self.method == 'lasso' + self._fit( + X, + y, + max_iter=self.max_iter, + alpha=best_alpha, + Xy=None, + fit_path=True, + ) + return self + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + router = MetadataRouter(owner=self.__class__.__name__).add( + splitter=check_cv(self.cv), + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + return router + + +class LassoLarsCV(LarsCV): + """Cross-validated Lasso, using the LARS algorithm. + + See glossary entry for :term:`cross-validation estimator`. 
+ + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + max_iter : int, default=500 + Maximum number of iterations to perform. + + precompute : bool or 'auto' , default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram matrix + cannot be passed as argument since we will use only subsets of X. + + cv : int, cross-validation generator or an iterable, default=None + Determines the cross-validation splitting strategy. + Possible inputs for cv are: + + - None, to use the default 5-fold cross-validation, + - integer, to specify the number of folds. + - :term:`CV splitter`, + - An iterable yielding (train, test) splits as arrays of indices. + + For integer/None inputs, :class:`~sklearn.model_selection.KFold` is used. + + Refer :ref:`User Guide ` for the various + cross-validation strategies that can be used here. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + max_n_alphas : int, default=1000 + The maximum number of points on the path used to compute the + residuals in the cross-validation. + + n_jobs : int or None, default=None + Number of CPUs to use during the cross validation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + positive : bool, default=False + Restrict coefficients to be >= 0. Be aware that you might want to + remove fit_intercept which is set True by default. + Under the positive restriction the model coefficients do not converge + to the ordinary-least-squares solution for small values of alpha. + Only coefficients up to the smallest alpha value (``alphas_[alphas_ > + 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso + algorithm are typically in congruence with the solution of the + coordinate descent Lasso estimator. + As a consequence using LassoLarsCV only makes sense for problems where + a sparse solution is expected and/or reached. + + Attributes + ---------- + coef_ : array-like of shape (n_features,) + parameter vector (w in the formulation formula) + + intercept_ : float + independent term in decision function. 
+ + coef_path_ : array-like of shape (n_features, n_alphas) + the varying values of the coefficients along the path + + alpha_ : float + the estimated regularization parameter alpha + + alphas_ : array-like of shape (n_alphas,) + the different values of alpha along the path + + cv_alphas_ : array-like of shape (n_cv_alphas,) + all the values of alpha along the path for the different folds + + mse_path_ : array-like of shape (n_folds, n_cv_alphas) + the mean square error on left-out for each fold along the path + (alpha values given by ``cv_alphas``) + + n_iter_ : array-like or int + the number of iterations run by Lars with the optimal alpha. + + active_ : list of int + Indices of active variables at the end of the path. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : Linear Model trained with L1 prior as + regularizer (aka the Lasso). + LassoCV : Lasso linear model with iterative fitting + along a regularization path. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoLarsIC : Lasso model fit with Lars using BIC + or AIC for model selection. + sklearn.decomposition.sparse_encode : Sparse coding. + + Notes + ----- + The object solves the same problem as the + :class:`~sklearn.linear_model.LassoCV` object. However, unlike the + :class:`~sklearn.linear_model.LassoCV`, it find the relevant alphas values + by itself. In general, because of this property, it will be more stable. + However, it is more fragile to heavily multicollinear datasets. + + It is more efficient than the :class:`~sklearn.linear_model.LassoCV` if + only a small number of features are selected compared to the total number, + for instance if there are very few samples compared to the number of + features. + + In `fit`, once the best parameter `alpha` is found through + cross-validation, the model is fit again using the entire training set. + + Examples + -------- + >>> from sklearn.linear_model import LassoLarsCV + >>> from sklearn.datasets import make_regression + >>> X, y = make_regression(noise=4.0, random_state=0) + >>> reg = LassoLarsCV(cv=5).fit(X, y) + >>> reg.score(X, y) + 0.9993... + >>> reg.alpha_ + np.float64(0.3972...) + >>> reg.predict(X[:1,]) + array([-78.4831...]) + """ + + _parameter_constraints = { + **LarsCV._parameter_constraints, + "positive": ["boolean"], + } + + method = "lasso" + + def __init__( + self, + *, + fit_intercept=True, + verbose=False, + max_iter=500, + precompute="auto", + cv=None, + max_n_alphas=1000, + n_jobs=None, + eps=np.finfo(float).eps, + copy_X=True, + positive=False, + ): + self.fit_intercept = fit_intercept + self.verbose = verbose + self.max_iter = max_iter + self.precompute = precompute + self.cv = cv + self.max_n_alphas = max_n_alphas + self.n_jobs = n_jobs + self.eps = eps + self.copy_X = copy_X + self.positive = positive + # XXX : we don't use super().__init__ + # to avoid setting n_nonzero_coefs + + +class LassoLarsIC(LassoLars): + """Lasso model fit with Lars using BIC or AIC for model selection. 
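+
+    Both criteria trade goodness of fit against model complexity; they differ
+    only in how strongly extra non-zero coefficients are penalized (a sketch
+    of the two penalty factors, as used further below):
+
+    >>> import numpy as np
+    >>> n_samples = 100
+    >>> aic_factor, bic_factor = 2.0, float(np.log(n_samples))
+    >>> bic_factor > aic_factor  # BIC penalizes complexity more for n > 7
+    True
+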
+ + The optimization objective for Lasso is:: + + (1 / (2 * n_samples)) * ||y - Xw||^2_2 + alpha * ||w||_1 + + AIC is the Akaike information criterion [2]_ and BIC is the Bayes + Information criterion [3]_. Such criteria are useful to select the value + of the regularization parameter by making a trade-off between the + goodness of fit and the complexity of the model. A good model should + explain well the data while being simple. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + criterion : {'aic', 'bic'}, default='aic' + The type of criterion to use. + + fit_intercept : bool, default=True + Whether to calculate the intercept for this model. If set + to false, no intercept will be used in calculations + (i.e. data is expected to be centered). + + verbose : bool or int, default=False + Sets the verbosity amount. + + precompute : bool, 'auto' or array-like, default='auto' + Whether to use a precomputed Gram matrix to speed up + calculations. If set to ``'auto'`` let us decide. The Gram + matrix can also be passed as argument. + + max_iter : int, default=500 + Maximum number of iterations to perform. Can be used for + early stopping. + + eps : float, default=np.finfo(float).eps + The machine-precision regularization in the computation of the + Cholesky diagonal factors. Increase this for very ill-conditioned + systems. Unlike the ``tol`` parameter in some iterative + optimization-based algorithms, this parameter does not control + the tolerance of the optimization. + + copy_X : bool, default=True + If True, X will be copied; else, it may be overwritten. + + positive : bool, default=False + Restrict coefficients to be >= 0. Be aware that you might want to + remove fit_intercept which is set True by default. + Under the positive restriction the model coefficients do not converge + to the ordinary-least-squares solution for small values of alpha. + Only coefficients up to the smallest alpha value (``alphas_[alphas_ > + 0.].min()`` when fit_path=True) reached by the stepwise Lars-Lasso + algorithm are typically in congruence with the solution of the + coordinate descent Lasso estimator. + As a consequence using LassoLarsIC only makes sense for problems where + a sparse solution is expected and/or reached. + + noise_variance : float, default=None + The estimated noise variance of the data. If `None`, an unbiased + estimate is computed by an OLS model. However, it is only possible + in the case where `n_samples > n_features + fit_intercept`. + + .. versionadded:: 1.1 + + Attributes + ---------- + coef_ : array-like of shape (n_features,) + parameter vector (w in the formulation formula) + + intercept_ : float + independent term in decision function. + + alpha_ : float + the alpha parameter chosen by the information criterion + + alphas_ : array-like of shape (n_alphas + 1,) or list of such arrays + Maximum of covariances (in absolute value) at each iteration. + ``n_alphas`` is either ``max_iter``, ``n_features`` or the + number of nodes in the path with ``alpha >= alpha_min``, whichever + is smaller. If a list, it will be of length `n_targets`. + + n_iter_ : int + number of iterations run by lars_path to find the grid of + alphas. + + criterion_ : array-like of shape (n_alphas,) + The value of the information criteria ('aic', 'bic') across all + alphas. The alpha which has the smallest information criterion is + chosen, as specified in [1]_. + + noise_variance_ : float + The estimated noise variance from the data used to compute the + criterion. + + .. 
versionadded:: 1.1 + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + lars_path : Compute Least Angle Regression or Lasso + path using LARS algorithm. + lasso_path : Compute Lasso path with coordinate descent. + Lasso : Linear Model trained with L1 prior as + regularizer (aka the Lasso). + LassoCV : Lasso linear model with iterative fitting + along a regularization path. + LassoLars : Lasso model fit with Least Angle Regression a.k.a. Lars. + LassoLarsCV: Cross-validated Lasso, using the LARS algorithm. + sklearn.decomposition.sparse_encode : Sparse coding. + + Notes + ----- + The number of degrees of freedom is computed as in [1]_. + + To have more details regarding the mathematical formulation of the + AIC and BIC criteria, please refer to :ref:`User Guide `. + + References + ---------- + .. [1] :arxiv:`Zou, Hui, Trevor Hastie, and Robert Tibshirani. + "On the degrees of freedom of the lasso." + The Annals of Statistics 35.5 (2007): 2173-2192. + <0712.0881>` + + .. [2] `Wikipedia entry on the Akaike information criterion + `_ + + .. [3] `Wikipedia entry on the Bayesian information criterion + `_ + + Examples + -------- + >>> from sklearn import linear_model + >>> reg = linear_model.LassoLarsIC(criterion='bic') + >>> X = [[-2, 2], [-1, 1], [0, 0], [1, 1], [2, 2]] + >>> y = [-2.2222, -1.1111, 0, -1.1111, -2.2222] + >>> reg.fit(X, y) + LassoLarsIC(criterion='bic') + >>> print(reg.coef_) + [ 0. -1.11...] + """ + + _parameter_constraints: dict = { + **LassoLars._parameter_constraints, + "criterion": [StrOptions({"aic", "bic"})], + "noise_variance": [Interval(Real, 0, None, closed="left"), None], + } + + for parameter in ["jitter", "fit_path", "alpha", "random_state"]: + _parameter_constraints.pop(parameter) + + def __init__( + self, + criterion="aic", + *, + fit_intercept=True, + verbose=False, + precompute="auto", + max_iter=500, + eps=np.finfo(float).eps, + copy_X=True, + positive=False, + noise_variance=None, + ): + self.criterion = criterion + self.fit_intercept = fit_intercept + self.positive = positive + self.max_iter = max_iter + self.verbose = verbose + self.copy_X = copy_X + self.precompute = precompute + self.eps = eps + self.fit_path = True + self.noise_variance = noise_variance + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.target_tags.multi_output = False + return tags + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, copy_X=None): + """Fit the model using X, y as training data. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) + Target values. Will be cast to X's dtype if necessary. + + copy_X : bool, default=None + If provided, this parameter will override the choice + of copy_X made at instance creation. + If ``True``, X will be copied; else, it may be overwritten. + + Returns + ------- + self : object + Returns an instance of self. 
+ """ + if copy_X is None: + copy_X = self.copy_X + X, y = validate_data(self, X, y, force_writeable=True, y_numeric=True) + + X, y, Xmean, ymean, Xstd = _preprocess_data( + X, y, fit_intercept=self.fit_intercept, copy=copy_X + ) + + Gram = self.precompute + + alphas_, _, coef_path_, self.n_iter_ = lars_path( + X, + y, + Gram=Gram, + copy_X=copy_X, + copy_Gram=True, + alpha_min=0.0, + method="lasso", + verbose=self.verbose, + max_iter=self.max_iter, + eps=self.eps, + return_n_iter=True, + positive=self.positive, + ) + + n_samples = X.shape[0] + + if self.criterion == "aic": + criterion_factor = 2 + elif self.criterion == "bic": + criterion_factor = log(n_samples) + else: + raise ValueError( + f"criterion should be either bic or aic, got {self.criterion!r}" + ) + + residuals = y[:, np.newaxis] - np.dot(X, coef_path_) + residuals_sum_squares = np.sum(residuals**2, axis=0) + degrees_of_freedom = np.zeros(coef_path_.shape[1], dtype=int) + for k, coef in enumerate(coef_path_.T): + mask = np.abs(coef) > np.finfo(coef.dtype).eps + if not np.any(mask): + continue + # get the number of degrees of freedom equal to: + # Xc = X[:, mask] + # Trace(Xc * inv(Xc.T, Xc) * Xc.T) ie the number of non-zero coefs + degrees_of_freedom[k] = np.sum(mask) + + self.alphas_ = alphas_ + + if self.noise_variance is None: + self.noise_variance_ = self._estimate_noise_variance( + X, y, positive=self.positive + ) + else: + self.noise_variance_ = self.noise_variance + + self.criterion_ = ( + n_samples * np.log(2 * np.pi * self.noise_variance_) + + residuals_sum_squares / self.noise_variance_ + + criterion_factor * degrees_of_freedom + ) + n_best = np.argmin(self.criterion_) + + self.alpha_ = alphas_[n_best] + self.coef_ = coef_path_[:, n_best] + self._set_intercept(Xmean, ymean, Xstd) + return self + + def _estimate_noise_variance(self, X, y, positive): + """Compute an estimate of the variance with an OLS model. + + Parameters + ---------- + X : ndarray of shape (n_samples, n_features) + Data to be fitted by the OLS model. We expect the data to be + centered. + + y : ndarray of shape (n_samples,) + Associated target. + + positive : bool, default=False + Restrict coefficients to be >= 0. This should be inline with + the `positive` parameter from `LassoLarsIC`. + + Returns + ------- + noise_variance : float + An estimator of the noise variance of an OLS model. + """ + if X.shape[0] <= X.shape[1] + self.fit_intercept: + raise ValueError( + f"You are using {self.__class__.__name__} in the case where the number " + "of samples is smaller than the number of features. In this setting, " + "getting a good estimate for the variance of the noise is not " + "possible. Provide an estimate of the noise variance in the " + "constructor." 
+ ) + # X and y are already centered and we don't need to fit with an intercept + ols_model = LinearRegression(positive=positive, fit_intercept=False) + y_pred = ols_model.fit(X, y).predict(X) + return np.sum((y - y_pred) ** 2) / ( + X.shape[0] - X.shape[1] - self.fit_intercept + ) diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_linear_loss.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_linear_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..3bfd5fcd094913b5bc429643efb0dec73fbdf3fc --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_linear_loss.py @@ -0,0 +1,825 @@ +""" +Loss functions for linear models with raw_prediction = X @ coef +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import numpy as np +from scipy import sparse + +from ..utils.extmath import squared_norm + + +def sandwich_dot(X, W): + """Compute the sandwich product X.T @ diag(W) @ X.""" + # TODO: This "sandwich product" is the main computational bottleneck for solvers + # that use the full hessian matrix. Here, thread parallelism would pay-off the + # most. + # While a dedicated Cython routine could exploit the symmetry, it is very hard to + # beat BLAS GEMM, even thought the latter cannot exploit the symmetry, unless one + # pays the price of taking square roots and implements + # sqrtWX = sqrt(W)[: None] * X + # return sqrtWX.T @ sqrtWX + # which (might) detect the symmetry and use BLAS SYRK under the hood. + n_samples = X.shape[0] + if sparse.issparse(X): + return ( + X.T @ sparse.dia_matrix((W, 0), shape=(n_samples, n_samples)) @ X + ).toarray() + else: + # np.einsum may use less memory but the following, using BLAS matrix + # multiplication (gemm), is by far faster. + WX = W[:, None] * X + return X.T @ WX + + +class LinearModelLoss: + """General class for loss functions with raw_prediction = X @ coef + intercept. + + Note that raw_prediction is also known as linear predictor. + + The loss is the average of per sample losses and includes a term for L2 + regularization:: + + loss = 1 / s_sum * sum_i s_i loss(y_i, X_i @ coef + intercept) + + 1/2 * l2_reg_strength * ||coef||_2^2 + + with sample weights s_i=1 if sample_weight=None and s_sum=sum_i s_i. + + Gradient and hessian, for simplicity without intercept, are:: + + gradient = 1 / s_sum * X.T @ loss.gradient + l2_reg_strength * coef + hessian = 1 / s_sum * X.T @ diag(loss.hessian) @ X + + l2_reg_strength * identity + + Conventions: + if fit_intercept: + n_dof = n_features + 1 + else: + n_dof = n_features + + if base_loss.is_multiclass: + coef.shape = (n_classes, n_dof) or ravelled (n_classes * n_dof,) + else: + coef.shape = (n_dof,) + + The intercept term is at the end of the coef array: + if base_loss.is_multiclass: + if coef.shape (n_classes, n_dof): + intercept = coef[:, -1] + if coef.shape (n_classes * n_dof,) + intercept = coef[n_features::n_dof] = coef[(n_dof-1)::n_dof] + intercept.shape = (n_classes,) + else: + intercept = coef[-1] + + Shape of gradient follows shape of coef. + gradient.shape = coef.shape + + But hessian (to make our lives simpler) are always 2-d: + if base_loss.is_multiclass: + hessian.shape = (n_classes * n_dof, n_classes * n_dof) + else: + hessian.shape = (n_dof, n_dof) + + Note: If coef has shape (n_classes * n_dof,), the 2d-array can be reconstructed as + + coef.reshape((n_classes, -1), order="F") + + The option order="F" makes coef[:, i] contiguous. 
This, in turn, makes the + coefficients without intercept, coef[:, :-1], contiguous and speeds up + matrix-vector computations. + + Note: If the average loss per sample is wanted instead of the sum of the loss per + sample, one can simply use a rescaled sample_weight such that + sum(sample_weight) = 1. + + Parameters + ---------- + base_loss : instance of class BaseLoss from sklearn._loss. + fit_intercept : bool + """ + + def __init__(self, base_loss, fit_intercept): + self.base_loss = base_loss + self.fit_intercept = fit_intercept + + def init_zero_coef(self, X, dtype=None): + """Allocate coef of correct shape with zeros. + + Parameters: + ----------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + dtype : data-type, default=None + Overrides the data type of coef. With dtype=None, coef will have the same + dtype as X. + + Returns + ------- + coef : ndarray of shape (n_dof,) or (n_classes, n_dof) + Coefficients of a linear model. + """ + n_features = X.shape[1] + n_classes = self.base_loss.n_classes + if self.fit_intercept: + n_dof = n_features + 1 + else: + n_dof = n_features + if self.base_loss.is_multiclass: + coef = np.zeros_like(X, shape=(n_classes, n_dof), dtype=dtype, order="F") + else: + coef = np.zeros_like(X, shape=n_dof, dtype=dtype) + return coef + + def weight_intercept(self, coef): + """Helper function to get coefficients and intercept. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + + Returns + ------- + weights : ndarray of shape (n_features,) or (n_classes, n_features) + Coefficients without intercept term. + intercept : float or ndarray of shape (n_classes,) + Intercept terms. + """ + if not self.base_loss.is_multiclass: + if self.fit_intercept: + intercept = coef[-1] + weights = coef[:-1] + else: + intercept = 0.0 + weights = coef + else: + # reshape to (n_classes, n_dof) + if coef.ndim == 1: + weights = coef.reshape((self.base_loss.n_classes, -1), order="F") + else: + weights = coef + if self.fit_intercept: + intercept = weights[:, -1] + weights = weights[:, :-1] + else: + intercept = 0.0 + + return weights, intercept + + def weight_intercept_raw(self, coef, X): + """Helper function to get coefficients, intercept and raw_prediction. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + Returns + ------- + weights : ndarray of shape (n_features,) or (n_classes, n_features) + Coefficients without intercept term. + intercept : float or ndarray of shape (n_classes,) + Intercept terms. 
+ raw_prediction : ndarray of shape (n_samples,) or \ + (n_samples, n_classes) + """ + weights, intercept = self.weight_intercept(coef) + + if not self.base_loss.is_multiclass: + raw_prediction = X @ weights + intercept + else: + # weights has shape (n_classes, n_dof) + raw_prediction = X @ weights.T + intercept # ndarray, likely C-contiguous + + return weights, intercept, raw_prediction + + def l2_penalty(self, weights, l2_reg_strength): + """Compute L2 penalty term l2_reg_strength/2 *||w||_2^2.""" + norm2_w = weights @ weights if weights.ndim == 1 else squared_norm(weights) + return 0.5 * l2_reg_strength * norm2_w + + def loss( + self, + coef, + X, + y, + sample_weight=None, + l2_reg_strength=0.0, + n_threads=1, + raw_prediction=None, + ): + """Compute the loss as weighted average over point-wise losses. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. + l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). If provided, these are used. If + None, then raw_prediction = X @ coef + intercept is calculated. + + Returns + ------- + loss : float + Weighted average of losses per sample, plus penalty. + """ + if raw_prediction is None: + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + else: + weights, intercept = self.weight_intercept(coef) + + loss = self.base_loss.loss( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=None, + n_threads=n_threads, + ) + loss = np.average(loss, weights=sample_weight) + + return loss + self.l2_penalty(weights, l2_reg_strength) + + def loss_gradient( + self, + coef, + X, + y, + sample_weight=None, + l2_reg_strength=0.0, + n_threads=1, + raw_prediction=None, + ): + """Computes the sum of loss and gradient w.r.t. coef. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. + l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). If provided, these are used. If + None, then raw_prediction = X @ coef + intercept is calculated. + + Returns + ------- + loss : float + Weighted average of losses per sample, plus penalty. + + gradient : ndarray of shape coef.shape + The gradient of the loss. 
+ """ + (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes + n_dof = n_features + int(self.fit_intercept) + + if raw_prediction is None: + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + else: + weights, intercept = self.weight_intercept(coef) + + loss, grad_pointwise = self.base_loss.loss_gradient( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + loss = loss.sum() / sw_sum + loss += self.l2_penalty(weights, l2_reg_strength) + + grad_pointwise /= sw_sum + + if not self.base_loss.is_multiclass: + grad = np.empty_like(coef, dtype=weights.dtype) + grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights + if self.fit_intercept: + grad[-1] = grad_pointwise.sum() + else: + grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") + # grad_pointwise.shape = (n_samples, n_classes) + grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights + if self.fit_intercept: + grad[:, -1] = grad_pointwise.sum(axis=0) + if coef.ndim == 1: + grad = grad.ravel(order="F") + + return loss, grad + + def gradient( + self, + coef, + X, + y, + sample_weight=None, + l2_reg_strength=0.0, + n_threads=1, + raw_prediction=None, + ): + """Computes the gradient w.r.t. coef. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. + l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). If provided, these are used. If + None, then raw_prediction = X @ coef + intercept is calculated. + + Returns + ------- + gradient : ndarray of shape coef.shape + The gradient of the loss. 
+ """ + (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes + n_dof = n_features + int(self.fit_intercept) + + if raw_prediction is None: + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + else: + weights, intercept = self.weight_intercept(coef) + + grad_pointwise = self.base_loss.gradient( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + grad_pointwise /= sw_sum + + if not self.base_loss.is_multiclass: + grad = np.empty_like(coef, dtype=weights.dtype) + grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights + if self.fit_intercept: + grad[-1] = grad_pointwise.sum() + return grad + else: + grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") + # gradient.shape = (n_samples, n_classes) + grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights + if self.fit_intercept: + grad[:, -1] = grad_pointwise.sum(axis=0) + if coef.ndim == 1: + return grad.ravel(order="F") + else: + return grad + + def gradient_hessian( + self, + coef, + X, + y, + sample_weight=None, + l2_reg_strength=0.0, + n_threads=1, + gradient_out=None, + hessian_out=None, + raw_prediction=None, + ): + """Computes gradient and hessian w.r.t. coef. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. + l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. + gradient_out : None or ndarray of shape coef.shape + A location into which the gradient is stored. If None, a new array + might be created. + hessian_out : None or ndarray of shape (n_dof, n_dof) or \ + (n_classes * n_dof, n_classes * n_dof) + A location into which the hessian is stored. If None, a new array + might be created. + raw_prediction : C-contiguous array of shape (n_samples,) or array of \ + shape (n_samples, n_classes) + Raw prediction values (in link space). If provided, these are used. If + None, then raw_prediction = X @ coef + intercept is calculated. + + Returns + ------- + gradient : ndarray of shape coef.shape + The gradient of the loss. + + hessian : ndarray of shape (n_dof, n_dof) or \ + (n_classes, n_dof, n_dof, n_classes) + Hessian matrix. + + hessian_warning : bool + True if pointwise hessian has more than 25% of its elements non-positive. + """ + (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes + n_dof = n_features + int(self.fit_intercept) + if raw_prediction is None: + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + else: + weights, intercept = self.weight_intercept(coef) + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + + # Allocate gradient. + if gradient_out is None: + grad = np.empty_like(coef, dtype=weights.dtype, order="F") + elif gradient_out.shape != coef.shape: + raise ValueError( + f"gradient_out is required to have shape coef.shape = {coef.shape}; " + f"got {gradient_out.shape}." 
+ ) + elif self.base_loss.is_multiclass and not gradient_out.flags.f_contiguous: + raise ValueError("gradient_out must be F-contiguous.") + else: + grad = gradient_out + # Allocate hessian. + n = coef.size # for multinomial this equals n_dof * n_classes + if hessian_out is None: + hess = np.empty((n, n), dtype=weights.dtype) + elif hessian_out.shape != (n, n): + raise ValueError( + f"hessian_out is required to have shape ({n, n}); got " + f"{hessian_out.shape=}." + ) + elif self.base_loss.is_multiclass and ( + not hessian_out.flags.c_contiguous and not hessian_out.flags.f_contiguous + ): + raise ValueError("hessian_out must be contiguous.") + else: + hess = hessian_out + + if not self.base_loss.is_multiclass: + grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + grad_pointwise /= sw_sum + hess_pointwise /= sw_sum + + # For non-canonical link functions and far away from the optimum, the + # pointwise hessian can be negative. We take care that 75% of the hessian + # entries are positive. + hessian_warning = ( + np.average(hess_pointwise <= 0, weights=sample_weight) > 0.25 + ) + hess_pointwise = np.abs(hess_pointwise) + + grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights + if self.fit_intercept: + grad[-1] = grad_pointwise.sum() + + if hessian_warning: + # Exit early without computing the hessian. + return grad, hess, hessian_warning + + hess[:n_features, :n_features] = sandwich_dot(X, hess_pointwise) + + if l2_reg_strength > 0: + # The L2 penalty enters the Hessian on the diagonal only. To add those + # terms, we use a flattened view of the array. + order = "C" if hess.flags.c_contiguous else "F" + hess.reshape(-1, order=order)[ + : (n_features * n_dof) : (n_dof + 1) + ] += l2_reg_strength + + if self.fit_intercept: + # With intercept included as added column to X, the hessian becomes + # hess = (X, 1)' @ diag(h) @ (X, 1) + # = (X' @ diag(h) @ X, X' @ h) + # ( h @ X, sum(h)) + # The left upper part has already been filled, it remains to compute + # the last row and the last column. + Xh = X.T @ hess_pointwise + hess[:-1, -1] = Xh + hess[-1, :-1] = Xh + hess[-1, -1] = hess_pointwise.sum() + else: + # Here we may safely assume HalfMultinomialLoss aka categorical + # cross-entropy. + # HalfMultinomialLoss computes only the diagonal part of the hessian, i.e. + # diagonal in the classes. Here, we want the full hessian. Therefore, we + # call gradient_proba. + grad_pointwise, proba = self.base_loss.gradient_proba( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + grad_pointwise /= sw_sum + grad = grad.reshape((n_classes, n_dof), order="F") + grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights + if self.fit_intercept: + grad[:, -1] = grad_pointwise.sum(axis=0) + if coef.ndim == 1: + grad = grad.ravel(order="F") + + # The full hessian matrix, i.e. not only the diagonal part, dropping most + # indices, is given by: + # + # hess = X' @ h @ X + # + # Here, h is a priori a 4-dimensional matrix of shape + # (n_samples, n_samples, n_classes, n_classes). It is diagonal its first + # two dimensions (the ones with n_samples), i.e. it is + # effectively a 3-dimensional matrix (n_samples, n_classes, n_classes). + # + # h = diag(p) - p' p + # + # or with indices k and l for classes + # + # h_kl = p_k * delta_kl - p_k * p_l + # + # with p_k the (predicted) probability for class k. 
Only the dimension in + # n_samples multiplies with X. + # For 3 classes and n_samples = 1, this looks like ("@" is a bit misused + # here): + # + # hess = X' @ (h00 h10 h20) @ X + # (h10 h11 h12) + # (h20 h12 h22) + # = (X' @ diag(h00) @ X, X' @ diag(h10), X' @ diag(h20)) + # (X' @ diag(h10) @ X, X' @ diag(h11), X' @ diag(h12)) + # (X' @ diag(h20) @ X, X' @ diag(h12), X' @ diag(h22)) + # + # Now coef of shape (n_classes * n_dof) is contiguous in n_classes. + # Therefore, we want the hessian to follow this convention, too, i.e. + # hess[:n_classes, :n_classes] = (x0' @ h00 @ x0, x0' @ h10 @ x0, ..) + # (x0' @ h10 @ x0, x0' @ h11 @ x0, ..) + # (x0' @ h20 @ x0, x0' @ h12 @ x0, ..) + # is the first feature, x0, for all classes. In our implementation, we + # still want to take advantage of BLAS "X.T @ X". Therefore, we have some + # index/slicing battle to fight. + if sample_weight is not None: + sw = sample_weight / sw_sum + else: + sw = 1.0 / sw_sum + + for k in range(n_classes): + # Diagonal terms (in classes) hess_kk. + # Note that this also writes to some of the lower triangular part. + h = proba[:, k] * (1 - proba[:, k]) * sw + hess[ + k : n_classes * n_features : n_classes, + k : n_classes * n_features : n_classes, + ] = sandwich_dot(X, h) + if self.fit_intercept: + # See above in the non multiclass case. + Xh = X.T @ h + hess[ + k : n_classes * n_features : n_classes, + n_classes * n_features + k, + ] = Xh + hess[ + n_classes * n_features + k, + k : n_classes * n_features : n_classes, + ] = Xh + hess[n_classes * n_features + k, n_classes * n_features + k] = ( + h.sum() + ) + # Off diagonal terms (in classes) hess_kl. + for l in range(k + 1, n_classes): + # Upper triangle (in classes). + h = -proba[:, k] * proba[:, l] * sw + hess[ + k : n_classes * n_features : n_classes, + l : n_classes * n_features : n_classes, + ] = sandwich_dot(X, h) + if self.fit_intercept: + Xh = X.T @ h + hess[ + k : n_classes * n_features : n_classes, + n_classes * n_features + l, + ] = Xh + hess[ + n_classes * n_features + k, + l : n_classes * n_features : n_classes, + ] = Xh + hess[n_classes * n_features + k, n_classes * n_features + l] = ( + h.sum() + ) + # Fill lower triangle (in classes). + hess[l::n_classes, k::n_classes] = hess[k::n_classes, l::n_classes] + + if l2_reg_strength > 0: + # See above in the non multiclass case. + order = "C" if hess.flags.c_contiguous else "F" + hess.reshape(-1, order=order)[ + : (n_classes**2 * n_features * n_dof) : (n_classes * n_dof + 1) + ] += l2_reg_strength + + # The pointwise hessian is always non-negative for the multinomial loss. + hessian_warning = False + + return grad, hess, hessian_warning + + def gradient_hessian_product( + self, coef, X, y, sample_weight=None, l2_reg_strength=0.0, n_threads=1 + ): + """Computes gradient and hessp (hessian product function) w.r.t. coef. + + Parameters + ---------- + coef : ndarray of shape (n_dof,), (n_classes, n_dof) or (n_classes * n_dof,) + Coefficients of a linear model. + If shape (n_classes * n_dof,), the classes of one feature are contiguous, + i.e. one reconstructs the 2d-array via + coef.reshape((n_classes, -1), order="F"). + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + y : contiguous array of shape (n_samples,) + Observed, true target values. + sample_weight : None or contiguous array of shape (n_samples,), default=None + Sample weights. + l2_reg_strength : float, default=0.0 + L2 regularization strength + n_threads : int, default=1 + Number of OpenMP threads to use. 
+ + Returns + ------- + gradient : ndarray of shape coef.shape + The gradient of the loss. + + hessp : callable + Function that takes in a vector input of shape of gradient and + and returns matrix-vector product with hessian. + """ + (n_samples, n_features), n_classes = X.shape, self.base_loss.n_classes + n_dof = n_features + int(self.fit_intercept) + weights, intercept, raw_prediction = self.weight_intercept_raw(coef, X) + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + + if not self.base_loss.is_multiclass: + grad_pointwise, hess_pointwise = self.base_loss.gradient_hessian( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + grad_pointwise /= sw_sum + hess_pointwise /= sw_sum + grad = np.empty_like(coef, dtype=weights.dtype) + grad[:n_features] = X.T @ grad_pointwise + l2_reg_strength * weights + if self.fit_intercept: + grad[-1] = grad_pointwise.sum() + + # Precompute as much as possible: hX, hX_sum and hessian_sum + hessian_sum = hess_pointwise.sum() + if sparse.issparse(X): + hX = ( + sparse.dia_matrix((hess_pointwise, 0), shape=(n_samples, n_samples)) + @ X + ) + else: + hX = hess_pointwise[:, np.newaxis] * X + + if self.fit_intercept: + # Calculate the double derivative with respect to intercept. + # Note: In case hX is sparse, hX.sum is a matrix object. + hX_sum = np.squeeze(np.asarray(hX.sum(axis=0))) + # prevent squeezing to zero-dim array if n_features == 1 + hX_sum = np.atleast_1d(hX_sum) + + # With intercept included and l2_reg_strength = 0, hessp returns + # res = (X, 1)' @ diag(h) @ (X, 1) @ s + # = (X, 1)' @ (hX @ s[:n_features], sum(h) * s[-1]) + # res[:n_features] = X' @ hX @ s[:n_features] + sum(h) * s[-1] + # res[-1] = 1' @ hX @ s[:n_features] + sum(h) * s[-1] + def hessp(s): + ret = np.empty_like(s) + if sparse.issparse(X): + ret[:n_features] = X.T @ (hX @ s[:n_features]) + else: + ret[:n_features] = np.linalg.multi_dot([X.T, hX, s[:n_features]]) + ret[:n_features] += l2_reg_strength * s[:n_features] + + if self.fit_intercept: + ret[:n_features] += s[-1] * hX_sum + ret[-1] = hX_sum @ s[:n_features] + hessian_sum * s[-1] + return ret + + else: + # Here we may safely assume HalfMultinomialLoss aka categorical + # cross-entropy. + # HalfMultinomialLoss computes only the diagonal part of the hessian, i.e. + # diagonal in the classes. Here, we want the matrix-vector product of the + # full hessian. Therefore, we call gradient_proba. + grad_pointwise, proba = self.base_loss.gradient_proba( + y_true=y, + raw_prediction=raw_prediction, + sample_weight=sample_weight, + n_threads=n_threads, + ) + grad_pointwise /= sw_sum + grad = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") + grad[:, :n_features] = grad_pointwise.T @ X + l2_reg_strength * weights + if self.fit_intercept: + grad[:, -1] = grad_pointwise.sum(axis=0) + + # Full hessian-vector product, i.e. not only the diagonal part of the + # hessian. 
Derivation with some index battle for input vector s: + # - sample index i + # - feature indices j, m + # - class indices k, l + # - 1_{k=l} is one if k=l else 0 + # - p_i_k is the (predicted) probability that sample i belongs to class k + # for all i: sum_k p_i_k = 1 + # - s_l_m is input vector for class l and feature m + # - X' = X transposed + # + # Note: Hessian with dropping most indices is just: + # X' @ p_k (1(k=l) - p_l) @ X + # + # result_{k j} = sum_{i, l, m} Hessian_{i, k j, m l} * s_l_m + # = sum_{i, l, m} (X')_{ji} * p_i_k * (1_{k=l} - p_i_l) + # * X_{im} s_l_m + # = sum_{i, m} (X')_{ji} * p_i_k + # * (X_{im} * s_k_m - sum_l p_i_l * X_{im} * s_l_m) + # + # See also https://github.com/scikit-learn/scikit-learn/pull/3646#discussion_r17461411 # noqa + def hessp(s): + s = s.reshape((n_classes, -1), order="F") # shape = (n_classes, n_dof) + if self.fit_intercept: + s_intercept = s[:, -1] + s = s[:, :-1] # shape = (n_classes, n_features) + else: + s_intercept = 0 + tmp = X @ s.T + s_intercept # X_{im} * s_k_m + tmp += (-proba * tmp).sum(axis=1)[:, np.newaxis] # - sum_l .. + tmp *= proba # * p_i_k + if sample_weight is not None: + tmp *= sample_weight[:, np.newaxis] + # hess_prod = empty_like(grad), but we ravel grad below and this + # function is run after that. + hess_prod = np.empty((n_classes, n_dof), dtype=weights.dtype, order="F") + hess_prod[:, :n_features] = (tmp.T @ X) / sw_sum + l2_reg_strength * s + if self.fit_intercept: + hess_prod[:, -1] = tmp.sum(axis=0) / sw_sum + if coef.ndim == 1: + return hess_prod.ravel(order="F") + else: + return hess_prod + + if coef.ndim == 1: + return grad.ravel(order="F"), hessp + + return grad, hessp diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py new file mode 100644 index 0000000000000000000000000000000000000000..291c3972eb3e5013a4f7b31b3acb2f1c44bdfb2a --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_logistic.py @@ -0,0 +1,2286 @@ +""" +Logistic Regression +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import numbers +import warnings +from numbers import Integral, Real + +import numpy as np +from joblib import effective_n_jobs +from scipy import optimize + +from sklearn.metrics import get_scorer_names + +from .._loss.loss import HalfBinomialLoss, HalfMultinomialLoss +from ..base import _fit_context +from ..metrics import get_scorer +from ..model_selection import check_cv +from ..preprocessing import LabelBinarizer, LabelEncoder +from ..svm._base import _fit_liblinear +from ..utils import ( + Bunch, + check_array, + check_consistent_length, + check_random_state, + compute_class_weight, +) +from ..utils._param_validation import Hidden, Interval, StrOptions +from ..utils.extmath import row_norms, softmax +from ..utils.metadata_routing import ( + MetadataRouter, + MethodMapping, + _raise_for_params, + _routing_enabled, + process_routing, +) +from ..utils.multiclass import check_classification_targets +from ..utils.optimize import _check_optimize_result, _newton_cg +from ..utils.parallel import Parallel, delayed +from ..utils.validation import ( + _check_method_params, + _check_sample_weight, + check_is_fitted, + validate_data, +) +from ._base import BaseEstimator, LinearClassifierMixin, SparseCoefMixin +from ._glm.glm import NewtonCholeskySolver +from ._linear_loss import LinearModelLoss +from ._sag import sag_solver + 
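The `LinearModelLoss` / `HalfBinomialLoss` pair defined in the hunk above is exactly what the `lbfgs` branch of `_logistic_regression_path` further down feeds to `scipy.optimize.minimize`. The following sketch is an editorial illustration, not part of this diff: the toy data, the `1e-2` regularization strength, and all variable names are assumptions made only to show the calling pattern.

```python
# Hedged sketch: minimize a binomial LinearModelLoss with L-BFGS-B, mirroring the
# lbfgs branch of _logistic_regression_path below. Illustrative only.
import numpy as np
from scipy import optimize

from sklearn._loss.loss import HalfBinomialLoss                 # private sklearn module
from sklearn.linear_model._linear_loss import LinearModelLoss   # file added by this diff

rng = np.random.RandomState(0)
X = rng.standard_normal((50, 3))
y = (X @ np.array([1.0, -2.0, 0.5]) > 0).astype(np.float64)  # targets in {0, 1}

loss = LinearModelLoss(base_loss=HalfBinomialLoss(), fit_intercept=True)
w0 = loss.init_zero_coef(X)  # shape (n_features + 1,), intercept stored last

opt = optimize.minimize(
    loss.loss_gradient,      # returns (loss, gradient), hence jac=True
    w0,
    method="L-BFGS-B",
    jac=True,
    # same positional args the solver loop passes below: sample_weight,
    # l2_reg_strength, n_threads (the 1e-2 penalty is an arbitrary choice here)
    args=(X, y, None, 1e-2, 1),
)
coef, intercept = opt.x[:-1], opt.x[-1]
print(coef, intercept)
```

In the multinomial case the same wrapper holds a `HalfMultinomialLoss` and operates on the F-order ravelled coefficient array discussed in the `w0.ravel(order="F")` comment later in this file.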
+_LOGISTIC_SOLVER_CONVERGENCE_MSG = ( + "Please also refer to the documentation for alternative solver options:\n" + " https://scikit-learn.org/stable/modules/linear_model.html" + "#logistic-regression" +) + + +def _check_solver(solver, penalty, dual): + if solver not in ["liblinear", "saga"] and penalty not in ("l2", None): + raise ValueError( + f"Solver {solver} supports only 'l2' or None penalties, got {penalty} " + "penalty." + ) + if solver != "liblinear" and dual: + raise ValueError(f"Solver {solver} supports only dual=False, got dual={dual}") + + if penalty == "elasticnet" and solver != "saga": + raise ValueError( + f"Only 'saga' solver supports elasticnet penalty, got solver={solver}." + ) + + if solver == "liblinear" and penalty is None: + raise ValueError("penalty=None is not supported for the liblinear solver") + + return solver + + +def _check_multi_class(multi_class, solver, n_classes): + """Computes the multi class type, either "multinomial" or "ovr". + + For `n_classes` > 2 and a solver that supports it, returns "multinomial". + For all other cases, in particular binary classification, return "ovr". + """ + if multi_class == "auto": + if solver in ("liblinear",): + multi_class = "ovr" + elif n_classes > 2: + multi_class = "multinomial" + else: + multi_class = "ovr" + if multi_class == "multinomial" and solver in ("liblinear",): + raise ValueError("Solver %s does not support a multinomial backend." % solver) + return multi_class + + +def _logistic_regression_path( + X, + y, + pos_class=None, + Cs=10, + fit_intercept=True, + max_iter=100, + tol=1e-4, + verbose=0, + solver="lbfgs", + coef=None, + class_weight=None, + dual=False, + penalty="l2", + intercept_scaling=1.0, + multi_class="auto", + random_state=None, + check_input=True, + max_squared_sum=None, + sample_weight=None, + l1_ratio=None, + n_threads=1, +): + """Compute a Logistic Regression model for a list of regularization + parameters. + + This is an implementation that uses the result of the previous model + to speed up computations along the set of solutions, making it faster + than sequentially calling LogisticRegression for the different parameters. + Note that there will be no speedup with liblinear solver, since it does + not handle warm-starting. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Input data, target values. + + pos_class : int, default=None + The class with respect to which we perform a one-vs-all fit. + If None, then it is assumed that the given problem is binary. + + Cs : int or array-like of shape (n_cs,), default=10 + List of values for the regularization parameter or integer specifying + the number of regularization parameters that should be used. In this + case, the parameters will be chosen in a logarithmic scale between + 1e-4 and 1e4. + + fit_intercept : bool, default=True + Whether to fit an intercept for the model. In this case the shape of + the returned array is (n_cs, n_features + 1). + + max_iter : int, default=100 + Maximum number of iterations for the solver. + + tol : float, default=1e-4 + Stopping criterion. For the newton-cg and lbfgs solvers, the iteration + will stop when ``max{|g_i | i = 1, ..., n} <= tol`` + where ``g_i`` is the i-th component of the gradient. + + verbose : int, default=0 + For the liblinear and lbfgs solvers set verbose to any positive + number for verbosity. 
+ + solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \ + default='lbfgs' + Numerical solver to use. + + coef : array-like of shape (n_features,), default=None + Initialization value for coefficients of logistic regression. + Useless for liblinear solver. + + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + dual : bool, default=False + Dual or primal formulation. Dual formulation is only implemented for + l2 penalty with liblinear solver. Prefer dual=False when + n_samples > n_features. + + penalty : {'l1', 'l2', 'elasticnet'}, default='l2' + Used to specify the norm used in the penalization. The 'newton-cg', + 'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is + only supported by the 'saga' solver. + + intercept_scaling : float, default=1. + Useful only when the solver 'liblinear' is used + and self.fit_intercept is set to True. In this case, x becomes + [x, self.intercept_scaling], + i.e. a "synthetic" feature with constant value equal to + intercept_scaling is appended to the instance vector. + The intercept becomes ``intercept_scaling * synthetic_feature_weight``. + + Note! the synthetic feature weight is subject to l1/l2 regularization + as all other features. + To lessen the effect of regularization on synthetic feature weight + (and therefore on the intercept) intercept_scaling has to be increased. + + multi_class : {'ovr', 'multinomial', 'auto'}, default='auto' + If the option chosen is 'ovr', then a binary problem is fit for each + label. For 'multinomial' the loss minimised is the multinomial loss fit + across the entire probability distribution, *even when the data is + binary*. 'multinomial' is unavailable when solver='liblinear'. + 'auto' selects 'ovr' if the data is binary, or if solver='liblinear', + and otherwise selects 'multinomial'. + + .. versionadded:: 0.18 + Stochastic Average Gradient descent solver for 'multinomial' case. + .. versionchanged:: 0.22 + Default changed from 'ovr' to 'auto' in 0.22. + + random_state : int, RandomState instance, default=None + Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the + data. See :term:`Glossary ` for details. + + check_input : bool, default=True + If False, the input arrays X and y will not be checked. + + max_squared_sum : float, default=None + Maximum squared sum of X over samples. Used only in SAG solver. + If None, it will be computed, going through all the samples. + The value should be precomputed to speed up cross validation. + + sample_weight : array-like of shape(n_samples,), default=None + Array of weights that are assigned to individual samples. + If not provided, then each sample is given unit weight. + + l1_ratio : float, default=None + The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only + used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent + to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent + to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a + combination of L1 and L2. 
+ + n_threads : int, default=1 + Number of OpenMP threads to use. + + Returns + ------- + coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1) + List of coefficients for the Logistic Regression model. If + fit_intercept is set to True then the second dimension will be + n_features + 1, where the last item represents the intercept. For + ``multiclass='multinomial'``, the shape is (n_classes, n_cs, + n_features) or (n_classes, n_cs, n_features + 1). + + Cs : ndarray + Grid of Cs used for cross-validation. + + n_iter : array of shape (n_cs,) + Actual number of iteration for each Cs. + + Notes + ----- + You might get slightly different results with the solver liblinear than + with the others since this uses LIBLINEAR which penalizes the intercept. + + .. versionchanged:: 0.19 + The "copy" parameter was removed. + """ + if isinstance(Cs, numbers.Integral): + Cs = np.logspace(-4, 4, Cs) + + solver = _check_solver(solver, penalty, dual) + + # Preprocessing. + if check_input: + X = check_array( + X, + accept_sparse="csr", + dtype=np.float64, + accept_large_sparse=solver not in ["liblinear", "sag", "saga"], + ) + y = check_array(y, ensure_2d=False, dtype=None) + check_consistent_length(X, y) + n_samples, n_features = X.shape + + classes = np.unique(y) + random_state = check_random_state(random_state) + + multi_class = _check_multi_class(multi_class, solver, len(classes)) + if pos_class is None and multi_class != "multinomial": + if classes.size > 2: + raise ValueError("To fit OvR, use the pos_class argument") + # np.unique(y) gives labels in sorted order. + pos_class = classes[1] + + if sample_weight is not None or class_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype, copy=True) + + # If class_weights is a dict (provided by the user), the weights + # are assigned to the original labels. If it is "balanced", then + # the class_weights are assigned after masking the labels with a OvR. + le = LabelEncoder() + if isinstance(class_weight, dict) or ( + multi_class == "multinomial" and class_weight is not None + ): + class_weight_ = compute_class_weight(class_weight, classes=classes, y=y) + sample_weight *= class_weight_[le.fit_transform(y)] + + # For doing a ovr, we need to mask the labels first. For the + # multinomial case this is not necessary. + if multi_class == "ovr": + w0 = np.zeros(n_features + int(fit_intercept), dtype=X.dtype) + mask = y == pos_class + y_bin = np.ones(y.shape, dtype=X.dtype) + if solver == "liblinear": + mask_classes = np.array([-1, 1]) + y_bin[~mask] = -1.0 + else: + # HalfBinomialLoss, used for those solvers, represents y in [0, 1] instead + # of in [-1, 1]. + mask_classes = np.array([0, 1]) + y_bin[~mask] = 0.0 + + # for compute_class_weight + if class_weight == "balanced": + class_weight_ = compute_class_weight( + class_weight, classes=mask_classes, y=y_bin + ) + sample_weight *= class_weight_[le.fit_transform(y_bin)] + + else: + if solver in ["sag", "saga", "lbfgs", "newton-cg", "newton-cholesky"]: + # SAG, lbfgs, newton-cg and newton-cg multinomial solvers need + # LabelEncoder, not LabelBinarizer, i.e. y as a 1d-array of integers. + # LabelEncoder also saves memory compared to LabelBinarizer, especially + # when n_classes is large. + le = LabelEncoder() + Y_multi = le.fit_transform(y).astype(X.dtype, copy=False) + else: + # For liblinear solver, apply LabelBinarizer, i.e. y is one-hot encoded. 
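+            # (Illustrative aside, not part of the upstream comment: e.g.
+            #  y = [0, 1, 2, 1] becomes Y_multi = [[1, 0, 0],
+            #                                      [0, 1, 0],
+            #                                      [0, 0, 1],
+            #                                      [0, 1, 0]],
+            #  whereas the LabelEncoder branch above would yield [0, 1, 2, 1].)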
+ lbin = LabelBinarizer() + Y_multi = lbin.fit_transform(y) + if Y_multi.shape[1] == 1: + Y_multi = np.hstack([1 - Y_multi, Y_multi]) + + w0 = np.zeros( + (classes.size, n_features + int(fit_intercept)), order="F", dtype=X.dtype + ) + + # IMPORTANT NOTE: + # All solvers relying on LinearModelLoss need to scale the penalty with n_samples + # or the sum of sample weights because the implemented logistic regression + # objective here is (unfortunately) + # C * sum(pointwise_loss) + penalty + # instead of (as LinearModelLoss does) + # mean(pointwise_loss) + 1/C * penalty + if solver in ["lbfgs", "newton-cg", "newton-cholesky"]: + # This needs to be calculated after sample_weight is multiplied by + # class_weight. It is even tested that passing class_weight is equivalent to + # passing sample_weights according to class_weight. + sw_sum = n_samples if sample_weight is None else np.sum(sample_weight) + + if coef is not None: + # it must work both giving the bias term and not + if multi_class == "ovr": + if coef.size not in (n_features, w0.size): + raise ValueError( + "Initialization coef is of shape %d, expected shape %d or %d" + % (coef.size, n_features, w0.size) + ) + w0[: coef.size] = coef + else: + # For binary problems coef.shape[0] should be 1, otherwise it + # should be classes.size. + n_classes = classes.size + if n_classes == 2: + n_classes = 1 + + if coef.shape[0] != n_classes or coef.shape[1] not in ( + n_features, + n_features + 1, + ): + raise ValueError( + "Initialization coef is of shape (%d, %d), expected " + "shape (%d, %d) or (%d, %d)" + % ( + coef.shape[0], + coef.shape[1], + classes.size, + n_features, + classes.size, + n_features + 1, + ) + ) + + if n_classes == 1: + w0[0, : coef.shape[1]] = -coef + w0[1, : coef.shape[1]] = coef + else: + w0[:, : coef.shape[1]] = coef + + if multi_class == "multinomial": + if solver in ["lbfgs", "newton-cg", "newton-cholesky"]: + # scipy.optimize.minimize and newton-cg accept only ravelled parameters, + # i.e. 1d-arrays. LinearModelLoss expects classes to be contiguous and + # reconstructs the 2d-array via w0.reshape((n_classes, -1), order="F"). + # As w0 is F-contiguous, ravel(order="F") also avoids a copy. 
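+            # (Illustrative aside, not part of the upstream comment: with
+            #  n_classes=2 and n_dof=3,
+            #      w0 = [[a0, a1, a2],
+            #            [b0, b1, b2]]
+            #  ravels with order="F" to [a0, b0, a1, b1, a2, b2], keeping the two
+            #  classes of each coefficient adjacent, as LinearModelLoss expects.)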
+ w0 = w0.ravel(order="F") + loss = LinearModelLoss( + base_loss=HalfMultinomialLoss(n_classes=classes.size), + fit_intercept=fit_intercept, + ) + target = Y_multi + if solver == "lbfgs": + func = loss.loss_gradient + elif solver == "newton-cg": + func = loss.loss + grad = loss.gradient + hess = loss.gradient_hessian_product # hess = [gradient, hessp] + warm_start_sag = {"coef": w0.T} + else: + target = y_bin + if solver == "lbfgs": + loss = LinearModelLoss( + base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept + ) + func = loss.loss_gradient + elif solver == "newton-cg": + loss = LinearModelLoss( + base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept + ) + func = loss.loss + grad = loss.gradient + hess = loss.gradient_hessian_product # hess = [gradient, hessp] + elif solver == "newton-cholesky": + loss = LinearModelLoss( + base_loss=HalfBinomialLoss(), fit_intercept=fit_intercept + ) + warm_start_sag = {"coef": np.expand_dims(w0, axis=1)} + + coefs = list() + n_iter = np.zeros(len(Cs), dtype=np.int32) + for i, C in enumerate(Cs): + if solver == "lbfgs": + l2_reg_strength = 1.0 / (C * sw_sum) + iprint = [-1, 50, 1, 100, 101][ + np.searchsorted(np.array([0, 1, 2, 3]), verbose) + ] + opt_res = optimize.minimize( + func, + w0, + method="L-BFGS-B", + jac=True, + args=(X, target, sample_weight, l2_reg_strength, n_threads), + options={ + "maxiter": max_iter, + "maxls": 50, # default is 20 + "iprint": iprint, + "gtol": tol, + "ftol": 64 * np.finfo(float).eps, + }, + ) + n_iter_i = _check_optimize_result( + solver, + opt_res, + max_iter, + extra_warning_msg=_LOGISTIC_SOLVER_CONVERGENCE_MSG, + ) + w0, loss = opt_res.x, opt_res.fun + elif solver == "newton-cg": + l2_reg_strength = 1.0 / (C * sw_sum) + args = (X, target, sample_weight, l2_reg_strength, n_threads) + w0, n_iter_i = _newton_cg( + grad_hess=hess, + func=func, + grad=grad, + x0=w0, + args=args, + maxiter=max_iter, + tol=tol, + verbose=verbose, + ) + elif solver == "newton-cholesky": + l2_reg_strength = 1.0 / (C * sw_sum) + sol = NewtonCholeskySolver( + coef=w0, + linear_loss=loss, + l2_reg_strength=l2_reg_strength, + tol=tol, + max_iter=max_iter, + n_threads=n_threads, + verbose=verbose, + ) + w0 = sol.solve(X=X, y=target, sample_weight=sample_weight) + n_iter_i = sol.iteration + elif solver == "liblinear": + ( + coef_, + intercept_, + n_iter_i, + ) = _fit_liblinear( + X, + target, + C, + fit_intercept, + intercept_scaling, + None, + penalty, + dual, + verbose, + max_iter, + tol, + random_state, + sample_weight=sample_weight, + ) + if fit_intercept: + w0 = np.concatenate([coef_.ravel(), intercept_]) + else: + w0 = coef_.ravel() + # n_iter_i is an array for each class. However, `target` is always encoded + # in {-1, 1}, so we only take the first element of n_iter_i. 
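+            # (Aside, not part of the upstream comment: `.item()` turns that
+            #  single-entry array into a plain Python scalar, matching the scalar
+            #  n_iter_i produced by the lbfgs/newton-cg branches above.)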
+ n_iter_i = n_iter_i.item() + + elif solver in ["sag", "saga"]: + if multi_class == "multinomial": + target = target.astype(X.dtype, copy=False) + loss = "multinomial" + else: + loss = "log" + # alpha is for L2-norm, beta is for L1-norm + if penalty == "l1": + alpha = 0.0 + beta = 1.0 / C + elif penalty == "l2": + alpha = 1.0 / C + beta = 0.0 + else: # Elastic-Net penalty + alpha = (1.0 / C) * (1 - l1_ratio) + beta = (1.0 / C) * l1_ratio + + w0, n_iter_i, warm_start_sag = sag_solver( + X, + target, + sample_weight, + loss, + alpha, + beta, + max_iter, + tol, + verbose, + random_state, + False, + max_squared_sum, + warm_start_sag, + is_saga=(solver == "saga"), + ) + + else: + raise ValueError( + "solver must be one of {'liblinear', 'lbfgs', " + "'newton-cg', 'sag'}, got '%s' instead" % solver + ) + + if multi_class == "multinomial": + n_classes = max(2, classes.size) + if solver in ["lbfgs", "newton-cg", "newton-cholesky"]: + multi_w0 = np.reshape(w0, (n_classes, -1), order="F") + else: + multi_w0 = w0 + if n_classes == 2: + multi_w0 = multi_w0[1][np.newaxis, :] + coefs.append(multi_w0.copy()) + else: + coefs.append(w0.copy()) + + n_iter[i] = n_iter_i + + return np.array(coefs), np.array(Cs), n_iter + + +# helper function for LogisticCV +def _log_reg_scoring_path( + X, + y, + train, + test, + *, + pos_class, + Cs, + scoring, + fit_intercept, + max_iter, + tol, + class_weight, + verbose, + solver, + penalty, + dual, + intercept_scaling, + multi_class, + random_state, + max_squared_sum, + sample_weight, + l1_ratio, + score_params, +): + """Computes scores across logistic_regression_path + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training data. + + y : array-like of shape (n_samples,) or (n_samples, n_targets) + Target labels. + + train : list of indices + The indices of the train set. + + test : list of indices + The indices of the test set. + + pos_class : int + The class with respect to which we perform a one-vs-all fit. + If None, then it is assumed that the given problem is binary. + + Cs : int or list of floats + Each of the values in Cs describes the inverse of + regularization strength. If Cs is as an int, then a grid of Cs + values are chosen in a logarithmic scale between 1e-4 and 1e4. + + scoring : callable + A string (see :ref:`scoring_parameter`) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. For a list of scoring functions + that can be used, look at :mod:`sklearn.metrics`. + + fit_intercept : bool + If False, then the bias term is set to zero. Else the last + term of each coef_ gives us the intercept. + + max_iter : int + Maximum number of iterations for the solver. + + tol : float + Tolerance for stopping criteria. + + class_weight : dict or 'balanced' + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))`` + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + verbose : int + For the liblinear and lbfgs solvers set verbose to any positive + number for verbosity. + + solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'} + Decides which solver to use. 
+ + penalty : {'l1', 'l2', 'elasticnet'} + Used to specify the norm used in the penalization. The 'newton-cg', + 'sag' and 'lbfgs' solvers support only l2 penalties. 'elasticnet' is + only supported by the 'saga' solver. + + dual : bool + Dual or primal formulation. Dual formulation is only implemented for + l2 penalty with liblinear solver. Prefer dual=False when + n_samples > n_features. + + intercept_scaling : float + Useful only when the solver 'liblinear' is used + and self.fit_intercept is set to True. In this case, x becomes + [x, self.intercept_scaling], + i.e. a "synthetic" feature with constant value equals to + intercept_scaling is appended to the instance vector. + The intercept becomes intercept_scaling * synthetic feature weight + Note! the synthetic feature weight is subject to l1/l2 regularization + as all other features. + To lessen the effect of regularization on synthetic feature weight + (and therefore on the intercept) intercept_scaling has to be increased. + + multi_class : {'auto', 'ovr', 'multinomial'} + If the option chosen is 'ovr', then a binary problem is fit for each + label. For 'multinomial' the loss minimised is the multinomial loss fit + across the entire probability distribution, *even when the data is + binary*. 'multinomial' is unavailable when solver='liblinear'. + + random_state : int, RandomState instance + Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the + data. See :term:`Glossary ` for details. + + max_squared_sum : float + Maximum squared sum of X over samples. Used only in SAG solver. + If None, it will be computed, going through all the samples. + The value should be precomputed to speed up cross validation. + + sample_weight : array-like of shape(n_samples,) + Array of weights that are assigned to individual samples. + If not provided, then each sample is given unit weight. + + l1_ratio : float + The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only + used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent + to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent + to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a + combination of L1 and L2. + + score_params : dict + Parameters to pass to the `score` method of the underlying scorer. + + Returns + ------- + coefs : ndarray of shape (n_cs, n_features) or (n_cs, n_features + 1) + List of coefficients for the Logistic Regression model. If + fit_intercept is set to True then the second dimension will be + n_features + 1, where the last item represents the intercept. + + Cs : ndarray + Grid of Cs used for cross-validation. + + scores : ndarray of shape (n_cs,) + Scores obtained for each Cs. + + n_iter : ndarray of shape(n_cs,) + Actual number of iteration for each Cs. 
+ """ + X_train = X[train] + X_test = X[test] + y_train = y[train] + y_test = y[test] + + sw_train, sw_test = None, None + if sample_weight is not None: + sample_weight = _check_sample_weight(sample_weight, X) + sw_train = sample_weight[train] + sw_test = sample_weight[test] + + coefs, Cs, n_iter = _logistic_regression_path( + X_train, + y_train, + Cs=Cs, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + solver=solver, + max_iter=max_iter, + class_weight=class_weight, + pos_class=pos_class, + multi_class=multi_class, + tol=tol, + verbose=verbose, + dual=dual, + penalty=penalty, + intercept_scaling=intercept_scaling, + random_state=random_state, + check_input=False, + max_squared_sum=max_squared_sum, + sample_weight=sw_train, + ) + + log_reg = LogisticRegression(solver=solver, multi_class=multi_class) + + # The score method of Logistic Regression has a classes_ attribute. + if multi_class == "ovr": + log_reg.classes_ = np.array([-1, 1]) + elif multi_class == "multinomial": + log_reg.classes_ = np.unique(y_train) + else: + raise ValueError( + "multi_class should be either multinomial or ovr, got %d" % multi_class + ) + + if pos_class is not None: + mask = y_test == pos_class + y_test = np.ones(y_test.shape, dtype=np.float64) + y_test[~mask] = -1.0 + + scores = list() + + scoring = get_scorer(scoring) + for w in coefs: + if multi_class == "ovr": + w = w[np.newaxis, :] + if fit_intercept: + log_reg.coef_ = w[:, :-1] + log_reg.intercept_ = w[:, -1] + else: + log_reg.coef_ = w + log_reg.intercept_ = 0.0 + + if scoring is None: + scores.append(log_reg.score(X_test, y_test, sample_weight=sw_test)) + else: + score_params = score_params or {} + score_params = _check_method_params(X=X, params=score_params, indices=test) + scores.append(scoring(log_reg, X_test, y_test, **score_params)) + return coefs, Cs, np.array(scores), n_iter + + +class LogisticRegression(LinearClassifierMixin, SparseCoefMixin, BaseEstimator): + """ + Logistic Regression (aka logit, MaxEnt) classifier. + + This class implements regularized logistic regression using the + 'liblinear' library, 'newton-cg', 'sag', 'saga' and 'lbfgs' solvers. **Note + that regularization is applied by default**. It can handle both dense + and sparse input. Use C-ordered arrays or CSR matrices containing 64-bit + floats for optimal performance; any other input format will be converted + (and copied). + + The 'newton-cg', 'sag', and 'lbfgs' solvers support only L2 regularization + with primal formulation, or no regularization. The 'liblinear' solver + supports both L1 and L2 regularization, with a dual formulation only for + the L2 penalty. The Elastic-Net regularization is only supported by the + 'saga' solver. + + For :term:`multiclass` problems, only 'newton-cg', 'sag', 'saga' and 'lbfgs' + handle multinomial loss. 'liblinear' and 'newton-cholesky' only handle binary + classification but can be extended to handle multiclass by using + :class:`~sklearn.multiclass.OneVsRestClassifier`. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + penalty : {'l1', 'l2', 'elasticnet', None}, default='l2' + Specify the norm of the penalty: + + - `None`: no penalty is added; + - `'l2'`: add a L2 penalty term and it is the default choice; + - `'l1'`: add a L1 penalty term; + - `'elasticnet'`: both L1 and L2 penalty terms are added. + + .. warning:: + Some penalties may not work with some solvers. See the parameter + `solver` below, to know the compatibility between the penalty and + solver. + + .. 
versionadded:: 0.19 + l1 penalty with SAGA solver (allowing 'multinomial' + L1) + + dual : bool, default=False + Dual (constrained) or primal (regularized, see also + :ref:`this equation `) formulation. Dual formulation + is only implemented for l2 penalty with liblinear solver. Prefer dual=False when + n_samples > n_features. + + tol : float, default=1e-4 + Tolerance for stopping criteria. + + C : float, default=1.0 + Inverse of regularization strength; must be a positive float. + Like in support vector machines, smaller values specify stronger + regularization. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the decision function. + + intercept_scaling : float, default=1 + Useful only when the solver 'liblinear' is used + and self.fit_intercept is set to True. In this case, x becomes + [x, self.intercept_scaling], + i.e. a "synthetic" feature with constant value equal to + intercept_scaling is appended to the instance vector. + The intercept becomes ``intercept_scaling * synthetic_feature_weight``. + + Note! the synthetic feature weight is subject to l1/l2 regularization + as all other features. + To lessen the effect of regularization on synthetic feature weight + (and therefore on the intercept) intercept_scaling has to be increased. + + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + .. versionadded:: 0.17 + *class_weight='balanced'* + + random_state : int, RandomState instance, default=None + Used when ``solver`` == 'sag', 'saga' or 'liblinear' to shuffle the + data. See :term:`Glossary ` for details. + + solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \ + default='lbfgs' + + Algorithm to use in the optimization problem. Default is 'lbfgs'. + To choose a solver, you might want to consider the following aspects: + + - For small datasets, 'liblinear' is a good choice, whereas 'sag' + and 'saga' are faster for large ones; + - For :term:`multiclass` problems, all solvers except 'liblinear' minimize the + full multinomial loss; + - 'liblinear' can only handle binary classification by default. To apply a + one-versus-rest scheme for the multiclass setting one can wrap it with the + :class:`~sklearn.multiclass.OneVsRestClassifier`. + - 'newton-cholesky' is a good choice for + `n_samples` >> `n_features * n_classes`, especially with one-hot encoded + categorical features with rare categories. Be aware that the memory usage + of this solver has a quadratic dependency on `n_features * n_classes` + because it explicitly computes the full Hessian matrix. + + .. 
warning:: + The choice of the algorithm depends on the penalty chosen and on + (multinomial) multiclass support: + + ================= ============================== ====================== + solver penalty multinomial multiclass + ================= ============================== ====================== + 'lbfgs' 'l2', None yes + 'liblinear' 'l1', 'l2' no + 'newton-cg' 'l2', None yes + 'newton-cholesky' 'l2', None no + 'sag' 'l2', None yes + 'saga' 'elasticnet', 'l1', 'l2', None yes + ================= ============================== ====================== + + .. note:: + 'sag' and 'saga' fast convergence is only guaranteed on features + with approximately the same scale. You can preprocess the data with + a scaler from :mod:`sklearn.preprocessing`. + + .. seealso:: + Refer to the :ref:`User Guide ` for more + information regarding :class:`LogisticRegression` and more specifically the + :ref:`Table ` + summarizing solver/penalty supports. + + .. versionadded:: 0.17 + Stochastic Average Gradient descent solver. + .. versionadded:: 0.19 + SAGA solver. + .. versionchanged:: 0.22 + The default solver changed from 'liblinear' to 'lbfgs' in 0.22. + .. versionadded:: 1.2 + newton-cholesky solver. + + max_iter : int, default=100 + Maximum number of iterations taken for the solvers to converge. + + multi_class : {'auto', 'ovr', 'multinomial'}, default='auto' + If the option chosen is 'ovr', then a binary problem is fit for each + label. For 'multinomial' the loss minimised is the multinomial loss fit + across the entire probability distribution, *even when the data is + binary*. 'multinomial' is unavailable when solver='liblinear'. + 'auto' selects 'ovr' if the data is binary, or if solver='liblinear', + and otherwise selects 'multinomial'. + + .. versionadded:: 0.18 + Stochastic Average Gradient descent solver for 'multinomial' case. + .. versionchanged:: 0.22 + Default changed from 'ovr' to 'auto' in 0.22. + .. deprecated:: 1.5 + ``multi_class`` was deprecated in version 1.5 and will be removed in 1.7. + From then on, the recommended 'multinomial' will always be used for + `n_classes >= 3`. + Solvers that do not support 'multinomial' will raise an error. + Use `sklearn.multiclass.OneVsRestClassifier(LogisticRegression())` if you + still want to use OvR. + + verbose : int, default=0 + For the liblinear and lbfgs solvers set verbose to any positive + number for verbosity. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + Useless for liblinear solver. See :term:`the Glossary `. + + .. versionadded:: 0.17 + *warm_start* to support *lbfgs*, *newton-cg*, *sag*, *saga* solvers. + + n_jobs : int, default=None + Number of CPU cores used when parallelizing over classes if + multi_class='ovr'". This parameter is ignored when the ``solver`` is + set to 'liblinear' regardless of whether 'multi_class' is specified or + not. ``None`` means 1 unless in a :obj:`joblib.parallel_backend` + context. ``-1`` means using all processors. + See :term:`Glossary ` for more details. + + l1_ratio : float, default=None + The Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. Only + used if ``penalty='elasticnet'``. Setting ``l1_ratio=0`` is equivalent + to using ``penalty='l2'``, while setting ``l1_ratio=1`` is equivalent + to using ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a + combination of L1 and L2. 
+ + Attributes + ---------- + + classes_ : ndarray of shape (n_classes, ) + A list of class labels known to the classifier. + + coef_ : ndarray of shape (1, n_features) or (n_classes, n_features) + Coefficient of the features in the decision function. + + `coef_` is of shape (1, n_features) when the given problem is binary. + In particular, when `multi_class='multinomial'`, `coef_` corresponds + to outcome 1 (True) and `-coef_` corresponds to outcome 0 (False). + + intercept_ : ndarray of shape (1,) or (n_classes,) + Intercept (a.k.a. bias) added to the decision function. + + If `fit_intercept` is set to False, the intercept is set to zero. + `intercept_` is of shape (1,) when the given problem is binary. + In particular, when `multi_class='multinomial'`, `intercept_` + corresponds to outcome 1 (True) and `-intercept_` corresponds to + outcome 0 (False). + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + n_iter_ : ndarray of shape (n_classes,) or (1, ) + Actual number of iterations for all classes. If binary or multinomial, + it returns only 1 element. For liblinear solver, only the maximum + number of iteration across all classes is given. + + .. versionchanged:: 0.20 + + In SciPy <= 1.0.0 the number of lbfgs iterations may exceed + ``max_iter``. ``n_iter_`` will now report at most ``max_iter``. + + See Also + -------- + SGDClassifier : Incrementally trained logistic regression (when given + the parameter ``loss="log_loss"``). + LogisticRegressionCV : Logistic regression with built-in cross validation. + + Notes + ----- + The underlying C implementation uses a random number generator to + select features when fitting the model. It is thus not uncommon, + to have slightly different results for the same input data. If + that happens, try with a smaller tol parameter. + + Predict output may not match that of standalone liblinear in certain + cases. See :ref:`differences from liblinear ` + in the narrative documentation. + + References + ---------- + + L-BFGS-B -- Software for Large-scale Bound-constrained Optimization + Ciyou Zhu, Richard Byrd, Jorge Nocedal and Jose Luis Morales. + http://users.iems.northwestern.edu/~nocedal/lbfgsb.html + + LIBLINEAR -- A Library for Large Linear Classification + https://www.csie.ntu.edu.tw/~cjlin/liblinear/ + + SAG -- Mark Schmidt, Nicolas Le Roux, and Francis Bach + Minimizing Finite Sums with the Stochastic Average Gradient + https://hal.inria.fr/hal-00860051/document + + SAGA -- Defazio, A., Bach F. & Lacoste-Julien S. (2014). + :arxiv:`"SAGA: A Fast Incremental Gradient Method With Support + for Non-Strongly Convex Composite Objectives" <1407.0202>` + + Hsiang-Fu Yu, Fang-Lan Huang, Chih-Jen Lin (2011). Dual coordinate descent + methods for logistic regression and maximum entropy models. + Machine Learning 85(1-2):41-75. + https://www.csie.ntu.edu.tw/~cjlin/papers/maxent_dual.pdf + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.linear_model import LogisticRegression + >>> X, y = load_iris(return_X_y=True) + >>> clf = LogisticRegression(random_state=0).fit(X, y) + >>> clf.predict(X[:2, :]) + array([0, 0]) + >>> clf.predict_proba(X[:2, :]) + array([[9.8...e-01, 1.8...e-02, 1.4...e-08], + [9.7...e-01, 2.8...e-02, ...e-08]]) + >>> clf.score(X, y) + 0.97... 
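+
+    A further illustrative variant showing class re-weighting on the same
+    data; the coefficient array has one row per class and one column per
+    feature:
+
+    >>> clf_balanced = LogisticRegression(class_weight="balanced",
+    ...                                   random_state=0).fit(X, y)
+    >>> clf_balanced.coef_.shape
+    (3, 4)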
+ + For a comaprison of the LogisticRegression with other classifiers see: + :ref:`sphx_glr_auto_examples_classification_plot_classification_probability.py`. + """ + + _parameter_constraints: dict = { + "penalty": [StrOptions({"l1", "l2", "elasticnet"}), None], + "dual": ["boolean"], + "tol": [Interval(Real, 0, None, closed="left")], + "C": [Interval(Real, 0, None, closed="right")], + "fit_intercept": ["boolean"], + "intercept_scaling": [Interval(Real, 0, None, closed="neither")], + "class_weight": [dict, StrOptions({"balanced"}), None], + "random_state": ["random_state"], + "solver": [ + StrOptions( + {"lbfgs", "liblinear", "newton-cg", "newton-cholesky", "sag", "saga"} + ) + ], + "max_iter": [Interval(Integral, 0, None, closed="left")], + "verbose": ["verbose"], + "warm_start": ["boolean"], + "n_jobs": [None, Integral], + "l1_ratio": [Interval(Real, 0, 1, closed="both"), None], + "multi_class": [ + StrOptions({"auto", "ovr", "multinomial"}), + Hidden(StrOptions({"deprecated"})), + ], + } + + def __init__( + self, + penalty="l2", + *, + dual=False, + tol=1e-4, + C=1.0, + fit_intercept=True, + intercept_scaling=1, + class_weight=None, + random_state=None, + solver="lbfgs", + max_iter=100, + multi_class="deprecated", + verbose=0, + warm_start=False, + n_jobs=None, + l1_ratio=None, + ): + self.penalty = penalty + self.dual = dual + self.tol = tol + self.C = C + self.fit_intercept = fit_intercept + self.intercept_scaling = intercept_scaling + self.class_weight = class_weight + self.random_state = random_state + self.solver = solver + self.max_iter = max_iter + self.multi_class = multi_class + self.verbose = verbose + self.warm_start = warm_start + self.n_jobs = n_jobs + self.l1_ratio = l1_ratio + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None): + """ + Fit the model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target vector relative to X. + + sample_weight : array-like of shape (n_samples,) default=None + Array of weights that are assigned to individual samples. + If not provided, then each sample is given unit weight. + + .. versionadded:: 0.17 + *sample_weight* support to LogisticRegression. + + Returns + ------- + self + Fitted estimator. + + Notes + ----- + The SAGA solver supports both float64 and float32 bit arrays. + """ + solver = _check_solver(self.solver, self.penalty, self.dual) + + if self.penalty != "elasticnet" and self.l1_ratio is not None: + warnings.warn( + "l1_ratio parameter is only used when penalty is " + "'elasticnet'. 
Got " + "(penalty={})".format(self.penalty) + ) + + if self.penalty == "elasticnet" and self.l1_ratio is None: + raise ValueError("l1_ratio must be specified when penalty is elasticnet.") + + if self.penalty is None: + if self.C != 1.0: # default values + warnings.warn( + "Setting penalty=None will ignore the C and l1_ratio parameters" + ) + # Note that check for l1_ratio is done right above + C_ = np.inf + penalty = "l2" + else: + C_ = self.C + penalty = self.penalty + + if solver == "lbfgs": + _dtype = np.float64 + else: + _dtype = [np.float64, np.float32] + + X, y = validate_data( + self, + X, + y, + accept_sparse="csr", + dtype=_dtype, + order="C", + accept_large_sparse=solver not in ["liblinear", "sag", "saga"], + ) + check_classification_targets(y) + self.classes_ = np.unique(y) + + # TODO(1.7) remove multi_class + multi_class = self.multi_class + if self.multi_class == "multinomial" and len(self.classes_) == 2: + warnings.warn( + ( + "'multi_class' was deprecated in version 1.5 and will be removed in" + " 1.7. From then on, binary problems will be fit as proper binary " + " logistic regression models (as if multi_class='ovr' were set)." + " Leave it to its default value to avoid this warning." + ), + FutureWarning, + ) + elif self.multi_class in ("multinomial", "auto"): + warnings.warn( + ( + "'multi_class' was deprecated in version 1.5 and will be removed in" + " 1.7. From then on, it will always use 'multinomial'." + " Leave it to its default value to avoid this warning." + ), + FutureWarning, + ) + elif self.multi_class == "ovr": + warnings.warn( + ( + "'multi_class' was deprecated in version 1.5 and will be removed in" + " 1.7. Use OneVsRestClassifier(LogisticRegression(..)) instead." + " Leave it to its default value to avoid this warning." + ), + FutureWarning, + ) + else: + # Set to old default value. + multi_class = "auto" + multi_class = _check_multi_class(multi_class, solver, len(self.classes_)) + + if solver == "liblinear": + if effective_n_jobs(self.n_jobs) != 1: + warnings.warn( + "'n_jobs' > 1 does not have any effect when" + " 'solver' is set to 'liblinear'. Got 'n_jobs'" + " = {}.".format(effective_n_jobs(self.n_jobs)) + ) + self.coef_, self.intercept_, self.n_iter_ = _fit_liblinear( + X, + y, + self.C, + self.fit_intercept, + self.intercept_scaling, + self.class_weight, + self.penalty, + self.dual, + self.verbose, + self.max_iter, + self.tol, + self.random_state, + sample_weight=sample_weight, + ) + return self + + if solver in ["sag", "saga"]: + max_squared_sum = row_norms(X, squared=True).max() + else: + max_squared_sum = None + + n_classes = len(self.classes_) + classes_ = self.classes_ + if n_classes < 2: + raise ValueError( + "This solver needs samples of at least 2 classes" + " in the data, but the data contains only one" + " class: %r" % classes_[0] + ) + + if len(self.classes_) == 2: + n_classes = 1 + classes_ = classes_[1:] + + if self.warm_start: + warm_start_coef = getattr(self, "coef_", None) + else: + warm_start_coef = None + if warm_start_coef is not None and self.fit_intercept: + warm_start_coef = np.append( + warm_start_coef, self.intercept_[:, np.newaxis], axis=1 + ) + + # Hack so that we iterate only once for the multinomial case. + if multi_class == "multinomial": + classes_ = [None] + warm_start_coef = [warm_start_coef] + if warm_start_coef is None: + warm_start_coef = [None] * n_classes + + path_func = delayed(_logistic_regression_path) + + # The SAG solver releases the GIL so it's more efficient to use + # threads for this solver. 
+ if solver in ["sag", "saga"]: + prefer = "threads" + else: + prefer = "processes" + + # TODO: Refactor this to avoid joblib parallelism entirely when doing binary + # and multinomial multiclass classification and use joblib only for the + # one-vs-rest multiclass case. + if ( + solver in ["lbfgs", "newton-cg", "newton-cholesky"] + and len(classes_) == 1 + and effective_n_jobs(self.n_jobs) == 1 + ): + # In the future, we would like n_threads = _openmp_effective_n_threads() + # For the time being, we just do + n_threads = 1 + else: + n_threads = 1 + + fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)( + path_func( + X, + y, + pos_class=class_, + Cs=[C_], + l1_ratio=self.l1_ratio, + fit_intercept=self.fit_intercept, + tol=self.tol, + verbose=self.verbose, + solver=solver, + multi_class=multi_class, + max_iter=self.max_iter, + class_weight=self.class_weight, + check_input=False, + random_state=self.random_state, + coef=warm_start_coef_, + penalty=penalty, + max_squared_sum=max_squared_sum, + sample_weight=sample_weight, + n_threads=n_threads, + ) + for class_, warm_start_coef_ in zip(classes_, warm_start_coef) + ) + + fold_coefs_, _, n_iter_ = zip(*fold_coefs_) + self.n_iter_ = np.asarray(n_iter_, dtype=np.int32)[:, 0] + + n_features = X.shape[1] + if multi_class == "multinomial": + self.coef_ = fold_coefs_[0][0] + else: + self.coef_ = np.asarray(fold_coefs_) + self.coef_ = self.coef_.reshape( + n_classes, n_features + int(self.fit_intercept) + ) + + if self.fit_intercept: + self.intercept_ = self.coef_[:, -1] + self.coef_ = self.coef_[:, :-1] + else: + self.intercept_ = np.zeros(n_classes) + + return self + + def predict_proba(self, X): + """ + Probability estimates. + + The returned estimates for all classes are ordered by the + label of classes. + + For a multi_class problem, if multi_class is set to be "multinomial" + the softmax function is used to find the predicted probability of + each class. + Else use a one-vs-rest approach, i.e. calculate the probability + of each class assuming it to be positive using the logistic function + and normalize these values across all the classes. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Vector to be scored, where `n_samples` is the number of samples and + `n_features` is the number of features. + + Returns + ------- + T : array-like of shape (n_samples, n_classes) + Returns the probability of the sample for each class in the model, + where classes are ordered as they are in ``self.classes_``. + """ + check_is_fitted(self) + + ovr = self.multi_class in ["ovr", "warn"] or ( + self.multi_class in ["auto", "deprecated"] + and (self.classes_.size <= 2 or self.solver == "liblinear") + ) + if ovr: + return super()._predict_proba_lr(X) + else: + decision = self.decision_function(X) + if decision.ndim == 1: + # Workaround for multi_class="multinomial" and binary outcomes + # which requires softmax prediction with only a 1D decision. + decision_2d = np.c_[-decision, decision] + else: + decision_2d = decision + return softmax(decision_2d, copy=False) + + def predict_log_proba(self, X): + """ + Predict logarithm of probability estimates. + + The returned estimates for all classes are ordered by the + label of classes. + + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Vector to be scored, where `n_samples` is the number of samples and + `n_features` is the number of features. 
+ + Returns + ------- + T : array-like of shape (n_samples, n_classes) + Returns the log-probability of the sample for each class in the + model, where classes are ordered as they are in ``self.classes_``. + """ + return np.log(self.predict_proba(X)) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + return tags + + +class LogisticRegressionCV(LogisticRegression, LinearClassifierMixin, BaseEstimator): + """Logistic Regression CV (aka logit, MaxEnt) classifier. + + See glossary entry for :term:`cross-validation estimator`. + + This class implements logistic regression using liblinear, newton-cg, sag + or lbfgs optimizer. The newton-cg, sag and lbfgs solvers support only L2 + regularization with primal formulation. The liblinear solver supports both + L1 and L2 regularization, with a dual formulation only for the L2 penalty. + Elastic-Net penalty is only supported by the saga solver. + + For the grid of `Cs` values and `l1_ratios` values, the best hyperparameter + is selected by the cross-validator + :class:`~sklearn.model_selection.StratifiedKFold`, but it can be changed + using the :term:`cv` parameter. The 'newton-cg', 'sag', 'saga' and 'lbfgs' + solvers can warm-start the coefficients (see :term:`Glossary`). + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + Cs : int or list of floats, default=10 + Each of the values in Cs describes the inverse of regularization + strength. If Cs is as an int, then a grid of Cs values are chosen + in a logarithmic scale between 1e-4 and 1e4. + Like in support vector machines, smaller values specify stronger + regularization. + + fit_intercept : bool, default=True + Specifies if a constant (a.k.a. bias or intercept) should be + added to the decision function. + + cv : int or cross-validation generator, default=None + The default cross-validation generator used is Stratified K-Folds. + If an integer is provided, then it is the number of folds used. + See the module :mod:`sklearn.model_selection` module for the + list of possible cross-validation objects. + + .. versionchanged:: 0.22 + ``cv`` default value if None changed from 3-fold to 5-fold. + + dual : bool, default=False + Dual (constrained) or primal (regularized, see also + :ref:`this equation `) formulation. Dual formulation + is only implemented for l2 penalty with liblinear solver. Prefer dual=False when + n_samples > n_features. + + penalty : {'l1', 'l2', 'elasticnet'}, default='l2' + Specify the norm of the penalty: + + - `'l2'`: add a L2 penalty term (used by default); + - `'l1'`: add a L1 penalty term; + - `'elasticnet'`: both L1 and L2 penalty terms are added. + + .. warning:: + Some penalties may not work with some solvers. See the parameter + `solver` below, to know the compatibility between the penalty and + solver. + + scoring : str or callable, default=None + A string (see :ref:`scoring_parameter`) or + a scorer callable object / function with signature + ``scorer(estimator, X, y)``. For a list of scoring functions + that can be used, look at :mod:`sklearn.metrics`. The + default scoring option used is 'accuracy'. + + solver : {'lbfgs', 'liblinear', 'newton-cg', 'newton-cholesky', 'sag', 'saga'}, \ + default='lbfgs' + + Algorithm to use in the optimization problem. Default is 'lbfgs'. 
+ To choose a solver, you might want to consider the following aspects: + + - For small datasets, 'liblinear' is a good choice, whereas 'sag' + and 'saga' are faster for large ones; + - For multiclass problems, all solvers except 'liblinear' minimize the full + multinomial loss; + - 'liblinear' might be slower in :class:`LogisticRegressionCV` + because it does not handle warm-starting. + - 'liblinear' can only handle binary classification by default. To apply a + one-versus-rest scheme for the multiclass setting one can wrap it with the + :class:`~sklearn.multiclass.OneVsRestClassifier`. + - 'newton-cholesky' is a good choice for + `n_samples` >> `n_features * n_classes`, especially with one-hot encoded + categorical features with rare categories. Be aware that the memory usage + of this solver has a quadratic dependency on `n_features * n_classes` + because it explicitly computes the full Hessian matrix. + + .. warning:: + The choice of the algorithm depends on the penalty chosen and on + (multinomial) multiclass support: + + ================= ============================== ====================== + solver penalty multinomial multiclass + ================= ============================== ====================== + 'lbfgs' 'l2' yes + 'liblinear' 'l1', 'l2' no + 'newton-cg' 'l2' yes + 'newton-cholesky' 'l2', no + 'sag' 'l2', yes + 'saga' 'elasticnet', 'l1', 'l2' yes + ================= ============================== ====================== + + .. note:: + 'sag' and 'saga' fast convergence is only guaranteed on features + with approximately the same scale. You can preprocess the data with + a scaler from :mod:`sklearn.preprocessing`. + + .. versionadded:: 0.17 + Stochastic Average Gradient descent solver. + .. versionadded:: 0.19 + SAGA solver. + .. versionadded:: 1.2 + newton-cholesky solver. + + tol : float, default=1e-4 + Tolerance for stopping criteria. + + max_iter : int, default=100 + Maximum number of iterations of the optimization algorithm. + + class_weight : dict or 'balanced', default=None + Weights associated with classes in the form ``{class_label: weight}``. + If not given, all classes are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + Note that these weights will be multiplied with sample_weight (passed + through the fit method) if sample_weight is specified. + + .. versionadded:: 0.17 + class_weight == 'balanced' + + n_jobs : int, default=None + Number of CPU cores used during the cross-validation loop. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + verbose : int, default=0 + For the 'liblinear', 'sag' and 'lbfgs' solvers set verbose to any + positive number for verbosity. + + refit : bool, default=True + If set to True, the scores are averaged across all folds, and the + coefs and the C that corresponds to the best score is taken, and a + final refit is done using these parameters. + Otherwise the coefs, intercepts and C that correspond to the + best scores across folds are averaged. + + intercept_scaling : float, default=1 + Useful only when the solver 'liblinear' is used + and self.fit_intercept is set to True. In this case, x becomes + [x, self.intercept_scaling], + i.e. a "synthetic" feature with constant value equal to + intercept_scaling is appended to the instance vector. 
+ The intercept becomes ``intercept_scaling * synthetic_feature_weight``. + + Note! the synthetic feature weight is subject to l1/l2 regularization + as all other features. + To lessen the effect of regularization on synthetic feature weight + (and therefore on the intercept) intercept_scaling has to be increased. + + multi_class : {'auto, 'ovr', 'multinomial'}, default='auto' + If the option chosen is 'ovr', then a binary problem is fit for each + label. For 'multinomial' the loss minimised is the multinomial loss fit + across the entire probability distribution, *even when the data is + binary*. 'multinomial' is unavailable when solver='liblinear'. + 'auto' selects 'ovr' if the data is binary, or if solver='liblinear', + and otherwise selects 'multinomial'. + + .. versionadded:: 0.18 + Stochastic Average Gradient descent solver for 'multinomial' case. + .. versionchanged:: 0.22 + Default changed from 'ovr' to 'auto' in 0.22. + .. deprecated:: 1.5 + ``multi_class`` was deprecated in version 1.5 and will be removed in 1.7. + From then on, the recommended 'multinomial' will always be used for + `n_classes >= 3`. + Solvers that do not support 'multinomial' will raise an error. + Use `sklearn.multiclass.OneVsRestClassifier(LogisticRegressionCV())` if you + still want to use OvR. + + random_state : int, RandomState instance, default=None + Used when `solver='sag'`, 'saga' or 'liblinear' to shuffle the data. + Note that this only applies to the solver and not the cross-validation + generator. See :term:`Glossary ` for details. + + l1_ratios : list of float, default=None + The list of Elastic-Net mixing parameter, with ``0 <= l1_ratio <= 1``. + Only used if ``penalty='elasticnet'``. A value of 0 is equivalent to + using ``penalty='l2'``, while 1 is equivalent to using + ``penalty='l1'``. For ``0 < l1_ratio <1``, the penalty is a combination + of L1 and L2. + + Attributes + ---------- + classes_ : ndarray of shape (n_classes, ) + A list of class labels known to the classifier. + + coef_ : ndarray of shape (1, n_features) or (n_classes, n_features) + Coefficient of the features in the decision function. + + `coef_` is of shape (1, n_features) when the given problem + is binary. + + intercept_ : ndarray of shape (1,) or (n_classes,) + Intercept (a.k.a. bias) added to the decision function. + + If `fit_intercept` is set to False, the intercept is set to zero. + `intercept_` is of shape(1,) when the problem is binary. + + Cs_ : ndarray of shape (n_cs) + Array of C i.e. inverse of regularization parameter values used + for cross-validation. + + l1_ratios_ : ndarray of shape (n_l1_ratios) + Array of l1_ratios used for cross-validation. If no l1_ratio is used + (i.e. penalty is not 'elasticnet'), this is set to ``[None]`` + + coefs_paths_ : ndarray of shape (n_folds, n_cs, n_features) or \ + (n_folds, n_cs, n_features + 1) + dict with classes as the keys, and the path of coefficients obtained + during cross-validating across each fold and then across each Cs + after doing an OvR for the corresponding class as values. + If the 'multi_class' option is set to 'multinomial', then + the coefs_paths are the coefficients corresponding to each class. + Each dict value has shape ``(n_folds, n_cs, n_features)`` or + ``(n_folds, n_cs, n_features + 1)`` depending on whether the + intercept is fit or not. If ``penalty='elasticnet'``, the shape is + ``(n_folds, n_cs, n_l1_ratios_, n_features)`` or + ``(n_folds, n_cs, n_l1_ratios_, n_features + 1)``. 
+ + scores_ : dict + dict with classes as the keys, and the values as the + grid of scores obtained during cross-validating each fold, after doing + an OvR for the corresponding class. If the 'multi_class' option + given is 'multinomial' then the same scores are repeated across + all classes, since this is the multinomial class. Each dict value + has shape ``(n_folds, n_cs)`` or ``(n_folds, n_cs, n_l1_ratios)`` if + ``penalty='elasticnet'``. + + C_ : ndarray of shape (n_classes,) or (n_classes - 1,) + Array of C that maps to the best scores across every class. If refit is + set to False, then for each class, the best C is the average of the + C's that correspond to the best scores for each fold. + `C_` is of shape(n_classes,) when the problem is binary. + + l1_ratio_ : ndarray of shape (n_classes,) or (n_classes - 1,) + Array of l1_ratio that maps to the best scores across every class. If + refit is set to False, then for each class, the best l1_ratio is the + average of the l1_ratio's that correspond to the best scores for each + fold. `l1_ratio_` is of shape(n_classes,) when the problem is binary. + + n_iter_ : ndarray of shape (n_classes, n_folds, n_cs) or (1, n_folds, n_cs) + Actual number of iterations for all classes, folds and Cs. + In the binary or multinomial cases, the first dimension is equal to 1. + If ``penalty='elasticnet'``, the shape is ``(n_classes, n_folds, + n_cs, n_l1_ratios)`` or ``(1, n_folds, n_cs, n_l1_ratios)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + LogisticRegression : Logistic regression without tuning the + hyperparameter `C`. + + Examples + -------- + >>> from sklearn.datasets import load_iris + >>> from sklearn.linear_model import LogisticRegressionCV + >>> X, y = load_iris(return_X_y=True) + >>> clf = LogisticRegressionCV(cv=5, random_state=0).fit(X, y) + >>> clf.predict(X[:2, :]) + array([0, 0]) + >>> clf.predict_proba(X[:2, :]).shape + (2, 3) + >>> clf.score(X, y) + 0.98... 
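+
+    An additional illustrative check of the cross-validated hyperparameters:
+    with the default ``Cs=10`` a grid of ten ``C`` values is searched, and
+    ``C_`` stores one selected value per class:
+
+    >>> clf.Cs_.shape
+    (10,)
+    >>> clf.C_.shape
+    (3,)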
+ """ + + _parameter_constraints: dict = {**LogisticRegression._parameter_constraints} + + for param in ["C", "warm_start", "l1_ratio"]: + _parameter_constraints.pop(param) + + _parameter_constraints.update( + { + "Cs": [Interval(Integral, 1, None, closed="left"), "array-like"], + "cv": ["cv_object"], + "scoring": [StrOptions(set(get_scorer_names())), callable, None], + "l1_ratios": ["array-like", None], + "refit": ["boolean"], + "penalty": [StrOptions({"l1", "l2", "elasticnet"})], + } + ) + + def __init__( + self, + *, + Cs=10, + fit_intercept=True, + cv=None, + dual=False, + penalty="l2", + scoring=None, + solver="lbfgs", + tol=1e-4, + max_iter=100, + class_weight=None, + n_jobs=None, + verbose=0, + refit=True, + intercept_scaling=1.0, + multi_class="deprecated", + random_state=None, + l1_ratios=None, + ): + self.Cs = Cs + self.fit_intercept = fit_intercept + self.cv = cv + self.dual = dual + self.penalty = penalty + self.scoring = scoring + self.tol = tol + self.max_iter = max_iter + self.class_weight = class_weight + self.n_jobs = n_jobs + self.verbose = verbose + self.solver = solver + self.refit = refit + self.intercept_scaling = intercept_scaling + self.multi_class = multi_class + self.random_state = random_state + self.l1_ratios = l1_ratios + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, sample_weight=None, **params): + """Fit the model according to the given training data. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Training vector, where `n_samples` is the number of samples and + `n_features` is the number of features. + + y : array-like of shape (n_samples,) + Target vector relative to X. + + sample_weight : array-like of shape (n_samples,) default=None + Array of weights that are assigned to individual samples. + If not provided, then each sample is given unit weight. + + **params : dict + Parameters to pass to the underlying splitter and scorer. + + .. versionadded:: 1.4 + + Returns + ------- + self : object + Fitted LogisticRegressionCV estimator. + """ + _raise_for_params(params, self, "fit") + + solver = _check_solver(self.solver, self.penalty, self.dual) + + if self.penalty == "elasticnet": + if ( + self.l1_ratios is None + or len(self.l1_ratios) == 0 + or any( + ( + not isinstance(l1_ratio, numbers.Number) + or l1_ratio < 0 + or l1_ratio > 1 + ) + for l1_ratio in self.l1_ratios + ) + ): + raise ValueError( + "l1_ratios must be a list of numbers between " + "0 and 1; got (l1_ratios=%r)" % self.l1_ratios + ) + l1_ratios_ = self.l1_ratios + else: + if self.l1_ratios is not None: + warnings.warn( + "l1_ratios parameter is only used when penalty " + "is 'elasticnet'. 
Got (penalty={})".format(self.penalty) + ) + + l1_ratios_ = [None] + + X, y = validate_data( + self, + X, + y, + accept_sparse="csr", + dtype=np.float64, + order="C", + accept_large_sparse=solver not in ["liblinear", "sag", "saga"], + ) + check_classification_targets(y) + + class_weight = self.class_weight + + # Encode for string labels + label_encoder = LabelEncoder().fit(y) + y = label_encoder.transform(y) + if isinstance(class_weight, dict): + class_weight = { + label_encoder.transform([cls])[0]: v for cls, v in class_weight.items() + } + + # The original class labels + classes = self.classes_ = label_encoder.classes_ + encoded_labels = label_encoder.transform(label_encoder.classes_) + + # TODO(1.7) remove multi_class + multi_class = self.multi_class + if self.multi_class == "multinomial" and len(self.classes_) == 2: + warnings.warn( + ( + "'multi_class' was deprecated in version 1.5 and will be removed in" + " 1.7. From then on, binary problems will be fit as proper binary " + " logistic regression models (as if multi_class='ovr' were set)." + " Leave it to its default value to avoid this warning." + ), + FutureWarning, + ) + elif self.multi_class in ("multinomial", "auto"): + warnings.warn( + ( + "'multi_class' was deprecated in version 1.5 and will be removed in" + " 1.7. From then on, it will always use 'multinomial'." + " Leave it to its default value to avoid this warning." + ), + FutureWarning, + ) + elif self.multi_class == "ovr": + warnings.warn( + ( + "'multi_class' was deprecated in version 1.5 and will be removed in" + " 1.7. Use OneVsRestClassifier(LogisticRegressionCV(..)) instead." + " Leave it to its default value to avoid this warning." + ), + FutureWarning, + ) + else: + # Set to old default value. + multi_class = "auto" + multi_class = _check_multi_class(multi_class, solver, len(classes)) + + if solver in ["sag", "saga"]: + max_squared_sum = row_norms(X, squared=True).max() + else: + max_squared_sum = None + + if _routing_enabled(): + routed_params = process_routing( + self, + "fit", + sample_weight=sample_weight, + **params, + ) + else: + routed_params = Bunch() + routed_params.splitter = Bunch(split={}) + routed_params.scorer = Bunch(score=params) + if sample_weight is not None: + routed_params.scorer.score["sample_weight"] = sample_weight + + # init cross-validation generator + cv = check_cv(self.cv, y, classifier=True) + folds = list(cv.split(X, y, **routed_params.splitter.split)) + + # Use the label encoded classes + n_classes = len(encoded_labels) + + if n_classes < 2: + raise ValueError( + "This solver needs samples of at least 2 classes" + " in the data, but the data contains only one" + " class: %r" % classes[0] + ) + + if n_classes == 2: + # OvR in case of binary problems is as good as fitting + # the higher label + n_classes = 1 + encoded_labels = encoded_labels[1:] + classes = classes[1:] + + # We need this hack to iterate only once over labels, in the case of + # multi_class = multinomial, without changing the value of the labels. 
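+        # In the multinomial case the single "label" is None and one joint
+        # model is fitted over all classes; for OvR the loop runs once per
+        # encoded class label.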
+ if multi_class == "multinomial": + iter_encoded_labels = iter_classes = [None] + else: + iter_encoded_labels = encoded_labels + iter_classes = classes + + # compute the class weights for the entire dataset y + if class_weight == "balanced": + class_weight = compute_class_weight( + class_weight, classes=np.arange(len(self.classes_)), y=y + ) + class_weight = dict(enumerate(class_weight)) + + path_func = delayed(_log_reg_scoring_path) + + # The SAG solver releases the GIL so it's more efficient to use + # threads for this solver. + if self.solver in ["sag", "saga"]: + prefer = "threads" + else: + prefer = "processes" + + fold_coefs_ = Parallel(n_jobs=self.n_jobs, verbose=self.verbose, prefer=prefer)( + path_func( + X, + y, + train, + test, + pos_class=label, + Cs=self.Cs, + fit_intercept=self.fit_intercept, + penalty=self.penalty, + dual=self.dual, + solver=solver, + tol=self.tol, + max_iter=self.max_iter, + verbose=self.verbose, + class_weight=class_weight, + scoring=self.scoring, + multi_class=multi_class, + intercept_scaling=self.intercept_scaling, + random_state=self.random_state, + max_squared_sum=max_squared_sum, + sample_weight=sample_weight, + l1_ratio=l1_ratio, + score_params=routed_params.scorer.score, + ) + for label in iter_encoded_labels + for train, test in folds + for l1_ratio in l1_ratios_ + ) + + # _log_reg_scoring_path will output different shapes depending on the + # multi_class param, so we need to reshape the outputs accordingly. + # Cs is of shape (n_classes . n_folds . n_l1_ratios, n_Cs) and all the + # rows are equal, so we just take the first one. + # After reshaping, + # - scores is of shape (n_classes, n_folds, n_Cs . n_l1_ratios) + # - coefs_paths is of shape + # (n_classes, n_folds, n_Cs . n_l1_ratios, n_features) + # - n_iter is of shape + # (n_classes, n_folds, n_Cs . n_l1_ratios) or + # (1, n_folds, n_Cs . n_l1_ratios) + coefs_paths, Cs, scores, n_iter_ = zip(*fold_coefs_) + self.Cs_ = Cs[0] + if multi_class == "multinomial": + coefs_paths = np.reshape( + coefs_paths, + (len(folds), len(l1_ratios_) * len(self.Cs_), n_classes, -1), + ) + # equiv to coefs_paths = np.moveaxis(coefs_paths, (0, 1, 2, 3), + # (1, 2, 0, 3)) + coefs_paths = np.swapaxes(coefs_paths, 0, 1) + coefs_paths = np.swapaxes(coefs_paths, 0, 2) + self.n_iter_ = np.reshape( + n_iter_, (1, len(folds), len(self.Cs_) * len(l1_ratios_)) + ) + # repeat same scores across all classes + scores = np.tile(scores, (n_classes, 1, 1)) + else: + coefs_paths = np.reshape( + coefs_paths, + (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_), -1), + ) + self.n_iter_ = np.reshape( + n_iter_, (n_classes, len(folds), len(self.Cs_) * len(l1_ratios_)) + ) + scores = np.reshape(scores, (n_classes, len(folds), -1)) + self.scores_ = dict(zip(classes, scores)) + self.coefs_paths_ = dict(zip(classes, coefs_paths)) + + self.C_ = list() + self.l1_ratio_ = list() + self.coef_ = np.empty((n_classes, X.shape[1])) + self.intercept_ = np.zeros(n_classes) + for index, (cls, encoded_label) in enumerate( + zip(iter_classes, iter_encoded_labels) + ): + if multi_class == "ovr": + scores = self.scores_[cls] + coefs_paths = self.coefs_paths_[cls] + else: + # For multinomial, all scores are the same across classes + scores = scores[0] + # coefs_paths will keep its original shape because + # logistic_regression_path expects it this way + + if self.refit: + # best_index is between 0 and (n_Cs . 
n_l1_ratios - 1) + # for example, with n_cs=2 and n_l1_ratios=3 + # the layout of scores is + # [c1, c2, c1, c2, c1, c2] + # l1_1 , l1_2 , l1_3 + best_index = scores.sum(axis=0).argmax() + + best_index_C = best_index % len(self.Cs_) + C_ = self.Cs_[best_index_C] + self.C_.append(C_) + + best_index_l1 = best_index // len(self.Cs_) + l1_ratio_ = l1_ratios_[best_index_l1] + self.l1_ratio_.append(l1_ratio_) + + if multi_class == "multinomial": + coef_init = np.mean(coefs_paths[:, :, best_index, :], axis=1) + else: + coef_init = np.mean(coefs_paths[:, best_index, :], axis=0) + + # Note that y is label encoded and hence pos_class must be + # the encoded label / None (for 'multinomial') + w, _, _ = _logistic_regression_path( + X, + y, + pos_class=encoded_label, + Cs=[C_], + solver=solver, + fit_intercept=self.fit_intercept, + coef=coef_init, + max_iter=self.max_iter, + tol=self.tol, + penalty=self.penalty, + class_weight=class_weight, + multi_class=multi_class, + verbose=max(0, self.verbose - 1), + random_state=self.random_state, + check_input=False, + max_squared_sum=max_squared_sum, + sample_weight=sample_weight, + l1_ratio=l1_ratio_, + ) + w = w[0] + + else: + # Take the best scores across every fold and the average of + # all coefficients corresponding to the best scores. + best_indices = np.argmax(scores, axis=1) + if multi_class == "ovr": + w = np.mean( + [coefs_paths[i, best_indices[i], :] for i in range(len(folds))], + axis=0, + ) + else: + w = np.mean( + [ + coefs_paths[:, i, best_indices[i], :] + for i in range(len(folds)) + ], + axis=0, + ) + + best_indices_C = best_indices % len(self.Cs_) + self.C_.append(np.mean(self.Cs_[best_indices_C])) + + if self.penalty == "elasticnet": + best_indices_l1 = best_indices // len(self.Cs_) + self.l1_ratio_.append(np.mean(l1_ratios_[best_indices_l1])) + else: + self.l1_ratio_.append(None) + + if multi_class == "multinomial": + self.C_ = np.tile(self.C_, n_classes) + self.l1_ratio_ = np.tile(self.l1_ratio_, n_classes) + self.coef_ = w[:, : X.shape[1]] + if self.fit_intercept: + self.intercept_ = w[:, -1] + else: + self.coef_[index] = w[: X.shape[1]] + if self.fit_intercept: + self.intercept_[index] = w[-1] + + self.C_ = np.asarray(self.C_) + self.l1_ratio_ = np.asarray(self.l1_ratio_) + self.l1_ratios_ = np.asarray(l1_ratios_) + # if elasticnet was used, add the l1_ratios dimension to some + # attributes + if self.l1_ratios is not None: + # with n_cs=2 and n_l1_ratios=3 + # the layout of scores is + # [c1, c2, c1, c2, c1, c2] + # l1_1 , l1_2 , l1_3 + # To get a 2d array with the following layout + # l1_1, l1_2, l1_3 + # c1 [[ . , . , . ], + # c2 [ . , . , . ]] + # We need to first reshape and then transpose. + # The same goes for the other arrays + for cls, coefs_path in self.coefs_paths_.items(): + self.coefs_paths_[cls] = coefs_path.reshape( + (len(folds), self.l1_ratios_.size, self.Cs_.size, -1) + ) + self.coefs_paths_[cls] = np.transpose( + self.coefs_paths_[cls], (0, 2, 1, 3) + ) + for cls, score in self.scores_.items(): + self.scores_[cls] = score.reshape( + (len(folds), self.l1_ratios_.size, self.Cs_.size) + ) + self.scores_[cls] = np.transpose(self.scores_[cls], (0, 2, 1)) + + self.n_iter_ = self.n_iter_.reshape( + (-1, len(folds), self.l1_ratios_.size, self.Cs_.size) + ) + self.n_iter_ = np.transpose(self.n_iter_, (0, 1, 3, 2)) + + return self + + def score(self, X, y, sample_weight=None, **score_params): + """Score using the `scoring` option on the given test data and labels. 
+ + Parameters + ---------- + X : array-like of shape (n_samples, n_features) + Test samples. + + y : array-like of shape (n_samples,) + True labels for X. + + sample_weight : array-like of shape (n_samples,), default=None + Sample weights. + + **score_params : dict + Parameters to pass to the `score` method of the underlying scorer. + + .. versionadded:: 1.4 + + Returns + ------- + score : float + Score of self.predict(X) w.r.t. y. + """ + _raise_for_params(score_params, self, "score") + + scoring = self._get_scorer() + if _routing_enabled(): + routed_params = process_routing( + self, + "score", + sample_weight=sample_weight, + **score_params, + ) + else: + routed_params = Bunch() + routed_params.scorer = Bunch(score={}) + if sample_weight is not None: + routed_params.scorer.score["sample_weight"] = sample_weight + + return scoring( + self, + X, + y, + **routed_params.scorer.score, + ) + + def get_metadata_routing(self): + """Get metadata routing of this object. + + Please check :ref:`User Guide ` on how the routing + mechanism works. + + .. versionadded:: 1.4 + + Returns + ------- + routing : MetadataRouter + A :class:`~sklearn.utils.metadata_routing.MetadataRouter` encapsulating + routing information. + """ + + router = ( + MetadataRouter(owner=self.__class__.__name__) + .add_self_request(self) + .add( + splitter=self.cv, + method_mapping=MethodMapping().add(caller="fit", callee="split"), + ) + .add( + scorer=self._get_scorer(), + method_mapping=MethodMapping() + .add(caller="score", callee="score") + .add(caller="fit", callee="score"), + ) + ) + return router + + def _get_scorer(self): + """Get the scorer based on the scoring method specified. + The default scoring method is `accuracy`. + """ + scoring = self.scoring or "accuracy" + return get_scorer(scoring) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + return tags diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_stochastic_gradient.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_stochastic_gradient.py new file mode 100644 index 0000000000000000000000000000000000000000..d4c196a6fc8ca1e9c37fdf32011206b4903ac340 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/linear_model/_stochastic_gradient.py @@ -0,0 +1,2650 @@ +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +"""Classification, regression and One-Class SVM using Stochastic Gradient +Descent (SGD). 
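+
+A minimal, purely illustrative usage sketch::
+
+    from sklearn.linear_model import SGDClassifier
+    clf = SGDClassifier(loss="log_loss", max_iter=1000, tol=1e-3)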
+""" + +import warnings +from abc import ABCMeta, abstractmethod +from numbers import Integral, Real + +import numpy as np + +from .._loss._loss import CyHalfBinomialLoss, CyHalfSquaredError, CyHuberLoss +from ..base import ( + BaseEstimator, + OutlierMixin, + RegressorMixin, + _fit_context, + clone, + is_classifier, +) +from ..exceptions import ConvergenceWarning +from ..model_selection import ShuffleSplit, StratifiedShuffleSplit +from ..utils import check_random_state, compute_class_weight +from ..utils._param_validation import Hidden, Interval, StrOptions +from ..utils.extmath import safe_sparse_dot +from ..utils.metaestimators import available_if +from ..utils.multiclass import _check_partial_fit_first_call +from ..utils.parallel import Parallel, delayed +from ..utils.validation import _check_sample_weight, check_is_fitted, validate_data +from ._base import LinearClassifierMixin, SparseCoefMixin, make_dataset +from ._sgd_fast import ( + EpsilonInsensitive, + Hinge, + ModifiedHuber, + SquaredEpsilonInsensitive, + SquaredHinge, + _plain_sgd32, + _plain_sgd64, +) + +LEARNING_RATE_TYPES = { + "constant": 1, + "optimal": 2, + "invscaling": 3, + "adaptive": 4, + "pa1": 5, + "pa2": 6, +} + +PENALTY_TYPES = {"none": 0, "l2": 2, "l1": 1, "elasticnet": 3} + +DEFAULT_EPSILON = 0.1 +# Default value of ``epsilon`` parameter. + +MAX_INT = np.iinfo(np.int32).max + + +class _ValidationScoreCallback: + """Callback for early stopping based on validation score""" + + def __init__(self, estimator, X_val, y_val, sample_weight_val, classes=None): + self.estimator = clone(estimator) + self.estimator.t_ = 1 # to pass check_is_fitted + if classes is not None: + self.estimator.classes_ = classes + self.X_val = X_val + self.y_val = y_val + self.sample_weight_val = sample_weight_val + + def __call__(self, coef, intercept): + est = self.estimator + est.coef_ = coef.reshape(1, -1) + est.intercept_ = np.atleast_1d(intercept) + return est.score(self.X_val, self.y_val, self.sample_weight_val) + + +class BaseSGD(SparseCoefMixin, BaseEstimator, metaclass=ABCMeta): + """Base class for SGD classification and regression.""" + + _parameter_constraints: dict = { + "fit_intercept": ["boolean"], + "max_iter": [Interval(Integral, 1, None, closed="left")], + "tol": [Interval(Real, 0, None, closed="left"), None], + "shuffle": ["boolean"], + "verbose": ["verbose"], + "random_state": ["random_state"], + "warm_start": ["boolean"], + "average": [Interval(Integral, 0, None, closed="left"), "boolean"], + } + + def __init__( + self, + loss, + *, + penalty="l2", + alpha=0.0001, + C=1.0, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=0.1, + random_state=None, + learning_rate="optimal", + eta0=0.0, + power_t=0.5, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + warm_start=False, + average=False, + ): + self.loss = loss + self.penalty = penalty + self.learning_rate = learning_rate + self.epsilon = epsilon + self.alpha = alpha + self.C = C + self.l1_ratio = l1_ratio + self.fit_intercept = fit_intercept + self.shuffle = shuffle + self.random_state = random_state + self.verbose = verbose + self.eta0 = eta0 + self.power_t = power_t + self.early_stopping = early_stopping + self.validation_fraction = validation_fraction + self.n_iter_no_change = n_iter_no_change + self.warm_start = warm_start + self.average = average + self.max_iter = max_iter + self.tol = tol + + @abstractmethod + def fit(self, X, y): + """Fit model.""" + + def _more_validate_params(self, 
for_partial_fit=False): + """Validate input params.""" + if self.early_stopping and for_partial_fit: + raise ValueError("early_stopping should be False with partial_fit") + if ( + self.learning_rate in ("constant", "invscaling", "adaptive") + and self.eta0 <= 0.0 + ): + raise ValueError("eta0 must be > 0") + if self.learning_rate == "optimal" and self.alpha == 0: + raise ValueError( + "alpha must be > 0 since " + "learning_rate is 'optimal'. alpha is used " + "to compute the optimal learning rate." + ) + + # raises ValueError if not registered + self._get_penalty_type(self.penalty) + self._get_learning_rate_type(self.learning_rate) + + def _get_loss_function(self, loss): + """Get concrete ``LossFunction`` object for str ``loss``.""" + loss_ = self.loss_functions[loss] + loss_class, args = loss_[0], loss_[1:] + if loss in ("huber", "epsilon_insensitive", "squared_epsilon_insensitive"): + args = (self.epsilon,) + return loss_class(*args) + + def _get_learning_rate_type(self, learning_rate): + return LEARNING_RATE_TYPES[learning_rate] + + def _get_penalty_type(self, penalty): + penalty = str(penalty).lower() + return PENALTY_TYPES[penalty] + + def _allocate_parameter_mem( + self, + n_classes, + n_features, + input_dtype, + coef_init=None, + intercept_init=None, + one_class=0, + ): + """Allocate mem for parameters; initialize if provided.""" + if n_classes > 2: + # allocate coef_ for multi-class + if coef_init is not None: + coef_init = np.asarray(coef_init, dtype=input_dtype, order="C") + if coef_init.shape != (n_classes, n_features): + raise ValueError("Provided ``coef_`` does not match dataset. ") + self.coef_ = coef_init + else: + self.coef_ = np.zeros( + (n_classes, n_features), dtype=input_dtype, order="C" + ) + + # allocate intercept_ for multi-class + if intercept_init is not None: + intercept_init = np.asarray( + intercept_init, order="C", dtype=input_dtype + ) + if intercept_init.shape != (n_classes,): + raise ValueError("Provided intercept_init does not match dataset.") + self.intercept_ = intercept_init + else: + self.intercept_ = np.zeros(n_classes, dtype=input_dtype, order="C") + else: + # allocate coef_ + if coef_init is not None: + coef_init = np.asarray(coef_init, dtype=input_dtype, order="C") + coef_init = coef_init.ravel() + if coef_init.shape != (n_features,): + raise ValueError("Provided coef_init does not match dataset.") + self.coef_ = coef_init + else: + self.coef_ = np.zeros(n_features, dtype=input_dtype, order="C") + + # allocate intercept_ + if intercept_init is not None: + intercept_init = np.asarray(intercept_init, dtype=input_dtype) + if intercept_init.shape != (1,) and intercept_init.shape != (): + raise ValueError("Provided intercept_init does not match dataset.") + if one_class: + self.offset_ = intercept_init.reshape( + 1, + ) + else: + self.intercept_ = intercept_init.reshape( + 1, + ) + else: + if one_class: + self.offset_ = np.zeros(1, dtype=input_dtype, order="C") + else: + self.intercept_ = np.zeros(1, dtype=input_dtype, order="C") + + # initialize average parameters + if self.average > 0: + self._standard_coef = self.coef_ + self._average_coef = np.zeros( + self.coef_.shape, dtype=input_dtype, order="C" + ) + if one_class: + self._standard_intercept = 1 - self.offset_ + else: + self._standard_intercept = self.intercept_ + + self._average_intercept = np.zeros( + self._standard_intercept.shape, dtype=input_dtype, order="C" + ) + + def _make_validation_split(self, y, sample_mask): + """Split the dataset between training set and validation set. 
+ + Parameters + ---------- + y : ndarray of shape (n_samples, ) + Target values. + + sample_mask : ndarray of shape (n_samples, ) + A boolean array indicating whether each sample should be included + for validation set. + + Returns + ------- + validation_mask : ndarray of shape (n_samples, ) + Equal to True on the validation set, False on the training set. + """ + n_samples = y.shape[0] + validation_mask = np.zeros(n_samples, dtype=np.bool_) + if not self.early_stopping: + # use the full set for training, with an empty validation set + return validation_mask + + if is_classifier(self): + splitter_type = StratifiedShuffleSplit + else: + splitter_type = ShuffleSplit + cv = splitter_type( + test_size=self.validation_fraction, random_state=self.random_state + ) + idx_train, idx_val = next(cv.split(np.zeros(shape=(y.shape[0], 1)), y)) + + if not np.any(sample_mask[idx_val]): + raise ValueError( + "The sample weights for validation set are all zero, consider using a" + " different random state." + ) + + if idx_train.shape[0] == 0 or idx_val.shape[0] == 0: + raise ValueError( + "Splitting %d samples into a train set and a validation set " + "with validation_fraction=%r led to an empty set (%d and %d " + "samples). Please either change validation_fraction, increase " + "number of samples, or disable early_stopping." + % ( + n_samples, + self.validation_fraction, + idx_train.shape[0], + idx_val.shape[0], + ) + ) + + validation_mask[idx_val] = True + return validation_mask + + def _make_validation_score_cb( + self, validation_mask, X, y, sample_weight, classes=None + ): + if not self.early_stopping: + return None + + return _ValidationScoreCallback( + self, + X[validation_mask], + y[validation_mask], + sample_weight[validation_mask], + classes=classes, + ) + + +def _prepare_fit_binary(est, y, i, input_dtype, label_encode=True): + """Initialization for fit_binary. + + Returns y, coef, intercept, average_coef, average_intercept. + """ + y_i = np.ones(y.shape, dtype=input_dtype, order="C") + if label_encode: + # y in {0, 1} + y_i[y != est.classes_[i]] = 0.0 + else: + # y in {-1, +1} + y_i[y != est.classes_[i]] = -1.0 + average_intercept = 0 + average_coef = None + + if len(est.classes_) == 2: + if not est.average: + coef = est.coef_.ravel() + intercept = est.intercept_[0] + else: + coef = est._standard_coef.ravel() + intercept = est._standard_intercept[0] + average_coef = est._average_coef.ravel() + average_intercept = est._average_intercept[0] + else: + if not est.average: + coef = est.coef_[i] + intercept = est.intercept_[i] + else: + coef = est._standard_coef[i] + intercept = est._standard_intercept[i] + average_coef = est._average_coef[i] + average_intercept = est._average_intercept[i] + + return y_i, coef, intercept, average_coef, average_intercept + + +def fit_binary( + est, + i, + X, + y, + alpha, + C, + learning_rate, + max_iter, + pos_weight, + neg_weight, + sample_weight, + validation_mask=None, + random_state=None, +): + """Fit a single binary classifier. + + The i'th class is considered the "positive" class. + + Parameters + ---------- + est : Estimator object + The estimator to fit + + i : int + Index of the positive class + + X : numpy array or sparse matrix of shape [n_samples,n_features] + Training data + + y : numpy array of shape [n_samples, ] + Target values + + alpha : float + The regularization parameter + + C : float + Maximum step size for passive aggressive + + learning_rate : str + The learning rate. Accepted values are 'constant', 'optimal', + 'invscaling', 'pa1' and 'pa2'. 
+ + max_iter : int + The maximum number of iterations (epochs) + + pos_weight : float + The weight of the positive class + + neg_weight : float + The weight of the negative class + + sample_weight : numpy array of shape [n_samples, ] + The weight of each sample + + validation_mask : numpy array of shape [n_samples, ], default=None + Precomputed validation mask in case _fit_binary is called in the + context of a one-vs-rest reduction. + + random_state : int, RandomState instance, default=None + If int, random_state is the seed used by the random number generator; + If RandomState instance, random_state is the random number generator; + If None, the random number generator is the RandomState instance used + by `np.random`. + """ + # if average is not true, average_coef, and average_intercept will be + # unused + label_encode = isinstance(est._loss_function_, CyHalfBinomialLoss) + y_i, coef, intercept, average_coef, average_intercept = _prepare_fit_binary( + est, y, i, input_dtype=X.dtype, label_encode=label_encode + ) + assert y_i.shape[0] == y.shape[0] == sample_weight.shape[0] + + random_state = check_random_state(random_state) + dataset, intercept_decay = make_dataset( + X, y_i, sample_weight, random_state=random_state + ) + + penalty_type = est._get_penalty_type(est.penalty) + learning_rate_type = est._get_learning_rate_type(learning_rate) + + if validation_mask is None: + validation_mask = est._make_validation_split(y_i, sample_mask=sample_weight > 0) + classes = np.array([-1, 1], dtype=y_i.dtype) + validation_score_cb = est._make_validation_score_cb( + validation_mask, X, y_i, sample_weight, classes=classes + ) + + # numpy mtrand expects a C long which is a signed 32 bit integer under + # Windows + seed = random_state.randint(MAX_INT) + + tol = est.tol if est.tol is not None else -np.inf + + _plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype) + coef, intercept, average_coef, average_intercept, n_iter_ = _plain_sgd( + coef, + intercept, + average_coef, + average_intercept, + est._loss_function_, + penalty_type, + alpha, + C, + est.l1_ratio, + dataset, + validation_mask, + est.early_stopping, + validation_score_cb, + int(est.n_iter_no_change), + max_iter, + tol, + int(est.fit_intercept), + int(est.verbose), + int(est.shuffle), + seed, + pos_weight, + neg_weight, + learning_rate_type, + est.eta0, + est.power_t, + 0, + est.t_, + intercept_decay, + est.average, + ) + + if est.average: + if len(est.classes_) == 2: + est._average_intercept[0] = average_intercept + else: + est._average_intercept[i] = average_intercept + + return coef, intercept, n_iter_ + + +def _get_plain_sgd_function(input_dtype): + return _plain_sgd32 if input_dtype == np.float32 else _plain_sgd64 + + +class BaseSGDClassifier(LinearClassifierMixin, BaseSGD, metaclass=ABCMeta): + loss_functions = { + "hinge": (Hinge, 1.0), + "squared_hinge": (SquaredHinge, 1.0), + "perceptron": (Hinge, 0.0), + "log_loss": (CyHalfBinomialLoss,), + "modified_huber": (ModifiedHuber,), + "squared_error": (CyHalfSquaredError,), + "huber": (CyHuberLoss, DEFAULT_EPSILON), + "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), + "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), + } + + _parameter_constraints: dict = { + **BaseSGD._parameter_constraints, + "loss": [StrOptions(set(loss_functions))], + "early_stopping": ["boolean"], + "validation_fraction": [Interval(Real, 0, 1, closed="neither")], + "n_iter_no_change": [Interval(Integral, 1, None, closed="left")], + "n_jobs": [Integral, None], + 
"class_weight": [StrOptions({"balanced"}), dict, None], + } + + @abstractmethod + def __init__( + self, + loss="hinge", + *, + penalty="l2", + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=DEFAULT_EPSILON, + n_jobs=None, + random_state=None, + learning_rate="optimal", + eta0=0.0, + power_t=0.5, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + class_weight=None, + warm_start=False, + average=False, + ): + super().__init__( + loss=loss, + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=epsilon, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + warm_start=warm_start, + average=average, + ) + self.class_weight = class_weight + self.n_jobs = n_jobs + + def _partial_fit( + self, + X, + y, + alpha, + C, + loss, + learning_rate, + max_iter, + classes, + sample_weight, + coef_init, + intercept_init, + ): + first_call = not hasattr(self, "classes_") + X, y = validate_data( + self, + X, + y, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + accept_large_sparse=False, + reset=first_call, + ) + + if first_call: + # TODO(1.7) remove 0 from average parameter constraint + if not isinstance(self.average, (bool, np.bool_)) and self.average == 0: + warnings.warn( + ( + "Passing average=0 to disable averaging is deprecated and will" + " be removed in 1.7. Please use average=False instead." + ), + FutureWarning, + ) + + n_samples, n_features = X.shape + + _check_partial_fit_first_call(self, classes) + + n_classes = self.classes_.shape[0] + + # Allocate datastructures from input arguments + self._expanded_class_weight = compute_class_weight( + self.class_weight, classes=self.classes_, y=y + ) + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + if getattr(self, "coef_", None) is None or coef_init is not None: + self._allocate_parameter_mem( + n_classes=n_classes, + n_features=n_features, + input_dtype=X.dtype, + coef_init=coef_init, + intercept_init=intercept_init, + ) + elif n_features != self.coef_.shape[-1]: + raise ValueError( + "Number of features %d does not match previous data %d." 
+ % (n_features, self.coef_.shape[-1]) + ) + + self._loss_function_ = self._get_loss_function(loss) + if not hasattr(self, "t_"): + self.t_ = 1.0 + + # delegate to concrete training procedure + if n_classes > 2: + self._fit_multiclass( + X, + y, + alpha=alpha, + C=C, + learning_rate=learning_rate, + sample_weight=sample_weight, + max_iter=max_iter, + ) + elif n_classes == 2: + self._fit_binary( + X, + y, + alpha=alpha, + C=C, + learning_rate=learning_rate, + sample_weight=sample_weight, + max_iter=max_iter, + ) + else: + raise ValueError( + "The number of classes has to be greater than one; got %d class" + % n_classes + ) + + return self + + def _fit( + self, + X, + y, + alpha, + C, + loss, + learning_rate, + coef_init=None, + intercept_init=None, + sample_weight=None, + ): + if hasattr(self, "classes_"): + # delete the attribute otherwise _partial_fit thinks it's not the first call + delattr(self, "classes_") + + # TODO(1.7) remove 0 from average parameter constraint + if not isinstance(self.average, (bool, np.bool_)) and self.average == 0: + warnings.warn( + ( + "Passing average=0 to disable averaging is deprecated and will be " + "removed in 1.7. Please use average=False instead." + ), + FutureWarning, + ) + + # labels can be encoded as float, int, or string literals + # np.unique sorts in asc order; largest class id is positive class + y = validate_data(self, y=y) + classes = np.unique(y) + + if self.warm_start and hasattr(self, "coef_"): + if coef_init is None: + coef_init = self.coef_ + if intercept_init is None: + intercept_init = self.intercept_ + else: + self.coef_ = None + self.intercept_ = None + + if self.average > 0: + self._standard_coef = self.coef_ + self._standard_intercept = self.intercept_ + self._average_coef = None + self._average_intercept = None + + # Clear iteration count for multiple call to fit. + self.t_ = 1.0 + + self._partial_fit( + X, + y, + alpha, + C, + loss, + learning_rate, + self.max_iter, + classes, + sample_weight, + coef_init, + intercept_init, + ) + + if ( + self.tol is not None + and self.tol > -np.inf + and self.n_iter_ == self.max_iter + ): + warnings.warn( + ( + "Maximum number of iteration reached before " + "convergence. Consider increasing max_iter to " + "improve the fit." + ), + ConvergenceWarning, + ) + return self + + def _fit_binary(self, X, y, alpha, C, sample_weight, learning_rate, max_iter): + """Fit a binary classifier on X and y.""" + coef, intercept, n_iter_ = fit_binary( + self, + 1, + X, + y, + alpha, + C, + learning_rate, + max_iter, + self._expanded_class_weight[1], + self._expanded_class_weight[0], + sample_weight, + random_state=self.random_state, + ) + + self.t_ += n_iter_ * X.shape[0] + self.n_iter_ = n_iter_ + + # need to be 2d + if self.average > 0: + if self.average <= self.t_ - 1: + self.coef_ = self._average_coef.reshape(1, -1) + self.intercept_ = self._average_intercept + else: + self.coef_ = self._standard_coef.reshape(1, -1) + self._standard_intercept = np.atleast_1d(intercept) + self.intercept_ = self._standard_intercept + else: + self.coef_ = coef.reshape(1, -1) + # intercept is a float, need to convert it to an array of length 1 + self.intercept_ = np.atleast_1d(intercept) + + def _fit_multiclass(self, X, y, alpha, C, learning_rate, sample_weight, max_iter): + """Fit a multi-class classifier by combining binary classifiers + + Each binary classifier predicts one class versus all others. This + strategy is called OvA (One versus All) or OvR (One versus Rest). 
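+
+        The validation split used for early stopping is computed once from the
+        multiclass labels and shared by every binary fit, and ``n_iter_`` is
+        set to the maximum number of iterations over the binary fits.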
+ """ + # Precompute the validation split using the multiclass labels + # to ensure proper balancing of the classes. + validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0) + + # Use joblib to fit OvA in parallel. + # Pick the random seed for each job outside of fit_binary to avoid + # sharing the estimator random state between threads which could lead + # to non-deterministic behavior + random_state = check_random_state(self.random_state) + seeds = random_state.randint(MAX_INT, size=len(self.classes_)) + result = Parallel( + n_jobs=self.n_jobs, verbose=self.verbose, require="sharedmem" + )( + delayed(fit_binary)( + self, + i, + X, + y, + alpha, + C, + learning_rate, + max_iter, + self._expanded_class_weight[i], + 1.0, + sample_weight, + validation_mask=validation_mask, + random_state=seed, + ) + for i, seed in enumerate(seeds) + ) + + # take the maximum of n_iter_ over every binary fit + n_iter_ = 0.0 + for i, (_, intercept, n_iter_i) in enumerate(result): + self.intercept_[i] = intercept + n_iter_ = max(n_iter_, n_iter_i) + + self.t_ += n_iter_ * X.shape[0] + self.n_iter_ = n_iter_ + + if self.average > 0: + if self.average <= self.t_ - 1.0: + self.coef_ = self._average_coef + self.intercept_ = self._average_intercept + else: + self.coef_ = self._standard_coef + self._standard_intercept = np.atleast_1d(self.intercept_) + self.intercept_ = self._standard_intercept + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y, classes=None, sample_weight=None): + """Perform one epoch of stochastic gradient descent on given samples. + + Internally, this method uses ``max_iter = 1``. Therefore, it is not + guaranteed that a minimum of the cost function is reached after calling + it once. Matters such as objective convergence, early stopping, and + learning rate adjustments should be handled by the user. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Subset of the training data. + + y : ndarray of shape (n_samples,) + Subset of the target values. + + classes : ndarray of shape (n_classes,), default=None + Classes across all calls to partial_fit. + Can be obtained by via `np.unique(y_all)`, where y_all is the + target vector of the entire dataset. + This argument is required for the first call to partial_fit + and can be omitted in the subsequent calls. + Note that y doesn't need to contain all labels in `classes`. + + sample_weight : array-like, shape (n_samples,), default=None + Weights applied to individual samples. + If not provided, uniform weights are assumed. + + Returns + ------- + self : object + Returns an instance of self. + """ + if not hasattr(self, "classes_"): + self._more_validate_params(for_partial_fit=True) + + if self.class_weight == "balanced": + raise ValueError( + "class_weight '{0}' is not supported for " + "partial_fit. In order to use 'balanced' weights," + " use compute_class_weight('{0}', " + "classes=classes, y=y). " + "In place of y you can use a large enough sample " + "of the full training set target to properly " + "estimate the class frequency distributions. 
" + "Pass the resulting weights as the class_weight " + "parameter.".format(self.class_weight) + ) + + return self._partial_fit( + X, + y, + alpha=self.alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + max_iter=1, + classes=classes, + sample_weight=sample_weight, + coef_init=None, + intercept_init=None, + ) + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): + """Fit linear model with Stochastic Gradient Descent. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + coef_init : ndarray of shape (n_classes, n_features), default=None + The initial coefficients to warm-start the optimization. + + intercept_init : ndarray of shape (n_classes,), default=None + The initial intercept to warm-start the optimization. + + sample_weight : array-like, shape (n_samples,), default=None + Weights applied to individual samples. + If not provided, uniform weights are assumed. These weights will + be multiplied with class_weight (passed through the + constructor) if class_weight is specified. + + Returns + ------- + self : object + Returns an instance of self. + """ + self._more_validate_params() + + return self._fit( + X, + y, + alpha=self.alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + coef_init=coef_init, + intercept_init=intercept_init, + sample_weight=sample_weight, + ) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + return tags + + +class SGDClassifier(BaseSGDClassifier): + """Linear classifiers (SVM, logistic regression, etc.) with SGD training. + + This estimator implements regularized linear models with stochastic + gradient descent (SGD) learning: the gradient of the loss is estimated + each sample at a time and the model is updated along the way with a + decreasing strength schedule (aka learning rate). SGD allows minibatch + (online/out-of-core) learning via the `partial_fit` method. + For best results using the default learning rate schedule, the data should + have zero mean and unit variance. + + This implementation works with data represented as dense or sparse arrays + of floating point values for the features. The model it fits can be + controlled with the loss parameter; by default, it fits a linear support + vector machine (SVM). + + The regularizer is a penalty added to the loss function that shrinks model + parameters towards the zero vector using either the squared euclidean norm + L2 or the absolute norm L1 or a combination of both (Elastic Net). If the + parameter update crosses the 0.0 value because of the regularizer, the + update is truncated to 0.0 to allow for learning sparse models and achieve + online feature selection. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + loss : {'hinge', 'log_loss', 'modified_huber', 'squared_hinge',\ + 'perceptron', 'squared_error', 'huber', 'epsilon_insensitive',\ + 'squared_epsilon_insensitive'}, default='hinge' + The loss function to be used. + + - 'hinge' gives a linear SVM. + - 'log_loss' gives logistic regression, a probabilistic classifier. + - 'modified_huber' is another smooth loss that brings tolerance to + outliers as well as probability estimates. + - 'squared_hinge' is like hinge but is quadratically penalized. + - 'perceptron' is the linear loss used by the perceptron algorithm. 
+ - The other losses, 'squared_error', 'huber', 'epsilon_insensitive' and + 'squared_epsilon_insensitive' are designed for regression but can be useful + in classification as well; see + :class:`~sklearn.linear_model.SGDRegressor` for a description. + + More details about the losses formulas can be found in the :ref:`User Guide + ` and you can find a visualisation of the loss + functions in + :ref:`sphx_glr_auto_examples_linear_model_plot_sgd_loss_functions.py`. + + penalty : {'l2', 'l1', 'elasticnet', None}, default='l2' + The penalty (aka regularization term) to be used. Defaults to 'l2' + which is the standard regularizer for linear SVM models. 'l1' and + 'elasticnet' might bring sparsity to the model (feature selection) + not achievable with 'l2'. No penalty is added when set to `None`. + + You can see a visualisation of the penalties in + :ref:`sphx_glr_auto_examples_linear_model_plot_sgd_penalties.py`. + + alpha : float, default=0.0001 + Constant that multiplies the regularization term. The higher the + value, the stronger the regularization. Also used to compute the + learning rate when `learning_rate` is set to 'optimal'. + Values must be in the range `[0.0, inf)`. + + l1_ratio : float, default=0.15 + The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. + l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. + Only used if `penalty` is 'elasticnet'. + Values must be in the range `[0.0, 1.0]`. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If False, the + data is assumed to be already centered. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + :meth:`partial_fit` method. + Values must be in the range `[1, inf)`. + + .. versionadded:: 0.19 + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, training will stop + when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive + epochs. + Convergence is checked against the training loss or the + validation loss depending on the `early_stopping` parameter. + Values must be in the range `[0.0, inf)`. + + .. versionadded:: 0.19 + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + + verbose : int, default=0 + The verbosity level. + Values must be in the range `[0, inf)`. + + epsilon : float, default=0.1 + Epsilon in the epsilon-insensitive loss functions; only if `loss` is + 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. + For 'huber', determines the threshold at which it becomes less + important to get the prediction exactly right. + For epsilon-insensitive, any differences between the current prediction + and the correct label are ignored if they are less than this threshold. + Values must be in the range `[0.0, inf)`. + + n_jobs : int, default=None + The number of CPUs to use to do the OVA (One Versus All, for + multi-class problems) computation. + ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context. + ``-1`` means using all processors. See :term:`Glossary ` + for more details. + + random_state : int, RandomState instance, default=None + Used for shuffling the data, when ``shuffle`` is set to ``True``. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + Integer values must be in the range `[0, 2**32 - 1]`. 
+ + learning_rate : str, default='optimal' + The learning rate schedule: + + - 'constant': `eta = eta0` + - 'optimal': `eta = 1.0 / (alpha * (t + t0))` + where `t0` is chosen by a heuristic proposed by Leon Bottou. + - 'invscaling': `eta = eta0 / pow(t, power_t)` + - 'adaptive': `eta = eta0`, as long as the training keeps decreasing. + Each time n_iter_no_change consecutive epochs fail to decrease the + training loss by tol or fail to increase validation score by tol if + `early_stopping` is `True`, the current learning rate is divided by 5. + + .. versionadded:: 0.20 + Added 'adaptive' option. + + eta0 : float, default=0.0 + The initial learning rate for the 'constant', 'invscaling' or + 'adaptive' schedules. The default value is 0.0 as eta0 is not used by + the default schedule 'optimal'. + Values must be in the range `[0.0, inf)`. + + power_t : float, default=0.5 + The exponent for inverse scaling learning rate. + Values must be in the range `(-inf, inf)`. + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation + score is not improving. If set to `True`, it will automatically set aside + a stratified fraction of training data as validation and terminate + training when validation score returned by the `score` method is not + improving by at least tol for n_iter_no_change consecutive epochs. + + See :ref:`sphx_glr_auto_examples_linear_model_plot_sgd_early_stopping.py` for an + example of the effects of early stopping. + + .. versionadded:: 0.20 + Added 'early_stopping' option + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if `early_stopping` is True. + Values must be in the range `(0.0, 1.0)`. + + .. versionadded:: 0.20 + Added 'validation_fraction' option + + n_iter_no_change : int, default=5 + Number of iterations with no improvement to wait before stopping + fitting. + Convergence is checked against the training loss or the + validation loss depending on the `early_stopping` parameter. + Integer values must be in the range `[1, max_iter)`. + + .. versionadded:: 0.20 + Added 'n_iter_no_change' option + + class_weight : dict, {class_label: weight} or "balanced", default=None + Preset for the class_weight fit parameter. + + Weights associated with classes. If not given, all classes + are supposed to have weight one. + + The "balanced" mode uses the values of y to automatically adjust + weights inversely proportional to class frequencies in the input data + as ``n_samples / (n_classes * np.bincount(y))``. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + Repeatedly calling fit or partial_fit when warm_start is True can + result in a different solution than when calling fit a single time + because of the way the data is shuffled. + If a dynamic learning rate is used, the learning rate is adapted + depending on the number of samples already seen. Calling ``fit`` resets + this counter, while ``partial_fit`` will result in increasing the + existing counter. + + average : bool or int, default=False + When set to `True`, computes the averaged SGD weights across all + updates and stores the result in the ``coef_`` attribute. If set to + an int greater than 1, averaging will begin once the total number of + samples seen reaches `average`. 
So ``average=10`` will begin + averaging after seeing 10 samples. + Integer values must be in the range `[1, n_samples]`. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) if n_classes == 2 else \ + (n_classes, n_features) + Weights assigned to the features. + + intercept_ : ndarray of shape (1,) if n_classes == 2 else (n_classes,) + Constants in decision function. + + n_iter_ : int + The actual number of iterations before reaching the stopping criterion. + For multiclass fits, it is the maximum over every binary fit. + + classes_ : array of shape (n_classes,) + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.svm.LinearSVC : Linear support vector classification. + LogisticRegression : Logistic regression. + Perceptron : Inherits from SGDClassifier. ``Perceptron()`` is equivalent to + ``SGDClassifier(loss="perceptron", eta0=1, learning_rate="constant", + penalty=None)``. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import SGDClassifier + >>> from sklearn.preprocessing import StandardScaler + >>> from sklearn.pipeline import make_pipeline + >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) + >>> Y = np.array([1, 1, 2, 2]) + >>> # Always scale the input. The most convenient way is to use a pipeline. + >>> clf = make_pipeline(StandardScaler(), + ... SGDClassifier(max_iter=1000, tol=1e-3)) + >>> clf.fit(X, Y) + Pipeline(steps=[('standardscaler', StandardScaler()), + ('sgdclassifier', SGDClassifier())]) + >>> print(clf.predict([[-0.8, -1]])) + [1] + """ + + _parameter_constraints: dict = { + **BaseSGDClassifier._parameter_constraints, + "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "power_t": [Interval(Real, None, None, closed="neither")], + "epsilon": [Interval(Real, 0, None, closed="left")], + "learning_rate": [ + StrOptions({"constant", "optimal", "invscaling", "adaptive"}), + Hidden(StrOptions({"pa1", "pa2"})), + ], + "eta0": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + loss="hinge", + *, + penalty="l2", + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=DEFAULT_EPSILON, + n_jobs=None, + random_state=None, + learning_rate="optimal", + eta0=0.0, + power_t=0.5, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + class_weight=None, + warm_start=False, + average=False, + ): + super().__init__( + loss=loss, + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=epsilon, + n_jobs=n_jobs, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + class_weight=class_weight, + warm_start=warm_start, + average=average, + ) + + def _check_proba(self): + if self.loss not in ("log_loss", "modified_huber"): + raise AttributeError( + "probability estimates are not 
available for loss=%r" % self.loss + ) + return True + + @available_if(_check_proba) + def predict_proba(self, X): + """Probability estimates. + + This method is only available for log loss and modified Huber loss. + + Multiclass probability estimates are derived from binary (one-vs.-rest) + estimates by simple normalization, as recommended by Zadrozny and + Elkan. + + Binary probability estimates for loss="modified_huber" are given by + (clip(decision_function(X), -1, 1) + 1) / 2. For other loss functions + it is necessary to perform proper probability calibration by wrapping + the classifier with + :class:`~sklearn.calibration.CalibratedClassifierCV` instead. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Input data for prediction. + + Returns + ------- + ndarray of shape (n_samples, n_classes) + Returns the probability of the sample for each class in the model, + where classes are ordered as they are in `self.classes_`. + + References + ---------- + Zadrozny and Elkan, "Transforming classifier scores into multiclass + probability estimates", SIGKDD'02, + https://dl.acm.org/doi/pdf/10.1145/775047.775151 + + The justification for the formula in the loss="modified_huber" + case is in the appendix B in: + http://jmlr.csail.mit.edu/papers/volume2/zhang02c/zhang02c.pdf + """ + check_is_fitted(self) + + if self.loss == "log_loss": + return self._predict_proba_lr(X) + + elif self.loss == "modified_huber": + binary = len(self.classes_) == 2 + scores = self.decision_function(X) + + if binary: + prob2 = np.ones((scores.shape[0], 2)) + prob = prob2[:, 1] + else: + prob = scores + + np.clip(scores, -1, 1, prob) + prob += 1.0 + prob /= 2.0 + + if binary: + prob2[:, 0] -= prob + prob = prob2 + else: + # the above might assign zero to all classes, which doesn't + # normalize neatly; work around this to produce uniform + # probabilities + prob_sum = prob.sum(axis=1) + all_zero = prob_sum == 0 + if np.any(all_zero): + prob[all_zero, :] = 1 + prob_sum[all_zero] = len(self.classes_) + + # normalize + prob /= prob_sum.reshape((prob.shape[0], -1)) + + return prob + + else: + raise NotImplementedError( + "predict_(log_)proba only supported when" + " loss='log_loss' or loss='modified_huber' " + "(%r given)" % self.loss + ) + + @available_if(_check_proba) + def predict_log_proba(self, X): + """Log of probability estimates. + + This method is only available for log loss and modified Huber loss. + + When loss="modified_huber", probability estimates may be hard zeros + and ones, so taking the logarithm is not possible. + + See ``predict_proba`` for details. + + Parameters + ---------- + X : {array-like, sparse matrix} of shape (n_samples, n_features) + Input data for prediction. + + Returns + ------- + T : array-like, shape (n_samples, n_classes) + Returns the log-probability of the sample for each class in the + model, where classes are ordered as they are in + `self.classes_`. 
+ """ + return np.log(self.predict_proba(X)) + + +class BaseSGDRegressor(RegressorMixin, BaseSGD): + loss_functions = { + "squared_error": (CyHalfSquaredError,), + "huber": (CyHuberLoss, DEFAULT_EPSILON), + "epsilon_insensitive": (EpsilonInsensitive, DEFAULT_EPSILON), + "squared_epsilon_insensitive": (SquaredEpsilonInsensitive, DEFAULT_EPSILON), + } + + _parameter_constraints: dict = { + **BaseSGD._parameter_constraints, + "loss": [StrOptions(set(loss_functions))], + "early_stopping": ["boolean"], + "validation_fraction": [Interval(Real, 0, 1, closed="neither")], + "n_iter_no_change": [Interval(Integral, 1, None, closed="left")], + } + + @abstractmethod + def __init__( + self, + loss="squared_error", + *, + penalty="l2", + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=DEFAULT_EPSILON, + random_state=None, + learning_rate="invscaling", + eta0=0.01, + power_t=0.25, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + warm_start=False, + average=False, + ): + super().__init__( + loss=loss, + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=epsilon, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + warm_start=warm_start, + average=average, + ) + + def _partial_fit( + self, + X, + y, + alpha, + C, + loss, + learning_rate, + max_iter, + sample_weight, + coef_init, + intercept_init, + ): + first_call = getattr(self, "coef_", None) is None + X, y = validate_data( + self, + X, + y, + accept_sparse="csr", + copy=False, + order="C", + dtype=[np.float64, np.float32], + accept_large_sparse=False, + reset=first_call, + ) + y = y.astype(X.dtype, copy=False) + + if first_call: + # TODO(1.7) remove 0 from average parameter constraint + if not isinstance(self.average, (bool, np.bool_)) and self.average == 0: + warnings.warn( + ( + "Passing average=0 to disable averaging is deprecated and will" + " be removed in 1.7. Please use average=False instead." + ), + FutureWarning, + ) + + n_samples, n_features = X.shape + + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + # Allocate datastructures from input arguments + if first_call: + self._allocate_parameter_mem( + n_classes=1, + n_features=n_features, + input_dtype=X.dtype, + coef_init=coef_init, + intercept_init=intercept_init, + ) + if self.average > 0 and getattr(self, "_average_coef", None) is None: + self._average_coef = np.zeros(n_features, dtype=X.dtype, order="C") + self._average_intercept = np.zeros(1, dtype=X.dtype, order="C") + + self._fit_regressor( + X, y, alpha, C, loss, learning_rate, sample_weight, max_iter + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y, sample_weight=None): + """Perform one epoch of stochastic gradient descent on given samples. + + Internally, this method uses ``max_iter = 1``. Therefore, it is not + guaranteed that a minimum of the cost function is reached after calling + it once. Matters such as objective convergence and early stopping + should be handled by the user. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Subset of training data. + + y : numpy array of shape (n_samples,) + Subset of target values. 
+ + sample_weight : array-like, shape (n_samples,), default=None + Weights applied to individual samples. + If not provided, uniform weights are assumed. + + Returns + ------- + self : object + Returns an instance of self. + """ + if not hasattr(self, "coef_"): + self._more_validate_params(for_partial_fit=True) + + return self._partial_fit( + X, + y, + self.alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + max_iter=1, + sample_weight=sample_weight, + coef_init=None, + intercept_init=None, + ) + + def _fit( + self, + X, + y, + alpha, + C, + loss, + learning_rate, + coef_init=None, + intercept_init=None, + sample_weight=None, + ): + # TODO(1.7) remove 0 from average parameter constraint + if not isinstance(self.average, (bool, np.bool_)) and self.average == 0: + warnings.warn( + ( + "Passing average=0 to disable averaging is deprecated and will be " + "removed in 1.7. Please use average=False instead." + ), + FutureWarning, + ) + + if self.warm_start and getattr(self, "coef_", None) is not None: + if coef_init is None: + coef_init = self.coef_ + if intercept_init is None: + intercept_init = self.intercept_ + else: + self.coef_ = None + self.intercept_ = None + + # Clear iteration count for multiple call to fit. + self.t_ = 1.0 + + self._partial_fit( + X, + y, + alpha, + C, + loss, + learning_rate, + self.max_iter, + sample_weight, + coef_init, + intercept_init, + ) + + if ( + self.tol is not None + and self.tol > -np.inf + and self.n_iter_ == self.max_iter + ): + warnings.warn( + ( + "Maximum number of iteration reached before " + "convergence. Consider increasing max_iter to " + "improve the fit." + ), + ConvergenceWarning, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y, coef_init=None, intercept_init=None, sample_weight=None): + """Fit linear model with Stochastic Gradient Descent. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Training data. + + y : ndarray of shape (n_samples,) + Target values. + + coef_init : ndarray of shape (n_features,), default=None + The initial coefficients to warm-start the optimization. + + intercept_init : ndarray of shape (1,), default=None + The initial intercept to warm-start the optimization. + + sample_weight : array-like, shape (n_samples,), default=None + Weights applied to individual samples (1. for unweighted). + + Returns + ------- + self : object + Fitted `SGDRegressor` estimator. + """ + self._more_validate_params() + + return self._fit( + X, + y, + alpha=self.alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + coef_init=coef_init, + intercept_init=intercept_init, + sample_weight=sample_weight, + ) + + def _decision_function(self, X): + """Predict using the linear model + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + + Returns + ------- + ndarray of shape (n_samples,) + Predicted target values per element in X. + """ + check_is_fitted(self) + + X = validate_data(self, X, accept_sparse="csr", reset=False) + + scores = safe_sparse_dot(X, self.coef_.T, dense_output=True) + self.intercept_ + return scores.ravel() + + def predict(self, X): + """Predict using the linear model. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Input data. + + Returns + ------- + ndarray of shape (n_samples,) + Predicted target values per element in X. 
+ """ + return self._decision_function(X) + + def _fit_regressor( + self, X, y, alpha, C, loss, learning_rate, sample_weight, max_iter + ): + loss_function = self._get_loss_function(loss) + penalty_type = self._get_penalty_type(self.penalty) + learning_rate_type = self._get_learning_rate_type(learning_rate) + + if not hasattr(self, "t_"): + self.t_ = 1.0 + + validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0) + validation_score_cb = self._make_validation_score_cb( + validation_mask, X, y, sample_weight + ) + + random_state = check_random_state(self.random_state) + # numpy mtrand expects a C long which is a signed 32 bit integer under + # Windows + seed = random_state.randint(0, MAX_INT) + + dataset, intercept_decay = make_dataset( + X, y, sample_weight, random_state=random_state + ) + + tol = self.tol if self.tol is not None else -np.inf + + if self.average: + coef = self._standard_coef + intercept = self._standard_intercept + average_coef = self._average_coef + average_intercept = self._average_intercept + else: + coef = self.coef_ + intercept = self.intercept_ + average_coef = None # Not used + average_intercept = [0] # Not used + + _plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype) + coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd( + coef, + intercept[0], + average_coef, + average_intercept[0], + loss_function, + penalty_type, + alpha, + C, + self.l1_ratio, + dataset, + validation_mask, + self.early_stopping, + validation_score_cb, + int(self.n_iter_no_change), + max_iter, + tol, + int(self.fit_intercept), + int(self.verbose), + int(self.shuffle), + seed, + 1.0, + 1.0, + learning_rate_type, + self.eta0, + self.power_t, + 0, + self.t_, + intercept_decay, + self.average, + ) + + self.t_ += self.n_iter_ * X.shape[0] + + if self.average > 0: + self._average_intercept = np.atleast_1d(average_intercept) + self._standard_intercept = np.atleast_1d(intercept) + + if self.average <= self.t_ - 1.0: + # made enough updates for averaging to be taken into account + self.coef_ = average_coef + self.intercept_ = np.atleast_1d(average_intercept) + else: + self.coef_ = coef + self.intercept_ = np.atleast_1d(intercept) + + else: + self.intercept_ = np.atleast_1d(intercept) + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + return tags + + +class SGDRegressor(BaseSGDRegressor): + """Linear model fitted by minimizing a regularized empirical loss with SGD. + + SGD stands for Stochastic Gradient Descent: the gradient of the loss is + estimated each sample at a time and the model is updated along the way with + a decreasing strength schedule (aka learning rate). + + The regularizer is a penalty added to the loss function that shrinks model + parameters towards the zero vector using either the squared euclidean norm + L2 or the absolute norm L1 or a combination of both (Elastic Net). If the + parameter update crosses the 0.0 value because of the regularizer, the + update is truncated to 0.0 to allow for learning sparse models and achieve + online feature selection. + + This implementation works with data represented as dense numpy arrays of + floating point values for the features. + + Read more in the :ref:`User Guide `. + + Parameters + ---------- + loss : str, default='squared_error' + The loss function to be used. The possible values are 'squared_error', + 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive' + + The 'squared_error' refers to the ordinary least squares fit. 
+ 'huber' modifies 'squared_error' to focus less on getting outliers + correct by switching from squared to linear loss past a distance of + epsilon. 'epsilon_insensitive' ignores errors less than epsilon and is + linear past that; this is the loss function used in SVR. + 'squared_epsilon_insensitive' is the same but becomes squared loss past + a tolerance of epsilon. + + More details about the losses formulas can be found in the + :ref:`User Guide `. + + penalty : {'l2', 'l1', 'elasticnet', None}, default='l2' + The penalty (aka regularization term) to be used. Defaults to 'l2' + which is the standard regularizer for linear SVM models. 'l1' and + 'elasticnet' might bring sparsity to the model (feature selection) + not achievable with 'l2'. No penalty is added when set to `None`. + + You can see a visualisation of the penalties in + :ref:`sphx_glr_auto_examples_linear_model_plot_sgd_penalties.py`. + + alpha : float, default=0.0001 + Constant that multiplies the regularization term. The higher the + value, the stronger the regularization. Also used to compute the + learning rate when `learning_rate` is set to 'optimal'. + Values must be in the range `[0.0, inf)`. + + l1_ratio : float, default=0.15 + The Elastic Net mixing parameter, with 0 <= l1_ratio <= 1. + l1_ratio=0 corresponds to L2 penalty, l1_ratio=1 to L1. + Only used if `penalty` is 'elasticnet'. + Values must be in the range `[0.0, 1.0]`. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. If False, the + data is assumed to be already centered. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + :meth:`partial_fit` method. + Values must be in the range `[1, inf)`. + + .. versionadded:: 0.19 + + tol : float or None, default=1e-3 + The stopping criterion. If it is not None, training will stop + when (loss > best_loss - tol) for ``n_iter_no_change`` consecutive + epochs. + Convergence is checked against the training loss or the + validation loss depending on the `early_stopping` parameter. + Values must be in the range `[0.0, inf)`. + + .. versionadded:: 0.19 + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + + verbose : int, default=0 + The verbosity level. + Values must be in the range `[0, inf)`. + + epsilon : float, default=0.1 + Epsilon in the epsilon-insensitive loss functions; only if `loss` is + 'huber', 'epsilon_insensitive', or 'squared_epsilon_insensitive'. + For 'huber', determines the threshold at which it becomes less + important to get the prediction exactly right. + For epsilon-insensitive, any differences between the current prediction + and the correct label are ignored if they are less than this threshold. + Values must be in the range `[0.0, inf)`. + + random_state : int, RandomState instance, default=None + Used for shuffling the data, when ``shuffle`` is set to ``True``. + Pass an int for reproducible output across multiple function calls. + See :term:`Glossary `. + + learning_rate : str, default='invscaling' + The learning rate schedule: + + - 'constant': `eta = eta0` + - 'optimal': `eta = 1.0 / (alpha * (t + t0))` + where t0 is chosen by a heuristic proposed by Leon Bottou. + - 'invscaling': `eta = eta0 / pow(t, power_t)` + - 'adaptive': eta = eta0, as long as the training keeps decreasing. 
+ Each time n_iter_no_change consecutive epochs fail to decrease the + training loss by tol or fail to increase validation score by tol if + early_stopping is True, the current learning rate is divided by 5. + + .. versionadded:: 0.20 + Added 'adaptive' option. + + eta0 : float, default=0.01 + The initial learning rate for the 'constant', 'invscaling' or + 'adaptive' schedules. The default value is 0.01. + Values must be in the range `[0.0, inf)`. + + power_t : float, default=0.25 + The exponent for inverse scaling learning rate. + Values must be in the range `(-inf, inf)`. + + early_stopping : bool, default=False + Whether to use early stopping to terminate training when validation + score is not improving. If set to True, it will automatically set aside + a fraction of training data as validation and terminate + training when validation score returned by the `score` method is not + improving by at least `tol` for `n_iter_no_change` consecutive + epochs. + + See :ref:`sphx_glr_auto_examples_linear_model_plot_sgd_early_stopping.py` for an + example of the effects of early stopping. + + .. versionadded:: 0.20 + Added 'early_stopping' option + + validation_fraction : float, default=0.1 + The proportion of training data to set aside as validation set for + early stopping. Must be between 0 and 1. + Only used if `early_stopping` is True. + Values must be in the range `(0.0, 1.0)`. + + .. versionadded:: 0.20 + Added 'validation_fraction' option + + n_iter_no_change : int, default=5 + Number of iterations with no improvement to wait before stopping + fitting. + Convergence is checked against the training loss or the + validation loss depending on the `early_stopping` parameter. + Integer values must be in the range `[1, max_iter)`. + + .. versionadded:: 0.20 + Added 'n_iter_no_change' option + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + Repeatedly calling fit or partial_fit when warm_start is True can + result in a different solution than when calling fit a single time + because of the way the data is shuffled. + If a dynamic learning rate is used, the learning rate is adapted + depending on the number of samples already seen. Calling ``fit`` resets + this counter, while ``partial_fit`` will result in increasing the + existing counter. + + average : bool or int, default=False + When set to True, computes the averaged SGD weights across all + updates and stores the result in the ``coef_`` attribute. If set to + an int greater than 1, averaging will begin once the total number of + samples seen reaches `average`. So ``average=10`` will begin + averaging after seeing 10 samples. + + Attributes + ---------- + coef_ : ndarray of shape (n_features,) + Weights assigned to the features. + + intercept_ : ndarray of shape (1,) + The intercept term. + + n_iter_ : int + The actual number of iterations before reaching the stopping criterion. + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + HuberRegressor : Linear regression model that is robust to outliers. 
+ Lars : Least Angle Regression model. + Lasso : Linear Model trained with L1 prior as regularizer. + RANSACRegressor : RANSAC (RANdom SAmple Consensus) algorithm. + Ridge : Linear least squares with l2 regularization. + sklearn.svm.SVR : Epsilon-Support Vector Regression. + TheilSenRegressor : Theil-Sen Estimator robust multivariate regression model. + + Examples + -------- + >>> import numpy as np + >>> from sklearn.linear_model import SGDRegressor + >>> from sklearn.pipeline import make_pipeline + >>> from sklearn.preprocessing import StandardScaler + >>> n_samples, n_features = 10, 5 + >>> rng = np.random.RandomState(0) + >>> y = rng.randn(n_samples) + >>> X = rng.randn(n_samples, n_features) + >>> # Always scale the input. The most convenient way is to use a pipeline. + >>> reg = make_pipeline(StandardScaler(), + ... SGDRegressor(max_iter=1000, tol=1e-3)) + >>> reg.fit(X, y) + Pipeline(steps=[('standardscaler', StandardScaler()), + ('sgdregressor', SGDRegressor())]) + """ + + _parameter_constraints: dict = { + **BaseSGDRegressor._parameter_constraints, + "penalty": [StrOptions({"l2", "l1", "elasticnet"}), None], + "alpha": [Interval(Real, 0, None, closed="left")], + "l1_ratio": [Interval(Real, 0, 1, closed="both")], + "power_t": [Interval(Real, None, None, closed="neither")], + "learning_rate": [ + StrOptions({"constant", "optimal", "invscaling", "adaptive"}), + Hidden(StrOptions({"pa1", "pa2"})), + ], + "epsilon": [Interval(Real, 0, None, closed="left")], + "eta0": [Interval(Real, 0, None, closed="left")], + } + + def __init__( + self, + loss="squared_error", + *, + penalty="l2", + alpha=0.0001, + l1_ratio=0.15, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + epsilon=DEFAULT_EPSILON, + random_state=None, + learning_rate="invscaling", + eta0=0.01, + power_t=0.25, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + warm_start=False, + average=False, + ): + super().__init__( + loss=loss, + penalty=penalty, + alpha=alpha, + l1_ratio=l1_ratio, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=epsilon, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=early_stopping, + validation_fraction=validation_fraction, + n_iter_no_change=n_iter_no_change, + warm_start=warm_start, + average=average, + ) + + +class SGDOneClassSVM(OutlierMixin, BaseSGD): + """Solves linear One-Class SVM using Stochastic Gradient Descent. + + This implementation is meant to be used with a kernel approximation + technique (e.g. `sklearn.kernel_approximation.Nystroem`) to obtain results + similar to `sklearn.svm.OneClassSVM` which uses a Gaussian kernel by + default. + + Read more in the :ref:`User Guide `. + + .. versionadded:: 1.0 + + Parameters + ---------- + nu : float, default=0.5 + The nu parameter of the One Class SVM: an upper bound on the + fraction of training errors and a lower bound of the fraction of + support vectors. Should be in the interval (0, 1]. By default 0.5 + will be taken. + + fit_intercept : bool, default=True + Whether the intercept should be estimated or not. Defaults to True. + + max_iter : int, default=1000 + The maximum number of passes over the training data (aka epochs). + It only impacts the behavior in the ``fit`` method, and not the + `partial_fit`. Defaults to 1000. + Values must be in the range `[1, inf)`. + + tol : float or None, default=1e-3 + The stopping criterion. 
If it is not None, the iterations will stop + when (loss > previous_loss - tol). Defaults to 1e-3. + Values must be in the range `[0.0, inf)`. + + shuffle : bool, default=True + Whether or not the training data should be shuffled after each epoch. + Defaults to True. + + verbose : int, default=0 + The verbosity level. + + random_state : int, RandomState instance or None, default=None + The seed of the pseudo random number generator to use when shuffling + the data. If int, random_state is the seed used by the random number + generator; If RandomState instance, random_state is the random number + generator; If None, the random number generator is the RandomState + instance used by `np.random`. + + learning_rate : {'constant', 'optimal', 'invscaling', 'adaptive'}, default='optimal' + The learning rate schedule to use with `fit`. (If using `partial_fit`, + learning rate must be controlled directly). + + - 'constant': `eta = eta0` + - 'optimal': `eta = 1.0 / (alpha * (t + t0))` + where t0 is chosen by a heuristic proposed by Leon Bottou. + - 'invscaling': `eta = eta0 / pow(t, power_t)` + - 'adaptive': eta = eta0, as long as the training keeps decreasing. + Each time n_iter_no_change consecutive epochs fail to decrease the + training loss by tol or fail to increase validation score by tol if + early_stopping is True, the current learning rate is divided by 5. + + eta0 : float, default=0.0 + The initial learning rate for the 'constant', 'invscaling' or + 'adaptive' schedules. The default value is 0.0 as eta0 is not used by + the default schedule 'optimal'. + Values must be in the range `[0.0, inf)`. + + power_t : float, default=0.5 + The exponent for inverse scaling learning rate. + Values must be in the range `(-inf, inf)`. + + warm_start : bool, default=False + When set to True, reuse the solution of the previous call to fit as + initialization, otherwise, just erase the previous solution. + See :term:`the Glossary `. + + Repeatedly calling fit or partial_fit when warm_start is True can + result in a different solution than when calling fit a single time + because of the way the data is shuffled. + If a dynamic learning rate is used, the learning rate is adapted + depending on the number of samples already seen. Calling ``fit`` resets + this counter, while ``partial_fit`` will result in increasing the + existing counter. + + average : bool or int, default=False + When set to True, computes the averaged SGD weights and stores the + result in the ``coef_`` attribute. If set to an int greater than 1, + averaging will begin once the total number of samples seen reaches + average. So ``average=10`` will begin averaging after seeing 10 + samples. + + Attributes + ---------- + coef_ : ndarray of shape (1, n_features) + Weights assigned to the features. + + offset_ : ndarray of shape (1,) + Offset used to define the decision function from the raw scores. + We have the relation: decision_function = score_samples - offset. + + n_iter_ : int + The actual number of iterations to reach the stopping criterion. + + t_ : int + Number of weight updates performed during training. + Same as ``(n_iter_ * n_samples + 1)``. + + n_features_in_ : int + Number of features seen during :term:`fit`. + + .. versionadded:: 0.24 + + feature_names_in_ : ndarray of shape (`n_features_in_`,) + Names of features seen during :term:`fit`. Defined only when `X` + has feature names that are all strings. + + .. versionadded:: 1.0 + + See Also + -------- + sklearn.svm.OneClassSVM : Unsupervised Outlier Detection. 
+ + Notes + ----- + This estimator has a linear complexity in the number of training samples + and is thus better suited than the `sklearn.svm.OneClassSVM` + implementation for datasets with a large number of training samples (say + > 10,000). + + Examples + -------- + >>> import numpy as np + >>> from sklearn import linear_model + >>> X = np.array([[-1, -1], [-2, -1], [1, 1], [2, 1]]) + >>> clf = linear_model.SGDOneClassSVM(random_state=42) + >>> clf.fit(X) + SGDOneClassSVM(random_state=42) + + >>> print(clf.predict([[4, 4]])) + [1] + """ + + loss_functions = {"hinge": (Hinge, 1.0)} + + _parameter_constraints: dict = { + **BaseSGD._parameter_constraints, + "nu": [Interval(Real, 0.0, 1.0, closed="right")], + "learning_rate": [ + StrOptions({"constant", "optimal", "invscaling", "adaptive"}), + Hidden(StrOptions({"pa1", "pa2"})), + ], + "eta0": [Interval(Real, 0, None, closed="left")], + "power_t": [Interval(Real, None, None, closed="neither")], + } + + def __init__( + self, + nu=0.5, + fit_intercept=True, + max_iter=1000, + tol=1e-3, + shuffle=True, + verbose=0, + random_state=None, + learning_rate="optimal", + eta0=0.0, + power_t=0.5, + warm_start=False, + average=False, + ): + self.nu = nu + super(SGDOneClassSVM, self).__init__( + loss="hinge", + penalty="l2", + C=1.0, + l1_ratio=0, + fit_intercept=fit_intercept, + max_iter=max_iter, + tol=tol, + shuffle=shuffle, + verbose=verbose, + epsilon=DEFAULT_EPSILON, + random_state=random_state, + learning_rate=learning_rate, + eta0=eta0, + power_t=power_t, + early_stopping=False, + validation_fraction=0.1, + n_iter_no_change=5, + warm_start=warm_start, + average=average, + ) + + def _fit_one_class(self, X, alpha, C, sample_weight, learning_rate, max_iter): + """Uses SGD implementation with X and y=np.ones(n_samples).""" + + # The One-Class SVM uses the SGD implementation with + # y=np.ones(n_samples). + n_samples = X.shape[0] + y = np.ones(n_samples, dtype=X.dtype, order="C") + + dataset, offset_decay = make_dataset(X, y, sample_weight) + + penalty_type = self._get_penalty_type(self.penalty) + learning_rate_type = self._get_learning_rate_type(learning_rate) + + # early stopping is set to False for the One-Class SVM. thus + # validation_mask and validation_score_cb will be set to values + # associated to early_stopping=False in _make_validation_split and + # _make_validation_score_cb respectively. + validation_mask = self._make_validation_split(y, sample_mask=sample_weight > 0) + validation_score_cb = self._make_validation_score_cb( + validation_mask, X, y, sample_weight + ) + + random_state = check_random_state(self.random_state) + # numpy mtrand expects a C long which is a signed 32 bit integer under + # Windows + seed = random_state.randint(0, np.iinfo(np.int32).max) + + tol = self.tol if self.tol is not None else -np.inf + + one_class = 1 + # There are no class weights for the One-Class SVM and they are + # therefore set to 1. 
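+        # The solver itself optimizes an intercept rather than an offset; the
+        # two are related by intercept = 1 - offset, so the offset is passed
+        # in as an intercept below and mapped back to ``offset_`` once
+        # training finishes.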
+ pos_weight = 1 + neg_weight = 1 + + if self.average: + coef = self._standard_coef + intercept = self._standard_intercept + average_coef = self._average_coef + average_intercept = self._average_intercept + else: + coef = self.coef_ + intercept = 1 - self.offset_ + average_coef = None # Not used + average_intercept = [0] # Not used + + _plain_sgd = _get_plain_sgd_function(input_dtype=coef.dtype) + coef, intercept, average_coef, average_intercept, self.n_iter_ = _plain_sgd( + coef, + intercept[0], + average_coef, + average_intercept[0], + self._loss_function_, + penalty_type, + alpha, + C, + self.l1_ratio, + dataset, + validation_mask, + self.early_stopping, + validation_score_cb, + int(self.n_iter_no_change), + max_iter, + tol, + int(self.fit_intercept), + int(self.verbose), + int(self.shuffle), + seed, + neg_weight, + pos_weight, + learning_rate_type, + self.eta0, + self.power_t, + one_class, + self.t_, + offset_decay, + self.average, + ) + + self.t_ += self.n_iter_ * n_samples + + if self.average > 0: + self._average_intercept = np.atleast_1d(average_intercept) + self._standard_intercept = np.atleast_1d(intercept) + + if self.average <= self.t_ - 1.0: + # made enough updates for averaging to be taken into account + self.coef_ = average_coef + self.offset_ = 1 - np.atleast_1d(average_intercept) + else: + self.coef_ = coef + self.offset_ = 1 - np.atleast_1d(intercept) + + else: + self.offset_ = 1 - np.atleast_1d(intercept) + + def _partial_fit( + self, + X, + alpha, + C, + loss, + learning_rate, + max_iter, + sample_weight, + coef_init, + offset_init, + ): + first_call = getattr(self, "coef_", None) is None + X = validate_data( + self, + X, + None, + accept_sparse="csr", + dtype=[np.float64, np.float32], + order="C", + accept_large_sparse=False, + reset=first_call, + ) + + if first_call: + # TODO(1.7) remove 0 from average parameter constraint + if not isinstance(self.average, (bool, np.bool_)) and self.average == 0: + warnings.warn( + ( + "Passing average=0 to disable averaging is deprecated and will" + " be removed in 1.7. Please use average=False instead." + ), + FutureWarning, + ) + + n_features = X.shape[1] + + # Allocate datastructures from input arguments + sample_weight = _check_sample_weight(sample_weight, X, dtype=X.dtype) + + # We use intercept = 1 - offset where intercept is the intercept of + # the SGD implementation and offset is the offset of the One-Class SVM + # optimization problem. + if getattr(self, "coef_", None) is None or coef_init is not None: + self._allocate_parameter_mem( + n_classes=1, + n_features=n_features, + input_dtype=X.dtype, + coef_init=coef_init, + intercept_init=offset_init, + one_class=1, + ) + elif n_features != self.coef_.shape[-1]: + raise ValueError( + "Number of features %d does not match previous data %d." + % (n_features, self.coef_.shape[-1]) + ) + + if self.average and getattr(self, "_average_coef", None) is None: + self._average_coef = np.zeros(n_features, dtype=X.dtype, order="C") + self._average_intercept = np.zeros(1, dtype=X.dtype, order="C") + + self._loss_function_ = self._get_loss_function(loss) + if not hasattr(self, "t_"): + self.t_ = 1.0 + + # delegate to concrete training procedure + self._fit_one_class( + X, + alpha=alpha, + C=C, + learning_rate=learning_rate, + sample_weight=sample_weight, + max_iter=max_iter, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def partial_fit(self, X, y=None, sample_weight=None): + """Fit linear One-Class SVM with Stochastic Gradient Descent. 
+ + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Subset of the training data. + y : Ignored + Not used, present for API consistency by convention. + + sample_weight : array-like, shape (n_samples,), optional + Weights applied to individual samples. + If not provided, uniform weights are assumed. + + Returns + ------- + self : object + Returns a fitted instance of self. + """ + if not hasattr(self, "coef_"): + self._more_validate_params(for_partial_fit=True) + + alpha = self.nu / 2 + return self._partial_fit( + X, + alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + max_iter=1, + sample_weight=sample_weight, + coef_init=None, + offset_init=None, + ) + + def _fit( + self, + X, + alpha, + C, + loss, + learning_rate, + coef_init=None, + offset_init=None, + sample_weight=None, + ): + # TODO(1.7) remove 0 from average parameter constraint + if not isinstance(self.average, (bool, np.bool_)) and self.average == 0: + warnings.warn( + ( + "Passing average=0 to disable averaging is deprecated and will be " + "removed in 1.7. Please use average=False instead." + ), + FutureWarning, + ) + + if self.warm_start and hasattr(self, "coef_"): + if coef_init is None: + coef_init = self.coef_ + if offset_init is None: + offset_init = self.offset_ + else: + self.coef_ = None + self.offset_ = None + + # Clear iteration count for multiple call to fit. + self.t_ = 1.0 + + self._partial_fit( + X, + alpha, + C, + loss, + learning_rate, + self.max_iter, + sample_weight, + coef_init, + offset_init, + ) + + if ( + self.tol is not None + and self.tol > -np.inf + and self.n_iter_ == self.max_iter + ): + warnings.warn( + ( + "Maximum number of iteration reached before " + "convergence. Consider increasing max_iter to " + "improve the fit." + ), + ConvergenceWarning, + ) + + return self + + @_fit_context(prefer_skip_nested_validation=True) + def fit(self, X, y=None, coef_init=None, offset_init=None, sample_weight=None): + """Fit linear One-Class SVM with Stochastic Gradient Descent. + + This solves an equivalent optimization problem of the + One-Class SVM primal optimization problem and returns a weight vector + w and an offset rho such that the decision function is given by + - rho. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Training data. + y : Ignored + Not used, present for API consistency by convention. + + coef_init : array, shape (n_classes, n_features) + The initial coefficients to warm-start the optimization. + + offset_init : array, shape (n_classes,) + The initial offset to warm-start the optimization. + + sample_weight : array-like, shape (n_samples,), optional + Weights applied to individual samples. + If not provided, uniform weights are assumed. These weights will + be multiplied with class_weight (passed through the + constructor) if class_weight is specified. + + Returns + ------- + self : object + Returns a fitted instance of self. + """ + self._more_validate_params() + + alpha = self.nu / 2 + self._fit( + X, + alpha=alpha, + C=1.0, + loss=self.loss, + learning_rate=self.learning_rate, + coef_init=coef_init, + offset_init=offset_init, + sample_weight=sample_weight, + ) + + return self + + def decision_function(self, X): + """Signed distance to the separating hyperplane. + + Signed distance is positive for an inlier and negative for an + outlier. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Testing data. 
+ + Returns + ------- + dec : array-like, shape (n_samples,) + Decision function values of the samples. + """ + + check_is_fitted(self, "coef_") + + X = validate_data(self, X, accept_sparse="csr", reset=False) + decisions = safe_sparse_dot(X, self.coef_.T, dense_output=True) - self.offset_ + + return decisions.ravel() + + def score_samples(self, X): + """Raw scoring function of the samples. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Testing data. + + Returns + ------- + score_samples : array-like, shape (n_samples,) + Unshifted scoring function values of the samples. + """ + score_samples = self.decision_function(X) + self.offset_ + return score_samples + + def predict(self, X): + """Return labels (1 inlier, -1 outlier) of the samples. + + Parameters + ---------- + X : {array-like, sparse matrix}, shape (n_samples, n_features) + Testing data. + + Returns + ------- + y : array, shape (n_samples,) + Labels of the samples. + """ + y = (self.decision_function(X) >= 0).astype(np.int32) + y[y == 0] = -1 # for consistency with outlier detectors + return y + + def __sklearn_tags__(self): + tags = super().__sklearn_tags__() + tags.input_tags.sparse = True + return tags diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_base.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14bb0bd1f9e8a12d6ffaf0c17f79f26fb29e393b Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_base.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_calibration.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_calibration.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72cecb2b97feef384b1284d95d5bd42bfd13cc03 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_calibration.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_check_build.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_check_build.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1f002fdc8faa0af1b9a3aa6988d49924fd71024 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_check_build.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_config.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7874adf433c5c22f7b4ca5e9fe3b9d16e3e0a50a Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_config.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5e1ecaa833e0a8d9a162fcd1bbeca88b2de3df3a Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_discriminant_analysis.cpython-310.pyc 
differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05327427f7a59fa935c69c3b30daaf791d4a5a6c Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_docstring_parameters.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_dummy.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_dummy.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7d8aca0019e4467e4f34f624cfa94fb754af444 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_dummy.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_ridge.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_ridge.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b746060a5750f8323a3d7196a8e318ad84562ca8 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_kernel_ridge.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metadata_routing.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metadata_routing.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce4679f9a0aee282b31ce2412b7cb4425e0b7cb7 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metadata_routing.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c94babae977a79bbff9a303c1f05f1bc0578386 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_metaestimators.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multiclass.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multiclass.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d96284a35845d21f490205b1dd4f6c17f554d801 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_multiclass.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_naive_bayes.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_naive_bayes.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..583c7cfe3cf0e282e0e9c518eb5fb04bb1b90fc1 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_naive_bayes.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_pipeline.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_pipeline.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..5745cfa5f6bdcfc849478f09d98d7ea2bb969f5d Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_pipeline.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_public_functions.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_public_functions.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..96749f4fb62ad1929251d1b46afc050111c4da7b Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_public_functions.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_random_projection.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_random_projection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76878e12dff2d2d7a3df58a87ea296793712f216 Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/__pycache__/test_random_projection.cpython-310.pyc differ diff --git a/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/test_check_build.py b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/test_check_build.py new file mode 100644 index 0000000000000000000000000000000000000000..baf72093354e19a2554ad0b29693c10f87d33a85 --- /dev/null +++ b/evalkit_tf437/lib/python3.10/site-packages/sklearn/tests/test_check_build.py @@ -0,0 +1,15 @@ +""" +Smoke Test the check_build module +""" + +# Authors: The scikit-learn developers +# SPDX-License-Identifier: BSD-3-Clause + +import pytest + +from sklearn.__check_build import raise_build_error + + +def test_raise_build_error(): + with pytest.raises(ImportError): + raise_build_error(ImportError())
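
Usage note (not part of the diff above): the methods added in the one-class SVM hunk — fit, partial_fit, decision_function, score_samples, and predict — form the public API of sklearn.linear_model.SGDOneClassSVM. The sketch below is a minimal, illustrative example of that API; the data, nu value, and random_state are arbitrary choices, not values taken from this patch.

# Minimal usage sketch for SGDOneClassSVM; data and hyperparameters are illustrative only.
import numpy as np
from sklearn.linear_model import SGDOneClassSVM

rng = np.random.RandomState(42)
X_train = 0.3 * rng.randn(500, 2)              # inlier-like training points
X_test = np.array([[0.0, 0.0], [4.0, 4.0]])    # one inlier-like point, one clear outlier

clf = SGDOneClassSVM(nu=0.1, shuffle=True, random_state=42)
clf.fit(X_train)

print(clf.predict(X_test))            # +1 for inliers, -1 for outliers
print(clf.decision_function(X_test))  # signed distance: positive for inliers, negative for outliers
print(clf.score_samples(X_test))      # raw scores, i.e. decision_function(X) + offset_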