diff --git a/.venv/lib/python3.13/site-packages/certifi-2025.7.14.dist-info/licenses/LICENSE b/.venv/lib/python3.13/site-packages/certifi-2025.7.14.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..62b076cdee58ec8f34034141ba0befd9015b0c7e --- /dev/null +++ b/.venv/lib/python3.13/site-packages/certifi-2025.7.14.dist-info/licenses/LICENSE @@ -0,0 +1,20 @@ +This package contains a modified version of ca-bundle.crt: + +ca-bundle.crt -- Bundle of CA Root Certificates + +This is a bundle of X.509 certificates of public Certificate Authorities +(CA). These were automatically extracted from Mozilla's root certificates +file (certdata.txt). This file can be found in the mozilla source tree: +https://hg.mozilla.org/mozilla-central/file/tip/security/nss/lib/ckfw/builtins/certdata.txt +It contains the certificates in PEM format and therefore +can be directly used with curl / libcurl / php_curl, or with +an Apache+mod_ssl webserver for SSL client authentication. +Just configure this file as the SSLCACertificateFile.# + +***** BEGIN LICENSE BLOCK ***** +This Source Code Form is subject to the terms of the Mozilla Public License, +v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain +one at http://mozilla.org/MPL/2.0/. + +***** END LICENSE BLOCK ***** +@(#) $RCSfile: certdata.txt,v $ $Revision: 1.80 $ $Date: 2011/11/03 15:11:58 $ diff --git a/.venv/lib/python3.13/site-packages/certifi/__pycache__/__init__.cpython-313.pyc b/.venv/lib/python3.13/site-packages/certifi/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81d0166bfab5defbb19419ce66e68c215adc25f2 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/certifi/__pycache__/__init__.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/certifi/__pycache__/core.cpython-313.pyc b/.venv/lib/python3.13/site-packages/certifi/__pycache__/core.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8c11f404b9a4c2cd1427c867839940e0c08d5b7c Binary files /dev/null and b/.venv/lib/python3.13/site-packages/certifi/__pycache__/core.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/__init__.cpython-313.pyc b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a0e5d0e9fc372bbe31b6241d8b307900d61eb07c Binary files /dev/null and b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/__init__.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/api.cpython-313.pyc b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/api.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ffcbf3ff0a72ff8987421c93c092746acba3df0 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/api.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/cd.cpython-313.pyc b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/cd.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..40b8b9fc6dc5303994ecf88b1b875b65786b2611 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/cd.cpython-313.pyc differ diff --git 
a/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/constant.cpython-313.pyc b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/constant.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a0fd8c7a880bf5255dfacf56d05fd1eaab62967 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/constant.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/legacy.cpython-313.pyc b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/legacy.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d483630f6c1aaf1f8afe475a9529f1a58912844e Binary files /dev/null and b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/legacy.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/models.cpython-313.pyc b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/models.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..31fa6a08981ded11465ca9b9d191dae4bdd934a4 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/models.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/utils.cpython-313.pyc b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/utils.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2a915aff99afa8f6e184b6d019b024ae1cbe786 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/utils.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/version.cpython-313.pyc b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/version.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e3965f553959551532058efa363f7b9b874640d Binary files /dev/null and b/.venv/lib/python3.13/site-packages/charset_normalizer/__pycache__/version.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/charset_normalizer/cli/__init__.py b/.venv/lib/python3.13/site-packages/charset_normalizer/cli/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..543a5a4de49d07690e73df778aa580589d0789c6 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/charset_normalizer/cli/__init__.py @@ -0,0 +1,8 @@ +from __future__ import annotations + +from .__main__ import cli_detect, query_yes_no + +__all__ = ( + "cli_detect", + "query_yes_no", +) diff --git a/.venv/lib/python3.13/site-packages/charset_normalizer/cli/__main__.py b/.venv/lib/python3.13/site-packages/charset_normalizer/cli/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..cb64156a0fc164442acc4f4517975a5699d26354 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/charset_normalizer/cli/__main__.py @@ -0,0 +1,381 @@ +from __future__ import annotations + +import argparse +import sys +import typing +from json import dumps +from os.path import abspath, basename, dirname, join, realpath +from platform import python_version +from unicodedata import unidata_version + +import charset_normalizer.md as md_module +from charset_normalizer import from_fp +from charset_normalizer.models import CliDetectionResult +from charset_normalizer.version import __version__ + + +def query_yes_no(question: str, default: str = "yes") -> bool: + """Ask a yes/no question 
via input() and return their answer. + + "question" is a string that is presented to the user. + "default" is the presumed answer if the user just hits <Enter>. + It must be "yes" (the default), "no" or None (meaning + an answer is required of the user). + + The "answer" return value is True for "yes" or False for "no". + + Credit goes to (c) https://stackoverflow.com/questions/3041986/apt-command-line-interface-like-yes-no-input + """ + valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False} + if default is None: + prompt = " [y/n] " + elif default == "yes": + prompt = " [Y/n] " + elif default == "no": + prompt = " [y/N] " + else: + raise ValueError("invalid default answer: '%s'" % default) + + while True: + sys.stdout.write(question + prompt) + choice = input().lower() + if default is not None and choice == "": + return valid[default] + elif choice in valid: + return valid[choice] + else: + sys.stdout.write("Please respond with 'yes' or 'no' (or 'y' or 'n').\n") + + +class FileType: + """Factory for creating file object types + + Instances of FileType are typically passed as type= arguments to the + ArgumentParser add_argument() method. + + Keyword Arguments: + - mode -- A string indicating how the file is to be opened. Accepts the + same values as the builtin open() function. + - bufsize -- The file's desired buffer size. Accepts the same values as + the builtin open() function. + - encoding -- The file's encoding. Accepts the same values as the + builtin open() function. + - errors -- A string indicating how encoding and decoding errors are to + be handled. Accepts the same value as the builtin open() function. + + Backported from CPython 3.12 + """ + + def __init__( + self, + mode: str = "r", + bufsize: int = -1, + encoding: str | None = None, + errors: str | None = None, + ): + self._mode = mode + self._bufsize = bufsize + self._encoding = encoding + self._errors = errors + + def __call__(self, string: str) -> typing.IO: # type: ignore[type-arg] + # the special argument "-" means sys.std{in,out} + if string == "-": + if "r" in self._mode: + return sys.stdin.buffer if "b" in self._mode else sys.stdin + elif any(c in self._mode for c in "wax"): + return sys.stdout.buffer if "b" in self._mode else sys.stdout + else: + msg = f'argument "-" with mode {self._mode}' + raise ValueError(msg) + + # all other arguments are used as file names + try: + return open(string, self._mode, self._bufsize, self._encoding, self._errors) + except OSError as e: + message = f"can't open '{string}': {e}" + raise argparse.ArgumentTypeError(message) + + def __repr__(self) -> str: + args = self._mode, self._bufsize + kwargs = [("encoding", self._encoding), ("errors", self._errors)] + args_str = ", ".join( + [repr(arg) for arg in args if arg != -1] + + [f"{kw}={arg!r}" for kw, arg in kwargs if arg is not None] + ) + return f"{type(self).__name__}({args_str})" + + +def cli_detect(argv: list[str] | None = None) -> int: + """ + CLI assistant using ARGV and ArgumentParser + :param argv: + :return: 0 if everything is fine, anything else equals trouble + """ + parser = argparse.ArgumentParser( + description="The Real First Universal Charset Detector. " + "Discover originating encoding used on text file. " + "Normalize text to unicode." + ) + + parser.add_argument( + "files", type=FileType("rb"), nargs="+", help="File(s) to be analysed" + ) + parser.add_argument( + "-v", + "--verbose", + action="store_true", + default=False, + dest="verbose", + help="Display complementary information about file if any. 
" "Stdout will contain logs about the detection process.", + ) + parser.add_argument( + "-a", + "--with-alternative", + action="store_true", + default=False, + dest="alternatives", + help="Output complementary possibilities if any. Top-level JSON WILL be a list.", + ) + parser.add_argument( + "-n", + "--normalize", + action="store_true", + default=False, + dest="normalize", + help="Normalize the input file. If not set, the program does not write anything.", + ) + parser.add_argument( + "-m", + "--minimal", + action="store_true", + default=False, + dest="minimal", + help="Only output the charset detected to STDOUT. This disables JSON output.", + ) + parser.add_argument( + "-r", + "--replace", + action="store_true", + default=False, + dest="replace", + help="Replace file when trying to normalize it instead of creating a new one.", + ) + parser.add_argument( + "-f", + "--force", + action="store_true", + default=False, + dest="force", + help="Replace file without asking if you are sure; use this flag with caution.", + ) + parser.add_argument( + "-i", + "--no-preemptive", + action="store_true", + default=False, + dest="no_preemptive", + help="Disable looking at a charset declaration to hint the detector.", + ) + parser.add_argument( + "-t", + "--threshold", + action="store", + default=0.2, + type=float, + dest="threshold", + help="Define a custom maximum amount of noise allowed in decoded content. 0. <= noise <= 1.", + ) + parser.add_argument( + "--version", + action="version", + version="Charset-Normalizer {} - Python {} - Unicode {} - SpeedUp {}".format( + __version__, + python_version(), + unidata_version, + "OFF" if md_module.__file__.lower().endswith(".py") else "ON", + ), + help="Show version information and exit.", + ) + + args = parser.parse_args(argv) + + if args.replace is True and args.normalize is False: + if args.files: + for my_file in args.files: + my_file.close() + print("Use --replace in addition to --normalize only.", file=sys.stderr) + return 1 + + if args.force is True and args.replace is False: + if args.files: + for my_file in args.files: + my_file.close() + print("Use --force in addition to --replace only.", file=sys.stderr) + return 1 + + if args.threshold < 0.0 or args.threshold > 1.0: + if args.files: + for my_file in args.files: + my_file.close() + print("--threshold VALUE should be between 0. AND 1.", file=sys.stderr) + return 1 + + x_ = [] + + for my_file in args.files: + matches = from_fp( + my_file, + threshold=args.threshold, + explain=args.verbose, + preemptive_behaviour=args.no_preemptive is False, + ) + + best_guess = matches.best() + + if best_guess is None: + print( + 'Unable to identify originating encoding for "{}". {}'.format( + my_file.name, + ( + "Maybe try increasing the maximum amount of chaos."
+ if args.threshold < 1.0 + else "" + ), + ), + file=sys.stderr, + ) + x_.append( + CliDetectionResult( + abspath(my_file.name), + None, + [], + [], + "Unknown", + [], + False, + 1.0, + 0.0, + None, + True, + ) + ) + else: + x_.append( + CliDetectionResult( + abspath(my_file.name), + best_guess.encoding, + best_guess.encoding_aliases, + [ + cp + for cp in best_guess.could_be_from_charset + if cp != best_guess.encoding + ], + best_guess.language, + best_guess.alphabets, + best_guess.bom, + best_guess.percent_chaos, + best_guess.percent_coherence, + None, + True, + ) + ) + + if len(matches) > 1 and args.alternatives: + for el in matches: + if el != best_guess: + x_.append( + CliDetectionResult( + abspath(my_file.name), + el.encoding, + el.encoding_aliases, + [ + cp + for cp in el.could_be_from_charset + if cp != el.encoding + ], + el.language, + el.alphabets, + el.bom, + el.percent_chaos, + el.percent_coherence, + None, + False, + ) + ) + + if args.normalize is True: + if best_guess.encoding.startswith("utf") is True: + print( + '"{}" file does not need to be normalized, as it already came from unicode.'.format( + my_file.name + ), + file=sys.stderr, + ) + if my_file.closed is False: + my_file.close() + continue + + dir_path = dirname(realpath(my_file.name)) + file_name = basename(realpath(my_file.name)) + + o_: list[str] = file_name.split(".") + + if args.replace is False: + o_.insert(-1, best_guess.encoding) + if my_file.closed is False: + my_file.close() + elif ( + args.force is False + and query_yes_no( + 'Are you sure to normalize "{}" by replacing it ?'.format( + my_file.name + ), + "no", + ) + is False + ): + if my_file.closed is False: + my_file.close() + continue + + try: + x_[0].unicode_path = join(dir_path, ".".join(o_)) + + with open(x_[0].unicode_path, "wb") as fp: + fp.write(best_guess.output()) + except OSError as e: + print(str(e), file=sys.stderr) + if my_file.closed is False: + my_file.close() + return 2 + + if my_file.closed is False: + my_file.close() + + if args.minimal is False: + print( + dumps( + [el.__dict__ for el in x_] if len(x_) > 1 else x_[0].__dict__, + ensure_ascii=True, + indent=4, + ) + ) + else: + for my_file in args.files: + print( + ", ".join( + [ + el.encoding or "undefined" + for el in x_ + if el.path == abspath(my_file.name) + ] + ) + ) + + return 0 + + +if __name__ == "__main__": + cli_detect() diff --git a/.venv/lib/python3.13/site-packages/filelock-3.18.0.dist-info/licenses/LICENSE b/.venv/lib/python3.13/site-packages/filelock-3.18.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..cf1ab25da0349f84a3fdd40032f0ce99db813b8b --- /dev/null +++ b/.venv/lib/python3.13/site-packages/filelock-3.18.0.dist-info/licenses/LICENSE @@ -0,0 +1,24 @@ +This is free and unencumbered software released into the public domain. + +Anyone is free to copy, modify, publish, use, compile, sell, or +distribute this software, either in source code form or as a compiled +binary, for any purpose, commercial or non-commercial, and by any +means. + +In jurisdictions that recognize copyright laws, the author or authors +of this software dedicate any and all copyright interest in the +software to the public domain. We make this dedication for the benefit +of the public at large and to the detriment of our heirs and +successors. We intend this dedication to be an overt act of +relinquishment in perpetuity of all present and future rights to this +software under copyright law. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. +IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR +OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. + +For more information, please refer to <https://unlicense.org> diff --git a/.venv/lib/python3.13/site-packages/filelock/__pycache__/_soft.cpython-313.pyc b/.venv/lib/python3.13/site-packages/filelock/__pycache__/_soft.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3abafcb041d81e2e5f420853c8822a5907273658 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/filelock/__pycache__/_soft.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/filelock/__pycache__/_windows.cpython-313.pyc b/.venv/lib/python3.13/site-packages/filelock/__pycache__/_windows.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8859a6e6a1b01ad6f08b445afce5f7a48c775efe Binary files /dev/null and b/.venv/lib/python3.13/site-packages/filelock/__pycache__/_windows.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/filelock/__pycache__/asyncio.cpython-313.pyc b/.venv/lib/python3.13/site-packages/filelock/__pycache__/asyncio.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..72bad0575eac1e65c481553abb19cf750901c36d Binary files /dev/null and b/.venv/lib/python3.13/site-packages/filelock/__pycache__/asyncio.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__init__.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..49d088214505b9604964ab142e7f8a5b38ccd5ef --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__init__.py @@ -0,0 +1,27 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +from abc import ABC, abstractmethod +from argparse import _SubParsersAction + + +class BaseHuggingfaceCLICommand(ABC): + @staticmethod + @abstractmethod + def register_subcommand(parser: _SubParsersAction): + raise NotImplementedError() + + @abstractmethod + def run(self): + raise NotImplementedError() diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/__init__.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1337467ae15487d832f4bde8769aa56d54c61c5c Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/__init__.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/_cli_utils.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/_cli_utils.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..76e3cf5895385e4ce4a37ed00ddb739504a48584 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/_cli_utils.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/delete_cache.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/delete_cache.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90960b50ad2e5741148f692302f5dc629993b899 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/delete_cache.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/download.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/download.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..14f3e7d62e9f57cdf2bbbfc540d82dcacb41edea Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/download.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/env.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/env.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..59c2a551ca1358442e44152629e5f43a45f8d640 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/env.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/huggingface_cli.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/huggingface_cli.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c7e676c75e1bc62307d3e5bf0d26b0d457aed788 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/huggingface_cli.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/lfs.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/lfs.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f58bd4589c82585cec19950753468ac3ffedf5e3 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/lfs.cpython-313.pyc differ diff --git 
a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/repo.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/repo.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1fd38bfa5a1ad75084934a0b72f7db6f5810341a Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/repo.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/repo_files.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/repo_files.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6654429bbef9e93ea620d57c134bcae7c7619d87 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/repo_files.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/scan_cache.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/scan_cache.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e80ac92643650e6662388e6054663f120945f51 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/scan_cache.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/tag.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/tag.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94fe0e5355703e9c1a6d7fd2fe2a542930a5bbfa Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/tag.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/upload.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/upload.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..be071506f84a545b95a486743022c2f3d5a475e6 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/upload.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/upload_large_folder.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/upload_large_folder.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2f46569d9b6a58d5ebc110447d823db15510819 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/upload_large_folder.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/user.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/user.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3e7053d30720c218a5504f89f1aebe1ec7bd9730 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/user.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/version.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/version.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c366e15ae42bae002353a4ba2d9c40f4639a25f2 Binary files /dev/null and 
b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/__pycache__/version.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/_cli_utils.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/_cli_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..bd56ad6896db2a257323e022896940c0ba0d68d3 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/_cli_utils.py @@ -0,0 +1,69 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains a utility for good-looking prints.""" + +import os +from typing import List, Union + + +class ANSI: + """ + Helper for en.wikipedia.org/wiki/ANSI_escape_code + """ + + _bold = "\u001b[1m" + _gray = "\u001b[90m" + _red = "\u001b[31m" + _reset = "\u001b[0m" + _yellow = "\u001b[33m" + + @classmethod + def bold(cls, s: str) -> str: + return cls._format(s, cls._bold) + + @classmethod + def gray(cls, s: str) -> str: + return cls._format(s, cls._gray) + + @classmethod + def red(cls, s: str) -> str: + return cls._format(s, cls._bold + cls._red) + + @classmethod + def yellow(cls, s: str) -> str: + return cls._format(s, cls._yellow) + + @classmethod + def _format(cls, s: str, code: str) -> str: + if os.environ.get("NO_COLOR"): + # See https://no-color.org/ + return s + return f"{code}{s}{cls._reset}" + + +def tabulate(rows: List[List[Union[str, int]]], headers: List[str]) -> str: + """ + Inspired by: + + - stackoverflow.com/a/8356620/593036 + - stackoverflow.com/questions/9535954/printing-lists-as-tabular-data + """ + col_widths = [max(len(str(x)) for x in col) for col in zip(*rows, headers)] + row_format = ("{{:{}}} " * len(headers)).format(*col_widths) + lines = [] + lines.append(row_format.format(*headers)) + lines.append(row_format.format(*["-" * w for w in col_widths])) + for row in rows: + lines.append(row_format.format(*row)) + return "\n".join(lines) diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/delete_cache.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/delete_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..fc9eecf46977e631730c4d985bc8c4bd3c5286db --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/delete_cache.py @@ -0,0 +1,474 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Contains command to delete some revisions from the HF cache directory. + +Usage: + huggingface-cli delete-cache + huggingface-cli delete-cache --disable-tui + huggingface-cli delete-cache --dir ~/.cache/huggingface/hub + huggingface-cli delete-cache --sort=size + +NOTE: + This command is based on `InquirerPy` to build the multiselect menu in the terminal. + This dependency has to be installed with `pip install huggingface_hub[cli]`. Since + we want to avoid as much as possible cross-platform issues, I chose a library that + is built on top of `python-prompt-toolkit` which seems to be a reference in terminal + GUI (actively maintained on both Unix and Windows, 7.9k stars). + + For the moment, the TUI feature is in beta. + + See: + - https://github.com/kazhala/InquirerPy + - https://inquirerpy.readthedocs.io/en/latest/ + - https://github.com/prompt-toolkit/python-prompt-toolkit + + Other solutions could have been: + - `simple_term_menu`: would be good as well for our use case but some issues suggest + that Windows is less supported. + See: https://github.com/IngoMeyer441/simple-term-menu + - `PyInquirer`: very similar to `InquirerPy` but older and not maintained anymore. + In particular, no support of Python3.10. + See: https://github.com/CITGuru/PyInquirer + - `pick` (or `pickpack`): easy to use and flexible but built on top of Python's + standard library `curses` that is specific to Unix (not implemented on Windows). + See https://github.com/wong2/pick and https://github.com/anafvana/pickpack. + - `inquirer`: lot of traction (700 stars) but explicitly states "experimental + support of Windows". Not built on top of `python-prompt-toolkit`. + See https://github.com/magmax/python-inquirer + +TODO: add support for `huggingface-cli delete-cache aaaaaa bbbbbb cccccc (...)` ? +TODO: add "--keep-last" arg to delete revisions that are not on `main` ref +TODO: add "--filter" arg to filter repositories by name ? +TODO: add "--limit" arg to limit to X repos ? +TODO: add "-y" arg for immediate deletion ? +See discussions in https://github.com/huggingface/huggingface_hub/issues/1025. +""" + +import os +from argparse import Namespace, _SubParsersAction +from functools import wraps +from tempfile import mkstemp +from typing import Any, Callable, Iterable, List, Literal, Optional, Union + +from ..utils import CachedRepoInfo, CachedRevisionInfo, HFCacheInfo, scan_cache_dir +from . import BaseHuggingfaceCLICommand +from ._cli_utils import ANSI + + +try: + from InquirerPy import inquirer + from InquirerPy.base.control import Choice + from InquirerPy.separator import Separator + + _inquirer_py_available = True +except ImportError: + _inquirer_py_available = False + +SortingOption_T = Literal["alphabetical", "lastUpdated", "lastUsed", "size"] + + +def require_inquirer_py(fn: Callable) -> Callable: + """Decorator to flag methods that require `InquirerPy`.""" + + # TODO: refactor this + imports in a unified pattern across codebase + @wraps(fn) + def _inner(*args, **kwargs): + if not _inquirer_py_available: + raise ImportError( + "The `delete-cache` command requires extra dependencies to work with" + " the TUI.\nPlease run `pip install huggingface_hub[cli]` to install" + " them.\nOtherwise, disable TUI using the `--disable-tui` flag." 
+ ) + + return fn(*args, **kwargs) + + return _inner + + +# Possibility for the user to cancel deletion +_CANCEL_DELETION_STR = "CANCEL_DELETION" + + +class DeleteCacheCommand(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + delete_cache_parser = parser.add_parser("delete-cache", help="Delete revisions from the cache directory.") + + delete_cache_parser.add_argument( + "--dir", + type=str, + default=None, + help="cache directory (optional). Default to the default HuggingFace cache.", + ) + + delete_cache_parser.add_argument( + "--disable-tui", + action="store_true", + help=( + "Disable Terminal User Interface (TUI) mode. Useful if your" + " platform/terminal doesn't support the multiselect menu." + ), + ) + + delete_cache_parser.add_argument( + "--sort", + nargs="?", + choices=["alphabetical", "lastUpdated", "lastUsed", "size"], + help=( + "Sort repositories by the specified criteria. Options: " + "'alphabetical' (A-Z), " + "'lastUpdated' (newest first), " + "'lastUsed' (most recent first), " + "'size' (largest first)." + ), + ) + + delete_cache_parser.set_defaults(func=DeleteCacheCommand) + + def __init__(self, args: Namespace) -> None: + self.cache_dir: Optional[str] = args.dir + self.disable_tui: bool = args.disable_tui + self.sort_by: Optional[SortingOption_T] = args.sort + + def run(self): + """Run `delete-cache` command with or without TUI.""" + # Scan cache directory + hf_cache_info = scan_cache_dir(self.cache_dir) + + # Manual review from the user + if self.disable_tui: + selected_hashes = _manual_review_no_tui(hf_cache_info, preselected=[], sort_by=self.sort_by) + else: + selected_hashes = _manual_review_tui(hf_cache_info, preselected=[], sort_by=self.sort_by) + + # If deletion is not cancelled + if len(selected_hashes) > 0 and _CANCEL_DELETION_STR not in selected_hashes: + confirm_message = _get_expectations_str(hf_cache_info, selected_hashes) + " Confirm deletion ?" + + # Confirm deletion + if self.disable_tui: + confirmed = _ask_for_confirmation_no_tui(confirm_message) + else: + confirmed = _ask_for_confirmation_tui(confirm_message) + + # Deletion is confirmed + if confirmed: + strategy = hf_cache_info.delete_revisions(*selected_hashes) + print("Start deletion.") + strategy.execute() + print( + f"Done. Deleted {len(strategy.repos)} repo(s) and" + f" {len(strategy.snapshots)} revision(s) for a total of" + f" {strategy.expected_freed_size_str}." + ) + return + + # Deletion is cancelled + print("Deletion is cancelled. Do nothing.") + + +def _get_repo_sorting_key(repo: CachedRepoInfo, sort_by: Optional[SortingOption_T] = None): + if sort_by == "alphabetical": + return (repo.repo_type, repo.repo_id.lower()) # by type then name + elif sort_by == "lastUpdated": + return -max(rev.last_modified for rev in repo.revisions) # newest first + elif sort_by == "lastUsed": + return -repo.last_accessed # most recently used first + elif sort_by == "size": + return -repo.size_on_disk # largest first + else: + return (repo.repo_type, repo.repo_id) # default stable order + + +@require_inquirer_py +def _manual_review_tui( + hf_cache_info: HFCacheInfo, + preselected: List[str], + sort_by: Optional[SortingOption_T] = None, +) -> List[str]: + """Ask the user for a manual review of the revisions to delete. + + Displays a multi-select menu in the terminal (TUI). 
+ """ + # Define multiselect list + choices = _get_tui_choices_from_scan( + repos=hf_cache_info.repos, + preselected=preselected, + sort_by=sort_by, + ) + checkbox = inquirer.checkbox( + message="Select revisions to delete:", + choices=choices, # List of revisions with some pre-selection + cycle=False, # No loop between top and bottom + height=100, # Large list if possible + # We use the instruction to display to the user the expected effect of the + # deletion. + instruction=_get_expectations_str( + hf_cache_info, + selected_hashes=[c.value for c in choices if isinstance(c, Choice) and c.enabled], + ), + # We use the long instruction to should keybindings instructions to the user + long_instruction="Press to select, to validate and to quit without modification.", + # Message that is displayed once the user validates its selection. + transformer=lambda result: f"{len(result)} revision(s) selected.", + ) + + # Add a callback to update the information line when a revision is + # selected/unselected + def _update_expectations(_) -> None: + # Hacky way to dynamically set an instruction message to the checkbox when + # a revision hash is selected/unselected. + checkbox._instruction = _get_expectations_str( + hf_cache_info, + selected_hashes=[choice["value"] for choice in checkbox.content_control.choices if choice["enabled"]], + ) + + checkbox.kb_func_lookup["toggle"].append({"func": _update_expectations}) + + # Finally display the form to the user. + try: + return checkbox.execute() + except KeyboardInterrupt: + return [] # Quit without deletion + + +@require_inquirer_py +def _ask_for_confirmation_tui(message: str, default: bool = True) -> bool: + """Ask for confirmation using Inquirer.""" + return inquirer.confirm(message, default=default).execute() + + +def _get_tui_choices_from_scan( + repos: Iterable[CachedRepoInfo], + preselected: List[str], + sort_by: Optional[SortingOption_T] = None, +) -> List: + """Build a list of choices from the scanned repos. + + Args: + repos (*Iterable[`CachedRepoInfo`]*): + List of scanned repos on which we want to delete revisions. + preselected (*List[`str`]*): + List of revision hashes that will be preselected. + sort_by (*Optional[SortingOption_T]*): + Sorting direction. Choices: "alphabetical", "lastUpdated", "lastUsed", "size". + + Return: + The list of choices to pass to `inquirer.checkbox`. 
+ """ + choices: List[Union[Choice, Separator]] = [] + + # First choice is to cancel the deletion + choices.append( + Choice( + _CANCEL_DELETION_STR, + name="None of the following (if selected, nothing will be deleted).", + enabled=False, + ) + ) + + # Sort repos based on specified criteria + sorted_repos = sorted(repos, key=lambda repo: _get_repo_sorting_key(repo, sort_by)) + + for repo in sorted_repos: + # Repo as separator + choices.append( + Separator( + f"\n{repo.repo_type.capitalize()} {repo.repo_id} ({repo.size_on_disk_str}," + f" used {repo.last_accessed_str})" + ) + ) + for revision in sorted(repo.revisions, key=_revision_sorting_order): + # Revision as choice + choices.append( + Choice( + revision.commit_hash, + name=( + f"{revision.commit_hash[:8]}:" + f" {', '.join(sorted(revision.refs)) or '(detached)'} #" + f" modified {revision.last_modified_str}" + ), + enabled=revision.commit_hash in preselected, + ) + ) + + # Return choices + return choices + + +def _manual_review_no_tui( + hf_cache_info: HFCacheInfo, + preselected: List[str], + sort_by: Optional[SortingOption_T] = None, +) -> List[str]: + """Ask the user for a manual review of the revisions to delete. + + Used when TUI is disabled. Manual review happens in a separate tmp file that the + user can manually edit. + """ + # 1. Generate temporary file with delete commands. + fd, tmp_path = mkstemp(suffix=".txt") # suffix to make it easier to find by editors + os.close(fd) + + lines = [] + + sorted_repos = sorted(hf_cache_info.repos, key=lambda repo: _get_repo_sorting_key(repo, sort_by)) + + for repo in sorted_repos: + lines.append( + f"\n# {repo.repo_type.capitalize()} {repo.repo_id} ({repo.size_on_disk_str}," + f" used {repo.last_accessed_str})" + ) + for revision in sorted(repo.revisions, key=_revision_sorting_order): + lines.append( + # Deselect by prepending a '#' + f"{'' if revision.commit_hash in preselected else '#'} " + f" {revision.commit_hash} # Refs:" + # Print `refs` as comment on same line + f" {', '.join(sorted(revision.refs)) or '(detached)'} # modified" + # Print `last_modified` as comment on same line + f" {revision.last_modified_str}" + ) + + with open(tmp_path, "w") as f: + f.write(_MANUAL_REVIEW_NO_TUI_INSTRUCTIONS) + f.write("\n".join(lines)) + + # 2. Prompt instructions to user. + instructions = f""" + TUI is disabled. In order to select which revisions you want to delete, please edit + the following file using the text editor of your choice. Instructions for manual + editing are located at the beginning of the file. Edit the file, save it and confirm + to continue. + File to edit: {ANSI.bold(tmp_path)} + """ + print("\n".join(line.strip() for line in instructions.strip().split("\n"))) + + # 3. Wait for user confirmation. + while True: + selected_hashes = _read_manual_review_tmp_file(tmp_path) + if _ask_for_confirmation_no_tui( + _get_expectations_str(hf_cache_info, selected_hashes) + " Continue ?", + default=False, + ): + break + + # 4. 
Return selected_hashes sorted to maintain stable order + os.remove(tmp_path) + return sorted(selected_hashes) # Sort to maintain stable order + + +def _ask_for_confirmation_no_tui(message: str, default: bool = True) -> bool: + """Ask for confirmation using pure Python.""" + YES = ("y", "yes", "1") + NO = ("n", "no", "0") + DEFAULT = "" + ALL = YES + NO + (DEFAULT,) + full_message = message + (" (Y/n) " if default else " (y/N) ") + while True: + answer = input(full_message).lower() + if answer == DEFAULT: + return default + if answer in YES: + return True + if answer in NO: + return False + print(f"Invalid input. Must be one of {ALL}") + + +def _get_expectations_str(hf_cache_info: HFCacheInfo, selected_hashes: List[str]) -> str: + """Format a string to display to the user how much space would be saved. + + Example: + ``` + >>> _get_expectations_str(hf_cache_info, selected_hashes) + '7 revisions selected counting for 4.3G.' + ``` + """ + if _CANCEL_DELETION_STR in selected_hashes: + return "Nothing will be deleted." + strategy = hf_cache_info.delete_revisions(*selected_hashes) + return f"{len(selected_hashes)} revisions selected counting for {strategy.expected_freed_size_str}." + + +def _read_manual_review_tmp_file(tmp_path: str) -> List[str]: + """Read the manually reviewed instruction file and return a list of revision hashes. + + Example: + ```txt + # This is the tmp file content + ### + + # Commented out line + 123456789 # revision hash + + # Something else + # a_newer_hash # 2 days ago + an_older_hash # 3 days ago + ``` + + ```py + >>> _read_manual_review_tmp_file(tmp_path) + ['123456789', 'an_older_hash'] + ``` + """ + with open(tmp_path) as f: + content = f.read() + + # Split lines + lines = [line.strip() for line in content.split("\n")] + + # Filter commented lines + selected_lines = [line for line in lines if not line.startswith("#")] + + # Select only before comment + selected_hashes = [line.split("#")[0].strip() for line in selected_lines] + + # Return revision hashes + return [hash for hash in selected_hashes if len(hash) > 0] + + +_MANUAL_REVIEW_NO_TUI_INSTRUCTIONS = f""" +# INSTRUCTIONS +# ------------ +# This is a temporary file created by running `huggingface-cli delete-cache` with the +# `--disable-tui` option. It contains a set of revisions that can be deleted from your +# local cache directory. +# +# Please manually review the revisions you want to delete: +# - Revision hashes can be commented out with '#'. +# - Only non-commented revisions in this file will be deleted. +# - Revision hashes that are removed from this file are ignored as well. +# - If the `{_CANCEL_DELETION_STR}` line is uncommented, the whole cache deletion is cancelled and +# no changes will be applied. +# +# Once you've manually reviewed this file, please confirm deletion in the terminal. This +# file will be automatically removed once done.
+# ------------ + +# KILL SWITCH +# ------------ +# Un-comment following line to completely cancel the deletion process +# {_CANCEL_DELETION_STR} +# ------------ + +# REVISIONS +# ------------ +""".strip() + + +def _revision_sorting_order(revision: CachedRevisionInfo) -> Any: + # Sort by last modified (oldest first) + return revision.last_modified diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/download.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/download.py new file mode 100644 index 0000000000000000000000000000000000000000..10e22c3d1eb83dbb52c4a633fb66f19b3f35d8e7 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/download.py @@ -0,0 +1,200 @@ +# coding=utf-8 +# Copyright 2023-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains command to download files from the Hub with the CLI. + +Usage: + huggingface-cli download --help + + # Download file + huggingface-cli download gpt2 config.json + + # Download entire repo + huggingface-cli download fffiloni/zeroscope --repo-type=space --revision=refs/pr/78 + + # Download repo with filters + huggingface-cli download gpt2 --include="*.safetensors" + + # Download with token + huggingface-cli download Wauplin/private-model --token=hf_*** + + # Download quietly (no progress bar, no warnings, only the returned path) + huggingface-cli download gpt2 config.json --quiet + + # Download to local dir + huggingface-cli download gpt2 --local-dir=./models/gpt2 +""" + +import warnings +from argparse import Namespace, _SubParsersAction +from typing import List, Optional + +from huggingface_hub import logging +from huggingface_hub._snapshot_download import snapshot_download +from huggingface_hub.commands import BaseHuggingfaceCLICommand +from huggingface_hub.file_download import hf_hub_download +from huggingface_hub.utils import disable_progress_bars, enable_progress_bars + + +logger = logging.get_logger(__name__) + + +class DownloadCommand(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + download_parser = parser.add_parser("download", help="Download files from the Hub") + download_parser.add_argument( + "repo_id", type=str, help="ID of the repo to download from (e.g. `username/repo-name`)." + ) + download_parser.add_argument( + "filenames", type=str, nargs="*", help="Files to download (e.g. `config.json`, `data/metadata.jsonl`)." + ) + download_parser.add_argument( + "--repo-type", + choices=["model", "dataset", "space"], + default="model", + help="Type of repo to download from (defaults to 'model').", + ) + download_parser.add_argument( + "--revision", + type=str, + help="An optional Git revision id which can be a branch name, a tag, or a commit hash.", + ) + download_parser.add_argument( + "--include", nargs="*", type=str, help="Glob patterns to match files to download." + ) + download_parser.add_argument( + "--exclude", nargs="*", type=str, help="Glob patterns to exclude from files to download." 
+ ) + download_parser.add_argument( + "--cache-dir", type=str, help="Path to the directory where to save the downloaded files." + ) + download_parser.add_argument( + "--local-dir", + type=str, + help=( + "If set, the downloaded file will be placed under this directory. Check out" + " https://huggingface.co/docs/huggingface_hub/guides/download#download-files-to-local-folder for more" + " details." + ), + ) + download_parser.add_argument( + "--local-dir-use-symlinks", + choices=["auto", "True", "False"], + help=("Deprecated and ignored. Downloading to a local directory does not use symlinks anymore."), + ) + download_parser.add_argument( + "--force-download", + action="store_true", + help="If True, the files will be downloaded even if they are already cached.", + ) + download_parser.add_argument( + "--resume-download", + action="store_true", + help="Deprecated and ignored. Downloading a file to local dir always attempts to resume previously interrupted downloads (unless hf-transfer is enabled).", + ) + download_parser.add_argument( + "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens" + ) + download_parser.add_argument( + "--quiet", + action="store_true", + help="If True, progress bars are disabled and only the path to the download files is printed.", + ) + download_parser.add_argument( + "--max-workers", + type=int, + default=8, + help="Maximum number of workers to use for downloading files. Default is 8.", + ) + download_parser.set_defaults(func=DownloadCommand) + + def __init__(self, args: Namespace) -> None: + self.token = args.token + self.repo_id: str = args.repo_id + self.filenames: List[str] = args.filenames + self.repo_type: str = args.repo_type + self.revision: Optional[str] = args.revision + self.include: Optional[List[str]] = args.include + self.exclude: Optional[List[str]] = args.exclude + self.cache_dir: Optional[str] = args.cache_dir + self.local_dir: Optional[str] = args.local_dir + self.force_download: bool = args.force_download + self.resume_download: Optional[bool] = args.resume_download or None + self.quiet: bool = args.quiet + self.max_workers: int = args.max_workers + + if args.local_dir_use_symlinks is not None: + warnings.warn( + "Ignoring --local-dir-use-symlinks. 
Downloading to a local directory does not use symlinks anymore.", + FutureWarning, + ) + + def run(self) -> None: + if self.quiet: + disable_progress_bars() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + print(self._download()) # Print path to downloaded files + enable_progress_bars() + else: + logging.set_verbosity_info() + print(self._download()) # Print path to downloaded files + logging.set_verbosity_warning() + + def _download(self) -> str: + # Warn user if patterns are ignored + if len(self.filenames) > 0: + if self.include is not None and len(self.include) > 0: + warnings.warn("Ignoring `--include` since filenames have been explicitly set.") + if self.exclude is not None and len(self.exclude) > 0: + warnings.warn("Ignoring `--exclude` since filenames have been explicitly set.") + + # Single file to download: use `hf_hub_download` + if len(self.filenames) == 1: + return hf_hub_download( + repo_id=self.repo_id, + repo_type=self.repo_type, + revision=self.revision, + filename=self.filenames[0], + cache_dir=self.cache_dir, + resume_download=self.resume_download, + force_download=self.force_download, + token=self.token, + local_dir=self.local_dir, + library_name="huggingface-cli", + ) + + # Otherwise: use `snapshot_download` to ensure all files come from the same revision + elif len(self.filenames) == 0: + allow_patterns = self.include + ignore_patterns = self.exclude + else: + allow_patterns = self.filenames + ignore_patterns = None + + return snapshot_download( + repo_id=self.repo_id, + repo_type=self.repo_type, + revision=self.revision, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + resume_download=self.resume_download, + force_download=self.force_download, + cache_dir=self.cache_dir, + token=self.token, + local_dir=self.local_dir, + library_name="huggingface-cli", + max_workers=self.max_workers, + ) diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/env.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/env.py new file mode 100644 index 0000000000000000000000000000000000000000..23f2828bbfebda0a633b4b3c6883432e4a534c79 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/env.py @@ -0,0 +1,36 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains command to print information about the environment. + +Usage: + huggingface-cli env +""" + +from argparse import _SubParsersAction + +from ..utils import dump_environment_info +from .
import BaseHuggingfaceCLICommand + + +class EnvironmentCommand(BaseHuggingfaceCLICommand): + def __init__(self, args): + self.args = args + + @staticmethod + def register_subcommand(parser: _SubParsersAction): + env_parser = parser.add_parser("env", help="Print information about the environment.") + env_parser.set_defaults(func=EnvironmentCommand) + + def run(self) -> None: + dump_environment_info() diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/huggingface_cli.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/huggingface_cli.py new file mode 100644 index 0000000000000000000000000000000000000000..4e30f305c26a57e1fa7f74fd8e5e0cd022b3f147 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/huggingface_cli.py @@ -0,0 +1,63 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from argparse import ArgumentParser + +from huggingface_hub.commands.delete_cache import DeleteCacheCommand +from huggingface_hub.commands.download import DownloadCommand +from huggingface_hub.commands.env import EnvironmentCommand +from huggingface_hub.commands.lfs import LfsCommands +from huggingface_hub.commands.repo import RepoCommands +from huggingface_hub.commands.repo_files import RepoFilesCommand +from huggingface_hub.commands.scan_cache import ScanCacheCommand +from huggingface_hub.commands.tag import TagCommands +from huggingface_hub.commands.upload import UploadCommand +from huggingface_hub.commands.upload_large_folder import UploadLargeFolderCommand +from huggingface_hub.commands.user import UserCommands +from huggingface_hub.commands.version import VersionCommand + + +def main(): + parser = ArgumentParser("huggingface-cli", usage="huggingface-cli <command> [<args>]") + commands_parser = parser.add_subparsers(help="huggingface-cli command helpers") + + # Register commands + DownloadCommand.register_subcommand(commands_parser) + UploadCommand.register_subcommand(commands_parser) + RepoFilesCommand.register_subcommand(commands_parser) + EnvironmentCommand.register_subcommand(commands_parser) + UserCommands.register_subcommand(commands_parser) + RepoCommands.register_subcommand(commands_parser) + LfsCommands.register_subcommand(commands_parser) + ScanCacheCommand.register_subcommand(commands_parser) + DeleteCacheCommand.register_subcommand(commands_parser) + TagCommands.register_subcommand(commands_parser) + VersionCommand.register_subcommand(commands_parser) + + # Experimental + UploadLargeFolderCommand.register_subcommand(commands_parser) + + # Let's go + args = parser.parse_args() + if not hasattr(args, "func"): + parser.print_help() + exit(1) + + # Run + service = args.func(args) + service.run() + + +if __name__ == "__main__": + main() diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/lfs.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/lfs.py new file mode 100644 index 0000000000000000000000000000000000000000..e510e345e6a4bf6da03f71b35cbfa2a4f0eb7325 --- 
/dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/lfs.py @@ -0,0 +1,200 @@ +""" +Implementation of a custom transfer agent for the transfer type "multipart" for +git-lfs. + +Inspired by: +github.com/cbartz/git-lfs-swift-transfer-agent/blob/master/git_lfs_swift_transfer.py + +Spec is: github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md + + +To launch debugger while developing: + +``` [lfs "customtransfer.multipart"] +path = /path/to/huggingface_hub/.env/bin/python args = -m debugpy --listen 5678 +--wait-for-client +/path/to/huggingface_hub/src/huggingface_hub/commands/huggingface_cli.py +lfs-multipart-upload ```""" + +import json +import os +import subprocess +import sys +from argparse import _SubParsersAction +from typing import Dict, List, Optional + +from huggingface_hub.commands import BaseHuggingfaceCLICommand +from huggingface_hub.lfs import LFS_MULTIPART_UPLOAD_COMMAND + +from ..utils import get_session, hf_raise_for_status, logging +from ..utils._lfs import SliceFileObj + + +logger = logging.get_logger(__name__) + + +class LfsCommands(BaseHuggingfaceCLICommand): + """ + Implementation of a custom transfer agent for the transfer type "multipart" + for git-lfs. This lets users upload large files >5GB 🔥. Spec for LFS custom + transfer agent is: + https://github.com/git-lfs/git-lfs/blob/master/docs/custom-transfers.md + + This introduces two commands to the CLI: + + 1. $ huggingface-cli lfs-enable-largefiles + + This should be executed once for each model repo that contains a model file + >5GB. It's documented in the error message you get if you just try to git + push a 5GB file without having enabled it before. + + 2. $ huggingface-cli lfs-multipart-upload + + This command is called by lfs directly and is not meant to be called by the + user. + """ + + @staticmethod + def register_subcommand(parser: _SubParsersAction): + enable_parser = parser.add_parser( + "lfs-enable-largefiles", help="Configure your repository to enable upload of files > 5GB." + ) + enable_parser.add_argument("path", type=str, help="Local path to repository you want to configure.") + enable_parser.set_defaults(func=lambda args: LfsEnableCommand(args)) + + # Command will get called by git-lfs, do not call it directly. 
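+ # git-lfs spawns this command itself and drives it over stdin/stdout with line-delimited JSON (the custom-transfers protocol linked above): an "init" handshake first, then one message per transfer, then a "terminate" event. See write_msg/read_msg below.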
+ upload_parser = parser.add_parser(LFS_MULTIPART_UPLOAD_COMMAND, add_help=False) + upload_parser.set_defaults(func=lambda args: LfsUploadCommand(args)) + + +class LfsEnableCommand: + def __init__(self, args): + self.args = args + + def run(self): + local_path = os.path.abspath(self.args.path) + if not os.path.isdir(local_path): + print("This does not look like a valid git repo.") + exit(1) + subprocess.run( + "git config lfs.customtransfer.multipart.path huggingface-cli".split(), + check=True, + cwd=local_path, + ) + subprocess.run( + f"git config lfs.customtransfer.multipart.args {LFS_MULTIPART_UPLOAD_COMMAND}".split(), + check=True, + cwd=local_path, + ) + print("Local repo set up for largefiles") + + +def write_msg(msg: Dict): + """Write out the message in Line delimited JSON.""" + msg_str = json.dumps(msg) + "\n" + sys.stdout.write(msg_str) + sys.stdout.flush() + + +def read_msg() -> Optional[Dict]: + """Read Line delimited JSON from stdin.""" + msg = json.loads(sys.stdin.readline().strip()) + + if "terminate" in (msg.get("type"), msg.get("event")): + # terminate message received + return None + + if msg.get("event") not in ("download", "upload"): + logger.critical("Received unexpected message") + sys.exit(1) + + return msg + + +class LfsUploadCommand: + def __init__(self, args) -> None: + self.args = args + + def run(self) -> None: + # Immediately after invoking a custom transfer process, git-lfs + # sends initiation data to the process over stdin. + # This tells the process useful information about the configuration. + init_msg = json.loads(sys.stdin.readline().strip()) + if not (init_msg.get("event") == "init" and init_msg.get("operation") == "upload"): + write_msg({"error": {"code": 32, "message": "Wrong lfs init operation"}}) + sys.exit(1) + + # The transfer process should use the information it needs from the + # initiation structure, and also perform any one-off setup tasks it + # needs to do. It should then respond on stdout with a simple empty + # confirmation structure, as follows: + write_msg({}) + + # After the initiation exchange, git-lfs will send any number of + # transfer requests to the stdin of the transfer process, in a serial sequence. + while True: + msg = read_msg() + if msg is None: + # When all transfers have been processed, git-lfs will send + # a terminate event to the stdin of the transfer process. + # On receiving this message the transfer process should + # clean up and terminate. No response is expected. + sys.exit(0) + + oid = msg["oid"] + filepath = msg["path"] + completion_url = msg["action"]["href"] + header = msg["action"]["header"] + chunk_size = int(header.pop("chunk_size")) + presigned_urls: List[str] = list(header.values()) + + # Send a "started" progress event to allow other workers to start. + # Otherwise they're delayed until first "progress" event is reported, + # i.e. after the first 5GB by default (!) 
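+ # (bytesSoFar=1 is a deliberately minimal value: it marks the transfer as active without claiming real progress; accurate byte counts are reported after each uploaded part below.)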
+ write_msg( + { + "event": "progress", + "oid": oid, + "bytesSoFar": 1, + "bytesSinceLast": 0, + } + ) + + parts = [] + with open(filepath, "rb") as file: + for i, presigned_url in enumerate(presigned_urls): + with SliceFileObj( + file, + seek_from=i * chunk_size, + read_limit=chunk_size, + ) as data: + r = get_session().put(presigned_url, data=data) + hf_raise_for_status(r) + parts.append( + { + "etag": r.headers.get("etag"), + "partNumber": i + 1, + } + ) + # In order to support progress reporting while data is uploading / downloading, + # the transfer process should post messages to stdout + write_msg( + { + "event": "progress", + "oid": oid, + "bytesSoFar": (i + 1) * chunk_size, + "bytesSinceLast": chunk_size, + } + ) + # Not precise but that's ok. + + r = get_session().post( + completion_url, + json={ + "oid": oid, + "parts": parts, + }, + ) + hf_raise_for_status(r) + + write_msg({"event": "complete", "oid": oid}) diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/repo.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/repo.py new file mode 100644 index 0000000000000000000000000000000000000000..5d12778040f373e42c0b06643948d2a5ec4a24a9 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/repo.py @@ -0,0 +1,147 @@ +# Copyright 2025 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains commands to interact with repositories on the Hugging Face Hub. + +Usage: + # create a new dataset repo on the Hub + huggingface-cli repo create my-cool-dataset --repo-type=dataset + + # create a private model repo on the Hub + huggingface-cli repo create my-cool-model --private +""" + +import argparse +from argparse import _SubParsersAction +from typing import Optional + +from huggingface_hub.commands import BaseHuggingfaceCLICommand +from huggingface_hub.commands._cli_utils import ANSI +from huggingface_hub.constants import SPACES_SDK_TYPES +from huggingface_hub.hf_api import HfApi +from huggingface_hub.utils import logging + + +logger = logging.get_logger(__name__) + + +class RepoCommands(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + repo_parser = parser.add_parser("repo", help="{create} Commands to interact with your huggingface.co repos.") + repo_subparsers = repo_parser.add_subparsers(help="huggingface.co repos related commands") + repo_create_parser = repo_subparsers.add_parser("create", help="Create a new repo on huggingface.co") + repo_create_parser.add_argument( + "repo_id", + type=str, + help="The ID of the repo to create (e.g. `username/repo-name`). The username is optional and will be set to your username if not provided.", + ) + repo_create_parser.add_argument( + "--repo-type", + type=str, + help='Optional: set to "dataset" or "space" if creating a dataset or space, default is model.', + ) + repo_create_parser.add_argument( + "--space_sdk", + type=str, + help='Optional: Hugging Face Spaces SDK type.
Required when --repo-type is set to "space".', + choices=SPACES_SDK_TYPES, + ) + repo_create_parser.add_argument( + "--private", + action="store_true", + help="Whether to create a private repository. Defaults to public unless the organization's default is private.", + ) + repo_create_parser.add_argument( + "--token", + type=str, + help="Hugging Face token. Will default to the locally saved token if not provided.", + ) + repo_create_parser.add_argument( + "--exist-ok", + action="store_true", + help="Do not raise an error if repo already exists.", + ) + repo_create_parser.add_argument( + "--resource-group-id", + type=str, + help="Resource group in which to create the repo. Resource groups are only available for Enterprise Hub organizations.", + ) + repo_create_parser.add_argument( + "--type", + type=str, + help="[Deprecated]: use --repo-type instead.", + ) + repo_create_parser.add_argument( + "-y", + "--yes", + action="store_true", + help="[Deprecated] no effect.", + ) + repo_create_parser.add_argument( + "--organization", type=str, help="[Deprecated] Pass the organization namespace directly in the repo_id." + ) + repo_create_parser.set_defaults(func=lambda args: RepoCreateCommand(args)) + + +class RepoCreateCommand: + def __init__(self, args: argparse.Namespace): + self.repo_id: str = args.repo_id + self.repo_type: Optional[str] = args.repo_type or args.type + self.space_sdk: Optional[str] = args.space_sdk + self.organization: Optional[str] = args.organization + self.yes: bool = args.yes + self.private: bool = args.private + self.token: Optional[str] = args.token + self.exist_ok: bool = args.exist_ok + self.resource_group_id: Optional[str] = args.resource_group_id + + if args.type is not None: + print( + ANSI.yellow( + "The --type argument is deprecated and will be removed in a future version. Use --repo-type instead." + ) + ) + if self.organization is not None: + print( + ANSI.yellow( + "The --organization argument is deprecated and will be removed in a future version. Pass the organization namespace directly in the repo_id." + ) + ) + if self.yes: + print( + ANSI.yellow( + "The --yes argument is deprecated and will be removed in a future version. It does not have any effect." + ) + ) + + self._api = HfApi() + + def run(self): + if self.organization is not None: + if "/" in self.repo_id: + print(ANSI.red("You cannot pass both --organization and a repo_id with a namespace.")) + exit(1) + self.repo_id = f"{self.organization}/{self.repo_id}" + + repo_url = self._api.create_repo( + repo_id=self.repo_id, + repo_type=self.repo_type, + private=self.private, + token=self.token, + exist_ok=self.exist_ok, + resource_group_id=self.resource_group_id, + space_sdk=self.space_sdk, + ) + print(f"Successfully created {ANSI.bold(repo_url.repo_id)} on the Hub.") + print(f"Your repo is now available at {ANSI.bold(repo_url)}") diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/repo_files.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/repo_files.py new file mode 100644 index 0000000000000000000000000000000000000000..f15bbed04f3634d7783d8230324cdaee44df4f59 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/repo_files.py @@ -0,0 +1,128 @@ +# coding=utf-8 +# Copyright 2023-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains command to update or delete files in a repository using the CLI. + +Usage: + # delete all + huggingface-cli repo-files delete "*" + + # delete single file + huggingface-cli repo-files delete file.txt + + # delete single folder + huggingface-cli repo-files delete folder/ + + # delete multiple + huggingface-cli repo-files delete file.txt folder/ file2.txt + + # delete multiple patterns + huggingface-cli repo-files delete file.txt "*.json" "folder/*.parquet" + + # delete from different revision / repo-type + huggingface-cli repo-files delete file.txt --revision=refs/pr/1 --repo-type=dataset +""" + +from argparse import _SubParsersAction +from typing import List, Optional + +from huggingface_hub import logging +from huggingface_hub.commands import BaseHuggingfaceCLICommand +from huggingface_hub.hf_api import HfApi + + +logger = logging.get_logger(__name__) + + +class DeleteFilesSubCommand: + def __init__(self, args) -> None: + self.args = args + self.repo_id: str = args.repo_id + self.repo_type: Optional[str] = args.repo_type + self.revision: Optional[str] = args.revision + self.api: HfApi = HfApi(token=args.token, library_name="huggingface-cli") + self.patterns: List[str] = args.patterns + self.commit_message: Optional[str] = args.commit_message + self.commit_description: Optional[str] = args.commit_description + self.create_pr: bool = args.create_pr + self.token: Optional[str] = args.token + + def run(self) -> None: + logging.set_verbosity_info() + url = self.api.delete_files( + delete_patterns=self.patterns, + repo_id=self.repo_id, + repo_type=self.repo_type, + revision=self.revision, + commit_message=self.commit_message, + commit_description=self.commit_description, + create_pr=self.create_pr, + ) + print(f"Files successfully deleted from repo. Commit: {url}.") + logging.set_verbosity_warning() + + +class RepoFilesCommand(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + repo_files_parser = parser.add_parser("repo-files", help="Manage files in a repo on the Hub") + repo_files_parser.add_argument( + "repo_id", type=str, help="The ID of the repo to manage (e.g. `username/repo-name`)." + ) + repo_files_subparsers = repo_files_parser.add_subparsers( + help="Action to execute against the files.", + required=True, + ) + delete_subparser = repo_files_subparsers.add_parser( + "delete", + help="Delete files from a repo on the Hub", + ) + delete_subparser.set_defaults(func=lambda args: DeleteFilesSubCommand(args)) + delete_subparser.add_argument( + "patterns", + nargs="+", + type=str, + help="Glob patterns to match files to delete.", + ) + delete_subparser.add_argument( + "--repo-type", + choices=["model", "dataset", "space"], + default="model", + help="Type of the repo to manage (e.g. `dataset`).", + ) + delete_subparser.add_argument( + "--revision", + type=str, + help=( + "An optional Git revision to push to. It can be a branch name " + "or a PR reference. If revision does not" + " exist and `--create-pr` is not set, a branch will be automatically created."
+ ), + ) + delete_subparser.add_argument( + "--commit-message", type=str, help="The summary / title / first line of the generated commit." + ) + delete_subparser.add_argument( + "--commit-description", type=str, help="The description of the generated commit." + ) + delete_subparser.add_argument( + "--create-pr", action="store_true", help="Whether to create a new Pull Request for these changes." + ) + repo_files_parser.add_argument( + "--token", + type=str, + help="A User Access Token generated from https://huggingface.co/settings/tokens", + ) + + repo_files_parser.set_defaults(func=RepoFilesCommand) diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/scan_cache.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/scan_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..799b9ba5523134a668aa0171e9f3668694299341 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/scan_cache.py @@ -0,0 +1,181 @@ +# coding=utf-8 +# Copyright 2022-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains command to scan the HF cache directory. + +Usage: + huggingface-cli scan-cache + huggingface-cli scan-cache -v + huggingface-cli scan-cache -vvv + huggingface-cli scan-cache --dir ~/.cache/huggingface/hub +""" + +import time +from argparse import Namespace, _SubParsersAction +from typing import Optional + +from ..utils import CacheNotFound, HFCacheInfo, scan_cache_dir +from . import BaseHuggingfaceCLICommand +from ._cli_utils import ANSI, tabulate + + +class ScanCacheCommand(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + scan_cache_parser = parser.add_parser("scan-cache", help="Scan cache directory.") + + scan_cache_parser.add_argument( + "--dir", + type=str, + default=None, + help="cache directory to scan (optional). Defaults to the default HuggingFace cache.", + ) + scan_cache_parser.add_argument( + "-v", + "--verbose", + action="count", + default=0, + help="show a more verbose output", + ) + scan_cache_parser.set_defaults(func=ScanCacheCommand) + + def __init__(self, args: Namespace) -> None: + self.verbosity: int = args.verbose + self.cache_dir: Optional[str] = args.dir + + def run(self): + try: + t0 = time.time() + hf_cache_info = scan_cache_dir(self.cache_dir) + t1 = time.time() + except CacheNotFound as exc: + cache_dir = exc.cache_dir + print(f"Cache directory not found: {cache_dir}") + return + + self._print_hf_cache_info_as_table(hf_cache_info) + + print( + f"\nDone in {round(t1 - t0, 1)}s. Scanned {len(hf_cache_info.repos)} repo(s)" + f" for a total of {ANSI.red(hf_cache_info.size_on_disk_str)}." + ) + if len(hf_cache_info.warnings) > 0: + message = f"Got {len(hf_cache_info.warnings)} warning(s) while scanning."
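+ # Individual warnings are only printed at -vvv; below that verbosity, print the summary line plus a hint.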
+ if self.verbosity >= 3: + print(ANSI.gray(message)) + for warning in hf_cache_info.warnings: + print(ANSI.gray(warning)) + else: + print(ANSI.gray(message + " Use -vvv to print details.")) + + def _print_hf_cache_info_as_table(self, hf_cache_info: HFCacheInfo) -> None: + print(get_table(hf_cache_info, verbosity=self.verbosity)) + + +def get_table(hf_cache_info: HFCacheInfo, *, verbosity: int = 0) -> str: + """Generate a table from the [`HFCacheInfo`] object. + + Pass `verbosity=0` to get a table with a single row per repo, with columns + "repo_id", "repo_type", "size_on_disk", "nb_files", "last_accessed", "last_modified", "refs", "local_path". + + Pass `verbosity=1` to get a table with a row per repo and revision (thus multiple rows can appear for a single repo), with columns + "repo_id", "repo_type", "revision", "size_on_disk", "nb_files", "last_modified", "refs", "local_path". + + Example: + ```py + >>> from huggingface_hub.utils import scan_cache_dir + >>> from huggingface_hub.commands.scan_cache import get_table + + >>> hf_cache_info = scan_cache_dir() + HFCacheInfo(...) + + >>> print(get_table(hf_cache_info, verbosity=0)) + REPO ID REPO TYPE SIZE ON DISK NB FILES LAST_ACCESSED LAST_MODIFIED REFS LOCAL PATH + --------------------------------------------------- --------- ------------ -------- ------------- ------------- ---- -------------------------------------------------------------------------------------------------- + roberta-base model 2.7M 5 1 day ago 1 week ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--roberta-base + suno/bark model 8.8K 1 1 week ago 1 week ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--suno--bark + t5-base model 893.8M 4 4 days ago 7 months ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--t5-base + t5-large model 3.0G 4 5 weeks ago 5 months ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--t5-large + + >>> print(get_table(hf_cache_info, verbosity=1)) + REPO ID REPO TYPE REVISION SIZE ON DISK NB FILES LAST_MODIFIED REFS LOCAL PATH + --------------------------------------------------- --------- ---------------------------------------- ------------ -------- ------------- ---- ----------------------------------------------------------------------------------------------------------------------------------------------------- + roberta-base model e2da8e2f811d1448a5b465c236feacd80ffbac7b 2.7M 5 1 week ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--roberta-base\\snapshots\\e2da8e2f811d1448a5b465c236feacd80ffbac7b + suno/bark model 70a8a7d34168586dc5d028fa9666aceade177992 8.8K 1 1 week ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--suno--bark\\snapshots\\70a8a7d34168586dc5d028fa9666aceade177992 + t5-base model a9723ea7f1b39c1eae772870f3b547bf6ef7e6c1 893.8M 4 7 months ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--t5-base\\snapshots\\a9723ea7f1b39c1eae772870f3b547bf6ef7e6c1 + t5-large model 150ebc2c4b72291e770f58e6057481c8d2ed331a 3.0G 4 5 months ago main C:\\Users\\admin\\.cache\\huggingface\\hub\\models--t5-large\\snapshots\\150ebc2c4b72291e770f58e6057481c8d2ed331a ``` + ``` + + Args: + hf_cache_info ([`HFCacheInfo`]): + The HFCacheInfo object to print. + verbosity (`int`, *optional*): + The verbosity level. Defaults to 0. + + Returns: + `str`: The table as a string. 
+ """ + if verbosity == 0: + return tabulate( + rows=[ + [ + repo.repo_id, + repo.repo_type, + "{:>12}".format(repo.size_on_disk_str), + repo.nb_files, + repo.last_accessed_str, + repo.last_modified_str, + ", ".join(sorted(repo.refs)), + str(repo.repo_path), + ] + for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path) + ], + headers=[ + "REPO ID", + "REPO TYPE", + "SIZE ON DISK", + "NB FILES", + "LAST_ACCESSED", + "LAST_MODIFIED", + "REFS", + "LOCAL PATH", + ], + ) + else: + return tabulate( + rows=[ + [ + repo.repo_id, + repo.repo_type, + revision.commit_hash, + "{:>12}".format(revision.size_on_disk_str), + revision.nb_files, + revision.last_modified_str, + ", ".join(sorted(revision.refs)), + str(revision.snapshot_path), + ] + for repo in sorted(hf_cache_info.repos, key=lambda repo: repo.repo_path) + for revision in sorted(repo.revisions, key=lambda revision: revision.commit_hash) + ], + headers=[ + "REPO ID", + "REPO TYPE", + "REVISION", + "SIZE ON DISK", + "NB FILES", + "LAST_MODIFIED", + "REFS", + "LOCAL PATH", + ], + ) diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/tag.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/tag.py new file mode 100644 index 0000000000000000000000000000000000000000..c3beab90a0a2858906c848fd1e3f54edfb9d4864 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/tag.py @@ -0,0 +1,159 @@ +# coding=utf-8 +# Copyright 2024-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Contains commands to perform tag management with the CLI. + +Usage Examples: + - Create a tag: + $ huggingface-cli tag user/my-model 1.0 --message "First release" + $ huggingface-cli tag user/my-model 1.0 -m "First release" --revision develop + $ huggingface-cli tag user/my-dataset 1.0 -m "First release" --repo-type dataset + $ huggingface-cli tag user/my-space 1.0 + - List all tags: + $ huggingface-cli tag -l user/my-model + $ huggingface-cli tag --list user/my-dataset --repo-type dataset + - Delete a tag: + $ huggingface-cli tag -d user/my-model 1.0 + $ huggingface-cli tag --delete user/my-dataset 1.0 --repo-type dataset + $ huggingface-cli tag -d user/my-space 1.0 -y +""" + +from argparse import Namespace, _SubParsersAction + +from requests.exceptions import HTTPError + +from huggingface_hub.commands import BaseHuggingfaceCLICommand +from huggingface_hub.constants import ( + REPO_TYPES, +) +from huggingface_hub.hf_api import HfApi + +from ..errors import HfHubHTTPError, RepositoryNotFoundError, RevisionNotFoundError +from ._cli_utils import ANSI + + +class TagCommands(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + tag_parser = parser.add_parser("tag", help="(create, list, delete) tags for a repo in the hub") + + tag_parser.add_argument("repo_id", type=str, help="The ID of the repo to tag (e.g. 
`username/repo-name`).") + tag_parser.add_argument("tag", nargs="?", type=str, help="The name of the tag for creation or deletion.") + tag_parser.add_argument("-m", "--message", type=str, help="The description of the tag to create.") + tag_parser.add_argument("--revision", type=str, help="The git revision to tag.") + tag_parser.add_argument( + "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens." + ) + tag_parser.add_argument( + "--repo-type", + choices=["model", "dataset", "space"], + default="model", + help="Set the type of repository (model, dataset, or space).", + ) + tag_parser.add_argument("-y", "--yes", action="store_true", help="Answer Yes to prompts automatically.") + + tag_parser.add_argument("-l", "--list", action="store_true", help="List tags for a repository.") + tag_parser.add_argument("-d", "--delete", action="store_true", help="Delete a tag for a repository.") + + tag_parser.set_defaults(func=lambda args: handle_commands(args)) + + +def handle_commands(args: Namespace): + if args.list: + return TagListCommand(args) + elif args.delete: + return TagDeleteCommand(args) + else: + return TagCreateCommand(args) + + +class TagCommand: + def __init__(self, args: Namespace): + self.args = args + self.api = HfApi(token=self.args.token) + self.repo_id = self.args.repo_id + self.repo_type = self.args.repo_type + if self.repo_type not in REPO_TYPES: + print("Invalid repo --repo-type") + exit(1) + + +class TagCreateCommand(TagCommand): + def run(self): + print(f"You are about to create tag {ANSI.bold(self.args.tag)} on {self.repo_type} {ANSI.bold(self.repo_id)}") + + try: + self.api.create_tag( + repo_id=self.repo_id, + tag=self.args.tag, + tag_message=self.args.message, + revision=self.args.revision, + repo_type=self.repo_type, + ) + except RepositoryNotFoundError: + print(f"{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.") + exit(1) + except RevisionNotFoundError: + print(f"Revision {ANSI.bold(self.args.revision)} not found.") + exit(1) + except HfHubHTTPError as e: + if e.response.status_code == 409: + print(f"Tag {ANSI.bold(self.args.tag)} already exists on {ANSI.bold(self.repo_id)}") + exit(1) + raise e + + print(f"Tag {ANSI.bold(self.args.tag)} created on {ANSI.bold(self.repo_id)}") + + +class TagListCommand(TagCommand): + def run(self): + try: + refs = self.api.list_repo_refs( + repo_id=self.repo_id, + repo_type=self.repo_type, + ) + except RepositoryNotFoundError: + print(f"{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.") + exit(1) + except HTTPError as e: + print(e) + print(ANSI.red(e.response.text)) + exit(1) + if len(refs.tags) == 0: + print("No tags found") + exit(0) + print(f"Tags for {self.repo_type} {ANSI.bold(self.repo_id)}:") + for tag in refs.tags: + print(tag.name) + + +class TagDeleteCommand(TagCommand): + def run(self): + print(f"You are about to delete tag {ANSI.bold(self.args.tag)} on {self.repo_type} {ANSI.bold(self.repo_id)}") + + if not self.args.yes: + choice = input("Proceed? 
[Y/n] ").lower() + if choice not in ("", "y", "yes"): + print("Abort") + exit() + try: + self.api.delete_tag(repo_id=self.repo_id, tag=self.args.tag, repo_type=self.repo_type) + except RepositoryNotFoundError: + print(f"{self.repo_type.capitalize()} {ANSI.bold(self.repo_id)} not found.") + exit(1) + except RevisionNotFoundError: + print(f"Tag {ANSI.bold(self.args.tag)} not found on {ANSI.bold(self.repo_id)}") + exit(1) + print(f"Tag {ANSI.bold(self.args.tag)} deleted on {ANSI.bold(self.repo_id)}") diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/upload.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/upload.py new file mode 100644 index 0000000000000000000000000000000000000000..3d4caebd5fec1872986db3730d1ce87407511d21 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/upload.py @@ -0,0 +1,314 @@ +# coding=utf-8 +# Copyright 2023-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains command to upload a repo or file with the CLI. + +Usage: + # Upload file (implicit) + huggingface-cli upload my-cool-model ./my-cool-model.safetensors + + # Upload file (explicit) + huggingface-cli upload my-cool-model ./my-cool-model.safetensors model.safetensors + + # Upload directory (implicit). If `my-cool-model/` is a directory it will be uploaded, otherwise an exception is raised. + huggingface-cli upload my-cool-model + + # Upload directory (explicit) + huggingface-cli upload my-cool-model ./models/my-cool-model . + + # Upload filtered directory (example: tensorboard logs except for the last run) + huggingface-cli upload my-cool-model ./model/training /logs --include "*.tfevents.*" --exclude "*20230905*" + + # Upload with wildcard + huggingface-cli upload my-cool-model "./model/training/*.safetensors" + + # Upload private dataset + huggingface-cli upload Wauplin/my-cool-dataset ./data . 
--repo-type=dataset --private + + # Upload with token + huggingface-cli upload Wauplin/my-cool-model --token=hf_**** + + # Sync local Space with Hub (upload new files, delete removed files) + huggingface-cli upload Wauplin/space-example --repo-type=space --exclude="/logs/*" --delete="*" --commit-message="Sync local Space with Hub" + + # Schedule commits every 30 minutes + huggingface-cli upload Wauplin/my-cool-model --every=30 +""" + +import os +import time +import warnings +from argparse import Namespace, _SubParsersAction +from typing import List, Optional + +from huggingface_hub import logging +from huggingface_hub._commit_scheduler import CommitScheduler +from huggingface_hub.commands import BaseHuggingfaceCLICommand +from huggingface_hub.constants import HF_HUB_ENABLE_HF_TRANSFER +from huggingface_hub.errors import RevisionNotFoundError +from huggingface_hub.hf_api import HfApi +from huggingface_hub.utils import disable_progress_bars, enable_progress_bars +from huggingface_hub.utils._runtime import is_xet_available + + +logger = logging.get_logger(__name__) + + +class UploadCommand(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + upload_parser = parser.add_parser("upload", help="Upload a file or a folder to a repo on the Hub") + upload_parser.add_argument( + "repo_id", type=str, help="The ID of the repo to upload to (e.g. `username/repo-name`)." + ) + upload_parser.add_argument( + "local_path", + nargs="?", + help="Local path to the file or folder to upload. Wildcard patterns are supported. Defaults to current directory.", + ) + upload_parser.add_argument( + "path_in_repo", + nargs="?", + help="Path of the file or folder in the repo. Defaults to the relative path of the file or folder.", + ) + upload_parser.add_argument( + "--repo-type", + choices=["model", "dataset", "space"], + default="model", + help="Type of the repo to upload to (e.g. `dataset`).", + ) + upload_parser.add_argument( + "--revision", + type=str, + help=( + "An optional Git revision to push to. It can be a branch name or a PR reference. If revision does not" + " exist and `--create-pr` is not set, a branch will be automatically created." + ), + ) + upload_parser.add_argument( + "--private", + action="store_true", + help=( + "Whether to create a private repo if repo doesn't exist on the Hub. Ignored if the repo already" + " exists." + ), + ) + upload_parser.add_argument("--include", nargs="*", type=str, help="Glob patterns to match files to upload.") + upload_parser.add_argument( + "--exclude", nargs="*", type=str, help="Glob patterns to exclude from files to upload." + ) + upload_parser.add_argument( + "--delete", + nargs="*", + type=str, + help="Glob patterns for file to be deleted from the repo while committing.", + ) + upload_parser.add_argument( + "--commit-message", type=str, help="The summary / title / first line of the generated commit." + ) + upload_parser.add_argument("--commit-description", type=str, help="The description of the generated commit.") + upload_parser.add_argument( + "--create-pr", action="store_true", help="Whether to upload content as a new Pull Request." 
+ ) + upload_parser.add_argument( + "--every", + type=float, + help="If set, a background job is scheduled to create commits every `every` minutes.", + ) + upload_parser.add_argument( + "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens" + ) + upload_parser.add_argument( + "--quiet", + action="store_true", + help="If True, progress bars are disabled and only the path to the uploaded files is printed.", + ) + upload_parser.set_defaults(func=UploadCommand) + + def __init__(self, args: Namespace) -> None: + self.repo_id: str = args.repo_id + self.repo_type: Optional[str] = args.repo_type + self.revision: Optional[str] = args.revision + self.private: bool = args.private + + self.include: Optional[List[str]] = args.include + self.exclude: Optional[List[str]] = args.exclude + self.delete: Optional[List[str]] = args.delete + + self.commit_message: Optional[str] = args.commit_message + self.commit_description: Optional[str] = args.commit_description + self.create_pr: bool = args.create_pr + self.api: HfApi = HfApi(token=args.token, library_name="huggingface-cli") + self.quiet: bool = args.quiet # disable warnings and progress bars + + # Check `--every` is valid + if args.every is not None and args.every <= 0: + raise ValueError(f"`every` must be a positive value (got '{args.every}')") + self.every: Optional[float] = args.every + + # Resolve `local_path` and `path_in_repo` + repo_name: str = args.repo_id.split("/")[-1] # e.g. "Wauplin/my-cool-model" => "my-cool-model" + self.local_path: str + self.path_in_repo: str + + if args.local_path is not None and any(c in args.local_path for c in ["*", "?", "["]): + if args.include is not None: + raise ValueError("Cannot set `--include` when passing a `local_path` containing a wildcard.") + if args.path_in_repo is not None and args.path_in_repo != ".": + raise ValueError("Cannot set `path_in_repo` when passing a `local_path` containing a wildcard.") + self.local_path = "." + self.include = args.local_path + self.path_in_repo = "." + elif args.local_path is None and os.path.isfile(repo_name): + # Implicit case 1: user provided only a repo_id which happens to be a local file as well => upload it with same name + self.local_path = repo_name + self.path_in_repo = repo_name + elif args.local_path is None and os.path.isdir(repo_name): + # Implicit case 2: user provided only a repo_id which happens to be a local folder as well => upload it at root + self.local_path = repo_name + self.path_in_repo = "." + elif args.local_path is None: + # Implicit case 3: user provided only a repo_id that does not match a local file or folder + # => the user must explicitly provide a local_path => raise exception + raise ValueError(f"'{repo_name}' is not a local file or folder. Please set `local_path` explicitly.") + elif args.path_in_repo is None and os.path.isfile(args.local_path): + # Explicit local path to file, no path in repo => upload it at root with same name + self.local_path = args.local_path + self.path_in_repo = os.path.basename(args.local_path) + elif args.path_in_repo is None: + # Explicit local path to folder, no path in repo => upload at root + self.local_path = args.local_path + self.path_in_repo = "."
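+ # (Recap of the branches above: a wildcard local_path becomes an include pattern rooted at "."; a bare repo name matching a local file/folder is uploaded implicitly; an explicit file without path_in_repo lands under its basename; an explicit folder without path_in_repo lands at the repo root.)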
+ else: + # Finally, if both paths are explicit + self.local_path = args.local_path + self.path_in_repo = args.path_in_repo + + def run(self) -> None: + if self.quiet: + disable_progress_bars() + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + print(self._upload()) + enable_progress_bars() + else: + logging.set_verbosity_info() + print(self._upload()) + logging.set_verbosity_warning() + + def _upload(self) -> str: + if os.path.isfile(self.local_path): + if self.include is not None and len(self.include) > 0: + warnings.warn("Ignoring `--include` since a single file is uploaded.") + if self.exclude is not None and len(self.exclude) > 0: + warnings.warn("Ignoring `--exclude` since a single file is uploaded.") + if self.delete is not None and len(self.delete) > 0: + warnings.warn("Ignoring `--delete` since a single file is uploaded.") + + if not is_xet_available() and not HF_HUB_ENABLE_HF_TRANSFER: + logger.info( + "Consider using `hf_transfer` for faster uploads. This solution comes with some limitations. See" + " https://huggingface.co/docs/huggingface_hub/hf_transfer for more details." + ) + + # Schedule commits if `every` is set + if self.every is not None: + if os.path.isfile(self.local_path): + # If file => watch entire folder + use allow_patterns + folder_path = os.path.dirname(self.local_path) + path_in_repo = ( + self.path_in_repo[: -len(self.local_path)] # remove filename from path_in_repo + if self.path_in_repo.endswith(self.local_path) + else self.path_in_repo + ) + allow_patterns = [self.local_path] + ignore_patterns = [] + else: + folder_path = self.local_path + path_in_repo = self.path_in_repo + allow_patterns = self.include or [] + ignore_patterns = self.exclude or [] + if self.delete is not None and len(self.delete) > 0: + warnings.warn("Ignoring `--delete` when uploading with scheduled commits.") + + scheduler = CommitScheduler( + folder_path=folder_path, + repo_id=self.repo_id, + repo_type=self.repo_type, + revision=self.revision, + allow_patterns=allow_patterns, + ignore_patterns=ignore_patterns, + path_in_repo=path_in_repo, + private=self.private, + every=self.every, + hf_api=self.api, + ) + print(f"Scheduling commits every {self.every} minutes to {scheduler.repo_id}.") + try: # Block main thread until KeyboardInterrupt + while True: + time.sleep(100) + except KeyboardInterrupt: + scheduler.stop() + return "Stopped scheduled commits." + + # Otherwise, create repo and proceed with the upload + if not os.path.isfile(self.local_path) and not os.path.isdir(self.local_path): + raise FileNotFoundError(f"No such file or directory: '{self.local_path}'.") + repo_id = self.api.create_repo( + repo_id=self.repo_id, + repo_type=self.repo_type, + exist_ok=True, + private=self.private, + space_sdk="gradio" if self.repo_type == "space" else None, + # ^ We don't want it to fail when uploading to a Space => let's set Gradio by default. + # ^ I'd rather not add CLI args to set it explicitly as we already have `huggingface-cli repo create` for that. + ).repo_id + + # Check if branch already exists and if not, create it + if self.revision is not None and not self.create_pr: + try: + self.api.repo_info(repo_id=repo_id, repo_type=self.repo_type, revision=self.revision) + except RevisionNotFoundError: + logger.info(f"Branch '{self.revision}' not found. 
Creating it...") + self.api.create_branch(repo_id=repo_id, repo_type=self.repo_type, branch=self.revision, exist_ok=True) + # ^ `exist_ok=True` to avoid race concurrency issues + + # File-based upload + if os.path.isfile(self.local_path): + return self.api.upload_file( + path_or_fileobj=self.local_path, + path_in_repo=self.path_in_repo, + repo_id=repo_id, + repo_type=self.repo_type, + revision=self.revision, + commit_message=self.commit_message, + commit_description=self.commit_description, + create_pr=self.create_pr, + ) + + # Folder-based upload + else: + return self.api.upload_folder( + folder_path=self.local_path, + path_in_repo=self.path_in_repo, + repo_id=repo_id, + repo_type=self.repo_type, + revision=self.revision, + commit_message=self.commit_message, + commit_description=self.commit_description, + create_pr=self.create_pr, + allow_patterns=self.include, + ignore_patterns=self.exclude, + delete_patterns=self.delete, + ) diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/upload_large_folder.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/upload_large_folder.py new file mode 100644 index 0000000000000000000000000000000000000000..61c12a9f62f8e12591d8db4c9defc50dd91db705 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/upload_large_folder.py @@ -0,0 +1,129 @@ +# coding=utf-8 +# Copyright 2023-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains command to upload a large folder with the CLI.""" + +import os +from argparse import Namespace, _SubParsersAction +from typing import List, Optional + +from huggingface_hub import logging +from huggingface_hub.commands import BaseHuggingfaceCLICommand +from huggingface_hub.hf_api import HfApi +from huggingface_hub.utils import disable_progress_bars + +from ._cli_utils import ANSI + + +logger = logging.get_logger(__name__) + + +class UploadLargeFolderCommand(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + subparser = parser.add_parser("upload-large-folder", help="Upload a large folder to a repo on the Hub") + subparser.add_argument( + "repo_id", type=str, help="The ID of the repo to upload to (e.g. `username/repo-name`)." + ) + subparser.add_argument("local_path", type=str, help="Local path to the file or folder to upload.") + subparser.add_argument( + "--repo-type", + choices=["model", "dataset", "space"], + help="Type of the repo to upload to (e.g. `dataset`).", + ) + subparser.add_argument( + "--revision", + type=str, + help=("An optional Git revision to push to. It can be a branch name or a PR reference."), + ) + subparser.add_argument( + "--private", + action="store_true", + help=( + "Whether to create a private repo if repo doesn't exist on the Hub. Ignored if the repo already exists." 
+ ), + ) + subparser.add_argument("--include", nargs="*", type=str, help="Glob patterns to match files to upload.") + subparser.add_argument("--exclude", nargs="*", type=str, help="Glob patterns to exclude from files to upload.") + subparser.add_argument( + "--token", type=str, help="A User Access Token generated from https://huggingface.co/settings/tokens" + ) + subparser.add_argument( + "--num-workers", type=int, help="Number of workers to use to hash, upload and commit files." + ) + subparser.add_argument("--no-report", action="store_true", help="Whether to disable regular status report.") + subparser.add_argument("--no-bars", action="store_true", help="Whether to disable progress bars.") + subparser.set_defaults(func=UploadLargeFolderCommand) + + def __init__(self, args: Namespace) -> None: + self.repo_id: str = args.repo_id + self.local_path: str = args.local_path + self.repo_type: str = args.repo_type + self.revision: Optional[str] = args.revision + self.private: bool = args.private + + self.include: Optional[List[str]] = args.include + self.exclude: Optional[List[str]] = args.exclude + + self.api: HfApi = HfApi(token=args.token, library_name="huggingface-cli") + + self.num_workers: Optional[int] = args.num_workers + self.no_report: bool = args.no_report + self.no_bars: bool = args.no_bars + + if not os.path.isdir(self.local_path): + raise ValueError("Large upload is only supported for folders.") + + def run(self) -> None: + logging.set_verbosity_info() + + print( + ANSI.yellow( + "You are about to upload a large folder to the Hub using `huggingface-cli upload-large-folder`. " + "This is a new feature so feedback is very welcome!\n" + "\n" + "A few things to keep in mind:\n" + " - Repository limits still apply: https://huggingface.co/docs/hub/repositories-recommendations\n" + " - Do not start several processes in parallel.\n" + " - You can interrupt and resume the process at any time. " + "The script will pick up where it left off except for partially uploaded files that would have to be entirely reuploaded.\n" + " - Do not upload the same folder to several repositories. If you need to do so, you must delete the `./.cache/huggingface/` folder first.\n" + "\n" + f"Some temporary metadata will be stored under `{self.local_path}/.cache/huggingface`.\n" + " - You must not modify those files manually.\n" + " - You must not delete the `./.cache/huggingface/` folder while a process is running.\n" + " - You can delete the `./.cache/huggingface/` folder to reinitialize the upload state when the process is not running. Files will have to be hashed and preuploaded again, except for already committed files.\n" + "\n" + "If the process output is too verbose, you can disable the progress bars with `--no-bars`. " + "You can also entirely disable the status report with `--no-report`.\n" + "\n" + "For more details, run `huggingface-cli upload-large-folder --help` or check the documentation at " + "https://huggingface.co/docs/huggingface_hub/guides/upload#upload-a-large-folder."
+ ) + ) + + if self.no_bars: + disable_progress_bars() + + self.api.upload_large_folder( + repo_id=self.repo_id, + folder_path=self.local_path, + repo_type=self.repo_type, + revision=self.revision, + private=self.private, + allow_patterns=self.include, + ignore_patterns=self.exclude, + num_workers=self.num_workers, + print_report=not self.no_report, + ) diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/user.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/user.py new file mode 100644 index 0000000000000000000000000000000000000000..c8b943f7613a38551c16cbe5bc961e205a5776af --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/user.py @@ -0,0 +1,198 @@ +# Copyright 2020 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains commands to authenticate to the Hugging Face Hub and interact with your repositories. + +Usage: + # login and save token locally. + huggingface-cli login --token=hf_*** --add-to-git-credential + + # switch between tokens + huggingface-cli auth switch + + # list all tokens + huggingface-cli auth list + + # logout from a specific token, if no token-name is provided, all tokens will be deleted from your machine. + huggingface-cli logout --token-name=your_token_name + + # find out which huggingface.co account you are logged in as + huggingface-cli whoami +""" + +from argparse import _SubParsersAction +from typing import List, Optional + +from requests.exceptions import HTTPError + +from huggingface_hub.commands import BaseHuggingfaceCLICommand +from huggingface_hub.constants import ENDPOINT +from huggingface_hub.hf_api import HfApi + +from .._login import auth_list, auth_switch, login, logout +from ..utils import get_stored_tokens, get_token, logging +from ._cli_utils import ANSI + + +logger = logging.get_logger(__name__) + +try: + from InquirerPy import inquirer + from InquirerPy.base.control import Choice + + _inquirer_py_available = True +except ImportError: + _inquirer_py_available = False + + +class UserCommands(BaseHuggingfaceCLICommand): + @staticmethod + def register_subcommand(parser: _SubParsersAction): + login_parser = parser.add_parser("login", help="Log in using a token from huggingface.co/settings/tokens") + login_parser.add_argument( + "--token", + type=str, + help="Token generated from https://huggingface.co/settings/tokens", + ) + login_parser.add_argument( + "--add-to-git-credential", + action="store_true", + help="Optional: Save token to git credential helper.", + ) + login_parser.set_defaults(func=lambda args: LoginCommand(args)) + whoami_parser = parser.add_parser("whoami", help="Find out which huggingface.co account you are logged in as.") + whoami_parser.set_defaults(func=lambda args: WhoamiCommand(args)) + + logout_parser = parser.add_parser("logout", help="Log out") + logout_parser.add_argument( + "--token-name", + type=str, + help="Optional: Name of the access token to log out from.", + ) + logout_parser.set_defaults(func=lambda args: 
LogoutCommand(args)) + + auth_parser = parser.add_parser("auth", help="Other authentication related commands") + auth_subparsers = auth_parser.add_subparsers(help="Authentication subcommands") + auth_switch_parser = auth_subparsers.add_parser("switch", help="Switch between access tokens") + auth_switch_parser.add_argument( + "--token-name", + type=str, + help="Optional: Name of the access token to switch to.", + ) + auth_switch_parser.add_argument( + "--add-to-git-credential", + action="store_true", + help="Optional: Save token to git credential helper.", + ) + auth_switch_parser.set_defaults(func=lambda args: AuthSwitchCommand(args)) + auth_list_parser = auth_subparsers.add_parser("list", help="List all stored access tokens") + auth_list_parser.set_defaults(func=lambda args: AuthListCommand(args)) + + +class BaseUserCommand: + def __init__(self, args): + self.args = args + self._api = HfApi() + + +class LoginCommand(BaseUserCommand): + def run(self): + logging.set_verbosity_info() + login( + token=self.args.token, + add_to_git_credential=self.args.add_to_git_credential, + ) + + +class LogoutCommand(BaseUserCommand): + def run(self): + logging.set_verbosity_info() + logout(token_name=self.args.token_name) + + +class AuthSwitchCommand(BaseUserCommand): + def run(self): + logging.set_verbosity_info() + token_name = self.args.token_name + if token_name is None: + token_name = self._select_token_name() + + if token_name is None: + print("No token name provided. Aborting.") + exit() + auth_switch(token_name, add_to_git_credential=self.args.add_to_git_credential) + + def _select_token_name(self) -> Optional[str]: + token_names = list(get_stored_tokens().keys()) + + if not token_names: + logger.error("No stored tokens found. Please login first.") + return None + + if _inquirer_py_available: + return self._select_token_name_tui(token_names) + # if inquirer is not available, use a simpler terminal UI + print("Available stored tokens:") + for i, token_name in enumerate(token_names, 1): + print(f"{i}. {token_name}") + while True: + try: + choice = input("Enter the number of the token to switch to (or 'q' to quit): ") + if choice.lower() == "q": + return None + index = int(choice) - 1 + if 0 <= index < len(token_names): + return token_names[index] + else: + print("Invalid selection. Please try again.") + except ValueError: + print("Invalid input. 
Please enter a number or 'q' to quit.") + + def _select_token_name_tui(self, token_names: List[str]) -> Optional[str]: + choices = [Choice(token_name, name=token_name) for token_name in token_names] + try: + return inquirer.select( + message="Select a token to switch to:", + choices=choices, + default=None, + ).execute() + except KeyboardInterrupt: + logger.info("Token selection cancelled.") + return None + + +class AuthListCommand(BaseUserCommand): + def run(self): + logging.set_verbosity_info() + auth_list() + + +class WhoamiCommand(BaseUserCommand): + def run(self): + token = get_token() + if token is None: + print("Not logged in") + exit() + try: + info = self._api.whoami(token) + print(info["name"]) + orgs = [org["name"] for org in info["orgs"]] + if orgs: + print(ANSI.bold("orgs: "), ",".join(orgs)) + + if ENDPOINT != "https://huggingface.co": + print(f"Authenticated through private endpoint: {ENDPOINT}") + except HTTPError as e: + print(e) + print(ANSI.red(e.response.text)) + exit(1) diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/commands/version.py b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/version.py new file mode 100644 index 0000000000000000000000000000000000000000..f7e866b76f1dcbfbb90a4ec494c47cf3d61c17dd --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/commands/version.py @@ -0,0 +1,37 @@ +# Copyright 2022 The HuggingFace Team. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Contains command to print information about the version. + +Usage: + huggingface-cli version +""" + +from argparse import _SubParsersAction + +from huggingface_hub import __version__ + +from . import BaseHuggingfaceCLICommand + + +class VersionCommand(BaseHuggingfaceCLICommand): + def __init__(self, args): + self.args = args + + @staticmethod + def register_subcommand(parser: _SubParsersAction): + version_parser = parser.add_parser("version", help="Print information about the huggingface-cli version.") + version_parser.set_defaults(func=VersionCommand) + + def run(self) -> None: + print(f"huggingface_hub version: {__version__}") diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_client.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_client.py new file mode 100644 index 0000000000000000000000000000000000000000..08c8972fa48c147cc68ffc0d2aa4e49745582d15 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_client.py @@ -0,0 +1,3473 @@ +# coding=utf-8 +# Copyright 2023-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# +# Related resources: +# https://huggingface.co/tasks +# https://huggingface.co/docs/huggingface.js/inference/README +# https://github.com/huggingface/huggingface.js/tree/main/packages/inference/src +# https://github.com/huggingface/text-generation-inference/tree/main/clients/python +# https://github.com/huggingface/text-generation-inference/blob/main/clients/python/text_generation/client.py +# https://huggingface.slack.com/archives/C03E4DQ9LAJ/p1680169099087869 +# https://github.com/huggingface/unity-api#tasks +# +# Some TODO: +# - add all tasks +# +# NOTE: the philosophy of this client is "let's make it as easy as possible to use it, even if less optimized". Some +# examples of how it translates: +# - Timeout / Server unavailable is handled by the client in a single "timeout" parameter. +# - Files can be provided as bytes, file paths, or URLs and the client will try to "guess" the type. +# - Images are parsed as PIL.Image for easier manipulation. +# - Provides a "recommended model" for each task => suboptimal but user-wise quicker to get a first script running. +# - Only the main parameters are publicly exposed. Power users can always read the docs for more options. +import base64 +import logging +import re +import warnings +from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Literal, Optional, Union, overload + +from requests import HTTPError + +from huggingface_hub import constants +from huggingface_hub.errors import BadRequestError, InferenceTimeoutError +from huggingface_hub.inference._common import ( + TASKS_EXPECTING_IMAGES, + ContentT, + ModelStatus, + RequestParameters, + _b64_encode, + _b64_to_image, + _bytes_to_dict, + _bytes_to_image, + _bytes_to_list, + _get_unsupported_text_generation_kwargs, + _import_numpy, + _open_as_binary, + _set_unsupported_text_generation_kwargs, + _stream_chat_completion_response, + _stream_text_generation_response, + raise_text_generation_error, +) +from huggingface_hub.inference._generated.types import ( + AudioClassificationOutputElement, + AudioClassificationOutputTransform, + AudioToAudioOutputElement, + AutomaticSpeechRecognitionOutput, + ChatCompletionInputGrammarType, + ChatCompletionInputMessage, + ChatCompletionInputStreamOptions, + ChatCompletionInputTool, + ChatCompletionInputToolChoiceClass, + ChatCompletionInputToolChoiceEnum, + ChatCompletionOutput, + ChatCompletionStreamOutput, + DocumentQuestionAnsweringOutputElement, + FillMaskOutputElement, + ImageClassificationOutputElement, + ImageClassificationOutputTransform, + ImageSegmentationOutputElement, + ImageSegmentationSubtask, + ImageToImageTargetSize, + ImageToTextOutput, + ObjectDetectionOutputElement, + Padding, + QuestionAnsweringOutputElement, + SummarizationOutput, + SummarizationTruncationStrategy, + TableQuestionAnsweringOutputElement, + TextClassificationOutputElement, + TextClassificationOutputTransform, + TextGenerationInputGrammarType, + TextGenerationOutput, + TextGenerationStreamOutput, + TextToSpeechEarlyStoppingEnum, + TokenClassificationAggregationStrategy, + TokenClassificationOutputElement, + TranslationOutput, + TranslationTruncationStrategy, + VisualQuestionAnsweringOutputElement, + ZeroShotClassificationOutputElement, + ZeroShotImageClassificationOutputElement, +) +from huggingface_hub.inference._providers import PROVIDER_OR_POLICY_T, get_provider_helper +from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status +from 
huggingface_hub.utils._auth import get_token +from huggingface_hub.utils._deprecation import _deprecate_method + + +if TYPE_CHECKING: + import numpy as np + from PIL.Image import Image + +logger = logging.getLogger(__name__) + + +MODEL_KWARGS_NOT_USED_REGEX = re.compile(r"The following `model_kwargs` are not used by the model: \[(.*?)\]") + + +class InferenceClient: + """ + Initialize a new Inference Client. + + [`InferenceClient`] aims to provide a unified experience to perform inference. The client can be used + seamlessly with either the (free) Inference API, self-hosted Inference Endpoints, or third-party Inference Providers. + + Args: + model (`str`, `optional`): + The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct` + or a URL to a deployed Inference Endpoint. Defaults to None, in which case a recommended model is + automatically selected for the task. + Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2 + arguments are mutually exclusive. If using `base_url` for chat completion, the `/chat/completions` suffix + path will be appended to the base URL (see the [TGI Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api) + documentation for details). When passing a URL as `model`, the client will not append any suffix path to it. + provider (`str`, *optional*): + Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, "sambanova"` or `"together"`. + Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers. + If model is a URL or `base_url` is passed, then `provider` is not used. + token (`str`, *optional*): + Hugging Face token. Will default to the locally saved token if not provided. + Note: for better compatibility with OpenAI's client, `token` has been aliased as `api_key`. Those 2 + arguments are mutually exclusive and have the exact same behavior. + timeout (`float`, `optional`): + The maximum number of seconds to wait for a response from the server. Defaults to None, meaning it will loop until the server is available. + headers (`Dict[str, str]`, `optional`): + Additional headers to send to the server. By default only the authorization and user-agent headers are sent. + Values in this dictionary will override the default values. + bill_to (`str`, `optional`): + The billing account to use for the requests. By default the requests are billed on the user's account. + Requests can only be billed to an organization the user is a member of, and which has subscribed to Enterprise Hub. + cookies (`Dict[str, str]`, `optional`): + Additional cookies to send to the server. + proxies (`Any`, `optional`): + Proxies to use for the request. + base_url (`str`, `optional`): + Base URL to run inference. This is a duplicated argument from `model` to make [`InferenceClient`] + follow the same pattern as `openai.OpenAI` client. Cannot be used if `model` is set. Defaults to None. + api_key (`str`, `optional`): + Token to use for authentication. This is a duplicated argument from `token` to make [`InferenceClient`] + follow the same pattern as `openai.OpenAI` client. Cannot be used if `token` is set. Defaults to None. 
+ """ + + def __init__( + self, + model: Optional[str] = None, + *, + provider: Optional[PROVIDER_OR_POLICY_T] = None, + token: Optional[str] = None, + timeout: Optional[float] = None, + headers: Optional[Dict[str, str]] = None, + cookies: Optional[Dict[str, str]] = None, + proxies: Optional[Any] = None, + bill_to: Optional[str] = None, + # OpenAI compatibility + base_url: Optional[str] = None, + api_key: Optional[str] = None, + ) -> None: + if model is not None and base_url is not None: + raise ValueError( + "Received both `model` and `base_url` arguments. Please provide only one of them." + " `base_url` is an alias for `model` to make the API compatible with OpenAI's client." + " If using `base_url` for chat completion, the `/chat/completions` suffix path will be appended to the base url." + " When passing a URL as `model`, the client will not append any suffix path to it." + ) + if token is not None and api_key is not None: + raise ValueError( + "Received both `token` and `api_key` arguments. Please provide only one of them." + " `api_key` is an alias for `token` to make the API compatible with OpenAI's client." + " It has the exact same behavior as `token`." + ) + token = token if token is not None else api_key + if isinstance(token, bool): + # Legacy behavior: previously is was possible to pass `token=False` to disable authentication. This is not + # supported anymore as authentication is required. Better to explicitly raise here rather than risking + # sending the locally saved token without the user knowing about it. + if token is False: + raise ValueError( + "Cannot use `token=False` to disable authentication as authentication is required to run Inference." + ) + warnings.warn( + "Using `token=True` to automatically use the locally saved token is deprecated and will be removed in a future release. " + "Please use `token=None` instead (default).", + DeprecationWarning, + ) + token = get_token() + + self.model: Optional[str] = base_url or model + self.token: Optional[str] = token + + self.headers = {**headers} if headers is not None else {} + if bill_to is not None: + if ( + constants.HUGGINGFACE_HEADER_X_BILL_TO in self.headers + and self.headers[constants.HUGGINGFACE_HEADER_X_BILL_TO] != bill_to + ): + warnings.warn( + f"Overriding existing '{self.headers[constants.HUGGINGFACE_HEADER_X_BILL_TO]}' value in headers with '{bill_to}'.", + UserWarning, + ) + self.headers[constants.HUGGINGFACE_HEADER_X_BILL_TO] = bill_to + + if token is not None and not token.startswith("hf_"): + warnings.warn( + "You've provided an external provider's API key, so requests will be billed directly by the provider. " + "The `bill_to` parameter is only applicable for Hugging Face billing and will be ignored.", + UserWarning, + ) + + # Configure provider + self.provider = provider + + self.cookies = cookies + self.timeout = timeout + self.proxies = proxies + + def __repr__(self): + return f"" + + @overload + def _inner_post( # type: ignore[misc] + self, request_parameters: RequestParameters, *, stream: Literal[False] = ... + ) -> bytes: ... + + @overload + def _inner_post( # type: ignore[misc] + self, request_parameters: RequestParameters, *, stream: Literal[True] = ... + ) -> Iterable[bytes]: ... + + @overload + def _inner_post( + self, request_parameters: RequestParameters, *, stream: bool = False + ) -> Union[bytes, Iterable[bytes]]: ... 
+ + def _inner_post( + self, request_parameters: RequestParameters, *, stream: bool = False + ) -> Union[bytes, Iterable[bytes]]: + """Make a request to the inference server.""" + # TODO: this should be handled in provider helpers directly + if request_parameters.task in TASKS_EXPECTING_IMAGES and "Accept" not in request_parameters.headers: + request_parameters.headers["Accept"] = "image/png" + + with _open_as_binary(request_parameters.data) as data_as_binary: + try: + response = get_session().post( + request_parameters.url, + json=request_parameters.json, + data=data_as_binary, + headers=request_parameters.headers, + cookies=self.cookies, + timeout=self.timeout, + stream=stream, + proxies=self.proxies, + ) + except TimeoutError as error: + # Convert any `TimeoutError` to a `InferenceTimeoutError` + raise InferenceTimeoutError(f"Inference call timed out: {request_parameters.url}") from error # type: ignore + + try: + hf_raise_for_status(response) + return response.iter_lines() if stream else response.content + except HTTPError as error: + if error.response.status_code == 422 and request_parameters.task != "unknown": + msg = str(error.args[0]) + if len(error.response.text) > 0: + msg += f"\n{error.response.text}\n" + error.args = (msg,) + error.args[1:] + raise + + def audio_classification( + self, + audio: ContentT, + *, + model: Optional[str] = None, + top_k: Optional[int] = None, + function_to_apply: Optional["AudioClassificationOutputTransform"] = None, + ) -> List[AudioClassificationOutputElement]: + """ + Perform audio classification on the provided audio content. + + Args: + audio (Union[str, Path, bytes, BinaryIO]): + The audio content to classify. It can be raw audio bytes, a local audio file, or a URL pointing to an + audio file. + model (`str`, *optional*): + The model to use for audio classification. Can be a model ID hosted on the Hugging Face Hub + or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for + audio classification will be used. + top_k (`int`, *optional*): + When specified, limits the output to the top K most probable classes. + function_to_apply (`"AudioClassificationOutputTransform"`, *optional*): + The function to apply to the model outputs in order to retrieve the scores. + + Returns: + `List[AudioClassificationOutputElement]`: List of [`AudioClassificationOutputElement`] items containing the predicted labels and their confidence. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.audio_classification("audio.flac") + [ + AudioClassificationOutputElement(score=0.4976358711719513, label='hap'), + AudioClassificationOutputElement(score=0.3677836060523987, label='neu'), + ... 
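`_inner_post` converts low-level timeouts into [`InferenceTimeoutError`], so callers can handle a single exception type for every task. A hedged usage sketch (the model is the task default and `audio.flac` is an illustrative file name):

```py
from huggingface_hub import InferenceClient
from huggingface_hub.errors import InferenceTimeoutError

# With timeout=None (the default) the client waits until the server is available;
# a float caps the wait in seconds.
client = InferenceClient(timeout=30)

try:
    labels = client.audio_classification("audio.flac")  # illustrative local file
except InferenceTimeoutError:
    # Raised when the model is unavailable or the request exceeds `timeout`.
    labels = []
```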
+ ] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="audio-classification", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=audio, + parameters={"function_to_apply": function_to_apply, "top_k": top_k}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return AudioClassificationOutputElement.parse_obj_as_list(response) + + def audio_to_audio( + self, + audio: ContentT, + *, + model: Optional[str] = None, + ) -> List[AudioToAudioOutputElement]: + """ + Performs multiple tasks related to audio-to-audio depending on the model (eg: speech enhancement, source separation). + + Args: + audio (Union[str, Path, bytes, BinaryIO]): + The audio content for the model. It can be raw audio bytes, a local audio file, or a URL pointing to an + audio file. + model (`str`, *optional*): + The model can be any model which takes an audio file and returns another audio file. Can be a model ID hosted on the Hugging Face Hub + or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for + audio_to_audio will be used. + + Returns: + `List[AudioToAudioOutputElement]`: A list of [`AudioToAudioOutputElement`] items containing audios label, content-type, and audio content in blob. + + Raises: + `InferenceTimeoutError`: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> audio_output = client.audio_to_audio("audio.flac") + >>> for i, item in enumerate(audio_output): + >>> with open(f"output_{i}.flac", "wb") as f: + f.write(item.blob) + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="audio-to-audio", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=audio, + parameters={}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + audio_output = AudioToAudioOutputElement.parse_obj_as_list(response) + for item in audio_output: + item.blob = base64.b64decode(item.blob) + return audio_output + + def automatic_speech_recognition( + self, + audio: ContentT, + *, + model: Optional[str] = None, + extra_body: Optional[Dict] = None, + ) -> AutomaticSpeechRecognitionOutput: + """ + Perform automatic speech recognition (ASR or audio-to-text) on the given audio content. + + Args: + audio (Union[str, Path, bytes, BinaryIO]): + The content to transcribe. It can be raw audio bytes, local audio file, or a URL to an audio file. + model (`str`, *optional*): + The model to use for ASR. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. If not provided, the default recommended model for ASR will be used. + extra_body (`Dict`, *optional*): + Additional provider-specific parameters to pass to the model. Refer to the provider's documentation + for supported parameters. + Returns: + [`AutomaticSpeechRecognitionOutput`]: An item containing the transcribed text and optionally the timestamp chunks. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. 
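As the docstrings above note, every `ContentT` argument accepts raw bytes, a local path, or a URL, and the client guesses which one it received. A small sketch of the three equivalent call styles (the file name and URL are placeholders):

```py
from huggingface_hub import InferenceClient

client = InferenceClient()

# 1. A local file path (str or pathlib.Path)
out_path = client.automatic_speech_recognition("hello_world.flac")

# 2. Raw bytes read from disk
with open("hello_world.flac", "rb") as f:
    out_bytes = client.automatic_speech_recognition(f.read())

# 3. A URL pointing at an audio file
out_url = client.automatic_speech_recognition("https://example.com/hello_world.flac")
```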
+ + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.automatic_speech_recognition("hello_world.flac").text + "hello world" + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="automatic-speech-recognition", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=audio, + parameters={**(extra_body or {})}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return AutomaticSpeechRecognitionOutput.parse_obj_as_instance(response) + + @overload + def chat_completion( # type: ignore + self, + messages: List[Union[Dict, ChatCompletionInputMessage]], + *, + model: Optional[str] = None, + stream: Literal[False] = False, + frequency_penalty: Optional[float] = None, + logit_bias: Optional[List[float]] = None, + logprobs: Optional[bool] = None, + max_tokens: Optional[int] = None, + n: Optional[int] = None, + presence_penalty: Optional[float] = None, + response_format: Optional[ChatCompletionInputGrammarType] = None, + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stream_options: Optional[ChatCompletionInputStreamOptions] = None, + temperature: Optional[float] = None, + tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None, + tool_prompt: Optional[str] = None, + tools: Optional[List[ChatCompletionInputTool]] = None, + top_logprobs: Optional[int] = None, + top_p: Optional[float] = None, + extra_body: Optional[Dict] = None, + ) -> ChatCompletionOutput: ... + + @overload + def chat_completion( # type: ignore + self, + messages: List[Union[Dict, ChatCompletionInputMessage]], + *, + model: Optional[str] = None, + stream: Literal[True] = True, + frequency_penalty: Optional[float] = None, + logit_bias: Optional[List[float]] = None, + logprobs: Optional[bool] = None, + max_tokens: Optional[int] = None, + n: Optional[int] = None, + presence_penalty: Optional[float] = None, + response_format: Optional[ChatCompletionInputGrammarType] = None, + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stream_options: Optional[ChatCompletionInputStreamOptions] = None, + temperature: Optional[float] = None, + tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None, + tool_prompt: Optional[str] = None, + tools: Optional[List[ChatCompletionInputTool]] = None, + top_logprobs: Optional[int] = None, + top_p: Optional[float] = None, + extra_body: Optional[Dict] = None, + ) -> Iterable[ChatCompletionStreamOutput]: ... 
+ + @overload + def chat_completion( + self, + messages: List[Union[Dict, ChatCompletionInputMessage]], + *, + model: Optional[str] = None, + stream: bool = False, + frequency_penalty: Optional[float] = None, + logit_bias: Optional[List[float]] = None, + logprobs: Optional[bool] = None, + max_tokens: Optional[int] = None, + n: Optional[int] = None, + presence_penalty: Optional[float] = None, + response_format: Optional[ChatCompletionInputGrammarType] = None, + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stream_options: Optional[ChatCompletionInputStreamOptions] = None, + temperature: Optional[float] = None, + tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None, + tool_prompt: Optional[str] = None, + tools: Optional[List[ChatCompletionInputTool]] = None, + top_logprobs: Optional[int] = None, + top_p: Optional[float] = None, + extra_body: Optional[Dict] = None, + ) -> Union[ChatCompletionOutput, Iterable[ChatCompletionStreamOutput]]: ... + + def chat_completion( + self, + messages: List[Union[Dict, ChatCompletionInputMessage]], + *, + model: Optional[str] = None, + stream: bool = False, + # Parameters from ChatCompletionInput (handled manually) + frequency_penalty: Optional[float] = None, + logit_bias: Optional[List[float]] = None, + logprobs: Optional[bool] = None, + max_tokens: Optional[int] = None, + n: Optional[int] = None, + presence_penalty: Optional[float] = None, + response_format: Optional[ChatCompletionInputGrammarType] = None, + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stream_options: Optional[ChatCompletionInputStreamOptions] = None, + temperature: Optional[float] = None, + tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None, + tool_prompt: Optional[str] = None, + tools: Optional[List[ChatCompletionInputTool]] = None, + top_logprobs: Optional[int] = None, + top_p: Optional[float] = None, + extra_body: Optional[Dict] = None, + ) -> Union[ChatCompletionOutput, Iterable[ChatCompletionStreamOutput]]: + """ + A method for completing conversations using a specified language model. + + + + The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client. + Inputs and outputs are strictly the same and using either syntax will yield the same results. + Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility) + for more details about OpenAI's compatibility. + + + + + You can pass provider-specific parameters to the model by using the `extra_body` argument. + + + Args: + messages (List of [`ChatCompletionInputMessage`]): + Conversation history consisting of roles and content pairs. + model (`str`, *optional*): + The model to use for chat-completion. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. If not provided, the default recommended model for chat-based text-generation will be used. + See https://huggingface.co/tasks/text-generation for more details. + If `model` is a model ID, it is passed to the server as the `model` parameter. If you want to define a + custom URL while setting `model` in the request payload, you must set `base_url` when initializing [`InferenceClient`]. + frequency_penalty (`float`, *optional*): + Penalizes new tokens based on their existing frequency + in the text so far. Range: [-2.0, 2.0]. Defaults to 0.0. 
+ logit_bias (`List[float]`, *optional*): + Adjusts the likelihood of specific tokens appearing in the generated output. + logprobs (`bool`, *optional*): + Whether to return log probabilities of the output tokens or not. If true, returns the log + probabilities of each output token returned in the content of message. + max_tokens (`int`, *optional*): + Maximum number of tokens allowed in the response. Defaults to 100. + n (`int`, *optional*): + The number of completions to generate for each prompt. + presence_penalty (`float`, *optional*): + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the + text so far, increasing the model's likelihood to talk about new topics. + response_format ([`ChatCompletionInputGrammarType`], *optional*): + Grammar constraints. Can be either a JSONSchema or a regex. + seed (Optional[`int`], *optional*): + Seed for reproducible control flow. Defaults to None. + stop (`List[str]`, *optional*): + Up to four strings which trigger the end of the response. + Defaults to None. + stream (`bool`, *optional*): + Enable realtime streaming of responses. Defaults to False. + stream_options ([`ChatCompletionInputStreamOptions`], *optional*): + Options for streaming completions. + temperature (`float`, *optional*): + Controls randomness of the generations. Lower values ensure + less random completions. Range: [0, 2]. Defaults to 1.0. + top_logprobs (`int`, *optional*): + An integer between 0 and 5 specifying the number of most likely tokens to return at each token + position, each with an associated log probability. logprobs must be set to true if this parameter is + used. + top_p (`float`, *optional*): + Fraction of the most likely next words to sample from. + Must be between 0 and 1. Defaults to 1.0. + tool_choice ([`ChatCompletionInputToolChoiceClass`] or [`ChatCompletionInputToolChoiceEnum`], *optional*): + The tool to use for the completion. Defaults to "auto". + tool_prompt (`str`, *optional*): + A prompt to be appended before the tools. + tools (List of [`ChatCompletionInputTool`], *optional*): + A list of tools the model may call. Currently, only functions are supported as a tool. Use this to + provide a list of functions the model may generate JSON inputs for. + extra_body (`Dict`, *optional*): + Additional provider-specific parameters to pass to the model. Refer to the provider's documentation + for supported parameters. + Returns: + [`ChatCompletionOutput`] or Iterable of [`ChatCompletionStreamOutput`]: + Generated text returned from the server: + - if `stream=False`, the generated text is returned as a [`ChatCompletionOutput`] (default). + - if `stream=True`, the generated text is returned token by token as a sequence of [`ChatCompletionStreamOutput`]. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+ + Example: + + ```py + >>> from huggingface_hub import InferenceClient + >>> messages = [{"role": "user", "content": "What is the capital of France?"}] + >>> client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct") + >>> client.chat_completion(messages, max_tokens=100) + ChatCompletionOutput( + choices=[ + ChatCompletionOutputComplete( + finish_reason='eos_token', + index=0, + message=ChatCompletionOutputMessage( + role='assistant', + content='The capital of France is Paris.', + name=None, + tool_calls=None + ), + logprobs=None + ) + ], + created=1719907176, + id='', + model='meta-llama/Meta-Llama-3-8B-Instruct', + object='text_completion', + system_fingerprint='2.0.4-sha-f426a33', + usage=ChatCompletionOutputUsage( + completion_tokens=8, + prompt_tokens=17, + total_tokens=25 + ) + ) + ``` + + Example using streaming: + ```py + >>> from huggingface_hub import InferenceClient + >>> messages = [{"role": "user", "content": "What is the capital of France?"}] + >>> client = InferenceClient("meta-llama/Meta-Llama-3-8B-Instruct") + >>> for token in client.chat_completion(messages, max_tokens=10, stream=True): + ... print(token) + ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content='The', role='assistant'), index=0, finish_reason=None)], created=1710498504) + ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content=' capital', role='assistant'), index=0, finish_reason=None)], created=1710498504) + (...) + ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content=' may', role='assistant'), index=0, finish_reason=None)], created=1710498504) + ``` + + Example using OpenAI's syntax: + ```py + # instead of `from openai import OpenAI` + from huggingface_hub import InferenceClient + + # instead of `client = OpenAI(...)` + client = InferenceClient( + base_url=..., + api_key=..., + ) + + output = client.chat.completions.create( + model="meta-llama/Meta-Llama-3-8B-Instruct", + messages=[ + {"role": "system", "content": "You are a helpful assistant."}, + {"role": "user", "content": "Count to 10"}, + ], + stream=True, + max_tokens=1024, + ) + + for chunk in output: + print(chunk.choices[0].delta.content) + ``` + + Example using a third-party provider directly with extra (provider-specific) parameters. Usage will be billed on your Together AI account. + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient( + ... provider="together", # Use Together AI provider + ... api_key="", # Pass your Together API key directly + ... ) + >>> client.chat_completion( + ... model="meta-llama/Meta-Llama-3-8B-Instruct", + ... messages=[{"role": "user", "content": "What is the capital of France?"}], + ... extra_body={"safety_model": "Meta-Llama/Llama-Guard-7b"}, + ... ) + ``` + + Example using a third-party provider through Hugging Face Routing. Usage will be billed on your Hugging Face account. + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient( + ... provider="sambanova", # Use Sambanova provider + ... api_key="hf_...", # Pass your HF token + ... ) + >>> client.chat_completion( + ... model="meta-llama/Meta-Llama-3-8B-Instruct", + ... messages=[{"role": "user", "content": "What is the capital of France?"}], + ... 
) + ``` + + Example using Image + Text as input: + ```py + >>> from huggingface_hub import InferenceClient + + # provide a remote URL + >>> image_url ="https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg" + # or a base64-encoded image + >>> image_path = "/path/to/image.jpeg" + >>> with open(image_path, "rb") as f: + ... base64_image = base64.b64encode(f.read()).decode("utf-8") + >>> image_url = f"data:image/jpeg;base64,{base64_image}" + + >>> client = InferenceClient("meta-llama/Llama-3.2-11B-Vision-Instruct") + >>> output = client.chat.completions.create( + ... messages=[ + ... { + ... "role": "user", + ... "content": [ + ... { + ... "type": "image_url", + ... "image_url": {"url": image_url}, + ... }, + ... { + ... "type": "text", + ... "text": "Describe this image in one sentence.", + ... }, + ... ], + ... }, + ... ], + ... ) + >>> output + The image depicts the iconic Statue of Liberty situated in New York Harbor, New York, on a clear day. + ``` + + Example using tools: + ```py + >>> client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct") + >>> messages = [ + ... { + ... "role": "system", + ... "content": "Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous.", + ... }, + ... { + ... "role": "user", + ... "content": "What's the weather like the next 3 days in San Francisco, CA?", + ... }, + ... ] + >>> tools = [ + ... { + ... "type": "function", + ... "function": { + ... "name": "get_current_weather", + ... "description": "Get the current weather", + ... "parameters": { + ... "type": "object", + ... "properties": { + ... "location": { + ... "type": "string", + ... "description": "The city and state, e.g. San Francisco, CA", + ... }, + ... "format": { + ... "type": "string", + ... "enum": ["celsius", "fahrenheit"], + ... "description": "The temperature unit to use. Infer this from the users location.", + ... }, + ... }, + ... "required": ["location", "format"], + ... }, + ... }, + ... }, + ... { + ... "type": "function", + ... "function": { + ... "name": "get_n_day_weather_forecast", + ... "description": "Get an N-day weather forecast", + ... "parameters": { + ... "type": "object", + ... "properties": { + ... "location": { + ... "type": "string", + ... "description": "The city and state, e.g. San Francisco, CA", + ... }, + ... "format": { + ... "type": "string", + ... "enum": ["celsius", "fahrenheit"], + ... "description": "The temperature unit to use. Infer this from the users location.", + ... }, + ... "num_days": { + ... "type": "integer", + ... "description": "The number of days to forecast", + ... }, + ... }, + ... "required": ["location", "format", "num_days"], + ... }, + ... }, + ... }, + ... ] + + >>> response = client.chat_completion( + ... model="meta-llama/Meta-Llama-3-70B-Instruct", + ... messages=messages, + ... tools=tools, + ... tool_choice="auto", + ... max_tokens=500, + ... ) + >>> response.choices[0].message.tool_calls[0].function + ChatCompletionOutputFunctionDefinition( + arguments={ + 'location': 'San Francisco, CA', + 'format': 'fahrenheit', + 'num_days': 3 + }, + name='get_n_day_weather_forecast', + description=None + ) + ``` + + Example using response_format: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct") + >>> messages = [ + ... { + ... "role": "user", + ... "content": "I saw a puppy a cat and a raccoon during my bike ride in the park. What did I saw and when?", + ... 
}, + ... ] + >>> response_format = { + ... "type": "json", + ... "value": { + ... "properties": { + ... "location": {"type": "string"}, + ... "activity": {"type": "string"}, + ... "animals_seen": {"type": "integer", "minimum": 1, "maximum": 5}, + ... "animals": {"type": "array", "items": {"type": "string"}}, + ... }, + ... "required": ["location", "activity", "animals_seen", "animals"], + ... }, + ... } + >>> response = client.chat_completion( + ... messages=messages, + ... response_format=response_format, + ... max_tokens=500, + ... ) + >>> response.choices[0].message.content + '{\n\n"activity": "bike ride",\n"animals": ["puppy", "cat", "raccoon"],\n"animals_seen": 3,\n"location": "park"}' + ``` + """ + # Since `chat_completion(..., model=xxx)` is also a payload parameter for the server, we need to handle 'model' differently. + # `self.model` takes precedence over 'model' argument for building URL. + # `model` takes precedence for payload value. + model_id_or_url = self.model or model + payload_model = model or self.model + + # Get the provider helper + provider_helper = get_provider_helper( + self.provider, + task="conversational", + model=model_id_or_url + if model_id_or_url is not None and model_id_or_url.startswith(("http://", "https://")) + else payload_model, + ) + + # Prepare the payload + parameters = { + "model": payload_model, + "frequency_penalty": frequency_penalty, + "logit_bias": logit_bias, + "logprobs": logprobs, + "max_tokens": max_tokens, + "n": n, + "presence_penalty": presence_penalty, + "response_format": response_format, + "seed": seed, + "stop": stop, + "temperature": temperature, + "tool_choice": tool_choice, + "tool_prompt": tool_prompt, + "tools": tools, + "top_logprobs": top_logprobs, + "top_p": top_p, + "stream": stream, + "stream_options": stream_options, + **(extra_body or {}), + } + request_parameters = provider_helper.prepare_request( + inputs=messages, + parameters=parameters, + headers=self.headers, + model=model_id_or_url, + api_key=self.token, + ) + data = self._inner_post(request_parameters, stream=stream) + + if stream: + return _stream_chat_completion_response(data) # type: ignore[arg-type] + + return ChatCompletionOutput.parse_obj_as_instance(data) # type: ignore[arg-type] + + def document_question_answering( + self, + image: ContentT, + question: str, + *, + model: Optional[str] = None, + doc_stride: Optional[int] = None, + handle_impossible_answer: Optional[bool] = None, + lang: Optional[str] = None, + max_answer_len: Optional[int] = None, + max_question_len: Optional[int] = None, + max_seq_len: Optional[int] = None, + top_k: Optional[int] = None, + word_boxes: Optional[List[Union[List[float], str]]] = None, + ) -> List[DocumentQuestionAnsweringOutputElement]: + """ + Answer questions on document images. + + Args: + image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`): + The input image for the context. It can be raw bytes, an image file, a URL to an online image, or a PIL Image. + question (`str`): + Question to be answered. + model (`str`, *optional*): + The model to use for the document question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended document question answering model will be used. + Defaults to None. + doc_stride (`int`, *optional*): + If the words in the document are too long to fit with the question for the model, it will be split in + several chunks with some overlap. This argument controls the size of that overlap. 
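To make the `model` precedence rule in `chat_completion` above concrete: when the client is bound to a URL, that URL decides where the request goes, and the `model` argument only fills the `model` field of the payload. A sketch with a placeholder endpoint and model name:

```py
from huggingface_hub import InferenceClient

# Client bound to a fixed endpoint (placeholder URL).
client = InferenceClient(base_url="https://my-tgi-endpoint.example")

# The request is sent to the endpoint above; "my-model" only ends up as the
# `model` value inside the JSON payload, not in the request URL.
client.chat_completion(
    messages=[{"role": "user", "content": "Hello!"}],
    model="my-model",
)
```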
+ handle_impossible_answer (`bool`, *optional*): + Whether to accept impossible as an answer. + lang (`str`, *optional*): + Language to use while running OCR. Defaults to English. + max_answer_len (`int`, *optional*): + The maximum length of predicted answers (e.g., only answers with a shorter length are considered). + max_question_len (`int`, *optional*): + The maximum length of the question after tokenization. It will be truncated if needed. + max_seq_len (`int`, *optional*): + The maximum length of the total sentence (context + question) in tokens of each chunk passed to the + model. The context will be split in several chunks (using doc_stride as overlap) if needed. + top_k (`int`, *optional*): + The number of answers to return (will be chosen by order of likelihood). Can return less than top_k + answers if there are not enough options available within the context. + word_boxes (`List[Union[List[float], str]]`, *optional*): + A list of words and bounding boxes (normalized 0->1000). If provided, the inference will skip the OCR + step and use the provided bounding boxes instead. + Returns: + `List[DocumentQuestionAnsweringOutputElement]`: a list of [`DocumentQuestionAnsweringOutputElement`] items containing the predicted label, associated probability, word ids, and page number. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.document_question_answering(image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", question="What is the invoice number?") + [DocumentQuestionAnsweringOutputElement(answer='us-001', end=16, score=0.9999666213989258, start=16)] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="document-question-answering", model=model_id) + inputs: Dict[str, Any] = {"question": question, "image": _b64_encode(image)} + request_parameters = provider_helper.prepare_request( + inputs=inputs, + parameters={ + "doc_stride": doc_stride, + "handle_impossible_answer": handle_impossible_answer, + "lang": lang, + "max_answer_len": max_answer_len, + "max_question_len": max_question_len, + "max_seq_len": max_seq_len, + "top_k": top_k, + "word_boxes": word_boxes, + }, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return DocumentQuestionAnsweringOutputElement.parse_obj_as_list(response) + + def feature_extraction( + self, + text: str, + *, + normalize: Optional[bool] = None, + prompt_name: Optional[str] = None, + truncate: Optional[bool] = None, + truncation_direction: Optional[Literal["Left", "Right"]] = None, + model: Optional[str] = None, + ) -> "np.ndarray": + """ + Generate embeddings for a given text. + + Args: + text (`str`): + The text to embed. + model (`str`, *optional*): + The model to use for the feature extraction task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended feature extraction model will be used. + Defaults to None. + normalize (`bool`, *optional*): + Whether to normalize the embeddings or not. + Only available on servers powered by Text-Embedding-Inference. + prompt_name (`str`, *optional*): + The name of the prompt that should be used for encoding.
If not set, no prompt will be applied. + Must be a key in the `Sentence Transformers` configuration `prompts` dictionary. + For example, if ``prompt_name`` is "query" and ``prompts`` is {"query": "query: ",...}, + then the sentence "What is the capital of France?" will be encoded as "query: What is the capital of France?" + because the prompt text will be prepended before any text to encode. + truncate (`bool`, *optional*): + Whether to truncate the embeddings or not. + Only available on servers powered by Text-Embedding-Inference. + truncation_direction (`Literal["Left", "Right"]`, *optional*): + Which side of the input should be truncated when `truncate=True` is passed. + + Returns: + `np.ndarray`: The embedding representing the input text as a float32 numpy array. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.feature_extraction("Hi, who are you?") + array([[ 2.424802 , 2.93384 , 1.1750331 , ..., 1.240499, -0.13776633, -0.7889173 ], + [-0.42943227, -0.6364878 , -1.693462 , ..., 0.41978157, -2.4336355 , 0.6162071 ], + ..., + [ 0.28552425, -0.928395 , -1.2077185 , ..., 0.76810825, -2.1069427 , 0.6236161 ]], dtype=float32) + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="feature-extraction", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=text, + parameters={ + "normalize": normalize, + "prompt_name": prompt_name, + "truncate": truncate, + "truncation_direction": truncation_direction, + }, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + np = _import_numpy() + return np.array(provider_helper.get_response(response), dtype="float32") + + def fill_mask( + self, + text: str, + *, + model: Optional[str] = None, + targets: Optional[List[str]] = None, + top_k: Optional[int] = None, + ) -> List[FillMaskOutputElement]: + """ + Fill in a hole with a missing word (token to be precise). + + Args: + text (`str`): + A string to be filled; must contain the [MASK] token (check model card for exact name of the mask). + model (`str`, *optional*): + The model to use for the fill mask task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended fill mask model will be used. + targets (`List[str]`, *optional*): + When passed, the model will limit the scores to the passed targets instead of looking up in the whole + vocabulary. If the provided targets are not in the model vocab, they will be tokenized and the first + resulting token will be used (with a warning, and that might be slower). + top_k (`int`, *optional*): + When passed, overrides the number of predictions to return. + Returns: + `List[FillMaskOutputElement]`: a list of [`FillMaskOutputElement`] items containing the predicted label, associated + probability, token reference, and completed text. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503.
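`feature_extraction` returns a float32 numpy array, which makes similarity computations straightforward. A hedged sketch of cosine similarity between two texts; note that some models return one vector per token, so the sketch mean-pools 2D outputs first:

```py
import numpy as np
from huggingface_hub import InferenceClient

client = InferenceClient()

def embed(text: str) -> np.ndarray:
    vec = client.feature_extraction(text)
    # Pool token-level outputs down to a single sentence vector if needed.
    return vec.mean(axis=0) if vec.ndim > 1 else vec

a = embed("Machine learning is so easy.")
b = embed("Deep learning is so straightforward.")
cosine = float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))
print(cosine)
```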
+ + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.fill_mask("The goal of life is <mask>.") + [ + FillMaskOutputElement(score=0.06897063553333282, token=11098, token_str=' happiness', sequence='The goal of life is happiness.'), + FillMaskOutputElement(score=0.06554922461509705, token=45075, token_str=' immortality', sequence='The goal of life is immortality.') + ] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="fill-mask", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=text, + parameters={"targets": targets, "top_k": top_k}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return FillMaskOutputElement.parse_obj_as_list(response) + + def image_classification( + self, + image: ContentT, + *, + model: Optional[str] = None, + function_to_apply: Optional["ImageClassificationOutputTransform"] = None, + top_k: Optional[int] = None, + ) -> List[ImageClassificationOutputElement]: + """ + Perform image classification on the given image using the specified model. + + Args: + image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`): + The image to classify. It can be raw bytes, an image file, a URL to an online image, or a PIL Image. + model (`str`, *optional*): + The model to use for image classification. Can be a model ID hosted on the Hugging Face Hub or a URL to a + deployed Inference Endpoint. If not provided, the default recommended model for image classification will be used. + function_to_apply (`"ImageClassificationOutputTransform"`, *optional*): + The function to apply to the model outputs in order to retrieve the scores. + top_k (`int`, *optional*): + When specified, limits the output to the top K most probable classes. + Returns: + `List[ImageClassificationOutputElement]`: a list of [`ImageClassificationOutputElement`] items containing the predicted label and associated probability. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.image_classification("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg") + [ImageClassificationOutputElement(label='Blenheim spaniel', score=0.9779096841812134), ...] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="image-classification", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=image, + parameters={"function_to_apply": function_to_apply, "top_k": top_k}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return ImageClassificationOutputElement.parse_obj_as_list(response) + + def image_segmentation( + self, + image: ContentT, + *, + model: Optional[str] = None, + mask_threshold: Optional[float] = None, + overlap_mask_area_threshold: Optional[float] = None, + subtask: Optional["ImageSegmentationSubtask"] = None, + threshold: Optional[float] = None, + ) -> List[ImageSegmentationOutputElement]: + """ + Perform image segmentation on the given image using the specified model. + + + + You must have `PIL` installed if you want to work with images (`pip install Pillow`).
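The `targets` and `top_k` parameters of `fill_mask` documented above can be combined to score a closed set of candidates. A hedged sketch; the `<mask>` placeholder and the leading spaces in the targets are model-specific (check the model card):

```py
from huggingface_hub import InferenceClient

client = InferenceClient()

# Score only two candidate completions and keep the better-ranked ones.
results = client.fill_mask(
    "The goal of life is <mask>.",
    targets=[" happiness", " love"],  # leading space depends on the tokenizer
    top_k=2,
)
for r in results:
    print(f"{r.score:.4f}  {r.sequence}")
```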
+ + + + Args: + image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`): + The image to segment. It can be raw bytes, an image file, a URL to an online image, or a PIL Image. + model (`str`, *optional*): + The model to use for image segmentation. Can be a model ID hosted on the Hugging Face Hub or a URL to a + deployed Inference Endpoint. If not provided, the default recommended model for image segmentation will be used. + mask_threshold (`float`, *optional*): + Threshold to use when turning the predicted masks into binary values. + overlap_mask_area_threshold (`float`, *optional*): + Mask overlap threshold to eliminate small, disconnected segments. + subtask (`"ImageSegmentationSubtask"`, *optional*): + Segmentation task to be performed, depending on model capabilities. + threshold (`float`, *optional*): + Probability threshold to filter out predicted masks. + Returns: + `List[ImageSegmentationOutputElement]`: A list of [`ImageSegmentationOutputElement`] items containing the segmented masks and associated attributes. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.image_segmentation("cat.jpg") + [ImageSegmentationOutputElement(score=0.989008, label='LABEL_184', mask=<PIL.Image.Image>), ...] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="image-segmentation", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=image, + parameters={ + "mask_threshold": mask_threshold, + "overlap_mask_area_threshold": overlap_mask_area_threshold, + "subtask": subtask, + "threshold": threshold, + }, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + output = ImageSegmentationOutputElement.parse_obj_as_list(response) + for item in output: + item.mask = _b64_to_image(item.mask) # type: ignore [assignment] + return output + + def image_to_image( + self, + image: ContentT, + prompt: Optional[str] = None, + *, + negative_prompt: Optional[str] = None, + num_inference_steps: Optional[int] = None, + guidance_scale: Optional[float] = None, + model: Optional[str] = None, + target_size: Optional[ImageToImageTargetSize] = None, + **kwargs, + ) -> "Image": + """ + Perform image-to-image translation using a specified model. + + + + You must have `PIL` installed if you want to work with images (`pip install Pillow`). + + + + Args: + image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`): + The input image for translation. It can be raw bytes, an image file, a URL to an online image, or a PIL Image. + prompt (`str`, *optional*): + The text prompt to guide the image generation. + negative_prompt (`str`, *optional*): + One prompt to guide what NOT to include in image generation. + num_inference_steps (`int`, *optional*): + For diffusion models. The number of denoising steps. More denoising steps usually lead to a higher + quality image at the expense of slower inference. + guidance_scale (`float`, *optional*): + For diffusion models. A higher guidance scale value encourages the model to generate images closely + linked to the text prompt at the expense of lower image quality. + model (`str`, *optional*): + The model to use for inference.
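Because `image_segmentation` decodes each base64 mask into a PIL image (see the loop above), the masks can be saved directly. A short sketch, with `cat.jpg` as an illustrative input file:

```py
from huggingface_hub import InferenceClient

client = InferenceClient()

# Each output element carries a PIL image in `.mask` plus a label and score.
for i, seg in enumerate(client.image_segmentation("cat.jpg")):
    seg.mask.save(f"mask_{i}_{seg.label}.png")
```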
Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None. + target_size (`ImageToImageTargetSize`, *optional*): + The size in pixels of the output image. + + Returns: + `Image`: The translated image. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> image = client.image_to_image("cat.jpg", prompt="turn the cat into a tiger") + >>> image.save("tiger.jpg") + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="image-to-image", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=image, + parameters={ + "prompt": prompt, + "negative_prompt": negative_prompt, + "target_size": target_size, + "num_inference_steps": num_inference_steps, + "guidance_scale": guidance_scale, + **kwargs, + }, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return _bytes_to_image(response) + + def image_to_text(self, image: ContentT, *, model: Optional[str] = None) -> ImageToTextOutput: + """ + Takes an input image and returns text. + + Models can have very different outputs depending on your use case (image captioning, optical character recognition + (OCR), Pix2Struct, etc). Please have a look at the model card to learn more about a model's specificities. + + Args: + image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`): + The input image to caption. It can be raw bytes, an image file, a URL to an online image, or a PIL Image. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None. + + Returns: + [`ImageToTextOutput`]: The generated text. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.image_to_text("cat.jpg") + 'a cat standing in a grassy field ' + >>> client.image_to_text("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg") + 'a dog laying on the grass next to a flower pot ' + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="image-to-text", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=image, + parameters={}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + output = ImageToTextOutput.parse_obj(response) + return output[0] if isinstance(output, list) else output + + def object_detection( + self, image: ContentT, *, model: Optional[str] = None, threshold: Optional[float] = None + ) -> List[ObjectDetectionOutputElement]: + """ + Perform object detection on the given image using the specified model. + + + + You must have `PIL` installed if you want to work with images (`pip install Pillow`).
+ + + + Args: + image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`): + The image to detect objects on. It can be raw bytes, an image file, a URL to an online image, or a PIL Image. + model (`str`, *optional*): + The model to use for object detection. Can be a model ID hosted on the Hugging Face Hub or a URL to a + deployed Inference Endpoint. If not provided, the default recommended model for object detection (DETR) will be used. + threshold (`float`, *optional*): + The probability necessary to make a prediction. + Returns: + `List[ObjectDetectionOutputElement]`: A list of [`ObjectDetectionOutputElement`] items containing the bounding boxes and associated attributes. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + `ValueError`: + If the request output is not a List. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.object_detection("people.jpg") + [ObjectDetectionOutputElement(score=0.9486683011054993, label='person', box=ObjectDetectionBoundingBox(xmin=59, ymin=39, xmax=420, ymax=510)), ...] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="object-detection", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=image, + parameters={"threshold": threshold}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return ObjectDetectionOutputElement.parse_obj_as_list(response) + + def question_answering( + self, + question: str, + context: str, + *, + model: Optional[str] = None, + align_to_words: Optional[bool] = None, + doc_stride: Optional[int] = None, + handle_impossible_answer: Optional[bool] = None, + max_answer_len: Optional[int] = None, + max_question_len: Optional[int] = None, + max_seq_len: Optional[int] = None, + top_k: Optional[int] = None, + ) -> Union[QuestionAnsweringOutputElement, List[QuestionAnsweringOutputElement]]: + """ + Retrieve the answer to a question from a given text. + + Args: + question (`str`): + Question to be answered. + context (`str`): + The context of the question. + model (`str`): + The model to use for the question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. + align_to_words (`bool`, *optional*): + Attempts to align the answer to real words. Improves quality on space separated languages. Might hurt + on non-space-separated languages (like Japanese or Chinese) + doc_stride (`int`, *optional*): + If the context is too long to fit with the question for the model, it will be split in several chunks + with some overlap. This argument controls the size of that overlap. + handle_impossible_answer (`bool`, *optional*): + Whether to accept impossible as an answer. + max_answer_len (`int`, *optional*): + The maximum length of predicted answers (e.g., only answers with a shorter length are considered). + max_question_len (`int`, *optional*): + The maximum length of the question after tokenization. It will be truncated if needed. + max_seq_len (`int`, *optional*): + The maximum length of the total sentence (context + question) in tokens of each chunk passed to the + model. The context will be split in several chunks (using docStride as overlap) if needed. 
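The bounding boxes returned by `object_detection` above expose `xmin`/`ymin`/`xmax`/`ymax` coordinates, so drawing them back onto the image takes one call per box. A hedged sketch using Pillow (`people.jpg` is an illustrative file):

```py
from PIL import Image, ImageDraw
from huggingface_hub import InferenceClient

client = InferenceClient()

image = Image.open("people.jpg")
draw = ImageDraw.Draw(image)

for det in client.object_detection("people.jpg"):
    box = det.box
    draw.rectangle((box.xmin, box.ymin, box.xmax, box.ymax), outline="red", width=2)
    draw.text((box.xmin, box.ymin), f"{det.label} {det.score:.2f}", fill="red")

image.save("people_annotated.png")
```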
+ top_k (`int`, *optional*): + The number of answers to return (will be chosen by order of likelihood). Note that we return less than + top_k answers if there are not enough options available within the context. + + Returns: + Union[`QuestionAnsweringOutputElement`, List[`QuestionAnsweringOutputElement`]]: + When top_k is 1 or not provided, it returns a single `QuestionAnsweringOutputElement`. + When top_k is greater than 1, it returns a list of `QuestionAnsweringOutputElement`. + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.question_answering(question="What's my name?", context="My name is Clara and I live in Berkeley.") + QuestionAnsweringOutputElement(answer='Clara', end=16, score=0.9326565265655518, start=11) + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="question-answering", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs={"question": question, "context": context}, + parameters={ + "align_to_words": align_to_words, + "doc_stride": doc_stride, + "handle_impossible_answer": handle_impossible_answer, + "max_answer_len": max_answer_len, + "max_question_len": max_question_len, + "max_seq_len": max_seq_len, + "top_k": top_k, + }, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + # Parse the response as a single `QuestionAnsweringOutputElement` when top_k is 1 or not provided, or a list of `QuestionAnsweringOutputElement` to ensure backward compatibility. + output = QuestionAnsweringOutputElement.parse_obj(response) + return output + + def sentence_similarity( + self, sentence: str, other_sentences: List[str], *, model: Optional[str] = None + ) -> List[float]: + """ + Compute the semantic similarity between a sentence and a list of other sentences by comparing their embeddings. + + Args: + sentence (`str`): + The main sentence to compare to others. + other_sentences (`List[str]`): + The list of sentences to compare to. + model (`str`, *optional*): + The model to use for the sentence similarity task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended sentence similarity model will be used. + Defaults to None. + + Returns: + `List[float]`: The similarity scores, one per sentence in `other_sentences`. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.sentence_similarity( + ... "Machine learning is so easy.", + ... other_sentences=[ + ... "Deep learning is so straightforward.", + ... "This is so difficult, like rocket science.", + ... "I can't believe how much I struggled with this.", + ... ], + ...
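Since `question_answering` returns either a single element or a list depending on `top_k` (as documented above), callers that vary `top_k` may want to normalize the result. A small sketch:

```py
from huggingface_hub import InferenceClient

client = InferenceClient()

answers = client.question_answering(
    question="What's my name?",
    context="My name is Clara and I live in Berkeley.",
    top_k=3,
)
# top_k > 1 yields a list; top_k=1 (or unset) yields a single element.
if not isinstance(answers, list):
    answers = [answers]
for a in answers:
    print(f"{a.score:.3f}  {a.answer}")
```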
) + [0.7785726189613342, 0.45876261591911316, 0.2906220555305481] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="sentence-similarity", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs={"source_sentence": sentence, "sentences": other_sentences}, + parameters={}, + extra_payload={}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return _bytes_to_list(response) + + def summarization( + self, + text: str, + *, + model: Optional[str] = None, + clean_up_tokenization_spaces: Optional[bool] = None, + generate_parameters: Optional[Dict[str, Any]] = None, + truncation: Optional["SummarizationTruncationStrategy"] = None, + ) -> SummarizationOutput: + """ + Generate a summary of a given text using a specified model. + + Args: + text (`str`): + The input text to summarize. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. If not provided, the default recommended model for summarization will be used. + clean_up_tokenization_spaces (`bool`, *optional*): + Whether to clean up the potential extra spaces in the text output. + generate_parameters (`Dict[str, Any]`, *optional*): + Additional parametrization of the text generation algorithm. + truncation (`"SummarizationTruncationStrategy"`, *optional*): + The truncation strategy to use. + Returns: + [`SummarizationOutput`]: The generated summary text. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.summarization("The Eiffel tower...") + SummarizationOutput(generated_text="The Eiffel tower is one of the most famous landmarks in the world....") + ``` + """ + parameters = { + "clean_up_tokenization_spaces": clean_up_tokenization_spaces, + "generate_parameters": generate_parameters, + "truncation": truncation, + } + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="summarization", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=text, + parameters=parameters, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return SummarizationOutput.parse_obj_as_list(response)[0] + + def table_question_answering( + self, + table: Dict[str, Any], + query: str, + *, + model: Optional[str] = None, + padding: Optional["Padding"] = None, + sequential: Optional[bool] = None, + truncation: Optional[bool] = None, + ) -> TableQuestionAnsweringOutputElement: + """ + Retrieve the answer to a question from information given in a table. + + Args: + table (`Dict[str, Any]`): + A table of data represented as a dict of lists whose keys are the column headers and whose values are + the column entries; all lists must have the same length. + query (`str`): + The query in plain text that you want to ask the table. + model (`str`, *optional*): + The model to use for the table-question-answering task. Can be a model ID hosted on the Hugging Face + Hub or a URL to a deployed Inference Endpoint. + padding (`"Padding"`, *optional*): + Activates and controls padding. + sequential (`bool`, *optional*): + Whether to do inference sequentially or as a batch.
Batching is faster, but models like SQA require the + inference to be done sequentially to extract relations within sequences, given their conversational + nature. + truncation (`bool`, *optional*): + Activates and controls truncation. + + Returns: + [`TableQuestionAnsweringOutputElement`]: a table question answering output containing the answer, coordinates, cells and the aggregator used. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> query = "How many stars does the transformers repository have?" + >>> table = {"Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"]} + >>> client.table_question_answering(table, query, model="google/tapas-base-finetuned-wtq") + TableQuestionAnsweringOutputElement(answer='36542', coordinates=[[0, 1]], cells=['36542'], aggregator='AVERAGE') + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="table-question-answering", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs={"query": query, "table": table}, + parameters={"model": model, "padding": padding, "sequential": sequential, "truncation": truncation}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return TableQuestionAnsweringOutputElement.parse_obj_as_instance(response) + + def tabular_classification(self, table: Dict[str, Any], *, model: Optional[str] = None) -> List[str]: + """ + Classifying a target category (a group) based on a set of attributes. + + Args: + table (`Dict[str, Any]`): + Set of attributes to classify. + model (`str`, *optional*): + The model to use for the tabular classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended tabular classification model will be used. + Defaults to None. + + Returns: + `List`: a list of labels, one per row in the initial table. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> table = { + ... "fixed_acidity": ["7.4", "7.8", "10.3"], + ... "volatile_acidity": ["0.7", "0.88", "0.32"], + ... "citric_acid": ["0", "0", "0.45"], + ... "residual_sugar": ["1.9", "2.6", "6.4"], + ... "chlorides": ["0.076", "0.098", "0.073"], + ... "free_sulfur_dioxide": ["11", "25", "5"], + ... "total_sulfur_dioxide": ["34", "67", "13"], + ... "density": ["0.9978", "0.9968", "0.9976"], + ... "pH": ["3.51", "3.2", "3.23"], + ... "sulphates": ["0.56", "0.68", "0.82"], + ... "alcohol": ["9.4", "9.8", "12.6"], + ... 
} + >>> client.tabular_classification(table=table, model="julien-c/wine-quality") + ["5", "5", "5"] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="tabular-classification", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=None, + extra_payload={"table": table}, + parameters={}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return _bytes_to_list(response) + + def tabular_regression(self, table: Dict[str, Any], *, model: Optional[str] = None) -> List[float]: + """ + Predicting a numerical target value given a set of attributes/features in a table. + + Args: + table (`Dict[str, Any]`): + Set of attributes stored in a table. The attributes used to predict the target can be both numerical and categorical. + model (`str`, *optional*): + The model to use for the tabular regression task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended tabular regression model will be used. + Defaults to None. + + Returns: + `List`: a list of predicted numerical target values. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> table = { + ... "Height": ["11.52", "12.48", "12.3778"], + ... "Length1": ["23.2", "24", "23.9"], + ... "Length2": ["25.4", "26.3", "26.5"], + ... "Length3": ["30", "31.2", "31.1"], + ... "Species": ["Bream", "Bream", "Bream"], + ... "Width": ["4.02", "4.3056", "4.6961"], + ... } + >>> client.tabular_regression(table, model="scikit-learn/Fish-Weight") + [110, 120, 130] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="tabular-regression", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=None, + parameters={}, + extra_payload={"table": table}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return _bytes_to_list(response) + + def text_classification( + self, + text: str, + *, + model: Optional[str] = None, + top_k: Optional[int] = None, + function_to_apply: Optional["TextClassificationOutputTransform"] = None, + ) -> List[TextClassificationOutputElement]: + """ + Perform text classification (e.g. sentiment-analysis) on the given text. + + Args: + text (`str`): + A string to be classified. + model (`str`, *optional*): + The model to use for the text classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended text classification model will be used. + Defaults to None. + top_k (`int`, *optional*): + When specified, limits the output to the top K most probable classes. + function_to_apply (`"TextClassificationOutputTransform"`, *optional*): + The function to apply to the model outputs in order to retrieve the scores. + + Returns: + `List[TextClassificationOutputElement]`: a list of [`TextClassificationOutputElement`] items containing the predicted label and associated probability. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. 
+ `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.text_classification("I like you") + [ + TextClassificationOutputElement(label='POSITIVE', score=0.9998695850372314), + TextClassificationOutputElement(label='NEGATIVE', score=0.0001304351753788069), + ] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="text-classification", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=text, + parameters={ + "function_to_apply": function_to_apply, + "top_k": top_k, + }, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return TextClassificationOutputElement.parse_obj_as_list(response)[0] # type: ignore [return-value] + + @overload + def text_generation( # type: ignore + self, + prompt: str, + *, + details: Literal[False] = ..., + stream: Literal[False] = ..., + model: Optional[str] = None, + # Parameters from `TextGenerationInputGenerateParameters` (maintained manually) + adapter_id: Optional[str] = None, + best_of: Optional[int] = None, + decoder_input_details: Optional[bool] = None, + do_sample: Optional[bool] = False, # Manual default value + frequency_penalty: Optional[float] = None, + grammar: Optional[TextGenerationInputGrammarType] = None, + max_new_tokens: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: Optional[bool] = False, # Manual default value + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_n_tokens: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: Optional[bool] = None, + ) -> str: ... + + @overload + def text_generation( # type: ignore + self, + prompt: str, + *, + details: Literal[True] = ..., + stream: Literal[False] = ..., + model: Optional[str] = None, + # Parameters from `TextGenerationInputGenerateParameters` (maintained manually) + adapter_id: Optional[str] = None, + best_of: Optional[int] = None, + decoder_input_details: Optional[bool] = None, + do_sample: Optional[bool] = False, # Manual default value + frequency_penalty: Optional[float] = None, + grammar: Optional[TextGenerationInputGrammarType] = None, + max_new_tokens: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: Optional[bool] = False, # Manual default value + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_n_tokens: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: Optional[bool] = None, + ) -> TextGenerationOutput: ... 
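+ # The two overloads below mirror the two above with `stream=True`, swapping the return + # types for their streaming counterparts (`Iterable[str]` and `Iterable[TextGenerationStreamOutput]`).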
+ + @overload + def text_generation( # type: ignore + self, + prompt: str, + *, + details: Literal[False] = ..., + stream: Literal[True] = ..., + model: Optional[str] = None, + # Parameters from `TextGenerationInputGenerateParameters` (maintained manually) + adapter_id: Optional[str] = None, + best_of: Optional[int] = None, + decoder_input_details: Optional[bool] = None, + do_sample: Optional[bool] = False, # Manual default value + frequency_penalty: Optional[float] = None, + grammar: Optional[TextGenerationInputGrammarType] = None, + max_new_tokens: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: Optional[bool] = False, # Manual default value + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_n_tokens: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: Optional[bool] = None, + ) -> Iterable[str]: ... + + @overload + def text_generation( # type: ignore + self, + prompt: str, + *, + details: Literal[True] = ..., + stream: Literal[True] = ..., + model: Optional[str] = None, + # Parameters from `TextGenerationInputGenerateParameters` (maintained manually) + adapter_id: Optional[str] = None, + best_of: Optional[int] = None, + decoder_input_details: Optional[bool] = None, + do_sample: Optional[bool] = False, # Manual default value + frequency_penalty: Optional[float] = None, + grammar: Optional[TextGenerationInputGrammarType] = None, + max_new_tokens: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: Optional[bool] = False, # Manual default value + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_n_tokens: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: Optional[bool] = None, + ) -> Iterable[TextGenerationStreamOutput]: ... + + @overload + def text_generation( + self, + prompt: str, + *, + details: Literal[True] = ..., + stream: bool = ..., + model: Optional[str] = None, + # Parameters from `TextGenerationInputGenerateParameters` (maintained manually) + adapter_id: Optional[str] = None, + best_of: Optional[int] = None, + decoder_input_details: Optional[bool] = None, + do_sample: Optional[bool] = False, # Manual default value + frequency_penalty: Optional[float] = None, + grammar: Optional[TextGenerationInputGrammarType] = None, + max_new_tokens: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: Optional[bool] = False, # Manual default value + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_n_tokens: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: Optional[bool] = None, + ) -> Union[TextGenerationOutput, Iterable[TextGenerationStreamOutput]]: ... 
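+ # Overload resolution for `text_generation`, as declared above: + # details=False, stream=False -> str + # details=True, stream=False -> TextGenerationOutput + # details=False, stream=True -> Iterable[str] + # details=True, stream=True -> Iterable[TextGenerationStreamOutput]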
+ + def text_generation( + self, + prompt: str, + *, + details: bool = False, + stream: bool = False, + model: Optional[str] = None, + # Parameters from `TextGenerationInputGenerateParameters` (maintained manually) + adapter_id: Optional[str] = None, + best_of: Optional[int] = None, + decoder_input_details: Optional[bool] = None, + do_sample: Optional[bool] = False, # Manual default value + frequency_penalty: Optional[float] = None, + grammar: Optional[TextGenerationInputGrammarType] = None, + max_new_tokens: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: Optional[bool] = False, # Manual default value + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_n_tokens: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: Optional[bool] = None, + ) -> Union[str, TextGenerationOutput, Iterable[str], Iterable[TextGenerationStreamOutput]]: + """ + Given a prompt, generate the following text. + + + + If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method. + It accepts a list of messages instead of a single text prompt and handles the chat templating for you. + + + + Args: + prompt (`str`): + Input text. + details (`bool`, *optional*): + By default, text_generation returns a string. Pass `details=True` if you want a detailed output (tokens, + probabilities, seed, finish reason, etc.). Only available for models running with the + `text-generation-inference` backend. + stream (`bool`, *optional*): + By default, text_generation returns the full generated text. Pass `stream=True` if you want a stream of + tokens to be returned. Only available for models running with the `text-generation-inference` + backend. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None. + adapter_id (`str`, *optional*): + LoRA adapter id. + best_of (`int`, *optional*): + Generate `best_of` sequences and return the one with the highest token logprobs. + decoder_input_details (`bool`, *optional*): + Return the decoder input token logprobs and ids. You must set `details=True` as well for it to be taken + into account. Defaults to `False`. + do_sample (`bool`, *optional*): + Activate logits sampling. + frequency_penalty (`float`, *optional*): + Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in + the text so far, decreasing the model's likelihood to repeat the same line verbatim. + grammar ([`TextGenerationInputGrammarType`], *optional*): + Grammar constraints. Can be either a JSONSchema or a regex. + max_new_tokens (`int`, *optional*): + Maximum number of generated tokens. Defaults to 100. + repetition_penalty (`float`, *optional*): + The parameter for repetition penalty. 1.0 means no penalty. See [this + paper](https://arxiv.org/pdf/1909.05858.pdf) for more details. + return_full_text (`bool`, *optional*): + Whether to prepend the prompt to the generated text. + seed (`int`, *optional*): + Random sampling seed. + stop (`List[str]`, *optional*): + Stop generating tokens if a member of `stop` is generated.
+ stop_sequences (`List[str]`, *optional*): + Deprecated argument. Use `stop` instead. + temperature (`float`, *optional*): + The value used to modulate the logits distribution. + top_n_tokens (`int`, *optional*): + Return information about the `top_n_tokens` most likely tokens at each generation step, instead of + just the sampled token. + top_k (`int`, *optional*): + The number of highest probability vocabulary tokens to keep for top-k-filtering. + top_p (`float`, *optional*): + If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or + higher are kept for generation. + truncate (`int`, *optional*): + Truncate input tokens to the given size. + typical_p (`float`, *optional*): + Typical Decoding mass. + See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information. + watermark (`bool`, *optional*): + Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226). + + Returns: + `Union[str, TextGenerationOutput, Iterable[str], Iterable[TextGenerationStreamOutput]]`: + Generated text returned from the server: + - if `stream=False` and `details=False`, the generated text is returned as a `str` (default) + - if `stream=True` and `details=False`, the generated text is returned token by token as an `Iterable[str]` + - if `stream=False` and `details=True`, the generated text is returned with more details as a [`~huggingface_hub.TextGenerationOutput`] + - if `stream=True` and `details=True`, the generated text is returned token by token as an iterable of [`~huggingface_hub.TextGenerationStreamOutput`] + + Raises: + `ValidationError`: + If input values are not valid. No HTTP call is made to the server. + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + + # Case 1: generate text + >>> client.text_generation("The huggingface_hub library is ", max_new_tokens=12) + '100% open source and built to be easy to use.' + + # Case 2: iterate over the generated tokens. Useful for large generation. + >>> for token in client.text_generation("The huggingface_hub library is ", max_new_tokens=12, stream=True): + ... print(token) + 100 + % + open + source + and + built + to + be + easy + to + use + . + + # Case 3: get more details about the generation process. + >>> client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True) + TextGenerationOutput( + generated_text='100% open source and built to be easy to use.', + details=TextGenerationDetails( + finish_reason='length', + generated_tokens=12, + seed=None, + prefill=[ + TextGenerationPrefillOutputToken(id=487, text='The', logprob=None), + TextGenerationPrefillOutputToken(id=53789, text=' hugging', logprob=-13.171875), + (...) + TextGenerationPrefillOutputToken(id=204, text=' ', logprob=-7.0390625) + ], + tokens=[ + TokenElement(id=1425, text='100', logprob=-1.0175781, special=False), + TokenElement(id=16, text='%', logprob=-0.0463562, special=False), + (...) + TokenElement(id=25, text='.', logprob=-0.5703125, special=False) + ], + best_of_sequences=None + ) + ) + + # Case 4: iterate over the generated tokens with more details. + # Last object is more complete, containing the full generated text and the finish reason.
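+ # (Intermediate items carry only the token; `generated_text` and `details` stay None until the last item.)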
+ >>> for details in client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True, stream=True): + ... print(details) + ... + TextGenerationStreamOutput(token=TokenElement(id=1425, text='100', logprob=-1.0175781, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=16, text='%', logprob=-0.0463562, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=1314, text=' open', logprob=-1.3359375, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=3178, text=' source', logprob=-0.28100586, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=273, text=' and', logprob=-0.5961914, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=3426, text=' built', logprob=-1.9423828, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=271, text=' to', logprob=-1.4121094, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=314, text=' be', logprob=-1.5224609, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=1833, text=' easy', logprob=-2.1132812, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=271, text=' to', logprob=-0.08520508, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=745, text=' use', logprob=-0.39453125, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement( + id=25, + text='.', + logprob=-0.5703125, + special=False), + generated_text='100% open source and built to be easy to use.', + details=TextGenerationStreamOutputStreamDetails(finish_reason='length', generated_tokens=12, seed=None) + ) + + # Case 5: generate constrained output using grammar + >>> response = client.text_generation( + ... prompt="I saw a puppy a cat and a raccoon during my bike ride in the park", + ... model="HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1", + ... max_new_tokens=100, + ... repetition_penalty=1.3, + ... grammar={ + ... "type": "json", + ... "value": { + ... "properties": { + ... "location": {"type": "string"}, + ... "activity": {"type": "string"}, + ... "animals_seen": {"type": "integer", "minimum": 1, "maximum": 5}, + ... "animals": {"type": "array", "items": {"type": "string"}}, + ... }, + ... "required": ["location", "activity", "animals_seen", "animals"], + ... }, + ... }, + ... ) + >>> import json + >>> json.loads(response) + { + "activity": "bike riding", + "animals": ["puppy", "cat", "raccoon"], + "animals_seen": 3, + "location": "park" + } + ``` + """ + if decoder_input_details and not details: + warnings.warn( + "`decoder_input_details=True` has been passed to the server but `details=False` is set, meaning that" + " the output from the server will be truncated." + ) + decoder_input_details = False + + if stop_sequences is not None: + warnings.warn( + "`stop_sequences` is a deprecated argument for the `text_generation` task" + " and will be removed in version '0.28.0'.
Use `stop` instead.", + FutureWarning, + ) + if stop is None: + stop = stop_sequences # use deprecated arg if provided + + # Build payload + parameters = { + "adapter_id": adapter_id, + "best_of": best_of, + "decoder_input_details": decoder_input_details, + "details": details, + "do_sample": do_sample, + "frequency_penalty": frequency_penalty, + "grammar": grammar, + "max_new_tokens": max_new_tokens, + "repetition_penalty": repetition_penalty, + "return_full_text": return_full_text, + "seed": seed, + "stop": stop if stop is not None else [], + "temperature": temperature, + "top_k": top_k, + "top_n_tokens": top_n_tokens, + "top_p": top_p, + "truncate": truncate, + "typical_p": typical_p, + "watermark": watermark, + } + + # Remove some parameters if not a TGI server + unsupported_kwargs = _get_unsupported_text_generation_kwargs(model) + if len(unsupported_kwargs) > 0: + # The server does not support some parameters + # => means it is not a TGI server + # => remove unsupported parameters and warn the user + + ignored_parameters = [] + for key in unsupported_kwargs: + if parameters.get(key): + ignored_parameters.append(key) + parameters.pop(key, None) + if len(ignored_parameters) > 0: + warnings.warn( + "API endpoint/model for text-generation is not served via TGI. Ignoring the following parameters:" + f" {', '.join(ignored_parameters)}.", + UserWarning, + ) + if details: + warnings.warn( + "API endpoint/model for text-generation is not served via TGI. Parameter `details=True` will" + " be ignored, meaning only the generated text will be returned.", + UserWarning, + ) + details = False + if stream: + raise ValueError( + "API endpoint/model for text-generation is not served via TGI. Cannot return output as a stream." + " Please pass `stream=False` as input." + ) + + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="text-generation", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=prompt, + parameters=parameters, + extra_payload={"stream": stream}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + + # Handle errors separately for more precise error messages + try: + bytes_output = self._inner_post(request_parameters, stream=stream) + except HTTPError as e: + match = MODEL_KWARGS_NOT_USED_REGEX.search(str(e)) + if isinstance(e, BadRequestError) and match: + unused_params = [kwarg.strip("' ") for kwarg in match.group(1).split(",")] + _set_unsupported_text_generation_kwargs(model, unused_params) + return self.text_generation( # type: ignore + prompt=prompt, + details=details, + stream=stream, + model=model_id, + adapter_id=adapter_id, + best_of=best_of, + decoder_input_details=decoder_input_details, + do_sample=do_sample, + frequency_penalty=frequency_penalty, + grammar=grammar, + max_new_tokens=max_new_tokens, + repetition_penalty=repetition_penalty, + return_full_text=return_full_text, + seed=seed, + stop=stop, + temperature=temperature, + top_k=top_k, + top_n_tokens=top_n_tokens, + top_p=top_p, + truncate=truncate, + typical_p=typical_p, + watermark=watermark, + ) + raise_text_generation_error(e) + + # Parse output + if stream: + return _stream_text_generation_response(bytes_output, details) # type: ignore + + data = _bytes_to_dict(bytes_output) # type: ignore[arg-type] + + # Data can be a single element (dict) or an iterable of dicts, in which case we select the first element.
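+ # (e.g. a bare `{"generated_text": ...}` mapping, or a one-element list wrapping such a mapping)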
+ if isinstance(data, list): + data = data[0] + response = provider_helper.get_response(data, request_parameters) + return TextGenerationOutput.parse_obj_as_instance(response) if details else response["generated_text"] + + def text_to_image( + self, + prompt: str, + *, + negative_prompt: Optional[str] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: Optional[int] = None, + guidance_scale: Optional[float] = None, + model: Optional[str] = None, + scheduler: Optional[str] = None, + seed: Optional[int] = None, + extra_body: Optional[Dict[str, Any]] = None, + ) -> "Image": + """ + Generate an image based on a given text using a specified model. + + + + You must have `PIL` installed if you want to work with images (`pip install Pillow`). + + + + + You can pass provider-specific parameters to the model by using the `extra_body` argument. + + + Args: + prompt (`str`): + The prompt to generate an image from. + negative_prompt (`str`, *optional*): + One prompt to guide what NOT to include in image generation. + height (`int`, *optional*): + The height in pixels of the output image + width (`int`, *optional*): + The width in pixels of the output image + num_inference_steps (`int`, *optional*): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*): + A higher guidance scale value encourages the model to generate images closely linked to the text + prompt, but values too high may cause saturation and other artifacts. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. If not provided, the default recommended text-to-image model will be used. + Defaults to None. + scheduler (`str`, *optional*): + Override the scheduler with a compatible one. + seed (`int`, *optional*): + Seed for the random number generator. + extra_body (`Dict[str, Any]`, *optional*): + Additional provider-specific parameters to pass to the model. Refer to the provider's documentation + for supported parameters. + + Returns: + `Image`: The generated image. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + + >>> image = client.text_to_image("An astronaut riding a horse on the moon.") + >>> image.save("astronaut.png") + + >>> image = client.text_to_image( + ... "An astronaut riding a horse on the moon.", + ... negative_prompt="low resolution, blurry", + ... model="stabilityai/stable-diffusion-2-1", + ... ) + >>> image.save("better_astronaut.png") + ``` + Example using a third-party provider directly. Usage will be billed on your fal.ai account. + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient( + ... provider="fal-ai", # Use fal.ai provider + ... api_key="fal-ai-api-key", # Pass your fal.ai API key + ... ) + >>> image = client.text_to_image( + ... "A majestic lion in a fantasy forest", + ... model="black-forest-labs/FLUX.1-schnell", + ... ) + >>> image.save("lion.png") + ``` + + Example using a third-party provider through Hugging Face Routing. Usage will be billed on your Hugging Face account. + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient( + ... 
provider="replicate", # Use replicate provider + ... api_key="hf_...", # Pass your HF token + ... ) + >>> image = client.text_to_image( + ... "An astronaut riding a horse on the moon.", + ... model="black-forest-labs/FLUX.1-dev", + ... ) + >>> image.save("astronaut.png") + ``` + + Example using Replicate provider with extra parameters + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient( + ... provider="replicate", # Use replicate provider + ... api_key="hf_...", # Pass your HF token + ... ) + >>> image = client.text_to_image( + ... "An astronaut riding a horse on the moon.", + ... model="black-forest-labs/FLUX.1-schnell", + ... extra_body={"output_quality": 100}, + ... ) + >>> image.save("astronaut.png") + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="text-to-image", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=prompt, + parameters={ + "negative_prompt": negative_prompt, + "height": height, + "width": width, + "num_inference_steps": num_inference_steps, + "guidance_scale": guidance_scale, + "scheduler": scheduler, + "seed": seed, + **(extra_body or {}), + }, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + response = provider_helper.get_response(response) + return _bytes_to_image(response) + + def text_to_video( + self, + prompt: str, + *, + model: Optional[str] = None, + guidance_scale: Optional[float] = None, + negative_prompt: Optional[List[str]] = None, + num_frames: Optional[float] = None, + num_inference_steps: Optional[int] = None, + seed: Optional[int] = None, + extra_body: Optional[Dict[str, Any]] = None, + ) -> bytes: + """ + Generate a video based on a given text. + + + You can pass provider-specific parameters to the model by using the `extra_body` argument. + + + Args: + prompt (`str`): + The prompt to generate a video from. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. If not provided, the default recommended text-to-video model will be used. + Defaults to None. + guidance_scale (`float`, *optional*): + A higher guidance scale value encourages the model to generate videos closely linked to the text + prompt, but values too high may cause saturation and other artifacts. + negative_prompt (`List[str]`, *optional*): + One or several prompt to guide what NOT to include in video generation. + num_frames (`float`, *optional*): + The num_frames parameter determines how many video frames are generated. + num_inference_steps (`int`, *optional*): + The number of denoising steps. More denoising steps usually lead to a higher quality video at the + expense of slower inference. + seed (`int`, *optional*): + Seed for the random number generator. + extra_body (`Dict[str, Any]`, *optional*): + Additional provider-specific parameters to pass to the model. Refer to the provider's documentation + for supported parameters. + + Returns: + `bytes`: The generated video. + + Example: + + Example using a third-party provider directly. Usage will be billed on your fal.ai account. + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient( + ... provider="fal-ai", # Using fal.ai provider + ... api_key="fal-ai-api-key", # Pass your fal.ai API key + ... ) + >>> video = client.text_to_video( + ... "A majestic lion running in a fantasy forest", + ... 
model="tencent/HunyuanVideo", + ... ) + >>> with open("lion.mp4", "wb") as file: + ... file.write(video) + ``` + + Example using a third-party provider through Hugging Face Routing. Usage will be billed on your Hugging Face account. + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient( + ... provider="replicate", # Using replicate provider + ... api_key="hf_...", # Pass your HF token + ... ) + >>> video = client.text_to_video( + ... "A cat running in a park", + ... model="genmo/mochi-1-preview", + ... ) + >>> with open("cat.mp4", "wb") as file: + ... file.write(video) + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="text-to-video", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=prompt, + parameters={ + "guidance_scale": guidance_scale, + "negative_prompt": negative_prompt, + "num_frames": num_frames, + "num_inference_steps": num_inference_steps, + "seed": seed, + **(extra_body or {}), + }, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + response = provider_helper.get_response(response, request_parameters) + return response + + def text_to_speech( + self, + text: str, + *, + model: Optional[str] = None, + do_sample: Optional[bool] = None, + early_stopping: Optional[Union[bool, "TextToSpeechEarlyStoppingEnum"]] = None, + epsilon_cutoff: Optional[float] = None, + eta_cutoff: Optional[float] = None, + max_length: Optional[int] = None, + max_new_tokens: Optional[int] = None, + min_length: Optional[int] = None, + min_new_tokens: Optional[int] = None, + num_beam_groups: Optional[int] = None, + num_beams: Optional[int] = None, + penalty_alpha: Optional[float] = None, + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_p: Optional[float] = None, + typical_p: Optional[float] = None, + use_cache: Optional[bool] = None, + extra_body: Optional[Dict[str, Any]] = None, + ) -> bytes: + """ + Synthesize an audio of a voice pronouncing a given text. + + + You can pass provider-specific parameters to the model by using the `extra_body` argument. + + + Args: + text (`str`): + The text to synthesize. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. If not provided, the default recommended text-to-speech model will be used. + Defaults to None. + do_sample (`bool`, *optional*): + Whether to use sampling instead of greedy decoding when generating new tokens. + early_stopping (`Union[bool, "TextToSpeechEarlyStoppingEnum"]`, *optional*): + Controls the stopping condition for beam-based methods. + epsilon_cutoff (`float`, *optional*): + If set to float strictly between 0 and 1, only tokens with a conditional probability greater than + epsilon_cutoff will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on + the size of the model. See [Truncation Sampling as Language Model + Desmoothing](https://hf.co/papers/2210.15191) for more details. + eta_cutoff (`float`, *optional*): + Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to float strictly + between 0 and 1, a token is only considered if it is greater than either eta_cutoff or sqrt(eta_cutoff) + * exp(-entropy(softmax(next_token_logits))). The latter term is intuitively the expected next token + probability, scaled by sqrt(eta_cutoff). 
In the paper, suggested values range from 3e-4 to 2e-3, + depending on the size of the model. See [Truncation Sampling as Language Model + Desmoothing](https://hf.co/papers/2210.15191) for more details. + max_length (`int`, *optional*): + The maximum length (in tokens) of the generated text, including the input. + max_new_tokens (`int`, *optional*): + The maximum number of tokens to generate. Takes precedence over `max_length`. + min_length (`int`, *optional*): + The minimum length (in tokens) of the generated text, including the input. + min_new_tokens (`int`, *optional*): + The minimum number of tokens to generate. Takes precedence over `min_length`. + num_beam_groups (`int`, *optional*): + Number of groups to divide `num_beams` into in order to ensure diversity among different groups of beams. + See [this paper](https://hf.co/papers/1610.02424) for more details. + num_beams (`int`, *optional*): + Number of beams to use for beam search. + penalty_alpha (`float`, *optional*): + The value balances the model confidence and the degeneration penalty in contrastive search decoding. + temperature (`float`, *optional*): + The value used to modulate the next token probabilities. + top_k (`int`, *optional*): + The number of highest probability vocabulary tokens to keep for top-k-filtering. + top_p (`float`, *optional*): + If set to a float < 1, only the smallest set of most probable tokens with probabilities that add up to + top_p or higher are kept for generation. + typical_p (`float`, *optional*): + Local typicality measures how similar the conditional probability of predicting a target token next is + to the expected conditional probability of predicting a random token next, given the partial text + already generated. If set to a float < 1, the smallest set of the most locally typical tokens with + probabilities that add up to typical_p or higher are kept for generation. See [this + paper](https://hf.co/papers/2202.00666) for more details. + use_cache (`bool`, *optional*): + Whether the model should use the past key/values attentions to speed up decoding. + extra_body (`Dict[str, Any]`, *optional*): + Additional provider-specific parameters to pass to the model. Refer to the provider's documentation + for supported parameters. + + Returns: + `bytes`: The generated audio. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from pathlib import Path + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + + >>> audio = client.text_to_speech("Hello world") + >>> Path("hello_world.flac").write_bytes(audio) + ``` + + Example using a third-party provider directly. Usage will be billed on your Replicate account. + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient( + ... provider="replicate", + ... api_key="your-replicate-api-key", # Pass your Replicate API key directly + ... ) + >>> audio = client.text_to_speech( + ... text="Hello world", + ... model="OuteAI/OuteTTS-0.3-500M", + ... ) + >>> Path("hello_world.flac").write_bytes(audio) + ``` + + Example using a third-party provider through Hugging Face Routing. Usage will be billed on your Hugging Face account. + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient( + ... provider="replicate", + ... api_key="hf_...", # Pass your HF token + ... ) + >>> audio = client.text_to_speech( + ...
text="Hello world", + ... model="OuteAI/OuteTTS-0.3-500M", + ... ) + >>> Path("hello_world.flac").write_bytes(audio) + ``` + Example using Replicate provider with extra parameters + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient( + ... provider="replicate", # Use replicate provider + ... api_key="hf_...", # Pass your HF token + ... ) + >>> audio = client.text_to_speech( + ... "Hello, my name is Kororo, an awesome text-to-speech model.", + ... model="hexgrad/Kokoro-82M", + ... extra_body={"voice": "af_nicole"}, + ... ) + >>> Path("hello.flac").write_bytes(audio) + ``` + + Example music-gen using "YuE-s1-7B-anneal-en-cot" on fal.ai + ```py + >>> from huggingface_hub import InferenceClient + >>> lyrics = ''' + ... [verse] + ... In the town where I was born + ... Lived a man who sailed to sea + ... And he told us of his life + ... In the land of submarines + ... So we sailed on to the sun + ... 'Til we found a sea of green + ... And we lived beneath the waves + ... In our yellow submarine + + ... [chorus] + ... We all live in a yellow submarine + ... Yellow submarine, yellow submarine + ... We all live in a yellow submarine + ... Yellow submarine, yellow submarine + ... ''' + >>> genres = "pavarotti-style tenor voice" + >>> client = InferenceClient( + ... provider="fal-ai", + ... model="m-a-p/YuE-s1-7B-anneal-en-cot", + ... api_key=..., + ... ) + >>> audio = client.text_to_speech(lyrics, extra_body={"genres": genres}) + >>> with open("output.mp3", "wb") as f: + ... f.write(audio) + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="text-to-speech", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=text, + parameters={ + "do_sample": do_sample, + "early_stopping": early_stopping, + "epsilon_cutoff": epsilon_cutoff, + "eta_cutoff": eta_cutoff, + "max_length": max_length, + "max_new_tokens": max_new_tokens, + "min_length": min_length, + "min_new_tokens": min_new_tokens, + "num_beam_groups": num_beam_groups, + "num_beams": num_beams, + "penalty_alpha": penalty_alpha, + "temperature": temperature, + "top_k": top_k, + "top_p": top_p, + "typical_p": typical_p, + "use_cache": use_cache, + **(extra_body or {}), + }, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + response = provider_helper.get_response(response) + return response + + def token_classification( + self, + text: str, + *, + model: Optional[str] = None, + aggregation_strategy: Optional["TokenClassificationAggregationStrategy"] = None, + ignore_labels: Optional[List[str]] = None, + stride: Optional[int] = None, + ) -> List[TokenClassificationOutputElement]: + """ + Perform token classification on the given text. + Usually used for sentence parsing, either grammatical, or Named Entity Recognition (NER) to understand keywords contained within text. + + Args: + text (`str`): + A string to be classified. + model (`str`, *optional*): + The model to use for the token classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended token classification model will be used. + Defaults to None. 
+ aggregation_strategy (`"TokenClassificationAggregationStrategy"`, *optional*): + The strategy used to fuse tokens based on model predictions + ignore_labels (`List[str`, *optional*): + A list of labels to ignore + stride (`int`, *optional*): + The number of overlapping tokens between chunks when splitting the input text. + + Returns: + `List[TokenClassificationOutputElement]`: List of [`TokenClassificationOutputElement`] items containing the entity group, confidence score, word, start and end index. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.token_classification("My name is Sarah Jessica Parker but you can call me Jessica") + [ + TokenClassificationOutputElement( + entity_group='PER', + score=0.9971321225166321, + word='Sarah Jessica Parker', + start=11, + end=31, + ), + TokenClassificationOutputElement( + entity_group='PER', + score=0.9773476123809814, + word='Jessica', + start=52, + end=59, + ) + ] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="token-classification", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=text, + parameters={ + "aggregation_strategy": aggregation_strategy, + "ignore_labels": ignore_labels, + "stride": stride, + }, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return TokenClassificationOutputElement.parse_obj_as_list(response) + + def translation( + self, + text: str, + *, + model: Optional[str] = None, + src_lang: Optional[str] = None, + tgt_lang: Optional[str] = None, + clean_up_tokenization_spaces: Optional[bool] = None, + truncation: Optional["TranslationTruncationStrategy"] = None, + generate_parameters: Optional[Dict[str, Any]] = None, + ) -> TranslationOutput: + """ + Convert text from one language to another. + + Check out https://huggingface.co/tasks/translation for more information on how to choose the best model for + your specific use case. Source and target languages usually depend on the model. + However, it is possible to specify source and target languages for certain models. If you are working with one of these models, + you can use `src_lang` and `tgt_lang` arguments to pass the relevant information. + + Args: + text (`str`): + A string to be translated. + model (`str`, *optional*): + The model to use for the translation task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended translation model will be used. + Defaults to None. + src_lang (`str`, *optional*): + The source language of the text. Required for models that can translate from multiple languages. + tgt_lang (`str`, *optional*): + Target language to translate to. Required for models that can translate to multiple languages. + clean_up_tokenization_spaces (`bool`, *optional*): + Whether to clean up the potential extra spaces in the text output. + truncation (`"TranslationTruncationStrategy"`, *optional*): + The truncation strategy to use. + generate_parameters (`Dict[str, Any]`, *optional*): + Additional parametrization of the text generation algorithm. + + Returns: + [`TranslationOutput`]: The generated translated text. 
+ + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + `ValueError`: + If only one of the `src_lang` and `tgt_lang` arguments is provided. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.translation("My name is Wolfgang and I live in Berlin") + TranslationOutput(translation_text='Mein Name ist Wolfgang und ich lebe in Berlin.') + >>> client.translation("My name is Wolfgang and I live in Berlin", model="Helsinki-NLP/opus-mt-en-fr") + TranslationOutput(translation_text='Je m\'appelle Wolfgang et je vis à Berlin.') + ``` + + Specifying languages: + ```py + >>> client.translation("My name is Sarah Jessica Parker but you can call me Jessica", model="facebook/mbart-large-50-many-to-many-mmt", src_lang="en_XX", tgt_lang="fr_XX") + TranslationOutput(translation_text="Mon nom est Sarah Jessica Parker mais vous pouvez m'appeler Jessica") + ``` + """ + # Throw error if only one of `src_lang` and `tgt_lang` was given + if src_lang is not None and tgt_lang is None: + raise ValueError("You cannot specify `src_lang` without specifying `tgt_lang`.") + + if src_lang is None and tgt_lang is not None: + raise ValueError("You cannot specify `tgt_lang` without specifying `src_lang`.") + + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="translation", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=text, + parameters={ + "src_lang": src_lang, + "tgt_lang": tgt_lang, + "clean_up_tokenization_spaces": clean_up_tokenization_spaces, + "truncation": truncation, + "generate_parameters": generate_parameters, + }, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + return TranslationOutput.parse_obj_as_list(response)[0] + + def visual_question_answering( + self, + image: ContentT, + question: str, + *, + model: Optional[str] = None, + top_k: Optional[int] = None, + ) -> List[VisualQuestionAnsweringOutputElement]: + """ + Answering open-ended questions based on an image. + + Args: + image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`): + The input image for the context. It can be raw bytes, an image file, a URL to an online image, or a PIL Image. + question (`str`): + Question to be answered. + model (`str`, *optional*): + The model to use for the visual question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended visual question answering model will be used. + Defaults to None. + top_k (`int`, *optional*): + The number of answers to return (will be chosen by order of likelihood). Note that fewer than + `top_k` answers will be returned if there are not enough options available within the context. + + Returns: + `List[VisualQuestionAnsweringOutputElement]`: a list of [`VisualQuestionAnsweringOutputElement`] items containing the predicted label and associated probability. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.visual_question_answering( + ... image="https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg", + ... question="What is the animal doing?" + ...
) + [ + VisualQuestionAnsweringOutputElement(score=0.778609573841095, answer='laying down'), + VisualQuestionAnsweringOutputElement(score=0.6957435607910156, answer='sitting'), + ] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="visual-question-answering", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=image, + parameters={"top_k": top_k}, + headers=self.headers, + model=model_id, + api_key=self.token, + extra_payload={"question": question, "image": _b64_encode(image)}, + ) + response = self._inner_post(request_parameters) + return VisualQuestionAnsweringOutputElement.parse_obj_as_list(response) + + def zero_shot_classification( + self, + text: str, + candidate_labels: List[str], + *, + multi_label: Optional[bool] = False, + hypothesis_template: Optional[str] = None, + model: Optional[str] = None, + ) -> List[ZeroShotClassificationOutputElement]: + """ + Provide a text and a set of candidate labels to classify the input text. + + Args: + text (`str`): + The input text to classify. + candidate_labels (`List[str]`): + The set of possible class labels to classify the text into. + multi_label (`bool`, *optional*): + Whether multiple candidate labels can be true. If `False`, the scores are normalized such that the sum of + the label likelihoods for each sequence is 1. If `True`, the labels are considered independent and + probabilities are normalized for each candidate. + hypothesis_template (`str`, *optional*): + The sentence used in conjunction with `candidate_labels` to attempt the text classification by + replacing the placeholder with the candidate labels. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. This parameter overrides the model defined at the instance level. If not provided, the default recommended zero-shot classification model will be used. + + Returns: + `List[ZeroShotClassificationOutputElement]`: List of [`ZeroShotClassificationOutputElement`] items containing the predicted labels and their confidence. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `HTTPError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example with `multi_label=False`: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> text = ( + ... "A new model offers an explanation for how the Galilean satellites formed around the solar system's" + ... " largest world. Konstantin Batygin did not set out to solve one of the solar system's most puzzling" + ... " mysteries when he went for a run up a hill in Nice, France." + ...
) + >>> labels = ["space & cosmos", "scientific discovery", "microbiology", "robots", "archeology"] + >>> client.zero_shot_classification(text, labels) + [ + ZeroShotClassificationOutputElement(label='scientific discovery', score=0.7961668968200684), + ZeroShotClassificationOutputElement(label='space & cosmos', score=0.18570658564567566), + ZeroShotClassificationOutputElement(label='microbiology', score=0.00730885099619627), + ZeroShotClassificationOutputElement(label='archeology', score=0.006258360575884581), + ZeroShotClassificationOutputElement(label='robots', score=0.004559356719255447), + ] + >>> client.zero_shot_classification(text, labels, multi_label=True) + [ + ZeroShotClassificationOutputElement(label='scientific discovery', score=0.9829297661781311), + ZeroShotClassificationOutputElement(label='space & cosmos', score=0.755190908908844), + ZeroShotClassificationOutputElement(label='microbiology', score=0.0005462635890580714), + ZeroShotClassificationOutputElement(label='archeology', score=0.00047131875180639327), + ZeroShotClassificationOutputElement(label='robots', score=0.00030448526376858354), + ] + ``` + + Example with `multi_label=True` and a custom `hypothesis_template`: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.zero_shot_classification( + ... text="I really like our dinner and I'm very happy. I don't like the weather though.", + ... labels=["positive", "negative", "pessimistic", "optimistic"], + ... multi_label=True, + ... hypothesis_template="This text is {} towards the weather" + ... ) + [ + ZeroShotClassificationOutputElement(label='negative', score=0.9231801629066467), + ZeroShotClassificationOutputElement(label='pessimistic', score=0.8760990500450134), + ZeroShotClassificationOutputElement(label='optimistic', score=0.0008674879791215062), + ZeroShotClassificationOutputElement(label='positive', score=0.0005250611575320363) + ] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="zero-shot-classification", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=text, + parameters={ + "candidate_labels": candidate_labels, + "multi_label": multi_label, + "hypothesis_template": hypothesis_template, + }, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = self._inner_post(request_parameters) + output = _bytes_to_dict(response) + return [ + ZeroShotClassificationOutputElement.parse_obj_as_instance({"label": label, "score": score}) + for label, score in zip(output["labels"], output["scores"]) + ] + + def zero_shot_image_classification( + self, + image: ContentT, + candidate_labels: List[str], + *, + model: Optional[str] = None, + hypothesis_template: Optional[str] = None, + # deprecated argument + labels: List[str] = None, # type: ignore + ) -> List[ZeroShotImageClassificationOutputElement]: + """ + Provide input image and text labels to predict text labels for the image. + + Args: + image (`Union[str, Path, bytes, BinaryIO, PIL.Image.Image]`): + The input image to caption. It can be raw bytes, an image file, a URL to an online image, or a PIL Image. + candidate_labels (`List[str]`): + The candidate labels for this image + labels (`List[str]`, *optional*): + (deprecated) List of string possible labels. There must be at least 2 labels. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. 
This parameter overrides the model defined at the instance level. If not provided, the default recommended zero-shot image classification model will be used.
+            hypothesis_template (`str`, *optional*):
+                The sentence used in conjunction with `candidate_labels` to attempt the image classification by
+                replacing the placeholder with the candidate labels.
+
+        Returns:
+            `List[ZeroShotImageClassificationOutputElement]`: List of [`ZeroShotImageClassificationOutputElement`] items containing the predicted labels and their confidence.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `HTTPError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient()
+
+        >>> client.zero_shot_image_classification(
+        ...     "https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg",
+        ...     labels=["dog", "cat", "horse"],
+        ... )
+        [ZeroShotImageClassificationOutputElement(label='dog', score=0.956),...]
+        ```
+        """
+        # Raise a ValueError if fewer than 2 labels are provided
+        if len(candidate_labels) < 2:
+            raise ValueError("You must specify at least 2 classes to compare.")
+
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="zero-shot-image-classification", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=image,
+            parameters={
+                "candidate_labels": candidate_labels,
+                "hypothesis_template": hypothesis_template,
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = self._inner_post(request_parameters)
+        return ZeroShotImageClassificationOutputElement.parse_obj_as_list(response)
+
+    @_deprecate_method(
+        version="0.35.0",
+        message=(
+            "HF Inference API is getting revamped and will only support warm models in the future (no cold start allowed)."
+            " Use `HfApi.list_models(..., inference_provider='...')` to list warm models per provider."
+        ),
+    )
+    def list_deployed_models(
+        self, frameworks: Union[None, str, Literal["all"], List[str]] = None
+    ) -> Dict[str, List[str]]:
+        """
+        List models deployed on the HF Serverless Inference API service.
+
+        This helper checks deployed models framework by framework. By default, it will check the 4 main frameworks that
+        are supported and account for 95% of the hosted models. However, if you want a complete list of models you can
+        specify `frameworks="all"` as input. Alternatively, if you know beforehand which framework you are interested
+        in, you can also restrict the search to this one (e.g. `frameworks="text-generation-inference"`). The more
+        frameworks are checked, the more time it will take.
+
+        This endpoint method does not return a live list of all models available for the HF Inference API service.
+        It searches over a cached list of models that were recently available and the list may not be up to date.
+        If you want to know the live status of a specific model, use [`~InferenceClient.get_model_status`].
+
+        This endpoint method is mostly useful for discoverability. If you already know which model you want to use and want to
+        check its availability, you can directly use [`~InferenceClient.get_model_status`].
+
+        Args:
+            frameworks (`Literal["all"]` or `List[str]` or `str`, *optional*):
+                The frameworks to filter on. By default only a subset of the available frameworks are tested. If set to
+                "all", all available frameworks will be tested. 
It is also possible to provide a single framework or a + custom set of frameworks to check. + + Returns: + `Dict[str, List[str]]`: A dictionary mapping task names to a sorted list of model IDs. + + Example: + ```python + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + + # Discover zero-shot-classification models currently deployed + >>> models = client.list_deployed_models() + >>> models["zero-shot-classification"] + ['Narsil/deberta-large-mnli-zero-cls', 'facebook/bart-large-mnli', ...] + + # List from only 1 framework + >>> client.list_deployed_models("text-generation-inference") + {'text-generation': ['bigcode/starcoder', 'meta-llama/Llama-2-70b-chat-hf', ...], ...} + ``` + """ + if self.provider != "hf-inference": + raise ValueError(f"Listing deployed models is not supported on '{self.provider}'.") + + # Resolve which frameworks to check + if frameworks is None: + frameworks = constants.MAIN_INFERENCE_API_FRAMEWORKS + elif frameworks == "all": + frameworks = constants.ALL_INFERENCE_API_FRAMEWORKS + elif isinstance(frameworks, str): + frameworks = [frameworks] + frameworks = list(set(frameworks)) + + # Fetch them iteratively + models_by_task: Dict[str, List[str]] = {} + + def _unpack_response(framework: str, items: List[Dict]) -> None: + for model in items: + if framework == "sentence-transformers": + # Model running with the `sentence-transformers` framework can work with both tasks even if not + # branded as such in the API response + models_by_task.setdefault("feature-extraction", []).append(model["model_id"]) + models_by_task.setdefault("sentence-similarity", []).append(model["model_id"]) + else: + models_by_task.setdefault(model["task"], []).append(model["model_id"]) + + for framework in frameworks: + response = get_session().get( + f"{constants.INFERENCE_ENDPOINT}/framework/{framework}", headers=build_hf_headers(token=self.token) + ) + hf_raise_for_status(response) + _unpack_response(framework, response.json()) + + # Sort alphabetically for discoverability and return + for task, models in models_by_task.items(): + models_by_task[task] = sorted(set(models), key=lambda x: x.lower()) + return models_by_task + + def get_endpoint_info(self, *, model: Optional[str] = None) -> Dict[str, Any]: + """ + Get information about the deployed endpoint. + + This endpoint is only available on endpoints powered by Text-Generation-Inference (TGI) or Text-Embedding-Inference (TEI). + Endpoints powered by `transformers` return an empty payload. + + Args: + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None. + + Returns: + `Dict[str, Any]`: Information about the endpoint. 
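+
+        Example (editor's sketch, not from the upstream docstring — the endpoint URL and returned value are
+        illustrative assumptions, consistent with the full example below):
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient("https://my-endpoint.endpoints.huggingface.cloud")  # hypothetical TGI endpoint
+        >>> info = client.get_endpoint_info()
+        >>> info["max_total_tokens"]
+        8192
+        ```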
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
+        >>> client.get_endpoint_info()
+        {
+            'model_id': 'meta-llama/Meta-Llama-3-70B-Instruct',
+            'model_sha': None,
+            'model_dtype': 'torch.float16',
+            'model_device_type': 'cuda',
+            'model_pipeline_tag': None,
+            'max_concurrent_requests': 128,
+            'max_best_of': 2,
+            'max_stop_sequences': 4,
+            'max_input_length': 8191,
+            'max_total_tokens': 8192,
+            'waiting_served_ratio': 0.3,
+            'max_batch_total_tokens': 1259392,
+            'max_waiting_tokens': 20,
+            'max_batch_size': None,
+            'validation_workers': 32,
+            'max_client_batch_size': 4,
+            'version': '2.0.2',
+            'sha': 'dccab72549635c7eb5ddb17f43f0b7cdff07c214',
+            'docker_label': 'sha-dccab72'
+        }
+        ```
+        """
+        if self.provider != "hf-inference":
+            raise ValueError(f"Getting endpoint info is not supported on '{self.provider}'.")
+
+        model = model or self.model
+        if model is None:
+            raise ValueError("Model id not provided.")
+        if model.startswith(("http://", "https://")):
+            url = model.rstrip("/") + "/info"
+        else:
+            url = f"{constants.INFERENCE_ENDPOINT}/models/{model}/info"
+
+        response = get_session().get(url, headers=build_hf_headers(token=self.token))
+        hf_raise_for_status(response)
+        return response.json()
+
+    def health_check(self, model: Optional[str] = None) -> bool:
+        """
+        Check the health of the deployed endpoint.
+
+        Health check is only available with Inference Endpoints powered by Text-Generation-Inference (TGI) or Text-Embedding-Inference (TEI).
+        For the serverless Inference API, please use [`InferenceClient.get_model_status`] instead.
+
+        Args:
+            model (`str`, *optional*):
+                URL of the Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
+
+        Returns:
+            `bool`: True if everything is working fine.
+
+        Example:
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient("https://jzgu0buei5.us-east-1.aws.endpoints.huggingface.cloud")
+        >>> client.health_check()
+        True
+        ```
+        """
+        if self.provider != "hf-inference":
+            raise ValueError(f"Health check is not supported on '{self.provider}'.")
+
+        model = model or self.model
+        if model is None:
+            raise ValueError("Model id not provided.")
+        if not model.startswith(("http://", "https://")):
+            raise ValueError(
+                "Model must be an Inference Endpoint URL. For serverless Inference API, please use `InferenceClient.get_model_status`."
+            )
+        url = model.rstrip("/") + "/health"
+
+        response = get_session().get(url, headers=build_hf_headers(token=self.token))
+        return response.status_code == 200
+
+    @_deprecate_method(
+        version="0.35.0",
+        message=(
+            "HF Inference API is getting revamped and will only support warm models in the future (no cold start allowed)."
+            " Use `HfApi.model_info` to get the model status both with HF Inference API and external providers."
+        ),
+    )
+    def get_model_status(self, model: Optional[str] = None) -> ModelStatus:
+        """
+        Get the status of a model hosted on the HF Inference API.
+
+        This endpoint is mostly useful when you already know which model you want to use and want to check its
+        availability. If you want to discover already deployed models, use [`~InferenceClient.list_deployed_models`] instead.
+
+        Args:
+            model (`str`, *optional*):
+                Identifier of the model whose status will be checked. If no model is provided,
+                the model associated with this instance of [`InferenceClient`] will be used. 
Only HF Inference API service can be checked so the + identifier cannot be a URL. + + + Returns: + [`ModelStatus`]: An instance of ModelStatus dataclass, containing information, + about the state of the model: load, state, compute type and framework. + + Example: + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient() + >>> client.get_model_status("meta-llama/Meta-Llama-3-8B-Instruct") + ModelStatus(loaded=True, state='Loaded', compute_type='gpu', framework='text-generation-inference') + ``` + """ + if self.provider != "hf-inference": + raise ValueError(f"Getting model status is not supported on '{self.provider}'.") + + model = model or self.model + if model is None: + raise ValueError("Model id not provided.") + if model.startswith("https://"): + raise NotImplementedError("Model status is only available for Inference API endpoints.") + url = f"{constants.INFERENCE_ENDPOINT}/status/{model}" + + response = get_session().get(url, headers=build_hf_headers(token=self.token)) + hf_raise_for_status(response) + response_data = response.json() + + if "error" in response_data: + raise ValueError(response_data["error"]) + + return ModelStatus( + loaded=response_data["loaded"], + state=response_data["state"], + compute_type=response_data["compute_type"], + framework=response_data["framework"], + ) + + @property + def chat(self) -> "ProxyClientChat": + return ProxyClientChat(self) + + +class _ProxyClient: + """Proxy class to be able to call `client.chat.completion.create(...)` as OpenAI client.""" + + def __init__(self, client: InferenceClient): + self._client = client + + +class ProxyClientChat(_ProxyClient): + """Proxy class to be able to call `client.chat.completion.create(...)` as OpenAI client.""" + + @property + def completions(self) -> "ProxyClientChatCompletions": + return ProxyClientChatCompletions(self._client) + + +class ProxyClientChatCompletions(_ProxyClient): + """Proxy class to be able to call `client.chat.completion.create(...)` as OpenAI client.""" + + @property + def create(self): + return self._client.chat_completion diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_common.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..c842ee4ea361ea1ddb5df59c99bbdb9fa6cd2671 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_common.py @@ -0,0 +1,427 @@ +# coding=utf-8 +# Copyright 2023-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+
+"""Contains utilities used by both the sync and async inference clients."""
+
+import base64
+import io
+import json
+import logging
+from contextlib import contextmanager
+from dataclasses import dataclass
+from pathlib import Path
+from typing import (
+    TYPE_CHECKING,
+    Any,
+    AsyncIterable,
+    BinaryIO,
+    ContextManager,
+    Dict,
+    Generator,
+    Iterable,
+    List,
+    Literal,
+    NoReturn,
+    Optional,
+    Union,
+    overload,
+)
+
+from requests import HTTPError
+
+from huggingface_hub.errors import (
+    GenerationError,
+    IncompleteGenerationError,
+    OverloadedError,
+    TextGenerationError,
+    UnknownError,
+    ValidationError,
+)
+
+from ..utils import get_session, is_aiohttp_available, is_numpy_available, is_pillow_available
+from ._generated.types import ChatCompletionStreamOutput, TextGenerationStreamOutput
+
+
+if TYPE_CHECKING:
+    from aiohttp import ClientResponse, ClientSession
+    from PIL.Image import Image
+
+# TYPES
+UrlT = str
+PathT = Union[str, Path]
+BinaryT = Union[bytes, BinaryIO]
+ContentT = Union[BinaryT, PathT, UrlT, "Image"]
+
+# Used to set an "Accept: image/png" header
+TASKS_EXPECTING_IMAGES = {"text-to-image", "image-to-image"}
+
+logger = logging.getLogger(__name__)
+
+
+@dataclass
+class RequestParameters:
+    url: str
+    task: str
+    model: Optional[str]
+    json: Optional[Union[str, Dict, List]]
+    data: Optional[ContentT]
+    headers: Dict[str, Any]
+
+
+# Dataclass for ModelStatus, used in the `get_model_status` function.
+@dataclass
+class ModelStatus:
+    """
+    This dataclass represents the model status in the HF Inference API.
+
+    Args:
+        loaded (`bool`):
+            Whether the model is currently loaded into HF's Inference API. Models
+            are loaded on-demand, leading to the user's first request taking longer.
+            If a model is loaded, you can be assured that it is in a healthy state.
+        state (`str`):
+            The current state of the model. This can be 'Loaded', 'Loadable', 'TooBig'.
+            If a model's state is 'Loadable', it's not too big and has a supported
+            backend. Loadable models are automatically loaded when the user first
+            requests inference on the endpoint. This means it is transparent for the
+            user to load a model, except that the first call takes longer to complete.
+        compute_type (`Dict`):
+            Information about the compute resource the model is using or will use, such as 'gpu' type and number of
+            replicas.
+        framework (`str`):
+            The name of the framework that the model was built with, such as 'transformers'
+            or 'text-generation-inference'.
+    """
+
+    loaded: bool
+    state: str
+    compute_type: Dict
+    framework: str
+
+
+## IMPORT UTILS
+
+
+def _import_aiohttp():
+    # Make sure `aiohttp` is installed on the machine.
+    if not is_aiohttp_available():
+        raise ImportError("Please install aiohttp to use `AsyncInferenceClient` (`pip install aiohttp`).")
+    import aiohttp
+
+    return aiohttp
+
+
+def _import_numpy():
+    """Make sure `numpy` is installed on the machine."""
+    if not is_numpy_available():
+        raise ImportError("Please install numpy to deal with embeddings (`pip install numpy`).")
+    import numpy
+
+    return numpy
+
+
+def _import_pil_image():
+    """Make sure `PIL` is installed on the machine."""
+    if not is_pillow_available():
+        raise ImportError(
+            "Please install Pillow to deal with images (`pip install Pillow`). If you don't want the image to be"
+            " post-processed, use `client.post(...)` and get the raw response from the server."
+ ) + from PIL import Image + + return Image + + +## ENCODING / DECODING UTILS + + +@overload +def _open_as_binary( + content: ContentT, +) -> ContextManager[BinaryT]: ... # means "if input is not None, output is not None" + + +@overload +def _open_as_binary( + content: Literal[None], +) -> ContextManager[Literal[None]]: ... # means "if input is None, output is None" + + +@contextmanager # type: ignore +def _open_as_binary(content: Optional[ContentT]) -> Generator[Optional[BinaryT], None, None]: + """Open `content` as a binary file, either from a URL, a local path, or raw bytes. + + Do nothing if `content` is None, + + TODO: handle base64 as input + """ + # If content is a string => must be either a URL or a path + if isinstance(content, str): + if content.startswith("https://") or content.startswith("http://"): + logger.debug(f"Downloading content from {content}") + yield get_session().get(content).content # TODO: retrieve as stream and pipe to post request ? + return + content = Path(content) + if not content.exists(): + raise FileNotFoundError( + f"File not found at {content}. If `data` is a string, it must either be a URL or a path to a local" + " file. To pass raw content, please encode it as bytes first." + ) + + # If content is a Path => open it + if isinstance(content, Path): + logger.debug(f"Opening content from {content}") + with content.open("rb") as f: + yield f + elif hasattr(content, "save"): # PIL Image + logger.debug("Converting PIL Image to bytes") + buffer = io.BytesIO() + content.save(buffer, format="PNG") + buffer.seek(0) + yield buffer + else: + # Otherwise: already a file-like object or None + yield content + + +def _b64_encode(content: ContentT) -> str: + """Encode a raw file (image, audio) into base64. Can be bytes, an opened file, a path or a URL.""" + with _open_as_binary(content) as data: + data_as_bytes = data if isinstance(data, bytes) else data.read() + return base64.b64encode(data_as_bytes).decode() + + +def _b64_to_image(encoded_image: str) -> "Image": + """Parse a base64-encoded string into a PIL Image.""" + Image = _import_pil_image() + return Image.open(io.BytesIO(base64.b64decode(encoded_image))) + + +def _bytes_to_list(content: bytes) -> List: + """Parse bytes from a Response object into a Python list. + + Expects the response body to be JSON-encoded data. + + NOTE: This is exactly the same implementation as `_bytes_to_dict` and will not complain if the returned data is a + dictionary. The only advantage of having both is to help the user (and mypy) understand what kind of data to expect. + """ + return json.loads(content.decode()) + + +def _bytes_to_dict(content: bytes) -> Dict: + """Parse bytes from a Response object into a Python dictionary. + + Expects the response body to be JSON-encoded data. + + NOTE: This is exactly the same implementation as `_bytes_to_list` and will not complain if the returned data is a + list. The only advantage of having both is to help the user (and mypy) understand what kind of data to expect. + """ + return json.loads(content.decode()) + + +def _bytes_to_image(content: bytes) -> "Image": + """Parse bytes from a Response object into a PIL Image. + + Expects the response body to be raw bytes. To deal with b64 encoded images, use `_b64_to_image` instead. 
+    """
+    Image = _import_pil_image()
+    return Image.open(io.BytesIO(content))
+
+
+def _as_dict(response: Union[bytes, Dict]) -> Dict:
+    return json.loads(response) if isinstance(response, bytes) else response
+
+
+## PAYLOAD UTILS
+
+
+## STREAMING UTILS
+
+
+def _stream_text_generation_response(
+    bytes_output_as_lines: Iterable[bytes], details: bool
+) -> Union[Iterable[str], Iterable[TextGenerationStreamOutput]]:
+    """Used in `InferenceClient.text_generation`."""
+    # Parse ServerSentEvents
+    for byte_payload in bytes_output_as_lines:
+        try:
+            output = _format_text_generation_stream_output(byte_payload, details)
+        except StopIteration:
+            break
+        if output is not None:
+            yield output
+
+
+async def _async_stream_text_generation_response(
+    bytes_output_as_lines: AsyncIterable[bytes], details: bool
+) -> Union[AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]:
+    """Used in `AsyncInferenceClient.text_generation`."""
+    # Parse ServerSentEvents
+    async for byte_payload in bytes_output_as_lines:
+        try:
+            output = _format_text_generation_stream_output(byte_payload, details)
+        except StopIteration:
+            break
+        if output is not None:
+            yield output
+
+
+def _format_text_generation_stream_output(
+    byte_payload: bytes, details: bool
+) -> Optional[Union[str, TextGenerationStreamOutput]]:
+    if not byte_payload.startswith(b"data:"):
+        return None  # empty line
+
+    if byte_payload.strip() == b"data: [DONE]":
+        raise StopIteration("[DONE] signal received.")
+
+    # Decode payload
+    payload = byte_payload.decode("utf-8")
+    json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
+
+    # Either an error is being returned
+    if json_payload.get("error") is not None:
+        raise _parse_text_generation_error(json_payload["error"], json_payload.get("error_type"))
+
+    # Or parse the token payload
+    output = TextGenerationStreamOutput.parse_obj_as_instance(json_payload)
+    return output.token.text if not details else output
+
+
+def _stream_chat_completion_response(
+    bytes_lines: Iterable[bytes],
+) -> Iterable[ChatCompletionStreamOutput]:
+    """Used in `InferenceClient.chat_completion` if model is served with TGI."""
+    for item in bytes_lines:
+        try:
+            output = _format_chat_completion_stream_output(item)
+        except StopIteration:
+            break
+        if output is not None:
+            yield output
+
+
+async def _async_stream_chat_completion_response(
+    bytes_lines: AsyncIterable[bytes],
+) -> AsyncIterable[ChatCompletionStreamOutput]:
+    """Used in `AsyncInferenceClient.chat_completion`."""
+    async for item in bytes_lines:
+        try:
+            output = _format_chat_completion_stream_output(item)
+        except StopIteration:
+            break
+        if output is not None:
+            yield output
+
+
+def _format_chat_completion_stream_output(
+    byte_payload: bytes,
+) -> Optional[ChatCompletionStreamOutput]:
+    if not byte_payload.startswith(b"data:"):
+        return None  # empty line
+
+    if byte_payload.strip() == b"data: [DONE]":
+        raise StopIteration("[DONE] signal received.")
+
+    # Decode payload
+    payload = byte_payload.decode("utf-8")
+    json_payload = json.loads(payload.lstrip("data:").rstrip("\n"))
+
+    # Either an error is being returned
+    if json_payload.get("error") is not None:
+        raise _parse_text_generation_error(json_payload["error"], json_payload.get("error_type"))
+
+    # Or parse the payload as a regular stream token
+    return ChatCompletionStreamOutput.parse_obj_as_instance(json_payload)
+
+
+async def _async_yield_from(client: "ClientSession", response: "ClientResponse") -> AsyncIterable[bytes]:
+    async for byte_payload in response.content:
+        yield byte_payload.strip()
+    await client.close()
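+
+
+# Editor's note (illustrative sketch, not upstream code): the `_format_*` helpers above
+# consume Server-Sent-Events lines of the form `data: {...}`. Assuming the generated
+# output types accept the fields shown, the protocol handling looks like:
+#
+#     _format_chat_completion_stream_output(b"")              # -> None (keep-alive line)
+#     _format_chat_completion_stream_output(b"data: [DONE]")  # raises StopIteration
+#     _format_text_generation_stream_output(
+#         b'data: {"index": 0, "token": {"id": 42, "text": "Hello", "logprob": 0.0, "special": false}}',
+#         details=False,
+#     )                                                        # -> "Hello"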
+
+
+# "TGI servers" are servers running with the `text-generation-inference` backend.
+# This backend is the go-to solution to run large language models at scale. However,
+# for some smaller models (e.g. "gpt2") the default `transformers` + `api-inference`
+# solution is still in use.
+#
+# Both approaches have very similar APIs, but not exactly the same. What we do first in
+# the `text_generation` method is to assume the model is served via TGI. If we realize
+# it's not the case (i.e. we receive an HTTP 400 Bad Request), we fall back to the
+# default API with a warning message. When that's the case, we remember the unsupported
+# attributes for this model in the `_UNSUPPORTED_TEXT_GENERATION_KWARGS` global variable.
+#
+# In addition, TGI servers have a built-in API route for chat-completion, which is not
+# available on the default API. We use this route to provide a more consistent behavior
+# when available.
+#
+# For more details, see https://github.com/huggingface/text-generation-inference and
+# https://huggingface.co/docs/api-inference/detailed_parameters#text-generation-task.
+
+_UNSUPPORTED_TEXT_GENERATION_KWARGS: Dict[Optional[str], List[str]] = {}
+
+
+def _set_unsupported_text_generation_kwargs(model: Optional[str], unsupported_kwargs: List[str]) -> None:
+    _UNSUPPORTED_TEXT_GENERATION_KWARGS.setdefault(model, []).extend(unsupported_kwargs)
+
+
+def _get_unsupported_text_generation_kwargs(model: Optional[str]) -> List[str]:
+    return _UNSUPPORTED_TEXT_GENERATION_KWARGS.get(model, [])
+
+
+# TEXT GENERATION ERRORS
+# ----------------------
+# Text-generation errors are parsed separately to handle, as much as possible, the errors returned by the
+# text-generation-inference project (https://github.com/huggingface/text-generation-inference).
+# ----------------------
+
+
+def raise_text_generation_error(http_error: HTTPError) -> NoReturn:
+    """
+    Try to parse the text-generation-inference error message and raise an HTTPError in any case.
+
+    Args:
+        http_error (`HTTPError`):
+            The HTTPError that has been raised.
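+
+    Example (editor's sketch, not from the upstream docstring — assumes `response` is a
+    `requests.Response` from a text-generation request):
+    ```py
+    >>> from requests import HTTPError
+    >>> from huggingface_hub.utils import hf_raise_for_status
+    >>> try:
+    ...     hf_raise_for_status(response)
+    ... except HTTPError as e:
+    ...     raise_text_generation_error(e)  # re-raised as a specialized TextGenerationError subclass if possible
+    ```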
+ """ + # Try to parse a Text Generation Inference error + + try: + # Hacky way to retrieve payload in case of aiohttp error + payload = getattr(http_error, "response_error_payload", None) or http_error.response.json() + error = payload.get("error") + error_type = payload.get("error_type") + except Exception: # no payload + raise http_error + + # If error_type => more information than `hf_raise_for_status` + if error_type is not None: + exception = _parse_text_generation_error(error, error_type) + raise exception from http_error + + # Otherwise, fallback to default error + raise http_error + + +def _parse_text_generation_error(error: Optional[str], error_type: Optional[str]) -> TextGenerationError: + if error_type == "generation": + return GenerationError(error) # type: ignore + if error_type == "incomplete_generation": + return IncompleteGenerationError(error) # type: ignore + if error_type == "overloaded": + return OverloadedError(error) # type: ignore + if error_type == "validation": + return ValidationError(error) # type: ignore + return UnknownError(error) # type: ignore diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/__init__.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/_async_client.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/_async_client.py new file mode 100644 index 0000000000000000000000000000000000000000..162d89369fc6aa2e7df4f5c126a3fedc092c48b6 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/_async_client.py @@ -0,0 +1,3585 @@ +# coding=utf-8 +# Copyright 2023-present, the HuggingFace Inc. team. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +# WARNING +# This entire file has been adapted from the sync-client code in `src/huggingface_hub/inference/_client.py`. +# Any change in InferenceClient will be automatically reflected in AsyncInferenceClient. +# To re-generate the code, run `make style` or `python ./utils/generate_async_inference_client.py --update`. 
+# WARNING +import asyncio +import base64 +import logging +import re +import warnings +from typing import TYPE_CHECKING, Any, AsyncIterable, Dict, List, Literal, Optional, Set, Union, overload + +from huggingface_hub import constants +from huggingface_hub.errors import InferenceTimeoutError +from huggingface_hub.inference._common import ( + TASKS_EXPECTING_IMAGES, + ContentT, + ModelStatus, + RequestParameters, + _async_stream_chat_completion_response, + _async_stream_text_generation_response, + _b64_encode, + _b64_to_image, + _bytes_to_dict, + _bytes_to_image, + _bytes_to_list, + _get_unsupported_text_generation_kwargs, + _import_numpy, + _open_as_binary, + _set_unsupported_text_generation_kwargs, + raise_text_generation_error, +) +from huggingface_hub.inference._generated.types import ( + AudioClassificationOutputElement, + AudioClassificationOutputTransform, + AudioToAudioOutputElement, + AutomaticSpeechRecognitionOutput, + ChatCompletionInputGrammarType, + ChatCompletionInputMessage, + ChatCompletionInputStreamOptions, + ChatCompletionInputTool, + ChatCompletionInputToolChoiceClass, + ChatCompletionInputToolChoiceEnum, + ChatCompletionOutput, + ChatCompletionStreamOutput, + DocumentQuestionAnsweringOutputElement, + FillMaskOutputElement, + ImageClassificationOutputElement, + ImageClassificationOutputTransform, + ImageSegmentationOutputElement, + ImageSegmentationSubtask, + ImageToImageTargetSize, + ImageToTextOutput, + ObjectDetectionOutputElement, + Padding, + QuestionAnsweringOutputElement, + SummarizationOutput, + SummarizationTruncationStrategy, + TableQuestionAnsweringOutputElement, + TextClassificationOutputElement, + TextClassificationOutputTransform, + TextGenerationInputGrammarType, + TextGenerationOutput, + TextGenerationStreamOutput, + TextToSpeechEarlyStoppingEnum, + TokenClassificationAggregationStrategy, + TokenClassificationOutputElement, + TranslationOutput, + TranslationTruncationStrategy, + VisualQuestionAnsweringOutputElement, + ZeroShotClassificationOutputElement, + ZeroShotImageClassificationOutputElement, +) +from huggingface_hub.inference._providers import PROVIDER_OR_POLICY_T, get_provider_helper +from huggingface_hub.utils import build_hf_headers, get_session, hf_raise_for_status +from huggingface_hub.utils._auth import get_token +from huggingface_hub.utils._deprecation import _deprecate_method + +from .._common import _async_yield_from, _import_aiohttp + + +if TYPE_CHECKING: + import numpy as np + from aiohttp import ClientResponse, ClientSession + from PIL.Image import Image + +logger = logging.getLogger(__name__) + + +MODEL_KWARGS_NOT_USED_REGEX = re.compile(r"The following `model_kwargs` are not used by the model: \[(.*?)\]") + + +class AsyncInferenceClient: + """ + Initialize a new Inference Client. + + [`InferenceClient`] aims to provide a unified experience to perform inference. The client can be used + seamlessly with either the (free) Inference API, self-hosted Inference Endpoints, or third-party Inference Providers. + + Args: + model (`str`, `optional`): + The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct` + or a URL to a deployed Inference Endpoint. Defaults to None, in which case a recommended model is + automatically selected for the task. + Note: for better compatibility with OpenAI's client, `model` has been aliased as `base_url`. Those 2 + arguments are mutually exclusive. 
If using `base_url` for chat completion, the `/chat/completions` suffix + path will be appended to the base URL (see the [TGI Messages API](https://huggingface.co/docs/text-generation-inference/en/messages_api) + documentation for details). When passing a URL as `model`, the client will not append any suffix path to it. + provider (`str`, *optional*): + Name of the provider to use for inference. Can be `"black-forest-labs"`, `"cerebras"`, `"cohere"`, `"fal-ai"`, `"featherless-ai"`, `"fireworks-ai"`, `"groq"`, `"hf-inference"`, `"hyperbolic"`, `"nebius"`, `"novita"`, `"nscale"`, `"openai"`, `"replicate"`, "sambanova"` or `"together"`. + Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers. + If model is a URL or `base_url` is passed, then `provider` is not used. + token (`str`, *optional*): + Hugging Face token. Will default to the locally saved token if not provided. + Note: for better compatibility with OpenAI's client, `token` has been aliased as `api_key`. Those 2 + arguments are mutually exclusive and have the exact same behavior. + timeout (`float`, `optional`): + The maximum number of seconds to wait for a response from the server. Defaults to None, meaning it will loop until the server is available. + headers (`Dict[str, str]`, `optional`): + Additional headers to send to the server. By default only the authorization and user-agent headers are sent. + Values in this dictionary will override the default values. + bill_to (`str`, `optional`): + The billing account to use for the requests. By default the requests are billed on the user's account. + Requests can only be billed to an organization the user is a member of, and which has subscribed to Enterprise Hub. + cookies (`Dict[str, str]`, `optional`): + Additional cookies to send to the server. + trust_env ('bool', 'optional'): + Trust environment settings for proxy configuration if the parameter is `True` (`False` by default). + proxies (`Any`, `optional`): + Proxies to use for the request. + base_url (`str`, `optional`): + Base URL to run inference. This is a duplicated argument from `model` to make [`InferenceClient`] + follow the same pattern as `openai.OpenAI` client. Cannot be used if `model` is set. Defaults to None. + api_key (`str`, `optional`): + Token to use for authentication. This is a duplicated argument from `token` to make [`InferenceClient`] + follow the same pattern as `openai.OpenAI` client. Cannot be used if `token` is set. Defaults to None. + """ + + def __init__( + self, + model: Optional[str] = None, + *, + provider: Optional[PROVIDER_OR_POLICY_T] = None, + token: Optional[str] = None, + timeout: Optional[float] = None, + headers: Optional[Dict[str, str]] = None, + cookies: Optional[Dict[str, str]] = None, + trust_env: bool = False, + proxies: Optional[Any] = None, + bill_to: Optional[str] = None, + # OpenAI compatibility + base_url: Optional[str] = None, + api_key: Optional[str] = None, + ) -> None: + if model is not None and base_url is not None: + raise ValueError( + "Received both `model` and `base_url` arguments. Please provide only one of them." + " `base_url` is an alias for `model` to make the API compatible with OpenAI's client." + " If using `base_url` for chat completion, the `/chat/completions` suffix path will be appended to the base url." + " When passing a URL as `model`, the client will not append any suffix path to it." 
+            )
+        if token is not None and api_key is not None:
+            raise ValueError(
+                "Received both `token` and `api_key` arguments. Please provide only one of them."
+                " `api_key` is an alias for `token` to make the API compatible with OpenAI's client."
+                " It has the exact same behavior as `token`."
+            )
+        token = token if token is not None else api_key
+        if isinstance(token, bool):
+            # Legacy behavior: previously it was possible to pass `token=False` to disable authentication. This is not
+            # supported anymore as authentication is required. Better to explicitly raise here rather than risking
+            # sending the locally saved token without the user knowing about it.
+            if token is False:
+                raise ValueError(
+                    "Cannot use `token=False` to disable authentication as authentication is required to run Inference."
+                )
+            warnings.warn(
+                "Using `token=True` to automatically use the locally saved token is deprecated and will be removed in a future release. "
+                "Please use `token=None` instead (default).",
+                DeprecationWarning,
+            )
+            token = get_token()
+
+        self.model: Optional[str] = base_url or model
+        self.token: Optional[str] = token
+
+        self.headers = {**headers} if headers is not None else {}
+        if bill_to is not None:
+            if (
+                constants.HUGGINGFACE_HEADER_X_BILL_TO in self.headers
+                and self.headers[constants.HUGGINGFACE_HEADER_X_BILL_TO] != bill_to
+            ):
+                warnings.warn(
+                    f"Overriding existing '{self.headers[constants.HUGGINGFACE_HEADER_X_BILL_TO]}' value in headers with '{bill_to}'.",
+                    UserWarning,
+                )
+            self.headers[constants.HUGGINGFACE_HEADER_X_BILL_TO] = bill_to
+
+            if token is not None and not token.startswith("hf_"):
+                warnings.warn(
+                    "You've provided an external provider's API key, so requests will be billed directly by the provider. "
+                    "The `bill_to` parameter is only applicable for Hugging Face billing and will be ignored.",
+                    UserWarning,
+                )
+
+        # Configure provider
+        self.provider = provider
+
+        self.cookies = cookies
+        self.timeout = timeout
+        self.trust_env = trust_env
+        self.proxies = proxies
+
+        # Keep track of the sessions to close them properly
+        self._sessions: Dict["ClientSession", Set["ClientResponse"]] = dict()
+
+    def __repr__(self):
+        return f"<AsyncInferenceClient(model='{self.model}', timeout={self.timeout})>"
+
+    @overload
+    async def _inner_post(  # type: ignore[misc]
+        self, request_parameters: RequestParameters, *, stream: Literal[False] = ...
+    ) -> bytes: ...
+
+    @overload
+    async def _inner_post(  # type: ignore[misc]
+        self, request_parameters: RequestParameters, *, stream: Literal[True] = ...
+    ) -> AsyncIterable[bytes]: ...
+
+    @overload
+    async def _inner_post(
+        self, request_parameters: RequestParameters, *, stream: bool = False
+    ) -> Union[bytes, AsyncIterable[bytes]]: ...
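+
+    # Editor's note (illustrative, not upstream code): the three overloads above let
+    # static type checkers narrow the return type of `_inner_post` from the literal
+    # value of `stream`. A sketch of both call shapes, assuming `params` is a prepared
+    # `RequestParameters` instance:
+    #
+    #     raw: bytes = await client._inner_post(params)  # stream=False -> full response body
+    #     async for line in await client._inner_post(params, stream=True):
+    #         ...  # stream=True -> AsyncIterable[bytes], yielded line by line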
+ + async def _inner_post( + self, request_parameters: RequestParameters, *, stream: bool = False + ) -> Union[bytes, AsyncIterable[bytes]]: + """Make a request to the inference server.""" + + aiohttp = _import_aiohttp() + + # TODO: this should be handled in provider helpers directly + if request_parameters.task in TASKS_EXPECTING_IMAGES and "Accept" not in request_parameters.headers: + request_parameters.headers["Accept"] = "image/png" + + with _open_as_binary(request_parameters.data) as data_as_binary: + # Do not use context manager as we don't want to close the connection immediately when returning + # a stream + session = self._get_client_session(headers=request_parameters.headers) + + try: + response = await session.post( + request_parameters.url, json=request_parameters.json, data=data_as_binary, proxy=self.proxies + ) + response_error_payload = None + if response.status != 200: + try: + response_error_payload = await response.json() # get payload before connection closed + except Exception: + pass + response.raise_for_status() + if stream: + return _async_yield_from(session, response) + else: + content = await response.read() + await session.close() + return content + except asyncio.TimeoutError as error: + await session.close() + # Convert any `TimeoutError` to a `InferenceTimeoutError` + raise InferenceTimeoutError(f"Inference call timed out: {request_parameters.url}") from error # type: ignore + except aiohttp.ClientResponseError as error: + error.response_error_payload = response_error_payload + await session.close() + raise error + except Exception: + await session.close() + raise + + async def __aenter__(self): + return self + + async def __aexit__(self, exc_type, exc_value, traceback): + await self.close() + + def __del__(self): + if len(self._sessions) > 0: + warnings.warn( + "Deleting 'AsyncInferenceClient' client but some sessions are still open. " + "This can happen if you've stopped streaming data from the server before the stream was complete. " + "To close the client properly, you must call `await client.close()` " + "or use an async context (e.g. `async with AsyncInferenceClient(): ...`." + ) + + async def close(self): + """Close all open sessions. + + By default, 'aiohttp.ClientSession' objects are closed automatically when a call is completed. However, if you + are streaming data from the server and you stop before the stream is complete, you must call this method to + close the session properly. + + Another possibility is to use an async context (e.g. `async with AsyncInferenceClient(): ...`). + """ + await asyncio.gather(*[session.close() for session in self._sessions.keys()]) + + async def audio_classification( + self, + audio: ContentT, + *, + model: Optional[str] = None, + top_k: Optional[int] = None, + function_to_apply: Optional["AudioClassificationOutputTransform"] = None, + ) -> List[AudioClassificationOutputElement]: + """ + Perform audio classification on the provided audio content. + + Args: + audio (Union[str, Path, bytes, BinaryIO]): + The audio content to classify. It can be raw audio bytes, a local audio file, or a URL pointing to an + audio file. + model (`str`, *optional*): + The model to use for audio classification. Can be a model ID hosted on the Hugging Face Hub + or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for + audio classification will be used. + top_k (`int`, *optional*): + When specified, limits the output to the top K most probable classes. 
+ function_to_apply (`"AudioClassificationOutputTransform"`, *optional*): + The function to apply to the model outputs in order to retrieve the scores. + + Returns: + `List[AudioClassificationOutputElement]`: List of [`AudioClassificationOutputElement`] items containing the predicted labels and their confidence. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.audio_classification("audio.flac") + [ + AudioClassificationOutputElement(score=0.4976358711719513, label='hap'), + AudioClassificationOutputElement(score=0.3677836060523987, label='neu'), + ... + ] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="audio-classification", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=audio, + parameters={"function_to_apply": function_to_apply, "top_k": top_k}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = await self._inner_post(request_parameters) + return AudioClassificationOutputElement.parse_obj_as_list(response) + + async def audio_to_audio( + self, + audio: ContentT, + *, + model: Optional[str] = None, + ) -> List[AudioToAudioOutputElement]: + """ + Performs multiple tasks related to audio-to-audio depending on the model (eg: speech enhancement, source separation). + + Args: + audio (Union[str, Path, bytes, BinaryIO]): + The audio content for the model. It can be raw audio bytes, a local audio file, or a URL pointing to an + audio file. + model (`str`, *optional*): + The model can be any model which takes an audio file and returns another audio file. Can be a model ID hosted on the Hugging Face Hub + or a URL to a deployed Inference Endpoint. If not provided, the default recommended model for + audio_to_audio will be used. + + Returns: + `List[AudioToAudioOutputElement]`: A list of [`AudioToAudioOutputElement`] items containing audios label, content-type, and audio content in blob. + + Raises: + `InferenceTimeoutError`: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> audio_output = await client.audio_to_audio("audio.flac")
+        >>> for i, item in enumerate(audio_output):
+        ...     with open(f"output_{i}.flac", "wb") as f:
+        ...         f.write(item.blob)
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="audio-to-audio", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=audio,
+            parameters={},
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        audio_output = AudioToAudioOutputElement.parse_obj_as_list(response)
+        for item in audio_output:
+            item.blob = base64.b64decode(item.blob)
+        return audio_output
+
+    async def automatic_speech_recognition(
+        self,
+        audio: ContentT,
+        *,
+        model: Optional[str] = None,
+        extra_body: Optional[Dict] = None,
+    ) -> AutomaticSpeechRecognitionOutput:
+        """
+        Perform automatic speech recognition (ASR or audio-to-text) on the given audio content.
+
+        Args:
+            audio (Union[str, Path, bytes, BinaryIO]):
+                The content to transcribe. It can be raw audio bytes, a local audio file, or a URL to an audio file.
+            model (`str`, *optional*):
+                The model to use for ASR. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. If not provided, the default recommended model for ASR will be used.
+            extra_body (`Dict`, *optional*):
+                Additional provider-specific parameters to pass to the model. Refer to the provider's documentation
+                for supported parameters.
+        Returns:
+            [`AutomaticSpeechRecognitionOutput`]: An item containing the transcribed text and optionally the timestamp chunks.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
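+
+        Example (editor's sketch, not from the upstream docstring — shows a full `asyncio` entry point;
+        note that the coroutine must be awaited before accessing `.text`):
+        ```py
+        import asyncio
+        from huggingface_hub import AsyncInferenceClient
+
+        async def main() -> str:
+            client = AsyncInferenceClient()
+            output = await client.automatic_speech_recognition("hello_world.flac")
+            return output.text
+
+        print(asyncio.run(main()))
+        ```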
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> (await client.automatic_speech_recognition("hello_world.flac")).text
+        "hello world"
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="automatic-speech-recognition", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=audio,
+            parameters={**(extra_body or {})},
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        return AutomaticSpeechRecognitionOutput.parse_obj_as_instance(response)
+
+    @overload
+    async def chat_completion(  # type: ignore
+        self,
+        messages: List[Union[Dict, ChatCompletionInputMessage]],
+        *,
+        model: Optional[str] = None,
+        stream: Literal[False] = False,
+        frequency_penalty: Optional[float] = None,
+        logit_bias: Optional[List[float]] = None,
+        logprobs: Optional[bool] = None,
+        max_tokens: Optional[int] = None,
+        n: Optional[int] = None,
+        presence_penalty: Optional[float] = None,
+        response_format: Optional[ChatCompletionInputGrammarType] = None,
+        seed: Optional[int] = None,
+        stop: Optional[List[str]] = None,
+        stream_options: Optional[ChatCompletionInputStreamOptions] = None,
+        temperature: Optional[float] = None,
+        tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None,
+        tool_prompt: Optional[str] = None,
+        tools: Optional[List[ChatCompletionInputTool]] = None,
+        top_logprobs: Optional[int] = None,
+        top_p: Optional[float] = None,
+        extra_body: Optional[Dict] = None,
+    ) -> ChatCompletionOutput: ...
+
+    @overload
+    async def chat_completion(  # type: ignore
+        self,
+        messages: List[Union[Dict, ChatCompletionInputMessage]],
+        *,
+        model: Optional[str] = None,
+        stream: Literal[True] = True,
+        frequency_penalty: Optional[float] = None,
+        logit_bias: Optional[List[float]] = None,
+        logprobs: Optional[bool] = None,
+        max_tokens: Optional[int] = None,
+        n: Optional[int] = None,
+        presence_penalty: Optional[float] = None,
+        response_format: Optional[ChatCompletionInputGrammarType] = None,
+        seed: Optional[int] = None,
+        stop: Optional[List[str]] = None,
+        stream_options: Optional[ChatCompletionInputStreamOptions] = None,
+        temperature: Optional[float] = None,
+        tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None,
+        tool_prompt: Optional[str] = None,
+        tools: Optional[List[ChatCompletionInputTool]] = None,
+        top_logprobs: Optional[int] = None,
+        top_p: Optional[float] = None,
+        extra_body: Optional[Dict] = None,
+    ) -> AsyncIterable[ChatCompletionStreamOutput]: ...
+ + @overload + async def chat_completion( + self, + messages: List[Union[Dict, ChatCompletionInputMessage]], + *, + model: Optional[str] = None, + stream: bool = False, + frequency_penalty: Optional[float] = None, + logit_bias: Optional[List[float]] = None, + logprobs: Optional[bool] = None, + max_tokens: Optional[int] = None, + n: Optional[int] = None, + presence_penalty: Optional[float] = None, + response_format: Optional[ChatCompletionInputGrammarType] = None, + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stream_options: Optional[ChatCompletionInputStreamOptions] = None, + temperature: Optional[float] = None, + tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None, + tool_prompt: Optional[str] = None, + tools: Optional[List[ChatCompletionInputTool]] = None, + top_logprobs: Optional[int] = None, + top_p: Optional[float] = None, + extra_body: Optional[Dict] = None, + ) -> Union[ChatCompletionOutput, AsyncIterable[ChatCompletionStreamOutput]]: ... + + async def chat_completion( + self, + messages: List[Union[Dict, ChatCompletionInputMessage]], + *, + model: Optional[str] = None, + stream: bool = False, + # Parameters from ChatCompletionInput (handled manually) + frequency_penalty: Optional[float] = None, + logit_bias: Optional[List[float]] = None, + logprobs: Optional[bool] = None, + max_tokens: Optional[int] = None, + n: Optional[int] = None, + presence_penalty: Optional[float] = None, + response_format: Optional[ChatCompletionInputGrammarType] = None, + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stream_options: Optional[ChatCompletionInputStreamOptions] = None, + temperature: Optional[float] = None, + tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None, + tool_prompt: Optional[str] = None, + tools: Optional[List[ChatCompletionInputTool]] = None, + top_logprobs: Optional[int] = None, + top_p: Optional[float] = None, + extra_body: Optional[Dict] = None, + ) -> Union[ChatCompletionOutput, AsyncIterable[ChatCompletionStreamOutput]]: + """ + A method for completing conversations using a specified language model. + + + + The `client.chat_completion` method is aliased as `client.chat.completions.create` for compatibility with OpenAI's client. + Inputs and outputs are strictly the same and using either syntax will yield the same results. + Check out the [Inference guide](https://huggingface.co/docs/huggingface_hub/guides/inference#openai-compatibility) + for more details about OpenAI's compatibility. + + + + + You can pass provider-specific parameters to the model by using the `extra_body` argument. + + + Args: + messages (List of [`ChatCompletionInputMessage`]): + Conversation history consisting of roles and content pairs. + model (`str`, *optional*): + The model to use for chat-completion. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. If not provided, the default recommended model for chat-based text-generation will be used. + See https://huggingface.co/tasks/text-generation for more details. + If `model` is a model ID, it is passed to the server as the `model` parameter. If you want to define a + custom URL while setting `model` in the request payload, you must set `base_url` when initializing [`InferenceClient`]. + frequency_penalty (`float`, *optional*): + Penalizes new tokens based on their existing frequency + in the text so far. Range: [-2.0, 2.0]. Defaults to 0.0. 
+ logit_bias (`List[float]`, *optional*): + Adjusts the likelihood of specific tokens appearing in the generated output. + logprobs (`bool`, *optional*): + Whether to return log probabilities of the output tokens or not. If true, returns the log + probabilities of each output token returned in the content of message. + max_tokens (`int`, *optional*): + Maximum number of tokens allowed in the response. Defaults to 100. + n (`int`, *optional*): + The number of completions to generate for each prompt. + presence_penalty (`float`, *optional*): + Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the + text so far, increasing the model's likelihood to talk about new topics. + response_format ([`ChatCompletionInputGrammarType`], *optional*): + Grammar constraints. Can be either a JSONSchema or a regex. + seed (Optional[`int`], *optional*): + Seed for reproducible control flow. Defaults to None. + stop (`List[str]`, *optional*): + Up to four strings which trigger the end of the response. + Defaults to None. + stream (`bool`, *optional*): + Enable realtime streaming of responses. Defaults to False. + stream_options ([`ChatCompletionInputStreamOptions`], *optional*): + Options for streaming completions. + temperature (`float`, *optional*): + Controls randomness of the generations. Lower values ensure + less random completions. Range: [0, 2]. Defaults to 1.0. + top_logprobs (`int`, *optional*): + An integer between 0 and 5 specifying the number of most likely tokens to return at each token + position, each with an associated log probability. logprobs must be set to true if this parameter is + used. + top_p (`float`, *optional*): + Fraction of the most likely next words to sample from. + Must be between 0 and 1. Defaults to 1.0. + tool_choice ([`ChatCompletionInputToolChoiceClass`] or [`ChatCompletionInputToolChoiceEnum`], *optional*): + The tool to use for the completion. Defaults to "auto". + tool_prompt (`str`, *optional*): + A prompt to be appended before the tools. + tools (List of [`ChatCompletionInputTool`], *optional*): + A list of tools the model may call. Currently, only functions are supported as a tool. Use this to + provide a list of functions the model may generate JSON inputs for. + extra_body (`Dict`, *optional*): + Additional provider-specific parameters to pass to the model. Refer to the provider's documentation + for supported parameters. + Returns: + [`ChatCompletionOutput`] or Iterable of [`ChatCompletionStreamOutput`]: + Generated text returned from the server: + - if `stream=False`, the generated text is returned as a [`ChatCompletionOutput`] (default). + - if `stream=True`, the generated text is returned token by token as a sequence of [`ChatCompletionStreamOutput`]. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+
+        Example:
+
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> messages = [{"role": "user", "content": "What is the capital of France?"}]
+        >>> client = AsyncInferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
+        >>> await client.chat_completion(messages, max_tokens=100)
+        ChatCompletionOutput(
+            choices=[
+                ChatCompletionOutputComplete(
+                    finish_reason='eos_token',
+                    index=0,
+                    message=ChatCompletionOutputMessage(
+                        role='assistant',
+                        content='The capital of France is Paris.',
+                        name=None,
+                        tool_calls=None
+                    ),
+                    logprobs=None
+                )
+            ],
+            created=1719907176,
+            id='',
+            model='meta-llama/Meta-Llama-3-8B-Instruct',
+            object='text_completion',
+            system_fingerprint='2.0.4-sha-f426a33',
+            usage=ChatCompletionOutputUsage(
+                completion_tokens=8,
+                prompt_tokens=17,
+                total_tokens=25
+            )
+        )
+        ```
+
+        Example using streaming:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> messages = [{"role": "user", "content": "What is the capital of France?"}]
+        >>> client = AsyncInferenceClient("meta-llama/Meta-Llama-3-8B-Instruct")
+        >>> async for token in await client.chat_completion(messages, max_tokens=10, stream=True):
+        ...     print(token)
+        ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content='The', role='assistant'), index=0, finish_reason=None)], created=1710498504)
+        ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content=' capital', role='assistant'), index=0, finish_reason=None)], created=1710498504)
+        (...)
+        ChatCompletionStreamOutput(choices=[ChatCompletionStreamOutputChoice(delta=ChatCompletionStreamOutputDelta(content=' may', role='assistant'), index=0, finish_reason=None)], created=1710498504)
+        ```
+
+        Example using OpenAI's syntax:
+        ```py
+        # Must be run in an async context
+        # instead of `from openai import OpenAI`
+        from huggingface_hub import AsyncInferenceClient
+
+        # instead of `client = OpenAI(...)`
+        client = AsyncInferenceClient(
+            base_url=...,
+            api_key=...,
+        )
+
+        output = await client.chat.completions.create(
+            model="meta-llama/Meta-Llama-3-8B-Instruct",
+            messages=[
+                {"role": "system", "content": "You are a helpful assistant."},
+                {"role": "user", "content": "Count to 10"},
+            ],
+            stream=True,
+            max_tokens=1024,
+        )
+
+        async for chunk in output:
+            print(chunk.choices[0].delta.content)
+        ```
+
+        Example using a third-party provider directly with extra (provider-specific) parameters. Usage will be billed on your Together AI account.
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient(
+        ...     provider="together",  # Use Together AI provider
+        ...     api_key="<together_api_key>",  # Pass your Together API key directly
+        ... )
+        >>> await client.chat_completion(
+        ...     model="meta-llama/Meta-Llama-3-8B-Instruct",
+        ...     messages=[{"role": "user", "content": "What is the capital of France?"}],
+        ...     extra_body={"safety_model": "Meta-Llama/Llama-Guard-7b"},
+        ... )
+        ```
+
+        Example using a third-party provider through Hugging Face Routing. Usage will be billed on your Hugging Face account.
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient(
+        ...     provider="sambanova",  # Use Sambanova provider
+        ...     api_key="hf_...",  # Pass your HF token
+        ... )
+        >>> await client.chat_completion(
+        ...     model="meta-llama/Meta-Llama-3-8B-Instruct",
+        ...     messages=[{"role": "user", "content": "What is the capital of France?"}],
+        ... )
+        ```
+
+        Example using Image + Text as input:
+        ```py
+        # Must be run in an async context
+        >>> import base64
+        >>> from huggingface_hub import AsyncInferenceClient
+
+        # provide a remote URL
+        >>> image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
+        # or a base64-encoded image
+        >>> image_path = "/path/to/image.jpeg"
+        >>> with open(image_path, "rb") as f:
+        ...     base64_image = base64.b64encode(f.read()).decode("utf-8")
+        >>> image_url = f"data:image/jpeg;base64,{base64_image}"
+
+        >>> client = AsyncInferenceClient("meta-llama/Llama-3.2-11B-Vision-Instruct")
+        >>> output = await client.chat.completions.create(
+        ...     messages=[
+        ...         {
+        ...             "role": "user",
+        ...             "content": [
+        ...                 {
+        ...                     "type": "image_url",
+        ...                     "image_url": {"url": image_url},
+        ...                 },
+        ...                 {
+        ...                     "type": "text",
+        ...                     "text": "Describe this image in one sentence.",
+        ...                 },
+        ...             ],
+        ...         },
+        ...     ],
+        ... )
+        >>> output
+        The image depicts the iconic Statue of Liberty situated in New York Harbor, New York, on a clear day.
+        ```
+
+        Example using tools:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
+        >>> messages = [
+        ...     {
+        ...         "role": "system",
+        ...         "content": "Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous.",
+        ...     },
+        ...     {
+        ...         "role": "user",
+        ...         "content": "What's the weather like the next 3 days in San Francisco, CA?",
+        ...     },
+        ... ]
+        >>> tools = [
+        ...     {
+        ...         "type": "function",
+        ...         "function": {
+        ...             "name": "get_current_weather",
+        ...             "description": "Get the current weather",
+        ...             "parameters": {
+        ...                 "type": "object",
+        ...                 "properties": {
+        ...                     "location": {
+        ...                         "type": "string",
+        ...                         "description": "The city and state, e.g. San Francisco, CA",
+        ...                     },
+        ...                     "format": {
+        ...                         "type": "string",
+        ...                         "enum": ["celsius", "fahrenheit"],
+        ...                         "description": "The temperature unit to use. Infer this from the user's location.",
+        ...                     },
+        ...                 },
+        ...                 "required": ["location", "format"],
+        ...             },
+        ...         },
+        ...     },
+        ...     {
+        ...         "type": "function",
+        ...         "function": {
+        ...             "name": "get_n_day_weather_forecast",
+        ...             "description": "Get an N-day weather forecast",
+        ...             "parameters": {
+        ...                 "type": "object",
+        ...                 "properties": {
+        ...                     "location": {
+        ...                         "type": "string",
+        ...                         "description": "The city and state, e.g. San Francisco, CA",
+        ...                     },
+        ...                     "format": {
+        ...                         "type": "string",
+        ...                         "enum": ["celsius", "fahrenheit"],
+        ...                         "description": "The temperature unit to use. Infer this from the user's location.",
+        ...                     },
+        ...                     "num_days": {
+        ...                         "type": "integer",
+        ...                         "description": "The number of days to forecast",
+        ...                     },
+        ...                 },
+        ...                 "required": ["location", "format", "num_days"],
+        ...             },
+        ...         },
+        ...     },
+        ... ]
+
+        >>> response = await client.chat_completion(
+        ...     model="meta-llama/Meta-Llama-3-70B-Instruct",
+        ...     messages=messages,
+        ...     tools=tools,
+        ...     tool_choice="auto",
+        ...     max_tokens=500,
+        ... )
+        >>> response.choices[0].message.tool_calls[0].function
+        ChatCompletionOutputFunctionDefinition(
+            arguments={
+                'location': 'San Francisco, CA',
+                'format': 'fahrenheit',
+                'num_days': 3
+            },
+            name='get_n_day_weather_forecast',
+            description=None
+        )
+        ```
+
+        Example using response_format:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient("meta-llama/Meta-Llama-3-70B-Instruct")
+        >>> messages = [
+        ...     {
+        ...         "role": "user",
+        ...         "content": "I saw a puppy a cat and a raccoon during my bike ride in the park. What did I see and when?",
+        ...     },
+        ... ]
+        >>> response_format = {
+        ...     "type": "json",
+        ...     "value": {
+        ...         "properties": {
+        ...             "location": {"type": "string"},
+        ...             "activity": {"type": "string"},
+        ...             "animals_seen": {"type": "integer", "minimum": 1, "maximum": 5},
+        ...             "animals": {"type": "array", "items": {"type": "string"}},
+        ...         },
+        ...         "required": ["location", "activity", "animals_seen", "animals"],
+        ...     },
+        ... }
+        >>> response = await client.chat_completion(
+        ...     messages=messages,
+        ...     response_format=response_format,
+        ...     max_tokens=500,
+        ... )
+        >>> response.choices[0].message.content
+        '{\n\n"activity": "bike ride",\n"animals": ["puppy", "cat", "raccoon"],\n"animals_seen": 3,\n"location": "park"}'
+        ```
+        """
+        # Since `chat_completion(..., model=xxx)` is also a payload parameter for the server, we need to handle 'model' differently.
+        # `self.model` takes precedence over the `model` argument for building the URL.
+        # `model` takes precedence for the payload value.
+        model_id_or_url = self.model or model
+        payload_model = model or self.model
+
+        # Get the provider helper
+        provider_helper = get_provider_helper(
+            self.provider,
+            task="conversational",
+            model=model_id_or_url
+            if model_id_or_url is not None and model_id_or_url.startswith(("http://", "https://"))
+            else payload_model,
+        )
+
+        # Prepare the payload
+        parameters = {
+            "model": payload_model,
+            "frequency_penalty": frequency_penalty,
+            "logit_bias": logit_bias,
+            "logprobs": logprobs,
+            "max_tokens": max_tokens,
+            "n": n,
+            "presence_penalty": presence_penalty,
+            "response_format": response_format,
+            "seed": seed,
+            "stop": stop,
+            "temperature": temperature,
+            "tool_choice": tool_choice,
+            "tool_prompt": tool_prompt,
+            "tools": tools,
+            "top_logprobs": top_logprobs,
+            "top_p": top_p,
+            "stream": stream,
+            "stream_options": stream_options,
+            # `extra_body` is unpacked last so provider-specific values override any same-named key above.
+            **(extra_body or {}),
+        }
+        request_parameters = provider_helper.prepare_request(
+            inputs=messages,
+            parameters=parameters,
+            headers=self.headers,
+            model=model_id_or_url,
+            api_key=self.token,
+        )
+        data = await self._inner_post(request_parameters, stream=stream)
+
+        if stream:
+            return _async_stream_chat_completion_response(data)  # type: ignore[arg-type]
+
+        return ChatCompletionOutput.parse_obj_as_instance(data)  # type: ignore[arg-type]
+
+    async def document_question_answering(
+        self,
+        image: ContentT,
+        question: str,
+        *,
+        model: Optional[str] = None,
+        doc_stride: Optional[int] = None,
+        handle_impossible_answer: Optional[bool] = None,
+        lang: Optional[str] = None,
+        max_answer_len: Optional[int] = None,
+        max_question_len: Optional[int] = None,
+        max_seq_len: Optional[int] = None,
+        top_k: Optional[int] = None,
+        word_boxes: Optional[List[Union[List[float], str]]] = None,
+    ) -> List[DocumentQuestionAnsweringOutputElement]:
+        """
+        Answer questions on document images.
+
+        Args:
+            image (`Union[str, Path, bytes, BinaryIO]`):
+                The input image for the context. It can be raw bytes, an image file, or a URL to an online image.
+            question (`str`):
+                Question to be answered.
+            model (`str`, *optional*):
+                The model to use for the document question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to
+                a deployed Inference Endpoint. If not provided, the default recommended document question answering model will be used.
+                Defaults to None.
+            doc_stride (`int`, *optional*):
+                If the words in the document are too long to fit with the question for the model, it will be split into
+                several chunks with some overlap. This argument controls the size of that overlap.
+            handle_impossible_answer (`bool`, *optional*):
+                Whether to accept impossible as an answer.
+            lang (`str`, *optional*):
+                Language to use while running OCR. Defaults to English.
+            max_answer_len (`int`, *optional*):
+                The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
+            max_question_len (`int`, *optional*):
+                The maximum length of the question after tokenization. It will be truncated if needed.
+            max_seq_len (`int`, *optional*):
+                The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
+                model. The context will be split into several chunks (using `doc_stride` as overlap) if needed.
+            top_k (`int`, *optional*):
+                The number of answers to return (will be chosen by order of likelihood). Can return fewer than top_k
+                answers if there are not enough options available within the context.
+            word_boxes (`List[Union[List[float], str]]`, *optional*):
+                A list of words and bounding boxes (normalized 0->1000). If provided, the inference will skip the OCR
+                step and use the provided bounding boxes instead.
+        Returns:
+            `List[DocumentQuestionAnsweringOutputElement]`: a list of [`DocumentQuestionAnsweringOutputElement`] items containing the predicted label, associated probability, word ids, and page number.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> await client.document_question_answering(image="https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png", question="What is the invoice number?")
+        [DocumentQuestionAnsweringOutputElement(answer='us-001', end=16, score=0.9999666213989258, start=16)]
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="document-question-answering", model=model_id)
+        inputs: Dict[str, Any] = {"question": question, "image": _b64_encode(image)}
+        request_parameters = provider_helper.prepare_request(
+            inputs=inputs,
+            parameters={
+                "doc_stride": doc_stride,
+                "handle_impossible_answer": handle_impossible_answer,
+                "lang": lang,
+                "max_answer_len": max_answer_len,
+                "max_question_len": max_question_len,
+                "max_seq_len": max_seq_len,
+                "top_k": top_k,
+                "word_boxes": word_boxes,
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        return DocumentQuestionAnsweringOutputElement.parse_obj_as_list(response)
+
+    async def feature_extraction(
+        self,
+        text: str,
+        *,
+        normalize: Optional[bool] = None,
+        prompt_name: Optional[str] = None,
+        truncate: Optional[bool] = None,
+        truncation_direction: Optional[Literal["Left", "Right"]] = None,
+        model: Optional[str] = None,
+    ) -> "np.ndarray":
+        """
+        Generate embeddings for a given text.
+
+        Args:
+            text (`str`):
+                The text to embed.
+            model (`str`, *optional*):
+                The model to use for the feature extraction task. Can be a model ID hosted on the Hugging Face Hub or a URL to
+                a deployed Inference Endpoint.
+                If not provided, the default recommended feature extraction model will be used.
+                Defaults to None.
+            normalize (`bool`, *optional*):
+                Whether to normalize the embeddings or not.
+                Only available on servers powered by Text-Embedding-Inference.
+            prompt_name (`str`, *optional*):
+                The name of the prompt that should be used for encoding. If not set, no prompt will be applied.
+                Must be a key in the `Sentence Transformers` configuration `prompts` dictionary.
+                For example, if ``prompt_name`` is "query" and ``prompts`` is {"query": "query: ", ...},
+                then the sentence "What is the capital of France?" will be encoded as "query: What is the capital of France?"
+                because the prompt text will be prepended before any text to encode.
+            truncate (`bool`, *optional*):
+                Whether to truncate the embeddings or not.
+                Only available on servers powered by Text-Embedding-Inference.
+            truncation_direction (`Literal["Left", "Right"]`, *optional*):
+                Which side of the input should be truncated when `truncate=True` is passed.
+
+        Returns:
+            `np.ndarray`: The embedding representing the input text as a float32 numpy array.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> await client.feature_extraction("Hi, who are you?")
+        array([[ 2.424802  ,  2.93384   ,  1.1750331 , ...,  1.240499  , -0.13776633, -0.7889173 ],
+        [-0.42943227, -0.6364878 , -1.693462  , ...,  0.41978157, -2.4336355 ,  0.6162071 ],
+        ...,
+        [ 0.28552425, -0.928395  , -1.2077185 , ...,  0.76810825, -2.1069427 ,  0.6236161 ]], dtype=float32)
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="feature-extraction", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=text,
+            parameters={
+                "normalize": normalize,
+                "prompt_name": prompt_name,
+                "truncate": truncate,
+                "truncation_direction": truncation_direction,
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        np = _import_numpy()
+        return np.array(provider_helper.get_response(response), dtype="float32")
+
+    async def fill_mask(
+        self,
+        text: str,
+        *,
+        model: Optional[str] = None,
+        targets: Optional[List[str]] = None,
+        top_k: Optional[int] = None,
+    ) -> List[FillMaskOutputElement]:
+        """
+        Fill in a hole with a missing word (token to be precise).
+
+        Args:
+            text (`str`):
+                A string to be filled from; must contain the [MASK] token (check the model card for the exact name of the mask).
+            model (`str`, *optional*):
+                The model to use for the fill mask task. Can be a model ID hosted on the Hugging Face Hub or a URL to
+                a deployed Inference Endpoint. If not provided, the default recommended fill mask model will be used.
+            targets (`List[str]`, *optional*):
+                When passed, the model will limit the scores to the passed targets instead of looking up in the whole
+                vocabulary. If the provided targets are not in the model vocab, they will be tokenized and the first
+                resulting token will be used (with a warning, and that might be slower).
+            top_k (`int`, *optional*):
+                When passed, overrides the number of predictions to return.
+ Returns: + `List[FillMaskOutputElement]`: a list of [`FillMaskOutputElement`] items containing the predicted label, associated + probability, token reference, and completed text. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.fill_mask("The goal of life is .") + [ + FillMaskOutputElement(score=0.06897063553333282, token=11098, token_str=' happiness', sequence='The goal of life is happiness.'), + FillMaskOutputElement(score=0.06554922461509705, token=45075, token_str=' immortality', sequence='The goal of life is immortality.') + ] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="fill-mask", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=text, + parameters={"targets": targets, "top_k": top_k}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = await self._inner_post(request_parameters) + return FillMaskOutputElement.parse_obj_as_list(response) + + async def image_classification( + self, + image: ContentT, + *, + model: Optional[str] = None, + function_to_apply: Optional["ImageClassificationOutputTransform"] = None, + top_k: Optional[int] = None, + ) -> List[ImageClassificationOutputElement]: + """ + Perform image classification on the given image using the specified model. + + Args: + image (`Union[str, Path, bytes, BinaryIO]`): + The image to classify. It can be raw bytes, an image file, or a URL to an online image. + model (`str`, *optional*): + The model to use for image classification. Can be a model ID hosted on the Hugging Face Hub or a URL to a + deployed Inference Endpoint. If not provided, the default recommended model for image classification will be used. + function_to_apply (`"ImageClassificationOutputTransform"`, *optional*): + The function to apply to the model outputs in order to retrieve the scores. + top_k (`int`, *optional*): + When specified, limits the output to the top K most probable classes. + Returns: + `List[ImageClassificationOutputElement]`: a list of [`ImageClassificationOutputElement`] items containing the predicted label and associated probability. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.image_classification("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg") + [ImageClassificationOutputElement(label='Blenheim spaniel', score=0.9779096841812134), ...] 
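+        # Hypothetical follow-up, sketching the `top_k` parameter documented above:
+        # limit the output to the 3 most probable classes.
+        >>> await client.image_classification("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg", top_k=3)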
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="image-classification", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=image,
+            parameters={"function_to_apply": function_to_apply, "top_k": top_k},
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        return ImageClassificationOutputElement.parse_obj_as_list(response)
+
+    async def image_segmentation(
+        self,
+        image: ContentT,
+        *,
+        model: Optional[str] = None,
+        mask_threshold: Optional[float] = None,
+        overlap_mask_area_threshold: Optional[float] = None,
+        subtask: Optional["ImageSegmentationSubtask"] = None,
+        threshold: Optional[float] = None,
+    ) -> List[ImageSegmentationOutputElement]:
+        """
+        Perform image segmentation on the given image using the specified model.
+
+        <Tip>
+
+        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
+
+        </Tip>
+
+        Args:
+            image (`Union[str, Path, bytes, BinaryIO]`):
+                The image to segment. It can be raw bytes, an image file, or a URL to an online image.
+            model (`str`, *optional*):
+                The model to use for image segmentation. Can be a model ID hosted on the Hugging Face Hub or a URL to a
+                deployed Inference Endpoint. If not provided, the default recommended model for image segmentation will be used.
+            mask_threshold (`float`, *optional*):
+                Threshold to use when turning the predicted masks into binary values.
+            overlap_mask_area_threshold (`float`, *optional*):
+                Mask overlap threshold to eliminate small, disconnected segments.
+            subtask (`"ImageSegmentationSubtask"`, *optional*):
+                Segmentation task to be performed, depending on model capabilities.
+            threshold (`float`, *optional*):
+                Probability threshold to filter out predicted masks.
+        Returns:
+            `List[ImageSegmentationOutputElement]`: A list of [`ImageSegmentationOutputElement`] items containing the segmented masks and associated attributes.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> await client.image_segmentation("cat.jpg")
+        [ImageSegmentationOutputElement(score=0.989008, label='LABEL_184', mask=<PIL.Image.Image ...>), ...]
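+        # Hypothetical follow-up, sketching the `subtask` parameter documented above:
+        # explicitly request semantic segmentation (model permitting).
+        >>> await client.image_segmentation("cat.jpg", subtask="semantic")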
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="image-segmentation", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=image,
+            parameters={
+                "mask_threshold": mask_threshold,
+                "overlap_mask_area_threshold": overlap_mask_area_threshold,
+                "subtask": subtask,
+                "threshold": threshold,
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        output = ImageSegmentationOutputElement.parse_obj_as_list(response)
+        # Masks are returned as base64-encoded strings; decode them into PIL images.
+        for item in output:
+            item.mask = _b64_to_image(item.mask)  # type: ignore [assignment]
+        return output
+
+    async def image_to_image(
+        self,
+        image: ContentT,
+        prompt: Optional[str] = None,
+        *,
+        negative_prompt: Optional[str] = None,
+        num_inference_steps: Optional[int] = None,
+        guidance_scale: Optional[float] = None,
+        model: Optional[str] = None,
+        target_size: Optional[ImageToImageTargetSize] = None,
+        **kwargs,
+    ) -> "Image":
+        """
+        Perform image-to-image translation using a specified model.
+
+        <Tip>
+
+        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
+
+        </Tip>
+
+        Args:
+            image (`Union[str, Path, bytes, BinaryIO]`):
+                The input image for translation. It can be raw bytes, an image file, or a URL to an online image.
+            prompt (`str`, *optional*):
+                The text prompt to guide the image generation.
+            negative_prompt (`str`, *optional*):
+                One prompt to guide what NOT to include in image generation.
+            num_inference_steps (`int`, *optional*):
+                For diffusion models. The number of denoising steps. More denoising steps usually lead to a higher
+                quality image at the expense of slower inference.
+            guidance_scale (`float`, *optional*):
+                For diffusion models. A higher guidance scale value encourages the model to generate images closely
+                linked to the text prompt at the expense of lower image quality.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
+            target_size (`ImageToImageTargetSize`, *optional*):
+                The size in pixels of the output image.
+
+        Returns:
+            `Image`: The translated image.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> image = await client.image_to_image("cat.jpg", prompt="turn the cat into a tiger")
+        >>> image.save("tiger.jpg")
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="image-to-image", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=image,
+            parameters={
+                "prompt": prompt,
+                "negative_prompt": negative_prompt,
+                "target_size": target_size,
+                "num_inference_steps": num_inference_steps,
+                "guidance_scale": guidance_scale,
+                **kwargs,
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        return _bytes_to_image(response)
+
+    async def image_to_text(self, image: ContentT, *, model: Optional[str] = None) -> ImageToTextOutput:
+        """
+        Takes an input image and returns text.
+
+        Models can have very different outputs depending on your use case (image captioning, optical character recognition
+        (OCR), Pix2Struct, etc.). Please have a look at the model card to learn more about a model's specificities.
+
+        Args:
+            image (`Union[str, Path, bytes, BinaryIO]`):
+                The input image to caption. It can be raw bytes, an image file, or a URL to an online image.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
+
+        Returns:
+            [`ImageToTextOutput`]: The generated text.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> await client.image_to_text("cat.jpg")
+        'a cat standing in a grassy field '
+        >>> await client.image_to_text("https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg")
+        'a dog laying on the grass next to a flower pot '
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="image-to-text", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=image,
+            parameters={},
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        # Some backends return a list with a single element; unwrap it for convenience.
+        output = ImageToTextOutput.parse_obj(response)
+        return output[0] if isinstance(output, list) else output
+
+    async def object_detection(
+        self, image: ContentT, *, model: Optional[str] = None, threshold: Optional[float] = None
+    ) -> List[ObjectDetectionOutputElement]:
+        """
+        Perform object detection on the given image using the specified model.
+
+        <Tip>
+
+        You must have `PIL` installed if you want to work with images (`pip install Pillow`).
+
+        </Tip>
+
+        Args:
+            image (`Union[str, Path, bytes, BinaryIO]`):
+                The image to detect objects on. It can be raw bytes, an image file, or a URL to an online image.
+            model (`str`, *optional*):
+                The model to use for object detection. Can be a model ID hosted on the Hugging Face Hub or a URL to a
+                deployed Inference Endpoint. If not provided, the default recommended model for object detection (DETR) will be used.
+            threshold (`float`, *optional*):
+                The probability necessary to make a prediction.
+        Returns:
+            `List[ObjectDetectionOutputElement]`: A list of [`ObjectDetectionOutputElement`] items containing the bounding boxes and associated attributes.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+            `ValueError`:
+                If the request output is not a List.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> await client.object_detection("people.jpg")
+        [ObjectDetectionOutputElement(score=0.9486683011054993, label='person', box=ObjectDetectionBoundingBox(xmin=59, ymin=39, xmax=420, ymax=510)), ...]
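+        # Hypothetical follow-up, sketching the `threshold` parameter documented above:
+        # keep only high-confidence detections.
+        >>> await client.object_detection("people.jpg", threshold=0.9)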
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="object-detection", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=image,
+            parameters={"threshold": threshold},
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        return ObjectDetectionOutputElement.parse_obj_as_list(response)
+
+    async def question_answering(
+        self,
+        question: str,
+        context: str,
+        *,
+        model: Optional[str] = None,
+        align_to_words: Optional[bool] = None,
+        doc_stride: Optional[int] = None,
+        handle_impossible_answer: Optional[bool] = None,
+        max_answer_len: Optional[int] = None,
+        max_question_len: Optional[int] = None,
+        max_seq_len: Optional[int] = None,
+        top_k: Optional[int] = None,
+    ) -> Union[QuestionAnsweringOutputElement, List[QuestionAnsweringOutputElement]]:
+        """
+        Retrieve the answer to a question from a given text.
+
+        Args:
+            question (`str`):
+                Question to be answered.
+            context (`str`):
+                The context of the question.
+            model (`str`, *optional*):
+                The model to use for the question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to
+                a deployed Inference Endpoint.
+            align_to_words (`bool`, *optional*):
+                Attempts to align the answer to real words. Improves quality on space-separated languages. Might hurt
+                on non-space-separated languages (like Japanese or Chinese).
+            doc_stride (`int`, *optional*):
+                If the context is too long to fit with the question for the model, it will be split into several chunks
+                with some overlap. This argument controls the size of that overlap.
+            handle_impossible_answer (`bool`, *optional*):
+                Whether to accept impossible as an answer.
+            max_answer_len (`int`, *optional*):
+                The maximum length of predicted answers (e.g., only answers with a shorter length are considered).
+            max_question_len (`int`, *optional*):
+                The maximum length of the question after tokenization. It will be truncated if needed.
+            max_seq_len (`int`, *optional*):
+                The maximum length of the total sentence (context + question) in tokens of each chunk passed to the
+                model. The context will be split into several chunks (using `doc_stride` as overlap) if needed.
+            top_k (`int`, *optional*):
+                The number of answers to return (will be chosen by order of likelihood). Note that fewer than top_k
+                answers will be returned if there are not enough options available within the context.
+
+        Returns:
+            Union[`QuestionAnsweringOutputElement`, List[`QuestionAnsweringOutputElement`]]:
+                When top_k is 1 or not provided, it returns a single `QuestionAnsweringOutputElement`.
+                When top_k is greater than 1, it returns a list of `QuestionAnsweringOutputElement`.
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> await client.question_answering(question="What's my name?", context="My name is Clara and I live in Berkeley.")
+        QuestionAnsweringOutputElement(answer='Clara', end=16, score=0.9326565265655518, start=11)
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="question-answering", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs={"question": question, "context": context},
+            parameters={
+                "align_to_words": align_to_words,
+                "doc_stride": doc_stride,
+                "handle_impossible_answer": handle_impossible_answer,
+                "max_answer_len": max_answer_len,
+                "max_question_len": max_question_len,
+                "max_seq_len": max_seq_len,
+                "top_k": top_k,
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        # Parse the response as a single `QuestionAnsweringOutputElement` when top_k is 1 or not provided,
+        # or as a list of `QuestionAnsweringOutputElement` otherwise, to ensure backward compatibility.
+        output = QuestionAnsweringOutputElement.parse_obj(response)
+        return output
+
+    async def sentence_similarity(
+        self, sentence: str, other_sentences: List[str], *, model: Optional[str] = None
+    ) -> List[float]:
+        """
+        Compute the semantic similarity between a sentence and a list of other sentences by comparing their embeddings.
+
+        Args:
+            sentence (`str`):
+                The main sentence to compare to others.
+            other_sentences (`List[str]`):
+                The list of sentences to compare to.
+            model (`str`, *optional*):
+                The model to use for the sentence similarity task. Can be a model ID hosted on the Hugging Face Hub or a URL to
+                a deployed Inference Endpoint. If not provided, the default recommended sentence similarity model will be used.
+                Defaults to None.
+
+        Returns:
+            `List[float]`: The similarity scores between the source sentence and each of the other sentences.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> await client.sentence_similarity(
+        ...     "Machine learning is so easy.",
+        ...     other_sentences=[
+        ...         "Deep learning is so straightforward.",
+        ...         "This is so difficult, like rocket science.",
+        ...         "I can't believe how much I struggled with this.",
+        ...     ],
+        ... )
+        [0.7785726189613342, 0.45876261591911316, 0.2906220555305481]
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="sentence-similarity", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs={"source_sentence": sentence, "sentences": other_sentences},
+            parameters={},
+            extra_payload={},
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        return _bytes_to_list(response)
+
+    async def summarization(
+        self,
+        text: str,
+        *,
+        model: Optional[str] = None,
+        clean_up_tokenization_spaces: Optional[bool] = None,
+        generate_parameters: Optional[Dict[str, Any]] = None,
+        truncation: Optional["SummarizationTruncationStrategy"] = None,
+    ) -> SummarizationOutput:
+        """
+        Generate a summary of a given text using a specified model.
+
+        Args:
+            text (`str`):
+                The input text to summarize.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. If not provided, the default recommended model for summarization will be used.
+            clean_up_tokenization_spaces (`bool`, *optional*):
+                Whether to clean up the potential extra spaces in the text output.
+            generate_parameters (`Dict[str, Any]`, *optional*):
+                Additional parametrization of the text generation algorithm.
+            truncation (`"SummarizationTruncationStrategy"`, *optional*):
+                The truncation strategy to use.
+        Returns:
+            [`SummarizationOutput`]: The generated summary text.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> await client.summarization("The Eiffel tower...")
+        SummarizationOutput(generated_text="The Eiffel tower is one of the most famous landmarks in the world....")
+        ```
+        """
+        parameters = {
+            "clean_up_tokenization_spaces": clean_up_tokenization_spaces,
+            "generate_parameters": generate_parameters,
+            "truncation": truncation,
+        }
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="summarization", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=text,
+            parameters=parameters,
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        return SummarizationOutput.parse_obj_as_list(response)[0]
+
+    async def table_question_answering(
+        self,
+        table: Dict[str, Any],
+        query: str,
+        *,
+        model: Optional[str] = None,
+        padding: Optional["Padding"] = None,
+        sequential: Optional[bool] = None,
+        truncation: Optional[bool] = None,
+    ) -> TableQuestionAnsweringOutputElement:
+        """
+        Retrieve the answer to a question from information given in a table.
+
+        Args:
+            table (`Dict[str, Any]`):
+                A table of data represented as a dict of lists where entries are headers and the lists are all the
+                values; all lists must have the same size.
+            query (`str`):
+                The query in plain text that you want to ask the table.
+            model (`str`, *optional*):
+                The model to use for the table-question-answering task. Can be a model ID hosted on the Hugging Face
+                Hub or a URL to a deployed Inference Endpoint.
+            padding (`"Padding"`, *optional*):
+                Activates and controls padding.
+            sequential (`bool`, *optional*):
+                Whether to do inference sequentially or as a batch. Batching is faster, but models like SQA require the
+                inference to be done sequentially to extract relations within sequences, given their conversational
+                nature.
+            truncation (`bool`, *optional*):
+                Activates and controls truncation.
+
+        Returns:
+            [`TableQuestionAnsweringOutputElement`]: a table question answering output containing the answer, coordinates, cells and the aggregator used.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> query = "How many stars does the transformers repository have?"
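+        # The table maps each column header to an equally sized list of cell values,
+        # as described in the Args section above.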
+ >>> table = {"Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"]} + >>> await client.table_question_answering(table, query, model="google/tapas-base-finetuned-wtq") + TableQuestionAnsweringOutputElement(answer='36542', coordinates=[[0, 1]], cells=['36542'], aggregator='AVERAGE') + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="table-question-answering", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs={"query": query, "table": table}, + parameters={"model": model, "padding": padding, "sequential": sequential, "truncation": truncation}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = await self._inner_post(request_parameters) + return TableQuestionAnsweringOutputElement.parse_obj_as_instance(response) + + async def tabular_classification(self, table: Dict[str, Any], *, model: Optional[str] = None) -> List[str]: + """ + Classifying a target category (a group) based on a set of attributes. + + Args: + table (`Dict[str, Any]`): + Set of attributes to classify. + model (`str`, *optional*): + The model to use for the tabular classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended tabular classification model will be used. + Defaults to None. + + Returns: + `List`: a list of labels, one per row in the initial table. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> table = { + ... "fixed_acidity": ["7.4", "7.8", "10.3"], + ... "volatile_acidity": ["0.7", "0.88", "0.32"], + ... "citric_acid": ["0", "0", "0.45"], + ... "residual_sugar": ["1.9", "2.6", "6.4"], + ... "chlorides": ["0.076", "0.098", "0.073"], + ... "free_sulfur_dioxide": ["11", "25", "5"], + ... "total_sulfur_dioxide": ["34", "67", "13"], + ... "density": ["0.9978", "0.9968", "0.9976"], + ... "pH": ["3.51", "3.2", "3.23"], + ... "sulphates": ["0.56", "0.68", "0.82"], + ... "alcohol": ["9.4", "9.8", "12.6"], + ... } + >>> await client.tabular_classification(table=table, model="julien-c/wine-quality") + ["5", "5", "5"] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="tabular-classification", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=None, + extra_payload={"table": table}, + parameters={}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = await self._inner_post(request_parameters) + return _bytes_to_list(response) + + async def tabular_regression(self, table: Dict[str, Any], *, model: Optional[str] = None) -> List[float]: + """ + Predicting a numerical target value given a set of attributes/features in a table. + + Args: + table (`Dict[str, Any]`): + Set of attributes stored in a table. The attributes used to predict the target can be both numerical and categorical. + model (`str`, *optional*): + The model to use for the tabular regression task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended tabular regression model will be used. + Defaults to None. 
+ + Returns: + `List`: a list of predicted numerical target values. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> table = { + ... "Height": ["11.52", "12.48", "12.3778"], + ... "Length1": ["23.2", "24", "23.9"], + ... "Length2": ["25.4", "26.3", "26.5"], + ... "Length3": ["30", "31.2", "31.1"], + ... "Species": ["Bream", "Bream", "Bream"], + ... "Width": ["4.02", "4.3056", "4.6961"], + ... } + >>> await client.tabular_regression(table, model="scikit-learn/Fish-Weight") + [110, 120, 130] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="tabular-regression", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=None, + parameters={}, + extra_payload={"table": table}, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = await self._inner_post(request_parameters) + return _bytes_to_list(response) + + async def text_classification( + self, + text: str, + *, + model: Optional[str] = None, + top_k: Optional[int] = None, + function_to_apply: Optional["TextClassificationOutputTransform"] = None, + ) -> List[TextClassificationOutputElement]: + """ + Perform text classification (e.g. sentiment-analysis) on the given text. + + Args: + text (`str`): + A string to be classified. + model (`str`, *optional*): + The model to use for the text classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to + a deployed Inference Endpoint. If not provided, the default recommended text classification model will be used. + Defaults to None. + top_k (`int`, *optional*): + When specified, limits the output to the top K most probable classes. + function_to_apply (`"TextClassificationOutputTransform"`, *optional*): + The function to apply to the model outputs in order to retrieve the scores. + + Returns: + `List[TextClassificationOutputElement]`: a list of [`TextClassificationOutputElement`] items containing the predicted label and associated probability. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. 
+ + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + >>> await client.text_classification("I like you") + [ + TextClassificationOutputElement(label='POSITIVE', score=0.9998695850372314), + TextClassificationOutputElement(label='NEGATIVE', score=0.0001304351753788069), + ] + ``` + """ + model_id = model or self.model + provider_helper = get_provider_helper(self.provider, task="text-classification", model=model_id) + request_parameters = provider_helper.prepare_request( + inputs=text, + parameters={ + "function_to_apply": function_to_apply, + "top_k": top_k, + }, + headers=self.headers, + model=model_id, + api_key=self.token, + ) + response = await self._inner_post(request_parameters) + return TextClassificationOutputElement.parse_obj_as_list(response)[0] # type: ignore [return-value] + + @overload + async def text_generation( # type: ignore + self, + prompt: str, + *, + details: Literal[False] = ..., + stream: Literal[False] = ..., + model: Optional[str] = None, + # Parameters from `TextGenerationInputGenerateParameters` (maintained manually) + adapter_id: Optional[str] = None, + best_of: Optional[int] = None, + decoder_input_details: Optional[bool] = None, + do_sample: Optional[bool] = False, # Manual default value + frequency_penalty: Optional[float] = None, + grammar: Optional[TextGenerationInputGrammarType] = None, + max_new_tokens: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: Optional[bool] = False, # Manual default value + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_n_tokens: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: Optional[bool] = None, + ) -> str: ... + + @overload + async def text_generation( # type: ignore + self, + prompt: str, + *, + details: Literal[True] = ..., + stream: Literal[False] = ..., + model: Optional[str] = None, + # Parameters from `TextGenerationInputGenerateParameters` (maintained manually) + adapter_id: Optional[str] = None, + best_of: Optional[int] = None, + decoder_input_details: Optional[bool] = None, + do_sample: Optional[bool] = False, # Manual default value + frequency_penalty: Optional[float] = None, + grammar: Optional[TextGenerationInputGrammarType] = None, + max_new_tokens: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: Optional[bool] = False, # Manual default value + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_n_tokens: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: Optional[bool] = None, + ) -> TextGenerationOutput: ... 
+ + @overload + async def text_generation( # type: ignore + self, + prompt: str, + *, + details: Literal[False] = ..., + stream: Literal[True] = ..., + model: Optional[str] = None, + # Parameters from `TextGenerationInputGenerateParameters` (maintained manually) + adapter_id: Optional[str] = None, + best_of: Optional[int] = None, + decoder_input_details: Optional[bool] = None, + do_sample: Optional[bool] = False, # Manual default value + frequency_penalty: Optional[float] = None, + grammar: Optional[TextGenerationInputGrammarType] = None, + max_new_tokens: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: Optional[bool] = False, # Manual default value + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_n_tokens: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: Optional[bool] = None, + ) -> AsyncIterable[str]: ... + + @overload + async def text_generation( # type: ignore + self, + prompt: str, + *, + details: Literal[True] = ..., + stream: Literal[True] = ..., + model: Optional[str] = None, + # Parameters from `TextGenerationInputGenerateParameters` (maintained manually) + adapter_id: Optional[str] = None, + best_of: Optional[int] = None, + decoder_input_details: Optional[bool] = None, + do_sample: Optional[bool] = False, # Manual default value + frequency_penalty: Optional[float] = None, + grammar: Optional[TextGenerationInputGrammarType] = None, + max_new_tokens: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: Optional[bool] = False, # Manual default value + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_n_tokens: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: Optional[bool] = None, + ) -> AsyncIterable[TextGenerationStreamOutput]: ... + + @overload + async def text_generation( + self, + prompt: str, + *, + details: Literal[True] = ..., + stream: bool = ..., + model: Optional[str] = None, + # Parameters from `TextGenerationInputGenerateParameters` (maintained manually) + adapter_id: Optional[str] = None, + best_of: Optional[int] = None, + decoder_input_details: Optional[bool] = None, + do_sample: Optional[bool] = False, # Manual default value + frequency_penalty: Optional[float] = None, + grammar: Optional[TextGenerationInputGrammarType] = None, + max_new_tokens: Optional[int] = None, + repetition_penalty: Optional[float] = None, + return_full_text: Optional[bool] = False, # Manual default value + seed: Optional[int] = None, + stop: Optional[List[str]] = None, + stop_sequences: Optional[List[str]] = None, # Deprecated, use `stop` instead + temperature: Optional[float] = None, + top_k: Optional[int] = None, + top_n_tokens: Optional[int] = None, + top_p: Optional[float] = None, + truncate: Optional[int] = None, + typical_p: Optional[float] = None, + watermark: Optional[bool] = None, + ) -> Union[TextGenerationOutput, AsyncIterable[TextGenerationStreamOutput]]: ... 
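+
+    # The overloads above only refine the static return type for each combination of
+    # `details` and `stream`; the implementation below handles all four cases at runtime.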
+
+    async def text_generation(
+        self,
+        prompt: str,
+        *,
+        details: bool = False,
+        stream: bool = False,
+        model: Optional[str] = None,
+        # Parameters from `TextGenerationInputGenerateParameters` (maintained manually)
+        adapter_id: Optional[str] = None,
+        best_of: Optional[int] = None,
+        decoder_input_details: Optional[bool] = None,
+        do_sample: Optional[bool] = False,  # Manual default value
+        frequency_penalty: Optional[float] = None,
+        grammar: Optional[TextGenerationInputGrammarType] = None,
+        max_new_tokens: Optional[int] = None,
+        repetition_penalty: Optional[float] = None,
+        return_full_text: Optional[bool] = False,  # Manual default value
+        seed: Optional[int] = None,
+        stop: Optional[List[str]] = None,
+        stop_sequences: Optional[List[str]] = None,  # Deprecated, use `stop` instead
+        temperature: Optional[float] = None,
+        top_k: Optional[int] = None,
+        top_n_tokens: Optional[int] = None,
+        top_p: Optional[float] = None,
+        truncate: Optional[int] = None,
+        typical_p: Optional[float] = None,
+        watermark: Optional[bool] = None,
+    ) -> Union[str, TextGenerationOutput, AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]:
+        """
+        Given a prompt, generate the following text.
+
+        <Tip>
+
+        If you want to generate a response from chat messages, you should use the [`InferenceClient.chat_completion`] method.
+        It accepts a list of messages instead of a single text prompt and handles the chat templating for you.
+
+        </Tip>
+
+        Args:
+            prompt (`str`):
+                Input text.
+            details (`bool`, *optional*):
+                By default, text_generation returns a string. Pass `details=True` if you want a detailed output (tokens,
+                probabilities, seed, finish reason, etc.). Only available for models served with the
+                `text-generation-inference` backend.
+            stream (`bool`, *optional*):
+                By default, text_generation returns the full generated text. Pass `stream=True` if you want a stream of
+                tokens to be returned. Only available for models served with the `text-generation-inference`
+                backend.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None.
+            adapter_id (`str`, *optional*):
+                LoRA adapter id.
+            best_of (`int`, *optional*):
+                Generate best_of sequences and return the one with the highest token logprobs.
+            decoder_input_details (`bool`, *optional*):
+                Return the decoder input token logprobs and ids. You must set `details=True` as well for it to be taken
+                into account. Defaults to `False`.
+            do_sample (`bool`, *optional*):
+                Activate logits sampling.
+            frequency_penalty (`float`, *optional*):
+                Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in
+                the text so far, decreasing the model's likelihood to repeat the same line verbatim.
+            grammar ([`TextGenerationInputGrammarType`], *optional*):
+                Grammar constraints. Can be either a JSONSchema or a regex.
+            max_new_tokens (`int`, *optional*):
+                Maximum number of generated tokens. Defaults to 100.
+            repetition_penalty (`float`, *optional*):
+                The parameter for repetition penalty. 1.0 means no penalty. See [this
+                paper](https://arxiv.org/pdf/1909.05858.pdf) for more details.
+            return_full_text (`bool`, *optional*):
+                Whether to prepend the prompt to the generated text.
+            seed (`int`, *optional*):
+                Random sampling seed.
+            stop (`List[str]`, *optional*):
+                Stop generating tokens if a member of `stop` is generated.
+            stop_sequences (`List[str]`, *optional*):
+                Deprecated argument. Use `stop` instead.
+            temperature (`float`, *optional*):
+                The value used to modulate the logits distribution.
+            top_n_tokens (`int`, *optional*):
+                Return information about the `top_n_tokens` most likely tokens at each generation step, instead of
+                just the sampled token.
+            top_k (`int`, *optional*):
+                The number of highest probability vocabulary tokens to keep for top-k-filtering.
+            top_p (`float`, *optional*):
+                If set to < 1, only the smallest set of most probable tokens with probabilities that add up to `top_p` or
+                higher are kept for generation.
+            truncate (`int`, *optional*):
+                Truncate input tokens to the given size.
+            typical_p (`float`, *optional*):
+                Typical Decoding mass.
+                See [Typical Decoding for Natural Language Generation](https://arxiv.org/abs/2202.00666) for more information.
+            watermark (`bool`, *optional*):
+                Watermarking with [A Watermark for Large Language Models](https://arxiv.org/abs/2301.10226).
+
+        Returns:
+            `Union[str, TextGenerationOutput, AsyncIterable[str], AsyncIterable[TextGenerationStreamOutput]]`:
+            Generated text returned from the server:
+            - if `stream=False` and `details=False`, the generated text is returned as a `str` (default)
+            - if `stream=True` and `details=False`, the generated text is returned token by token as an `AsyncIterable[str]`
+            - if `stream=False` and `details=True`, the generated text is returned with more details as a [`~huggingface_hub.TextGenerationOutput`]
+            - if `details=True` and `stream=True`, the generated text is returned token by token as an async iterable of [`~huggingface_hub.TextGenerationStreamOutput`]
+
+        Raises:
+            `ValidationError`:
+                If input values are not valid. No HTTP call is made to the server.
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+
+        # Case 1: generate text
+        >>> await client.text_generation("The huggingface_hub library is ", max_new_tokens=12)
+        '100% open source and built to be easy to use.'
+
+        # Case 2: iterate over the generated tokens. Useful for large generation.
+        >>> async for token in await client.text_generation("The huggingface_hub library is ", max_new_tokens=12, stream=True):
+        ...     print(token)
+        100
+        %
+        open
+        source
+        and
+        built
+        to
+        be
+        easy
+        to
+        use
+        .
+
+        # Case 3: get more details about the generation process.
+        >>> await client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True)
+        TextGenerationOutput(
+            generated_text='100% open source and built to be easy to use.',
+            details=TextGenerationDetails(
+                finish_reason='length',
+                generated_tokens=12,
+                seed=None,
+                prefill=[
+                    TextGenerationPrefillOutputToken(id=487, text='The', logprob=None),
+                    TextGenerationPrefillOutputToken(id=53789, text=' hugging', logprob=-13.171875),
+                    (...)
+                    TextGenerationPrefillOutputToken(id=204, text=' ', logprob=-7.0390625)
+                ],
+                tokens=[
+                    TokenElement(id=1425, text='100', logprob=-1.0175781, special=False),
+                    TokenElement(id=16, text='%', logprob=-0.0463562, special=False),
+                    (...)
+                    TokenElement(id=25, text='.', logprob=-0.5703125, special=False)
+                ],
+                best_of_sequences=None
+            )
+        )
+
+        # Case 4: iterate over the generated tokens with more details.
+ # Last object is more complete, containing the full generated text and the finish reason. + >>> async for details in await client.text_generation("The huggingface_hub library is ", max_new_tokens=12, details=True, stream=True): + ... print(details) + ... + TextGenerationStreamOutput(token=TokenElement(id=1425, text='100', logprob=-1.0175781, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=16, text='%', logprob=-0.0463562, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=1314, text=' open', logprob=-1.3359375, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=3178, text=' source', logprob=-0.28100586, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=273, text=' and', logprob=-0.5961914, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=3426, text=' built', logprob=-1.9423828, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=271, text=' to', logprob=-1.4121094, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=314, text=' be', logprob=-1.5224609, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=1833, text=' easy', logprob=-2.1132812, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=271, text=' to', logprob=-0.08520508, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement(id=745, text=' use', logprob=-0.39453125, special=False), generated_text=None, details=None) + TextGenerationStreamOutput(token=TokenElement( + id=25, + text='.', + logprob=-0.5703125, + special=False), + generated_text='100% open source and built to be easy to use.', + details=TextGenerationStreamOutputStreamDetails(finish_reason='length', generated_tokens=12, seed=None) + ) + + # Case 5: generate constrained output using grammar + >>> response = await client.text_generation( + ... prompt="I saw a puppy a cat and a raccoon during my bike ride in the park", + ... model="HuggingFaceH4/zephyr-orpo-141b-A35b-v0.1", + ... max_new_tokens=100, + ... repetition_penalty=1.3, + ... grammar={ + ... "type": "json", + ... "value": { + ... "properties": { + ... "location": {"type": "string"}, + ... "activity": {"type": "string"}, + ... "animals_seen": {"type": "integer", "minimum": 1, "maximum": 5}, + ... "animals": {"type": "array", "items": {"type": "string"}}, + ... }, + ... "required": ["location", "activity", "animals_seen", "animals"], + ... }, + ... }, + ... ) + >>> json.loads(response) + { + "activity": "bike riding", + "animals": ["puppy", "cat", "raccoon"], + "animals_seen": 3, + "location": "park" + } + ``` + """ + if decoder_input_details and not details: + warnings.warn( + "`decoder_input_details=True` has been passed to the server but `details=False` is set meaning that" + " the output from the server will be truncated." + ) + decoder_input_details = False + + if stop_sequences is not None: + warnings.warn( + "`stop_sequences` is a deprecated argument for `text_generation` task" + " and will be removed in version '0.28.0'. 
Use `stop` instead.",
+                FutureWarning,
+            )
+            if stop is None:
+                stop = stop_sequences  # use deprecated arg if provided
+
+        # Build payload
+        parameters = {
+            "adapter_id": adapter_id,
+            "best_of": best_of,
+            "decoder_input_details": decoder_input_details,
+            "details": details,
+            "do_sample": do_sample,
+            "frequency_penalty": frequency_penalty,
+            "grammar": grammar,
+            "max_new_tokens": max_new_tokens,
+            "repetition_penalty": repetition_penalty,
+            "return_full_text": return_full_text,
+            "seed": seed,
+            "stop": stop if stop is not None else [],
+            "temperature": temperature,
+            "top_k": top_k,
+            "top_n_tokens": top_n_tokens,
+            "top_p": top_p,
+            "truncate": truncate,
+            "typical_p": typical_p,
+            "watermark": watermark,
+        }
+
+        # Remove some parameters if not a TGI server
+        unsupported_kwargs = _get_unsupported_text_generation_kwargs(model)
+        if len(unsupported_kwargs) > 0:
+            # The server does not support some parameters
+            # => means it is not a TGI server
+            # => remove unsupported parameters and warn the user
+
+            ignored_parameters = []
+            for key in unsupported_kwargs:
+                if parameters.get(key):
+                    ignored_parameters.append(key)
+                parameters.pop(key, None)
+            if len(ignored_parameters) > 0:
+                warnings.warn(
+                    "API endpoint/model for text-generation is not served via TGI. Ignoring the following parameters:"
+                    f" {', '.join(ignored_parameters)}.",
+                    UserWarning,
+                )
+            if details:
+                warnings.warn(
+                    "API endpoint/model for text-generation is not served via TGI. Parameter `details=True` will"
+                    " be ignored, meaning only the generated text will be returned.",
+                    UserWarning,
+                )
+                details = False
+            if stream:
+                raise ValueError(
+                    "API endpoint/model for text-generation is not served via TGI. Cannot return output as a stream."
+                    " Please pass `stream=False` as input."
+                )
+
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="text-generation", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=prompt,
+            parameters=parameters,
+            extra_payload={"stream": stream},
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+
+        # Handle errors separately for more precise error messages
+        try:
+            bytes_output = await self._inner_post(request_parameters, stream=stream)
+        except _import_aiohttp().ClientResponseError as e:
+            match = MODEL_KWARGS_NOT_USED_REGEX.search(e.response_error_payload["error"])
+            if e.status == 400 and match:
+                unused_params = [kwarg.strip("' ") for kwarg in match.group(1).split(",")]
+                _set_unsupported_text_generation_kwargs(model, unused_params)
+                return await self.text_generation(  # type: ignore
+                    prompt=prompt,
+                    details=details,
+                    stream=stream,
+                    model=model_id,
+                    adapter_id=adapter_id,
+                    best_of=best_of,
+                    decoder_input_details=decoder_input_details,
+                    do_sample=do_sample,
+                    frequency_penalty=frequency_penalty,
+                    grammar=grammar,
+                    max_new_tokens=max_new_tokens,
+                    repetition_penalty=repetition_penalty,
+                    return_full_text=return_full_text,
+                    seed=seed,
+                    stop=stop,
+                    temperature=temperature,
+                    top_k=top_k,
+                    top_n_tokens=top_n_tokens,
+                    top_p=top_p,
+                    truncate=truncate,
+                    typical_p=typical_p,
+                    watermark=watermark,
+                )
+            raise_text_generation_error(e)
+
+        # Parse output
+        if stream:
+            return _async_stream_text_generation_response(bytes_output, details)  # type: ignore
+
+        data = _bytes_to_dict(bytes_output)  # type: ignore[arg-type]
+
+        # Data can be a single element (dict) or a list of dicts, in which case we select the first element.
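+        # e.g. a non-TGI backend may return {"generated_text": "..."} directly or wrap it
+        # as [{"generated_text": "..."}], hence the normalization below.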
+ if isinstance(data, list): + data = data[0] + response = provider_helper.get_response(data, request_parameters) + return TextGenerationOutput.parse_obj_as_instance(response) if details else response["generated_text"] + + async def text_to_image( + self, + prompt: str, + *, + negative_prompt: Optional[str] = None, + height: Optional[int] = None, + width: Optional[int] = None, + num_inference_steps: Optional[int] = None, + guidance_scale: Optional[float] = None, + model: Optional[str] = None, + scheduler: Optional[str] = None, + seed: Optional[int] = None, + extra_body: Optional[Dict[str, Any]] = None, + ) -> "Image": + """ + Generate an image based on a given text using a specified model. + + + + You must have `PIL` installed if you want to work with images (`pip install Pillow`). + + + + + You can pass provider-specific parameters to the model by using the `extra_body` argument. + + + Args: + prompt (`str`): + The prompt to generate an image from. + negative_prompt (`str`, *optional*): + One prompt to guide what NOT to include in image generation. + height (`int`, *optional*): + The height in pixels of the output image + width (`int`, *optional*): + The width in pixels of the output image + num_inference_steps (`int`, *optional*): + The number of denoising steps. More denoising steps usually lead to a higher quality image at the + expense of slower inference. + guidance_scale (`float`, *optional*): + A higher guidance scale value encourages the model to generate images closely linked to the text + prompt, but values too high may cause saturation and other artifacts. + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. If not provided, the default recommended text-to-image model will be used. + Defaults to None. + scheduler (`str`, *optional*): + Override the scheduler with a compatible one. + seed (`int`, *optional*): + Seed for the random number generator. + extra_body (`Dict[str, Any]`, *optional*): + Additional provider-specific parameters to pass to the model. Refer to the provider's documentation + for supported parameters. + + Returns: + `Image`: The generated image. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + + >>> image = await client.text_to_image("An astronaut riding a horse on the moon.") + >>> image.save("astronaut.png") + + >>> image = await client.text_to_image( + ... "An astronaut riding a horse on the moon.", + ... negative_prompt="low resolution, blurry", + ... model="stabilityai/stable-diffusion-2-1", + ... ) + >>> image.save("better_astronaut.png") + ``` + Example using a third-party provider directly. Usage will be billed on your fal.ai account. + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient( + ... provider="fal-ai", # Use fal.ai provider + ... api_key="fal-ai-api-key", # Pass your fal.ai API key + ... ) + >>> image = client.text_to_image( + ... "A majestic lion in a fantasy forest", + ... model="black-forest-labs/FLUX.1-schnell", + ... ) + >>> image.save("lion.png") + ``` + + Example using a third-party provider through Hugging Face Routing. Usage will be billed on your Hugging Face account. 
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient(
+        ...     provider="replicate",  # Use replicate provider
+        ...     api_key="hf_...",  # Pass your HF token
+        ... )
+        >>> image = client.text_to_image(
+        ...     "An astronaut riding a horse on the moon.",
+        ...     model="black-forest-labs/FLUX.1-dev",
+        ... )
+        >>> image.save("astronaut.png")
+        ```
+
+        Example using Replicate provider with extra parameters
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient(
+        ...     provider="replicate",  # Use replicate provider
+        ...     api_key="hf_...",  # Pass your HF token
+        ... )
+        >>> image = client.text_to_image(
+        ...     "An astronaut riding a horse on the moon.",
+        ...     model="black-forest-labs/FLUX.1-schnell",
+        ...     extra_body={"output_quality": 100},
+        ... )
+        >>> image.save("astronaut.png")
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="text-to-image", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=prompt,
+            parameters={
+                "negative_prompt": negative_prompt,
+                "height": height,
+                "width": width,
+                "num_inference_steps": num_inference_steps,
+                "guidance_scale": guidance_scale,
+                "scheduler": scheduler,
+                "seed": seed,
+                **(extra_body or {}),
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        response = provider_helper.get_response(response)
+        return _bytes_to_image(response)
+
+    async def text_to_video(
+        self,
+        prompt: str,
+        *,
+        model: Optional[str] = None,
+        guidance_scale: Optional[float] = None,
+        negative_prompt: Optional[List[str]] = None,
+        num_frames: Optional[float] = None,
+        num_inference_steps: Optional[int] = None,
+        seed: Optional[int] = None,
+        extra_body: Optional[Dict[str, Any]] = None,
+    ) -> bytes:
+        """
+        Generate a video based on a given text.
+
+
+        You can pass provider-specific parameters to the model by using the `extra_body` argument.
+
+
+        Args:
+            prompt (`str`):
+                The prompt to generate a video from.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. If not provided, the default recommended text-to-video model will be used.
+                Defaults to None.
+            guidance_scale (`float`, *optional*):
+                A higher guidance scale value encourages the model to generate videos closely linked to the text
+                prompt, but values too high may cause saturation and other artifacts.
+            negative_prompt (`List[str]`, *optional*):
+                One or several prompts to guide what NOT to include in video generation.
+            num_frames (`float`, *optional*):
+                The num_frames parameter determines how many video frames are generated.
+            num_inference_steps (`int`, *optional*):
+                The number of denoising steps. More denoising steps usually lead to a higher quality video at the
+                expense of slower inference.
+            seed (`int`, *optional*):
+                Seed for the random number generator.
+            extra_body (`Dict[str, Any]`, *optional*):
+                Additional provider-specific parameters to pass to the model. Refer to the provider's documentation
+                for supported parameters.
+
+        Returns:
+            `bytes`: The generated video.
+
+        Example:
+
+        Example using a third-party provider directly. Usage will be billed on your fal.ai account.
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient(
+        ...     provider="fal-ai",  # Using fal.ai provider
+        ...     api_key="fal-ai-api-key",  # Pass your fal.ai API key
+        ... )
+        >>> video = client.text_to_video(
+        ...     "A majestic lion running in a fantasy forest",
+        ...     model="tencent/HunyuanVideo",
+        ... )
+        >>> with open("lion.mp4", "wb") as file:
+        ...     file.write(video)
+        ```
+
+        Example using a third-party provider through Hugging Face Routing. Usage will be billed on your Hugging Face account.
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient(
+        ...     provider="replicate",  # Using replicate provider
+        ...     api_key="hf_...",  # Pass your HF token
+        ... )
+        >>> video = client.text_to_video(
+        ...     "A cat running in a park",
+        ...     model="genmo/mochi-1-preview",
+        ... )
+        >>> with open("cat.mp4", "wb") as file:
+        ...     file.write(video)
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="text-to-video", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=prompt,
+            parameters={
+                "guidance_scale": guidance_scale,
+                "negative_prompt": negative_prompt,
+                "num_frames": num_frames,
+                "num_inference_steps": num_inference_steps,
+                "seed": seed,
+                **(extra_body or {}),
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        response = provider_helper.get_response(response, request_parameters)
+        return response
+
+    async def text_to_speech(
+        self,
+        text: str,
+        *,
+        model: Optional[str] = None,
+        do_sample: Optional[bool] = None,
+        early_stopping: Optional[Union[bool, "TextToSpeechEarlyStoppingEnum"]] = None,
+        epsilon_cutoff: Optional[float] = None,
+        eta_cutoff: Optional[float] = None,
+        max_length: Optional[int] = None,
+        max_new_tokens: Optional[int] = None,
+        min_length: Optional[int] = None,
+        min_new_tokens: Optional[int] = None,
+        num_beam_groups: Optional[int] = None,
+        num_beams: Optional[int] = None,
+        penalty_alpha: Optional[float] = None,
+        temperature: Optional[float] = None,
+        top_k: Optional[int] = None,
+        top_p: Optional[float] = None,
+        typical_p: Optional[float] = None,
+        use_cache: Optional[bool] = None,
+        extra_body: Optional[Dict[str, Any]] = None,
+    ) -> bytes:
+        """
+        Synthesize audio of a voice pronouncing a given text.
+
+
+        You can pass provider-specific parameters to the model by using the `extra_body` argument.
+
+
+        Args:
+            text (`str`):
+                The text to synthesize.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. If not provided, the default recommended text-to-speech model will be used.
+                Defaults to None.
+            do_sample (`bool`, *optional*):
+                Whether to use sampling instead of greedy decoding when generating new tokens.
+            early_stopping (`Union[bool, "TextToSpeechEarlyStoppingEnum"]`, *optional*):
+                Controls the stopping condition for beam-based methods.
+            epsilon_cutoff (`float`, *optional*):
+                If set to a float strictly between 0 and 1, only tokens with a conditional probability greater than
+                epsilon_cutoff will be sampled. In the paper, suggested values range from 3e-4 to 9e-4, depending on
+                the size of the model. See [Truncation Sampling as Language Model
+                Desmoothing](https://hf.co/papers/2210.15191) for more details.
+            eta_cutoff (`float`, *optional*):
+                Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to a float strictly
+                between 0 and 1, a token is only considered if it is greater than either eta_cutoff or sqrt(eta_cutoff)
The latter term is intuitively the expected next token + probability, scaled by sqrt(eta_cutoff). In the paper, suggested values range from 3e-4 to 2e-3, + depending on the size of the model. See [Truncation Sampling as Language Model + Desmoothing](https://hf.co/papers/2210.15191) for more details. + max_length (`int`, *optional*): + The maximum length (in tokens) of the generated text, including the input. + max_new_tokens (`int`, *optional*): + The maximum number of tokens to generate. Takes precedence over max_length. + min_length (`int`, *optional*): + The minimum length (in tokens) of the generated text, including the input. + min_new_tokens (`int`, *optional*): + The minimum number of tokens to generate. Takes precedence over min_length. + num_beam_groups (`int`, *optional*): + Number of groups to divide num_beams into in order to ensure diversity among different groups of beams. + See [this paper](https://hf.co/papers/1610.02424) for more details. + num_beams (`int`, *optional*): + Number of beams to use for beam search. + penalty_alpha (`float`, *optional*): + The value balances the model confidence and the degeneration penalty in contrastive search decoding. + temperature (`float`, *optional*): + The value used to modulate the next token probabilities. + top_k (`int`, *optional*): + The number of highest probability vocabulary tokens to keep for top-k-filtering. + top_p (`float`, *optional*): + If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to + top_p or higher are kept for generation. + typical_p (`float`, *optional*): + Local typicality measures how similar the conditional probability of predicting a target token next is + to the expected conditional probability of predicting a random token next, given the partial text + already generated. If set to float < 1, the smallest set of the most locally typical tokens with + probabilities that add up to typical_p or higher are kept for generation. See [this + paper](https://hf.co/papers/2202.00666) for more details. + use_cache (`bool`, *optional*): + Whether the model should use the past last key/values attentions to speed up decoding + extra_body (`Dict[str, Any]`, *optional*): + Additional provider-specific parameters to pass to the model. Refer to the provider's documentation + for supported parameters. + Returns: + `bytes`: The generated audio. + + Raises: + [`InferenceTimeoutError`]: + If the model is unavailable or the request times out. + `aiohttp.ClientResponseError`: + If the request fails with an HTTP error status code other than HTTP 503. + + Example: + ```py + # Must be run in an async context + >>> from pathlib import Path + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient() + + >>> audio = await client.text_to_speech("Hello world") + >>> Path("hello_world.flac").write_bytes(audio) + ``` + + Example using a third-party provider directly. Usage will be billed on your Replicate account. + ```py + >>> from huggingface_hub import InferenceClient + >>> client = InferenceClient( + ... provider="replicate", + ... api_key="your-replicate-api-key", # Pass your Replicate API key directly + ... ) + >>> audio = client.text_to_speech( + ... text="Hello world", + ... model="OuteAI/OuteTTS-0.3-500M", + ... ) + >>> Path("hello_world.flac").write_bytes(audio) + ``` + + Example using a third-party provider through Hugging Face Routing. Usage will be billed on your Hugging Face account. 
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient(
+        ...     provider="replicate",
+        ...     api_key="hf_...",  # Pass your HF token
+        ... )
+        >>> audio = client.text_to_speech(
+        ...     text="Hello world",
+        ...     model="OuteAI/OuteTTS-0.3-500M",
+        ... )
+        >>> Path("hello_world.flac").write_bytes(audio)
+        ```
+        Example using Replicate provider with extra parameters
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> client = InferenceClient(
+        ...     provider="replicate",  # Use replicate provider
+        ...     api_key="hf_...",  # Pass your HF token
+        ... )
+        >>> audio = client.text_to_speech(
+        ...     "Hello, my name is Kororo, an awesome text-to-speech model.",
+        ...     model="hexgrad/Kokoro-82M",
+        ...     extra_body={"voice": "af_nicole"},
+        ... )
+        >>> Path("hello.flac").write_bytes(audio)
+        ```
+
+        Example music-gen using "YuE-s1-7B-anneal-en-cot" on fal.ai
+        ```py
+        >>> from huggingface_hub import InferenceClient
+        >>> lyrics = '''
+        ... [verse]
+        ... In the town where I was born
+        ... Lived a man who sailed to sea
+        ... And he told us of his life
+        ... In the land of submarines
+        ... So we sailed on to the sun
+        ... 'Til we found a sea of green
+        ... And we lived beneath the waves
+        ... In our yellow submarine
+
+        ... [chorus]
+        ... We all live in a yellow submarine
+        ... Yellow submarine, yellow submarine
+        ... We all live in a yellow submarine
+        ... Yellow submarine, yellow submarine
+        ... '''
+        >>> genres = "pavarotti-style tenor voice"
+        >>> client = InferenceClient(
+        ...     provider="fal-ai",
+        ...     model="m-a-p/YuE-s1-7B-anneal-en-cot",
+        ...     api_key=...,
+        ... )
+        >>> audio = client.text_to_speech(lyrics, extra_body={"genres": genres})
+        >>> with open("output.mp3", "wb") as f:
+        ...     f.write(audio)
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="text-to-speech", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=text,
+            parameters={
+                "do_sample": do_sample,
+                "early_stopping": early_stopping,
+                "epsilon_cutoff": epsilon_cutoff,
+                "eta_cutoff": eta_cutoff,
+                "max_length": max_length,
+                "max_new_tokens": max_new_tokens,
+                "min_length": min_length,
+                "min_new_tokens": min_new_tokens,
+                "num_beam_groups": num_beam_groups,
+                "num_beams": num_beams,
+                "penalty_alpha": penalty_alpha,
+                "temperature": temperature,
+                "top_k": top_k,
+                "top_p": top_p,
+                "typical_p": typical_p,
+                "use_cache": use_cache,
+                **(extra_body or {}),
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        response = provider_helper.get_response(response)
+        return response
+
+    async def token_classification(
+        self,
+        text: str,
+        *,
+        model: Optional[str] = None,
+        aggregation_strategy: Optional["TokenClassificationAggregationStrategy"] = None,
+        ignore_labels: Optional[List[str]] = None,
+        stride: Optional[int] = None,
+    ) -> List[TokenClassificationOutputElement]:
+        """
+        Perform token classification on the given text.
+        Usually used for sentence parsing, either grammatical or Named Entity Recognition (NER), to understand keywords contained within text.
+
+        Args:
+            text (`str`):
+                A string to be classified.
+            model (`str`, *optional*):
+                The model to use for the token classification task. Can be a model ID hosted on the Hugging Face Hub or a URL to
+                a deployed Inference Endpoint. If not provided, the default recommended token classification model will be used.
+                Defaults to None.
+            aggregation_strategy (`"TokenClassificationAggregationStrategy"`, *optional*):
+                The strategy used to fuse tokens based on model predictions.
+            ignore_labels (`List[str]`, *optional*):
+                A list of labels to ignore.
+            stride (`int`, *optional*):
+                The number of overlapping tokens between chunks when splitting the input text.
+
+        Returns:
+            `List[TokenClassificationOutputElement]`: List of [`TokenClassificationOutputElement`] items containing the entity group, confidence score, word, start and end index.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> await client.token_classification("My name is Sarah Jessica Parker but you can call me Jessica")
+        [
+            TokenClassificationOutputElement(
+                entity_group='PER',
+                score=0.9971321225166321,
+                word='Sarah Jessica Parker',
+                start=11,
+                end=31,
+            ),
+            TokenClassificationOutputElement(
+                entity_group='PER',
+                score=0.9773476123809814,
+                word='Jessica',
+                start=52,
+                end=59,
+            )
+        ]
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="token-classification", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=text,
+            parameters={
+                "aggregation_strategy": aggregation_strategy,
+                "ignore_labels": ignore_labels,
+                "stride": stride,
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        return TokenClassificationOutputElement.parse_obj_as_list(response)
+
+    async def translation(
+        self,
+        text: str,
+        *,
+        model: Optional[str] = None,
+        src_lang: Optional[str] = None,
+        tgt_lang: Optional[str] = None,
+        clean_up_tokenization_spaces: Optional[bool] = None,
+        truncation: Optional["TranslationTruncationStrategy"] = None,
+        generate_parameters: Optional[Dict[str, Any]] = None,
+    ) -> TranslationOutput:
+        """
+        Convert text from one language to another.
+
+        Check out https://huggingface.co/tasks/translation for more information on how to choose the best model for
+        your specific use case. Source and target languages usually depend on the model.
+        However, it is possible to specify source and target languages for certain models. If you are working with one of these models,
+        you can use the `src_lang` and `tgt_lang` arguments to pass the relevant information.
+
+        Args:
+            text (`str`):
+                A string to be translated.
+            model (`str`, *optional*):
+                The model to use for the translation task. Can be a model ID hosted on the Hugging Face Hub or a URL to
+                a deployed Inference Endpoint. If not provided, the default recommended translation model will be used.
+                Defaults to None.
+            src_lang (`str`, *optional*):
+                The source language of the text. Required for models that can translate from multiple languages.
+            tgt_lang (`str`, *optional*):
+                Target language to translate to. Required for models that can translate to multiple languages.
+            clean_up_tokenization_spaces (`bool`, *optional*):
+                Whether to clean up the potential extra spaces in the text output.
+            truncation (`"TranslationTruncationStrategy"`, *optional*):
+                The truncation strategy to use.
+            generate_parameters (`Dict[str, Any]`, *optional*):
+                Additional parametrization of the text generation algorithm.
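+                For example, a dict such as `{"max_new_tokens": 50, "do_sample": False}` (illustrative only; the
+                accepted keys depend on the underlying model's generation parameters).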
+
+        Returns:
+            [`TranslationOutput`]: The generated translated text.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+            `ValueError`:
+                If only one of the `src_lang` and `tgt_lang` arguments is provided.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> await client.translation("My name is Wolfgang and I live in Berlin")
+        'Mein Name ist Wolfgang und ich lebe in Berlin.'
+        >>> await client.translation("My name is Wolfgang and I live in Berlin", model="Helsinki-NLP/opus-mt-en-fr")
+        TranslationOutput(translation_text="Je m'appelle Wolfgang et je vis à Berlin.")
+        ```
+
+        Specifying languages:
+        ```py
+        >>> client.translation("My name is Sarah Jessica Parker but you can call me Jessica", model="facebook/mbart-large-50-many-to-many-mmt", src_lang="en_XX", tgt_lang="fr_XX")
+        "Mon nom est Sarah Jessica Parker mais vous pouvez m'appeler Jessica"
+        ```
+        """
+        # Throw error if only one of `src_lang` and `tgt_lang` was given
+        if src_lang is not None and tgt_lang is None:
+            raise ValueError("You cannot specify `src_lang` without specifying `tgt_lang`.")
+
+        if src_lang is None and tgt_lang is not None:
+            raise ValueError("You cannot specify `tgt_lang` without specifying `src_lang`.")
+
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="translation", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=text,
+            parameters={
+                "src_lang": src_lang,
+                "tgt_lang": tgt_lang,
+                "clean_up_tokenization_spaces": clean_up_tokenization_spaces,
+                "truncation": truncation,
+                "generate_parameters": generate_parameters,
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        return TranslationOutput.parse_obj_as_list(response)[0]
+
+    async def visual_question_answering(
+        self,
+        image: ContentT,
+        question: str,
+        *,
+        model: Optional[str] = None,
+        top_k: Optional[int] = None,
+    ) -> List[VisualQuestionAnsweringOutputElement]:
+        """
+        Answering open-ended questions based on an image.
+
+        Args:
+            image (`Union[str, Path, bytes, BinaryIO]`):
+                The input image for the context. It can be raw bytes, an image file, or a URL to an online image.
+            question (`str`):
+                Question to be answered.
+            model (`str`, *optional*):
+                The model to use for the visual question answering task. Can be a model ID hosted on the Hugging Face Hub or a URL to
+                a deployed Inference Endpoint. If not provided, the default recommended visual question answering model will be used.
+                Defaults to None.
+            top_k (`int`, *optional*):
+                The number of answers to return (chosen by order of likelihood). Note that fewer than
+                top_k answers are returned if there are not enough options available within the context.
+        Returns:
+            `List[VisualQuestionAnsweringOutputElement]`: a list of [`VisualQuestionAnsweringOutputElement`] items containing the predicted label and associated probability.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> await client.visual_question_answering(
+        ...     image="https://huggingface.co/datasets/mishig/sample_images/resolve/main/tiger.jpg",
+        ...     question="What is the animal doing?"
+        ... )
+        [
+            VisualQuestionAnsweringOutputElement(score=0.778609573841095, answer='laying down'),
+            VisualQuestionAnsweringOutputElement(score=0.6957435607910156, answer='sitting'),
+        ]
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="visual-question-answering", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=image,
+            parameters={"top_k": top_k},
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+            extra_payload={"question": question, "image": _b64_encode(image)},
+        )
+        response = await self._inner_post(request_parameters)
+        return VisualQuestionAnsweringOutputElement.parse_obj_as_list(response)
+
+    async def zero_shot_classification(
+        self,
+        text: str,
+        candidate_labels: List[str],
+        *,
+        multi_label: Optional[bool] = False,
+        hypothesis_template: Optional[str] = None,
+        model: Optional[str] = None,
+    ) -> List[ZeroShotClassificationOutputElement]:
+        """
+        Provide as input a text and a set of candidate labels to classify the input text.
+
+        Args:
+            text (`str`):
+                The input text to classify.
+            candidate_labels (`List[str]`):
+                The set of possible class labels to classify the text into.
+            multi_label (`bool`, *optional*):
+                Whether multiple candidate labels can be true. If false, the scores are normalized such that the sum of
+                the label likelihoods for each sequence is 1. If true, the labels are considered independent and
+                probabilities are normalized for each candidate.
+            hypothesis_template (`str`, *optional*):
+                The sentence used in conjunction with `candidate_labels` to attempt the text classification by
+                replacing the placeholder with the candidate labels.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. This parameter overrides the model defined at the instance level. If not provided, the default recommended zero-shot classification model will be used.
+
+
+        Returns:
+            `List[ZeroShotClassificationOutputElement]`: List of [`ZeroShotClassificationOutputElement`] items containing the predicted labels and their confidence.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example with `multi_label=False`:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> text = (
+        ...     "A new model offers an explanation for how the Galilean satellites formed around the solar system's"
+        ...     " largest world. Konstantin Batygin did not set out to solve one of the solar system's most puzzling"
+        ...     " mysteries when he went for a run up a hill in Nice, France."
+        ... )
+        >>> labels = ["space & cosmos", "scientific discovery", "microbiology", "robots", "archeology"]
+        >>> await client.zero_shot_classification(text, labels)
+        [
+            ZeroShotClassificationOutputElement(label='scientific discovery', score=0.7961668968200684),
+            ZeroShotClassificationOutputElement(label='space & cosmos', score=0.18570658564567566),
+            ZeroShotClassificationOutputElement(label='microbiology', score=0.00730885099619627),
+            ZeroShotClassificationOutputElement(label='archeology', score=0.006258360575884581),
+            ZeroShotClassificationOutputElement(label='robots', score=0.004559356719255447),
+        ]
+        >>> await client.zero_shot_classification(text, labels, multi_label=True)
+        [
+            ZeroShotClassificationOutputElement(label='scientific discovery', score=0.9829297661781311),
+            ZeroShotClassificationOutputElement(label='space & cosmos', score=0.755190908908844),
+            ZeroShotClassificationOutputElement(label='microbiology', score=0.0005462635890580714),
+            ZeroShotClassificationOutputElement(label='archeology', score=0.00047131875180639327),
+            ZeroShotClassificationOutputElement(label='robots', score=0.00030448526376858354),
+        ]
+        ```
+
+        Example with `multi_label=True` and a custom `hypothesis_template`:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> await client.zero_shot_classification(
+        ...     text="I really like our dinner and I'm very happy. I don't like the weather though.",
+        ...     candidate_labels=["positive", "negative", "pessimistic", "optimistic"],
+        ...     multi_label=True,
+        ...     hypothesis_template="This text is {} towards the weather"
+        ... )
+        [
+            ZeroShotClassificationOutputElement(label='negative', score=0.9231801629066467),
+            ZeroShotClassificationOutputElement(label='pessimistic', score=0.8760990500450134),
+            ZeroShotClassificationOutputElement(label='optimistic', score=0.0008674879791215062),
+            ZeroShotClassificationOutputElement(label='positive', score=0.0005250611575320363)
+        ]
+        ```
+        """
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="zero-shot-classification", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=text,
+            parameters={
+                "candidate_labels": candidate_labels,
+                "multi_label": multi_label,
+                "hypothesis_template": hypothesis_template,
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        output = _bytes_to_dict(response)
+        return [
+            ZeroShotClassificationOutputElement.parse_obj_as_instance({"label": label, "score": score})
+            for label, score in zip(output["labels"], output["scores"])
+        ]
+
+    async def zero_shot_image_classification(
+        self,
+        image: ContentT,
+        candidate_labels: List[str],
+        *,
+        model: Optional[str] = None,
+        hypothesis_template: Optional[str] = None,
+        # deprecated argument
+        labels: List[str] = None,  # type: ignore
+    ) -> List[ZeroShotImageClassificationOutputElement]:
+        """
+        Provide input image and text labels to predict text labels for the image.
+
+        Args:
+            image (`Union[str, Path, bytes, BinaryIO]`):
+                The input image to classify. It can be raw bytes, an image file, or a URL to an online image.
+            candidate_labels (`List[str]`):
+                The candidate labels for this image.
+            labels (`List[str]`, *optional*):
+                (deprecated) List of string possible labels. There must be at least 2 labels.
+            model (`str`, *optional*):
+                The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed
+                Inference Endpoint. This parameter overrides the model defined at the instance level. If not provided, the default recommended zero-shot image classification model will be used.
+            hypothesis_template (`str`, *optional*):
+                The sentence used in conjunction with `candidate_labels` to attempt the image classification by
+                replacing the placeholder with the candidate labels.
+
+        Returns:
+            `List[ZeroShotImageClassificationOutputElement]`: List of [`ZeroShotImageClassificationOutputElement`] items containing the predicted labels and their confidence.
+
+        Raises:
+            [`InferenceTimeoutError`]:
+                If the model is unavailable or the request times out.
+            `aiohttp.ClientResponseError`:
+                If the request fails with an HTTP error status code other than HTTP 503.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+
+        >>> await client.zero_shot_image_classification(
+        ...     "https://upload.wikimedia.org/wikipedia/commons/thumb/4/43/Cute_dog.jpg/320px-Cute_dog.jpg",
+        ...     labels=["dog", "cat", "horse"],
+        ... )
+        [ZeroShotImageClassificationOutputElement(label='dog', score=0.956),...]
+        ```
+        """
+        # Raise ValueError if fewer than 2 labels are provided
+        if len(candidate_labels) < 2:
+            raise ValueError("You must specify at least 2 classes to compare.")
+
+        model_id = model or self.model
+        provider_helper = get_provider_helper(self.provider, task="zero-shot-image-classification", model=model_id)
+        request_parameters = provider_helper.prepare_request(
+            inputs=image,
+            parameters={
+                "candidate_labels": candidate_labels,
+                "hypothesis_template": hypothesis_template,
+            },
+            headers=self.headers,
+            model=model_id,
+            api_key=self.token,
+        )
+        response = await self._inner_post(request_parameters)
+        return ZeroShotImageClassificationOutputElement.parse_obj_as_list(response)
+
+    @_deprecate_method(
+        version="0.35.0",
+        message=(
+            "HF Inference API is getting revamped and will only support warm models in the future (no cold start allowed)."
+            " Use `HfApi.list_models(..., inference_provider='...')` to list warm models per provider."
+        ),
+    )
+    async def list_deployed_models(
+        self, frameworks: Union[None, str, Literal["all"], List[str]] = None
+    ) -> Dict[str, List[str]]:
+        """
+        List models deployed on the HF Serverless Inference API service.
+
+        This helper checks deployed models framework by framework. By default, it will check the 4 main frameworks that
+        are supported and account for 95% of the hosted models. However, if you want a complete list of models you can
+        specify `frameworks="all"` as input. Alternatively, if you know beforehand which framework you are interested
+        in, you can also restrict the search to this one (e.g. `frameworks="text-generation-inference"`). The more
+        frameworks are checked, the more time it will take.
+
+
+
+        This endpoint method does not return a live list of all models available for the HF Inference API service.
+        It searches over a cached list of models that were recently available and the list may not be up to date.
+        If you want to know the live status of a specific model, use [`~InferenceClient.get_model_status`].
+
+
+
+
+
+        This endpoint method is mostly useful for discoverability. If you already know which model you want to use and want to
+        check its availability, you can directly use [`~InferenceClient.get_model_status`].
+
+
+
+        Args:
+            frameworks (`Literal["all"]` or `List[str]` or `str`, *optional*):
+                The frameworks to filter on. By default only a subset of the available frameworks are tested. If set to
+                "all", all available frameworks will be tested. It is also possible to provide a single framework or a
+                custom set of frameworks to check.
+
+        Returns:
+            `Dict[str, List[str]]`: A dictionary mapping task names to a sorted list of model IDs.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+
+        # Discover zero-shot-classification models currently deployed
+        >>> models = await client.list_deployed_models()
+        >>> models["zero-shot-classification"]
+        ['Narsil/deberta-large-mnli-zero-cls', 'facebook/bart-large-mnli', ...]
+
+        # List from only 1 framework
+        >>> await client.list_deployed_models("text-generation-inference")
+        {'text-generation': ['bigcode/starcoder', 'meta-llama/Llama-2-70b-chat-hf', ...], ...}
+        ```
+        """
+        if self.provider != "hf-inference":
+            raise ValueError(f"Listing deployed models is not supported on '{self.provider}'.")
+
+        # Resolve which frameworks to check
+        if frameworks is None:
+            frameworks = constants.MAIN_INFERENCE_API_FRAMEWORKS
+        elif frameworks == "all":
+            frameworks = constants.ALL_INFERENCE_API_FRAMEWORKS
+        elif isinstance(frameworks, str):
+            frameworks = [frameworks]
+        frameworks = list(set(frameworks))
+
+        # Fetch them iteratively
+        models_by_task: Dict[str, List[str]] = {}
+
+        def _unpack_response(framework: str, items: List[Dict]) -> None:
+            for model in items:
+                if framework == "sentence-transformers":
+                    # Models running with the `sentence-transformers` framework can work with both tasks even if not
+                    # branded as such in the API response
+                    models_by_task.setdefault("feature-extraction", []).append(model["model_id"])
+                    models_by_task.setdefault("sentence-similarity", []).append(model["model_id"])
+                else:
+                    models_by_task.setdefault(model["task"], []).append(model["model_id"])
+
+        for framework in frameworks:
+            response = get_session().get(
+                f"{constants.INFERENCE_ENDPOINT}/framework/{framework}", headers=build_hf_headers(token=self.token)
+            )
+            hf_raise_for_status(response)
+            _unpack_response(framework, response.json())
+
+        # Sort alphabetically for discoverability and return
+        for task, models in models_by_task.items():
+            models_by_task[task] = sorted(set(models), key=lambda x: x.lower())
+        return models_by_task
+
+    def _get_client_session(self, headers: Optional[Dict] = None) -> "ClientSession":
+        aiohttp = _import_aiohttp()
+        client_headers = self.headers.copy()
+        if headers is not None:
+            client_headers.update(headers)
+
+        # Return a new aiohttp ClientSession with correct settings.
+        session = aiohttp.ClientSession(
+            headers=client_headers,
+            cookies=self.cookies,
+            timeout=aiohttp.ClientTimeout(self.timeout),
+            trust_env=self.trust_env,
+        )
+
+        # Keep track of sessions to close them later
+        self._sessions[session] = set()
+
+        # Override the `._request` method to register responses to be closed
+        session._wrapped_request = session._request
+
+        async def _request(method, url, **kwargs):
+            response = await session._wrapped_request(method, url, **kwargs)
+            self._sessions[session].add(response)
+            return response
+
+        session._request = _request
+
+        # Override the 'close' method to
+        # 1. close ongoing responses
+        # 2.
deregister the session when closed + session._close = session.close + + async def close_session(): + for response in self._sessions[session]: + response.close() + await session._close() + self._sessions.pop(session, None) + + session.close = close_session + return session + + async def get_endpoint_info(self, *, model: Optional[str] = None) -> Dict[str, Any]: + """ + Get information about the deployed endpoint. + + This endpoint is only available on endpoints powered by Text-Generation-Inference (TGI) or Text-Embedding-Inference (TEI). + Endpoints powered by `transformers` return an empty payload. + + Args: + model (`str`, *optional*): + The model to use for inference. Can be a model ID hosted on the Hugging Face Hub or a URL to a deployed + Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None. + + Returns: + `Dict[str, Any]`: Information about the endpoint. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient("meta-llama/Meta-Llama-3-70B-Instruct") + >>> await client.get_endpoint_info() + { + 'model_id': 'meta-llama/Meta-Llama-3-70B-Instruct', + 'model_sha': None, + 'model_dtype': 'torch.float16', + 'model_device_type': 'cuda', + 'model_pipeline_tag': None, + 'max_concurrent_requests': 128, + 'max_best_of': 2, + 'max_stop_sequences': 4, + 'max_input_length': 8191, + 'max_total_tokens': 8192, + 'waiting_served_ratio': 0.3, + 'max_batch_total_tokens': 1259392, + 'max_waiting_tokens': 20, + 'max_batch_size': None, + 'validation_workers': 32, + 'max_client_batch_size': 4, + 'version': '2.0.2', + 'sha': 'dccab72549635c7eb5ddb17f43f0b7cdff07c214', + 'docker_label': 'sha-dccab72' + } + ``` + """ + if self.provider != "hf-inference": + raise ValueError(f"Getting endpoint info is not supported on '{self.provider}'.") + + model = model or self.model + if model is None: + raise ValueError("Model id not provided.") + if model.startswith(("http://", "https://")): + url = model.rstrip("/") + "/info" + else: + url = f"{constants.INFERENCE_ENDPOINT}/models/{model}/info" + + async with self._get_client_session(headers=build_hf_headers(token=self.token)) as client: + response = await client.get(url, proxy=self.proxies) + response.raise_for_status() + return await response.json() + + async def health_check(self, model: Optional[str] = None) -> bool: + """ + Check the health of the deployed endpoint. + + Health check is only available with Inference Endpoints powered by Text-Generation-Inference (TGI) or Text-Embedding-Inference (TEI). + For Inference API, please use [`InferenceClient.get_model_status`] instead. + + Args: + model (`str`, *optional*): + URL of the Inference Endpoint. This parameter overrides the model defined at the instance level. Defaults to None. + + Returns: + `bool`: True if everything is working fine. + + Example: + ```py + # Must be run in an async context + >>> from huggingface_hub import AsyncInferenceClient + >>> client = AsyncInferenceClient("https://jzgu0buei5.us-east-1.aws.endpoints.huggingface.cloud") + >>> await client.health_check() + True + ``` + """ + if self.provider != "hf-inference": + raise ValueError(f"Health check is not supported on '{self.provider}'.") + + model = model or self.model + if model is None: + raise ValueError("Model id not provided.") + if not model.startswith(("http://", "https://")): + raise ValueError( + "Model must be an Inference Endpoint URL. 
For serverless Inference API, please use `InferenceClient.get_model_status`."
+            )
+        url = model.rstrip("/") + "/health"
+
+        async with self._get_client_session(headers=build_hf_headers(token=self.token)) as client:
+            response = await client.get(url, proxy=self.proxies)
+            return response.status == 200
+
+    @_deprecate_method(
+        version="0.35.0",
+        message=(
+            "HF Inference API is getting revamped and will only support warm models in the future (no cold start allowed)."
+            " Use `HfApi.model_info` to get the model status both with HF Inference API and external providers."
+        ),
+    )
+    async def get_model_status(self, model: Optional[str] = None) -> ModelStatus:
+        """
+        Get the status of a model hosted on the HF Inference API.
+
+
+
+        This endpoint is mostly useful when you already know which model you want to use and want to check its
+        availability. If you want to discover already deployed models, you should rather use [`~InferenceClient.list_deployed_models`].
+
+
+
+        Args:
+            model (`str`, *optional*):
+                Identifier of the model for which the status will be checked. If no model is provided,
+                the model associated with this instance of [`InferenceClient`] will be used. Only the HF Inference API service can be checked, so the
+                identifier cannot be a URL.
+
+
+        Returns:
+            [`ModelStatus`]: An instance of the ModelStatus dataclass containing information
+            about the state of the model: load, state, compute type and framework.
+
+        Example:
+        ```py
+        # Must be run in an async context
+        >>> from huggingface_hub import AsyncInferenceClient
+        >>> client = AsyncInferenceClient()
+        >>> await client.get_model_status("meta-llama/Meta-Llama-3-8B-Instruct")
+        ModelStatus(loaded=True, state='Loaded', compute_type='gpu', framework='text-generation-inference')
+        ```
+        """
+        if self.provider != "hf-inference":
+            raise ValueError(f"Getting model status is not supported on '{self.provider}'.")
+
+        model = model or self.model
+        if model is None:
+            raise ValueError("Model id not provided.")
+        if model.startswith("https://"):
+            raise NotImplementedError("Model status is only available for Inference API endpoints.")
+        url = f"{constants.INFERENCE_ENDPOINT}/status/{model}"
+
+        async with self._get_client_session(headers=build_hf_headers(token=self.token)) as client:
+            response = await client.get(url, proxy=self.proxies)
+            response.raise_for_status()
+            response_data = await response.json()
+
+        if "error" in response_data:
+            raise ValueError(response_data["error"])
+
+        return ModelStatus(
+            loaded=response_data["loaded"],
+            state=response_data["state"],
+            compute_type=response_data["compute_type"],
+            framework=response_data["framework"],
+        )
+
+    @property
+    def chat(self) -> "ProxyClientChat":
+        return ProxyClientChat(self)
+
+
+class _ProxyClient:
+    """Proxy class to be able to call `client.chat.completions.create(...)` as with the OpenAI client."""
+
+    def __init__(self, client: AsyncInferenceClient):
+        self._client = client
+
+
+class ProxyClientChat(_ProxyClient):
+    """Proxy class to be able to call `client.chat.completions.create(...)` as with the OpenAI client."""
+
+    @property
+    def completions(self) -> "ProxyClientChatCompletions":
+        return ProxyClientChatCompletions(self._client)
+
+
+class ProxyClientChatCompletions(_ProxyClient):
+    """Proxy class to be able to call `client.chat.completions.create(...)` as with the OpenAI client."""
+
+    @property
+    def create(self):
+        return self._client.chat_completion
diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/chat_completion.py
b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/chat_completion.py new file mode 100644 index 0000000000000000000000000000000000000000..fe455ee71084920a3b8b246d875b8ab1ef555ad9 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/chat_completion.py @@ -0,0 +1,345 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from typing import Any, Dict, List, Literal, Optional, Union + +from .base import BaseInferenceType, dataclass_with_extra + + +@dataclass_with_extra +class ChatCompletionInputURL(BaseInferenceType): + url: str + + +ChatCompletionInputMessageChunkType = Literal["text", "image_url"] + + +@dataclass_with_extra +class ChatCompletionInputMessageChunk(BaseInferenceType): + type: "ChatCompletionInputMessageChunkType" + image_url: Optional[ChatCompletionInputURL] = None + text: Optional[str] = None + + +@dataclass_with_extra +class ChatCompletionInputFunctionDefinition(BaseInferenceType): + name: str + parameters: Any + description: Optional[str] = None + + +@dataclass_with_extra +class ChatCompletionInputToolCall(BaseInferenceType): + function: ChatCompletionInputFunctionDefinition + id: str + type: str + + +@dataclass_with_extra +class ChatCompletionInputMessage(BaseInferenceType): + role: str + content: Optional[Union[List[ChatCompletionInputMessageChunk], str]] = None + name: Optional[str] = None + tool_calls: Optional[List[ChatCompletionInputToolCall]] = None + + +@dataclass_with_extra +class ChatCompletionInputJSONSchema(BaseInferenceType): + name: str + """ + The name of the response format. + """ + description: Optional[str] = None + """ + A description of what the response format is for, used by the model to determine + how to respond in the format. + """ + schema: Optional[Dict[str, object]] = None + """ + The schema for the response format, described as a JSON Schema object. Learn how + to build JSON schemas [here](https://json-schema.org/). + """ + strict: Optional[bool] = None + """ + Whether to enable strict schema adherence when generating the output. If set to + true, the model will always follow the exact schema defined in the `schema` + field. + """ + + +@dataclass_with_extra +class ChatCompletionInputResponseFormatText(BaseInferenceType): + type: Literal["text"] + + +@dataclass_with_extra +class ChatCompletionInputResponseFormatJSONSchema(BaseInferenceType): + type: Literal["json_schema"] + json_schema: ChatCompletionInputJSONSchema + + +@dataclass_with_extra +class ChatCompletionInputResponseFormatJSONObject(BaseInferenceType): + type: Literal["json_object"] + + +ChatCompletionInputGrammarType = Union[ + ChatCompletionInputResponseFormatText, + ChatCompletionInputResponseFormatJSONSchema, + ChatCompletionInputResponseFormatJSONObject, +] + + +@dataclass_with_extra +class ChatCompletionInputStreamOptions(BaseInferenceType): + include_usage: Optional[bool] = None + """If set, an additional chunk will be streamed before the data: [DONE] message. The usage + field on this chunk shows the token usage statistics for the entire request, and the + choices field will always be an empty array. All other chunks will also include a usage + field, but with a null value. 
+ """ + + +@dataclass_with_extra +class ChatCompletionInputFunctionName(BaseInferenceType): + name: str + + +@dataclass_with_extra +class ChatCompletionInputToolChoiceClass(BaseInferenceType): + function: ChatCompletionInputFunctionName + + +ChatCompletionInputToolChoiceEnum = Literal["auto", "none", "required"] + + +@dataclass_with_extra +class ChatCompletionInputTool(BaseInferenceType): + function: ChatCompletionInputFunctionDefinition + type: str + + +@dataclass_with_extra +class ChatCompletionInput(BaseInferenceType): + """Chat Completion Input. + Auto-generated from TGI specs. + For more details, check out + https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. + """ + + messages: List[ChatCompletionInputMessage] + """A list of messages comprising the conversation so far.""" + frequency_penalty: Optional[float] = None + """Number between -2.0 and 2.0. Positive values penalize new tokens based on their existing + frequency in the text so far, + decreasing the model's likelihood to repeat the same line verbatim. + """ + logit_bias: Optional[List[float]] = None + """UNUSED + Modify the likelihood of specified tokens appearing in the completion. Accepts a JSON + object that maps tokens + (specified by their token ID in the tokenizer) to an associated bias value from -100 to + 100. Mathematically, + the bias is added to the logits generated by the model prior to sampling. The exact + effect will vary per model, + but values between -1 and 1 should decrease or increase likelihood of selection; values + like -100 or 100 should + result in a ban or exclusive selection of the relevant token. + """ + logprobs: Optional[bool] = None + """Whether to return log probabilities of the output tokens or not. If true, returns the log + probabilities of each + output token returned in the content of message. + """ + max_tokens: Optional[int] = None + """The maximum number of tokens that can be generated in the chat completion.""" + model: Optional[str] = None + """[UNUSED] ID of the model to use. See the model endpoint compatibility table for details + on which models work with the Chat API. + """ + n: Optional[int] = None + """UNUSED + How many chat completion choices to generate for each input message. Note that you will + be charged based on the + number of generated tokens across all of the choices. Keep n as 1 to minimize costs. + """ + presence_penalty: Optional[float] = None + """Number between -2.0 and 2.0. Positive values penalize new tokens based on whether they + appear in the text so far, + increasing the model's likelihood to talk about new topics + """ + response_format: Optional[ChatCompletionInputGrammarType] = None + seed: Optional[int] = None + stop: Optional[List[str]] = None + """Up to 4 sequences where the API will stop generating further tokens.""" + stream: Optional[bool] = None + stream_options: Optional[ChatCompletionInputStreamOptions] = None + temperature: Optional[float] = None + """What sampling temperature to use, between 0 and 2. Higher values like 0.8 will make the + output more random, while + lower values like 0.2 will make it more focused and deterministic. + We generally recommend altering this or `top_p` but not both. + """ + tool_choice: Optional[Union[ChatCompletionInputToolChoiceClass, "ChatCompletionInputToolChoiceEnum"]] = None + tool_prompt: Optional[str] = None + """A prompt to be appended before the tools""" + tools: Optional[List[ChatCompletionInputTool]] = None + """A list of tools the model may call. 
Currently, only functions are supported as a tool. + Use this to provide a list of + functions the model may generate JSON inputs for. + """ + top_logprobs: Optional[int] = None + """An integer between 0 and 5 specifying the number of most likely tokens to return at each + token position, each with + an associated log probability. logprobs must be set to true if this parameter is used. + """ + top_p: Optional[float] = None + """An alternative to sampling with temperature, called nucleus sampling, where the model + considers the results of the + tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% + probability mass are considered. + """ + + +@dataclass_with_extra +class ChatCompletionOutputTopLogprob(BaseInferenceType): + logprob: float + token: str + + +@dataclass_with_extra +class ChatCompletionOutputLogprob(BaseInferenceType): + logprob: float + token: str + top_logprobs: List[ChatCompletionOutputTopLogprob] + + +@dataclass_with_extra +class ChatCompletionOutputLogprobs(BaseInferenceType): + content: List[ChatCompletionOutputLogprob] + + +@dataclass_with_extra +class ChatCompletionOutputFunctionDefinition(BaseInferenceType): + arguments: str + name: str + description: Optional[str] = None + + +@dataclass_with_extra +class ChatCompletionOutputToolCall(BaseInferenceType): + function: ChatCompletionOutputFunctionDefinition + id: str + type: str + + +@dataclass_with_extra +class ChatCompletionOutputMessage(BaseInferenceType): + role: str + content: Optional[str] = None + tool_call_id: Optional[str] = None + tool_calls: Optional[List[ChatCompletionOutputToolCall]] = None + + +@dataclass_with_extra +class ChatCompletionOutputComplete(BaseInferenceType): + finish_reason: str + index: int + message: ChatCompletionOutputMessage + logprobs: Optional[ChatCompletionOutputLogprobs] = None + + +@dataclass_with_extra +class ChatCompletionOutputUsage(BaseInferenceType): + completion_tokens: int + prompt_tokens: int + total_tokens: int + + +@dataclass_with_extra +class ChatCompletionOutput(BaseInferenceType): + """Chat Completion Output. + Auto-generated from TGI specs. + For more details, check out + https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. 
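+
+    Example (an illustrative sketch, not part of the generated spec; it assumes
+    `InferenceClient` from `huggingface_hub`, whose non-streamed `chat_completion`
+    call returns this dataclass):
+
+        output = InferenceClient().chat_completion(
+            messages=[{"role": "user", "content": "What is MCP?"}],
+            max_tokens=64,
+        )
+        print(output.choices[0].message.content)  # assistant reply
+        print(output.usage.total_tokens)          # token accounting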
+ """ + + choices: List[ChatCompletionOutputComplete] + created: int + id: str + model: str + system_fingerprint: str + usage: ChatCompletionOutputUsage + + +@dataclass_with_extra +class ChatCompletionStreamOutputFunction(BaseInferenceType): + arguments: str + name: Optional[str] = None + + +@dataclass_with_extra +class ChatCompletionStreamOutputDeltaToolCall(BaseInferenceType): + function: ChatCompletionStreamOutputFunction + id: str + index: int + type: str + + +@dataclass_with_extra +class ChatCompletionStreamOutputDelta(BaseInferenceType): + role: str + content: Optional[str] = None + tool_call_id: Optional[str] = None + tool_calls: Optional[List[ChatCompletionStreamOutputDeltaToolCall]] = None + + +@dataclass_with_extra +class ChatCompletionStreamOutputTopLogprob(BaseInferenceType): + logprob: float + token: str + + +@dataclass_with_extra +class ChatCompletionStreamOutputLogprob(BaseInferenceType): + logprob: float + token: str + top_logprobs: List[ChatCompletionStreamOutputTopLogprob] + + +@dataclass_with_extra +class ChatCompletionStreamOutputLogprobs(BaseInferenceType): + content: List[ChatCompletionStreamOutputLogprob] + + +@dataclass_with_extra +class ChatCompletionStreamOutputChoice(BaseInferenceType): + delta: ChatCompletionStreamOutputDelta + index: int + finish_reason: Optional[str] = None + logprobs: Optional[ChatCompletionStreamOutputLogprobs] = None + + +@dataclass_with_extra +class ChatCompletionStreamOutputUsage(BaseInferenceType): + completion_tokens: int + prompt_tokens: int + total_tokens: int + + +@dataclass_with_extra +class ChatCompletionStreamOutput(BaseInferenceType): + """Chat Completion Stream Output. + Auto-generated from TGI specs. + For more details, check out + https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-tgi-import.ts. + """ + + choices: List[ChatCompletionStreamOutputChoice] + created: int + id: str + model: str + system_fingerprint: str + usage: Optional[ChatCompletionStreamOutputUsage] = None diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py new file mode 100644 index 0000000000000000000000000000000000000000..dfcdc56bc507e50280d38e0f63b024ada6a7ea94 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/fill_mask.py @@ -0,0 +1,47 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from typing import Any, List, Optional + +from .base import BaseInferenceType, dataclass_with_extra + + +@dataclass_with_extra +class FillMaskParameters(BaseInferenceType): + """Additional inference parameters for Fill Mask""" + + targets: Optional[List[str]] = None + """When passed, the model will limit the scores to the passed targets instead of looking up + in the whole vocabulary. If the provided targets are not in the model vocab, they will be + tokenized and the first resulting token will be used (with a warning, and that might be + slower). 
+ """ + top_k: Optional[int] = None + """When passed, overrides the number of predictions to return.""" + + +@dataclass_with_extra +class FillMaskInput(BaseInferenceType): + """Inputs for Fill Mask inference""" + + inputs: str + """The text with masked tokens""" + parameters: Optional[FillMaskParameters] = None + """Additional inference parameters for Fill Mask""" + + +@dataclass_with_extra +class FillMaskOutputElement(BaseInferenceType): + """Outputs of inference for the Fill Mask task""" + + score: float + """The corresponding probability""" + sequence: str + """The corresponding input with the mask token prediction.""" + token: int + """The predicted token id (to replace the masked one).""" + token_str: Any + fill_mask_output_token_str: Optional[str] = None + """The predicted token (to replace the masked one).""" diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_classification.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_classification.py new file mode 100644 index 0000000000000000000000000000000000000000..0fdda6c83ff4c7aee5dc7794f0530e89d6b43047 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_classification.py @@ -0,0 +1,43 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from typing import Literal, Optional + +from .base import BaseInferenceType, dataclass_with_extra + + +ImageClassificationOutputTransform = Literal["sigmoid", "softmax", "none"] + + +@dataclass_with_extra +class ImageClassificationParameters(BaseInferenceType): + """Additional inference parameters for Image Classification""" + + function_to_apply: Optional["ImageClassificationOutputTransform"] = None + """The function to apply to the model outputs in order to retrieve the scores.""" + top_k: Optional[int] = None + """When specified, limits the output to the top K most probable classes.""" + + +@dataclass_with_extra +class ImageClassificationInput(BaseInferenceType): + """Inputs for Image Classification inference""" + + inputs: str + """The input image data as a base64-encoded string. If no `parameters` are provided, you can + also provide the image data as a raw bytes payload. + """ + parameters: Optional[ImageClassificationParameters] = None + """Additional inference parameters for Image Classification""" + + +@dataclass_with_extra +class ImageClassificationOutputElement(BaseInferenceType): + """Outputs of inference for the Image Classification task""" + + label: str + """The predicted class label.""" + score: float + """The corresponding probability.""" diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py new file mode 100644 index 0000000000000000000000000000000000000000..3dbf61db83ec2ae6ceafd901c4425567cd2e5b03 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/image_segmentation.py @@ -0,0 +1,51 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. 
+# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from typing import Literal, Optional + +from .base import BaseInferenceType, dataclass_with_extra + + +ImageSegmentationSubtask = Literal["instance", "panoptic", "semantic"] + + +@dataclass_with_extra +class ImageSegmentationParameters(BaseInferenceType): + """Additional inference parameters for Image Segmentation""" + + mask_threshold: Optional[float] = None + """Threshold to use when turning the predicted masks into binary values.""" + overlap_mask_area_threshold: Optional[float] = None + """Mask overlap threshold to eliminate small, disconnected segments.""" + subtask: Optional["ImageSegmentationSubtask"] = None + """Segmentation task to be performed, depending on model capabilities.""" + threshold: Optional[float] = None + """Probability threshold to filter out predicted masks.""" + + +@dataclass_with_extra +class ImageSegmentationInput(BaseInferenceType): + """Inputs for Image Segmentation inference""" + + inputs: str + """The input image data as a base64-encoded string. If no `parameters` are provided, you can + also provide the image data as a raw bytes payload. + """ + parameters: Optional[ImageSegmentationParameters] = None + """Additional inference parameters for Image Segmentation""" + + +@dataclass_with_extra +class ImageSegmentationOutputElement(BaseInferenceType): + """Outputs of inference for the Image Segmentation task + A predicted mask / segment + """ + + label: str + """The label of the predicted segment.""" + mask: str + """The corresponding mask as a black-and-white image (base64-encoded).""" + score: Optional[float] = None + """The score or confidence degree the model has.""" diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/question_answering.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..014ab41893c560a2c266bc04a1d60bc933be31c7 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/question_answering.py @@ -0,0 +1,74 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from typing import Optional + +from .base import BaseInferenceType, dataclass_with_extra + + +@dataclass_with_extra +class QuestionAnsweringInputData(BaseInferenceType): + """One (context, question) pair to answer""" + + context: str + """The context to be used for answering the question""" + question: str + """The question to be answered""" + + +@dataclass_with_extra +class QuestionAnsweringParameters(BaseInferenceType): + """Additional inference parameters for Question Answering""" + + align_to_words: Optional[bool] = None + """Attempts to align the answer to real words. Improves quality on space separated + languages. Might hurt on non-space-separated languages (like Japanese or Chinese) + """ + doc_stride: Optional[int] = None + """If the context is too long to fit with the question for the model, it will be split in + several chunks with some overlap. This argument controls the size of that overlap. 
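+
+    Example (an illustrative sketch; the numbers are placeholders): keep 128
+    overlapping tokens between consecutive chunks of a long context:
+
+        params = QuestionAnsweringParameters(doc_stride=128, max_seq_len=384)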
+ """ + handle_impossible_answer: Optional[bool] = None + """Whether to accept impossible as an answer.""" + max_answer_len: Optional[int] = None + """The maximum length of predicted answers (e.g., only answers with a shorter length are + considered). + """ + max_question_len: Optional[int] = None + """The maximum length of the question after tokenization. It will be truncated if needed.""" + max_seq_len: Optional[int] = None + """The maximum length of the total sentence (context + question) in tokens of each chunk + passed to the model. The context will be split in several chunks (using docStride as + overlap) if needed. + """ + top_k: Optional[int] = None + """The number of answers to return (will be chosen by order of likelihood). Note that we + return less than topk answers if there are not enough options available within the + context. + """ + + +@dataclass_with_extra +class QuestionAnsweringInput(BaseInferenceType): + """Inputs for Question Answering inference""" + + inputs: QuestionAnsweringInputData + """One (context, question) pair to answer""" + parameters: Optional[QuestionAnsweringParameters] = None + """Additional inference parameters for Question Answering""" + + +@dataclass_with_extra +class QuestionAnsweringOutputElement(BaseInferenceType): + """Outputs of inference for the Question Answering task""" + + answer: str + """The answer to the question.""" + end: int + """The character position in the input where the answer ends.""" + score: float + """The probability associated to the answer.""" + start: int + """The character position in the input where the answer begins.""" diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/text2text_generation.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/text2text_generation.py new file mode 100644 index 0000000000000000000000000000000000000000..34ac74e21e8a30d889f1a251f648d4c365325be6 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/text2text_generation.py @@ -0,0 +1,42 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. 
+from typing import Any, Dict, Literal, Optional + +from .base import BaseInferenceType, dataclass_with_extra + + +Text2TextGenerationTruncationStrategy = Literal["do_not_truncate", "longest_first", "only_first", "only_second"] + + +@dataclass_with_extra +class Text2TextGenerationParameters(BaseInferenceType): + """Additional inference parameters for Text2text Generation""" + + clean_up_tokenization_spaces: Optional[bool] = None + """Whether to clean up the potential extra spaces in the text output.""" + generate_parameters: Optional[Dict[str, Any]] = None + """Additional parametrization of the text generation algorithm""" + truncation: Optional["Text2TextGenerationTruncationStrategy"] = None + """The truncation strategy to use""" + + +@dataclass_with_extra +class Text2TextGenerationInput(BaseInferenceType): + """Inputs for Text2text Generation inference""" + + inputs: str + """The input text data""" + parameters: Optional[Text2TextGenerationParameters] = None + """Additional inference parameters for Text2text Generation""" + + +@dataclass_with_extra +class Text2TextGenerationOutput(BaseInferenceType): + """Outputs of inference for the Text2text Generation task""" + + generated_text: Any + text2_text_generation_output_generated_text: Optional[str] = None + """The generated text.""" diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/text_to_audio.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/text_to_audio.py new file mode 100644 index 0000000000000000000000000000000000000000..87af80a598af70800b8386f034c65de0b397479e --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/text_to_audio.py @@ -0,0 +1,99 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from typing import Any, Literal, Optional, Union + +from .base import BaseInferenceType, dataclass_with_extra + + +TextToAudioEarlyStoppingEnum = Literal["never"] + + +@dataclass_with_extra +class TextToAudioGenerationParameters(BaseInferenceType): + """Parametrization of the text generation process""" + + do_sample: Optional[bool] = None + """Whether to use sampling instead of greedy decoding when generating new tokens.""" + early_stopping: Optional[Union[bool, "TextToAudioEarlyStoppingEnum"]] = None + """Controls the stopping condition for beam-based methods.""" + epsilon_cutoff: Optional[float] = None + """If set to float strictly between 0 and 1, only tokens with a conditional probability + greater than epsilon_cutoff will be sampled. In the paper, suggested values range from + 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language + Model Desmoothing](https://hf.co/papers/2210.15191) for more details. + """ + eta_cutoff: Optional[float] = None + """Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to + float strictly between 0 and 1, a token is only considered if it is greater than either + eta_cutoff or sqrt(eta_cutoff) * exp(-entropy(softmax(next_token_logits))). The latter + term is intuitively the expected next token probability, scaled by sqrt(eta_cutoff). In + the paper, suggested values range from 3e-4 to 2e-3, depending on the size of the model. 
+ See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191) + for more details. + """ + max_length: Optional[int] = None + """The maximum length (in tokens) of the generated text, including the input.""" + max_new_tokens: Optional[int] = None + """The maximum number of tokens to generate. Takes precedence over max_length.""" + min_length: Optional[int] = None + """The minimum length (in tokens) of the generated text, including the input.""" + min_new_tokens: Optional[int] = None + """The minimum number of tokens to generate. Takes precedence over min_length.""" + num_beam_groups: Optional[int] = None + """Number of groups to divide num_beams into in order to ensure diversity among different + groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details. + """ + num_beams: Optional[int] = None + """Number of beams to use for beam search.""" + penalty_alpha: Optional[float] = None + """The value balances the model confidence and the degeneration penalty in contrastive + search decoding. + """ + temperature: Optional[float] = None + """The value used to modulate the next token probabilities.""" + top_k: Optional[int] = None + """The number of highest probability vocabulary tokens to keep for top-k-filtering.""" + top_p: Optional[float] = None + """If set to float < 1, only the smallest set of most probable tokens with probabilities + that add up to top_p or higher are kept for generation. + """ + typical_p: Optional[float] = None + """Local typicality measures how similar the conditional probability of predicting a target + token next is to the expected conditional probability of predicting a random token next, + given the partial text already generated. If set to float < 1, the smallest set of the + most locally typical tokens with probabilities that add up to typical_p or higher are + kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details. + """ + use_cache: Optional[bool] = None + """Whether the model should use the past last key/values attentions to speed up decoding""" + + +@dataclass_with_extra +class TextToAudioParameters(BaseInferenceType): + """Additional inference parameters for Text To Audio""" + + generation_parameters: Optional[TextToAudioGenerationParameters] = None + """Parametrization of the text generation process""" + + +@dataclass_with_extra +class TextToAudioInput(BaseInferenceType): + """Inputs for Text To Audio inference""" + + inputs: str + """The input text data""" + parameters: Optional[TextToAudioParameters] = None + """Additional inference parameters for Text To Audio""" + + +@dataclass_with_extra +class TextToAudioOutput(BaseInferenceType): + """Outputs of inference for the Text To Audio task""" + + audio: Any + """The generated audio waveform.""" + sampling_rate: float + """The sampling rate of the generated audio waveform.""" diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/text_to_image.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/text_to_image.py new file mode 100644 index 0000000000000000000000000000000000000000..20c963731371339975019ca5d40c95303d79209b --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/text_to_image.py @@ -0,0 +1,50 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. 
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from typing import Any, Optional
+
+from .base import BaseInferenceType, dataclass_with_extra
+
+
+@dataclass_with_extra
+class TextToImageParameters(BaseInferenceType):
+    """Additional inference parameters for Text To Image"""
+
+    guidance_scale: Optional[float] = None
+    """A higher guidance scale value encourages the model to generate images closely linked to
+    the text prompt, but values too high may cause saturation and other artifacts.
+    """
+    height: Optional[int] = None
+    """The height in pixels of the output image"""
+    negative_prompt: Optional[str] = None
+    """One prompt to guide what NOT to include in image generation."""
+    num_inference_steps: Optional[int] = None
+    """The number of denoising steps. More denoising steps usually lead to a higher quality
+    image at the expense of slower inference.
+    """
+    scheduler: Optional[str] = None
+    """Override the scheduler with a compatible one."""
+    seed: Optional[int] = None
+    """Seed for the random number generator."""
+    width: Optional[int] = None
+    """The width in pixels of the output image"""
+
+
+@dataclass_with_extra
+class TextToImageInput(BaseInferenceType):
+    """Inputs for Text To Image inference"""
+
+    inputs: str
+    """The input text data (sometimes called "prompt")"""
+    parameters: Optional[TextToImageParameters] = None
+    """Additional inference parameters for Text To Image"""
+
+
+@dataclass_with_extra
+class TextToImageOutput(BaseInferenceType):
+    """Outputs of inference for the Text To Image task"""
+
+    image: Any
+    """The generated image returned as raw bytes in the payload."""
diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/text_to_video.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/text_to_video.py
new file mode 100644
index 0000000000000000000000000000000000000000..e54a1bc094e4aaf7132e502aa268bc052ab34f0a
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/text_to_video.py
@@ -0,0 +1,46 @@
+# Inference code generated from the JSON schema spec in @huggingface/tasks.
+#
+# See:
+# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts
+# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks.
+from typing import Any, List, Optional
+
+from .base import BaseInferenceType, dataclass_with_extra
+
+
+@dataclass_with_extra
+class TextToVideoParameters(BaseInferenceType):
+    """Additional inference parameters for Text To Video"""
+
+    guidance_scale: Optional[float] = None
+    """A higher guidance scale value encourages the model to generate videos closely linked to
+    the text prompt, but values too high may cause saturation and other artifacts.
+    """
+    negative_prompt: Optional[List[str]] = None
+    """One or several prompts to guide what NOT to include in video generation."""
+    num_frames: Optional[float] = None
+    """The num_frames parameter determines how many video frames are generated."""
+    num_inference_steps: Optional[int] = None
+    """The number of denoising steps. More denoising steps usually lead to a higher quality
+    video at the expense of slower inference.
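+
+    Example (an illustrative sketch; the values are placeholders):
+
+        params = TextToVideoParameters(num_frames=16, num_inference_steps=25)
+        payload = TextToVideoInput(inputs="a drone shot over a forest", parameters=params)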
+ """ + seed: Optional[int] = None + """Seed for the random number generator.""" + + +@dataclass_with_extra +class TextToVideoInput(BaseInferenceType): + """Inputs for Text To Video inference""" + + inputs: str + """The input text data (sometimes called "prompt")""" + parameters: Optional[TextToVideoParameters] = None + """Additional inference parameters for Text To Video""" + + +@dataclass_with_extra +class TextToVideoOutput(BaseInferenceType): + """Outputs of inference for the Text To Video task""" + + video: Any + """The generated video returned as raw bytes in the payload.""" diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/visual_question_answering.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/visual_question_answering.py new file mode 100644 index 0000000000000000000000000000000000000000..d368f1621289bc11a17be3e590cf8a040019d455 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_generated/types/visual_question_answering.py @@ -0,0 +1,49 @@ +# Inference code generated from the JSON schema spec in @huggingface/tasks. +# +# See: +# - script: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/scripts/inference-codegen.ts +# - specs: https://github.com/huggingface/huggingface.js/tree/main/packages/tasks/src/tasks. +from typing import Any, Optional + +from .base import BaseInferenceType, dataclass_with_extra + + +@dataclass_with_extra +class VisualQuestionAnsweringInputData(BaseInferenceType): + """One (image, question) pair to answer""" + + image: Any + """The image.""" + question: str + """The question to answer based on the image.""" + + +@dataclass_with_extra +class VisualQuestionAnsweringParameters(BaseInferenceType): + """Additional inference parameters for Visual Question Answering""" + + top_k: Optional[int] = None + """The number of answers to return (will be chosen by order of likelihood). Note that we + return less than topk answers if there are not enough options available within the + context. 
+ """ + + +@dataclass_with_extra +class VisualQuestionAnsweringInput(BaseInferenceType): + """Inputs for Visual Question Answering inference""" + + inputs: VisualQuestionAnsweringInputData + """One (image, question) pair to answer""" + parameters: Optional[VisualQuestionAnsweringParameters] = None + """Additional inference parameters for Visual Question Answering""" + + +@dataclass_with_extra +class VisualQuestionAnsweringOutputElement(BaseInferenceType): + """Outputs of inference for the Visual Question Answering task""" + + score: float + """The associated score / probability""" + answer: Optional[str] = None + """The answer to the question""" diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/__init__.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/_cli_hacks.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/_cli_hacks.py new file mode 100644 index 0000000000000000000000000000000000000000..44113b9101a78c7fc5c7abfb58965ed0c22032a1 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/_cli_hacks.py @@ -0,0 +1,88 @@ +import asyncio +import sys +from functools import partial + +import typer + + +def _patch_anyio_open_process(): + """ + Patch anyio.open_process to allow detached processes on Windows and Unix-like systems. + + This is necessary to prevent the MCP client from being interrupted by Ctrl+C when running in the CLI. + """ + import subprocess + + import anyio + + if getattr(anyio, "_tiny_agents_patched", False): + return + anyio._tiny_agents_patched = True + + original_open_process = anyio.open_process + + if sys.platform == "win32": + # On Windows, we need to set the creation flags to create a new process group + + async def open_process_in_new_group(*args, **kwargs): + """ + Wrapper for open_process to handle Windows-specific process creation flags. + """ + # Ensure we pass the creation flags for Windows + kwargs.setdefault("creationflags", subprocess.CREATE_NEW_PROCESS_GROUP) + return await original_open_process(*args, **kwargs) + + anyio.open_process = open_process_in_new_group + else: + # For Unix-like systems, we can use setsid to create a new session + async def open_process_in_new_group(*args, **kwargs): + """ + Wrapper for open_process to handle Unix-like systems with start_new_session=True. + """ + kwargs.setdefault("start_new_session", True) + return await original_open_process(*args, **kwargs) + + anyio.open_process = open_process_in_new_group + + +async def _async_prompt(exit_event: asyncio.Event, prompt: str = "» ") -> str: + """ + Asynchronous prompt function that reads input from stdin without blocking. + + This function is designed to work in an asynchronous context, allowing the event loop to gracefully stop it (e.g. on Ctrl+C). + + Alternatively, we could use https://github.com/vxgmichel/aioconsole but that would be an additional dependency. 
+ """ + loop = asyncio.get_event_loop() + + if sys.platform == "win32": + # Windows: Use run_in_executor to avoid blocking the event loop + # Degraded solution: this is not ideal as user will have to CTRL+C once more to stop the prompt (and it'll not be graceful) + return await loop.run_in_executor(None, partial(typer.prompt, prompt, prompt_suffix=" ")) + else: + # UNIX-like: Use loop.add_reader for non-blocking stdin read + future = loop.create_future() + + def on_input(): + line = sys.stdin.readline() + loop.remove_reader(sys.stdin) + future.set_result(line) + + print(prompt, end=" ", flush=True) + loop.add_reader(sys.stdin, on_input) # not supported on Windows + + # Wait for user input or exit event + # Wait until either the user hits enter or exit_event is set + exit_task = asyncio.create_task(exit_event.wait()) + await asyncio.wait( + [future, exit_task], + return_when=asyncio.FIRST_COMPLETED, + ) + + # Check which one has been triggered + if exit_event.is_set(): + future.cancel() + return "" + + line = await future + return line.strip() diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/agent.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/agent.py new file mode 100644 index 0000000000000000000000000000000000000000..4f88016ba709d15445525e2bd252febb5b2287ab --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/agent.py @@ -0,0 +1,103 @@ +from __future__ import annotations + +import asyncio +from typing import AsyncGenerator, Dict, Iterable, List, Optional, Union + +from huggingface_hub import ChatCompletionInputMessage, ChatCompletionStreamOutput, MCPClient + +from .._providers import PROVIDER_OR_POLICY_T +from .constants import DEFAULT_SYSTEM_PROMPT, EXIT_LOOP_TOOLS, MAX_NUM_TURNS +from .types import ServerConfig + + +class Agent(MCPClient): + """ + Implementation of a Simple Agent, which is a simple while loop built right on top of an [`MCPClient`]. + + + + This class is experimental and might be subject to breaking changes in the future without prior notice. + + + + Args: + model (`str`, *optional*): + The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct` + or a URL to a deployed Inference Endpoint or other local or remote endpoint. + servers (`Iterable[Dict]`): + MCP servers to connect to. Each server is a dictionary containing a `type` key and a `config` key. The `type` key can be `"stdio"` or `"sse"`, and the `config` key is a dictionary of arguments for the server. + provider (`str`, *optional*): + Name of the provider to use for inference. Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers. + If model is a URL or `base_url` is passed, then `provider` is not used. + base_url (`str`, *optional*): + The base URL to run inference. Defaults to None. + api_key (`str`, *optional*): + Token to use for authentication. Will default to the locally Hugging Face saved token if not provided. You can also use your own provider API key to interact directly with the provider's service. + prompt (`str`, *optional*): + The system prompt to use for the agent. Defaults to the default system prompt in `constants.py`. 
+ """ + + def __init__( + self, + *, + model: Optional[str] = None, + servers: Iterable[ServerConfig], + provider: Optional[PROVIDER_OR_POLICY_T] = None, + base_url: Optional[str] = None, + api_key: Optional[str] = None, + prompt: Optional[str] = None, + ): + super().__init__(model=model, provider=provider, base_url=base_url, api_key=api_key) + self._servers_cfg = list(servers) + self.messages: List[Union[Dict, ChatCompletionInputMessage]] = [ + {"role": "system", "content": prompt or DEFAULT_SYSTEM_PROMPT} + ] + + async def load_tools(self) -> None: + for cfg in self._servers_cfg: + await self.add_mcp_server(**cfg) + + async def run( + self, + user_input: str, + *, + abort_event: Optional[asyncio.Event] = None, + ) -> AsyncGenerator[Union[ChatCompletionStreamOutput, ChatCompletionInputMessage], None]: + """ + Run the agent with the given user input. + + Args: + user_input (`str`): + The user input to run the agent with. + abort_event (`asyncio.Event`, *optional*): + An event that can be used to abort the agent. If the event is set, the agent will stop running. + """ + self.messages.append({"role": "user", "content": user_input}) + + num_turns: int = 0 + next_turn_should_call_tools = True + + while True: + if abort_event and abort_event.is_set(): + return + + async for item in self.process_single_turn_with_tools( + self.messages, + exit_loop_tools=EXIT_LOOP_TOOLS, + exit_if_first_chunk_no_tool=(num_turns > 0 and next_turn_should_call_tools), + ): + yield item + + num_turns += 1 + last = self.messages[-1] + + if last.get("role") == "tool" and last.get("name") in {t.function.name for t in EXIT_LOOP_TOOLS}: + return + + if last.get("role") != "tool" and num_turns > MAX_NUM_TURNS: + return + + if last.get("role") != "tool" and next_turn_should_call_tools: + return + + next_turn_should_call_tools = last.get("role") != "tool" diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/cli.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..5d599ee82906765c2667b9f0984c52cec5de302a --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/cli.py @@ -0,0 +1,229 @@ +import asyncio +import os +import signal +import traceback +from typing import Optional + +import typer +from rich import print + +from ._cli_hacks import _async_prompt, _patch_anyio_open_process +from .agent import Agent +from .utils import _load_agent_config + + +app = typer.Typer( + rich_markup_mode="rich", + help="A squad of lightweight composable AI applications built on Hugging Face's Inference Client and MCP stack.", +) + +run_cli = typer.Typer( + name="run", + help="Run the Agent in the CLI", + invoke_without_command=True, +) +app.add_typer(run_cli, name="run") + + +async def run_agent( + agent_path: Optional[str], +) -> None: + """ + Tiny Agent loop. + + Args: + agent_path (`str`, *optional*): + Path to a local folder containing an `agent.json` and optionally a custom `PROMPT.md` file or a built-in agent stored in a Hugging Face dataset. 
+
+    """
+    _patch_anyio_open_process()  # Hacky way to prevent stdio connections from being stopped by Ctrl+C
+
+    config, prompt = _load_agent_config(agent_path)
+
+    inputs = config.get("inputs", [])
+    servers = config.get("servers", [])
+
+    abort_event = asyncio.Event()
+    exit_event = asyncio.Event()
+    first_sigint = True
+
+    loop = asyncio.get_running_loop()
+    original_sigint_handler = signal.getsignal(signal.SIGINT)
+
+    def _sigint_handler() -> None:
+        nonlocal first_sigint
+        if first_sigint:
+            first_sigint = False
+            abort_event.set()
+            print("\n[red]Interrupted. Press Ctrl+C again to quit.[/red]", flush=True)
+            return
+
+        print("\n[red]Exiting...[/red]", flush=True)
+        exit_event.set()
+
+    try:
+        sigint_registered_in_loop = False
+        try:
+            loop.add_signal_handler(signal.SIGINT, _sigint_handler)
+            sigint_registered_in_loop = True
+        except (AttributeError, NotImplementedError):
+            # Windows (or any loop that doesn't support it): fall back to sync
+            signal.signal(signal.SIGINT, lambda *_: _sigint_handler())
+
+        # Handle inputs (i.e. env variables injection)
+        if len(inputs) > 0:
+            print(
+                "[bold blue]Some initial inputs are required by the agent. "
+                "Please provide a value or leave empty to load from env.[/bold blue]"
+            )
+            for input_item in inputs:
+                input_id = input_item["id"]
+                description = input_item["description"]
+                env_special_value = "${input:" + input_id + "}"  # Special value to indicate env variable injection
+
+                # Check env variables that will use this input
+                input_vars = set()
+                for server in servers:
+                    # Check stdio's "env" and http/sse's "headers" mappings
+                    env_or_headers = server.get("env", {}) if server["type"] == "stdio" else server.get("headers", {})
+                    for key, value in env_or_headers.items():
+                        if env_special_value in value:
+                            input_vars.add(key)
+
+                if not input_vars:
+                    print(f"[yellow]Input {input_id} defined in config but not used by any server.[/yellow]")
+                    continue
+
+                # Prompt user for input
+                env_variable_key = input_id.replace("-", "_").upper()
+                print(
+                    f"[blue] • {input_id}[/blue]: {description}. (default: load from {env_variable_key}).",
+                    end=" ",
+                )
+                user_input = (await _async_prompt(exit_event=exit_event)).strip()
+                if exit_event.is_set():
+                    return
+
+                # Inject user input (or env variable) into stdio's env or http/sse's headers
+                for server in servers:
+                    env_or_headers = server.get("env", {}) if server["type"] == "stdio" else server.get("headers", {})
+                    for key, value in env_or_headers.items():
+                        if env_special_value in value:
+                            if user_input:
+                                env_or_headers[key] = env_or_headers[key].replace(env_special_value, user_input)
+                            else:
+                                value_from_env = os.getenv(env_variable_key, "")
+                                env_or_headers[key] = env_or_headers[key].replace(env_special_value, value_from_env)
+                                if value_from_env:
+                                    print(f"[green]Value successfully loaded from '{env_variable_key}'[/green]")
+                                else:
+                                    print(
+                                        f"[yellow]No value found for '{env_variable_key}' in environment variables. Continuing.[/yellow]"
+                                    )
+
+            print()
+
+        # Main agent loop
+        async with Agent(
+            provider=config.get("provider"),  # type: ignore[arg-type]
+            model=config.get("model"),
+            base_url=config.get("endpointUrl"),  # type: ignore[arg-type]
+            servers=servers,  # type: ignore[arg-type]
+            prompt=prompt,
+        ) as agent:
+            await agent.load_tools()
+            print(f"[bold blue]Agent loaded with {len(agent.available_tools)} tools:[/bold blue]")
+            for t in agent.available_tools:
+                print(f"[blue] • {t.function.name}[/blue]")
+
+            while True:
+                abort_event.clear()
+
+                # Check if we should exit
+                if exit_event.is_set():
+                    return
+
+                try:
+                    user_input = await _async_prompt(exit_event=exit_event)
+                    first_sigint = True
+                except EOFError:
+                    print("\n[red]EOF received, exiting.[/red]", flush=True)
+                    break
+                except KeyboardInterrupt:
+                    if not first_sigint and abort_event.is_set():
+                        continue
+                    else:
+                        print("\n[red]Keyboard interrupt during input processing.[/red]", flush=True)
+                        break
+
+                try:
+                    async for chunk in agent.run(user_input, abort_event=abort_event):
+                        if abort_event.is_set() and not first_sigint:
+                            break
+                        if exit_event.is_set():
+                            return
+
+                        if hasattr(chunk, "choices"):
+                            delta = chunk.choices[0].delta
+                            if delta.content:
+                                print(delta.content, end="", flush=True)
+                            if delta.tool_calls:
+                                for call in delta.tool_calls:
+                                    if call.id:
+                                        print(f"<Tool {call.id}>", end="")
+                                    if call.function.name:
+                                        print(f"{call.function.name}", end=" ")
+                                    if call.function.arguments:
+                                        print(f"{call.function.arguments}", end="")
+                        else:
+                            print(
+                                f"\n\n[green]Tool[{chunk.name}] {chunk.tool_call_id}\n{chunk.content}[/green]\n",
+                                flush=True,
+                            )
+
+                    print()
+
+                except Exception as e:
+                    tb_str = traceback.format_exc()
+                    print(f"\n[bold red]Error during agent run: {e}\n{tb_str}[/bold red]", flush=True)
+                    first_sigint = True  # Allow graceful interrupt for the next command
+
+    except Exception as e:
+        tb_str = traceback.format_exc()
+        print(f"\n[bold red]An unexpected error occurred: {e}\n{tb_str}[/bold red]", flush=True)
+        raise e
+
+    finally:
+        if sigint_registered_in_loop:
+            try:
+                loop.remove_signal_handler(signal.SIGINT)
+            except (AttributeError, NotImplementedError):
+                pass
+        else:
+            signal.signal(signal.SIGINT, original_sigint_handler)
+
+
+@run_cli.callback()
+def run(
+    path: Optional[str] = typer.Argument(
+        None,
+        help=(
+            "Path to a local folder containing an agent.json file or a built-in agent "
+            "stored in the 'tiny-agents/tiny-agents' Hugging Face dataset "
+            "(https://huggingface.co/datasets/tiny-agents/tiny-agents)"
+        ),
+        show_default=False,
+    ),
+):
+    try:
+        asyncio.run(run_agent(path))
+    except KeyboardInterrupt:
+        print("\n[red]Application terminated by KeyboardInterrupt.[/red]", flush=True)
+        raise typer.Exit(code=130)
+    except Exception as e:
+        print(f"\n[bold red]An unexpected error occurred: {e}[/bold red]", flush=True)
+        raise e
+
+
+if __name__ == "__main__":
+    app()
diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/constants.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/constants.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7f2664696638502e141e4767570cbf709c689c7
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/constants.py
@@ -0,0 +1,74 @@
+from __future__ import annotations
+
+import sys
+from pathlib import Path
+from typing import List
+
+from huggingface_hub import ChatCompletionInputTool
+
+
+FILENAME_CONFIG = "agent.json"
+FILENAME_PROMPT = "PROMPT.md"
+
+DEFAULT_AGENT = {
+    "model":
"Qwen/Qwen2.5-72B-Instruct", + "provider": "nebius", + "servers": [ + { + "type": "stdio", + "command": "npx", + "args": [ + "-y", + "@modelcontextprotocol/server-filesystem", + str(Path.home() / ("Desktop" if sys.platform == "darwin" else "")), + ], + }, + { + "type": "stdio", + "command": "npx", + "args": ["@playwright/mcp@latest"], + }, + ], +} + + +DEFAULT_SYSTEM_PROMPT = """ +You are an agent - please keep going until the user’s query is completely +resolved, before ending your turn and yielding back to the user. Only terminate +your turn when you are sure that the problem is solved, or if you need more +info from the user to solve the problem. +If you are not sure about anything pertaining to the user’s request, use your +tools to read files and gather the relevant information: do NOT guess or make +up an answer. +You MUST plan extensively before each function call, and reflect extensively +on the outcomes of the previous function calls. DO NOT do this entire process +by making function calls only, as this can impair your ability to solve the +problem and think insightfully. +""".strip() + +MAX_NUM_TURNS = 10 + +TASK_COMPLETE_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj( # type: ignore[assignment] + { + "type": "function", + "function": { + "name": "task_complete", + "description": "Call this tool when the task given by the user is complete", + }, + } +) + +ASK_QUESTION_TOOL: ChatCompletionInputTool = ChatCompletionInputTool.parse_obj( # type: ignore[assignment] + { + "type": "function", + "function": { + "name": "ask_question", + "description": "Ask the user for more info required to solve or clarify their problem.", + }, + } +) + +EXIT_LOOP_TOOLS: List[ChatCompletionInputTool] = [TASK_COMPLETE_TOOL, ASK_QUESTION_TOOL] + + +DEFAULT_REPO_ID = "tiny-agents/tiny-agents" diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/mcp_client.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/mcp_client.py new file mode 100644 index 0000000000000000000000000000000000000000..2712dea12127ed69a088d9414f0715de3e103d8b --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/mcp_client.py @@ -0,0 +1,369 @@ +import json +import logging +from contextlib import AsyncExitStack +from datetime import timedelta +from pathlib import Path +from typing import TYPE_CHECKING, Any, AsyncIterable, Dict, List, Literal, Optional, Union, overload + +from typing_extensions import NotRequired, TypeAlias, TypedDict, Unpack + +from ...utils._runtime import get_hf_hub_version +from .._generated._async_client import AsyncInferenceClient +from .._generated.types import ( + ChatCompletionInputMessage, + ChatCompletionInputTool, + ChatCompletionStreamOutput, + ChatCompletionStreamOutputDeltaToolCall, +) +from .._providers import PROVIDER_OR_POLICY_T +from .utils import format_result + + +if TYPE_CHECKING: + from mcp import ClientSession + +logger = logging.getLogger(__name__) + +# Type alias for tool names +ToolName: TypeAlias = str + +ServerType: TypeAlias = Literal["stdio", "sse", "http"] + + +class StdioServerParameters_T(TypedDict): + command: str + args: NotRequired[List[str]] + env: NotRequired[Dict[str, str]] + cwd: NotRequired[Union[str, Path, None]] + + +class SSEServerParameters_T(TypedDict): + url: str + headers: NotRequired[Dict[str, Any]] + timeout: NotRequired[float] + sse_read_timeout: NotRequired[float] + + +class StreamableHTTPParameters_T(TypedDict): + url: str + headers: NotRequired[dict[str, Any]] + timeout: 
NotRequired[timedelta]
+    sse_read_timeout: NotRequired[timedelta]
+    terminate_on_close: NotRequired[bool]
+
+
+class MCPClient:
+    """
+    Client for connecting to one or more MCP servers and processing chat completions with tools.
+
+    <Tip warning={true}>
+
+    This class is experimental and might be subject to breaking changes in the future without prior notice.
+
+    </Tip>
+
+    Args:
+        model (`str`, *optional*):
+            The model to run inference with. Can be a model id hosted on the Hugging Face Hub, e.g. `meta-llama/Meta-Llama-3-8B-Instruct`
+            or a URL to a deployed Inference Endpoint or other local or remote endpoint.
+        provider (`str`, *optional*):
+            Name of the provider to use for inference. Defaults to "auto" i.e. the first of the providers available for the model, sorted by the user's order in https://hf.co/settings/inference-providers.
+            If model is a URL or `base_url` is passed, then `provider` is not used.
+        base_url (`str`, *optional*):
+            The base URL to run inference. Defaults to None.
+        api_key (`str`, *optional*):
+            Token to use for authentication. Will default to the locally saved Hugging Face token if not provided. You can also use your own provider API key to interact directly with the provider's service.
+    """
+
+    def __init__(
+        self,
+        *,
+        model: Optional[str] = None,
+        provider: Optional[PROVIDER_OR_POLICY_T] = None,
+        base_url: Optional[str] = None,
+        api_key: Optional[str] = None,
+    ):
+        # Initialize MCP sessions as a dictionary of ClientSession objects
+        self.sessions: Dict[ToolName, "ClientSession"] = {}
+        self.exit_stack = AsyncExitStack()
+        self.available_tools: List[ChatCompletionInputTool] = []
+        # To be able to send the model in the payload if `base_url` is provided
+        if model is None and base_url is None:
+            raise ValueError("At least one of `model` or `base_url` should be set in `MCPClient`.")
+        self.payload_model = model
+        self.client = AsyncInferenceClient(
+            model=None if base_url is not None else model,
+            provider=provider,
+            api_key=api_key,
+            base_url=base_url,
+        )
+
+    async def __aenter__(self):
+        """Enter the context manager"""
+        await self.client.__aenter__()
+        await self.exit_stack.__aenter__()
+        return self
+
+    async def __aexit__(self, exc_type, exc_val, exc_tb):
+        """Exit the context manager"""
+        await self.client.__aexit__(exc_type, exc_val, exc_tb)
+        await self.cleanup()
+
+    async def cleanup(self):
+        """Clean up resources"""
+        await self.client.close()
+        await self.exit_stack.aclose()
+
+    @overload
+    async def add_mcp_server(self, type: Literal["stdio"], **params: Unpack[StdioServerParameters_T]): ...
+
+    @overload
+    async def add_mcp_server(self, type: Literal["sse"], **params: Unpack[SSEServerParameters_T]): ...
+
+    @overload
+    async def add_mcp_server(self, type: Literal["http"], **params: Unpack[StreamableHTTPParameters_T]): ...
+
+    async def add_mcp_server(self, type: ServerType, **params: Any):
+        """Connect to an MCP server
+
+        Args:
+            type (`str`):
+                Type of the server to connect to. Can be one of:
+                - "stdio": Standard input/output server (local)
+                - "sse": Server-sent events (SSE) server
+                - "http": StreamableHTTP server
+            **params (`Dict[str, Any]`):
+                Server parameters that can be either:
+                - For stdio servers:
+                    - command (str): The command to run the MCP server
+                    - args (List[str], optional): Arguments for the command
+                    - env (Dict[str, str], optional): Environment variables for the command
+                    - cwd (Union[str, Path, None], optional): Working directory for the command
+                - For SSE servers:
+                    - url (str): The URL of the SSE server
+                    - headers (Dict[str, Any], optional): Headers for the SSE connection
+                    - timeout (float, optional): Connection timeout
+                    - sse_read_timeout (float, optional): SSE read timeout
+                - For StreamableHTTP servers:
+                    - url (str): The URL of the StreamableHTTP server
+                    - headers (Dict[str, Any], optional): Headers for the StreamableHTTP connection
+                    - timeout (timedelta, optional): Connection timeout
+                    - sse_read_timeout (timedelta, optional): SSE read timeout
+                    - terminate_on_close (bool, optional): Whether to terminate on close
+        """
+        from mcp import ClientSession, StdioServerParameters
+        from mcp import types as mcp_types
+
+        # Determine server type and create appropriate parameters
+        if type == "stdio":
+            # Handle stdio server
+            from mcp.client.stdio import stdio_client
+
+            logger.info(f"Connecting to stdio MCP server with command: {params['command']} {params.get('args', [])}")
+
+            client_kwargs = {"command": params["command"]}
+            for key in ["args", "env", "cwd"]:
+                if params.get(key) is not None:
+                    client_kwargs[key] = params[key]
+            server_params = StdioServerParameters(**client_kwargs)
+            read, write = await self.exit_stack.enter_async_context(stdio_client(server_params))
+        elif type == "sse":
+            # Handle SSE server
+            from mcp.client.sse import sse_client
+
+            logger.info(f"Connecting to SSE MCP server at: {params['url']}")
+
+            client_kwargs = {"url": params["url"]}
+            for key in ["headers", "timeout", "sse_read_timeout"]:
+                if params.get(key) is not None:
+                    client_kwargs[key] = params[key]
+            read, write = await self.exit_stack.enter_async_context(sse_client(**client_kwargs))
+        elif type == "http":
+            # Handle StreamableHTTP server
+            from mcp.client.streamable_http import streamablehttp_client
+
+            logger.info(f"Connecting to StreamableHTTP MCP server at: {params['url']}")
+
+            client_kwargs = {"url": params["url"]}
+            for key in ["headers", "timeout", "sse_read_timeout", "terminate_on_close"]:
+                if params.get(key) is not None:
+                    client_kwargs[key] = params[key]
+            read, write, _ = await self.exit_stack.enter_async_context(streamablehttp_client(**client_kwargs))
+            # ^ TODO: should we handle `get_session_id_callback`? (function to retrieve the current session ID)
+        else:
+            raise ValueError(f"Unsupported server type: {type}")
+
+        session = await self.exit_stack.enter_async_context(
+            ClientSession(
+                read_stream=read,
+                write_stream=write,
+                client_info=mcp_types.Implementation(
+                    name="huggingface_hub.MCPClient",
+                    version=get_hf_hub_version(),
+                ),
+            )
+        )
+
+        logger.debug("Initializing session...")
+        await session.initialize()
+
+        # List available tools
+        response = await session.list_tools()
+        logger.debug(f"Connected to server with tools: {[tool.name for tool in response.tools]}")
+
+        for tool in response.tools:
+            if tool.name in self.sessions:
+                logger.warning(f"Tool '{tool.name}' already defined by another server.
Skipping.") + continue + + # Map tool names to their server for later lookup + self.sessions[tool.name] = session + + # Add tool to the list of available tools (for use in chat completions) + self.available_tools.append( + ChatCompletionInputTool.parse_obj_as_instance( + { + "type": "function", + "function": { + "name": tool.name, + "description": tool.description, + "parameters": tool.inputSchema, + }, + } + ) + ) + + async def process_single_turn_with_tools( + self, + messages: List[Union[Dict, ChatCompletionInputMessage]], + exit_loop_tools: Optional[List[ChatCompletionInputTool]] = None, + exit_if_first_chunk_no_tool: bool = False, + ) -> AsyncIterable[Union[ChatCompletionStreamOutput, ChatCompletionInputMessage]]: + """Process a query using `self.model` and available tools, yielding chunks and tool outputs. + + Args: + messages (`List[Dict]`): + List of message objects representing the conversation history + exit_loop_tools (`List[ChatCompletionInputTool]`, *optional*): + List of tools that should exit the generator when called + exit_if_first_chunk_no_tool (`bool`, *optional*): + Exit if no tool is present in the first chunks. Default to False. + + Yields: + [`ChatCompletionStreamOutput`] chunks or [`ChatCompletionInputMessage`] objects + """ + # Prepare tools list based on options + tools = self.available_tools + if exit_loop_tools is not None: + tools = [*exit_loop_tools, *self.available_tools] + + # Create the streaming request + response = await self.client.chat.completions.create( + model=self.payload_model, + messages=messages, + tools=tools, + tool_choice="auto", + stream=True, + ) + + message: Dict[str, Any] = {"role": "unknown", "content": ""} + final_tool_calls: Dict[int, ChatCompletionStreamOutputDeltaToolCall] = {} + num_of_chunks = 0 + + # Read from stream + async for chunk in response: + num_of_chunks += 1 + delta = chunk.choices[0].delta if chunk.choices and len(chunk.choices) > 0 else None + if not delta: + continue + + # Process message + if delta.role: + message["role"] = delta.role + if delta.content: + message["content"] += delta.content + + # Process tool calls + if delta.tool_calls: + for tool_call in delta.tool_calls: + # Aggregate chunks into tool calls + if tool_call.index not in final_tool_calls: + if ( + tool_call.function.arguments is None or tool_call.function.arguments == "{}" + ): # Corner case (depends on provider) + tool_call.function.arguments = "" + final_tool_calls[tool_call.index] = tool_call + + elif tool_call.function.arguments: + final_tool_calls[tool_call.index].function.arguments += tool_call.function.arguments + + # Optionally exit early if no tools in first chunks + if exit_if_first_chunk_no_tool and num_of_chunks <= 2 and len(final_tool_calls) == 0: + return + + # Yield each chunk to caller + yield chunk + + # Add the assistant message with tool calls (if any) to messages + if message["content"] or final_tool_calls: + # if the role is unknown, set it to assistant + if message.get("role") == "unknown": + message["role"] = "assistant" + # Convert final_tool_calls to the format expected by OpenAI + if final_tool_calls: + tool_calls_list: List[Dict[str, Any]] = [] + for tc in final_tool_calls.values(): + tool_calls_list.append( + { + "id": tc.id, + "type": "function", + "function": { + "name": tc.function.name, + "arguments": tc.function.arguments or "{}", + }, + } + ) + message["tool_calls"] = tool_calls_list + messages.append(message) + + # Process tool calls one by one + for tool_call in final_tool_calls.values(): + function_name = 
tool_call.function.name + try: + function_args = json.loads(tool_call.function.arguments or "{}") + except json.JSONDecodeError as err: + tool_message = { + "role": "tool", + "tool_call_id": tool_call.id, + "name": function_name, + "content": f"Invalid JSON generated by the model: {err}", + } + tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message) + messages.append(tool_message_as_obj) + yield tool_message_as_obj + continue # move to next tool call + + tool_message = {"role": "tool", "tool_call_id": tool_call.id, "content": "", "name": function_name} + + # Check if this is an exit loop tool + if exit_loop_tools and function_name in [t.function.name for t in exit_loop_tools]: + tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message) + messages.append(tool_message_as_obj) + yield tool_message_as_obj + return + + # Execute tool call with the appropriate session + session = self.sessions.get(function_name) + if session is not None: + try: + result = await session.call_tool(function_name, function_args) + tool_message["content"] = format_result(result) + except Exception as err: + tool_message["content"] = f"Error: MCP tool call failed with error message: {err}" + else: + tool_message["content"] = f"Error: No session found for tool: {function_name}" + + # Yield tool message + tool_message_as_obj = ChatCompletionInputMessage.parse_obj_as_instance(tool_message) + messages.append(tool_message_as_obj) + yield tool_message_as_obj diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/types.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/types.py new file mode 100644 index 0000000000000000000000000000000000000000..7177695e7b6f19cc9a259cc4404326b871c609ec --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/types.py @@ -0,0 +1,39 @@ +from typing import Dict, List, Literal, TypedDict, Union + + +class InputConfig(TypedDict, total=False): + id: str + description: str + type: str + password: bool + + +class StdioServerConfig(TypedDict): + type: Literal["stdio"] + command: str + args: List[str] + env: Dict[str, str] + cwd: str + + +class HTTPServerConfig(TypedDict): + type: Literal["http"] + url: str + headers: Dict[str, str] + + +class SSEServerConfig(TypedDict): + type: Literal["sse"] + url: str + headers: Dict[str, str] + + +ServerConfig = Union[StdioServerConfig, HTTPServerConfig, SSEServerConfig] + + +# AgentConfig root object +class AgentConfig(TypedDict): + model: str + provider: str + inputs: List[InputConfig] + servers: List[ServerConfig] diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/utils.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..6e75eef45fe2e1f3deb169acd9c1ca76d9a7e283 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_mcp/utils.py @@ -0,0 +1,124 @@ +""" +Utility functions for MCPClient and Tiny Agents. + +Formatting utilities taken from the JS SDK: https://github.com/huggingface/huggingface.js/blob/main/packages/mcp-client/src/ResultFormatter.ts. 
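+
+Illustrative use (a sketch): `format_result` flattens an `mcp.types.CallToolResult`
+into text for the chat transcript, e.g.
+
+    result = await session.call_tool("some_tool", {})  # `session` is an mcp.ClientSession
+    print(format_result(result))  # prints "[No content]" if the tool returned nothing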
+""" + +import json +from pathlib import Path +from typing import TYPE_CHECKING, List, Optional, Tuple + +from huggingface_hub import snapshot_download +from huggingface_hub.errors import EntryNotFoundError + +from .constants import DEFAULT_AGENT, DEFAULT_REPO_ID, FILENAME_CONFIG, FILENAME_PROMPT +from .types import AgentConfig + + +if TYPE_CHECKING: + from mcp import types as mcp_types + + +def format_result(result: "mcp_types.CallToolResult") -> str: + """ + Formats a mcp.types.CallToolResult content into a human-readable string. + + Args: + result (CallToolResult) + Object returned by mcp.ClientSession.call_tool. + + Returns: + str + A formatted string representing the content of the result. + """ + content = result.content + + if len(content) == 0: + return "[No content]" + + formatted_parts: List[str] = [] + + for item in content: + if item.type == "text": + formatted_parts.append(item.text) + + elif item.type == "image": + formatted_parts.append( + f"[Binary Content: Image {item.mimeType}, {_get_base64_size(item.data)} bytes]\n" + f"The task is complete and the content accessible to the User" + ) + + elif item.type == "audio": + formatted_parts.append( + f"[Binary Content: Audio {item.mimeType}, {_get_base64_size(item.data)} bytes]\n" + f"The task is complete and the content accessible to the User" + ) + + elif item.type == "resource": + resource = item.resource + + if hasattr(resource, "text"): + formatted_parts.append(resource.text) + + elif hasattr(resource, "blob"): + formatted_parts.append( + f"[Binary Content ({resource.uri}): {resource.mimeType}, {_get_base64_size(resource.blob)} bytes]\n" + f"The task is complete and the content accessible to the User" + ) + + return "\n".join(formatted_parts) + + +def _get_base64_size(base64_str: str) -> int: + """Estimate the byte size of a base64-encoded string.""" + # Remove any prefix like "data:image/png;base64," + if "," in base64_str: + base64_str = base64_str.split(",")[1] + + padding = 0 + if base64_str.endswith("=="): + padding = 2 + elif base64_str.endswith("="): + padding = 1 + + return (len(base64_str) * 3) // 4 - padding + + +def _load_agent_config(agent_path: Optional[str]) -> Tuple[AgentConfig, Optional[str]]: + """Load server config and prompt.""" + + def _read_dir(directory: Path) -> Tuple[AgentConfig, Optional[str]]: + cfg_file = directory / FILENAME_CONFIG + if not cfg_file.exists(): + raise FileNotFoundError(f" Config file not found in {directory}! Please make sure it exists locally") + + config: AgentConfig = json.loads(cfg_file.read_text(encoding="utf-8")) + prompt_file = directory / FILENAME_PROMPT + prompt: Optional[str] = prompt_file.read_text(encoding="utf-8") if prompt_file.exists() else None + return config, prompt + + if agent_path is None: + return DEFAULT_AGENT, None # type: ignore[return-value] + + path = Path(agent_path).expanduser() + + if path.is_file(): + return json.loads(path.read_text(encoding="utf-8")), None + + if path.is_dir(): + return _read_dir(path) + + # fetch from the Hub + try: + repo_dir = Path( + snapshot_download( + repo_id=DEFAULT_REPO_ID, + allow_patterns=f"{agent_path}/*", + repo_type="dataset", + ) + ) + return _read_dir(repo_dir / agent_path) + except Exception as err: + raise EntryNotFoundError( + f" Agent {agent_path} not found in tiny-agents/tiny-agents! Please make sure it exists in https://huggingface.co/datasets/tiny-agents/tiny-agents." 
+ ) from err diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/__init__.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8d73b837fca2dd6aa9833b7940e454d405ca8da2 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/__init__.py @@ -0,0 +1,205 @@ +from typing import Dict, Literal, Optional, Union + +from huggingface_hub.inference._providers.featherless_ai import ( + FeatherlessConversationalTask, + FeatherlessTextGenerationTask, +) +from huggingface_hub.utils import logging + +from ._common import TaskProviderHelper, _fetch_inference_provider_mapping +from .black_forest_labs import BlackForestLabsTextToImageTask +from .cerebras import CerebrasConversationalTask +from .cohere import CohereConversationalTask +from .fal_ai import ( + FalAIAutomaticSpeechRecognitionTask, + FalAITextToImageTask, + FalAITextToSpeechTask, + FalAITextToVideoTask, +) +from .fireworks_ai import FireworksAIConversationalTask +from .groq import GroqConversationalTask +from .hf_inference import ( + HFInferenceBinaryInputTask, + HFInferenceConversational, + HFInferenceFeatureExtractionTask, + HFInferenceTask, +) +from .hyperbolic import HyperbolicTextGenerationTask, HyperbolicTextToImageTask +from .nebius import ( + NebiusConversationalTask, + NebiusFeatureExtractionTask, + NebiusTextGenerationTask, + NebiusTextToImageTask, +) +from .novita import NovitaConversationalTask, NovitaTextGenerationTask, NovitaTextToVideoTask +from .nscale import NscaleConversationalTask, NscaleTextToImageTask +from .openai import OpenAIConversationalTask +from .replicate import ReplicateTask, ReplicateTextToImageTask, ReplicateTextToSpeechTask +from .sambanova import SambanovaConversationalTask, SambanovaFeatureExtractionTask +from .together import TogetherConversationalTask, TogetherTextGenerationTask, TogetherTextToImageTask + + +logger = logging.get_logger(__name__) + + +PROVIDER_T = Literal[ + "black-forest-labs", + "cerebras", + "cohere", + "fal-ai", + "featherless-ai", + "fireworks-ai", + "groq", + "hf-inference", + "hyperbolic", + "nebius", + "novita", + "nscale", + "openai", + "replicate", + "sambanova", + "together", +] + +PROVIDER_OR_POLICY_T = Union[PROVIDER_T, Literal["auto"]] + +PROVIDERS: Dict[PROVIDER_T, Dict[str, TaskProviderHelper]] = { + "black-forest-labs": { + "text-to-image": BlackForestLabsTextToImageTask(), + }, + "cerebras": { + "conversational": CerebrasConversationalTask(), + }, + "cohere": { + "conversational": CohereConversationalTask(), + }, + "fal-ai": { + "automatic-speech-recognition": FalAIAutomaticSpeechRecognitionTask(), + "text-to-image": FalAITextToImageTask(), + "text-to-speech": FalAITextToSpeechTask(), + "text-to-video": FalAITextToVideoTask(), + }, + "featherless-ai": { + "conversational": FeatherlessConversationalTask(), + "text-generation": FeatherlessTextGenerationTask(), + }, + "fireworks-ai": { + "conversational": FireworksAIConversationalTask(), + }, + "groq": { + "conversational": GroqConversationalTask(), + }, + "hf-inference": { + "text-to-image": HFInferenceTask("text-to-image"), + "conversational": HFInferenceConversational(), + "text-generation": HFInferenceTask("text-generation"), + "text-classification": HFInferenceTask("text-classification"), + "question-answering": HFInferenceTask("question-answering"), + "audio-classification": HFInferenceBinaryInputTask("audio-classification"), + 
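+        # Note: tasks below that take raw media (audio/image) inputs are wired to
+        # HFInferenceBinaryInputTask, which posts the bytes directly (or
+        # base64-encodes them when extra parameters are set); JSON-input tasks
+        # use the plain HFInferenceTask helper.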
"automatic-speech-recognition": HFInferenceBinaryInputTask("automatic-speech-recognition"), + "fill-mask": HFInferenceTask("fill-mask"), + "feature-extraction": HFInferenceFeatureExtractionTask(), + "image-classification": HFInferenceBinaryInputTask("image-classification"), + "image-segmentation": HFInferenceBinaryInputTask("image-segmentation"), + "document-question-answering": HFInferenceTask("document-question-answering"), + "image-to-text": HFInferenceBinaryInputTask("image-to-text"), + "object-detection": HFInferenceBinaryInputTask("object-detection"), + "audio-to-audio": HFInferenceBinaryInputTask("audio-to-audio"), + "zero-shot-image-classification": HFInferenceBinaryInputTask("zero-shot-image-classification"), + "zero-shot-classification": HFInferenceTask("zero-shot-classification"), + "image-to-image": HFInferenceBinaryInputTask("image-to-image"), + "sentence-similarity": HFInferenceTask("sentence-similarity"), + "table-question-answering": HFInferenceTask("table-question-answering"), + "tabular-classification": HFInferenceTask("tabular-classification"), + "text-to-speech": HFInferenceTask("text-to-speech"), + "token-classification": HFInferenceTask("token-classification"), + "translation": HFInferenceTask("translation"), + "summarization": HFInferenceTask("summarization"), + "visual-question-answering": HFInferenceBinaryInputTask("visual-question-answering"), + }, + "hyperbolic": { + "text-to-image": HyperbolicTextToImageTask(), + "conversational": HyperbolicTextGenerationTask("conversational"), + "text-generation": HyperbolicTextGenerationTask("text-generation"), + }, + "nebius": { + "text-to-image": NebiusTextToImageTask(), + "conversational": NebiusConversationalTask(), + "text-generation": NebiusTextGenerationTask(), + "feature-extraction": NebiusFeatureExtractionTask(), + }, + "novita": { + "text-generation": NovitaTextGenerationTask(), + "conversational": NovitaConversationalTask(), + "text-to-video": NovitaTextToVideoTask(), + }, + "nscale": { + "conversational": NscaleConversationalTask(), + "text-to-image": NscaleTextToImageTask(), + }, + "openai": { + "conversational": OpenAIConversationalTask(), + }, + "replicate": { + "text-to-image": ReplicateTextToImageTask(), + "text-to-speech": ReplicateTextToSpeechTask(), + "text-to-video": ReplicateTask("text-to-video"), + }, + "sambanova": { + "conversational": SambanovaConversationalTask(), + "feature-extraction": SambanovaFeatureExtractionTask(), + }, + "together": { + "text-to-image": TogetherTextToImageTask(), + "conversational": TogetherConversationalTask(), + "text-generation": TogetherTextGenerationTask(), + }, +} + + +def get_provider_helper( + provider: Optional[PROVIDER_OR_POLICY_T], task: str, model: Optional[str] +) -> TaskProviderHelper: + """Get provider helper instance by name and task. + + Args: + provider (`str`, *optional*): name of the provider, or "auto" to automatically select the provider for the model. + task (`str`): Name of the task + model (`str`, *optional*): Name of the model + Returns: + TaskProviderHelper: Helper instance for the specified provider and task + + Raises: + ValueError: If provider or task is not supported + """ + + if (model is None and provider in (None, "auto")) or ( + model is not None and model.startswith(("http://", "https://")) + ): + provider = "hf-inference" + + if provider is None: + logger.info( + "Defaulting to 'auto' which will select the first provider available for the model, sorted by the user's order in https://hf.co/settings/inference-providers." 
+ ) + provider = "auto" + + if provider == "auto": + if model is None: + raise ValueError("Specifying a model is required when provider is 'auto'") + provider_mapping = _fetch_inference_provider_mapping(model) + provider = next(iter(provider_mapping)).provider + + provider_tasks = PROVIDERS.get(provider) # type: ignore + if provider_tasks is None: + raise ValueError( + f"Provider '{provider}' not supported. Available values: 'auto' or any provider from {list(PROVIDERS.keys())}." + "Passing 'auto' (default value) will automatically select the first provider available for the model, sorted " + "by the user's order in https://hf.co/settings/inference-providers." + ) + + if task not in provider_tasks: + raise ValueError( + f"Task '{task}' not supported for provider '{provider}'. Available tasks: {list(provider_tasks.keys())}" + ) + return provider_tasks[task] diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/_common.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/_common.py new file mode 100644 index 0000000000000000000000000000000000000000..8ce62d56eac36ea4d72878629bffb066cb9b977f --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/_common.py @@ -0,0 +1,299 @@ +from functools import lru_cache +from typing import Any, Dict, List, Optional, Union, overload + +from huggingface_hub import constants +from huggingface_hub.hf_api import InferenceProviderMapping +from huggingface_hub.inference._common import RequestParameters +from huggingface_hub.inference._generated.types.chat_completion import ChatCompletionInputMessage +from huggingface_hub.utils import build_hf_headers, get_token, logging + + +logger = logging.get_logger(__name__) + + +# Dev purposes only. +# If you want to try to run inference for a new model locally before it's registered on huggingface.co +# for a given Inference Provider, you can add it to the following dictionary. +HARDCODED_MODEL_INFERENCE_MAPPING: Dict[str, Dict[str, InferenceProviderMapping]] = { + # "HF model ID" => InferenceProviderMapping object initialized with "Model ID on Inference Provider's side" + # + # Example: + # "Qwen/Qwen2.5-Coder-32B-Instruct": InferenceProviderMapping(hf_model_id="Qwen/Qwen2.5-Coder-32B-Instruct", + # provider_id="Qwen2.5-Coder-32B-Instruct", + # task="conversational", + # status="live") + "cerebras": {}, + "cohere": {}, + "fal-ai": {}, + "fireworks-ai": {}, + "groq": {}, + "hf-inference": {}, + "hyperbolic": {}, + "nebius": {}, + "nscale": {}, + "replicate": {}, + "sambanova": {}, + "together": {}, +} + + +@overload +def filter_none(obj: Dict[str, Any]) -> Dict[str, Any]: ... +@overload +def filter_none(obj: List[Any]) -> List[Any]: ... 
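+# Illustrative note (not upstream documentation): `filter_none` recursively
+# drops None values and empty nested dicts from payloads, e.g.
+#     filter_none({"a": 1, "b": None, "c": {"d": None}})  ->  {"a": 1}
+# Lists keep their length; only their dict/list items are filtered recursively.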
+ + +def filter_none(obj: Union[Dict[str, Any], List[Any]]) -> Union[Dict[str, Any], List[Any]]: + if isinstance(obj, dict): + cleaned: Dict[str, Any] = {} + for k, v in obj.items(): + if v is None: + continue + if isinstance(v, (dict, list)): + v = filter_none(v) + # remove empty nested dicts + if isinstance(v, dict) and not v: + continue + cleaned[k] = v + return cleaned + + if isinstance(obj, list): + return [filter_none(v) if isinstance(v, (dict, list)) else v for v in obj] + + raise ValueError(f"Expected dict or list, got {type(obj)}") + + +class TaskProviderHelper: + """Base class for task-specific provider helpers.""" + + def __init__(self, provider: str, base_url: str, task: str) -> None: + self.provider = provider + self.task = task + self.base_url = base_url + + def prepare_request( + self, + *, + inputs: Any, + parameters: Dict[str, Any], + headers: Dict, + model: Optional[str], + api_key: Optional[str], + extra_payload: Optional[Dict[str, Any]] = None, + ) -> RequestParameters: + """ + Prepare the request to be sent to the provider. + + Each step (api_key, model, headers, url, payload) can be customized in subclasses. + """ + # api_key from user, or local token, or raise error + api_key = self._prepare_api_key(api_key) + + # mapped model from HF model ID + provider_mapping_info = self._prepare_mapping_info(model) + + # default HF headers + user headers (to customize in subclasses) + headers = self._prepare_headers(headers, api_key) + + # routed URL if HF token, or direct URL (to customize in '_prepare_route' in subclasses) + url = self._prepare_url(api_key, provider_mapping_info.provider_id) + + # prepare payload (to customize in subclasses) + payload = self._prepare_payload_as_dict(inputs, parameters, provider_mapping_info=provider_mapping_info) + if payload is not None: + payload = recursive_merge(payload, extra_payload or {}) + + # body data (to customize in subclasses) + data = self._prepare_payload_as_bytes(inputs, parameters, provider_mapping_info, extra_payload) + + # check if both payload and data are set and return + if payload is not None and data is not None: + raise ValueError("Both payload and data cannot be set in the same request.") + if payload is None and data is None: + raise ValueError("Either payload or data must be set in the request.") + return RequestParameters( + url=url, task=self.task, model=provider_mapping_info.provider_id, json=payload, data=data, headers=headers + ) + + def get_response( + self, + response: Union[bytes, Dict], + request_params: Optional[RequestParameters] = None, + ) -> Any: + """ + Return the response in the expected format. + + Override this method in subclasses for customized response handling.""" + return response + + def _prepare_api_key(self, api_key: Optional[str]) -> str: + """Return the API key to use for the request. + + Usually not overwritten in subclasses.""" + if api_key is None: + api_key = get_token() + if api_key is None: + raise ValueError( + f"You must provide an api_key to work with {self.provider} API or log in with `huggingface-cli login`." + ) + return api_key + + def _prepare_mapping_info(self, model: Optional[str]) -> InferenceProviderMapping: + """Return the mapped model ID to use for the request. 
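+
+        Resolution order (as implemented below): hardcoded local overrides in
+        HARDCODED_MODEL_INFERENCE_MAPPING first, then the Hub-provided mapping,
+        validated against this helper's provider and task.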
+ + Usually not overwritten in subclasses.""" + if model is None: + raise ValueError(f"Please provide an HF model ID supported by {self.provider}.") + + # hardcoded mapping for local testing + if HARDCODED_MODEL_INFERENCE_MAPPING.get(self.provider, {}).get(model): + return HARDCODED_MODEL_INFERENCE_MAPPING[self.provider][model] + + provider_mapping = None + for mapping in _fetch_inference_provider_mapping(model): + if mapping.provider == self.provider: + provider_mapping = mapping + break + + if provider_mapping is None: + raise ValueError(f"Model {model} is not supported by provider {self.provider}.") + + if provider_mapping.task != self.task: + raise ValueError( + f"Model {model} is not supported for task {self.task} and provider {self.provider}. " + f"Supported task: {provider_mapping.task}." + ) + + if provider_mapping.status == "staging": + logger.warning( + f"Model {model} is in staging mode for provider {self.provider}. Meant for test purposes only." + ) + if provider_mapping.status == "error": + logger.warning( + f"Our latest automated health check on model '{model}' for provider '{self.provider}' did not complete successfully. " + "Inference call might fail." + ) + return provider_mapping + + def _prepare_headers(self, headers: Dict, api_key: str) -> Dict: + """Return the headers to use for the request. + + Override this method in subclasses for customized headers. + """ + return {**build_hf_headers(token=api_key), **headers} + + def _prepare_url(self, api_key: str, mapped_model: str) -> str: + """Return the URL to use for the request. + + Usually not overwritten in subclasses.""" + base_url = self._prepare_base_url(api_key) + route = self._prepare_route(mapped_model, api_key) + return f"{base_url.rstrip('/')}/{route.lstrip('/')}" + + def _prepare_base_url(self, api_key: str) -> str: + """Return the base URL to use for the request. + + Usually not overwritten in subclasses.""" + # Route to the proxy if the api_key is a HF TOKEN + if api_key.startswith("hf_"): + logger.info(f"Calling '{self.provider}' provider through Hugging Face router.") + return constants.INFERENCE_PROXY_TEMPLATE.format(provider=self.provider) + else: + logger.info(f"Calling '{self.provider}' provider directly.") + return self.base_url + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + """Return the route to use for the request. + + Override this method in subclasses for customized routes. + """ + return "" + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + """Return the payload to use for the request, as a dict. + + Override this method in subclasses for customized payloads. + Only one of `_prepare_payload_as_dict` and `_prepare_payload_as_bytes` should return a value. + """ + return None + + def _prepare_payload_as_bytes( + self, + inputs: Any, + parameters: Dict, + provider_mapping_info: InferenceProviderMapping, + extra_payload: Optional[Dict], + ) -> Optional[bytes]: + """Return the body to use for the request, as bytes. + + Override this method in subclasses for customized body data. + Only one of `_prepare_payload_as_dict` and `_prepare_payload_as_bytes` should return a value. + """ + return None + + +class BaseConversationalTask(TaskProviderHelper): + """ + Base class for conversational (chat completion) tasks. 
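+    Subclasses usually only override `_prepare_route` (and, when a provider
+    needs payload tweaks, `_prepare_payload_as_dict`).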
+ The schema follows the OpenAI API format defined here: https://platform.openai.com/docs/api-reference/chat + """ + + def __init__(self, provider: str, base_url: str): + super().__init__(provider=provider, base_url=base_url, task="conversational") + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + return "/v1/chat/completions" + + def _prepare_payload_as_dict( + self, + inputs: List[Union[Dict, ChatCompletionInputMessage]], + parameters: Dict, + provider_mapping_info: InferenceProviderMapping, + ) -> Optional[Dict]: + return filter_none({"messages": inputs, **parameters, "model": provider_mapping_info.provider_id}) + + +class BaseTextGenerationTask(TaskProviderHelper): + """ + Base class for text-generation (completion) tasks. + The schema follows the OpenAI API format defined here: https://platform.openai.com/docs/api-reference/completions + """ + + def __init__(self, provider: str, base_url: str): + super().__init__(provider=provider, base_url=base_url, task="text-generation") + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + return "/v1/completions" + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + return {"prompt": inputs, **filter_none(parameters), "model": provider_mapping_info.provider_id} + + +@lru_cache(maxsize=None) +def _fetch_inference_provider_mapping(model: str) -> List["InferenceProviderMapping"]: + """ + Fetch provider mappings for a model from the Hub. + """ + from huggingface_hub.hf_api import HfApi + + info = HfApi().model_info(model, expand=["inferenceProviderMapping"]) + provider_mapping = info.inference_provider_mapping + if provider_mapping is None: + raise ValueError(f"No provider mapping found for model {model}") + return provider_mapping + + +def recursive_merge(dict1: Dict, dict2: Dict) -> Dict: + return { + **dict1, + **{ + key: recursive_merge(dict1[key], value) + if (key in dict1 and isinstance(dict1[key], dict) and isinstance(value, dict)) + else value + for key, value in dict2.items() + }, + } diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/black_forest_labs.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/black_forest_labs.py new file mode 100644 index 0000000000000000000000000000000000000000..afa8ed281d8a8e94a054b83b74ec6909f623e300 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/black_forest_labs.py @@ -0,0 +1,69 @@ +import time +from typing import Any, Dict, Optional, Union + +from huggingface_hub.hf_api import InferenceProviderMapping +from huggingface_hub.inference._common import RequestParameters, _as_dict +from huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none +from huggingface_hub.utils import logging +from huggingface_hub.utils._http import get_session + + +logger = logging.get_logger(__name__) + +MAX_POLLING_ATTEMPTS = 6 +POLLING_INTERVAL = 1.0 + + +class BlackForestLabsTextToImageTask(TaskProviderHelper): + def __init__(self): + super().__init__(provider="black-forest-labs", base_url="https://api.us1.bfl.ai", task="text-to-image") + + def _prepare_headers(self, headers: Dict, api_key: str) -> Dict: + headers = super()._prepare_headers(headers, api_key) + if not api_key.startswith("hf_"): + _ = headers.pop("authorization") + headers["X-Key"] = api_key + return headers + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + return f"/v1/{mapped_model}" + 
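+    # Note: the BFL API expects `steps` and `guidance` instead of the
+    # diffusers-style `num_inference_steps` and `guidance_scale`, hence the
+    # renaming performed in `_prepare_payload_as_dict` below.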
+ def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + parameters = filter_none(parameters) + if "num_inference_steps" in parameters: + parameters["steps"] = parameters.pop("num_inference_steps") + if "guidance_scale" in parameters: + parameters["guidance"] = parameters.pop("guidance_scale") + + return {"prompt": inputs, **parameters} + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + """ + Polling mechanism for Black Forest Labs since the API is asynchronous. + """ + url = _as_dict(response).get("polling_url") + session = get_session() + for _ in range(MAX_POLLING_ATTEMPTS): + time.sleep(POLLING_INTERVAL) + + response = session.get(url, headers={"Content-Type": "application/json"}) # type: ignore + response.raise_for_status() # type: ignore + response_json: Dict = response.json() # type: ignore + status = response_json.get("status") + logger.info( + f"Polling generation result from {url}. Current status: {status}. " + f"Will retry after {POLLING_INTERVAL} seconds if not ready." + ) + + if ( + status == "Ready" + and isinstance(response_json.get("result"), dict) + and (sample_url := response_json["result"].get("sample")) + ): + image_resp = session.get(sample_url) + image_resp.raise_for_status() + return image_resp.content + + raise TimeoutError(f"Failed to get the image URL after {MAX_POLLING_ATTEMPTS} attempts.") diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/cerebras.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/cerebras.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b9c3aacb3e134a8e755297c15ece198ffe633d --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/cerebras.py @@ -0,0 +1,6 @@ +from ._common import BaseConversationalTask + + +class CerebrasConversationalTask(BaseConversationalTask): + def __init__(self): + super().__init__(provider="cerebras", base_url="https://api.cerebras.ai") diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/cohere.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/cohere.py new file mode 100644 index 0000000000000000000000000000000000000000..a5e9191caec50b0e659dddceba3e817a4ac28307 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/cohere.py @@ -0,0 +1,32 @@ +from typing import Any, Dict, Optional + +from huggingface_hub.hf_api import InferenceProviderMapping + +from ._common import BaseConversationalTask + + +_PROVIDER = "cohere" +_BASE_URL = "https://api.cohere.com" + + +class CohereConversationalTask(BaseConversationalTask): + def __init__(self): + super().__init__(provider=_PROVIDER, base_url=_BASE_URL) + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + return "/compatibility/v1/chat/completions" + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info) + response_format = parameters.get("response_format") + if isinstance(response_format, dict) and response_format.get("type") == "json_schema": + json_schema_details = response_format.get("json_schema") + if isinstance(json_schema_details, dict) and "schema" in json_schema_details: + payload["response_format"] = { # type: ignore 
[index] + "type": "json_object", + "schema": json_schema_details["schema"], + } + + return payload diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/fal_ai.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/fal_ai.py new file mode 100644 index 0000000000000000000000000000000000000000..8dd463b6b1b3adcda4f954ace535b95b009eb17a --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/fal_ai.py @@ -0,0 +1,172 @@ +import base64 +import time +from abc import ABC +from typing import Any, Dict, Optional, Union +from urllib.parse import urlparse + +from huggingface_hub import constants +from huggingface_hub.hf_api import InferenceProviderMapping +from huggingface_hub.inference._common import RequestParameters, _as_dict +from huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none +from huggingface_hub.utils import get_session, hf_raise_for_status +from huggingface_hub.utils.logging import get_logger + + +logger = get_logger(__name__) + +# Arbitrary polling interval +_POLLING_INTERVAL = 0.5 + + +class FalAITask(TaskProviderHelper, ABC): + def __init__(self, task: str): + super().__init__(provider="fal-ai", base_url="https://fal.run", task=task) + + def _prepare_headers(self, headers: Dict, api_key: str) -> Dict: + headers = super()._prepare_headers(headers, api_key) + if not api_key.startswith("hf_"): + headers["authorization"] = f"Key {api_key}" + return headers + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + return f"/{mapped_model}" + + +class FalAIAutomaticSpeechRecognitionTask(FalAITask): + def __init__(self): + super().__init__("automatic-speech-recognition") + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + if isinstance(inputs, str) and inputs.startswith(("http://", "https://")): + # If input is a URL, pass it directly + audio_url = inputs + else: + # If input is a file path, read it first + if isinstance(inputs, str): + with open(inputs, "rb") as f: + inputs = f.read() + + audio_b64 = base64.b64encode(inputs).decode() + content_type = "audio/mpeg" + audio_url = f"data:{content_type};base64,{audio_b64}" + + return {"audio_url": audio_url, **filter_none(parameters)} + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + text = _as_dict(response)["text"] + if not isinstance(text, str): + raise ValueError(f"Unexpected output format from FalAI API. 
Expected string, got {type(text)}.") + return text + + +class FalAITextToImageTask(FalAITask): + def __init__(self): + super().__init__("text-to-image") + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + payload: Dict[str, Any] = { + "prompt": inputs, + **filter_none(parameters), + } + if "width" in payload and "height" in payload: + payload["image_size"] = { + "width": payload.pop("width"), + "height": payload.pop("height"), + } + if provider_mapping_info.adapter_weights_path is not None: + lora_path = constants.HUGGINGFACE_CO_URL_TEMPLATE.format( + repo_id=provider_mapping_info.hf_model_id, + revision="main", + filename=provider_mapping_info.adapter_weights_path, + ) + payload["loras"] = [{"path": lora_path, "scale": 1}] + if provider_mapping_info.provider_id == "fal-ai/lora": + # little hack: fal requires the base model for stable-diffusion-based loras but not for flux-based + # See payloads in https://fal.ai/models/fal-ai/lora/api vs https://fal.ai/models/fal-ai/flux-lora/api + payload["model_name"] = "stabilityai/stable-diffusion-xl-base-1.0" + + return payload + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + url = _as_dict(response)["images"][0]["url"] + return get_session().get(url).content + + +class FalAITextToSpeechTask(FalAITask): + def __init__(self): + super().__init__("text-to-speech") + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + return {"text": inputs, **filter_none(parameters)} + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + url = _as_dict(response)["audio"]["url"] + return get_session().get(url).content + + +class FalAITextToVideoTask(FalAITask): + def __init__(self): + super().__init__("text-to-video") + + def _prepare_base_url(self, api_key: str) -> str: + if api_key.startswith("hf_"): + return super()._prepare_base_url(api_key) + else: + logger.info(f"Calling '{self.provider}' provider directly.") + return "https://queue.fal.run" + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + if api_key.startswith("hf_"): + # Use the queue subdomain for HF routing + return f"/{mapped_model}?_subdomain=queue" + return f"/{mapped_model}" + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + return {"prompt": inputs, **filter_none(parameters)} + + def get_response( + self, + response: Union[bytes, Dict], + request_params: Optional[RequestParameters] = None, + ) -> Any: + response_dict = _as_dict(response) + + request_id = response_dict.get("request_id") + if not request_id: + raise ValueError("No request ID found in the response") + if request_params is None: + raise ValueError( + "A `RequestParameters` object should be provided to get text-to-video responses with Fal AI." 
+ ) + + # extract the base url and query params + parsed_url = urlparse(request_params.url) + # a bit hacky way to concatenate the provider name without parsing `parsed_url.path` + base_url = f"{parsed_url.scheme}://{parsed_url.netloc}{'/fal-ai' if parsed_url.netloc == 'router.huggingface.co' else ''}" + query_param = f"?{parsed_url.query}" if parsed_url.query else "" + + # extracting the provider model id for status and result urls + # from the response as it might be different from the mapped model in `request_params.url` + model_id = urlparse(response_dict.get("response_url")).path + status_url = f"{base_url}{str(model_id)}/status{query_param}" + result_url = f"{base_url}{str(model_id)}{query_param}" + + status = response_dict.get("status") + logger.info("Generating the video.. this can take several minutes.") + while status != "COMPLETED": + time.sleep(_POLLING_INTERVAL) + status_response = get_session().get(status_url, headers=request_params.headers) + hf_raise_for_status(status_response) + status = status_response.json().get("status") + + response = get_session().get(result_url, headers=request_params.headers).json() + url = _as_dict(response)["video"]["url"] + return get_session().get(url).content diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/featherless_ai.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/featherless_ai.py new file mode 100644 index 0000000000000000000000000000000000000000..6ad1c48134f5c990b6ac4fca5ff919f4cc0d2373 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/featherless_ai.py @@ -0,0 +1,38 @@ +from typing import Any, Dict, Optional, Union + +from huggingface_hub.hf_api import InferenceProviderMapping +from huggingface_hub.inference._common import RequestParameters, _as_dict + +from ._common import BaseConversationalTask, BaseTextGenerationTask, filter_none + + +_PROVIDER = "featherless-ai" +_BASE_URL = "https://api.featherless.ai" + + +class FeatherlessTextGenerationTask(BaseTextGenerationTask): + def __init__(self): + super().__init__(provider=_PROVIDER, base_url=_BASE_URL) + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + params = filter_none(parameters.copy()) + params["max_tokens"] = params.pop("max_new_tokens", None) + + return {"prompt": inputs, **params, "model": provider_mapping_info.provider_id} + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + output = _as_dict(response)["choices"][0] + return { + "generated_text": output["text"], + "details": { + "finish_reason": output.get("finish_reason"), + "seed": output.get("seed"), + }, + } + + +class FeatherlessConversationalTask(BaseConversationalTask): + def __init__(self): + super().__init__(provider=_PROVIDER, base_url=_BASE_URL) diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/fireworks_ai.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/fireworks_ai.py new file mode 100644 index 0000000000000000000000000000000000000000..b4cc19a5700047f6516b2784d9785a99d7e32451 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/fireworks_ai.py @@ -0,0 +1,27 @@ +from typing import Any, Dict, Optional + +from huggingface_hub.hf_api import InferenceProviderMapping + +from ._common import BaseConversationalTask + + +class 
FireworksAIConversationalTask(BaseConversationalTask): + def __init__(self): + super().__init__(provider="fireworks-ai", base_url="https://api.fireworks.ai") + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + return "/inference/v1/chat/completions" + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info) + response_format = parameters.get("response_format") + if isinstance(response_format, dict) and response_format.get("type") == "json_schema": + json_schema_details = response_format.get("json_schema") + if isinstance(json_schema_details, dict) and "schema" in json_schema_details: + payload["response_format"] = { # type: ignore [index] + "type": "json_object", + "schema": json_schema_details["schema"], + } + return payload diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/groq.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/groq.py new file mode 100644 index 0000000000000000000000000000000000000000..11e677504e89bc02b966e7d37d9e11f1b94b297f --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/groq.py @@ -0,0 +1,9 @@ +from ._common import BaseConversationalTask + + +class GroqConversationalTask(BaseConversationalTask): + def __init__(self): + super().__init__(provider="groq", base_url="https://api.groq.com") + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + return "/openai/v1/chat/completions" diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/hf_inference.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/hf_inference.py new file mode 100644 index 0000000000000000000000000000000000000000..f5531a02c7a127dbcdd3daccc3e6c7318e6036a2 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/hf_inference.py @@ -0,0 +1,212 @@ +import json +from functools import lru_cache +from pathlib import Path +from typing import Any, Dict, Optional, Union + +from huggingface_hub import constants +from huggingface_hub.hf_api import InferenceProviderMapping +from huggingface_hub.inference._common import RequestParameters, _b64_encode, _bytes_to_dict, _open_as_binary +from huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none +from huggingface_hub.utils import build_hf_headers, get_session, get_token, hf_raise_for_status + + +class HFInferenceTask(TaskProviderHelper): + """Base class for HF Inference API tasks.""" + + def __init__(self, task: str): + super().__init__( + provider="hf-inference", + base_url=constants.INFERENCE_PROXY_TEMPLATE.format(provider="hf-inference"), + task=task, + ) + + def _prepare_api_key(self, api_key: Optional[str]) -> str: + # special case: for HF Inference we allow not providing an API key + return api_key or get_token() # type: ignore[return-value] + + def _prepare_mapping_info(self, model: Optional[str]) -> InferenceProviderMapping: + if model is not None and model.startswith(("http://", "https://")): + return InferenceProviderMapping( + provider="hf-inference", providerId=model, hf_model_id=model, task=self.task, status="live" + ) + model_id = model if model is not None else _fetch_recommended_models().get(self.task) + if model_id is None: + raise ValueError( + f"Task {self.task} has no recommended model for HF Inference. 
Please specify a model" + " explicitly. Visit https://huggingface.co/tasks for more info." + ) + _check_supported_task(model_id, self.task) + return InferenceProviderMapping( + provider="hf-inference", providerId=model_id, hf_model_id=model_id, task=self.task, status="live" + ) + + def _prepare_url(self, api_key: str, mapped_model: str) -> str: + # hf-inference provider can handle URLs (e.g. Inference Endpoints or TGI deployment) + if mapped_model.startswith(("http://", "https://")): + return mapped_model + return ( + # Feature-extraction and sentence-similarity are the only cases where we handle models with several tasks. + f"{self.base_url}/models/{mapped_model}/pipeline/{self.task}" + if self.task in ("feature-extraction", "sentence-similarity") + # Otherwise, we use the default endpoint + else f"{self.base_url}/models/{mapped_model}" + ) + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + if isinstance(inputs, bytes): + raise ValueError(f"Unexpected binary input for task {self.task}.") + if isinstance(inputs, Path): + raise ValueError(f"Unexpected path input for task {self.task} (got {inputs})") + return {"inputs": inputs, "parameters": filter_none(parameters)} + + +class HFInferenceBinaryInputTask(HFInferenceTask): + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + return None + + def _prepare_payload_as_bytes( + self, + inputs: Any, + parameters: Dict, + provider_mapping_info: InferenceProviderMapping, + extra_payload: Optional[Dict], + ) -> Optional[bytes]: + parameters = filter_none(parameters) + extra_payload = extra_payload or {} + has_parameters = len(parameters) > 0 or len(extra_payload) > 0 + + # Raise if not a binary object or a local path or a URL. + if not isinstance(inputs, (bytes, Path)) and not isinstance(inputs, str): + raise ValueError(f"Expected binary inputs or a local path or a URL. 
Got {inputs}") + + # Send inputs as raw content when no parameters are provided + if not has_parameters: + with _open_as_binary(inputs) as data: + data_as_bytes = data if isinstance(data, bytes) else data.read() + return data_as_bytes + + # Otherwise encode as b64 + return json.dumps({"inputs": _b64_encode(inputs), "parameters": parameters, **extra_payload}).encode("utf-8") + + +class HFInferenceConversational(HFInferenceTask): + def __init__(self): + super().__init__("conversational") + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + payload = filter_none(parameters) + mapped_model = provider_mapping_info.provider_id + payload_model = parameters.get("model") or mapped_model + + if payload_model is None or payload_model.startswith(("http://", "https://")): + payload_model = "dummy" + + response_format = parameters.get("response_format") + if isinstance(response_format, dict) and response_format.get("type") == "json_schema": + payload["response_format"] = { + "type": "json_object", + "value": response_format["json_schema"]["schema"], + } + return {**payload, "model": payload_model, "messages": inputs} + + def _prepare_url(self, api_key: str, mapped_model: str) -> str: + base_url = ( + mapped_model + if mapped_model.startswith(("http://", "https://")) + else f"{constants.INFERENCE_PROXY_TEMPLATE.format(provider='hf-inference')}/models/{mapped_model}" + ) + return _build_chat_completion_url(base_url) + + +def _build_chat_completion_url(model_url: str) -> str: + # Strip trailing / + model_url = model_url.rstrip("/") + + # Append /chat/completions if not already present + if model_url.endswith("/v1"): + model_url += "/chat/completions" + + # Append /v1/chat/completions if not already present + if not model_url.endswith("/chat/completions"): + model_url += "/v1/chat/completions" + + return model_url + + +@lru_cache(maxsize=1) +def _fetch_recommended_models() -> Dict[str, Optional[str]]: + response = get_session().get(f"{constants.ENDPOINT}/api/tasks", headers=build_hf_headers()) + hf_raise_for_status(response) + return {task: next(iter(details["widgetModels"]), None) for task, details in response.json().items()} + + +@lru_cache(maxsize=None) +def _check_supported_task(model: str, task: str) -> None: + from huggingface_hub.hf_api import HfApi + + model_info = HfApi().model_info(model) + pipeline_tag = model_info.pipeline_tag + tags = model_info.tags or [] + is_conversational = "conversational" in tags + if task in ("text-generation", "conversational"): + if pipeline_tag == "text-generation": + # text-generation + conversational tag -> both tasks allowed + if is_conversational: + return + # text-generation without conversational tag -> only text-generation allowed + if task == "text-generation": + return + raise ValueError(f"Model '{model}' doesn't support task '{task}'.") + + if pipeline_tag == "text2text-generation": + if task == "text-generation": + return + raise ValueError(f"Model '{model}' doesn't support task '{task}'.") + + if pipeline_tag == "image-text-to-text": + if is_conversational and task == "conversational": + return # Only conversational allowed if tagged as conversational + raise ValueError("Non-conversational image-text-to-text task is not supported.") + + if ( + task in ("feature-extraction", "sentence-similarity") + and pipeline_tag in ("feature-extraction", "sentence-similarity") + and task in tags + ): + # feature-extraction and sentence-similarity are interchangeable for HF 
Inference + return + + # For all other tasks, just check pipeline tag + if pipeline_tag != task: + raise ValueError( + f"Model '{model}' doesn't support task '{task}'. Supported tasks: '{pipeline_tag}', got: '{task}'" + ) + return + + +class HFInferenceFeatureExtractionTask(HFInferenceTask): + def __init__(self): + super().__init__("feature-extraction") + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + if isinstance(inputs, bytes): + raise ValueError(f"Unexpected binary input for task {self.task}.") + if isinstance(inputs, Path): + raise ValueError(f"Unexpected path input for task {self.task} (got {inputs})") + + # Parameters are sent at root-level for feature-extraction task + # See specs: https://github.com/huggingface/huggingface.js/blob/main/packages/tasks/src/tasks/feature-extraction/spec/input.json + return {"inputs": inputs, **filter_none(parameters)} + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + if isinstance(response, bytes): + return _bytes_to_dict(response) + return response diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/hyperbolic.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/hyperbolic.py new file mode 100644 index 0000000000000000000000000000000000000000..6dcb14cc275f6b80db5643361b9dfd3cbf8d91a2 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/hyperbolic.py @@ -0,0 +1,47 @@ +import base64 +from typing import Any, Dict, Optional, Union + +from huggingface_hub.hf_api import InferenceProviderMapping +from huggingface_hub.inference._common import RequestParameters, _as_dict +from huggingface_hub.inference._providers._common import BaseConversationalTask, TaskProviderHelper, filter_none + + +class HyperbolicTextToImageTask(TaskProviderHelper): + def __init__(self): + super().__init__(provider="hyperbolic", base_url="https://api.hyperbolic.xyz", task="text-to-image") + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + return "/v1/images/generations" + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + mapped_model = provider_mapping_info.provider_id + parameters = filter_none(parameters) + if "num_inference_steps" in parameters: + parameters["steps"] = parameters.pop("num_inference_steps") + if "guidance_scale" in parameters: + parameters["cfg_scale"] = parameters.pop("guidance_scale") + # For Hyperbolic, the width and height are required parameters + if "width" not in parameters: + parameters["width"] = 512 + if "height" not in parameters: + parameters["height"] = 512 + return {"prompt": inputs, "model_name": mapped_model, **parameters} + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + response_dict = _as_dict(response) + return base64.b64decode(response_dict["images"][0]["image"]) + + +class HyperbolicTextGenerationTask(BaseConversationalTask): + """ + Special case for Hyperbolic, where text-generation task is handled as a conversational task. 
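+    Both tasks share the chat-completions route; only `self.task` differs, so
+    the provider mapping is validated against the task the user actually
+    requested.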
+ """ + + def __init__(self, task: str): + super().__init__( + provider="hyperbolic", + base_url="https://api.hyperbolic.xyz", + ) + self.task = task diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/nebius.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/nebius.py new file mode 100644 index 0000000000000000000000000000000000000000..85ad67c4c8835d7fb8bfe5f36e426614174a66ba --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/nebius.py @@ -0,0 +1,83 @@ +import base64 +from typing import Any, Dict, Optional, Union + +from huggingface_hub.hf_api import InferenceProviderMapping +from huggingface_hub.inference._common import RequestParameters, _as_dict +from huggingface_hub.inference._providers._common import ( + BaseConversationalTask, + BaseTextGenerationTask, + TaskProviderHelper, + filter_none, +) + + +class NebiusTextGenerationTask(BaseTextGenerationTask): + def __init__(self): + super().__init__(provider="nebius", base_url="https://api.studio.nebius.ai") + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + output = _as_dict(response)["choices"][0] + return { + "generated_text": output["text"], + "details": { + "finish_reason": output.get("finish_reason"), + "seed": output.get("seed"), + }, + } + + +class NebiusConversationalTask(BaseConversationalTask): + def __init__(self): + super().__init__(provider="nebius", base_url="https://api.studio.nebius.ai") + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info) + response_format = parameters.get("response_format") + if isinstance(response_format, dict) and response_format.get("type") == "json_schema": + json_schema_details = response_format.get("json_schema") + if isinstance(json_schema_details, dict) and "schema" in json_schema_details: + payload["guided_json"] = json_schema_details["schema"] # type: ignore [index] + return payload + + +class NebiusTextToImageTask(TaskProviderHelper): + def __init__(self): + super().__init__(task="text-to-image", provider="nebius", base_url="https://api.studio.nebius.ai") + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + return "/v1/images/generations" + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + mapped_model = provider_mapping_info.provider_id + parameters = filter_none(parameters) + if "guidance_scale" in parameters: + parameters.pop("guidance_scale") + if parameters.get("response_format") not in ("b64_json", "url"): + parameters["response_format"] = "b64_json" + + return {"prompt": inputs, **parameters, "model": mapped_model} + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + response_dict = _as_dict(response) + return base64.b64decode(response_dict["data"][0]["b64_json"]) + + +class NebiusFeatureExtractionTask(TaskProviderHelper): + def __init__(self): + super().__init__(task="feature-extraction", provider="nebius", base_url="https://api.studio.nebius.ai") + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + return "/v1/embeddings" + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> 
Optional[Dict]: + return {"input": inputs, "model": provider_mapping_info.provider_id} + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + embeddings = _as_dict(response)["data"] + return [embedding["embedding"] for embedding in embeddings] diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/novita.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/novita.py new file mode 100644 index 0000000000000000000000000000000000000000..44adc9017b456f487513cde251086075d84b69f0 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/novita.py @@ -0,0 +1,69 @@ +from typing import Any, Dict, Optional, Union + +from huggingface_hub.hf_api import InferenceProviderMapping +from huggingface_hub.inference._common import RequestParameters, _as_dict +from huggingface_hub.inference._providers._common import ( + BaseConversationalTask, + BaseTextGenerationTask, + TaskProviderHelper, + filter_none, +) +from huggingface_hub.utils import get_session + + +_PROVIDER = "novita" +_BASE_URL = "https://api.novita.ai" + + +class NovitaTextGenerationTask(BaseTextGenerationTask): + def __init__(self): + super().__init__(provider=_PROVIDER, base_url=_BASE_URL) + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + # there is no v1/ route for novita + return "/v3/openai/completions" + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + output = _as_dict(response)["choices"][0] + return { + "generated_text": output["text"], + "details": { + "finish_reason": output.get("finish_reason"), + "seed": output.get("seed"), + }, + } + + +class NovitaConversationalTask(BaseConversationalTask): + def __init__(self): + super().__init__(provider=_PROVIDER, base_url=_BASE_URL) + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + # there is no v1/ route for novita + return "/v3/openai/chat/completions" + + +class NovitaTextToVideoTask(TaskProviderHelper): + def __init__(self): + super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task="text-to-video") + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + return f"/v3/hf/{mapped_model}" + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + return {"prompt": inputs, **filter_none(parameters)} + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + response_dict = _as_dict(response) + if not ( + isinstance(response_dict, dict) + and "video" in response_dict + and isinstance(response_dict["video"], dict) + and "video_url" in response_dict["video"] + ): + raise ValueError("Expected response format: { 'video': { 'video_url': string } }") + + video_url = response_dict["video"]["video_url"] + return get_session().get(video_url).content diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/nscale.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/nscale.py new file mode 100644 index 0000000000000000000000000000000000000000..ce5b20e354e246e93a7dd9831e4acf69ebcfad63 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/nscale.py @@ -0,0 +1,44 @@ +import base64 +from typing import Any, Dict, Optional, Union + +from huggingface_hub.hf_api import InferenceProviderMapping +from 
huggingface_hub.inference._common import RequestParameters, _as_dict + +from ._common import BaseConversationalTask, TaskProviderHelper, filter_none + + +class NscaleConversationalTask(BaseConversationalTask): + def __init__(self): + super().__init__(provider="nscale", base_url="https://inference.api.nscale.com") + + +class NscaleTextToImageTask(TaskProviderHelper): + def __init__(self): + super().__init__(provider="nscale", base_url="https://inference.api.nscale.com", task="text-to-image") + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + return "/v1/images/generations" + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + mapped_model = provider_mapping_info.provider_id + # Combine all parameters except inputs and parameters + parameters = filter_none(parameters) + if "width" in parameters and "height" in parameters: + parameters["size"] = f"{parameters.pop('width')}x{parameters.pop('height')}" + if "num_inference_steps" in parameters: + parameters.pop("num_inference_steps") + if "cfg_scale" in parameters: + parameters.pop("cfg_scale") + payload = { + "response_format": "b64_json", + "prompt": inputs, + "model": mapped_model, + **parameters, + } + return payload + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + response_dict = _as_dict(response) + return base64.b64decode(response_dict["data"][0]["b64_json"]) diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/openai.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/openai.py new file mode 100644 index 0000000000000000000000000000000000000000..7a554093c173ea8f664cb7fbd9616ce3a08ce78c --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/openai.py @@ -0,0 +1,25 @@ +from typing import Optional + +from huggingface_hub.hf_api import InferenceProviderMapping +from huggingface_hub.inference._providers._common import BaseConversationalTask + + +class OpenAIConversationalTask(BaseConversationalTask): + def __init__(self): + super().__init__(provider="openai", base_url="https://api.openai.com") + + def _prepare_api_key(self, api_key: Optional[str]) -> str: + if api_key is None: + raise ValueError("You must provide an api_key to work with OpenAI API.") + if api_key.startswith("hf_"): + raise ValueError( + "OpenAI provider is not available through Hugging Face routing, please use your own OpenAI API key." + ) + return api_key + + def _prepare_mapping_info(self, model: Optional[str]) -> InferenceProviderMapping: + if model is None: + raise ValueError("Please provide an OpenAI model ID, e.g. 
`gpt-4o` or `o1`.") + return InferenceProviderMapping( + provider="openai", providerId=model, task="conversational", status="live", hf_model_id=model + ) diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/replicate.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/replicate.py new file mode 100644 index 0000000000000000000000000000000000000000..2ba312764735e289ebb6add72577f4a948a18dc3 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/replicate.py @@ -0,0 +1,72 @@ +from typing import Any, Dict, Optional, Union + +from huggingface_hub.hf_api import InferenceProviderMapping +from huggingface_hub.inference._common import RequestParameters, _as_dict +from huggingface_hub.inference._providers._common import TaskProviderHelper, filter_none +from huggingface_hub.utils import get_session + + +_PROVIDER = "replicate" +_BASE_URL = "https://api.replicate.com" + + +class ReplicateTask(TaskProviderHelper): + def __init__(self, task: str): + super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task=task) + + def _prepare_headers(self, headers: Dict, api_key: str) -> Dict: + headers = super()._prepare_headers(headers, api_key) + headers["Prefer"] = "wait" + return headers + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + if ":" in mapped_model: + return "/v1/predictions" + return f"/v1/models/{mapped_model}/predictions" + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + mapped_model = provider_mapping_info.provider_id + payload: Dict[str, Any] = {"input": {"prompt": inputs, **filter_none(parameters)}} + if ":" in mapped_model: + version = mapped_model.split(":", 1)[1] + payload["version"] = version + return payload + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + response_dict = _as_dict(response) + if response_dict.get("output") is None: + raise TimeoutError( + f"Inference request timed out after 60 seconds. No output generated for model {response_dict.get('model')}" + "The model might be in cold state or starting up. Please try again later." 
+ ) + output_url = ( + response_dict["output"] if isinstance(response_dict["output"], str) else response_dict["output"][0] + ) + return get_session().get(output_url).content + + +class ReplicateTextToImageTask(ReplicateTask): + def __init__(self): + super().__init__("text-to-image") + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + payload: Dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info) # type: ignore[assignment] + if provider_mapping_info.adapter_weights_path is not None: + payload["input"]["lora_weights"] = f"https://huggingface.co/{provider_mapping_info.hf_model_id}" + return payload + + +class ReplicateTextToSpeechTask(ReplicateTask): + def __init__(self): + super().__init__("text-to-speech") + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + payload: Dict = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info) # type: ignore[assignment] + payload["input"]["text"] = payload["input"].pop("prompt") # rename "prompt" to "text" for TTS + return payload diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/sambanova.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/sambanova.py new file mode 100644 index 0000000000000000000000000000000000000000..ed96fb766ce49003b605bda8ef8ee34da0ebe2f4 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/sambanova.py @@ -0,0 +1,42 @@ +from typing import Any, Dict, Optional, Union + +from huggingface_hub.hf_api import InferenceProviderMapping +from huggingface_hub.inference._common import RequestParameters, _as_dict +from huggingface_hub.inference._providers._common import BaseConversationalTask, TaskProviderHelper, filter_none + + +class SambanovaConversationalTask(BaseConversationalTask): + def __init__(self): + super().__init__(provider="sambanova", base_url="https://api.sambanova.ai") + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + response_format_config = parameters.get("response_format") + if isinstance(response_format_config, dict): + if response_format_config.get("type") == "json_schema": + json_schema_config = response_format_config.get("json_schema", {}) + strict = json_schema_config.get("strict") + if isinstance(json_schema_config, dict) and (strict is True or strict is None): + json_schema_config["strict"] = False + + payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info) + return payload + + +class SambanovaFeatureExtractionTask(TaskProviderHelper): + def __init__(self): + super().__init__(provider="sambanova", base_url="https://api.sambanova.ai", task="feature-extraction") + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + return "/v1/embeddings" + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + parameters = filter_none(parameters) + return {"input": inputs, "model": provider_mapping_info.provider_id, **parameters} + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + embeddings = _as_dict(response)["data"] + return [embedding["embedding"] for embedding in embeddings] diff --git 
a/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/together.py b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/together.py new file mode 100644 index 0000000000000000000000000000000000000000..de166b7baf8d50b255f29cf8cc9b9d3fa639646e --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/inference/_providers/together.py @@ -0,0 +1,88 @@ +import base64 +from abc import ABC +from typing import Any, Dict, Optional, Union + +from huggingface_hub.hf_api import InferenceProviderMapping +from huggingface_hub.inference._common import RequestParameters, _as_dict +from huggingface_hub.inference._providers._common import ( + BaseConversationalTask, + BaseTextGenerationTask, + TaskProviderHelper, + filter_none, +) + + +_PROVIDER = "together" +_BASE_URL = "https://api.together.xyz" + + +class TogetherTask(TaskProviderHelper, ABC): + """Base class for Together API tasks.""" + + def __init__(self, task: str): + super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task=task) + + def _prepare_route(self, mapped_model: str, api_key: str) -> str: + if self.task == "text-to-image": + return "/v1/images/generations" + elif self.task == "conversational": + return "/v1/chat/completions" + elif self.task == "text-generation": + return "/v1/completions" + raise ValueError(f"Unsupported task '{self.task}' for Together API.") + + +class TogetherTextGenerationTask(BaseTextGenerationTask): + def __init__(self): + super().__init__(provider=_PROVIDER, base_url=_BASE_URL) + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + output = _as_dict(response)["choices"][0] + return { + "generated_text": output["text"], + "details": { + "finish_reason": output.get("finish_reason"), + "seed": output.get("seed"), + }, + } + + +class TogetherConversationalTask(BaseConversationalTask): + def __init__(self): + super().__init__(provider=_PROVIDER, base_url=_BASE_URL) + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + payload = super()._prepare_payload_as_dict(inputs, parameters, provider_mapping_info) + response_format = parameters.get("response_format") + if isinstance(response_format, dict) and response_format.get("type") == "json_schema": + json_schema_details = response_format.get("json_schema") + if isinstance(json_schema_details, dict) and "schema" in json_schema_details: + payload["response_format"] = { # type: ignore [index] + "type": "json_object", + "schema": json_schema_details["schema"], + } + + return payload + + +class TogetherTextToImageTask(TogetherTask): + def __init__(self): + super().__init__("text-to-image") + + def _prepare_payload_as_dict( + self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping + ) -> Optional[Dict]: + mapped_model = provider_mapping_info.provider_id + parameters = filter_none(parameters) + if "num_inference_steps" in parameters: + parameters["steps"] = parameters.pop("num_inference_steps") + if "guidance_scale" in parameters: + parameters["guidance"] = parameters.pop("guidance_scale") + + return {"prompt": inputs, "response_format": "base64", **parameters, "model": mapped_model} + + def get_response(self, response: Union[bytes, Dict], request_params: Optional[RequestParameters] = None) -> Any: + response_dict = _as_dict(response) + return base64.b64decode(response_dict["data"][0]["b64_json"]) diff --git 
a/.venv/lib/python3.13/site-packages/huggingface_hub/templates/datasetcard_template.md b/.venv/lib/python3.13/site-packages/huggingface_hub/templates/datasetcard_template.md new file mode 100644 index 0000000000000000000000000000000000000000..9af29ebbed93653ec74a8952e314e7554323ef15 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/templates/datasetcard_template.md @@ -0,0 +1,143 @@ +--- +# For reference on dataset card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/datasetcard.md?plain=1 +# Doc / guide: https://huggingface.co/docs/hub/datasets-cards +{{ card_data }} +--- + +# Dataset Card for {{ pretty_name | default("Dataset Name", true) }} + + + +{{ dataset_summary | default("", true) }} + +## Dataset Details + +### Dataset Description + + + +{{ dataset_description | default("", true) }} + +- **Curated by:** {{ curators | default("[More Information Needed]", true)}} +- **Funded by [optional]:** {{ funded_by | default("[More Information Needed]", true)}} +- **Shared by [optional]:** {{ shared_by | default("[More Information Needed]", true)}} +- **Language(s) (NLP):** {{ language | default("[More Information Needed]", true)}} +- **License:** {{ license | default("[More Information Needed]", true)}} + +### Dataset Sources [optional] + + + +- **Repository:** {{ repo | default("[More Information Needed]", true)}} +- **Paper [optional]:** {{ paper | default("[More Information Needed]", true)}} +- **Demo [optional]:** {{ demo | default("[More Information Needed]", true)}} + +## Uses + + + +### Direct Use + + + +{{ direct_use | default("[More Information Needed]", true)}} + +### Out-of-Scope Use + + + +{{ out_of_scope_use | default("[More Information Needed]", true)}} + +## Dataset Structure + + + +{{ dataset_structure | default("[More Information Needed]", true)}} + +## Dataset Creation + +### Curation Rationale + + + +{{ curation_rationale_section | default("[More Information Needed]", true)}} + +### Source Data + + + +#### Data Collection and Processing + + + +{{ data_collection_and_processing_section | default("[More Information Needed]", true)}} + +#### Who are the source data producers? + + + +{{ source_data_producers_section | default("[More Information Needed]", true)}} + +### Annotations [optional] + + + +#### Annotation process + + + +{{ annotation_process_section | default("[More Information Needed]", true)}} + +#### Who are the annotators? + + + +{{ who_are_annotators_section | default("[More Information Needed]", true)}} + +#### Personal and Sensitive Information + + + +{{ personal_and_sensitive_information | default("[More Information Needed]", true)}} + +## Bias, Risks, and Limitations + + + +{{ bias_risks_limitations | default("[More Information Needed]", true)}} + +### Recommendations + + + +{{ bias_recommendations | default("Users should be made aware of the risks, biases and limitations of the dataset. 
More information needed for further recommendations.", true)}} + +## Citation [optional] + + + +**BibTeX:** + +{{ citation_bibtex | default("[More Information Needed]", true)}} + +**APA:** + +{{ citation_apa | default("[More Information Needed]", true)}} + +## Glossary [optional] + + + +{{ glossary | default("[More Information Needed]", true)}} + +## More Information [optional] + +{{ more_information | default("[More Information Needed]", true)}} + +## Dataset Card Authors [optional] + +{{ dataset_card_authors | default("[More Information Needed]", true)}} + +## Dataset Card Contact + +{{ dataset_card_contact | default("[More Information Needed]", true)}} diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/templates/modelcard_template.md b/.venv/lib/python3.13/site-packages/huggingface_hub/templates/modelcard_template.md new file mode 100644 index 0000000000000000000000000000000000000000..79ca15e4547debac763b390ef8e4b715e6f6403f --- /dev/null +++ b/.venv/lib/python3.13/site-packages/huggingface_hub/templates/modelcard_template.md @@ -0,0 +1,200 @@ +--- +# For reference on model card metadata, see the spec: https://github.com/huggingface/hub-docs/blob/main/modelcard.md?plain=1 +# Doc / guide: https://huggingface.co/docs/hub/model-cards +{{ card_data }} +--- + +# Model Card for {{ model_id | default("Model ID", true) }} + + + +{{ model_summary | default("", true) }} + +## Model Details + +### Model Description + + + +{{ model_description | default("", true) }} + +- **Developed by:** {{ developers | default("[More Information Needed]", true)}} +- **Funded by [optional]:** {{ funded_by | default("[More Information Needed]", true)}} +- **Shared by [optional]:** {{ shared_by | default("[More Information Needed]", true)}} +- **Model type:** {{ model_type | default("[More Information Needed]", true)}} +- **Language(s) (NLP):** {{ language | default("[More Information Needed]", true)}} +- **License:** {{ license | default("[More Information Needed]", true)}} +- **Finetuned from model [optional]:** {{ base_model | default("[More Information Needed]", true)}} + +### Model Sources [optional] + + + +- **Repository:** {{ repo | default("[More Information Needed]", true)}} +- **Paper [optional]:** {{ paper | default("[More Information Needed]", true)}} +- **Demo [optional]:** {{ demo | default("[More Information Needed]", true)}} + +## Uses + + + +### Direct Use + + + +{{ direct_use | default("[More Information Needed]", true)}} + +### Downstream Use [optional] + + + +{{ downstream_use | default("[More Information Needed]", true)}} + +### Out-of-Scope Use + + + +{{ out_of_scope_use | default("[More Information Needed]", true)}} + +## Bias, Risks, and Limitations + + + +{{ bias_risks_limitations | default("[More Information Needed]", true)}} + +### Recommendations + + + +{{ bias_recommendations | default("Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.", true)}} + +## How to Get Started with the Model + +Use the code below to get started with the model. 
+ +{{ get_started_code | default("[More Information Needed]", true)}} + +## Training Details + +### Training Data + + + +{{ training_data | default("[More Information Needed]", true)}} + +### Training Procedure + + + +#### Preprocessing [optional] + +{{ preprocessing | default("[More Information Needed]", true)}} + + +#### Training Hyperparameters + +- **Training regime:** {{ training_regime | default("[More Information Needed]", true)}} + +#### Speeds, Sizes, Times [optional] + + + +{{ speeds_sizes_times | default("[More Information Needed]", true)}} + +## Evaluation + + + +### Testing Data, Factors & Metrics + +#### Testing Data + + + +{{ testing_data | default("[More Information Needed]", true)}} + +#### Factors + + + +{{ testing_factors | default("[More Information Needed]", true)}} + +#### Metrics + + + +{{ testing_metrics | default("[More Information Needed]", true)}} + +### Results + +{{ results | default("[More Information Needed]", true)}} + +#### Summary + +{{ results_summary | default("", true) }} + +## Model Examination [optional] + + + +{{ model_examination | default("[More Information Needed]", true)}} + +## Environmental Impact + + + +Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). + +- **Hardware Type:** {{ hardware_type | default("[More Information Needed]", true)}} +- **Hours used:** {{ hours_used | default("[More Information Needed]", true)}} +- **Cloud Provider:** {{ cloud_provider | default("[More Information Needed]", true)}} +- **Compute Region:** {{ cloud_region | default("[More Information Needed]", true)}} +- **Carbon Emitted:** {{ co2_emitted | default("[More Information Needed]", true)}} + +## Technical Specifications [optional] + +### Model Architecture and Objective + +{{ model_specs | default("[More Information Needed]", true)}} + +### Compute Infrastructure + +{{ compute_infrastructure | default("[More Information Needed]", true)}} + +#### Hardware + +{{ hardware_requirements | default("[More Information Needed]", true)}} + +#### Software + +{{ software | default("[More Information Needed]", true)}} + +## Citation [optional] + + + +**BibTeX:** + +{{ citation_bibtex | default("[More Information Needed]", true)}} + +**APA:** + +{{ citation_apa | default("[More Information Needed]", true)}} + +## Glossary [optional] + + + +{{ glossary | default("[More Information Needed]", true)}} + +## More Information [optional] + +{{ more_information | default("[More Information Needed]", true)}} + +## Model Card Authors [optional] + +{{ model_card_authors | default("[More Information Needed]", true)}} + +## Model Card Contact + +{{ model_card_contact | default("[More Information Needed]", true)}} diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/__init__.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..45f2c54bff4525ca9bb4c9661753523748594d72 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/__init__.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_auth.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_auth.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5b211721e567d7f99f508737b557979f775dc5d5 
Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_auth.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_cache_assets.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_cache_assets.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..91f9f80d1914232858b3454be30757abf2714c07 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_cache_assets.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_cache_manager.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_cache_manager.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c58517a1aa785a6709d68bc94fd2f3d3c0d7c974 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_cache_manager.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_chunk_utils.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_chunk_utils.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..63f3285659162fea848a9a6b33d4159d9f70a87f Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_chunk_utils.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_datetime.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_datetime.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bda7a396cf87a3de501c93e522fe4cd5620b355d Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_datetime.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_deprecation.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_deprecation.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b7055743e7c50b0592c7b05688fd39b58db8ad52 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_deprecation.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_experimental.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_experimental.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1574dfe8410cbaf09c8941fcd0c11108d29eaad4 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_experimental.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_git_credential.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_git_credential.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f57f1eb1892f529428a535eea885cb2a6ceba0ac Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_git_credential.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_headers.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_headers.cpython-313.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..b2d28b4c0dfccd62d426dfa049e9201ee50502c5 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_headers.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_http.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_http.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..064e9e8f728ed91fcd538d52021108ecdba0b4b7 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_http.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_pagination.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_pagination.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58d048c126b297b14309845fe0122c4dc19746fa Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_pagination.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_paths.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_paths.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9f634539d92f75269d8533a196c8a7946370ddb2 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_paths.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_runtime.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_runtime.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67ba39ef7c92fc94ed0b575e18d0c45022e696db Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_runtime.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_safetensors.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_safetensors.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..90db3f93da15ab0baeceda9c2fa8f3c983d4c8d5 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_safetensors.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_subprocess.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_subprocess.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e9d5589e72f707c2f3a5bcd171b8dbfce38c5ad Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_subprocess.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_telemetry.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_telemetry.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5384b820023318cf73f7efa3a9da89adaec18555 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_telemetry.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_xet.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_xet.cpython-313.pyc new file mode 100644 
index 0000000000000000000000000000000000000000..4409e67a994cbf3ae36e973d4b4b73f91b841ba4 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/_xet.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/endpoint_helpers.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/endpoint_helpers.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4f98914c0c6603a34751324269cbe1bc1e0460f9 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/endpoint_helpers.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/insecure_hashlib.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/insecure_hashlib.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac84a2008d234b14c0dbbc45348e0104a658dfa6 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/insecure_hashlib.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/logging.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/logging.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6d01852492057da352a9c214e9c945277c57c5ed Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/logging.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/sha.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/sha.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e296e57715763621d836219a54836abb1343524c Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/sha.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/tqdm.cpython-313.pyc b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/tqdm.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b076b081344d5bdaf1de08d19fa921add89a1214 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/huggingface_hub/utils/__pycache__/tqdm.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/packaging-25.0.dist-info/licenses/LICENSE b/.venv/lib/python3.13/site-packages/packaging-25.0.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..6f62d44e4ef733c0e713afcd2371fed7f2b3de67 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/packaging-25.0.dist-info/licenses/LICENSE @@ -0,0 +1,3 @@ +This software is made available under the terms of *either* of the licenses +found in LICENSE.APACHE or LICENSE.BSD. Contributions to this software is made +under the terms of *both* these licenses. 
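As a quick orientation for the `packaging.licenses` sources vendored a few diffs below, here is a minimal usage sketch of `canonicalize_license_expression`; the inputs and expected outputs are illustrative, based on the `packaging/licenses/__init__.py` implementation shown further down.

from packaging.licenses import (
    InvalidLicenseExpression,
    canonicalize_license_expression,
)

# SPDX ids and boolean operators are case-normalized against the SPDX tables.
print(canonicalize_license_expression("mit or apache-2.0"))  # MIT OR Apache-2.0

# Arbitrary LicenseRef- identifiers are accepted and re-cased as written.
print(canonicalize_license_expression("LicenseRef-My-Custom"))  # LicenseRef-My-Custom

# Unknown SPDX identifiers are rejected.
try:
    canonicalize_license_expression("mit and not-a-real-license")
except InvalidLicenseExpression as exc:
    print(f"rejected: {exc}")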
diff --git a/.venv/lib/python3.13/site-packages/packaging-25.0.dist-info/licenses/LICENSE.APACHE b/.venv/lib/python3.13/site-packages/packaging-25.0.dist-info/licenses/LICENSE.APACHE new file mode 100644 index 0000000000000000000000000000000000000000..f433b1a53f5b830a205fd2df78e2b34974656c7b --- /dev/null +++ b/.venv/lib/python3.13/site-packages/packaging-25.0.dist-info/licenses/LICENSE.APACHE @@ -0,0 +1,177 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS diff --git a/.venv/lib/python3.13/site-packages/packaging-25.0.dist-info/licenses/LICENSE.BSD b/.venv/lib/python3.13/site-packages/packaging-25.0.dist-info/licenses/LICENSE.BSD new file mode 100644 index 0000000000000000000000000000000000000000..42ce7b75c92fb01a3f6ed17eea363f756b7da582 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/packaging-25.0.dist-info/licenses/LICENSE.BSD @@ -0,0 +1,23 @@ +Copyright (c) Donald Stufft and individual contributors. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + + 1. Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. + + 2. Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/.venv/lib/python3.13/site-packages/packaging/licenses/__init__.py b/.venv/lib/python3.13/site-packages/packaging/licenses/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6f7f9e6289dabe5bbc45cc8f01ac2449da890d02 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/packaging/licenses/__init__.py @@ -0,0 +1,145 @@ +####################################################################################### +# +# Adapted from: +# https://github.com/pypa/hatch/blob/5352e44/backend/src/hatchling/licenses/parse.py +# +# MIT License +# +# Copyright (c) 2017-present Ofek Lev +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of this +# software and associated documentation files (the "Software"), to deal in the Software +# without restriction, including without limitation the rights to use, copy, modify, +# merge, publish, distribute, sublicense, and/or sell copies of the Software, and to +# permit persons to whom the Software is furnished to do so, subject to the following +# conditions: +# +# The above copyright notice and this permission notice shall be included in all copies +# or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, +# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A +# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF +# CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE +# OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+# +# +# With additional allowance of arbitrary `LicenseRef-` identifiers, not just +# `LicenseRef-Public-Domain` and `LicenseRef-Proprietary`. +# +####################################################################################### +from __future__ import annotations + +import re +from typing import NewType, cast + +from packaging.licenses._spdx import EXCEPTIONS, LICENSES + +__all__ = [ + "InvalidLicenseExpression", + "NormalizedLicenseExpression", + "canonicalize_license_expression", +] + +license_ref_allowed = re.compile("^[A-Za-z0-9.-]*$") + +NormalizedLicenseExpression = NewType("NormalizedLicenseExpression", str) + + +class InvalidLicenseExpression(ValueError): + """Raised when a license-expression string is invalid + + >>> canonicalize_license_expression("invalid") + Traceback (most recent call last): + ... + packaging.licenses.InvalidLicenseExpression: Invalid license expression: 'invalid' + """ + + +def canonicalize_license_expression( + raw_license_expression: str, +) -> NormalizedLicenseExpression: + if not raw_license_expression: + message = f"Invalid license expression: {raw_license_expression!r}" + raise InvalidLicenseExpression(message) + + # Pad any parentheses so tokenization can be achieved by merely splitting on + # whitespace. + license_expression = raw_license_expression.replace("(", " ( ").replace(")", " ) ") + licenseref_prefix = "LicenseRef-" + license_refs = { + ref.lower(): "LicenseRef-" + ref[len(licenseref_prefix) :] + for ref in license_expression.split() + if ref.lower().startswith(licenseref_prefix.lower()) + } + + # Normalize to lower case so we can look up licenses/exceptions + # and so boolean operators are Python-compatible. + license_expression = license_expression.lower() + + tokens = license_expression.split() + + # Rather than implementing boolean logic, we create an expression that Python can + # parse. Everything that is not involved with the grammar itself is treated as + # `False` and the expression should evaluate as such. + python_tokens = [] + for token in tokens: + if token not in {"or", "and", "with", "(", ")"}: + python_tokens.append("False") + elif token == "with": + python_tokens.append("or") + elif token == "(" and python_tokens and python_tokens[-1] not in {"or", "and"}: + message = f"Invalid license expression: {raw_license_expression!r}" + raise InvalidLicenseExpression(message) + else: + python_tokens.append(token) + + python_expression = " ".join(python_tokens) + try: + invalid = eval(python_expression, globals(), locals()) + except Exception: + invalid = True + + if invalid is not False: + message = f"Invalid license expression: {raw_license_expression!r}" + raise InvalidLicenseExpression(message) from None + + # Take a final pass to check for unknown licenses/exceptions. 
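+ # At this point the expression is known to be structurally valid (the eval
+ # above reduced it to a plain boolean), so this pass only maps each token to
+ # its canonical SPDX spelling, upper-cases the operators, and rejects ids
+ # that appear in neither the LICENSES/EXCEPTIONS tables nor `license_refs`.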
+ normalized_tokens = [] + for token in tokens: + if token in {"or", "and", "with", "(", ")"}: + normalized_tokens.append(token.upper()) + continue + + if normalized_tokens and normalized_tokens[-1] == "WITH": + if token not in EXCEPTIONS: + message = f"Unknown license exception: {token!r}" + raise InvalidLicenseExpression(message) + + normalized_tokens.append(EXCEPTIONS[token]["id"]) + else: + if token.endswith("+"): + final_token = token[:-1] + suffix = "+" + else: + final_token = token + suffix = "" + + if final_token.startswith("licenseref-"): + if not license_ref_allowed.match(final_token): + message = f"Invalid licenseref: {final_token!r}" + raise InvalidLicenseExpression(message) + normalized_tokens.append(license_refs[final_token] + suffix) + else: + if final_token not in LICENSES: + message = f"Unknown license: {final_token!r}" + raise InvalidLicenseExpression(message) + normalized_tokens.append(LICENSES[final_token]["id"] + suffix) + + normalized_expression = " ".join(normalized_tokens) + + return cast( + NormalizedLicenseExpression, + normalized_expression.replace("( ", "(").replace(" )", ")"), + ) diff --git a/.venv/lib/python3.13/site-packages/packaging/licenses/_spdx.py b/.venv/lib/python3.13/site-packages/packaging/licenses/_spdx.py new file mode 100644 index 0000000000000000000000000000000000000000..eac22276a34ccd73fc9d70c67ca318a49eb11e77 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/packaging/licenses/_spdx.py @@ -0,0 +1,759 @@ + +from __future__ import annotations + +from typing import TypedDict + +class SPDXLicense(TypedDict): + id: str + deprecated: bool + +class SPDXException(TypedDict): + id: str + deprecated: bool + + +VERSION = '3.25.0' + +LICENSES: dict[str, SPDXLicense] = { + '0bsd': {'id': '0BSD', 'deprecated': False}, + '3d-slicer-1.0': {'id': '3D-Slicer-1.0', 'deprecated': False}, + 'aal': {'id': 'AAL', 'deprecated': False}, + 'abstyles': {'id': 'Abstyles', 'deprecated': False}, + 'adacore-doc': {'id': 'AdaCore-doc', 'deprecated': False}, + 'adobe-2006': {'id': 'Adobe-2006', 'deprecated': False}, + 'adobe-display-postscript': {'id': 'Adobe-Display-PostScript', 'deprecated': False}, + 'adobe-glyph': {'id': 'Adobe-Glyph', 'deprecated': False}, + 'adobe-utopia': {'id': 'Adobe-Utopia', 'deprecated': False}, + 'adsl': {'id': 'ADSL', 'deprecated': False}, + 'afl-1.1': {'id': 'AFL-1.1', 'deprecated': False}, + 'afl-1.2': {'id': 'AFL-1.2', 'deprecated': False}, + 'afl-2.0': {'id': 'AFL-2.0', 'deprecated': False}, + 'afl-2.1': {'id': 'AFL-2.1', 'deprecated': False}, + 'afl-3.0': {'id': 'AFL-3.0', 'deprecated': False}, + 'afmparse': {'id': 'Afmparse', 'deprecated': False}, + 'agpl-1.0': {'id': 'AGPL-1.0', 'deprecated': True}, + 'agpl-1.0-only': {'id': 'AGPL-1.0-only', 'deprecated': False}, + 'agpl-1.0-or-later': {'id': 'AGPL-1.0-or-later', 'deprecated': False}, + 'agpl-3.0': {'id': 'AGPL-3.0', 'deprecated': True}, + 'agpl-3.0-only': {'id': 'AGPL-3.0-only', 'deprecated': False}, + 'agpl-3.0-or-later': {'id': 'AGPL-3.0-or-later', 'deprecated': False}, + 'aladdin': {'id': 'Aladdin', 'deprecated': False}, + 'amd-newlib': {'id': 'AMD-newlib', 'deprecated': False}, + 'amdplpa': {'id': 'AMDPLPA', 'deprecated': False}, + 'aml': {'id': 'AML', 'deprecated': False}, + 'aml-glslang': {'id': 'AML-glslang', 'deprecated': False}, + 'ampas': {'id': 'AMPAS', 'deprecated': False}, + 'antlr-pd': {'id': 'ANTLR-PD', 'deprecated': False}, + 'antlr-pd-fallback': {'id': 'ANTLR-PD-fallback', 'deprecated': False}, + 'any-osi': {'id': 'any-OSI', 'deprecated': False}, + 'apache-1.0': 
{'id': 'Apache-1.0', 'deprecated': False}, + 'apache-1.1': {'id': 'Apache-1.1', 'deprecated': False}, + 'apache-2.0': {'id': 'Apache-2.0', 'deprecated': False}, + 'apafml': {'id': 'APAFML', 'deprecated': False}, + 'apl-1.0': {'id': 'APL-1.0', 'deprecated': False}, + 'app-s2p': {'id': 'App-s2p', 'deprecated': False}, + 'apsl-1.0': {'id': 'APSL-1.0', 'deprecated': False}, + 'apsl-1.1': {'id': 'APSL-1.1', 'deprecated': False}, + 'apsl-1.2': {'id': 'APSL-1.2', 'deprecated': False}, + 'apsl-2.0': {'id': 'APSL-2.0', 'deprecated': False}, + 'arphic-1999': {'id': 'Arphic-1999', 'deprecated': False}, + 'artistic-1.0': {'id': 'Artistic-1.0', 'deprecated': False}, + 'artistic-1.0-cl8': {'id': 'Artistic-1.0-cl8', 'deprecated': False}, + 'artistic-1.0-perl': {'id': 'Artistic-1.0-Perl', 'deprecated': False}, + 'artistic-2.0': {'id': 'Artistic-2.0', 'deprecated': False}, + 'aswf-digital-assets-1.0': {'id': 'ASWF-Digital-Assets-1.0', 'deprecated': False}, + 'aswf-digital-assets-1.1': {'id': 'ASWF-Digital-Assets-1.1', 'deprecated': False}, + 'baekmuk': {'id': 'Baekmuk', 'deprecated': False}, + 'bahyph': {'id': 'Bahyph', 'deprecated': False}, + 'barr': {'id': 'Barr', 'deprecated': False}, + 'bcrypt-solar-designer': {'id': 'bcrypt-Solar-Designer', 'deprecated': False}, + 'beerware': {'id': 'Beerware', 'deprecated': False}, + 'bitstream-charter': {'id': 'Bitstream-Charter', 'deprecated': False}, + 'bitstream-vera': {'id': 'Bitstream-Vera', 'deprecated': False}, + 'bittorrent-1.0': {'id': 'BitTorrent-1.0', 'deprecated': False}, + 'bittorrent-1.1': {'id': 'BitTorrent-1.1', 'deprecated': False}, + 'blessing': {'id': 'blessing', 'deprecated': False}, + 'blueoak-1.0.0': {'id': 'BlueOak-1.0.0', 'deprecated': False}, + 'boehm-gc': {'id': 'Boehm-GC', 'deprecated': False}, + 'borceux': {'id': 'Borceux', 'deprecated': False}, + 'brian-gladman-2-clause': {'id': 'Brian-Gladman-2-Clause', 'deprecated': False}, + 'brian-gladman-3-clause': {'id': 'Brian-Gladman-3-Clause', 'deprecated': False}, + 'bsd-1-clause': {'id': 'BSD-1-Clause', 'deprecated': False}, + 'bsd-2-clause': {'id': 'BSD-2-Clause', 'deprecated': False}, + 'bsd-2-clause-darwin': {'id': 'BSD-2-Clause-Darwin', 'deprecated': False}, + 'bsd-2-clause-first-lines': {'id': 'BSD-2-Clause-first-lines', 'deprecated': False}, + 'bsd-2-clause-freebsd': {'id': 'BSD-2-Clause-FreeBSD', 'deprecated': True}, + 'bsd-2-clause-netbsd': {'id': 'BSD-2-Clause-NetBSD', 'deprecated': True}, + 'bsd-2-clause-patent': {'id': 'BSD-2-Clause-Patent', 'deprecated': False}, + 'bsd-2-clause-views': {'id': 'BSD-2-Clause-Views', 'deprecated': False}, + 'bsd-3-clause': {'id': 'BSD-3-Clause', 'deprecated': False}, + 'bsd-3-clause-acpica': {'id': 'BSD-3-Clause-acpica', 'deprecated': False}, + 'bsd-3-clause-attribution': {'id': 'BSD-3-Clause-Attribution', 'deprecated': False}, + 'bsd-3-clause-clear': {'id': 'BSD-3-Clause-Clear', 'deprecated': False}, + 'bsd-3-clause-flex': {'id': 'BSD-3-Clause-flex', 'deprecated': False}, + 'bsd-3-clause-hp': {'id': 'BSD-3-Clause-HP', 'deprecated': False}, + 'bsd-3-clause-lbnl': {'id': 'BSD-3-Clause-LBNL', 'deprecated': False}, + 'bsd-3-clause-modification': {'id': 'BSD-3-Clause-Modification', 'deprecated': False}, + 'bsd-3-clause-no-military-license': {'id': 'BSD-3-Clause-No-Military-License', 'deprecated': False}, + 'bsd-3-clause-no-nuclear-license': {'id': 'BSD-3-Clause-No-Nuclear-License', 'deprecated': False}, + 'bsd-3-clause-no-nuclear-license-2014': {'id': 'BSD-3-Clause-No-Nuclear-License-2014', 'deprecated': False}, + 'bsd-3-clause-no-nuclear-warranty': 
{'id': 'BSD-3-Clause-No-Nuclear-Warranty', 'deprecated': False}, + 'bsd-3-clause-open-mpi': {'id': 'BSD-3-Clause-Open-MPI', 'deprecated': False}, + 'bsd-3-clause-sun': {'id': 'BSD-3-Clause-Sun', 'deprecated': False}, + 'bsd-4-clause': {'id': 'BSD-4-Clause', 'deprecated': False}, + 'bsd-4-clause-shortened': {'id': 'BSD-4-Clause-Shortened', 'deprecated': False}, + 'bsd-4-clause-uc': {'id': 'BSD-4-Clause-UC', 'deprecated': False}, + 'bsd-4.3reno': {'id': 'BSD-4.3RENO', 'deprecated': False}, + 'bsd-4.3tahoe': {'id': 'BSD-4.3TAHOE', 'deprecated': False}, + 'bsd-advertising-acknowledgement': {'id': 'BSD-Advertising-Acknowledgement', 'deprecated': False}, + 'bsd-attribution-hpnd-disclaimer': {'id': 'BSD-Attribution-HPND-disclaimer', 'deprecated': False}, + 'bsd-inferno-nettverk': {'id': 'BSD-Inferno-Nettverk', 'deprecated': False}, + 'bsd-protection': {'id': 'BSD-Protection', 'deprecated': False}, + 'bsd-source-beginning-file': {'id': 'BSD-Source-beginning-file', 'deprecated': False}, + 'bsd-source-code': {'id': 'BSD-Source-Code', 'deprecated': False}, + 'bsd-systemics': {'id': 'BSD-Systemics', 'deprecated': False}, + 'bsd-systemics-w3works': {'id': 'BSD-Systemics-W3Works', 'deprecated': False}, + 'bsl-1.0': {'id': 'BSL-1.0', 'deprecated': False}, + 'busl-1.1': {'id': 'BUSL-1.1', 'deprecated': False}, + 'bzip2-1.0.5': {'id': 'bzip2-1.0.5', 'deprecated': True}, + 'bzip2-1.0.6': {'id': 'bzip2-1.0.6', 'deprecated': False}, + 'c-uda-1.0': {'id': 'C-UDA-1.0', 'deprecated': False}, + 'cal-1.0': {'id': 'CAL-1.0', 'deprecated': False}, + 'cal-1.0-combined-work-exception': {'id': 'CAL-1.0-Combined-Work-Exception', 'deprecated': False}, + 'caldera': {'id': 'Caldera', 'deprecated': False}, + 'caldera-no-preamble': {'id': 'Caldera-no-preamble', 'deprecated': False}, + 'catharon': {'id': 'Catharon', 'deprecated': False}, + 'catosl-1.1': {'id': 'CATOSL-1.1', 'deprecated': False}, + 'cc-by-1.0': {'id': 'CC-BY-1.0', 'deprecated': False}, + 'cc-by-2.0': {'id': 'CC-BY-2.0', 'deprecated': False}, + 'cc-by-2.5': {'id': 'CC-BY-2.5', 'deprecated': False}, + 'cc-by-2.5-au': {'id': 'CC-BY-2.5-AU', 'deprecated': False}, + 'cc-by-3.0': {'id': 'CC-BY-3.0', 'deprecated': False}, + 'cc-by-3.0-at': {'id': 'CC-BY-3.0-AT', 'deprecated': False}, + 'cc-by-3.0-au': {'id': 'CC-BY-3.0-AU', 'deprecated': False}, + 'cc-by-3.0-de': {'id': 'CC-BY-3.0-DE', 'deprecated': False}, + 'cc-by-3.0-igo': {'id': 'CC-BY-3.0-IGO', 'deprecated': False}, + 'cc-by-3.0-nl': {'id': 'CC-BY-3.0-NL', 'deprecated': False}, + 'cc-by-3.0-us': {'id': 'CC-BY-3.0-US', 'deprecated': False}, + 'cc-by-4.0': {'id': 'CC-BY-4.0', 'deprecated': False}, + 'cc-by-nc-1.0': {'id': 'CC-BY-NC-1.0', 'deprecated': False}, + 'cc-by-nc-2.0': {'id': 'CC-BY-NC-2.0', 'deprecated': False}, + 'cc-by-nc-2.5': {'id': 'CC-BY-NC-2.5', 'deprecated': False}, + 'cc-by-nc-3.0': {'id': 'CC-BY-NC-3.0', 'deprecated': False}, + 'cc-by-nc-3.0-de': {'id': 'CC-BY-NC-3.0-DE', 'deprecated': False}, + 'cc-by-nc-4.0': {'id': 'CC-BY-NC-4.0', 'deprecated': False}, + 'cc-by-nc-nd-1.0': {'id': 'CC-BY-NC-ND-1.0', 'deprecated': False}, + 'cc-by-nc-nd-2.0': {'id': 'CC-BY-NC-ND-2.0', 'deprecated': False}, + 'cc-by-nc-nd-2.5': {'id': 'CC-BY-NC-ND-2.5', 'deprecated': False}, + 'cc-by-nc-nd-3.0': {'id': 'CC-BY-NC-ND-3.0', 'deprecated': False}, + 'cc-by-nc-nd-3.0-de': {'id': 'CC-BY-NC-ND-3.0-DE', 'deprecated': False}, + 'cc-by-nc-nd-3.0-igo': {'id': 'CC-BY-NC-ND-3.0-IGO', 'deprecated': False}, + 'cc-by-nc-nd-4.0': {'id': 'CC-BY-NC-ND-4.0', 'deprecated': False}, + 'cc-by-nc-sa-1.0': {'id': 'CC-BY-NC-SA-1.0', 
'deprecated': False}, + 'cc-by-nc-sa-2.0': {'id': 'CC-BY-NC-SA-2.0', 'deprecated': False}, + 'cc-by-nc-sa-2.0-de': {'id': 'CC-BY-NC-SA-2.0-DE', 'deprecated': False}, + 'cc-by-nc-sa-2.0-fr': {'id': 'CC-BY-NC-SA-2.0-FR', 'deprecated': False}, + 'cc-by-nc-sa-2.0-uk': {'id': 'CC-BY-NC-SA-2.0-UK', 'deprecated': False}, + 'cc-by-nc-sa-2.5': {'id': 'CC-BY-NC-SA-2.5', 'deprecated': False}, + 'cc-by-nc-sa-3.0': {'id': 'CC-BY-NC-SA-3.0', 'deprecated': False}, + 'cc-by-nc-sa-3.0-de': {'id': 'CC-BY-NC-SA-3.0-DE', 'deprecated': False}, + 'cc-by-nc-sa-3.0-igo': {'id': 'CC-BY-NC-SA-3.0-IGO', 'deprecated': False}, + 'cc-by-nc-sa-4.0': {'id': 'CC-BY-NC-SA-4.0', 'deprecated': False}, + 'cc-by-nd-1.0': {'id': 'CC-BY-ND-1.0', 'deprecated': False}, + 'cc-by-nd-2.0': {'id': 'CC-BY-ND-2.0', 'deprecated': False}, + 'cc-by-nd-2.5': {'id': 'CC-BY-ND-2.5', 'deprecated': False}, + 'cc-by-nd-3.0': {'id': 'CC-BY-ND-3.0', 'deprecated': False}, + 'cc-by-nd-3.0-de': {'id': 'CC-BY-ND-3.0-DE', 'deprecated': False}, + 'cc-by-nd-4.0': {'id': 'CC-BY-ND-4.0', 'deprecated': False}, + 'cc-by-sa-1.0': {'id': 'CC-BY-SA-1.0', 'deprecated': False}, + 'cc-by-sa-2.0': {'id': 'CC-BY-SA-2.0', 'deprecated': False}, + 'cc-by-sa-2.0-uk': {'id': 'CC-BY-SA-2.0-UK', 'deprecated': False}, + 'cc-by-sa-2.1-jp': {'id': 'CC-BY-SA-2.1-JP', 'deprecated': False}, + 'cc-by-sa-2.5': {'id': 'CC-BY-SA-2.5', 'deprecated': False}, + 'cc-by-sa-3.0': {'id': 'CC-BY-SA-3.0', 'deprecated': False}, + 'cc-by-sa-3.0-at': {'id': 'CC-BY-SA-3.0-AT', 'deprecated': False}, + 'cc-by-sa-3.0-de': {'id': 'CC-BY-SA-3.0-DE', 'deprecated': False}, + 'cc-by-sa-3.0-igo': {'id': 'CC-BY-SA-3.0-IGO', 'deprecated': False}, + 'cc-by-sa-4.0': {'id': 'CC-BY-SA-4.0', 'deprecated': False}, + 'cc-pddc': {'id': 'CC-PDDC', 'deprecated': False}, + 'cc0-1.0': {'id': 'CC0-1.0', 'deprecated': False}, + 'cddl-1.0': {'id': 'CDDL-1.0', 'deprecated': False}, + 'cddl-1.1': {'id': 'CDDL-1.1', 'deprecated': False}, + 'cdl-1.0': {'id': 'CDL-1.0', 'deprecated': False}, + 'cdla-permissive-1.0': {'id': 'CDLA-Permissive-1.0', 'deprecated': False}, + 'cdla-permissive-2.0': {'id': 'CDLA-Permissive-2.0', 'deprecated': False}, + 'cdla-sharing-1.0': {'id': 'CDLA-Sharing-1.0', 'deprecated': False}, + 'cecill-1.0': {'id': 'CECILL-1.0', 'deprecated': False}, + 'cecill-1.1': {'id': 'CECILL-1.1', 'deprecated': False}, + 'cecill-2.0': {'id': 'CECILL-2.0', 'deprecated': False}, + 'cecill-2.1': {'id': 'CECILL-2.1', 'deprecated': False}, + 'cecill-b': {'id': 'CECILL-B', 'deprecated': False}, + 'cecill-c': {'id': 'CECILL-C', 'deprecated': False}, + 'cern-ohl-1.1': {'id': 'CERN-OHL-1.1', 'deprecated': False}, + 'cern-ohl-1.2': {'id': 'CERN-OHL-1.2', 'deprecated': False}, + 'cern-ohl-p-2.0': {'id': 'CERN-OHL-P-2.0', 'deprecated': False}, + 'cern-ohl-s-2.0': {'id': 'CERN-OHL-S-2.0', 'deprecated': False}, + 'cern-ohl-w-2.0': {'id': 'CERN-OHL-W-2.0', 'deprecated': False}, + 'cfitsio': {'id': 'CFITSIO', 'deprecated': False}, + 'check-cvs': {'id': 'check-cvs', 'deprecated': False}, + 'checkmk': {'id': 'checkmk', 'deprecated': False}, + 'clartistic': {'id': 'ClArtistic', 'deprecated': False}, + 'clips': {'id': 'Clips', 'deprecated': False}, + 'cmu-mach': {'id': 'CMU-Mach', 'deprecated': False}, + 'cmu-mach-nodoc': {'id': 'CMU-Mach-nodoc', 'deprecated': False}, + 'cnri-jython': {'id': 'CNRI-Jython', 'deprecated': False}, + 'cnri-python': {'id': 'CNRI-Python', 'deprecated': False}, + 'cnri-python-gpl-compatible': {'id': 'CNRI-Python-GPL-Compatible', 'deprecated': False}, + 'coil-1.0': {'id': 'COIL-1.0', 'deprecated': False}, + 
'community-spec-1.0': {'id': 'Community-Spec-1.0', 'deprecated': False}, + 'condor-1.1': {'id': 'Condor-1.1', 'deprecated': False}, + 'copyleft-next-0.3.0': {'id': 'copyleft-next-0.3.0', 'deprecated': False}, + 'copyleft-next-0.3.1': {'id': 'copyleft-next-0.3.1', 'deprecated': False}, + 'cornell-lossless-jpeg': {'id': 'Cornell-Lossless-JPEG', 'deprecated': False}, + 'cpal-1.0': {'id': 'CPAL-1.0', 'deprecated': False}, + 'cpl-1.0': {'id': 'CPL-1.0', 'deprecated': False}, + 'cpol-1.02': {'id': 'CPOL-1.02', 'deprecated': False}, + 'cronyx': {'id': 'Cronyx', 'deprecated': False}, + 'crossword': {'id': 'Crossword', 'deprecated': False}, + 'crystalstacker': {'id': 'CrystalStacker', 'deprecated': False}, + 'cua-opl-1.0': {'id': 'CUA-OPL-1.0', 'deprecated': False}, + 'cube': {'id': 'Cube', 'deprecated': False}, + 'curl': {'id': 'curl', 'deprecated': False}, + 'cve-tou': {'id': 'cve-tou', 'deprecated': False}, + 'd-fsl-1.0': {'id': 'D-FSL-1.0', 'deprecated': False}, + 'dec-3-clause': {'id': 'DEC-3-Clause', 'deprecated': False}, + 'diffmark': {'id': 'diffmark', 'deprecated': False}, + 'dl-de-by-2.0': {'id': 'DL-DE-BY-2.0', 'deprecated': False}, + 'dl-de-zero-2.0': {'id': 'DL-DE-ZERO-2.0', 'deprecated': False}, + 'doc': {'id': 'DOC', 'deprecated': False}, + 'docbook-schema': {'id': 'DocBook-Schema', 'deprecated': False}, + 'docbook-xml': {'id': 'DocBook-XML', 'deprecated': False}, + 'dotseqn': {'id': 'Dotseqn', 'deprecated': False}, + 'drl-1.0': {'id': 'DRL-1.0', 'deprecated': False}, + 'drl-1.1': {'id': 'DRL-1.1', 'deprecated': False}, + 'dsdp': {'id': 'DSDP', 'deprecated': False}, + 'dtoa': {'id': 'dtoa', 'deprecated': False}, + 'dvipdfm': {'id': 'dvipdfm', 'deprecated': False}, + 'ecl-1.0': {'id': 'ECL-1.0', 'deprecated': False}, + 'ecl-2.0': {'id': 'ECL-2.0', 'deprecated': False}, + 'ecos-2.0': {'id': 'eCos-2.0', 'deprecated': True}, + 'efl-1.0': {'id': 'EFL-1.0', 'deprecated': False}, + 'efl-2.0': {'id': 'EFL-2.0', 'deprecated': False}, + 'egenix': {'id': 'eGenix', 'deprecated': False}, + 'elastic-2.0': {'id': 'Elastic-2.0', 'deprecated': False}, + 'entessa': {'id': 'Entessa', 'deprecated': False}, + 'epics': {'id': 'EPICS', 'deprecated': False}, + 'epl-1.0': {'id': 'EPL-1.0', 'deprecated': False}, + 'epl-2.0': {'id': 'EPL-2.0', 'deprecated': False}, + 'erlpl-1.1': {'id': 'ErlPL-1.1', 'deprecated': False}, + 'etalab-2.0': {'id': 'etalab-2.0', 'deprecated': False}, + 'eudatagrid': {'id': 'EUDatagrid', 'deprecated': False}, + 'eupl-1.0': {'id': 'EUPL-1.0', 'deprecated': False}, + 'eupl-1.1': {'id': 'EUPL-1.1', 'deprecated': False}, + 'eupl-1.2': {'id': 'EUPL-1.2', 'deprecated': False}, + 'eurosym': {'id': 'Eurosym', 'deprecated': False}, + 'fair': {'id': 'Fair', 'deprecated': False}, + 'fbm': {'id': 'FBM', 'deprecated': False}, + 'fdk-aac': {'id': 'FDK-AAC', 'deprecated': False}, + 'ferguson-twofish': {'id': 'Ferguson-Twofish', 'deprecated': False}, + 'frameworx-1.0': {'id': 'Frameworx-1.0', 'deprecated': False}, + 'freebsd-doc': {'id': 'FreeBSD-DOC', 'deprecated': False}, + 'freeimage': {'id': 'FreeImage', 'deprecated': False}, + 'fsfap': {'id': 'FSFAP', 'deprecated': False}, + 'fsfap-no-warranty-disclaimer': {'id': 'FSFAP-no-warranty-disclaimer', 'deprecated': False}, + 'fsful': {'id': 'FSFUL', 'deprecated': False}, + 'fsfullr': {'id': 'FSFULLR', 'deprecated': False}, + 'fsfullrwd': {'id': 'FSFULLRWD', 'deprecated': False}, + 'ftl': {'id': 'FTL', 'deprecated': False}, + 'furuseth': {'id': 'Furuseth', 'deprecated': False}, + 'fwlw': {'id': 'fwlw', 'deprecated': False}, + 'gcr-docs': {'id': 
'GCR-docs', 'deprecated': False}, + 'gd': {'id': 'GD', 'deprecated': False}, + 'gfdl-1.1': {'id': 'GFDL-1.1', 'deprecated': True}, + 'gfdl-1.1-invariants-only': {'id': 'GFDL-1.1-invariants-only', 'deprecated': False}, + 'gfdl-1.1-invariants-or-later': {'id': 'GFDL-1.1-invariants-or-later', 'deprecated': False}, + 'gfdl-1.1-no-invariants-only': {'id': 'GFDL-1.1-no-invariants-only', 'deprecated': False}, + 'gfdl-1.1-no-invariants-or-later': {'id': 'GFDL-1.1-no-invariants-or-later', 'deprecated': False}, + 'gfdl-1.1-only': {'id': 'GFDL-1.1-only', 'deprecated': False}, + 'gfdl-1.1-or-later': {'id': 'GFDL-1.1-or-later', 'deprecated': False}, + 'gfdl-1.2': {'id': 'GFDL-1.2', 'deprecated': True}, + 'gfdl-1.2-invariants-only': {'id': 'GFDL-1.2-invariants-only', 'deprecated': False}, + 'gfdl-1.2-invariants-or-later': {'id': 'GFDL-1.2-invariants-or-later', 'deprecated': False}, + 'gfdl-1.2-no-invariants-only': {'id': 'GFDL-1.2-no-invariants-only', 'deprecated': False}, + 'gfdl-1.2-no-invariants-or-later': {'id': 'GFDL-1.2-no-invariants-or-later', 'deprecated': False}, + 'gfdl-1.2-only': {'id': 'GFDL-1.2-only', 'deprecated': False}, + 'gfdl-1.2-or-later': {'id': 'GFDL-1.2-or-later', 'deprecated': False}, + 'gfdl-1.3': {'id': 'GFDL-1.3', 'deprecated': True}, + 'gfdl-1.3-invariants-only': {'id': 'GFDL-1.3-invariants-only', 'deprecated': False}, + 'gfdl-1.3-invariants-or-later': {'id': 'GFDL-1.3-invariants-or-later', 'deprecated': False}, + 'gfdl-1.3-no-invariants-only': {'id': 'GFDL-1.3-no-invariants-only', 'deprecated': False}, + 'gfdl-1.3-no-invariants-or-later': {'id': 'GFDL-1.3-no-invariants-or-later', 'deprecated': False}, + 'gfdl-1.3-only': {'id': 'GFDL-1.3-only', 'deprecated': False}, + 'gfdl-1.3-or-later': {'id': 'GFDL-1.3-or-later', 'deprecated': False}, + 'giftware': {'id': 'Giftware', 'deprecated': False}, + 'gl2ps': {'id': 'GL2PS', 'deprecated': False}, + 'glide': {'id': 'Glide', 'deprecated': False}, + 'glulxe': {'id': 'Glulxe', 'deprecated': False}, + 'glwtpl': {'id': 'GLWTPL', 'deprecated': False}, + 'gnuplot': {'id': 'gnuplot', 'deprecated': False}, + 'gpl-1.0': {'id': 'GPL-1.0', 'deprecated': True}, + 'gpl-1.0+': {'id': 'GPL-1.0+', 'deprecated': True}, + 'gpl-1.0-only': {'id': 'GPL-1.0-only', 'deprecated': False}, + 'gpl-1.0-or-later': {'id': 'GPL-1.0-or-later', 'deprecated': False}, + 'gpl-2.0': {'id': 'GPL-2.0', 'deprecated': True}, + 'gpl-2.0+': {'id': 'GPL-2.0+', 'deprecated': True}, + 'gpl-2.0-only': {'id': 'GPL-2.0-only', 'deprecated': False}, + 'gpl-2.0-or-later': {'id': 'GPL-2.0-or-later', 'deprecated': False}, + 'gpl-2.0-with-autoconf-exception': {'id': 'GPL-2.0-with-autoconf-exception', 'deprecated': True}, + 'gpl-2.0-with-bison-exception': {'id': 'GPL-2.0-with-bison-exception', 'deprecated': True}, + 'gpl-2.0-with-classpath-exception': {'id': 'GPL-2.0-with-classpath-exception', 'deprecated': True}, + 'gpl-2.0-with-font-exception': {'id': 'GPL-2.0-with-font-exception', 'deprecated': True}, + 'gpl-2.0-with-gcc-exception': {'id': 'GPL-2.0-with-GCC-exception', 'deprecated': True}, + 'gpl-3.0': {'id': 'GPL-3.0', 'deprecated': True}, + 'gpl-3.0+': {'id': 'GPL-3.0+', 'deprecated': True}, + 'gpl-3.0-only': {'id': 'GPL-3.0-only', 'deprecated': False}, + 'gpl-3.0-or-later': {'id': 'GPL-3.0-or-later', 'deprecated': False}, + 'gpl-3.0-with-autoconf-exception': {'id': 'GPL-3.0-with-autoconf-exception', 'deprecated': True}, + 'gpl-3.0-with-gcc-exception': {'id': 'GPL-3.0-with-GCC-exception', 'deprecated': True}, + 'graphics-gems': {'id': 'Graphics-Gems', 'deprecated': False}, + 
'gsoap-1.3b': {'id': 'gSOAP-1.3b', 'deprecated': False}, + 'gtkbook': {'id': 'gtkbook', 'deprecated': False}, + 'gutmann': {'id': 'Gutmann', 'deprecated': False}, + 'haskellreport': {'id': 'HaskellReport', 'deprecated': False}, + 'hdparm': {'id': 'hdparm', 'deprecated': False}, + 'hidapi': {'id': 'HIDAPI', 'deprecated': False}, + 'hippocratic-2.1': {'id': 'Hippocratic-2.1', 'deprecated': False}, + 'hp-1986': {'id': 'HP-1986', 'deprecated': False}, + 'hp-1989': {'id': 'HP-1989', 'deprecated': False}, + 'hpnd': {'id': 'HPND', 'deprecated': False}, + 'hpnd-dec': {'id': 'HPND-DEC', 'deprecated': False}, + 'hpnd-doc': {'id': 'HPND-doc', 'deprecated': False}, + 'hpnd-doc-sell': {'id': 'HPND-doc-sell', 'deprecated': False}, + 'hpnd-export-us': {'id': 'HPND-export-US', 'deprecated': False}, + 'hpnd-export-us-acknowledgement': {'id': 'HPND-export-US-acknowledgement', 'deprecated': False}, + 'hpnd-export-us-modify': {'id': 'HPND-export-US-modify', 'deprecated': False}, + 'hpnd-export2-us': {'id': 'HPND-export2-US', 'deprecated': False}, + 'hpnd-fenneberg-livingston': {'id': 'HPND-Fenneberg-Livingston', 'deprecated': False}, + 'hpnd-inria-imag': {'id': 'HPND-INRIA-IMAG', 'deprecated': False}, + 'hpnd-intel': {'id': 'HPND-Intel', 'deprecated': False}, + 'hpnd-kevlin-henney': {'id': 'HPND-Kevlin-Henney', 'deprecated': False}, + 'hpnd-markus-kuhn': {'id': 'HPND-Markus-Kuhn', 'deprecated': False}, + 'hpnd-merchantability-variant': {'id': 'HPND-merchantability-variant', 'deprecated': False}, + 'hpnd-mit-disclaimer': {'id': 'HPND-MIT-disclaimer', 'deprecated': False}, + 'hpnd-netrek': {'id': 'HPND-Netrek', 'deprecated': False}, + 'hpnd-pbmplus': {'id': 'HPND-Pbmplus', 'deprecated': False}, + 'hpnd-sell-mit-disclaimer-xserver': {'id': 'HPND-sell-MIT-disclaimer-xserver', 'deprecated': False}, + 'hpnd-sell-regexpr': {'id': 'HPND-sell-regexpr', 'deprecated': False}, + 'hpnd-sell-variant': {'id': 'HPND-sell-variant', 'deprecated': False}, + 'hpnd-sell-variant-mit-disclaimer': {'id': 'HPND-sell-variant-MIT-disclaimer', 'deprecated': False}, + 'hpnd-sell-variant-mit-disclaimer-rev': {'id': 'HPND-sell-variant-MIT-disclaimer-rev', 'deprecated': False}, + 'hpnd-uc': {'id': 'HPND-UC', 'deprecated': False}, + 'hpnd-uc-export-us': {'id': 'HPND-UC-export-US', 'deprecated': False}, + 'htmltidy': {'id': 'HTMLTIDY', 'deprecated': False}, + 'ibm-pibs': {'id': 'IBM-pibs', 'deprecated': False}, + 'icu': {'id': 'ICU', 'deprecated': False}, + 'iec-code-components-eula': {'id': 'IEC-Code-Components-EULA', 'deprecated': False}, + 'ijg': {'id': 'IJG', 'deprecated': False}, + 'ijg-short': {'id': 'IJG-short', 'deprecated': False}, + 'imagemagick': {'id': 'ImageMagick', 'deprecated': False}, + 'imatix': {'id': 'iMatix', 'deprecated': False}, + 'imlib2': {'id': 'Imlib2', 'deprecated': False}, + 'info-zip': {'id': 'Info-ZIP', 'deprecated': False}, + 'inner-net-2.0': {'id': 'Inner-Net-2.0', 'deprecated': False}, + 'intel': {'id': 'Intel', 'deprecated': False}, + 'intel-acpi': {'id': 'Intel-ACPI', 'deprecated': False}, + 'interbase-1.0': {'id': 'Interbase-1.0', 'deprecated': False}, + 'ipa': {'id': 'IPA', 'deprecated': False}, + 'ipl-1.0': {'id': 'IPL-1.0', 'deprecated': False}, + 'isc': {'id': 'ISC', 'deprecated': False}, + 'isc-veillard': {'id': 'ISC-Veillard', 'deprecated': False}, + 'jam': {'id': 'Jam', 'deprecated': False}, + 'jasper-2.0': {'id': 'JasPer-2.0', 'deprecated': False}, + 'jpl-image': {'id': 'JPL-image', 'deprecated': False}, + 'jpnic': {'id': 'JPNIC', 'deprecated': False}, + 'json': {'id': 'JSON', 'deprecated': False}, 
+ 'kastrup': {'id': 'Kastrup', 'deprecated': False}, + 'kazlib': {'id': 'Kazlib', 'deprecated': False}, + 'knuth-ctan': {'id': 'Knuth-CTAN', 'deprecated': False}, + 'lal-1.2': {'id': 'LAL-1.2', 'deprecated': False}, + 'lal-1.3': {'id': 'LAL-1.3', 'deprecated': False}, + 'latex2e': {'id': 'Latex2e', 'deprecated': False}, + 'latex2e-translated-notice': {'id': 'Latex2e-translated-notice', 'deprecated': False}, + 'leptonica': {'id': 'Leptonica', 'deprecated': False}, + 'lgpl-2.0': {'id': 'LGPL-2.0', 'deprecated': True}, + 'lgpl-2.0+': {'id': 'LGPL-2.0+', 'deprecated': True}, + 'lgpl-2.0-only': {'id': 'LGPL-2.0-only', 'deprecated': False}, + 'lgpl-2.0-or-later': {'id': 'LGPL-2.0-or-later', 'deprecated': False}, + 'lgpl-2.1': {'id': 'LGPL-2.1', 'deprecated': True}, + 'lgpl-2.1+': {'id': 'LGPL-2.1+', 'deprecated': True}, + 'lgpl-2.1-only': {'id': 'LGPL-2.1-only', 'deprecated': False}, + 'lgpl-2.1-or-later': {'id': 'LGPL-2.1-or-later', 'deprecated': False}, + 'lgpl-3.0': {'id': 'LGPL-3.0', 'deprecated': True}, + 'lgpl-3.0+': {'id': 'LGPL-3.0+', 'deprecated': True}, + 'lgpl-3.0-only': {'id': 'LGPL-3.0-only', 'deprecated': False}, + 'lgpl-3.0-or-later': {'id': 'LGPL-3.0-or-later', 'deprecated': False}, + 'lgpllr': {'id': 'LGPLLR', 'deprecated': False}, + 'libpng': {'id': 'Libpng', 'deprecated': False}, + 'libpng-2.0': {'id': 'libpng-2.0', 'deprecated': False}, + 'libselinux-1.0': {'id': 'libselinux-1.0', 'deprecated': False}, + 'libtiff': {'id': 'libtiff', 'deprecated': False}, + 'libutil-david-nugent': {'id': 'libutil-David-Nugent', 'deprecated': False}, + 'liliq-p-1.1': {'id': 'LiLiQ-P-1.1', 'deprecated': False}, + 'liliq-r-1.1': {'id': 'LiLiQ-R-1.1', 'deprecated': False}, + 'liliq-rplus-1.1': {'id': 'LiLiQ-Rplus-1.1', 'deprecated': False}, + 'linux-man-pages-1-para': {'id': 'Linux-man-pages-1-para', 'deprecated': False}, + 'linux-man-pages-copyleft': {'id': 'Linux-man-pages-copyleft', 'deprecated': False}, + 'linux-man-pages-copyleft-2-para': {'id': 'Linux-man-pages-copyleft-2-para', 'deprecated': False}, + 'linux-man-pages-copyleft-var': {'id': 'Linux-man-pages-copyleft-var', 'deprecated': False}, + 'linux-openib': {'id': 'Linux-OpenIB', 'deprecated': False}, + 'loop': {'id': 'LOOP', 'deprecated': False}, + 'lpd-document': {'id': 'LPD-document', 'deprecated': False}, + 'lpl-1.0': {'id': 'LPL-1.0', 'deprecated': False}, + 'lpl-1.02': {'id': 'LPL-1.02', 'deprecated': False}, + 'lppl-1.0': {'id': 'LPPL-1.0', 'deprecated': False}, + 'lppl-1.1': {'id': 'LPPL-1.1', 'deprecated': False}, + 'lppl-1.2': {'id': 'LPPL-1.2', 'deprecated': False}, + 'lppl-1.3a': {'id': 'LPPL-1.3a', 'deprecated': False}, + 'lppl-1.3c': {'id': 'LPPL-1.3c', 'deprecated': False}, + 'lsof': {'id': 'lsof', 'deprecated': False}, + 'lucida-bitmap-fonts': {'id': 'Lucida-Bitmap-Fonts', 'deprecated': False}, + 'lzma-sdk-9.11-to-9.20': {'id': 'LZMA-SDK-9.11-to-9.20', 'deprecated': False}, + 'lzma-sdk-9.22': {'id': 'LZMA-SDK-9.22', 'deprecated': False}, + 'mackerras-3-clause': {'id': 'Mackerras-3-Clause', 'deprecated': False}, + 'mackerras-3-clause-acknowledgment': {'id': 'Mackerras-3-Clause-acknowledgment', 'deprecated': False}, + 'magaz': {'id': 'magaz', 'deprecated': False}, + 'mailprio': {'id': 'mailprio', 'deprecated': False}, + 'makeindex': {'id': 'MakeIndex', 'deprecated': False}, + 'martin-birgmeier': {'id': 'Martin-Birgmeier', 'deprecated': False}, + 'mcphee-slideshow': {'id': 'McPhee-slideshow', 'deprecated': False}, + 'metamail': {'id': 'metamail', 'deprecated': False}, + 'minpack': {'id': 'Minpack', 'deprecated': False}, + 
'miros': {'id': 'MirOS', 'deprecated': False}, + 'mit': {'id': 'MIT', 'deprecated': False}, + 'mit-0': {'id': 'MIT-0', 'deprecated': False}, + 'mit-advertising': {'id': 'MIT-advertising', 'deprecated': False}, + 'mit-cmu': {'id': 'MIT-CMU', 'deprecated': False}, + 'mit-enna': {'id': 'MIT-enna', 'deprecated': False}, + 'mit-feh': {'id': 'MIT-feh', 'deprecated': False}, + 'mit-festival': {'id': 'MIT-Festival', 'deprecated': False}, + 'mit-khronos-old': {'id': 'MIT-Khronos-old', 'deprecated': False}, + 'mit-modern-variant': {'id': 'MIT-Modern-Variant', 'deprecated': False}, + 'mit-open-group': {'id': 'MIT-open-group', 'deprecated': False}, + 'mit-testregex': {'id': 'MIT-testregex', 'deprecated': False}, + 'mit-wu': {'id': 'MIT-Wu', 'deprecated': False}, + 'mitnfa': {'id': 'MITNFA', 'deprecated': False}, + 'mmixware': {'id': 'MMIXware', 'deprecated': False}, + 'motosoto': {'id': 'Motosoto', 'deprecated': False}, + 'mpeg-ssg': {'id': 'MPEG-SSG', 'deprecated': False}, + 'mpi-permissive': {'id': 'mpi-permissive', 'deprecated': False}, + 'mpich2': {'id': 'mpich2', 'deprecated': False}, + 'mpl-1.0': {'id': 'MPL-1.0', 'deprecated': False}, + 'mpl-1.1': {'id': 'MPL-1.1', 'deprecated': False}, + 'mpl-2.0': {'id': 'MPL-2.0', 'deprecated': False}, + 'mpl-2.0-no-copyleft-exception': {'id': 'MPL-2.0-no-copyleft-exception', 'deprecated': False}, + 'mplus': {'id': 'mplus', 'deprecated': False}, + 'ms-lpl': {'id': 'MS-LPL', 'deprecated': False}, + 'ms-pl': {'id': 'MS-PL', 'deprecated': False}, + 'ms-rl': {'id': 'MS-RL', 'deprecated': False}, + 'mtll': {'id': 'MTLL', 'deprecated': False}, + 'mulanpsl-1.0': {'id': 'MulanPSL-1.0', 'deprecated': False}, + 'mulanpsl-2.0': {'id': 'MulanPSL-2.0', 'deprecated': False}, + 'multics': {'id': 'Multics', 'deprecated': False}, + 'mup': {'id': 'Mup', 'deprecated': False}, + 'naist-2003': {'id': 'NAIST-2003', 'deprecated': False}, + 'nasa-1.3': {'id': 'NASA-1.3', 'deprecated': False}, + 'naumen': {'id': 'Naumen', 'deprecated': False}, + 'nbpl-1.0': {'id': 'NBPL-1.0', 'deprecated': False}, + 'ncbi-pd': {'id': 'NCBI-PD', 'deprecated': False}, + 'ncgl-uk-2.0': {'id': 'NCGL-UK-2.0', 'deprecated': False}, + 'ncl': {'id': 'NCL', 'deprecated': False}, + 'ncsa': {'id': 'NCSA', 'deprecated': False}, + 'net-snmp': {'id': 'Net-SNMP', 'deprecated': True}, + 'netcdf': {'id': 'NetCDF', 'deprecated': False}, + 'newsletr': {'id': 'Newsletr', 'deprecated': False}, + 'ngpl': {'id': 'NGPL', 'deprecated': False}, + 'nicta-1.0': {'id': 'NICTA-1.0', 'deprecated': False}, + 'nist-pd': {'id': 'NIST-PD', 'deprecated': False}, + 'nist-pd-fallback': {'id': 'NIST-PD-fallback', 'deprecated': False}, + 'nist-software': {'id': 'NIST-Software', 'deprecated': False}, + 'nlod-1.0': {'id': 'NLOD-1.0', 'deprecated': False}, + 'nlod-2.0': {'id': 'NLOD-2.0', 'deprecated': False}, + 'nlpl': {'id': 'NLPL', 'deprecated': False}, + 'nokia': {'id': 'Nokia', 'deprecated': False}, + 'nosl': {'id': 'NOSL', 'deprecated': False}, + 'noweb': {'id': 'Noweb', 'deprecated': False}, + 'npl-1.0': {'id': 'NPL-1.0', 'deprecated': False}, + 'npl-1.1': {'id': 'NPL-1.1', 'deprecated': False}, + 'nposl-3.0': {'id': 'NPOSL-3.0', 'deprecated': False}, + 'nrl': {'id': 'NRL', 'deprecated': False}, + 'ntp': {'id': 'NTP', 'deprecated': False}, + 'ntp-0': {'id': 'NTP-0', 'deprecated': False}, + 'nunit': {'id': 'Nunit', 'deprecated': True}, + 'o-uda-1.0': {'id': 'O-UDA-1.0', 'deprecated': False}, + 'oar': {'id': 'OAR', 'deprecated': False}, + 'occt-pl': {'id': 'OCCT-PL', 'deprecated': False}, + 'oclc-2.0': {'id': 'OCLC-2.0', 'deprecated': 
False}, + 'odbl-1.0': {'id': 'ODbL-1.0', 'deprecated': False}, + 'odc-by-1.0': {'id': 'ODC-By-1.0', 'deprecated': False}, + 'offis': {'id': 'OFFIS', 'deprecated': False}, + 'ofl-1.0': {'id': 'OFL-1.0', 'deprecated': False}, + 'ofl-1.0-no-rfn': {'id': 'OFL-1.0-no-RFN', 'deprecated': False}, + 'ofl-1.0-rfn': {'id': 'OFL-1.0-RFN', 'deprecated': False}, + 'ofl-1.1': {'id': 'OFL-1.1', 'deprecated': False}, + 'ofl-1.1-no-rfn': {'id': 'OFL-1.1-no-RFN', 'deprecated': False}, + 'ofl-1.1-rfn': {'id': 'OFL-1.1-RFN', 'deprecated': False}, + 'ogc-1.0': {'id': 'OGC-1.0', 'deprecated': False}, + 'ogdl-taiwan-1.0': {'id': 'OGDL-Taiwan-1.0', 'deprecated': False}, + 'ogl-canada-2.0': {'id': 'OGL-Canada-2.0', 'deprecated': False}, + 'ogl-uk-1.0': {'id': 'OGL-UK-1.0', 'deprecated': False}, + 'ogl-uk-2.0': {'id': 'OGL-UK-2.0', 'deprecated': False}, + 'ogl-uk-3.0': {'id': 'OGL-UK-3.0', 'deprecated': False}, + 'ogtsl': {'id': 'OGTSL', 'deprecated': False}, + 'oldap-1.1': {'id': 'OLDAP-1.1', 'deprecated': False}, + 'oldap-1.2': {'id': 'OLDAP-1.2', 'deprecated': False}, + 'oldap-1.3': {'id': 'OLDAP-1.3', 'deprecated': False}, + 'oldap-1.4': {'id': 'OLDAP-1.4', 'deprecated': False}, + 'oldap-2.0': {'id': 'OLDAP-2.0', 'deprecated': False}, + 'oldap-2.0.1': {'id': 'OLDAP-2.0.1', 'deprecated': False}, + 'oldap-2.1': {'id': 'OLDAP-2.1', 'deprecated': False}, + 'oldap-2.2': {'id': 'OLDAP-2.2', 'deprecated': False}, + 'oldap-2.2.1': {'id': 'OLDAP-2.2.1', 'deprecated': False}, + 'oldap-2.2.2': {'id': 'OLDAP-2.2.2', 'deprecated': False}, + 'oldap-2.3': {'id': 'OLDAP-2.3', 'deprecated': False}, + 'oldap-2.4': {'id': 'OLDAP-2.4', 'deprecated': False}, + 'oldap-2.5': {'id': 'OLDAP-2.5', 'deprecated': False}, + 'oldap-2.6': {'id': 'OLDAP-2.6', 'deprecated': False}, + 'oldap-2.7': {'id': 'OLDAP-2.7', 'deprecated': False}, + 'oldap-2.8': {'id': 'OLDAP-2.8', 'deprecated': False}, + 'olfl-1.3': {'id': 'OLFL-1.3', 'deprecated': False}, + 'oml': {'id': 'OML', 'deprecated': False}, + 'openpbs-2.3': {'id': 'OpenPBS-2.3', 'deprecated': False}, + 'openssl': {'id': 'OpenSSL', 'deprecated': False}, + 'openssl-standalone': {'id': 'OpenSSL-standalone', 'deprecated': False}, + 'openvision': {'id': 'OpenVision', 'deprecated': False}, + 'opl-1.0': {'id': 'OPL-1.0', 'deprecated': False}, + 'opl-uk-3.0': {'id': 'OPL-UK-3.0', 'deprecated': False}, + 'opubl-1.0': {'id': 'OPUBL-1.0', 'deprecated': False}, + 'oset-pl-2.1': {'id': 'OSET-PL-2.1', 'deprecated': False}, + 'osl-1.0': {'id': 'OSL-1.0', 'deprecated': False}, + 'osl-1.1': {'id': 'OSL-1.1', 'deprecated': False}, + 'osl-2.0': {'id': 'OSL-2.0', 'deprecated': False}, + 'osl-2.1': {'id': 'OSL-2.1', 'deprecated': False}, + 'osl-3.0': {'id': 'OSL-3.0', 'deprecated': False}, + 'padl': {'id': 'PADL', 'deprecated': False}, + 'parity-6.0.0': {'id': 'Parity-6.0.0', 'deprecated': False}, + 'parity-7.0.0': {'id': 'Parity-7.0.0', 'deprecated': False}, + 'pddl-1.0': {'id': 'PDDL-1.0', 'deprecated': False}, + 'php-3.0': {'id': 'PHP-3.0', 'deprecated': False}, + 'php-3.01': {'id': 'PHP-3.01', 'deprecated': False}, + 'pixar': {'id': 'Pixar', 'deprecated': False}, + 'pkgconf': {'id': 'pkgconf', 'deprecated': False}, + 'plexus': {'id': 'Plexus', 'deprecated': False}, + 'pnmstitch': {'id': 'pnmstitch', 'deprecated': False}, + 'polyform-noncommercial-1.0.0': {'id': 'PolyForm-Noncommercial-1.0.0', 'deprecated': False}, + 'polyform-small-business-1.0.0': {'id': 'PolyForm-Small-Business-1.0.0', 'deprecated': False}, + 'postgresql': {'id': 'PostgreSQL', 'deprecated': False}, + 'ppl': {'id': 'PPL', 'deprecated': 
False}, + 'psf-2.0': {'id': 'PSF-2.0', 'deprecated': False}, + 'psfrag': {'id': 'psfrag', 'deprecated': False}, + 'psutils': {'id': 'psutils', 'deprecated': False}, + 'python-2.0': {'id': 'Python-2.0', 'deprecated': False}, + 'python-2.0.1': {'id': 'Python-2.0.1', 'deprecated': False}, + 'python-ldap': {'id': 'python-ldap', 'deprecated': False}, + 'qhull': {'id': 'Qhull', 'deprecated': False}, + 'qpl-1.0': {'id': 'QPL-1.0', 'deprecated': False}, + 'qpl-1.0-inria-2004': {'id': 'QPL-1.0-INRIA-2004', 'deprecated': False}, + 'radvd': {'id': 'radvd', 'deprecated': False}, + 'rdisc': {'id': 'Rdisc', 'deprecated': False}, + 'rhecos-1.1': {'id': 'RHeCos-1.1', 'deprecated': False}, + 'rpl-1.1': {'id': 'RPL-1.1', 'deprecated': False}, + 'rpl-1.5': {'id': 'RPL-1.5', 'deprecated': False}, + 'rpsl-1.0': {'id': 'RPSL-1.0', 'deprecated': False}, + 'rsa-md': {'id': 'RSA-MD', 'deprecated': False}, + 'rscpl': {'id': 'RSCPL', 'deprecated': False}, + 'ruby': {'id': 'Ruby', 'deprecated': False}, + 'ruby-pty': {'id': 'Ruby-pty', 'deprecated': False}, + 'sax-pd': {'id': 'SAX-PD', 'deprecated': False}, + 'sax-pd-2.0': {'id': 'SAX-PD-2.0', 'deprecated': False}, + 'saxpath': {'id': 'Saxpath', 'deprecated': False}, + 'scea': {'id': 'SCEA', 'deprecated': False}, + 'schemereport': {'id': 'SchemeReport', 'deprecated': False}, + 'sendmail': {'id': 'Sendmail', 'deprecated': False}, + 'sendmail-8.23': {'id': 'Sendmail-8.23', 'deprecated': False}, + 'sgi-b-1.0': {'id': 'SGI-B-1.0', 'deprecated': False}, + 'sgi-b-1.1': {'id': 'SGI-B-1.1', 'deprecated': False}, + 'sgi-b-2.0': {'id': 'SGI-B-2.0', 'deprecated': False}, + 'sgi-opengl': {'id': 'SGI-OpenGL', 'deprecated': False}, + 'sgp4': {'id': 'SGP4', 'deprecated': False}, + 'shl-0.5': {'id': 'SHL-0.5', 'deprecated': False}, + 'shl-0.51': {'id': 'SHL-0.51', 'deprecated': False}, + 'simpl-2.0': {'id': 'SimPL-2.0', 'deprecated': False}, + 'sissl': {'id': 'SISSL', 'deprecated': False}, + 'sissl-1.2': {'id': 'SISSL-1.2', 'deprecated': False}, + 'sl': {'id': 'SL', 'deprecated': False}, + 'sleepycat': {'id': 'Sleepycat', 'deprecated': False}, + 'smlnj': {'id': 'SMLNJ', 'deprecated': False}, + 'smppl': {'id': 'SMPPL', 'deprecated': False}, + 'snia': {'id': 'SNIA', 'deprecated': False}, + 'snprintf': {'id': 'snprintf', 'deprecated': False}, + 'softsurfer': {'id': 'softSurfer', 'deprecated': False}, + 'soundex': {'id': 'Soundex', 'deprecated': False}, + 'spencer-86': {'id': 'Spencer-86', 'deprecated': False}, + 'spencer-94': {'id': 'Spencer-94', 'deprecated': False}, + 'spencer-99': {'id': 'Spencer-99', 'deprecated': False}, + 'spl-1.0': {'id': 'SPL-1.0', 'deprecated': False}, + 'ssh-keyscan': {'id': 'ssh-keyscan', 'deprecated': False}, + 'ssh-openssh': {'id': 'SSH-OpenSSH', 'deprecated': False}, + 'ssh-short': {'id': 'SSH-short', 'deprecated': False}, + 'ssleay-standalone': {'id': 'SSLeay-standalone', 'deprecated': False}, + 'sspl-1.0': {'id': 'SSPL-1.0', 'deprecated': False}, + 'standardml-nj': {'id': 'StandardML-NJ', 'deprecated': True}, + 'sugarcrm-1.1.3': {'id': 'SugarCRM-1.1.3', 'deprecated': False}, + 'sun-ppp': {'id': 'Sun-PPP', 'deprecated': False}, + 'sun-ppp-2000': {'id': 'Sun-PPP-2000', 'deprecated': False}, + 'sunpro': {'id': 'SunPro', 'deprecated': False}, + 'swl': {'id': 'SWL', 'deprecated': False}, + 'swrule': {'id': 'swrule', 'deprecated': False}, + 'symlinks': {'id': 'Symlinks', 'deprecated': False}, + 'tapr-ohl-1.0': {'id': 'TAPR-OHL-1.0', 'deprecated': False}, + 'tcl': {'id': 'TCL', 'deprecated': False}, + 'tcp-wrappers': {'id': 'TCP-wrappers', 'deprecated': False}, 
+ 'termreadkey': {'id': 'TermReadKey', 'deprecated': False}, + 'tgppl-1.0': {'id': 'TGPPL-1.0', 'deprecated': False}, + 'threeparttable': {'id': 'threeparttable', 'deprecated': False}, + 'tmate': {'id': 'TMate', 'deprecated': False}, + 'torque-1.1': {'id': 'TORQUE-1.1', 'deprecated': False}, + 'tosl': {'id': 'TOSL', 'deprecated': False}, + 'tpdl': {'id': 'TPDL', 'deprecated': False}, + 'tpl-1.0': {'id': 'TPL-1.0', 'deprecated': False}, + 'ttwl': {'id': 'TTWL', 'deprecated': False}, + 'ttyp0': {'id': 'TTYP0', 'deprecated': False}, + 'tu-berlin-1.0': {'id': 'TU-Berlin-1.0', 'deprecated': False}, + 'tu-berlin-2.0': {'id': 'TU-Berlin-2.0', 'deprecated': False}, + 'ubuntu-font-1.0': {'id': 'Ubuntu-font-1.0', 'deprecated': False}, + 'ucar': {'id': 'UCAR', 'deprecated': False}, + 'ucl-1.0': {'id': 'UCL-1.0', 'deprecated': False}, + 'ulem': {'id': 'ulem', 'deprecated': False}, + 'umich-merit': {'id': 'UMich-Merit', 'deprecated': False}, + 'unicode-3.0': {'id': 'Unicode-3.0', 'deprecated': False}, + 'unicode-dfs-2015': {'id': 'Unicode-DFS-2015', 'deprecated': False}, + 'unicode-dfs-2016': {'id': 'Unicode-DFS-2016', 'deprecated': False}, + 'unicode-tou': {'id': 'Unicode-TOU', 'deprecated': False}, + 'unixcrypt': {'id': 'UnixCrypt', 'deprecated': False}, + 'unlicense': {'id': 'Unlicense', 'deprecated': False}, + 'upl-1.0': {'id': 'UPL-1.0', 'deprecated': False}, + 'urt-rle': {'id': 'URT-RLE', 'deprecated': False}, + 'vim': {'id': 'Vim', 'deprecated': False}, + 'vostrom': {'id': 'VOSTROM', 'deprecated': False}, + 'vsl-1.0': {'id': 'VSL-1.0', 'deprecated': False}, + 'w3c': {'id': 'W3C', 'deprecated': False}, + 'w3c-19980720': {'id': 'W3C-19980720', 'deprecated': False}, + 'w3c-20150513': {'id': 'W3C-20150513', 'deprecated': False}, + 'w3m': {'id': 'w3m', 'deprecated': False}, + 'watcom-1.0': {'id': 'Watcom-1.0', 'deprecated': False}, + 'widget-workshop': {'id': 'Widget-Workshop', 'deprecated': False}, + 'wsuipa': {'id': 'Wsuipa', 'deprecated': False}, + 'wtfpl': {'id': 'WTFPL', 'deprecated': False}, + 'wxwindows': {'id': 'wxWindows', 'deprecated': True}, + 'x11': {'id': 'X11', 'deprecated': False}, + 'x11-distribute-modifications-variant': {'id': 'X11-distribute-modifications-variant', 'deprecated': False}, + 'x11-swapped': {'id': 'X11-swapped', 'deprecated': False}, + 'xdebug-1.03': {'id': 'Xdebug-1.03', 'deprecated': False}, + 'xerox': {'id': 'Xerox', 'deprecated': False}, + 'xfig': {'id': 'Xfig', 'deprecated': False}, + 'xfree86-1.1': {'id': 'XFree86-1.1', 'deprecated': False}, + 'xinetd': {'id': 'xinetd', 'deprecated': False}, + 'xkeyboard-config-zinoviev': {'id': 'xkeyboard-config-Zinoviev', 'deprecated': False}, + 'xlock': {'id': 'xlock', 'deprecated': False}, + 'xnet': {'id': 'Xnet', 'deprecated': False}, + 'xpp': {'id': 'xpp', 'deprecated': False}, + 'xskat': {'id': 'XSkat', 'deprecated': False}, + 'xzoom': {'id': 'xzoom', 'deprecated': False}, + 'ypl-1.0': {'id': 'YPL-1.0', 'deprecated': False}, + 'ypl-1.1': {'id': 'YPL-1.1', 'deprecated': False}, + 'zed': {'id': 'Zed', 'deprecated': False}, + 'zeeff': {'id': 'Zeeff', 'deprecated': False}, + 'zend-2.0': {'id': 'Zend-2.0', 'deprecated': False}, + 'zimbra-1.3': {'id': 'Zimbra-1.3', 'deprecated': False}, + 'zimbra-1.4': {'id': 'Zimbra-1.4', 'deprecated': False}, + 'zlib': {'id': 'Zlib', 'deprecated': False}, + 'zlib-acknowledgement': {'id': 'zlib-acknowledgement', 'deprecated': False}, + 'zpl-1.1': {'id': 'ZPL-1.1', 'deprecated': False}, + 'zpl-2.0': {'id': 'ZPL-2.0', 'deprecated': False}, + 'zpl-2.1': {'id': 'ZPL-2.1', 'deprecated': False}, +} + 
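Each entry above is keyed by the lowercased SPDX identifier, so case-insensitive normalization reduces to a plain dict lookup. A minimal sketch of such a lookup (an illustrative aside, not part of the vendored module; it assumes the table above is bound to the name `LICENSES`, as the parallel `EXCEPTIONS` table below suggests):

def canonical_license_id(user_input: str) -> str | None:
    # Case-insensitive lookup: return the canonical SPDX id, or None if unknown.
    entry = LICENSES.get(user_input.lower())
    return None if entry is None else entry["id"]

canonical_license_id("gpl-3.0-ONLY")  # -> "GPL-3.0-only"
canonical_license_id("BZIP2-1.0.5")   # -> "bzip2-1.0.5" (entry is flagged deprecated)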
+EXCEPTIONS: dict[str, SPDXException] = { + '389-exception': {'id': '389-exception', 'deprecated': False}, + 'asterisk-exception': {'id': 'Asterisk-exception', 'deprecated': False}, + 'asterisk-linking-protocols-exception': {'id': 'Asterisk-linking-protocols-exception', 'deprecated': False}, + 'autoconf-exception-2.0': {'id': 'Autoconf-exception-2.0', 'deprecated': False}, + 'autoconf-exception-3.0': {'id': 'Autoconf-exception-3.0', 'deprecated': False}, + 'autoconf-exception-generic': {'id': 'Autoconf-exception-generic', 'deprecated': False}, + 'autoconf-exception-generic-3.0': {'id': 'Autoconf-exception-generic-3.0', 'deprecated': False}, + 'autoconf-exception-macro': {'id': 'Autoconf-exception-macro', 'deprecated': False}, + 'bison-exception-1.24': {'id': 'Bison-exception-1.24', 'deprecated': False}, + 'bison-exception-2.2': {'id': 'Bison-exception-2.2', 'deprecated': False}, + 'bootloader-exception': {'id': 'Bootloader-exception', 'deprecated': False}, + 'classpath-exception-2.0': {'id': 'Classpath-exception-2.0', 'deprecated': False}, + 'clisp-exception-2.0': {'id': 'CLISP-exception-2.0', 'deprecated': False}, + 'cryptsetup-openssl-exception': {'id': 'cryptsetup-OpenSSL-exception', 'deprecated': False}, + 'digirule-foss-exception': {'id': 'DigiRule-FOSS-exception', 'deprecated': False}, + 'ecos-exception-2.0': {'id': 'eCos-exception-2.0', 'deprecated': False}, + 'erlang-otp-linking-exception': {'id': 'erlang-otp-linking-exception', 'deprecated': False}, + 'fawkes-runtime-exception': {'id': 'Fawkes-Runtime-exception', 'deprecated': False}, + 'fltk-exception': {'id': 'FLTK-exception', 'deprecated': False}, + 'fmt-exception': {'id': 'fmt-exception', 'deprecated': False}, + 'font-exception-2.0': {'id': 'Font-exception-2.0', 'deprecated': False}, + 'freertos-exception-2.0': {'id': 'freertos-exception-2.0', 'deprecated': False}, + 'gcc-exception-2.0': {'id': 'GCC-exception-2.0', 'deprecated': False}, + 'gcc-exception-2.0-note': {'id': 'GCC-exception-2.0-note', 'deprecated': False}, + 'gcc-exception-3.1': {'id': 'GCC-exception-3.1', 'deprecated': False}, + 'gmsh-exception': {'id': 'Gmsh-exception', 'deprecated': False}, + 'gnat-exception': {'id': 'GNAT-exception', 'deprecated': False}, + 'gnome-examples-exception': {'id': 'GNOME-examples-exception', 'deprecated': False}, + 'gnu-compiler-exception': {'id': 'GNU-compiler-exception', 'deprecated': False}, + 'gnu-javamail-exception': {'id': 'gnu-javamail-exception', 'deprecated': False}, + 'gpl-3.0-interface-exception': {'id': 'GPL-3.0-interface-exception', 'deprecated': False}, + 'gpl-3.0-linking-exception': {'id': 'GPL-3.0-linking-exception', 'deprecated': False}, + 'gpl-3.0-linking-source-exception': {'id': 'GPL-3.0-linking-source-exception', 'deprecated': False}, + 'gpl-cc-1.0': {'id': 'GPL-CC-1.0', 'deprecated': False}, + 'gstreamer-exception-2005': {'id': 'GStreamer-exception-2005', 'deprecated': False}, + 'gstreamer-exception-2008': {'id': 'GStreamer-exception-2008', 'deprecated': False}, + 'i2p-gpl-java-exception': {'id': 'i2p-gpl-java-exception', 'deprecated': False}, + 'kicad-libraries-exception': {'id': 'KiCad-libraries-exception', 'deprecated': False}, + 'lgpl-3.0-linking-exception': {'id': 'LGPL-3.0-linking-exception', 'deprecated': False}, + 'libpri-openh323-exception': {'id': 'libpri-OpenH323-exception', 'deprecated': False}, + 'libtool-exception': {'id': 'Libtool-exception', 'deprecated': False}, + 'linux-syscall-note': {'id': 'Linux-syscall-note', 'deprecated': False}, + 'llgpl': {'id': 'LLGPL', 'deprecated': False}, + 
'llvm-exception': {'id': 'LLVM-exception', 'deprecated': False}, + 'lzma-exception': {'id': 'LZMA-exception', 'deprecated': False}, + 'mif-exception': {'id': 'mif-exception', 'deprecated': False}, + 'nokia-qt-exception-1.1': {'id': 'Nokia-Qt-exception-1.1', 'deprecated': True}, + 'ocaml-lgpl-linking-exception': {'id': 'OCaml-LGPL-linking-exception', 'deprecated': False}, + 'occt-exception-1.0': {'id': 'OCCT-exception-1.0', 'deprecated': False}, + 'openjdk-assembly-exception-1.0': {'id': 'OpenJDK-assembly-exception-1.0', 'deprecated': False}, + 'openvpn-openssl-exception': {'id': 'openvpn-openssl-exception', 'deprecated': False}, + 'pcre2-exception': {'id': 'PCRE2-exception', 'deprecated': False}, + 'ps-or-pdf-font-exception-20170817': {'id': 'PS-or-PDF-font-exception-20170817', 'deprecated': False}, + 'qpl-1.0-inria-2004-exception': {'id': 'QPL-1.0-INRIA-2004-exception', 'deprecated': False}, + 'qt-gpl-exception-1.0': {'id': 'Qt-GPL-exception-1.0', 'deprecated': False}, + 'qt-lgpl-exception-1.1': {'id': 'Qt-LGPL-exception-1.1', 'deprecated': False}, + 'qwt-exception-1.0': {'id': 'Qwt-exception-1.0', 'deprecated': False}, + 'romic-exception': {'id': 'romic-exception', 'deprecated': False}, + 'rrdtool-floss-exception-2.0': {'id': 'RRDtool-FLOSS-exception-2.0', 'deprecated': False}, + 'sane-exception': {'id': 'SANE-exception', 'deprecated': False}, + 'shl-2.0': {'id': 'SHL-2.0', 'deprecated': False}, + 'shl-2.1': {'id': 'SHL-2.1', 'deprecated': False}, + 'stunnel-exception': {'id': 'stunnel-exception', 'deprecated': False}, + 'swi-exception': {'id': 'SWI-exception', 'deprecated': False}, + 'swift-exception': {'id': 'Swift-exception', 'deprecated': False}, + 'texinfo-exception': {'id': 'Texinfo-exception', 'deprecated': False}, + 'u-boot-exception-2.0': {'id': 'u-boot-exception-2.0', 'deprecated': False}, + 'ubdl-exception': {'id': 'UBDL-exception', 'deprecated': False}, + 'universal-foss-exception-1.0': {'id': 'Universal-FOSS-exception-1.0', 'deprecated': False}, + 'vsftpd-openssl-exception': {'id': 'vsftpd-openssl-exception', 'deprecated': False}, + 'wxwindows-exception-3.1': {'id': 'WxWindows-exception-3.1', 'deprecated': False}, + 'x11vnc-openssl-exception': {'id': 'x11vnc-openssl-exception', 'deprecated': False}, +} diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit-3.0.51.dist-info/licenses/AUTHORS.rst b/.venv/lib/python3.13/site-packages/prompt_toolkit-3.0.51.dist-info/licenses/AUTHORS.rst new file mode 100644 index 0000000000000000000000000000000000000000..f7c8f60f4006dee49df0321c3786f9e413fa2cce --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit-3.0.51.dist-info/licenses/AUTHORS.rst @@ -0,0 +1,11 @@ +Authors +======= + +Creator +------- +Jonathan Slenders + +Contributors +------------ + +- Amjith Ramanujam diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit-3.0.51.dist-info/licenses/LICENSE b/.venv/lib/python3.13/site-packages/prompt_toolkit-3.0.51.dist-info/licenses/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..e1720e0fb70684043842e94ede48622e6bffc62d --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit-3.0.51.dist-info/licenses/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2014, Jonathan Slenders +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. 
+ +* Redistributions in binary form must reproduce the above copyright notice, this + list of conditions and the following disclaimer in the documentation and/or + other materials provided with the distribution. + +* Neither the name of the {organization} nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/__init__.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a8a0a0cbbe739c1e36cb16f327fc19151e926f94 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/__init__.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/auto_suggest.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/auto_suggest.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2033afca332735ff7899347fd63924b9af955548 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/auto_suggest.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/buffer.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/buffer.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f506142bd59d42430998b26f979d5afc68843bf2 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/buffer.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/cache.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/cache.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fc5186e5118a3b2bfee12a1ac64aab254b10efb9 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/cache.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/cursor_shapes.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/cursor_shapes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..81e133f46059f4663c2f37603e0f3c1a14ef52d4 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/cursor_shapes.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/data_structures.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/data_structures.cpython-313.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..fad48989e3fc309229b94adf0461e84658f7fa5b Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/data_structures.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/document.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/document.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2f300f2ee9aee2cd61bb27b5176b9f96aaf8165e Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/document.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/enums.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/enums.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a39027e3c3c6e804019ef6bfd5bd164f5900511d Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/enums.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/history.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/history.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ad3619f7e72211baca503b8ff1e2304b89cede97 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/history.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/keys.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/keys.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3bcf021be810ca785ca5f514405d7add3975d02a Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/keys.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/mouse_events.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/mouse_events.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5bf9f43f83232168967e5e8826840e0eaa5c55f2 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/mouse_events.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/renderer.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/renderer.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..568c19967b58ca5c9720603ab6195790ad5d88a0 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/renderer.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/search.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/search.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..acc209442119be0abe5740b12ea2744f8534c679 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/search.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/selection.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/selection.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..698f3b9576fa2245b258e420d88ab9a4e1087df1 Binary files /dev/null and 
b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/selection.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/utils.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/utils.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7ada76c52542d9da8587da81dd58932b20f6af8 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/utils.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/validation.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/validation.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..58a8706804eb2a47b9821f9415c97adf3e764edf Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/__pycache__/validation.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/__init__.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f65a94e60ac830fb66fb431387bb8e4b20ca7ecf --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/__init__.py @@ -0,0 +1,43 @@ +from __future__ import annotations + +from .base import ( + CompleteEvent, + Completer, + Completion, + ConditionalCompleter, + DummyCompleter, + DynamicCompleter, + ThreadedCompleter, + get_common_complete_suffix, + merge_completers, +) +from .deduplicate import DeduplicateCompleter +from .filesystem import ExecutableCompleter, PathCompleter +from .fuzzy_completer import FuzzyCompleter, FuzzyWordCompleter +from .nested import NestedCompleter +from .word_completer import WordCompleter + +__all__ = [ + # Base. + "Completion", + "Completer", + "ThreadedCompleter", + "DummyCompleter", + "DynamicCompleter", + "CompleteEvent", + "ConditionalCompleter", + "merge_completers", + "get_common_complete_suffix", + # Filesystem. + "PathCompleter", + "ExecutableCompleter", + # Fuzzy + "FuzzyCompleter", + "FuzzyWordCompleter", + # Nested. + "NestedCompleter", + # Word completer. + "WordCompleter", + # Deduplicate + "DeduplicateCompleter", +] diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/base.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/base.py new file mode 100644 index 0000000000000000000000000000000000000000..3846ef756264eccd0c35e79e42cbf2a7662d5129 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/base.py @@ -0,0 +1,438 @@ +""" """ + +from __future__ import annotations + +from abc import ABCMeta, abstractmethod +from typing import AsyncGenerator, Callable, Iterable, Sequence + +from prompt_toolkit.document import Document +from prompt_toolkit.eventloop import aclosing, generator_to_async_generator +from prompt_toolkit.filters import FilterOrBool, to_filter +from prompt_toolkit.formatted_text import AnyFormattedText, StyleAndTextTuples + +__all__ = [ + "Completion", + "Completer", + "ThreadedCompleter", + "DummyCompleter", + "DynamicCompleter", + "CompleteEvent", + "ConditionalCompleter", + "merge_completers", + "get_common_complete_suffix", +] + + +class Completion: + """ + :param text: The new string that will be inserted into the document. + :param start_position: Position relative to the cursor_position where the + new text will start. The text will be inserted between the + start_position and the original cursor position. 
+ :param display: (Optional string or formatted text) If the completion has + to be displayed differently in the completion menu. + :param display_meta: (Optional string or formatted text) Meta information + about the completion, e.g. the path or source where it's coming from. + This can also be a callable that returns a string. + :param style: Style string. + :param selected_style: Style string, used for a selected completion. + This can override the `style` parameter. + """ + + def __init__( + self, + text: str, + start_position: int = 0, + display: AnyFormattedText | None = None, + display_meta: AnyFormattedText | None = None, + style: str = "", + selected_style: str = "", + ) -> None: + from prompt_toolkit.formatted_text import to_formatted_text + + self.text = text + self.start_position = start_position + self._display_meta = display_meta + + if display is None: + display = text + + self.display = to_formatted_text(display) + + self.style = style + self.selected_style = selected_style + + assert self.start_position <= 0 + + def __repr__(self) -> str: + if isinstance(self.display, str) and self.display == self.text: + return f"{self.__class__.__name__}(text={self.text!r}, start_position={self.start_position!r})" + else: + return f"{self.__class__.__name__}(text={self.text!r}, start_position={self.start_position!r}, display={self.display!r})" + + def __eq__(self, other: object) -> bool: + if not isinstance(other, Completion): + return False + return ( + self.text == other.text + and self.start_position == other.start_position + and self.display == other.display + and self._display_meta == other._display_meta + ) + + def __hash__(self) -> int: + return hash((self.text, self.start_position, self.display, self._display_meta)) + + @property + def display_text(self) -> str: + "The 'display' field as plain text." + from prompt_toolkit.formatted_text import fragment_list_to_text + + return fragment_list_to_text(self.display) + + @property + def display_meta(self) -> StyleAndTextTuples: + "Return meta-text. (This is lazy when using a callable)." + from prompt_toolkit.formatted_text import to_formatted_text + + return to_formatted_text(self._display_meta or "") + + @property + def display_meta_text(self) -> str: + "The 'meta' field as plain text." + from prompt_toolkit.formatted_text import fragment_list_to_text + + return fragment_list_to_text(self.display_meta) + + def new_completion_from_position(self, position: int) -> Completion: + """ + (Only for internal use!) + Get a new completion by splitting this one. Used by `Application` when + it needs to have a list of new completions after inserting the common + prefix. + """ + assert position - self.start_position >= 0 + + return Completion( + text=self.text[position - self.start_position :], + display=self.display, + display_meta=self._display_meta, + ) + + +class CompleteEvent: + """ + Event that triggered the completer. + + :param text_inserted: When True, it means that completions are requested + because of a text insert. (`Buffer.complete_while_typing`.) + :param completion_requested: When True, it means that the user explicitly + pressed the `Tab` key in order to view the completions. + + These two flags can be used for instance to implement a completer that + shows some completions when ``Tab`` has been pressed, but not + automatically when the user presses a space. (Because of + `complete_while_typing`.)
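A minimal sketch of a completer that branches on these flags (an
illustrative aside; only names defined in this module are assumed)::

    def get_completions(self, document, complete_event):
        if complete_event.completion_requested:
            ...  # explicit Tab press: more expensive work is acceptable here
        elif complete_event.text_inserted:
            ...  # fired while typing: keep this path cheap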
+ """ + + def __init__( + self, text_inserted: bool = False, completion_requested: bool = False + ) -> None: + assert not (text_inserted and completion_requested) + + #: Automatic completion while typing. + self.text_inserted = text_inserted + + #: Used explicitly requested completion by pressing 'tab'. + self.completion_requested = completion_requested + + def __repr__(self) -> str: + return f"{self.__class__.__name__}(text_inserted={self.text_inserted!r}, completion_requested={self.completion_requested!r})" + + +class Completer(metaclass=ABCMeta): + """ + Base class for completer implementations. + """ + + @abstractmethod + def get_completions( + self, document: Document, complete_event: CompleteEvent + ) -> Iterable[Completion]: + """ + This should be a generator that yields :class:`.Completion` instances. + + If the generation of completions is something expensive (that takes a + lot of time), consider wrapping this `Completer` class in a + `ThreadedCompleter`. In that case, the completer algorithm runs in a + background thread and completions will be displayed as soon as they + arrive. + + :param document: :class:`~prompt_toolkit.document.Document` instance. + :param complete_event: :class:`.CompleteEvent` instance. + """ + while False: + yield + + async def get_completions_async( + self, document: Document, complete_event: CompleteEvent + ) -> AsyncGenerator[Completion, None]: + """ + Asynchronous generator for completions. (Probably, you won't have to + override this.) + + Asynchronous generator of :class:`.Completion` objects. + """ + for item in self.get_completions(document, complete_event): + yield item + + +class ThreadedCompleter(Completer): + """ + Wrapper that runs the `get_completions` generator in a thread. + + (Use this to prevent the user interface from becoming unresponsive if the + generation of completions takes too much time.) + + The completions will be displayed as soon as they are produced. The user + can already select a completion, even if not all completions are displayed. + """ + + def __init__(self, completer: Completer) -> None: + self.completer = completer + + def get_completions( + self, document: Document, complete_event: CompleteEvent + ) -> Iterable[Completion]: + return self.completer.get_completions(document, complete_event) + + async def get_completions_async( + self, document: Document, complete_event: CompleteEvent + ) -> AsyncGenerator[Completion, None]: + """ + Asynchronous generator of completions. + """ + # NOTE: Right now, we are consuming the `get_completions` generator in + # a synchronous background thread, then passing the results one + # at a time over a queue, and consuming this queue in the main + # thread (that's what `generator_to_async_generator` does). That + # means that if the completer is *very* slow, we'll be showing + # completions in the UI once they are computed. + + # It's very tempting to replace this implementation with the + # commented code below for several reasons: + + # - `generator_to_async_generator` is not perfect and hard to get + # right. It's a lot of complexity for little gain. The + # implementation needs a huge buffer for it to be efficient + # when there are many completions (like 50k+). + # - Normally, a completer is supposed to be fast, users can have + # "complete while typing" enabled, and want to see the + # completions within a second. Handling one completion at a + # time, and rendering once we get it here doesn't make any + # sense if this is quick anyway. 
+ # - Completers like `FuzzyCompleter` prepare all completions + # anyway so that they can be sorted by accuracy before they are + # yielded. At the point that we start yielding completions + # here, we already have all completions. + # - The `Buffer` class has complex logic to invalidate the UI + # while it is consuming the completions. We don't want to + # invalidate the UI for every completion (if there are many), + # but we want to do it often enough so that completions are + # being displayed while they are produced. + + # We keep the current behavior mainly for backward-compatibility. + # Similarly, it would be better for this function to not return + # an async generator, but simply be a coroutine that returns a + # list of `Completion` objects, containing all completions at + # once. + + # Note that this argument doesn't mean we shouldn't use + # `ThreadedCompleter`. It still makes sense to produce + # completions in a background thread, because we don't want to + # freeze the UI while the user is typing. But sending the + # completions one at a time to the UI maybe isn't worth it. + + # def get_all_in_thread() -> List[Completion]: + # return list(self.get_completions(document, complete_event)) + + # completions = await get_running_loop().run_in_executor(None, get_all_in_thread) + # for completion in completions: + # yield completion + + async with aclosing( + generator_to_async_generator( + lambda: self.completer.get_completions(document, complete_event) + ) + ) as async_generator: + async for completion in async_generator: + yield completion + + def __repr__(self) -> str: + return f"ThreadedCompleter({self.completer!r})" + + +class DummyCompleter(Completer): + """ + A completer that doesn't return any completion. + """ + + def get_completions( + self, document: Document, complete_event: CompleteEvent + ) -> Iterable[Completion]: + return [] + + def __repr__(self) -> str: + return "DummyCompleter()" + + +class DynamicCompleter(Completer): + """ + Completer class that can dynamically returns any Completer. + + :param get_completer: Callable that returns a :class:`.Completer` instance. + """ + + def __init__(self, get_completer: Callable[[], Completer | None]) -> None: + self.get_completer = get_completer + + def get_completions( + self, document: Document, complete_event: CompleteEvent + ) -> Iterable[Completion]: + completer = self.get_completer() or DummyCompleter() + return completer.get_completions(document, complete_event) + + async def get_completions_async( + self, document: Document, complete_event: CompleteEvent + ) -> AsyncGenerator[Completion, None]: + completer = self.get_completer() or DummyCompleter() + + async for completion in completer.get_completions_async( + document, complete_event + ): + yield completion + + def __repr__(self) -> str: + return f"DynamicCompleter({self.get_completer!r} -> {self.get_completer()!r})" + + +class ConditionalCompleter(Completer): + """ + Wrapper around any other completer that will enable/disable the completions + depending on whether the received condition is satisfied. + + :param completer: :class:`.Completer` instance. + :param filter: :class:`.Filter` instance. 
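A minimal usage sketch (an illustrative aside; ``Condition`` is the filter
class from ``prompt_toolkit.filters``, ``PathCompleter`` is defined in this
package, and ``in_path_mode`` is a hypothetical application flag)::

    from prompt_toolkit.completion import PathCompleter
    from prompt_toolkit.filters import Condition

    in_path_mode = True  # hypothetical application state
    completer = ConditionalCompleter(
        PathCompleter(),
        filter=Condition(lambda: in_path_mode),
    )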
+ """ + + def __init__(self, completer: Completer, filter: FilterOrBool) -> None: + self.completer = completer + self.filter = to_filter(filter) + + def __repr__(self) -> str: + return f"ConditionalCompleter({self.completer!r}, filter={self.filter!r})" + + def get_completions( + self, document: Document, complete_event: CompleteEvent + ) -> Iterable[Completion]: + # Get all completions in a blocking way. + if self.filter(): + yield from self.completer.get_completions(document, complete_event) + + async def get_completions_async( + self, document: Document, complete_event: CompleteEvent + ) -> AsyncGenerator[Completion, None]: + # Get all completions in a non-blocking way. + if self.filter(): + async with aclosing( + self.completer.get_completions_async(document, complete_event) + ) as async_generator: + async for item in async_generator: + yield item + + +class _MergedCompleter(Completer): + """ + Combine several completers into one. + """ + + def __init__(self, completers: Sequence[Completer]) -> None: + self.completers = completers + + def get_completions( + self, document: Document, complete_event: CompleteEvent + ) -> Iterable[Completion]: + # Get all completions from the other completers in a blocking way. + for completer in self.completers: + yield from completer.get_completions(document, complete_event) + + async def get_completions_async( + self, document: Document, complete_event: CompleteEvent + ) -> AsyncGenerator[Completion, None]: + # Get all completions from the other completers in a non-blocking way. + for completer in self.completers: + async with aclosing( + completer.get_completions_async(document, complete_event) + ) as async_generator: + async for item in async_generator: + yield item + + +def merge_completers( + completers: Sequence[Completer], deduplicate: bool = False +) -> Completer: + """ + Combine several completers into one. + + :param deduplicate: If `True`, wrap the result in a `DeduplicateCompleter` + so that completions that would result in the same text will be + deduplicated. + """ + if deduplicate: + from .deduplicate import DeduplicateCompleter + + return DeduplicateCompleter(_MergedCompleter(completers)) + + return _MergedCompleter(completers) + + +def get_common_complete_suffix( + document: Document, completions: Sequence[Completion] +) -> str: + """ + Return the common prefix for all completions. + """ + + # Take only completions that don't change the text before the cursor. + def doesnt_change_before_cursor(completion: Completion) -> bool: + end = completion.text[: -completion.start_position] + return document.text_before_cursor.endswith(end) + + completions2 = [c for c in completions if doesnt_change_before_cursor(c)] + + # When there is at least one completion that changes the text before the + # cursor, don't return any common part. + if len(completions2) != len(completions): + return "" + + # Return the common prefix. 
+    def get_suffix(completion: Completion) -> str:
+        return completion.text[-completion.start_position :]
+
+    return _commonprefix([get_suffix(c) for c in completions2])
+
+
+def _commonprefix(strings: Iterable[str]) -> str:
+    # Similar to os.path.commonprefix
+    if not strings:
+        return ""
+
+    else:
+        s1 = min(strings)
+        s2 = max(strings)
+
+        for i, c in enumerate(s1):
+            if c != s2[i]:
+                return s1[:i]
+
+        return s1
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/deduplicate.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/deduplicate.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3d525669d2c415ce0754a9c6ff1936061b4a3ce
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/deduplicate.py
@@ -0,0 +1,45 @@
+from __future__ import annotations
+
+from typing import Iterable
+
+from prompt_toolkit.document import Document
+
+from .base import CompleteEvent, Completer, Completion
+
+__all__ = ["DeduplicateCompleter"]
+
+
+class DeduplicateCompleter(Completer):
+    """
+    Wrapper around a completer that removes duplicates. Only the first of each
+    set of duplicate completions is kept.
+
+    Completions are considered to be a duplicate if they result in the same
+    document text when they would be applied.
+    """
+
+    def __init__(self, completer: Completer) -> None:
+        self.completer = completer
+
+    def get_completions(
+        self, document: Document, complete_event: CompleteEvent
+    ) -> Iterable[Completion]:
+        # Keep track of the document strings we'd get after applying any completion.
+        found_so_far: set[str] = set()
+
+        for completion in self.completer.get_completions(document, complete_event):
+            text_if_applied = (
+                document.text[: document.cursor_position + completion.start_position]
+                + completion.text
+                + document.text[document.cursor_position :]
+            )
+
+            if text_if_applied == document.text:
+                # Don't include completions that don't have any effect at all.
+                continue
+
+            if text_if_applied in found_so_far:
+                continue
+
+            found_so_far.add(text_if_applied)
+            yield completion
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/filesystem.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/filesystem.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e7f87e00ba7971b9bf7f08831dd901a43ed7ce7
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/filesystem.py
@@ -0,0 +1,118 @@
+from __future__ import annotations
+
+import os
+from typing import Callable, Iterable
+
+from prompt_toolkit.completion import CompleteEvent, Completer, Completion
+from prompt_toolkit.document import Document
+
+__all__ = [
+    "PathCompleter",
+    "ExecutableCompleter",
+]
+
+
+class PathCompleter(Completer):
+    """
+    Completer for file system paths.
+
+    :param get_paths: Callable which returns a list of directories to look into
+        when the user enters a relative path.
+    :param file_filter: Callable which takes a filename and returns whether
+        this file should show up in the completion. ``None`` when no
+        filtering has to be done.
+    :param min_input_len: Don't do autocompletion when the input string is shorter.
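+    :param only_directories: When `True`, only complete directory names.
+    :param expanduser: When `True`, do tilde expansion ("~") on the input
+        before looking for matches.
+
+    A usage sketch (the parameter values here are illustrative, not
+    defaults)::
+
+        completer = PathCompleter(only_directories=True, expanduser=True)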
+ """ + + def __init__( + self, + only_directories: bool = False, + get_paths: Callable[[], list[str]] | None = None, + file_filter: Callable[[str], bool] | None = None, + min_input_len: int = 0, + expanduser: bool = False, + ) -> None: + self.only_directories = only_directories + self.get_paths = get_paths or (lambda: ["."]) + self.file_filter = file_filter or (lambda _: True) + self.min_input_len = min_input_len + self.expanduser = expanduser + + def get_completions( + self, document: Document, complete_event: CompleteEvent + ) -> Iterable[Completion]: + text = document.text_before_cursor + + # Complete only when we have at least the minimal input length, + # otherwise, we can too many results and autocompletion will become too + # heavy. + if len(text) < self.min_input_len: + return + + try: + # Do tilde expansion. + if self.expanduser: + text = os.path.expanduser(text) + + # Directories where to look. + dirname = os.path.dirname(text) + if dirname: + directories = [ + os.path.dirname(os.path.join(p, text)) for p in self.get_paths() + ] + else: + directories = self.get_paths() + + # Start of current file. + prefix = os.path.basename(text) + + # Get all filenames. + filenames = [] + for directory in directories: + # Look for matches in this directory. + if os.path.isdir(directory): + for filename in os.listdir(directory): + if filename.startswith(prefix): + filenames.append((directory, filename)) + + # Sort + filenames = sorted(filenames, key=lambda k: k[1]) + + # Yield them. + for directory, filename in filenames: + completion = filename[len(prefix) :] + full_name = os.path.join(directory, filename) + + if os.path.isdir(full_name): + # For directories, add a slash to the filename. + # (We don't add them to the `completion`. Users can type it + # to trigger the autocompletion themselves.) + filename += "/" + elif self.only_directories: + continue + + if not self.file_filter(full_name): + continue + + yield Completion( + text=completion, + start_position=0, + display=filename, + ) + except OSError: + pass + + +class ExecutableCompleter(PathCompleter): + """ + Complete only executable files in the current path. + """ + + def __init__(self) -> None: + super().__init__( + only_directories=False, + min_input_len=1, + get_paths=lambda: os.environ.get("PATH", "").split(os.pathsep), + file_filter=lambda name: os.access(name, os.X_OK), + expanduser=True, + ) diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/fuzzy_completer.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/fuzzy_completer.py new file mode 100644 index 0000000000000000000000000000000000000000..82625ab63f1da3aecdae37266aebeff486796818 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/fuzzy_completer.py @@ -0,0 +1,213 @@ +from __future__ import annotations + +import re +from typing import Callable, Iterable, NamedTuple, Sequence + +from prompt_toolkit.document import Document +from prompt_toolkit.filters import FilterOrBool, to_filter +from prompt_toolkit.formatted_text import AnyFormattedText, StyleAndTextTuples + +from .base import CompleteEvent, Completer, Completion +from .word_completer import WordCompleter + +__all__ = [ + "FuzzyCompleter", + "FuzzyWordCompleter", +] + + +class FuzzyCompleter(Completer): + """ + Fuzzy completion. + This wraps any other completer and turns it into a fuzzy completer. 
+
+    If the list of words is: ["leopard", "gorilla", "dinosaur", "cat", "bee"]
+    Then trying to complete "oar" would yield "leopard" and "dinosaur", but not
+    the others, because they match the regular expression 'o.*a.*r'.
+    Similarly, in another application "djm" could expand to "django_migrations".
+
+    The results are sorted by relevance, which is defined as the start position
+    and the length of the match.
+
+    Notice that this is not really a tool to work around spelling mistakes,
+    like what would be possible with difflib. The purpose is rather to have a
+    quicker or more intuitive way to filter the given completions, especially
+    when many completions have a common prefix.
+
+    The fuzzy algorithm is based on this post:
+    https://blog.amjith.com/fuzzyfinder-in-10-lines-of-python
+
+    :param completer: A :class:`~.Completer` instance.
+    :param WORD: When True, use WORD characters.
+    :param pattern: Regex pattern which selects the characters before the
+        cursor that are considered for the fuzzy matching.
+    :param enable_fuzzy: (bool or `Filter`) Enables the fuzzy behavior. For
+        easily turning fuzziness on or off according to a certain condition.
+    """
+
+    def __init__(
+        self,
+        completer: Completer,
+        WORD: bool = False,
+        pattern: str | None = None,
+        enable_fuzzy: FilterOrBool = True,
+    ) -> None:
+        assert pattern is None or pattern.startswith("^")
+
+        self.completer = completer
+        self.pattern = pattern
+        self.WORD = WORD
+        self.enable_fuzzy = to_filter(enable_fuzzy)
+
+    def get_completions(
+        self, document: Document, complete_event: CompleteEvent
+    ) -> Iterable[Completion]:
+        if self.enable_fuzzy():
+            return self._get_fuzzy_completions(document, complete_event)
+        else:
+            return self.completer.get_completions(document, complete_event)
+
+    def _get_pattern(self) -> str:
+        if self.pattern:
+            return self.pattern
+        if self.WORD:
+            return r"[^\s]+"
+        return "^[a-zA-Z0-9_]*"
+
+    def _get_fuzzy_completions(
+        self, document: Document, complete_event: CompleteEvent
+    ) -> Iterable[Completion]:
+        word_before_cursor = document.get_word_before_cursor(
+            pattern=re.compile(self._get_pattern())
+        )
+
+        # Get completions
+        document2 = Document(
+            text=document.text[: document.cursor_position - len(word_before_cursor)],
+            cursor_position=document.cursor_position - len(word_before_cursor),
+        )
+
+        inner_completions = list(
+            self.completer.get_completions(document2, complete_event)
+        )
+
+        fuzzy_matches: list[_FuzzyMatch] = []
+
+        if word_before_cursor == "":
+            # If word before the cursor is an empty string, consider all
+            # completions, without filtering everything with an empty regex
+            # pattern.
+            fuzzy_matches = [_FuzzyMatch(0, 0, compl) for compl in inner_completions]
+        else:
+            pat = ".*?".join(map(re.escape, word_before_cursor))
+            pat = f"(?=({pat}))"  # lookahead regex to manage overlapping matches
+            regex = re.compile(pat, re.IGNORECASE)
+            for compl in inner_completions:
+                matches = list(regex.finditer(compl.text))
+                if matches:
+                    # Prefer the match, closest to the left, then shortest.
+                    best = min(matches, key=lambda m: (m.start(), len(m.group(1))))
+                    fuzzy_matches.append(
+                        _FuzzyMatch(len(best.group(1)), best.start(), compl)
+                    )
+
+        def sort_key(fuzzy_match: _FuzzyMatch) -> tuple[int, int]:
+            "Sort by start position, then by the length of the match."
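+            # (Earlier, and then shorter, matches are considered more
+            # relevant, per the "relevance" definition in the class
+            # docstring.)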
+            return fuzzy_match.start_pos, fuzzy_match.match_length
+
+        fuzzy_matches = sorted(fuzzy_matches, key=sort_key)
+
+        for match in fuzzy_matches:
+            # Include these completions, but set the correct `display`
+            # attribute and `start_position`.
+            yield Completion(
+                text=match.completion.text,
+                start_position=match.completion.start_position
+                - len(word_before_cursor),
+                # We access the private `_display_meta` attribute, because that one is lazy.
+                display_meta=match.completion._display_meta,
+                display=self._get_display(match, word_before_cursor),
+                style=match.completion.style,
+            )
+
+    def _get_display(
+        self, fuzzy_match: _FuzzyMatch, word_before_cursor: str
+    ) -> AnyFormattedText:
+        """
+        Generate formatted text for the display label.
+        """
+
+        def get_display() -> AnyFormattedText:
+            m = fuzzy_match
+            word = m.completion.text
+
+            if m.match_length == 0:
+                # No highlighting when we have zero length matches (no input text).
+                # In this case, use the original display text (which can include
+                # additional styling or characters).
+                return m.completion.display
+
+            result: StyleAndTextTuples = []
+
+            # Text before match.
+            result.append(("class:fuzzymatch.outside", word[: m.start_pos]))
+
+            # The match itself.
+            characters = list(word_before_cursor)
+
+            for c in word[m.start_pos : m.start_pos + m.match_length]:
+                classname = "class:fuzzymatch.inside"
+                if characters and c.lower() == characters[0].lower():
+                    classname += ".character"
+                    del characters[0]
+
+                result.append((classname, c))
+
+            # Text after match.
+            result.append(
+                ("class:fuzzymatch.outside", word[m.start_pos + m.match_length :])
+            )
+
+            return result
+
+        return get_display()
+
+
+class FuzzyWordCompleter(Completer):
+    """
+    Fuzzy completion on a list of words.
+
+    (This is basically a `WordCompleter` wrapped in a `FuzzyCompleter`.)
+
+    :param words: List of words or callable that returns a list of words.
+    :param meta_dict: Optional dict mapping words to their meta-information.
+    :param WORD: When True, use WORD characters.
+    """
+
+    def __init__(
+        self,
+        words: Sequence[str] | Callable[[], Sequence[str]],
+        meta_dict: dict[str, str] | None = None,
+        WORD: bool = False,
+    ) -> None:
+        self.words = words
+        self.meta_dict = meta_dict or {}
+        self.WORD = WORD
+
+        self.word_completer = WordCompleter(
+            words=self.words, WORD=self.WORD, meta_dict=self.meta_dict
+        )
+
+        self.fuzzy_completer = FuzzyCompleter(self.word_completer, WORD=self.WORD)
+
+    def get_completions(
+        self, document: Document, complete_event: CompleteEvent
+    ) -> Iterable[Completion]:
+        return self.fuzzy_completer.get_completions(document, complete_event)
+
+
+class _FuzzyMatch(NamedTuple):
+    match_length: int
+    start_pos: int
+    completion: Completion
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/nested.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/nested.py
new file mode 100644
index 0000000000000000000000000000000000000000..b72b69ee21294c8341fa0710dcac133615aa1c2c
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/nested.py
@@ -0,0 +1,109 @@
+"""
+Nested completer for completion of hierarchical data structures.
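+
+The first word of the input selects a sub-completer; the rest of the line is
+then completed by that sub-completer, which allows arbitrarily deep command
+hierarchies.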
+""" + +from __future__ import annotations + +from typing import Any, Iterable, Mapping, Set, Union + +from prompt_toolkit.completion import CompleteEvent, Completer, Completion +from prompt_toolkit.completion.word_completer import WordCompleter +from prompt_toolkit.document import Document + +__all__ = ["NestedCompleter"] + +# NestedDict = Mapping[str, Union['NestedDict', Set[str], None, Completer]] +NestedDict = Mapping[str, Union[Any, Set[str], None, Completer]] + + +class NestedCompleter(Completer): + """ + Completer which wraps around several other completers, and calls any the + one that corresponds with the first word of the input. + + By combining multiple `NestedCompleter` instances, we can achieve multiple + hierarchical levels of autocompletion. This is useful when `WordCompleter` + is not sufficient. + + If you need multiple levels, check out the `from_nested_dict` classmethod. + """ + + def __init__( + self, options: dict[str, Completer | None], ignore_case: bool = True + ) -> None: + self.options = options + self.ignore_case = ignore_case + + def __repr__(self) -> str: + return f"NestedCompleter({self.options!r}, ignore_case={self.ignore_case!r})" + + @classmethod + def from_nested_dict(cls, data: NestedDict) -> NestedCompleter: + """ + Create a `NestedCompleter`, starting from a nested dictionary data + structure, like this: + + .. code:: + + data = { + 'show': { + 'version': None, + 'interfaces': None, + 'clock': None, + 'ip': {'interface': {'brief'}} + }, + 'exit': None + 'enable': None + } + + The value should be `None` if there is no further completion at some + point. If all values in the dictionary are None, it is also possible to + use a set instead. + + Values in this data structure can be a completers as well. + """ + options: dict[str, Completer | None] = {} + for key, value in data.items(): + if isinstance(value, Completer): + options[key] = value + elif isinstance(value, dict): + options[key] = cls.from_nested_dict(value) + elif isinstance(value, set): + options[key] = cls.from_nested_dict(dict.fromkeys(value)) + else: + assert value is None + options[key] = None + + return cls(options) + + def get_completions( + self, document: Document, complete_event: CompleteEvent + ) -> Iterable[Completion]: + # Split document. + text = document.text_before_cursor.lstrip() + stripped_len = len(document.text_before_cursor) - len(text) + + # If there is a space, check for the first term, and use a + # subcompleter. + if " " in text: + first_term = text.split()[0] + completer = self.options.get(first_term) + + # If we have a sub completer, use this for the completions. + if completer is not None: + remaining_text = text[len(first_term) :].lstrip() + move_cursor = len(text) - len(remaining_text) + stripped_len + + new_document = Document( + remaining_text, + cursor_position=document.cursor_position - move_cursor, + ) + + yield from completer.get_completions(new_document, complete_event) + + # No space in the input: behave exactly like `WordCompleter`. 
+        else:
+            completer = WordCompleter(
+                list(self.options.keys()), ignore_case=self.ignore_case
+            )
+            yield from completer.get_completions(document, complete_event)
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/word_completer.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/word_completer.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e124056ba211e0252cd44e2d16691487b04e398
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/completion/word_completer.py
@@ -0,0 +1,94 @@
+from __future__ import annotations
+
+from typing import Callable, Iterable, Mapping, Pattern, Sequence
+
+from prompt_toolkit.completion import CompleteEvent, Completer, Completion
+from prompt_toolkit.document import Document
+from prompt_toolkit.formatted_text import AnyFormattedText
+
+__all__ = [
+    "WordCompleter",
+]
+
+
+class WordCompleter(Completer):
+    """
+    Simple autocompletion on a list of words.
+
+    :param words: List of words or callable that returns a list of words.
+    :param ignore_case: If True, case-insensitive completion.
+    :param meta_dict: Optional dict mapping words to their meta-text. (This
+        should map strings to strings or formatted text.)
+    :param WORD: When True, use WORD characters.
+    :param sentence: When True, don't complete by comparing the word before the
+        cursor, but by comparing all the text before the cursor. In this case,
+        the list of words is just a list of strings, where each string can
+        contain spaces. (Cannot be used together with the WORD option.)
+    :param match_middle: When True, match not only the start, but also in the
+        middle of the word.
+    :param pattern: Optional compiled regex for finding the word before
+        the cursor to complete. When given, use this regex pattern instead of
+        the default one (see document._FIND_WORD_RE)
+    """
+
+    def __init__(
+        self,
+        words: Sequence[str] | Callable[[], Sequence[str]],
+        ignore_case: bool = False,
+        display_dict: Mapping[str, AnyFormattedText] | None = None,
+        meta_dict: Mapping[str, AnyFormattedText] | None = None,
+        WORD: bool = False,
+        sentence: bool = False,
+        match_middle: bool = False,
+        pattern: Pattern[str] | None = None,
+    ) -> None:
+        assert not (WORD and sentence)
+
+        self.words = words
+        self.ignore_case = ignore_case
+        self.display_dict = display_dict or {}
+        self.meta_dict = meta_dict or {}
+        self.WORD = WORD
+        self.sentence = sentence
+        self.match_middle = match_middle
+        self.pattern = pattern
+
+    def get_completions(
+        self, document: Document, complete_event: CompleteEvent
+    ) -> Iterable[Completion]:
+        # Get list of words.
+        words = self.words
+        if callable(words):
+            words = words()
+
+        # Get word/text before cursor.
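+        # (In `sentence` mode we match against everything before the cursor,
+        # so entries containing spaces, like "show version", can complete as
+        # a whole.)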
+        if self.sentence:
+            word_before_cursor = document.text_before_cursor
+        else:
+            word_before_cursor = document.get_word_before_cursor(
+                WORD=self.WORD, pattern=self.pattern
+            )
+
+        if self.ignore_case:
+            word_before_cursor = word_before_cursor.lower()
+
+        def word_matches(word: str) -> bool:
+            """True when the word before the cursor matches."""
+            if self.ignore_case:
+                word = word.lower()
+
+            if self.match_middle:
+                return word_before_cursor in word
+            else:
+                return word.startswith(word_before_cursor)
+
+        for a in words:
+            if word_matches(a):
+                display = self.display_dict.get(a, a)
+                display_meta = self.meta_dict.get(a, "")
+                yield Completion(
+                    text=a,
+                    start_position=-len(word_before_cursor),
+                    display=display,
+                    display_meta=display_meta,
+                )
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/contrib/__init__.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/contrib/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__init__.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..556ed8846b84ad636e58e26621b24e98a103e685
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__init__.py
@@ -0,0 +1,71 @@
+"""
+Filters decide whether something is active or not (they decide about a boolean
+state). This is used to enable/disable features, like key bindings, parts of
+the layout and other stuff. For instance, we could have a `HasSearch` filter
+attached to some part of the layout, in order to show that part of the user
+interface only while the user is searching.
+
+Filters are made to avoid having to attach callbacks to every event in order
+to propagate state. However, they are lazy: they don't automatically propagate
+the state of what they are observing. Only when a filter is called (it is
+actually a callable) will it calculate its value. So, it's not really reactive
+programming, but it's made to fit for this framework.
+
+Filters can be chained using ``&`` and ``|`` operations, and inverted using the
+``~`` operator, for instance::
+
+    filter = has_focus('default') & ~has_selection
+"""
+
+from __future__ import annotations
+
+from .app import *
+from .base import Always, Condition, Filter, FilterOrBool, Never
+from .cli import *
+from .utils import is_true, to_filter
+
+__all__ = [
+    # app
+    "has_arg",
+    "has_completions",
+    "completion_is_selected",
+    "has_focus",
+    "buffer_has_focus",
+    "has_selection",
+    "has_validation_error",
+    "is_done",
+    "is_read_only",
+    "is_multiline",
+    "renderer_height_is_known",
+    "in_editing_mode",
+    "in_paste_mode",
+    "vi_mode",
+    "vi_navigation_mode",
+    "vi_insert_mode",
+    "vi_insert_multiple_mode",
+    "vi_replace_mode",
+    "vi_selection_mode",
+    "vi_waiting_for_text_object_mode",
+    "vi_digraph_mode",
+    "vi_recording_macro",
+    "emacs_mode",
+    "emacs_insert_mode",
+    "emacs_selection_mode",
+    "shift_selection_mode",
+    "is_searching",
+    "control_is_searchable",
+    "vi_search_direction_reversed",
+    # base.
+    "Filter",
+    "Never",
+    "Always",
+    "Condition",
+    "FilterOrBool",
+    # utils.
+ "is_true", + "to_filter", +] + +from .cli import __all__ as cli_all + +__all__.extend(cli_all) diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/__init__.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..629e395f2ec4e9573478d5f3099cdd5b2b499b07 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/__init__.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/app.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/app.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8eaaad22e1dd93a2c02b42d5f0884a23f687a5e3 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/app.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/base.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/base.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dd90369d1286093f42135c4a782317172ecf767 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/base.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/cli.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/cli.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20806118755faaf0a8b5bc5201aa19f5dc5669ed Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/cli.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/utils.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/utils.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e65e664ba353d2ddcfcdfa69e29c2a01daee0573 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/__pycache__/utils.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/app.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/app.py new file mode 100644 index 0000000000000000000000000000000000000000..b1b7c1beea87a5a5bc23fe62263987b6664d2a5d --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/app.py @@ -0,0 +1,419 @@ +""" +Filters that accept a `Application` as argument. 
+""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, cast + +from prompt_toolkit.application.current import get_app +from prompt_toolkit.cache import memoized +from prompt_toolkit.enums import EditingMode + +from .base import Condition + +if TYPE_CHECKING: + from prompt_toolkit.layout.layout import FocusableElement + + +__all__ = [ + "has_arg", + "has_completions", + "completion_is_selected", + "has_focus", + "buffer_has_focus", + "has_selection", + "has_suggestion", + "has_validation_error", + "is_done", + "is_read_only", + "is_multiline", + "renderer_height_is_known", + "in_editing_mode", + "in_paste_mode", + "vi_mode", + "vi_navigation_mode", + "vi_insert_mode", + "vi_insert_multiple_mode", + "vi_replace_mode", + "vi_selection_mode", + "vi_waiting_for_text_object_mode", + "vi_digraph_mode", + "vi_recording_macro", + "emacs_mode", + "emacs_insert_mode", + "emacs_selection_mode", + "shift_selection_mode", + "is_searching", + "control_is_searchable", + "vi_search_direction_reversed", +] + + +# NOTE: `has_focus` below should *not* be `memoized`. It can reference any user +# control. For instance, if we would continuously create new +# `PromptSession` instances, then previous instances won't be released, +# because this memoize (which caches results in the global scope) will +# still refer to each instance. +def has_focus(value: FocusableElement) -> Condition: + """ + Enable when this buffer has the focus. + """ + from prompt_toolkit.buffer import Buffer + from prompt_toolkit.layout import walk + from prompt_toolkit.layout.containers import Container, Window, to_container + from prompt_toolkit.layout.controls import UIControl + + if isinstance(value, str): + + def test() -> bool: + return get_app().current_buffer.name == value + + elif isinstance(value, Buffer): + + def test() -> bool: + return get_app().current_buffer == value + + elif isinstance(value, UIControl): + + def test() -> bool: + return get_app().layout.current_control == value + + else: + value = to_container(value) + + if isinstance(value, Window): + + def test() -> bool: + return get_app().layout.current_window == value + + else: + + def test() -> bool: + # Consider focused when any window inside this container is + # focused. + current_window = get_app().layout.current_window + + for c in walk(cast(Container, value)): + if isinstance(c, Window) and c == current_window: + return True + return False + + @Condition + def has_focus_filter() -> bool: + return test() + + return has_focus_filter + + +@Condition +def buffer_has_focus() -> bool: + """ + Enabled when the currently focused control is a `BufferControl`. + """ + return get_app().layout.buffer_has_focus + + +@Condition +def has_selection() -> bool: + """ + Enable when the current buffer has a selection. + """ + return bool(get_app().current_buffer.selection_state) + + +@Condition +def has_suggestion() -> bool: + """ + Enable when the current buffer has a suggestion. + """ + buffer = get_app().current_buffer + return buffer.suggestion is not None and buffer.suggestion.text != "" + + +@Condition +def has_completions() -> bool: + """ + Enable when the current buffer has completions. + """ + state = get_app().current_buffer.complete_state + return state is not None and len(state.completions) > 0 + + +@Condition +def completion_is_selected() -> bool: + """ + True when the user selected a completion. 
+ """ + complete_state = get_app().current_buffer.complete_state + return complete_state is not None and complete_state.current_completion is not None + + +@Condition +def is_read_only() -> bool: + """ + True when the current buffer is read only. + """ + return get_app().current_buffer.read_only() + + +@Condition +def is_multiline() -> bool: + """ + True when the current buffer has been marked as multiline. + """ + return get_app().current_buffer.multiline() + + +@Condition +def has_validation_error() -> bool: + "Current buffer has validation error." + return get_app().current_buffer.validation_error is not None + + +@Condition +def has_arg() -> bool: + "Enable when the input processor has an 'arg'." + return get_app().key_processor.arg is not None + + +@Condition +def is_done() -> bool: + """ + True when the CLI is returning, aborting or exiting. + """ + return get_app().is_done + + +@Condition +def renderer_height_is_known() -> bool: + """ + Only True when the renderer knows it's real height. + + (On VT100 terminals, we have to wait for a CPR response, before we can be + sure of the available height between the cursor position and the bottom of + the terminal. And usually it's nicer to wait with drawing bottom toolbars + until we receive the height, in order to avoid flickering -- first drawing + somewhere in the middle, and then again at the bottom.) + """ + return get_app().renderer.height_is_known + + +@memoized() +def in_editing_mode(editing_mode: EditingMode) -> Condition: + """ + Check whether a given editing mode is active. (Vi or Emacs.) + """ + + @Condition + def in_editing_mode_filter() -> bool: + return get_app().editing_mode == editing_mode + + return in_editing_mode_filter + + +@Condition +def in_paste_mode() -> bool: + return get_app().paste_mode() + + +@Condition +def vi_mode() -> bool: + return get_app().editing_mode == EditingMode.VI + + +@Condition +def vi_navigation_mode() -> bool: + """ + Active when the set for Vi navigation key bindings are active. 
+ """ + from prompt_toolkit.key_binding.vi_state import InputMode + + app = get_app() + + if ( + app.editing_mode != EditingMode.VI + or app.vi_state.operator_func + or app.vi_state.waiting_for_digraph + or app.current_buffer.selection_state + ): + return False + + return ( + app.vi_state.input_mode == InputMode.NAVIGATION + or app.vi_state.temporary_navigation_mode + or app.current_buffer.read_only() + ) + + +@Condition +def vi_insert_mode() -> bool: + from prompt_toolkit.key_binding.vi_state import InputMode + + app = get_app() + + if ( + app.editing_mode != EditingMode.VI + or app.vi_state.operator_func + or app.vi_state.waiting_for_digraph + or app.current_buffer.selection_state + or app.vi_state.temporary_navigation_mode + or app.current_buffer.read_only() + ): + return False + + return app.vi_state.input_mode == InputMode.INSERT + + +@Condition +def vi_insert_multiple_mode() -> bool: + from prompt_toolkit.key_binding.vi_state import InputMode + + app = get_app() + + if ( + app.editing_mode != EditingMode.VI + or app.vi_state.operator_func + or app.vi_state.waiting_for_digraph + or app.current_buffer.selection_state + or app.vi_state.temporary_navigation_mode + or app.current_buffer.read_only() + ): + return False + + return app.vi_state.input_mode == InputMode.INSERT_MULTIPLE + + +@Condition +def vi_replace_mode() -> bool: + from prompt_toolkit.key_binding.vi_state import InputMode + + app = get_app() + + if ( + app.editing_mode != EditingMode.VI + or app.vi_state.operator_func + or app.vi_state.waiting_for_digraph + or app.current_buffer.selection_state + or app.vi_state.temporary_navigation_mode + or app.current_buffer.read_only() + ): + return False + + return app.vi_state.input_mode == InputMode.REPLACE + + +@Condition +def vi_replace_single_mode() -> bool: + from prompt_toolkit.key_binding.vi_state import InputMode + + app = get_app() + + if ( + app.editing_mode != EditingMode.VI + or app.vi_state.operator_func + or app.vi_state.waiting_for_digraph + or app.current_buffer.selection_state + or app.vi_state.temporary_navigation_mode + or app.current_buffer.read_only() + ): + return False + + return app.vi_state.input_mode == InputMode.REPLACE_SINGLE + + +@Condition +def vi_selection_mode() -> bool: + app = get_app() + if app.editing_mode != EditingMode.VI: + return False + + return bool(app.current_buffer.selection_state) + + +@Condition +def vi_waiting_for_text_object_mode() -> bool: + app = get_app() + if app.editing_mode != EditingMode.VI: + return False + + return app.vi_state.operator_func is not None + + +@Condition +def vi_digraph_mode() -> bool: + app = get_app() + if app.editing_mode != EditingMode.VI: + return False + + return app.vi_state.waiting_for_digraph + + +@Condition +def vi_recording_macro() -> bool: + "When recording a Vi macro." + app = get_app() + if app.editing_mode != EditingMode.VI: + return False + + return app.vi_state.recording_register is not None + + +@Condition +def emacs_mode() -> bool: + "When the Emacs bindings are active." 
+ return get_app().editing_mode == EditingMode.EMACS + + +@Condition +def emacs_insert_mode() -> bool: + app = get_app() + if ( + app.editing_mode != EditingMode.EMACS + or app.current_buffer.selection_state + or app.current_buffer.read_only() + ): + return False + return True + + +@Condition +def emacs_selection_mode() -> bool: + app = get_app() + return bool( + app.editing_mode == EditingMode.EMACS and app.current_buffer.selection_state + ) + + +@Condition +def shift_selection_mode() -> bool: + app = get_app() + return bool( + app.current_buffer.selection_state + and app.current_buffer.selection_state.shift_mode + ) + + +@Condition +def is_searching() -> bool: + "When we are searching." + app = get_app() + return app.layout.is_searching + + +@Condition +def control_is_searchable() -> bool: + "When the current UIControl is searchable." + from prompt_toolkit.layout.controls import BufferControl + + control = get_app().layout.current_control + + return ( + isinstance(control, BufferControl) and control.search_buffer_control is not None + ) + + +@Condition +def vi_search_direction_reversed() -> bool: + "When the '/' and '?' key bindings for Vi-style searching have been reversed." + return get_app().reverse_vi_search_direction() diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/base.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/base.py new file mode 100644 index 0000000000000000000000000000000000000000..cd95424dc3862e14cfb57f079d7dfdcbb255bd86 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/base.py @@ -0,0 +1,260 @@ +from __future__ import annotations + +from abc import ABCMeta, abstractmethod +from typing import Callable, Iterable, Union + +__all__ = ["Filter", "Never", "Always", "Condition", "FilterOrBool"] + + +class Filter(metaclass=ABCMeta): + """ + Base class for any filter to activate/deactivate a feature, depending on a + condition. + + The return value of ``__call__`` will tell if the feature should be active. + """ + + def __init__(self) -> None: + self._and_cache: dict[Filter, Filter] = {} + self._or_cache: dict[Filter, Filter] = {} + self._invert_result: Filter | None = None + + @abstractmethod + def __call__(self) -> bool: + """ + The actual call to evaluate the filter. + """ + return True + + def __and__(self, other: Filter) -> Filter: + """ + Chaining of filters using the & operator. + """ + assert isinstance(other, Filter), f"Expecting filter, got {other!r}" + + if isinstance(other, Always): + return self + if isinstance(other, Never): + return other + + if other in self._and_cache: + return self._and_cache[other] + + result = _AndList.create([self, other]) + self._and_cache[other] = result + return result + + def __or__(self, other: Filter) -> Filter: + """ + Chaining of filters using the | operator. + """ + assert isinstance(other, Filter), f"Expecting filter, got {other!r}" + + if isinstance(other, Always): + return other + if isinstance(other, Never): + return self + + if other in self._or_cache: + return self._or_cache[other] + + result = _OrList.create([self, other]) + self._or_cache[other] = result + return result + + def __invert__(self) -> Filter: + """ + Inverting of filters using the ~ operator. + """ + if self._invert_result is None: + self._invert_result = _Invert(self) + + return self._invert_result + + def __bool__(self) -> None: + """ + By purpose, we don't allow bool(...) operations directly on a filter, + because the meaning is ambiguous. 
+ + Executing a filter has to be done always by calling it. Providing + defaults for `None` values should be done through an `is None` check + instead of for instance ``filter1 or Always()``. + """ + raise ValueError( + "The truth value of a Filter is ambiguous. Instead, call it as a function." + ) + + +def _remove_duplicates(filters: list[Filter]) -> list[Filter]: + result = [] + for f in filters: + if f not in result: + result.append(f) + return result + + +class _AndList(Filter): + """ + Result of &-operation between several filters. + """ + + def __init__(self, filters: list[Filter]) -> None: + super().__init__() + self.filters = filters + + @classmethod + def create(cls, filters: Iterable[Filter]) -> Filter: + """ + Create a new filter by applying an `&` operator between them. + + If there's only one unique filter in the given iterable, it will return + that one filter instead of an `_AndList`. + """ + filters_2: list[Filter] = [] + + for f in filters: + if isinstance(f, _AndList): # Turn nested _AndLists into one. + filters_2.extend(f.filters) + else: + filters_2.append(f) + + # Remove duplicates. This could speed up execution, and doesn't make a + # difference for the evaluation. + filters = _remove_duplicates(filters_2) + + # If only one filter is left, return that without wrapping into an + # `_AndList`. + if len(filters) == 1: + return filters[0] + + return cls(filters) + + def __call__(self) -> bool: + return all(f() for f in self.filters) + + def __repr__(self) -> str: + return "&".join(repr(f) for f in self.filters) + + +class _OrList(Filter): + """ + Result of |-operation between several filters. + """ + + def __init__(self, filters: list[Filter]) -> None: + super().__init__() + self.filters = filters + + @classmethod + def create(cls, filters: Iterable[Filter]) -> Filter: + """ + Create a new filter by applying an `|` operator between them. + + If there's only one unique filter in the given iterable, it will return + that one filter instead of an `_OrList`. + """ + filters_2: list[Filter] = [] + + for f in filters: + if isinstance(f, _OrList): # Turn nested _AndLists into one. + filters_2.extend(f.filters) + else: + filters_2.append(f) + + # Remove duplicates. This could speed up execution, and doesn't make a + # difference for the evaluation. + filters = _remove_duplicates(filters_2) + + # If only one filter is left, return that without wrapping into an + # `_AndList`. + if len(filters) == 1: + return filters[0] + + return cls(filters) + + def __call__(self) -> bool: + return any(f() for f in self.filters) + + def __repr__(self) -> str: + return "|".join(repr(f) for f in self.filters) + + +class _Invert(Filter): + """ + Negation of another filter. + """ + + def __init__(self, filter: Filter) -> None: + super().__init__() + self.filter = filter + + def __call__(self) -> bool: + return not self.filter() + + def __repr__(self) -> str: + return f"~{self.filter!r}" + + +class Always(Filter): + """ + Always enable feature. + """ + + def __call__(self) -> bool: + return True + + def __or__(self, other: Filter) -> Filter: + return self + + def __and__(self, other: Filter) -> Filter: + return other + + def __invert__(self) -> Never: + return Never() + + +class Never(Filter): + """ + Never enable feature. 
+ """ + + def __call__(self) -> bool: + return False + + def __and__(self, other: Filter) -> Filter: + return self + + def __or__(self, other: Filter) -> Filter: + return other + + def __invert__(self) -> Always: + return Always() + + +class Condition(Filter): + """ + Turn any callable into a Filter. The callable is supposed to not take any + arguments. + + This can be used as a decorator:: + + @Condition + def feature_is_active(): # `feature_is_active` becomes a Filter. + return True + + :param func: Callable which takes no inputs and returns a boolean. + """ + + def __init__(self, func: Callable[[], bool]) -> None: + super().__init__() + self.func = func + + def __call__(self) -> bool: + return self.func() + + def __repr__(self) -> str: + return f"Condition({self.func!r})" + + +# Often used as type annotation. +FilterOrBool = Union[Filter, bool] diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/cli.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/cli.py new file mode 100644 index 0000000000000000000000000000000000000000..902fbaae981296d8bd8143db58463b3a3f9ddd10 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/cli.py @@ -0,0 +1,65 @@ +""" +For backwards-compatibility. keep this file. +(Many people are going to have key bindings that rely on this file.) +""" + +from __future__ import annotations + +from .app import * + +__all__ = [ + # Old names. + "HasArg", + "HasCompletions", + "HasFocus", + "HasSelection", + "HasValidationError", + "IsDone", + "IsReadOnly", + "IsMultiline", + "RendererHeightIsKnown", + "InEditingMode", + "InPasteMode", + "ViMode", + "ViNavigationMode", + "ViInsertMode", + "ViInsertMultipleMode", + "ViReplaceMode", + "ViSelectionMode", + "ViWaitingForTextObjectMode", + "ViDigraphMode", + "EmacsMode", + "EmacsInsertMode", + "EmacsSelectionMode", + "IsSearching", + "HasSearch", + "ControlIsSearchable", +] + +# Keep the original classnames for backwards compatibility. +HasValidationError = lambda: has_validation_error +HasArg = lambda: has_arg +IsDone = lambda: is_done +RendererHeightIsKnown = lambda: renderer_height_is_known +ViNavigationMode = lambda: vi_navigation_mode +InPasteMode = lambda: in_paste_mode +EmacsMode = lambda: emacs_mode +EmacsInsertMode = lambda: emacs_insert_mode +ViMode = lambda: vi_mode +IsSearching = lambda: is_searching +HasSearch = lambda: is_searching +ControlIsSearchable = lambda: control_is_searchable +EmacsSelectionMode = lambda: emacs_selection_mode +ViDigraphMode = lambda: vi_digraph_mode +ViWaitingForTextObjectMode = lambda: vi_waiting_for_text_object_mode +ViSelectionMode = lambda: vi_selection_mode +ViReplaceMode = lambda: vi_replace_mode +ViInsertMultipleMode = lambda: vi_insert_multiple_mode +ViInsertMode = lambda: vi_insert_mode +HasSelection = lambda: has_selection +HasCompletions = lambda: has_completions +IsReadOnly = lambda: is_read_only +IsMultiline = lambda: is_multiline + +HasFocus = has_focus # No lambda here! (Has_focus is callable that returns a callable.) 
+InEditingMode = in_editing_mode diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/utils.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..20e00ee09e3becc6d1e39621e63c1fac86ebe130 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/filters/utils.py @@ -0,0 +1,41 @@ +from __future__ import annotations + +from .base import Always, Filter, FilterOrBool, Never + +__all__ = [ + "to_filter", + "is_true", +] + + +_always = Always() +_never = Never() + + +_bool_to_filter: dict[bool, Filter] = { + True: _always, + False: _never, +} + + +def to_filter(bool_or_filter: FilterOrBool) -> Filter: + """ + Accept both booleans and Filters as input and + turn it into a Filter. + """ + if isinstance(bool_or_filter, bool): + return _bool_to_filter[bool_or_filter] + + if isinstance(bool_or_filter, Filter): + return bool_or_filter + + raise TypeError(f"Expecting a bool or a Filter instance. Got {bool_or_filter!r}") + + +def is_true(value: FilterOrBool) -> bool: + """ + Test whether `value` is True. In case of a Filter, call it. + + :param value: Boolean or `Filter` instance. + """ + return to_filter(value)() diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__init__.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ed8631b4fa73c3799c1ea4db4d2853d5eac556aa --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__init__.py @@ -0,0 +1,14 @@ +from __future__ import annotations + +from .base import DummyInput, Input, PipeInput +from .defaults import create_input, create_pipe_input + +__all__ = [ + # Base. + "Input", + "PipeInput", + "DummyInput", + # Defaults. 
+ "create_input", + "create_pipe_input", +] diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/__init__.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ba2f7f2de8156a251cf641bf445d37412a12dedd Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/__init__.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/ansi_escape_sequences.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/ansi_escape_sequences.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..67340ac3952d62394e346fa05b2e37571ba7b41c Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/ansi_escape_sequences.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/defaults.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/defaults.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04a2cc7e314d63b9daaebcff91f16bc2b933eca8 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/defaults.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/typeahead.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/typeahead.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..92a073795e9bfc312480d0885653365f5fa57e0a Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/typeahead.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/vt100_parser.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/vt100_parser.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dbe5e40efb7274a0480f9b84684504fda2ec545d Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/__pycache__/vt100_parser.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/ansi_escape_sequences.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/ansi_escape_sequences.py new file mode 100644 index 0000000000000000000000000000000000000000..1fba418b7376be59854f931babefbf2922e0e8c3 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/ansi_escape_sequences.py @@ -0,0 +1,344 @@ +""" +Mappings from VT100 (ANSI) escape sequences to the corresponding prompt_toolkit +keys. + +We are not using the terminfo/termcap databases to detect the ANSI escape +sequences for the input. Instead, we recognize 99% of the most common +sequences. This works well, because in practice, every modern terminal is +mostly Xterm compatible. + +Some useful docs: +- Mintty: https://github.com/mintty/mintty/blob/master/wiki/Keycodes.md +""" + +from __future__ import annotations + +from ..keys import Keys + +__all__ = [ + "ANSI_SEQUENCES", + "REVERSE_ANSI_SEQUENCES", +] + +# Mapping of vt100 escape codes to Keys. +ANSI_SEQUENCES: dict[str, Keys | tuple[Keys, ...]] = { + # Control keys. 
+ "\x00": Keys.ControlAt, # Control-At (Also for Ctrl-Space) + "\x01": Keys.ControlA, # Control-A (home) + "\x02": Keys.ControlB, # Control-B (emacs cursor left) + "\x03": Keys.ControlC, # Control-C (interrupt) + "\x04": Keys.ControlD, # Control-D (exit) + "\x05": Keys.ControlE, # Control-E (end) + "\x06": Keys.ControlF, # Control-F (cursor forward) + "\x07": Keys.ControlG, # Control-G + "\x08": Keys.ControlH, # Control-H (8) (Identical to '\b') + "\x09": Keys.ControlI, # Control-I (9) (Identical to '\t') + "\x0a": Keys.ControlJ, # Control-J (10) (Identical to '\n') + "\x0b": Keys.ControlK, # Control-K (delete until end of line; vertical tab) + "\x0c": Keys.ControlL, # Control-L (clear; form feed) + "\x0d": Keys.ControlM, # Control-M (13) (Identical to '\r') + "\x0e": Keys.ControlN, # Control-N (14) (history forward) + "\x0f": Keys.ControlO, # Control-O (15) + "\x10": Keys.ControlP, # Control-P (16) (history back) + "\x11": Keys.ControlQ, # Control-Q + "\x12": Keys.ControlR, # Control-R (18) (reverse search) + "\x13": Keys.ControlS, # Control-S (19) (forward search) + "\x14": Keys.ControlT, # Control-T + "\x15": Keys.ControlU, # Control-U + "\x16": Keys.ControlV, # Control-V + "\x17": Keys.ControlW, # Control-W + "\x18": Keys.ControlX, # Control-X + "\x19": Keys.ControlY, # Control-Y (25) + "\x1a": Keys.ControlZ, # Control-Z + "\x1b": Keys.Escape, # Also Control-[ + "\x9b": Keys.ShiftEscape, + "\x1c": Keys.ControlBackslash, # Both Control-\ (also Ctrl-| ) + "\x1d": Keys.ControlSquareClose, # Control-] + "\x1e": Keys.ControlCircumflex, # Control-^ + "\x1f": Keys.ControlUnderscore, # Control-underscore (Also for Ctrl-hyphen.) + # ASCII Delete (0x7f) + # Vt220 (and Linux terminal) send this when pressing backspace. We map this + # to ControlH, because that will make it easier to create key bindings that + # work everywhere, with the trade-off that it's no longer possible to + # handle backspace and control-h individually for the few terminals that + # support it. (Most terminals send ControlH when backspace is pressed.) + # See: http://www.ibb.net/~anne/keyboard.html + "\x7f": Keys.ControlH, + # -- + # Various + "\x1b[1~": Keys.Home, # tmux + "\x1b[2~": Keys.Insert, + "\x1b[3~": Keys.Delete, + "\x1b[4~": Keys.End, # tmux + "\x1b[5~": Keys.PageUp, + "\x1b[6~": Keys.PageDown, + "\x1b[7~": Keys.Home, # xrvt + "\x1b[8~": Keys.End, # xrvt + "\x1b[Z": Keys.BackTab, # shift + tab + "\x1b\x09": Keys.BackTab, # Linux console + "\x1b[~": Keys.BackTab, # Windows console + # -- + # Function keys. + "\x1bOP": Keys.F1, + "\x1bOQ": Keys.F2, + "\x1bOR": Keys.F3, + "\x1bOS": Keys.F4, + "\x1b[[A": Keys.F1, # Linux console. + "\x1b[[B": Keys.F2, # Linux console. + "\x1b[[C": Keys.F3, # Linux console. + "\x1b[[D": Keys.F4, # Linux console. + "\x1b[[E": Keys.F5, # Linux console. + "\x1b[11~": Keys.F1, # rxvt-unicode + "\x1b[12~": Keys.F2, # rxvt-unicode + "\x1b[13~": Keys.F3, # rxvt-unicode + "\x1b[14~": Keys.F4, # rxvt-unicode + "\x1b[15~": Keys.F5, + "\x1b[17~": Keys.F6, + "\x1b[18~": Keys.F7, + "\x1b[19~": Keys.F8, + "\x1b[20~": Keys.F9, + "\x1b[21~": Keys.F10, + "\x1b[23~": Keys.F11, + "\x1b[24~": Keys.F12, + "\x1b[25~": Keys.F13, + "\x1b[26~": Keys.F14, + "\x1b[28~": Keys.F15, + "\x1b[29~": Keys.F16, + "\x1b[31~": Keys.F17, + "\x1b[32~": Keys.F18, + "\x1b[33~": Keys.F19, + "\x1b[34~": Keys.F20, + # Xterm + "\x1b[1;2P": Keys.F13, + "\x1b[1;2Q": Keys.F14, + # '\x1b[1;2R': Keys.F15, # Conflicts with CPR response. 
+ "\x1b[1;2S": Keys.F16, + "\x1b[15;2~": Keys.F17, + "\x1b[17;2~": Keys.F18, + "\x1b[18;2~": Keys.F19, + "\x1b[19;2~": Keys.F20, + "\x1b[20;2~": Keys.F21, + "\x1b[21;2~": Keys.F22, + "\x1b[23;2~": Keys.F23, + "\x1b[24;2~": Keys.F24, + # -- + # CSI 27 disambiguated modified "other" keys (xterm) + # Ref: https://invisible-island.net/xterm/modified-keys.html + # These are currently unsupported, so just re-map some common ones to the + # unmodified versions + "\x1b[27;2;13~": Keys.ControlM, # Shift + Enter + "\x1b[27;5;13~": Keys.ControlM, # Ctrl + Enter + "\x1b[27;6;13~": Keys.ControlM, # Ctrl + Shift + Enter + # -- + # Control + function keys. + "\x1b[1;5P": Keys.ControlF1, + "\x1b[1;5Q": Keys.ControlF2, + # "\x1b[1;5R": Keys.ControlF3, # Conflicts with CPR response. + "\x1b[1;5S": Keys.ControlF4, + "\x1b[15;5~": Keys.ControlF5, + "\x1b[17;5~": Keys.ControlF6, + "\x1b[18;5~": Keys.ControlF7, + "\x1b[19;5~": Keys.ControlF8, + "\x1b[20;5~": Keys.ControlF9, + "\x1b[21;5~": Keys.ControlF10, + "\x1b[23;5~": Keys.ControlF11, + "\x1b[24;5~": Keys.ControlF12, + "\x1b[1;6P": Keys.ControlF13, + "\x1b[1;6Q": Keys.ControlF14, + # "\x1b[1;6R": Keys.ControlF15, # Conflicts with CPR response. + "\x1b[1;6S": Keys.ControlF16, + "\x1b[15;6~": Keys.ControlF17, + "\x1b[17;6~": Keys.ControlF18, + "\x1b[18;6~": Keys.ControlF19, + "\x1b[19;6~": Keys.ControlF20, + "\x1b[20;6~": Keys.ControlF21, + "\x1b[21;6~": Keys.ControlF22, + "\x1b[23;6~": Keys.ControlF23, + "\x1b[24;6~": Keys.ControlF24, + # -- + # Tmux (Win32 subsystem) sends the following scroll events. + "\x1b[62~": Keys.ScrollUp, + "\x1b[63~": Keys.ScrollDown, + "\x1b[200~": Keys.BracketedPaste, # Start of bracketed paste. + # -- + # Sequences generated by numpad 5. Not sure what it means. (It doesn't + # appear in 'infocmp'. Just ignore. + "\x1b[E": Keys.Ignore, # Xterm. + "\x1b[G": Keys.Ignore, # Linux console. + # -- + # Meta/control/escape + pageup/pagedown/insert/delete. + "\x1b[3;2~": Keys.ShiftDelete, # xterm, gnome-terminal. + "\x1b[5;2~": Keys.ShiftPageUp, + "\x1b[6;2~": Keys.ShiftPageDown, + "\x1b[2;3~": (Keys.Escape, Keys.Insert), + "\x1b[3;3~": (Keys.Escape, Keys.Delete), + "\x1b[5;3~": (Keys.Escape, Keys.PageUp), + "\x1b[6;3~": (Keys.Escape, Keys.PageDown), + "\x1b[2;4~": (Keys.Escape, Keys.ShiftInsert), + "\x1b[3;4~": (Keys.Escape, Keys.ShiftDelete), + "\x1b[5;4~": (Keys.Escape, Keys.ShiftPageUp), + "\x1b[6;4~": (Keys.Escape, Keys.ShiftPageDown), + "\x1b[3;5~": Keys.ControlDelete, # xterm, gnome-terminal. + "\x1b[5;5~": Keys.ControlPageUp, + "\x1b[6;5~": Keys.ControlPageDown, + "\x1b[3;6~": Keys.ControlShiftDelete, + "\x1b[5;6~": Keys.ControlShiftPageUp, + "\x1b[6;6~": Keys.ControlShiftPageDown, + "\x1b[2;7~": (Keys.Escape, Keys.ControlInsert), + "\x1b[5;7~": (Keys.Escape, Keys.ControlPageDown), + "\x1b[6;7~": (Keys.Escape, Keys.ControlPageDown), + "\x1b[2;8~": (Keys.Escape, Keys.ControlShiftInsert), + "\x1b[5;8~": (Keys.Escape, Keys.ControlShiftPageDown), + "\x1b[6;8~": (Keys.Escape, Keys.ControlShiftPageDown), + # -- + # Arrows. + # (Normal cursor mode). + "\x1b[A": Keys.Up, + "\x1b[B": Keys.Down, + "\x1b[C": Keys.Right, + "\x1b[D": Keys.Left, + "\x1b[H": Keys.Home, + "\x1b[F": Keys.End, + # Tmux sends following keystrokes when control+arrow is pressed, but for + # Emacs ansi-term sends the same sequences for normal arrow keys. Consider + # it a normal arrow press, because that's more important. + # (Application cursor mode). 
+ "\x1bOA": Keys.Up, + "\x1bOB": Keys.Down, + "\x1bOC": Keys.Right, + "\x1bOD": Keys.Left, + "\x1bOF": Keys.End, + "\x1bOH": Keys.Home, + # Shift + arrows. + "\x1b[1;2A": Keys.ShiftUp, + "\x1b[1;2B": Keys.ShiftDown, + "\x1b[1;2C": Keys.ShiftRight, + "\x1b[1;2D": Keys.ShiftLeft, + "\x1b[1;2F": Keys.ShiftEnd, + "\x1b[1;2H": Keys.ShiftHome, + # Meta + arrow keys. Several terminals handle this differently. + # The following sequences are for xterm and gnome-terminal. + # (Iterm sends ESC followed by the normal arrow_up/down/left/right + # sequences, and the OSX Terminal sends ESCb and ESCf for "alt + # arrow_left" and "alt arrow_right." We don't handle these + # explicitly, in here, because would could not distinguish between + # pressing ESC (to go to Vi navigation mode), followed by just the + # 'b' or 'f' key. These combinations are handled in + # the input processor.) + "\x1b[1;3A": (Keys.Escape, Keys.Up), + "\x1b[1;3B": (Keys.Escape, Keys.Down), + "\x1b[1;3C": (Keys.Escape, Keys.Right), + "\x1b[1;3D": (Keys.Escape, Keys.Left), + "\x1b[1;3F": (Keys.Escape, Keys.End), + "\x1b[1;3H": (Keys.Escape, Keys.Home), + # Alt+shift+number. + "\x1b[1;4A": (Keys.Escape, Keys.ShiftDown), + "\x1b[1;4B": (Keys.Escape, Keys.ShiftUp), + "\x1b[1;4C": (Keys.Escape, Keys.ShiftRight), + "\x1b[1;4D": (Keys.Escape, Keys.ShiftLeft), + "\x1b[1;4F": (Keys.Escape, Keys.ShiftEnd), + "\x1b[1;4H": (Keys.Escape, Keys.ShiftHome), + # Control + arrows. + "\x1b[1;5A": Keys.ControlUp, # Cursor Mode + "\x1b[1;5B": Keys.ControlDown, # Cursor Mode + "\x1b[1;5C": Keys.ControlRight, # Cursor Mode + "\x1b[1;5D": Keys.ControlLeft, # Cursor Mode + "\x1b[1;5F": Keys.ControlEnd, + "\x1b[1;5H": Keys.ControlHome, + # Tmux sends following keystrokes when control+arrow is pressed, but for + # Emacs ansi-term sends the same sequences for normal arrow keys. Consider + # it a normal arrow press, because that's more important. + "\x1b[5A": Keys.ControlUp, + "\x1b[5B": Keys.ControlDown, + "\x1b[5C": Keys.ControlRight, + "\x1b[5D": Keys.ControlLeft, + "\x1bOc": Keys.ControlRight, # rxvt + "\x1bOd": Keys.ControlLeft, # rxvt + # Control + shift + arrows. + "\x1b[1;6A": Keys.ControlShiftDown, + "\x1b[1;6B": Keys.ControlShiftUp, + "\x1b[1;6C": Keys.ControlShiftRight, + "\x1b[1;6D": Keys.ControlShiftLeft, + "\x1b[1;6F": Keys.ControlShiftEnd, + "\x1b[1;6H": Keys.ControlShiftHome, + # Control + Meta + arrows. + "\x1b[1;7A": (Keys.Escape, Keys.ControlDown), + "\x1b[1;7B": (Keys.Escape, Keys.ControlUp), + "\x1b[1;7C": (Keys.Escape, Keys.ControlRight), + "\x1b[1;7D": (Keys.Escape, Keys.ControlLeft), + "\x1b[1;7F": (Keys.Escape, Keys.ControlEnd), + "\x1b[1;7H": (Keys.Escape, Keys.ControlHome), + # Meta + Shift + arrows. + "\x1b[1;8A": (Keys.Escape, Keys.ControlShiftDown), + "\x1b[1;8B": (Keys.Escape, Keys.ControlShiftUp), + "\x1b[1;8C": (Keys.Escape, Keys.ControlShiftRight), + "\x1b[1;8D": (Keys.Escape, Keys.ControlShiftLeft), + "\x1b[1;8F": (Keys.Escape, Keys.ControlShiftEnd), + "\x1b[1;8H": (Keys.Escape, Keys.ControlShiftHome), + # Meta + arrow on (some?) Macs when using iTerm defaults (see issue #483). + "\x1b[1;9A": (Keys.Escape, Keys.Up), + "\x1b[1;9B": (Keys.Escape, Keys.Down), + "\x1b[1;9C": (Keys.Escape, Keys.Right), + "\x1b[1;9D": (Keys.Escape, Keys.Left), + # -- + # Control/shift/meta + number in mintty. + # (c-2 will actually send c-@ and c-6 will send c-^.) 
+ "\x1b[1;5p": Keys.Control0, + "\x1b[1;5q": Keys.Control1, + "\x1b[1;5r": Keys.Control2, + "\x1b[1;5s": Keys.Control3, + "\x1b[1;5t": Keys.Control4, + "\x1b[1;5u": Keys.Control5, + "\x1b[1;5v": Keys.Control6, + "\x1b[1;5w": Keys.Control7, + "\x1b[1;5x": Keys.Control8, + "\x1b[1;5y": Keys.Control9, + "\x1b[1;6p": Keys.ControlShift0, + "\x1b[1;6q": Keys.ControlShift1, + "\x1b[1;6r": Keys.ControlShift2, + "\x1b[1;6s": Keys.ControlShift3, + "\x1b[1;6t": Keys.ControlShift4, + "\x1b[1;6u": Keys.ControlShift5, + "\x1b[1;6v": Keys.ControlShift6, + "\x1b[1;6w": Keys.ControlShift7, + "\x1b[1;6x": Keys.ControlShift8, + "\x1b[1;6y": Keys.ControlShift9, + "\x1b[1;7p": (Keys.Escape, Keys.Control0), + "\x1b[1;7q": (Keys.Escape, Keys.Control1), + "\x1b[1;7r": (Keys.Escape, Keys.Control2), + "\x1b[1;7s": (Keys.Escape, Keys.Control3), + "\x1b[1;7t": (Keys.Escape, Keys.Control4), + "\x1b[1;7u": (Keys.Escape, Keys.Control5), + "\x1b[1;7v": (Keys.Escape, Keys.Control6), + "\x1b[1;7w": (Keys.Escape, Keys.Control7), + "\x1b[1;7x": (Keys.Escape, Keys.Control8), + "\x1b[1;7y": (Keys.Escape, Keys.Control9), + "\x1b[1;8p": (Keys.Escape, Keys.ControlShift0), + "\x1b[1;8q": (Keys.Escape, Keys.ControlShift1), + "\x1b[1;8r": (Keys.Escape, Keys.ControlShift2), + "\x1b[1;8s": (Keys.Escape, Keys.ControlShift3), + "\x1b[1;8t": (Keys.Escape, Keys.ControlShift4), + "\x1b[1;8u": (Keys.Escape, Keys.ControlShift5), + "\x1b[1;8v": (Keys.Escape, Keys.ControlShift6), + "\x1b[1;8w": (Keys.Escape, Keys.ControlShift7), + "\x1b[1;8x": (Keys.Escape, Keys.ControlShift8), + "\x1b[1;8y": (Keys.Escape, Keys.ControlShift9), +} + + +def _get_reverse_ansi_sequences() -> dict[Keys, str]: + """ + Create a dictionary that maps prompt_toolkit keys back to the VT100 escape + sequences. + """ + result: dict[Keys, str] = {} + + for sequence, key in ANSI_SEQUENCES.items(): + if not isinstance(key, tuple): + if key not in result: + result[key] = sequence + + return result + + +REVERSE_ANSI_SEQUENCES = _get_reverse_ansi_sequences() diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/base.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/base.py new file mode 100644 index 0000000000000000000000000000000000000000..3dcb994bd95b4e7ed333374364e37cc7c0793110 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/base.py @@ -0,0 +1,153 @@ +""" +Abstraction of CLI Input. +""" + +from __future__ import annotations + +from abc import ABCMeta, abstractmethod, abstractproperty +from contextlib import contextmanager +from typing import Callable, ContextManager, Generator + +from prompt_toolkit.key_binding import KeyPress + +__all__ = [ + "Input", + "PipeInput", + "DummyInput", +] + + +class Input(metaclass=ABCMeta): + """ + Abstraction for any input. + + An instance of this class can be given to the constructor of a + :class:`~prompt_toolkit.application.Application` and will also be + passed to the :class:`~prompt_toolkit.eventloop.base.EventLoop`. + """ + + @abstractmethod + def fileno(self) -> int: + """ + Fileno for putting this in an event loop. + """ + + @abstractmethod + def typeahead_hash(self) -> str: + """ + Identifier for storing type ahead key presses. + """ + + @abstractmethod + def read_keys(self) -> list[KeyPress]: + """ + Return a list of Key objects which are read/parsed from the input. + """ + + def flush_keys(self) -> list[KeyPress]: + """ + Flush the underlying parser. and return the pending keys. + (Used for vt100 input.) 
+ """ + return [] + + def flush(self) -> None: + "The event loop can call this when the input has to be flushed." + pass + + @abstractproperty + def closed(self) -> bool: + "Should be true when the input stream is closed." + return False + + @abstractmethod + def raw_mode(self) -> ContextManager[None]: + """ + Context manager that turns the input into raw mode. + """ + + @abstractmethod + def cooked_mode(self) -> ContextManager[None]: + """ + Context manager that turns the input into cooked mode. + """ + + @abstractmethod + def attach(self, input_ready_callback: Callable[[], None]) -> ContextManager[None]: + """ + Return a context manager that makes this input active in the current + event loop. + """ + + @abstractmethod + def detach(self) -> ContextManager[None]: + """ + Return a context manager that makes sure that this input is not active + in the current event loop. + """ + + def close(self) -> None: + "Close input." + pass + + +class PipeInput(Input): + """ + Abstraction for pipe input. + """ + + @abstractmethod + def send_bytes(self, data: bytes) -> None: + """Feed byte string into the pipe""" + + @abstractmethod + def send_text(self, data: str) -> None: + """Feed a text string into the pipe""" + + +class DummyInput(Input): + """ + Input for use in a `DummyApplication` + + If used in an actual application, it will make the application render + itself once and exit immediately, due to an `EOFError`. + """ + + def fileno(self) -> int: + raise NotImplementedError + + def typeahead_hash(self) -> str: + return f"dummy-{id(self)}" + + def read_keys(self) -> list[KeyPress]: + return [] + + @property + def closed(self) -> bool: + # This needs to be true, so that the dummy input will trigger an + # `EOFError` immediately in the application. + return True + + def raw_mode(self) -> ContextManager[None]: + return _dummy_context_manager() + + def cooked_mode(self) -> ContextManager[None]: + return _dummy_context_manager() + + def attach(self, input_ready_callback: Callable[[], None]) -> ContextManager[None]: + # Call the callback immediately once after attaching. + # This tells the callback to call `read_keys` and check the + # `input.closed` flag, after which it won't receive any keys, but knows + # that `EOFError` should be raised. This unblocks `read_from_input` in + # `application.py`. + input_ready_callback() + + return _dummy_context_manager() + + def detach(self) -> ContextManager[None]: + return _dummy_context_manager() + + +@contextmanager +def _dummy_context_manager() -> Generator[None, None, None]: + yield diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/defaults.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..483eeb2092e48119776b341da92d66e2f549abbf --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/defaults.py @@ -0,0 +1,79 @@ +from __future__ import annotations + +import io +import sys +from typing import ContextManager, TextIO + +from .base import DummyInput, Input, PipeInput + +__all__ = [ + "create_input", + "create_pipe_input", +] + + +def create_input(stdin: TextIO | None = None, always_prefer_tty: bool = False) -> Input: + """ + Create the appropriate `Input` object for the current os/environment. + + :param always_prefer_tty: When set, if `sys.stdin` is connected to a Unix + `pipe`, check whether `sys.stdout` or `sys.stderr` are connected to a + pseudo terminal. If so, open the tty for reading instead of reading for + `sys.stdin`. 
(We can open `stdout` or `stderr` for reading, this is how + a `$PAGER` works.) + """ + if sys.platform == "win32": + from .win32 import Win32Input + + # If `stdin` was assigned `None` (which happens with pythonw.exe), use + # a `DummyInput`. This triggers `EOFError` in the application code. + if stdin is None and sys.stdin is None: + return DummyInput() + + return Win32Input(stdin or sys.stdin) + else: + from .vt100 import Vt100Input + + # If no input TextIO is given, use stdin/stdout. + if stdin is None: + stdin = sys.stdin + + if always_prefer_tty: + for obj in [sys.stdin, sys.stdout, sys.stderr]: + if obj.isatty(): + stdin = obj + break + + # If we can't access the file descriptor for the selected stdin, return + # a `DummyInput` instead. This can happen for instance in unit tests, + # when `sys.stdin` is patched by something that's not an actual file. + # (Instantiating `Vt100Input` would fail in this case.) + try: + stdin.fileno() + except io.UnsupportedOperation: + return DummyInput() + + return Vt100Input(stdin) + + +def create_pipe_input() -> ContextManager[PipeInput]: + """ + Create an input pipe. + This is mostly useful for unit testing. + + Usage:: + + with create_pipe_input() as input: + input.send_text('inputdata') + + Breaking change: In prompt_toolkit 3.0.28 and earlier, this was returning + the `PipeInput` directly, rather than through a context manager. + """ + if sys.platform == "win32": + from .win32_pipe import Win32PipeInput + + return Win32PipeInput.create() + else: + from .posix_pipe import PosixPipeInput + + return PosixPipeInput.create() diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/posix_pipe.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/posix_pipe.py new file mode 100644 index 0000000000000000000000000000000000000000..c131fb816e3461088643b4d79797e75c96129b65 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/posix_pipe.py @@ -0,0 +1,118 @@ +from __future__ import annotations + +import sys + +assert sys.platform != "win32" + +import os +from contextlib import contextmanager +from typing import ContextManager, Iterator, TextIO, cast + +from ..utils import DummyContext +from .base import PipeInput +from .vt100 import Vt100Input + +__all__ = [ + "PosixPipeInput", +] + + +class _Pipe: + "Wrapper around os.pipe, that ensures we don't double close any end." + + def __init__(self) -> None: + self.read_fd, self.write_fd = os.pipe() + self._read_closed = False + self._write_closed = False + + def close_read(self) -> None: + "Close read-end if not yet closed." + if self._read_closed: + return + + os.close(self.read_fd) + self._read_closed = True + + def close_write(self) -> None: + "Close write-end if not yet closed." + if self._write_closed: + return + + os.close(self.write_fd) + self._write_closed = True + + def close(self) -> None: + "Close both read and write ends." + self.close_read() + self.close_write() + + +class PosixPipeInput(Vt100Input, PipeInput): + """ + Input that is send through a pipe. + This is useful if we want to send the input programmatically into the + application. Mostly useful for unit testing. + + Usage:: + + with PosixPipeInput.create() as input: + input.send_text('inputdata') + """ + + _id = 0 + + def __init__(self, _pipe: _Pipe, _text: str = "") -> None: + # Private constructor. Users should use the public `.create()` method. 
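`create_pipe_input` plus a `DummyOutput` is the standard unit-testing pattern, matching the usage shown in the docstring above::

    from prompt_toolkit.input import create_pipe_input
    from prompt_toolkit.output import DummyOutput
    from prompt_toolkit.shortcuts import PromptSession

    with create_pipe_input() as pipe_input:
        pipe_input.send_text("hello\n")   # "\n" accepts the input, like Enter
        session = PromptSession(input=pipe_input, output=DummyOutput())
        assert session.prompt() == "hello"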
+ self.pipe = _pipe + + class Stdin: + encoding = "utf-8" + + def isatty(stdin) -> bool: + return True + + def fileno(stdin) -> int: + return self.pipe.read_fd + + super().__init__(cast(TextIO, Stdin())) + self.send_text(_text) + + # Identifier for every PipeInput for the hash. + self.__class__._id += 1 + self._id = self.__class__._id + + @classmethod + @contextmanager + def create(cls, text: str = "") -> Iterator[PosixPipeInput]: + pipe = _Pipe() + try: + yield PosixPipeInput(_pipe=pipe, _text=text) + finally: + pipe.close() + + def send_bytes(self, data: bytes) -> None: + os.write(self.pipe.write_fd, data) + + def send_text(self, data: str) -> None: + "Send text to the input." + os.write(self.pipe.write_fd, data.encode("utf-8")) + + def raw_mode(self) -> ContextManager[None]: + return DummyContext() + + def cooked_mode(self) -> ContextManager[None]: + return DummyContext() + + def close(self) -> None: + "Close pipe fds." + # Only close the write-end of the pipe. This will unblock the reader + # callback (in vt100.py > _attached_input), which eventually will raise + # `EOFError`. If we'd also close the read-end, then the event loop + # won't wake up the corresponding callback because of this. + self.pipe.close_write() + + def typeahead_hash(self) -> str: + """ + This needs to be unique for every `PipeInput`. + """ + return f"pipe-input-{self._id}" diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/posix_utils.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/posix_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..4a78dc421b75620c1ed8ae9f0aba254555535a8c --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/posix_utils.py @@ -0,0 +1,97 @@ +from __future__ import annotations + +import os +import select +from codecs import getincrementaldecoder + +__all__ = [ + "PosixStdinReader", +] + + +class PosixStdinReader: + """ + Wrapper around stdin which reads (nonblocking) the next available 1024 + bytes and decodes it. + + Note that you can't be sure that the input file is closed if the ``read`` + function returns an empty string. When ``errors=ignore`` is passed, + ``read`` can return an empty string if all malformed input was replaced by + an empty string. (We can't block here and wait for more input.) So, because + of that, check the ``closed`` attribute, to be sure that the file has been + closed. + + :param stdin_fd: File descriptor from which we read. + :param errors: Can be 'ignore', 'strict' or 'replace'. + On Python3, this can be 'surrogateescape', which is the default. + + 'surrogateescape' is preferred, because this allows us to transfer + unrecognized bytes to the key bindings. Some terminals, like lxterminal + and Guake, use the 'Mxx' notation to send mouse events, where each 'x' + can be any possible byte. + """ + + # By default, we want to 'ignore' errors here. The input stream can be full + # of junk. One occurrence of this that I had was when using iTerm2 on OS X, + # with "Option as Meta" checked (You should choose "Option as +Esc".) + + def __init__( + self, stdin_fd: int, errors: str = "surrogateescape", encoding: str = "utf-8" + ) -> None: + self.stdin_fd = stdin_fd + self.errors = errors + + # Create incremental decoder for decoding stdin. + # We can not just do `os.read(stdin.fileno(), 1024).decode('utf-8')`, because + # it could be that we are in the middle of a utf-8 byte sequence. 
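`close` above deliberately closes only the write end. The POSIX behaviour it relies on: a reader first drains any pending data and then sees EOF as an empty read, not an error. A standalone demonstration::

    import os

    read_fd, write_fd = os.pipe()
    os.write(write_fd, b"abc")
    os.close(write_fd)                        # close the write end only

    assert os.read(read_fd, 1024) == b"abc"   # pending data is still delivered
    assert os.read(read_fd, 1024) == b""      # then EOF: empty read, no error
    os.close(read_fd)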
+ self._stdin_decoder_cls = getincrementaldecoder(encoding) + self._stdin_decoder = self._stdin_decoder_cls(errors=errors) + + #: True when there is nothing anymore to read. + self.closed = False + + def read(self, count: int = 1024) -> str: + # By default we choose a rather small chunk size, because reading + # big amounts of input at once, causes the event loop to process + # all these key bindings also at once without going back to the + # loop. This will make the application feel unresponsive. + """ + Read the input and return it as a string. + + Return the text. Note that this can return an empty string, even when + the input stream was not yet closed. This means that something went + wrong during the decoding. + """ + if self.closed: + return "" + + # Check whether there is some input to read. `os.read` would block + # otherwise. + # (Actually, the event loop is responsible to make sure that this + # function is only called when there is something to read, but for some + # reason this happens in certain situations.) + try: + if not select.select([self.stdin_fd], [], [], 0)[0]: + return "" + except OSError: + # Happens for instance when the file descriptor was closed. + # (We had this in ptterm, where the FD became ready, a callback was + # scheduled, but in the meantime another callback closed it already.) + self.closed = True + + # Note: the following works better than wrapping `self.stdin` like + # `codecs.getreader('utf-8')(stdin)` and doing `read(1)`. + # Somehow that causes some latency when the escape + # character is pressed. (Especially on combination with the `select`.) + try: + data = os.read(self.stdin_fd, count) + + # Nothing more to read, stream is closed. + if data == b"": + self.closed = True + return "" + except OSError: + # In case of SIGWINCH + data = b"" + + return self._stdin_decoder.decode(data) diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/typeahead.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/typeahead.py new file mode 100644 index 0000000000000000000000000000000000000000..f8faa93289e8493f339555e2959455a497d803da --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/typeahead.py @@ -0,0 +1,78 @@ +r""" +Store input key strokes if we did read more than was required. + +The input classes `Vt100Input` and `Win32Input` read the input text in chunks +of a few kilobytes. This means that if we read input from stdin, it could be +that we read a couple of lines (with newlines in between) at once. + +This creates a problem: potentially, we read too much from stdin. Sometimes +people paste several lines at once because they paste input in a REPL and +expect each input() call to process one line. Or they rely on type ahead +because the application can't keep up with the processing. + +However, we need to read input in bigger chunks. We need this mostly to support +pasting of larger chunks of text. We don't want everything to become +unresponsive because we: + - read one character; + - parse one character; + - call the key binding, which does a string operation with one character; + - and render the user interface. +Doing text operations on single characters is very inefficient in Python, so we +prefer to work on bigger chunks of text. This is why we have to read the input +in bigger chunks. + +Further, line buffering is also not an option, because it doesn't work well in +the architecture. We use lower level Posix APIs, that work better with the +event loop and so on. 
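`read` feeds `os.read` chunks through the incremental decoder created above precisely because a multi-byte UTF-8 sequence can straddle two chunks; calling `.decode()` per chunk would raise. A small self-contained check::

    from codecs import getincrementaldecoder

    decoder = getincrementaldecoder("utf-8")(errors="surrogateescape")

    data = "é".encode()                       # b"\xc3\xa9": two bytes, one character
    assert decoder.decode(data[:1]) == ""     # partial sequence is buffered
    assert decoder.decode(data[1:]) == "é"    # completed by the next chunk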
In fact, there is also nothing that defines that only \n +can accept the input, you could create a key binding for any key to accept the +input. + +To support type ahead, this module will store all the key strokes that were +read too early, so that they can be feed into to the next `prompt()` call or to +the next prompt_toolkit `Application`. +""" + +from __future__ import annotations + +from collections import defaultdict + +from ..key_binding import KeyPress +from .base import Input + +__all__ = [ + "store_typeahead", + "get_typeahead", + "clear_typeahead", +] + +_buffer: dict[str, list[KeyPress]] = defaultdict(list) + + +def store_typeahead(input_obj: Input, key_presses: list[KeyPress]) -> None: + """ + Insert typeahead key presses for the given input. + """ + global _buffer + key = input_obj.typeahead_hash() + _buffer[key].extend(key_presses) + + +def get_typeahead(input_obj: Input) -> list[KeyPress]: + """ + Retrieve typeahead and reset the buffer for this input. + """ + global _buffer + + key = input_obj.typeahead_hash() + result = _buffer[key] + _buffer[key] = [] + return result + + +def clear_typeahead(input_obj: Input) -> None: + """ + Clear typeahead buffer. + """ + global _buffer + key = input_obj.typeahead_hash() + _buffer[key] = [] diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/vt100.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/vt100.py new file mode 100644 index 0000000000000000000000000000000000000000..c1660de9561ece11949ca3dbf8afd23b3f2f1abc --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/vt100.py @@ -0,0 +1,309 @@ +from __future__ import annotations + +import sys + +assert sys.platform != "win32" + +import contextlib +import io +import termios +import tty +from asyncio import AbstractEventLoop, get_running_loop +from typing import Callable, ContextManager, Generator, TextIO + +from ..key_binding import KeyPress +from .base import Input +from .posix_utils import PosixStdinReader +from .vt100_parser import Vt100Parser + +__all__ = [ + "Vt100Input", + "raw_mode", + "cooked_mode", +] + + +class Vt100Input(Input): + """ + Vt100 input for Posix systems. + (This uses a posix file descriptor that can be registered in the event loop.) + """ + + # For the error messages. Only display "Input is not a terminal" once per + # file descriptor. + _fds_not_a_terminal: set[int] = set() + + def __init__(self, stdin: TextIO) -> None: + # Test whether the given input object has a file descriptor. + # (Idle reports stdin to be a TTY, but fileno() is not implemented.) + try: + # This should not raise, but can return 0. + stdin.fileno() + except io.UnsupportedOperation as e: + if "idlelib.run" in sys.modules: + raise io.UnsupportedOperation( + "Stdin is not a terminal. Running from Idle is not supported." + ) from e + else: + raise io.UnsupportedOperation("Stdin is not a terminal.") from e + + # Even when we have a file descriptor, it doesn't mean it's a TTY. + # Normally, this requires a real TTY device, but people instantiate + # this class often during unit tests as well. They use for instance + # pexpect to pipe data into an application. For convenience, we print + # an error message and go on. + isatty = stdin.isatty() + fd = stdin.fileno() + + if not isatty and fd not in Vt100Input._fds_not_a_terminal: + msg = "Warning: Input is not a terminal (fd=%r).\n" + sys.stderr.write(msg % fd) + sys.stderr.flush() + Vt100Input._fds_not_a_terminal.add(fd) + + # + self.stdin = stdin + + # Create a backup of the fileno(). 
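The typeahead buffer is keyed on `typeahead_hash()`, so stored keys come back only for the same input object, and `get_typeahead` resets the slot. A quick round-trip sketch::

    from prompt_toolkit.input import DummyInput
    from prompt_toolkit.input.typeahead import get_typeahead, store_typeahead
    from prompt_toolkit.key_binding import KeyPress

    dummy = DummyInput()
    store_typeahead(dummy, [KeyPress("a"), KeyPress("b")])

    assert [kp.key for kp in get_typeahead(dummy)] == ["a", "b"]
    assert get_typeahead(dummy) == []   # the first call reset the buffer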
We want this to work even if the + # underlying file is closed, so that `typeahead_hash()` keeps working. + self._fileno = stdin.fileno() + + self._buffer: list[KeyPress] = [] # Buffer to collect the Key objects. + self.stdin_reader = PosixStdinReader(self._fileno, encoding=stdin.encoding) + self.vt100_parser = Vt100Parser( + lambda key_press: self._buffer.append(key_press) + ) + + def attach(self, input_ready_callback: Callable[[], None]) -> ContextManager[None]: + """ + Return a context manager that makes this input active in the current + event loop. + """ + return _attached_input(self, input_ready_callback) + + def detach(self) -> ContextManager[None]: + """ + Return a context manager that makes sure that this input is not active + in the current event loop. + """ + return _detached_input(self) + + def read_keys(self) -> list[KeyPress]: + "Read list of KeyPress." + # Read text from stdin. + data = self.stdin_reader.read() + + # Pass it through our vt100 parser. + self.vt100_parser.feed(data) + + # Return result. + result = self._buffer + self._buffer = [] + return result + + def flush_keys(self) -> list[KeyPress]: + """ + Flush pending keys and return them. + (Used for flushing the 'escape' key.) + """ + # Flush all pending keys. (This is most important to flush the vt100 + # 'Escape' key early when nothing else follows.) + self.vt100_parser.flush() + + # Return result. + result = self._buffer + self._buffer = [] + return result + + @property + def closed(self) -> bool: + return self.stdin_reader.closed + + def raw_mode(self) -> ContextManager[None]: + return raw_mode(self.stdin.fileno()) + + def cooked_mode(self) -> ContextManager[None]: + return cooked_mode(self.stdin.fileno()) + + def fileno(self) -> int: + return self.stdin.fileno() + + def typeahead_hash(self) -> str: + return f"fd-{self._fileno}" + + +_current_callbacks: dict[ + tuple[AbstractEventLoop, int], Callable[[], None] | None +] = {} # (loop, fd) -> current callback + + +@contextlib.contextmanager +def _attached_input( + input: Vt100Input, callback: Callable[[], None] +) -> Generator[None, None, None]: + """ + Context manager that makes this input active in the current event loop. + + :param input: :class:`~prompt_toolkit.input.Input` object. + :param callback: Called when the input is ready to read. + """ + loop = get_running_loop() + fd = input.fileno() + previous = _current_callbacks.get((loop, fd)) + + def callback_wrapper() -> None: + """Wrapper around the callback that already removes the reader when + the input is closed. Otherwise, we keep continuously calling this + callback, until we leave the context manager (which can happen a bit + later). This fixes issues when piping /dev/null into a prompt_toolkit + application.""" + if input.closed: + loop.remove_reader(fd) + callback() + + try: + loop.add_reader(fd, callback_wrapper) + except PermissionError: + # For `EPollSelector`, adding /dev/null to the event loop will raise + # `PermissionError` (that doesn't happen for `SelectSelector` + # apparently). Whenever we get a `PermissionError`, we can raise + # `EOFError`, because there's not more to be read anyway. `EOFError` is + # an exception that people expect in + # `prompt_toolkit.application.Application.run()`. 
+ # To reproduce, do: `ptpython 0< /dev/null 1< /dev/null` + raise EOFError + + _current_callbacks[loop, fd] = callback + + try: + yield + finally: + loop.remove_reader(fd) + + if previous: + loop.add_reader(fd, previous) + _current_callbacks[loop, fd] = previous + else: + del _current_callbacks[loop, fd] + + +@contextlib.contextmanager +def _detached_input(input: Vt100Input) -> Generator[None, None, None]: + loop = get_running_loop() + fd = input.fileno() + previous = _current_callbacks.get((loop, fd)) + + if previous: + loop.remove_reader(fd) + _current_callbacks[loop, fd] = None + + try: + yield + finally: + if previous: + loop.add_reader(fd, previous) + _current_callbacks[loop, fd] = previous + + +class raw_mode: + """ + :: + + with raw_mode(stdin): + ''' the pseudo-terminal stdin is now used in raw mode ''' + + We ignore errors when executing `tcgetattr` fails. + """ + + # There are several reasons for ignoring errors: + # 1. To avoid the "Inappropriate ioctl for device" crash if somebody would + # execute this code (In a Python REPL, for instance): + # + # import os; f = open(os.devnull); os.dup2(f.fileno(), 0) + # + # The result is that the eventloop will stop correctly, because it has + # to logic to quit when stdin is closed. However, we should not fail at + # this point. See: + # https://github.com/jonathanslenders/python-prompt-toolkit/pull/393 + # https://github.com/jonathanslenders/python-prompt-toolkit/issues/392 + + # 2. Related, when stdin is an SSH pipe, and no full terminal was allocated. + # See: https://github.com/jonathanslenders/python-prompt-toolkit/pull/165 + def __init__(self, fileno: int) -> None: + self.fileno = fileno + self.attrs_before: list[int | list[bytes | int]] | None + try: + self.attrs_before = termios.tcgetattr(fileno) + except termios.error: + # Ignore attribute errors. + self.attrs_before = None + + def __enter__(self) -> None: + # NOTE: On os X systems, using pty.setraw() fails. Therefor we are using this: + try: + newattr = termios.tcgetattr(self.fileno) + except termios.error: + pass + else: + newattr[tty.LFLAG] = self._patch_lflag(newattr[tty.LFLAG]) + newattr[tty.IFLAG] = self._patch_iflag(newattr[tty.IFLAG]) + + # VMIN defines the number of characters read at a time in + # non-canonical mode. It seems to default to 1 on Linux, but on + # Solaris and derived operating systems it defaults to 4. (This is + # because the VMIN slot is the same as the VEOF slot, which + # defaults to ASCII EOT = Ctrl-D = 4.) + newattr[tty.CC][termios.VMIN] = 1 + + termios.tcsetattr(self.fileno, termios.TCSANOW, newattr) + + @classmethod + def _patch_lflag(cls, attrs: int) -> int: + return attrs & ~(termios.ECHO | termios.ICANON | termios.IEXTEN | termios.ISIG) + + @classmethod + def _patch_iflag(cls, attrs: int) -> int: + return attrs & ~( + # Disable XON/XOFF flow control on output and input. + # (Don't capture Ctrl-S and Ctrl-Q.) + # Like executing: "stty -ixon." + termios.IXON + | termios.IXOFF + | + # Don't translate carriage return into newline on input. + termios.ICRNL + | termios.INLCR + | termios.IGNCR + ) + + def __exit__(self, *a: object) -> None: + if self.attrs_before is not None: + try: + termios.tcsetattr(self.fileno, termios.TCSANOW, self.attrs_before) + except termios.error: + pass + + # # Put the terminal in application mode. + # self._stdout.write('\x1b[?1h') + + +class cooked_mode(raw_mode): + """ + The opposite of ``raw_mode``, used when we need cooked mode inside a + `raw_mode` block. 
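`raw_mode` above clears `ECHO` and `ICANON` (among others) and sets `VMIN = 1`, so a single keypress becomes readable at once. A minimal sketch, assuming a real POSIX TTY on stdin::

    import os
    import sys

    from prompt_toolkit.input.vt100 import raw_mode

    fd = sys.stdin.fileno()
    with raw_mode(fd):
        key = os.read(fd, 1)   # returns after one keypress, unechoed

    print(f"got {key!r}")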
Used in `Application.run_in_terminal`.:: + + with cooked_mode(stdin): + ''' the pseudo-terminal stdin is now used in cooked mode. ''' + """ + + @classmethod + def _patch_lflag(cls, attrs: int) -> int: + return attrs | (termios.ECHO | termios.ICANON | termios.IEXTEN | termios.ISIG) + + @classmethod + def _patch_iflag(cls, attrs: int) -> int: + # Turn the ICRNL flag back on. (Without this, calling `input()` in + # run_in_terminal doesn't work and displays ^M instead. Ptpython + # evaluates commands using `run_in_terminal`, so it's important that + # they translate ^M back into ^J.) + return attrs | termios.ICRNL diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/vt100_parser.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/vt100_parser.py new file mode 100644 index 0000000000000000000000000000000000000000..73dbce3d83d6685ab0586b07b6583e30b406ed57 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/vt100_parser.py @@ -0,0 +1,250 @@ +""" +Parser for VT100 input stream. +""" + +from __future__ import annotations + +import re +from typing import Callable, Dict, Generator + +from ..key_binding.key_processor import KeyPress +from ..keys import Keys +from .ansi_escape_sequences import ANSI_SEQUENCES + +__all__ = [ + "Vt100Parser", +] + + +# Regex matching any CPR response +# (Note that we use '\Z' instead of '$', because '$' could include a trailing +# newline.) +_cpr_response_re = re.compile("^" + re.escape("\x1b[") + r"\d+;\d+R\Z") + +# Mouse events: +# Typical: "Esc[MaB*" Urxvt: "Esc[96;14;13M" and for Xterm SGR: "Esc[<64;85;12M" +_mouse_event_re = re.compile("^" + re.escape("\x1b[") + r"(<?[\d;]+[mM]|M...)\Z") + +# Regex matching any valid prefix of a CPR response. +# (Note that it doesn't contain the last character, the 'R'. The prefix has to +# be shorter.) +_cpr_response_prefix_re = re.compile("^" + re.escape("\x1b[") + r"[\d;]*\Z") + +_mouse_event_prefix_re = re.compile("^" + re.escape("\x1b[") + r"(<?[\d;]*|M.{0,2})\Z") + + +class _Flush: + "Helper object to indicate flush operation in parser generator." + + +class _IsPrefixOfLongerMatchCache(Dict[str, bool]): + """ + Dictionary that maps input sequences to a boolean indicating whether there is + any key that start with this characters. + """ + + def __missing__(self, prefix: str) -> bool: + # (hard coded) If this could be a prefix of a CPR response, return + # True. + if _cpr_response_prefix_re.match(prefix) or _mouse_event_prefix_re.match( + prefix + ): + result = True + else: + # If this could be a prefix of anything else, also return True. + result = any( + v + for k, v in ANSI_SEQUENCES.items() + if k.startswith(prefix) and k != prefix + ) + + self[prefix] = result + return result + + +_IS_PREFIX_OF_LONGER_MATCH_CACHE = _IsPrefixOfLongerMatchCache() + + +class Vt100Parser: + """ + Parser for VT100 input stream. + Data can be fed through the `feed` method and the given callback will be + called with KeyPress objects. + + :: + + def callback(key): + pass + i = Vt100Parser(callback) + i.feed('data\x01...') + + :attr feed_key_callback: Function that will be called when a key is parsed. + """ + + # Lookup table of ANSI escape sequences for a VT100 terminal + # Hint: in order to know what sequences your terminal writes to stdin, run + # "od -c" and start typing. + def __init__(self, feed_key_callback: Callable[[KeyPress], None]) -> None: + self.feed_key_callback = feed_key_callback + self.reset() + + def reset(self, request: bool = False) -> None: + self._in_bracketed_paste = False + self._start_parser() + + def _start_parser(self) -> None: + """ + Start the parser coroutine. + """ + self._input_parser = self._input_parser_generator() + self._input_parser.send(None) # type: ignore + + def _get_match(self, prefix: str) -> None | Keys | tuple[Keys, ...]: + """ + Return the key (or keys) that maps to this prefix. + """ + # (hard coded) If we match a CPR response, return Keys.CPRResponse. + # (This one doesn't fit in the ANSI_SEQUENCES, because it contains + # integer variables.)
+ if _cpr_response_re.match(prefix): + return Keys.CPRResponse + + elif _mouse_event_re.match(prefix): + return Keys.Vt100MouseEvent + + # Otherwise, use the mappings. + try: + return ANSI_SEQUENCES[prefix] + except KeyError: + return None + + def _input_parser_generator(self) -> Generator[None, str | _Flush, None]: + """ + Coroutine (state machine) for the input parser. + """ + prefix = "" + retry = False + flush = False + + while True: + flush = False + + if retry: + retry = False + else: + # Get next character. + c = yield + + if isinstance(c, _Flush): + flush = True + else: + prefix += c + + # If we have some data, check for matches. + if prefix: + is_prefix_of_longer_match = _IS_PREFIX_OF_LONGER_MATCH_CACHE[prefix] + match = self._get_match(prefix) + + # Exact matches found, call handlers.. + if (flush or not is_prefix_of_longer_match) and match: + self._call_handler(match, prefix) + prefix = "" + + # No exact match found. + elif (flush or not is_prefix_of_longer_match) and not match: + found = False + retry = True + + # Loop over the input, try the longest match first and + # shift. + for i in range(len(prefix), 0, -1): + match = self._get_match(prefix[:i]) + if match: + self._call_handler(match, prefix[:i]) + prefix = prefix[i:] + found = True + + if not found: + self._call_handler(prefix[0], prefix[0]) + prefix = prefix[1:] + + def _call_handler( + self, key: str | Keys | tuple[Keys, ...], insert_text: str + ) -> None: + """ + Callback to handler. + """ + if isinstance(key, tuple): + # Received ANSI sequence that corresponds with multiple keys + # (probably alt+something). Handle keys individually, but only pass + # data payload to first KeyPress (so that we won't insert it + # multiple times). + for i, k in enumerate(key): + self._call_handler(k, insert_text if i == 0 else "") + else: + if key == Keys.BracketedPaste: + self._in_bracketed_paste = True + self._paste_buffer = "" + else: + self.feed_key_callback(KeyPress(key, insert_text)) + + def feed(self, data: str) -> None: + """ + Feed the input stream. + + :param data: Input string (unicode). + """ + # Handle bracketed paste. (We bypass the parser that matches all other + # key presses and keep reading input until we see the end mark.) + # This is much faster then parsing character by character. + if self._in_bracketed_paste: + self._paste_buffer += data + end_mark = "\x1b[201~" + + if end_mark in self._paste_buffer: + end_index = self._paste_buffer.index(end_mark) + + # Feed content to key bindings. + paste_content = self._paste_buffer[:end_index] + self.feed_key_callback(KeyPress(Keys.BracketedPaste, paste_content)) + + # Quit bracketed paste mode and handle remaining input. + self._in_bracketed_paste = False + remaining = self._paste_buffer[end_index + len(end_mark) :] + self._paste_buffer = "" + + self.feed(remaining) + + # Handle normal input character by character. + else: + for i, c in enumerate(data): + if self._in_bracketed_paste: + # Quit loop and process from this position when the parser + # entered bracketed paste. + self.feed(data[i:]) + break + else: + self._input_parser.send(c) + + def flush(self) -> None: + """ + Flush the buffer of the input stream. + + This will allow us to handle the escape key (or maybe meta) sooner. + The input received by the escape key is actually the same as the first + characters of e.g. Arrow-Up, so without knowing what follows the escape + sequence, we don't know whether escape has been pressed, or whether + it's something else. 
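Echoing the usage in the class docstring: feeding mixed text and escape sequences yields one `KeyPress` per logical key, with plain characters passed through unchanged::

    from prompt_toolkit.input.vt100_parser import Vt100Parser
    from prompt_toolkit.keys import Keys

    received = []
    parser = Vt100Parser(received.append)

    parser.feed("a\x1b[Ab")   # 'a', then an Up-arrow sequence, then 'b'
    assert [kp.key for kp in received] == ["a", Keys.Up, "b"]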
This flush function should be called after a + timeout, and processes everything that's still in the buffer as-is, so + without assuming any characters will follow. + """ + self._input_parser.send(_Flush()) + + def feed_and_flush(self, data: str) -> None: + """ + Wrapper around ``feed`` and ``flush``. + """ + self.feed(data) + self.flush() diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/win32.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/win32.py new file mode 100644 index 0000000000000000000000000000000000000000..1ff3234a398b581f0390c50dd8053aad9227f3b8 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/win32.py @@ -0,0 +1,886 @@ +from __future__ import annotations + +import os +import sys +from abc import abstractmethod +from asyncio import get_running_loop +from contextlib import contextmanager + +from ..utils import SPHINX_AUTODOC_RUNNING + +assert sys.platform == "win32" + +# Do not import win32-specific stuff when generating documentation. +# Otherwise RTD would be unable to generate docs for this module. +if not SPHINX_AUTODOC_RUNNING: + import msvcrt + from ctypes import windll + +from ctypes import Array, byref, pointer +from ctypes.wintypes import DWORD, HANDLE +from typing import Callable, ContextManager, Iterable, Iterator, TextIO + +from prompt_toolkit.eventloop import run_in_executor_with_context +from prompt_toolkit.eventloop.win32 import create_win32_event, wait_for_handles +from prompt_toolkit.key_binding.key_processor import KeyPress +from prompt_toolkit.keys import Keys +from prompt_toolkit.mouse_events import MouseButton, MouseEventType +from prompt_toolkit.win32_types import ( + INPUT_RECORD, + KEY_EVENT_RECORD, + MOUSE_EVENT_RECORD, + STD_INPUT_HANDLE, + EventTypes, +) + +from .ansi_escape_sequences import REVERSE_ANSI_SEQUENCES +from .base import Input +from .vt100_parser import Vt100Parser + +__all__ = [ + "Win32Input", + "ConsoleInputReader", + "raw_mode", + "cooked_mode", + "attach_win32_input", + "detach_win32_input", +] + +# Win32 Constants for MOUSE_EVENT_RECORD. +# See: https://docs.microsoft.com/en-us/windows/console/mouse-event-record-str +FROM_LEFT_1ST_BUTTON_PRESSED = 0x1 +RIGHTMOST_BUTTON_PRESSED = 0x2 +MOUSE_MOVED = 0x0001 +MOUSE_WHEELED = 0x0004 + +# See: https://msdn.microsoft.com/pl-pl/library/windows/desktop/ms686033(v=vs.85).aspx +ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200 + + +class _Win32InputBase(Input): + """ + Base class for `Win32Input` and `Win32PipeInput`. + """ + + def __init__(self) -> None: + self.win32_handles = _Win32Handles() + + @property + @abstractmethod + def handle(self) -> HANDLE: + pass + + +class Win32Input(_Win32InputBase): + """ + `Input` class that reads from the Windows console. + """ + + def __init__(self, stdin: TextIO | None = None) -> None: + super().__init__() + self._use_virtual_terminal_input = _is_win_vt100_input_enabled() + + self.console_input_reader: Vt100ConsoleInputReader | ConsoleInputReader + + if self._use_virtual_terminal_input: + self.console_input_reader = Vt100ConsoleInputReader() + else: + self.console_input_reader = ConsoleInputReader() + + def attach(self, input_ready_callback: Callable[[], None]) -> ContextManager[None]: + """ + Return a context manager that makes this input active in the current + event loop. + """ + return attach_win32_input(self, input_ready_callback) + + def detach(self) -> ContextManager[None]: + """ + Return a context manager that makes sure that this input is not active + in the current event loop. 
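The escape disambiguation that `flush` enables is easy to see in isolation: a lone ESC stays buffered, since it could still grow into an arrow-key sequence, until a flush (normally triggered by a timeout) decides it really was the Escape key::

    from prompt_toolkit.input.vt100_parser import Vt100Parser
    from prompt_toolkit.keys import Keys

    received = []
    parser = Vt100Parser(received.append)

    parser.feed("\x1b")        # prefix of many sequences: nothing emitted yet
    assert received == []

    parser.flush()             # stop waiting for a continuation
    assert received[0].key == Keys.Escape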
+ """ + return detach_win32_input(self) + + def read_keys(self) -> list[KeyPress]: + return list(self.console_input_reader.read()) + + def flush(self) -> None: + pass + + @property + def closed(self) -> bool: + return False + + def raw_mode(self) -> ContextManager[None]: + return raw_mode( + use_win10_virtual_terminal_input=self._use_virtual_terminal_input + ) + + def cooked_mode(self) -> ContextManager[None]: + return cooked_mode() + + def fileno(self) -> int: + # The windows console doesn't depend on the file handle, so + # this is not used for the event loop (which uses the + # handle instead). But it's used in `Application.run_system_command` + # which opens a subprocess with a given stdin/stdout. + return sys.stdin.fileno() + + def typeahead_hash(self) -> str: + return "win32-input" + + def close(self) -> None: + self.console_input_reader.close() + + @property + def handle(self) -> HANDLE: + return self.console_input_reader.handle + + +class ConsoleInputReader: + """ + :param recognize_paste: When True, try to discover paste actions and turn + the event into a BracketedPaste. + """ + + # Keys with character data. + mappings = { + b"\x1b": Keys.Escape, + b"\x00": Keys.ControlSpace, # Control-Space (Also for Ctrl-@) + b"\x01": Keys.ControlA, # Control-A (home) + b"\x02": Keys.ControlB, # Control-B (emacs cursor left) + b"\x03": Keys.ControlC, # Control-C (interrupt) + b"\x04": Keys.ControlD, # Control-D (exit) + b"\x05": Keys.ControlE, # Control-E (end) + b"\x06": Keys.ControlF, # Control-F (cursor forward) + b"\x07": Keys.ControlG, # Control-G + b"\x08": Keys.ControlH, # Control-H (8) (Identical to '\b') + b"\x09": Keys.ControlI, # Control-I (9) (Identical to '\t') + b"\x0a": Keys.ControlJ, # Control-J (10) (Identical to '\n') + b"\x0b": Keys.ControlK, # Control-K (delete until end of line; vertical tab) + b"\x0c": Keys.ControlL, # Control-L (clear; form feed) + b"\x0d": Keys.ControlM, # Control-M (enter) + b"\x0e": Keys.ControlN, # Control-N (14) (history forward) + b"\x0f": Keys.ControlO, # Control-O (15) + b"\x10": Keys.ControlP, # Control-P (16) (history back) + b"\x11": Keys.ControlQ, # Control-Q + b"\x12": Keys.ControlR, # Control-R (18) (reverse search) + b"\x13": Keys.ControlS, # Control-S (19) (forward search) + b"\x14": Keys.ControlT, # Control-T + b"\x15": Keys.ControlU, # Control-U + b"\x16": Keys.ControlV, # Control-V + b"\x17": Keys.ControlW, # Control-W + b"\x18": Keys.ControlX, # Control-X + b"\x19": Keys.ControlY, # Control-Y (25) + b"\x1a": Keys.ControlZ, # Control-Z + b"\x1c": Keys.ControlBackslash, # Both Control-\ and Ctrl-| + b"\x1d": Keys.ControlSquareClose, # Control-] + b"\x1e": Keys.ControlCircumflex, # Control-^ + b"\x1f": Keys.ControlUnderscore, # Control-underscore (Also for Ctrl-hyphen.) + b"\x7f": Keys.Backspace, # (127) Backspace (ASCII Delete.) + } + + # Keys that don't carry character data. + keycodes = { + # Home/End + 33: Keys.PageUp, + 34: Keys.PageDown, + 35: Keys.End, + 36: Keys.Home, + # Arrows + 37: Keys.Left, + 38: Keys.Up, + 39: Keys.Right, + 40: Keys.Down, + 45: Keys.Insert, + 46: Keys.Delete, + # F-keys. 
+ 112: Keys.F1, + 113: Keys.F2, + 114: Keys.F3, + 115: Keys.F4, + 116: Keys.F5, + 117: Keys.F6, + 118: Keys.F7, + 119: Keys.F8, + 120: Keys.F9, + 121: Keys.F10, + 122: Keys.F11, + 123: Keys.F12, + } + + LEFT_ALT_PRESSED = 0x0002 + RIGHT_ALT_PRESSED = 0x0001 + SHIFT_PRESSED = 0x0010 + LEFT_CTRL_PRESSED = 0x0008 + RIGHT_CTRL_PRESSED = 0x0004 + + def __init__(self, recognize_paste: bool = True) -> None: + self._fdcon = None + self.recognize_paste = recognize_paste + + # When stdin is a tty, use that handle, otherwise, create a handle from + # CONIN$. + self.handle: HANDLE + if sys.stdin.isatty(): + self.handle = HANDLE(windll.kernel32.GetStdHandle(STD_INPUT_HANDLE)) + else: + self._fdcon = os.open("CONIN$", os.O_RDWR | os.O_BINARY) + self.handle = HANDLE(msvcrt.get_osfhandle(self._fdcon)) + + def close(self) -> None: + "Close fdcon." + if self._fdcon is not None: + os.close(self._fdcon) + + def read(self) -> Iterable[KeyPress]: + """ + Return a list of `KeyPress` instances. It won't return anything when + there was nothing to read. (This function doesn't block.) + + http://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx + """ + max_count = 2048 # Max events to read at the same time. + + read = DWORD(0) + arrtype = INPUT_RECORD * max_count + input_records = arrtype() + + # Check whether there is some input to read. `ReadConsoleInputW` would + # block otherwise. + # (Actually, the event loop is responsible to make sure that this + # function is only called when there is something to read, but for some + # reason this happened in the asyncio_win32 loop, and it's better to be + # safe anyway.) + if not wait_for_handles([self.handle], timeout=0): + return + + # Get next batch of input event. + windll.kernel32.ReadConsoleInputW( + self.handle, pointer(input_records), max_count, pointer(read) + ) + + # First, get all the keys from the input buffer, in order to determine + # whether we should consider this a paste event or not. + all_keys = list(self._get_keys(read, input_records)) + + # Fill in 'data' for key presses. + all_keys = [self._insert_key_data(key) for key in all_keys] + + # Correct non-bmp characters that are passed as separate surrogate codes + all_keys = list(self._merge_paired_surrogates(all_keys)) + + if self.recognize_paste and self._is_paste(all_keys): + gen = iter(all_keys) + k: KeyPress | None + + for k in gen: + # Pasting: if the current key consists of text or \n, turn it + # into a BracketedPaste. + data = [] + while k and ( + not isinstance(k.key, Keys) + or k.key in {Keys.ControlJ, Keys.ControlM} + ): + data.append(k.data) + try: + k = next(gen) + except StopIteration: + k = None + + if data: + yield KeyPress(Keys.BracketedPaste, "".join(data)) + if k is not None: + yield k + else: + yield from all_keys + + def _insert_key_data(self, key_press: KeyPress) -> KeyPress: + """ + Insert KeyPress data, for vt100 compatibility. + """ + if key_press.data: + return key_press + + if isinstance(key_press.key, Keys): + data = REVERSE_ANSI_SEQUENCES.get(key_press.key, "") + else: + data = "" + + return KeyPress(key_press.key, data) + + def _get_keys( + self, read: DWORD, input_records: Array[INPUT_RECORD] + ) -> Iterator[KeyPress]: + """ + Generator that yields `KeyPress` objects from the input records. + """ + for i in range(read.value): + ir = input_records[i] + + # Get the right EventType from the EVENT_RECORD. + # (For some reason the Windows console application 'cmder' + # [http://gooseberrycreative.com/cmder/] can return '0' for + # ir.EventType. 
-- Just ignore that.) + if ir.EventType in EventTypes: + ev = getattr(ir.Event, EventTypes[ir.EventType]) + + # Process if this is a key event. (We also have mouse, menu and + # focus events.) + if isinstance(ev, KEY_EVENT_RECORD) and ev.KeyDown: + yield from self._event_to_key_presses(ev) + + elif isinstance(ev, MOUSE_EVENT_RECORD): + yield from self._handle_mouse(ev) + + @staticmethod + def _merge_paired_surrogates(key_presses: list[KeyPress]) -> Iterator[KeyPress]: + """ + Combines consecutive KeyPresses with high and low surrogates into + single characters + """ + buffered_high_surrogate = None + for key in key_presses: + is_text = not isinstance(key.key, Keys) + is_high_surrogate = is_text and "\ud800" <= key.key <= "\udbff" + is_low_surrogate = is_text and "\udc00" <= key.key <= "\udfff" + + if buffered_high_surrogate: + if is_low_surrogate: + # convert high surrogate + low surrogate to single character + fullchar = ( + (buffered_high_surrogate.key + key.key) + .encode("utf-16-le", "surrogatepass") + .decode("utf-16-le") + ) + key = KeyPress(fullchar, fullchar) + else: + yield buffered_high_surrogate + buffered_high_surrogate = None + + if is_high_surrogate: + buffered_high_surrogate = key + else: + yield key + + if buffered_high_surrogate: + yield buffered_high_surrogate + + @staticmethod + def _is_paste(keys: list[KeyPress]) -> bool: + """ + Return `True` when we should consider this list of keys as a paste + event. Pasted text on windows will be turned into a + `Keys.BracketedPaste` event. (It's not 100% correct, but it is probably + the best possible way to detect pasting of text and handle that + correctly.) + """ + # Consider paste when it contains at least one newline and at least one + # other character. + text_count = 0 + newline_count = 0 + + for k in keys: + if not isinstance(k.key, Keys): + text_count += 1 + if k.key == Keys.ControlM: + newline_count += 1 + + return newline_count >= 1 and text_count >= 1 + + def _event_to_key_presses(self, ev: KEY_EVENT_RECORD) -> list[KeyPress]: + """ + For this `KEY_EVENT_RECORD`, return a list of `KeyPress` instances. + """ + assert isinstance(ev, KEY_EVENT_RECORD) and ev.KeyDown + + result: KeyPress | None = None + + control_key_state = ev.ControlKeyState + u_char = ev.uChar.UnicodeChar + # Use surrogatepass because u_char may be an unmatched surrogate + ascii_char = u_char.encode("utf-8", "surrogatepass") + + # NOTE: We don't use `ev.uChar.AsciiChar`. That appears to be the + # unicode code point truncated to 1 byte. See also: + # https://github.com/ipython/ipython/issues/10004 + # https://github.com/jonathanslenders/python-prompt-toolkit/issues/389 + + if u_char == "\x00": + if ev.VirtualKeyCode in self.keycodes: + result = KeyPress(self.keycodes[ev.VirtualKeyCode], "") + else: + if ascii_char in self.mappings: + if self.mappings[ascii_char] == Keys.ControlJ: + u_char = ( + "\n" # Windows sends \n, turn into \r for unix compatibility. 
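`_merge_paired_surrogates` above exists because the console delivers non-BMP characters (emoji, for instance) as two UTF-16 surrogate key events; re-encoding the pair with `surrogatepass` reconstitutes the character, the same round trip the method performs::

    high, low = "\ud83d", "\ude00"   # surrogate pair for U+1F600

    merged = (high + low).encode("utf-16-le", "surrogatepass").decode("utf-16-le")
    assert merged == "\U0001f600"    # one character, delivered as one KeyPress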
+ ) + result = KeyPress(self.mappings[ascii_char], u_char) + else: + result = KeyPress(u_char, u_char) + + # First we handle Shift-Control-Arrow/Home/End (need to do this first) + if ( + ( + control_key_state & self.LEFT_CTRL_PRESSED + or control_key_state & self.RIGHT_CTRL_PRESSED + ) + and control_key_state & self.SHIFT_PRESSED + and result + ): + mapping: dict[str, str] = { + Keys.Left: Keys.ControlShiftLeft, + Keys.Right: Keys.ControlShiftRight, + Keys.Up: Keys.ControlShiftUp, + Keys.Down: Keys.ControlShiftDown, + Keys.Home: Keys.ControlShiftHome, + Keys.End: Keys.ControlShiftEnd, + Keys.Insert: Keys.ControlShiftInsert, + Keys.PageUp: Keys.ControlShiftPageUp, + Keys.PageDown: Keys.ControlShiftPageDown, + } + result.key = mapping.get(result.key, result.key) + + # Correctly handle Control-Arrow/Home/End and Control-Insert/Delete keys. + if ( + control_key_state & self.LEFT_CTRL_PRESSED + or control_key_state & self.RIGHT_CTRL_PRESSED + ) and result: + mapping = { + Keys.Left: Keys.ControlLeft, + Keys.Right: Keys.ControlRight, + Keys.Up: Keys.ControlUp, + Keys.Down: Keys.ControlDown, + Keys.Home: Keys.ControlHome, + Keys.End: Keys.ControlEnd, + Keys.Insert: Keys.ControlInsert, + Keys.Delete: Keys.ControlDelete, + Keys.PageUp: Keys.ControlPageUp, + Keys.PageDown: Keys.ControlPageDown, + } + result.key = mapping.get(result.key, result.key) + + # Turn 'Tab' into 'BackTab' when shift was pressed. + # Also handle other shift-key combination + if control_key_state & self.SHIFT_PRESSED and result: + mapping = { + Keys.Tab: Keys.BackTab, + Keys.Left: Keys.ShiftLeft, + Keys.Right: Keys.ShiftRight, + Keys.Up: Keys.ShiftUp, + Keys.Down: Keys.ShiftDown, + Keys.Home: Keys.ShiftHome, + Keys.End: Keys.ShiftEnd, + Keys.Insert: Keys.ShiftInsert, + Keys.Delete: Keys.ShiftDelete, + Keys.PageUp: Keys.ShiftPageUp, + Keys.PageDown: Keys.ShiftPageDown, + } + result.key = mapping.get(result.key, result.key) + + # Turn 'Space' into 'ControlSpace' when control was pressed. + if ( + ( + control_key_state & self.LEFT_CTRL_PRESSED + or control_key_state & self.RIGHT_CTRL_PRESSED + ) + and result + and result.data == " " + ): + result = KeyPress(Keys.ControlSpace, " ") + + # Turn Control-Enter into META-Enter. (On a vt100 terminal, we cannot + # detect this combination. But it's really practical on Windows.) + if ( + ( + control_key_state & self.LEFT_CTRL_PRESSED + or control_key_state & self.RIGHT_CTRL_PRESSED + ) + and result + and result.key == Keys.ControlJ + ): + return [KeyPress(Keys.Escape, ""), result] + + # Return result. If alt was pressed, prefix the result with an + # 'Escape' key, just like unix VT100 terminals do. + + # NOTE: Only replace the left alt with escape. The right alt key often + # acts as altgr and is used in many non US keyboard layouts for + # typing some special characters, like a backslash. We don't want + # all backslashes to be prefixed with escape. (Esc-\ has a + # meaning in E-macs, for instance.) + if result: + meta_pressed = control_key_state & self.LEFT_ALT_PRESSED + + if meta_pressed: + return [KeyPress(Keys.Escape, ""), result] + else: + return [result] + + else: + return [] + + def _handle_mouse(self, ev: MOUSE_EVENT_RECORD) -> list[KeyPress]: + """ + Handle mouse events. Return a list of KeyPress instances. + """ + event_flags = ev.EventFlags + button_state = ev.ButtonState + + event_type: MouseEventType | None = None + button: MouseButton = MouseButton.NONE + + # Scroll events. 
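The modifier cascade above is ordered: Ctrl+Shift is remapped before plain Ctrl, then plain Shift, each through its own lookup table. A standalone sketch of the same idea for one key, reusing the `ControlKeyState` bit values defined earlier (`remap_left_arrow` is an illustrative helper)::

    from prompt_toolkit.keys import Keys

    LEFT_CTRL_PRESSED, RIGHT_CTRL_PRESSED, SHIFT_PRESSED = 0x0008, 0x0004, 0x0010

    def remap_left_arrow(control_key_state: int) -> Keys:
        ctrl = bool(control_key_state & (LEFT_CTRL_PRESSED | RIGHT_CTRL_PRESSED))
        shift = bool(control_key_state & SHIFT_PRESSED)
        if ctrl and shift:
            return Keys.ControlShiftLeft
        if ctrl:
            return Keys.ControlLeft
        if shift:
            return Keys.ShiftLeft
        return Keys.Left

    assert remap_left_arrow(LEFT_CTRL_PRESSED | SHIFT_PRESSED) == Keys.ControlShiftLeft
    assert remap_left_arrow(RIGHT_CTRL_PRESSED) == Keys.ControlLeft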
+ if event_flags & MOUSE_WHEELED: + if button_state > 0: + event_type = MouseEventType.SCROLL_UP + else: + event_type = MouseEventType.SCROLL_DOWN + else: + # Handle button state for non-scroll events. + if button_state == FROM_LEFT_1ST_BUTTON_PRESSED: + button = MouseButton.LEFT + + elif button_state == RIGHTMOST_BUTTON_PRESSED: + button = MouseButton.RIGHT + + # Move events. + if event_flags & MOUSE_MOVED: + event_type = MouseEventType.MOUSE_MOVE + + # No key pressed anymore: mouse up. + if event_type is None: + if button_state > 0: + # Some button pressed. + event_type = MouseEventType.MOUSE_DOWN + else: + # No button pressed. + event_type = MouseEventType.MOUSE_UP + + data = ";".join( + [ + button.value, + event_type.value, + str(ev.MousePosition.X), + str(ev.MousePosition.Y), + ] + ) + return [KeyPress(Keys.WindowsMouseEvent, data)] + + +class Vt100ConsoleInputReader: + """ + Similar to `ConsoleInputReader`, but for usage when + `ENABLE_VIRTUAL_TERMINAL_INPUT` is enabled. This assumes that Windows sends + us the right vt100 escape sequences and we parse those with our vt100 + parser. + + (Using this instead of `ConsoleInputReader` results in the "data" attribute + from the `KeyPress` instances to be more correct in edge cases, because + this responds to for instance the terminal being in application cursor keys + mode.) + """ + + def __init__(self) -> None: + self._fdcon = None + + self._buffer: list[KeyPress] = [] # Buffer to collect the Key objects. + self._vt100_parser = Vt100Parser( + lambda key_press: self._buffer.append(key_press) + ) + + # When stdin is a tty, use that handle, otherwise, create a handle from + # CONIN$. + self.handle: HANDLE + if sys.stdin.isatty(): + self.handle = HANDLE(windll.kernel32.GetStdHandle(STD_INPUT_HANDLE)) + else: + self._fdcon = os.open("CONIN$", os.O_RDWR | os.O_BINARY) + self.handle = HANDLE(msvcrt.get_osfhandle(self._fdcon)) + + def close(self) -> None: + "Close fdcon." + if self._fdcon is not None: + os.close(self._fdcon) + + def read(self) -> Iterable[KeyPress]: + """ + Return a list of `KeyPress` instances. It won't return anything when + there was nothing to read. (This function doesn't block.) + + http://msdn.microsoft.com/en-us/library/windows/desktop/ms684961(v=vs.85).aspx + """ + max_count = 2048 # Max events to read at the same time. + + read = DWORD(0) + arrtype = INPUT_RECORD * max_count + input_records = arrtype() + + # Check whether there is some input to read. `ReadConsoleInputW` would + # block otherwise. + # (Actually, the event loop is responsible to make sure that this + # function is only called when there is something to read, but for some + # reason this happened in the asyncio_win32 loop, and it's better to be + # safe anyway.) + if not wait_for_handles([self.handle], timeout=0): + return [] + + # Get next batch of input event. + windll.kernel32.ReadConsoleInputW( + self.handle, pointer(input_records), max_count, pointer(read) + ) + + # First, get all the keys from the input buffer, in order to determine + # whether we should consider this a paste event or not. + for key_data in self._get_keys(read, input_records): + self._vt100_parser.feed(key_data) + + # Return result. + result = self._buffer + self._buffer = [] + return result + + def _get_keys( + self, read: DWORD, input_records: Array[INPUT_RECORD] + ) -> Iterator[str]: + """ + Generator that yields `KeyPress` objects from the input records. + """ + for i in range(read.value): + ir = input_records[i] + + # Get the right EventType from the EVENT_RECORD. 
+ # (For some reason the Windows console application 'cmder' + # [http://gooseberrycreative.com/cmder/] can return '0' for + # ir.EventType. -- Just ignore that.) + if ir.EventType in EventTypes: + ev = getattr(ir.Event, EventTypes[ir.EventType]) + + # Process if this is a key event. (We also have mouse, menu and + # focus events.) + if isinstance(ev, KEY_EVENT_RECORD) and ev.KeyDown: + u_char = ev.uChar.UnicodeChar + if u_char != "\x00": + yield u_char + + +class _Win32Handles: + """ + Utility to keep track of which handles are connectod to which callbacks. + + `add_win32_handle` starts a tiny event loop in another thread which waits + for the Win32 handle to become ready. When this happens, the callback will + be called in the current asyncio event loop using `call_soon_threadsafe`. + + `remove_win32_handle` will stop this tiny event loop. + + NOTE: We use this technique, so that we don't have to use the + `ProactorEventLoop` on Windows and we can wait for things like stdin + in a `SelectorEventLoop`. This is important, because our inputhook + mechanism (used by IPython), only works with the `SelectorEventLoop`. + """ + + def __init__(self) -> None: + self._handle_callbacks: dict[int, Callable[[], None]] = {} + + # Windows Events that are triggered when we have to stop watching this + # handle. + self._remove_events: dict[int, HANDLE] = {} + + def add_win32_handle(self, handle: HANDLE, callback: Callable[[], None]) -> None: + """ + Add a Win32 handle to the event loop. + """ + handle_value = handle.value + + if handle_value is None: + raise ValueError("Invalid handle.") + + # Make sure to remove a previous registered handler first. + self.remove_win32_handle(handle) + + loop = get_running_loop() + self._handle_callbacks[handle_value] = callback + + # Create remove event. + remove_event = create_win32_event() + self._remove_events[handle_value] = remove_event + + # Add reader. + def ready() -> None: + # Tell the callback that input's ready. + try: + callback() + finally: + run_in_executor_with_context(wait, loop=loop) + + # Wait for the input to become ready. + # (Use an executor for this, the Windows asyncio event loop doesn't + # allow us to wait for handles like stdin.) + def wait() -> None: + # Wait until either the handle becomes ready, or the remove event + # has been set. + result = wait_for_handles([remove_event, handle]) + + if result is remove_event: + windll.kernel32.CloseHandle(remove_event) + return + else: + loop.call_soon_threadsafe(ready) + + run_in_executor_with_context(wait, loop=loop) + + def remove_win32_handle(self, handle: HANDLE) -> Callable[[], None] | None: + """ + Remove a Win32 handle from the event loop. + Return either the registered handler or `None`. + """ + if handle.value is None: + return None # Ignore. + + # Trigger remove events, so that the reader knows to stop. + try: + event = self._remove_events.pop(handle.value) + except KeyError: + pass + else: + windll.kernel32.SetEvent(event) + + try: + return self._handle_callbacks.pop(handle.value) + except KeyError: + return None + + +@contextmanager +def attach_win32_input( + input: _Win32InputBase, callback: Callable[[], None] +) -> Iterator[None]: + """ + Context manager that makes this input active in the current event loop. + + :param input: :class:`~prompt_toolkit.input.Input` object. + :param input_ready_callback: Called when the input is ready to read. 
+ """ + win32_handles = input.win32_handles + handle = input.handle + + if handle.value is None: + raise ValueError("Invalid handle.") + + # Add reader. + previous_callback = win32_handles.remove_win32_handle(handle) + win32_handles.add_win32_handle(handle, callback) + + try: + yield + finally: + win32_handles.remove_win32_handle(handle) + + if previous_callback: + win32_handles.add_win32_handle(handle, previous_callback) + + +@contextmanager +def detach_win32_input(input: _Win32InputBase) -> Iterator[None]: + win32_handles = input.win32_handles + handle = input.handle + + if handle.value is None: + raise ValueError("Invalid handle.") + + previous_callback = win32_handles.remove_win32_handle(handle) + + try: + yield + finally: + if previous_callback: + win32_handles.add_win32_handle(handle, previous_callback) + + +class raw_mode: + """ + :: + + with raw_mode(stdin): + ''' the windows terminal is now in 'raw' mode. ''' + + The ``fileno`` attribute is ignored. This is to be compatible with the + `raw_input` method of `.vt100_input`. + """ + + def __init__( + self, fileno: int | None = None, use_win10_virtual_terminal_input: bool = False + ) -> None: + self.handle = HANDLE(windll.kernel32.GetStdHandle(STD_INPUT_HANDLE)) + self.use_win10_virtual_terminal_input = use_win10_virtual_terminal_input + + def __enter__(self) -> None: + # Remember original mode. + original_mode = DWORD() + windll.kernel32.GetConsoleMode(self.handle, pointer(original_mode)) + self.original_mode = original_mode + + self._patch() + + def _patch(self) -> None: + # Set raw + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_PROCESSED_INPUT = 0x0001 + + new_mode = self.original_mode.value & ~( + ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT + ) + + if self.use_win10_virtual_terminal_input: + new_mode |= ENABLE_VIRTUAL_TERMINAL_INPUT + + windll.kernel32.SetConsoleMode(self.handle, new_mode) + + def __exit__(self, *a: object) -> None: + # Restore original mode + windll.kernel32.SetConsoleMode(self.handle, self.original_mode) + + +class cooked_mode(raw_mode): + """ + :: + + with cooked_mode(stdin): + ''' The pseudo-terminal stdin is now used in cooked mode. ''' + """ + + def _patch(self) -> None: + # Set cooked. + ENABLE_ECHO_INPUT = 0x0004 + ENABLE_LINE_INPUT = 0x0002 + ENABLE_PROCESSED_INPUT = 0x0001 + + windll.kernel32.SetConsoleMode( + self.handle, + self.original_mode.value + | (ENABLE_ECHO_INPUT | ENABLE_LINE_INPUT | ENABLE_PROCESSED_INPUT), + ) + + +def _is_win_vt100_input_enabled() -> bool: + """ + Returns True when we're running Windows and VT100 escape sequences are + supported. + """ + hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_INPUT_HANDLE)) + + # Get original console mode. + original_mode = DWORD(0) + windll.kernel32.GetConsoleMode(hconsole, byref(original_mode)) + + try: + # Try to enable VT100 sequences. 
+        result: int = windll.kernel32.SetConsoleMode(
+            hconsole, DWORD(ENABLE_VIRTUAL_TERMINAL_INPUT)
+        )
+
+        return result == 1
+    finally:
+        windll.kernel32.SetConsoleMode(hconsole, original_mode)
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/input/win32_pipe.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/win32_pipe.py
new file mode 100644
index 0000000000000000000000000000000000000000..0bafa49eab71547708feaf1bc8ee41e061c52340
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/input/win32_pipe.py
@@ -0,0 +1,156 @@
+from __future__ import annotations
+
+import sys
+
+assert sys.platform == "win32"
+
+from contextlib import contextmanager
+from ctypes import windll
+from ctypes.wintypes import HANDLE
+from typing import Callable, ContextManager, Iterator
+
+from prompt_toolkit.eventloop.win32 import create_win32_event
+
+from ..key_binding import KeyPress
+from ..utils import DummyContext
+from .base import PipeInput
+from .vt100_parser import Vt100Parser
+from .win32 import _Win32InputBase, attach_win32_input, detach_win32_input
+
+__all__ = ["Win32PipeInput"]
+
+
+class Win32PipeInput(_Win32InputBase, PipeInput):
+    """
+    This is an input pipe that works on Windows.
+    Text or bytes can be fed into the pipe, and key strokes can be read from
+    the pipe. This is useful if we want to send the input programmatically into
+    the application. Mostly useful for unit testing.
+
+    Notice that even though it's Windows, we use vt100 escape sequences over
+    the pipe.
+
+    Usage::
+
+        input = Win32PipeInput()
+        input.send_text('inputdata')
+    """
+
+    _id = 0
+
+    def __init__(self, _event: HANDLE) -> None:
+        super().__init__()
+        # Event (handle) for registering this input in the event loop.
+        # This event is set when there is data available to read from the pipe.
+        # Note: We use this approach instead of using a regular pipe, like
+        #       returned from `os.pipe()`, because making such a regular pipe
+        #       non-blocking is tricky and this works really well.
+        self._event = _event
+
+        self._closed = False
+
+        # Parser for incoming keys.
+        self._buffer: list[KeyPress] = []  # Buffer to collect the Key objects.
+        self.vt100_parser = Vt100Parser(lambda key: self._buffer.append(key))
+
+        # Identifier for every PipeInput for the hash.
+        self.__class__._id += 1
+        self._id = self.__class__._id
+
+    @classmethod
+    @contextmanager
+    def create(cls) -> Iterator[Win32PipeInput]:
+        event = create_win32_event()
+        try:
+            yield Win32PipeInput(_event=event)
+        finally:
+            windll.kernel32.CloseHandle(event)
+
+    @property
+    def closed(self) -> bool:
+        return self._closed
+
+    def fileno(self) -> int:
+        """
+        The windows pipe doesn't depend on the file handle.
+        """
+        raise NotImplementedError
+
+    @property
+    def handle(self) -> HANDLE:
+        "The handle used for registering this pipe in the event loop."
+        return self._event
+
+    def attach(self, input_ready_callback: Callable[[], None]) -> ContextManager[None]:
+        """
+        Return a context manager that makes this input active in the current
+        event loop.
+        """
+        return attach_win32_input(self, input_ready_callback)
+
+    def detach(self) -> ContextManager[None]:
+        """
+        Return a context manager that makes sure that this input is not active
+        in the current event loop.
+        """
+        return detach_win32_input(self)
+
+    def read_keys(self) -> list[KeyPress]:
+        "Read list of KeyPress."
+
+        # Return result.
+        result = self._buffer
+        self._buffer = []
+
+        # Reset event.
+        if not self._closed:
+            # (If closed, the event should not reset.)
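+            # (`ResetEvent` puts the event back into the non-signaled state,
+            # so the event loop stops reporting this input as ready until
+            # `send_text` calls `SetEvent` again.)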
+            windll.kernel32.ResetEvent(self._event)
+
+        return result
+
+    def flush_keys(self) -> list[KeyPress]:
+        """
+        Flush pending keys and return them.
+        (Used for flushing the 'escape' key.)
+        """
+        # Flush all pending keys. (This is most important to flush the vt100
+        # 'Escape' key early when nothing else follows.)
+        self.vt100_parser.flush()
+
+        # Return result.
+        result = self._buffer
+        self._buffer = []
+        return result
+
+    def send_bytes(self, data: bytes) -> None:
+        "Send bytes to the input."
+        self.send_text(data.decode("utf-8", "ignore"))
+
+    def send_text(self, text: str) -> None:
+        "Send text to the input."
+        if self._closed:
+            raise ValueError("Attempt to write into a closed pipe.")
+
+        # Pass it through our vt100 parser.
+        self.vt100_parser.feed(text)
+
+        # Set event.
+        windll.kernel32.SetEvent(self._event)
+
+    def raw_mode(self) -> ContextManager[None]:
+        return DummyContext()
+
+    def cooked_mode(self) -> ContextManager[None]:
+        return DummyContext()
+
+    def close(self) -> None:
+        "Close write-end of the pipe."
+        self._closed = True
+        windll.kernel32.SetEvent(self._event)
+
+    def typeahead_hash(self) -> str:
+        """
+        This needs to be unique for every `PipeInput`.
+        """
+        return f"pipe-input-{self._id}"
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/key_binding/__init__.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/key_binding/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c31746aba14fd18b4d2a6df8c474bcebddfdb057
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/key_binding/__init__.py
@@ -0,0 +1,22 @@
+from __future__ import annotations
+
+from .key_bindings import (
+    ConditionalKeyBindings,
+    DynamicKeyBindings,
+    KeyBindings,
+    KeyBindingsBase,
+    merge_key_bindings,
+)
+from .key_processor import KeyPress, KeyPressEvent
+
+__all__ = [
+    # key_bindings.
+    "ConditionalKeyBindings",
+    "DynamicKeyBindings",
+    "KeyBindings",
+    "KeyBindingsBase",
+    "merge_key_bindings",
+    # key_processor
+    "KeyPress",
+    "KeyPressEvent",
+]
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/key_binding/digraphs.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/key_binding/digraphs.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0152dc4cb05525ac4d4589e67402d7ba55ba1d4
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/key_binding/digraphs.py
@@ -0,0 +1,1378 @@
+"""
+Vi Digraphs.
+This is a list of special characters that can be inserted in Vi insert mode by
+pressing Control-K followed by two normal characters.
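+For example, Control-K followed by the two characters "a" and ":" inserts
+U+00E4 ("ä"), via the ("a", ":") entry in the table below.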
+ +Taken from Neovim and translated to Python: +https://raw.githubusercontent.com/neovim/neovim/master/src/nvim/digraph.c +""" + +from __future__ import annotations + +__all__ = [ + "DIGRAPHS", +] + +# digraphs for Unicode from RFC1345 +# (also work for ISO-8859-1 aka latin1) +DIGRAPHS: dict[tuple[str, str], int] = { + ("N", "U"): 0x00, + ("S", "H"): 0x01, + ("S", "X"): 0x02, + ("E", "X"): 0x03, + ("E", "T"): 0x04, + ("E", "Q"): 0x05, + ("A", "K"): 0x06, + ("B", "L"): 0x07, + ("B", "S"): 0x08, + ("H", "T"): 0x09, + ("L", "F"): 0x0A, + ("V", "T"): 0x0B, + ("F", "F"): 0x0C, + ("C", "R"): 0x0D, + ("S", "O"): 0x0E, + ("S", "I"): 0x0F, + ("D", "L"): 0x10, + ("D", "1"): 0x11, + ("D", "2"): 0x12, + ("D", "3"): 0x13, + ("D", "4"): 0x14, + ("N", "K"): 0x15, + ("S", "Y"): 0x16, + ("E", "B"): 0x17, + ("C", "N"): 0x18, + ("E", "M"): 0x19, + ("S", "B"): 0x1A, + ("E", "C"): 0x1B, + ("F", "S"): 0x1C, + ("G", "S"): 0x1D, + ("R", "S"): 0x1E, + ("U", "S"): 0x1F, + ("S", "P"): 0x20, + ("N", "b"): 0x23, + ("D", "O"): 0x24, + ("A", "t"): 0x40, + ("<", "("): 0x5B, + ("/", "/"): 0x5C, + (")", ">"): 0x5D, + ("'", ">"): 0x5E, + ("'", "!"): 0x60, + ("(", "!"): 0x7B, + ("!", "!"): 0x7C, + ("!", ")"): 0x7D, + ("'", "?"): 0x7E, + ("D", "T"): 0x7F, + ("P", "A"): 0x80, + ("H", "O"): 0x81, + ("B", "H"): 0x82, + ("N", "H"): 0x83, + ("I", "N"): 0x84, + ("N", "L"): 0x85, + ("S", "A"): 0x86, + ("E", "S"): 0x87, + ("H", "S"): 0x88, + ("H", "J"): 0x89, + ("V", "S"): 0x8A, + ("P", "D"): 0x8B, + ("P", "U"): 0x8C, + ("R", "I"): 0x8D, + ("S", "2"): 0x8E, + ("S", "3"): 0x8F, + ("D", "C"): 0x90, + ("P", "1"): 0x91, + ("P", "2"): 0x92, + ("T", "S"): 0x93, + ("C", "C"): 0x94, + ("M", "W"): 0x95, + ("S", "G"): 0x96, + ("E", "G"): 0x97, + ("S", "S"): 0x98, + ("G", "C"): 0x99, + ("S", "C"): 0x9A, + ("C", "I"): 0x9B, + ("S", "T"): 0x9C, + ("O", "C"): 0x9D, + ("P", "M"): 0x9E, + ("A", "C"): 0x9F, + ("N", "S"): 0xA0, + ("!", "I"): 0xA1, + ("C", "t"): 0xA2, + ("P", "d"): 0xA3, + ("C", "u"): 0xA4, + ("Y", "e"): 0xA5, + ("B", "B"): 0xA6, + ("S", "E"): 0xA7, + ("'", ":"): 0xA8, + ("C", "o"): 0xA9, + ("-", "a"): 0xAA, + ("<", "<"): 0xAB, + ("N", "O"): 0xAC, + ("-", "-"): 0xAD, + ("R", "g"): 0xAE, + ("'", "m"): 0xAF, + ("D", "G"): 0xB0, + ("+", "-"): 0xB1, + ("2", "S"): 0xB2, + ("3", "S"): 0xB3, + ("'", "'"): 0xB4, + ("M", "y"): 0xB5, + ("P", "I"): 0xB6, + (".", "M"): 0xB7, + ("'", ","): 0xB8, + ("1", "S"): 0xB9, + ("-", "o"): 0xBA, + (">", ">"): 0xBB, + ("1", "4"): 0xBC, + ("1", "2"): 0xBD, + ("3", "4"): 0xBE, + ("?", "I"): 0xBF, + ("A", "!"): 0xC0, + ("A", "'"): 0xC1, + ("A", ">"): 0xC2, + ("A", "?"): 0xC3, + ("A", ":"): 0xC4, + ("A", "A"): 0xC5, + ("A", "E"): 0xC6, + ("C", ","): 0xC7, + ("E", "!"): 0xC8, + ("E", "'"): 0xC9, + ("E", ">"): 0xCA, + ("E", ":"): 0xCB, + ("I", "!"): 0xCC, + ("I", "'"): 0xCD, + ("I", ">"): 0xCE, + ("I", ":"): 0xCF, + ("D", "-"): 0xD0, + ("N", "?"): 0xD1, + ("O", "!"): 0xD2, + ("O", "'"): 0xD3, + ("O", ">"): 0xD4, + ("O", "?"): 0xD5, + ("O", ":"): 0xD6, + ("*", "X"): 0xD7, + ("O", "/"): 0xD8, + ("U", "!"): 0xD9, + ("U", "'"): 0xDA, + ("U", ">"): 0xDB, + ("U", ":"): 0xDC, + ("Y", "'"): 0xDD, + ("T", "H"): 0xDE, + ("s", "s"): 0xDF, + ("a", "!"): 0xE0, + ("a", "'"): 0xE1, + ("a", ">"): 0xE2, + ("a", "?"): 0xE3, + ("a", ":"): 0xE4, + ("a", "a"): 0xE5, + ("a", "e"): 0xE6, + ("c", ","): 0xE7, + ("e", "!"): 0xE8, + ("e", "'"): 0xE9, + ("e", ">"): 0xEA, + ("e", ":"): 0xEB, + ("i", "!"): 0xEC, + ("i", "'"): 0xED, + ("i", ">"): 0xEE, + ("i", ":"): 0xEF, + ("d", "-"): 0xF0, + ("n", "?"): 0xF1, + ("o", "!"): 0xF2, + ("o", "'"): 
0xF3, + ("o", ">"): 0xF4, + ("o", "?"): 0xF5, + ("o", ":"): 0xF6, + ("-", ":"): 0xF7, + ("o", "/"): 0xF8, + ("u", "!"): 0xF9, + ("u", "'"): 0xFA, + ("u", ">"): 0xFB, + ("u", ":"): 0xFC, + ("y", "'"): 0xFD, + ("t", "h"): 0xFE, + ("y", ":"): 0xFF, + ("A", "-"): 0x0100, + ("a", "-"): 0x0101, + ("A", "("): 0x0102, + ("a", "("): 0x0103, + ("A", ";"): 0x0104, + ("a", ";"): 0x0105, + ("C", "'"): 0x0106, + ("c", "'"): 0x0107, + ("C", ">"): 0x0108, + ("c", ">"): 0x0109, + ("C", "."): 0x010A, + ("c", "."): 0x010B, + ("C", "<"): 0x010C, + ("c", "<"): 0x010D, + ("D", "<"): 0x010E, + ("d", "<"): 0x010F, + ("D", "/"): 0x0110, + ("d", "/"): 0x0111, + ("E", "-"): 0x0112, + ("e", "-"): 0x0113, + ("E", "("): 0x0114, + ("e", "("): 0x0115, + ("E", "."): 0x0116, + ("e", "."): 0x0117, + ("E", ";"): 0x0118, + ("e", ";"): 0x0119, + ("E", "<"): 0x011A, + ("e", "<"): 0x011B, + ("G", ">"): 0x011C, + ("g", ">"): 0x011D, + ("G", "("): 0x011E, + ("g", "("): 0x011F, + ("G", "."): 0x0120, + ("g", "."): 0x0121, + ("G", ","): 0x0122, + ("g", ","): 0x0123, + ("H", ">"): 0x0124, + ("h", ">"): 0x0125, + ("H", "/"): 0x0126, + ("h", "/"): 0x0127, + ("I", "?"): 0x0128, + ("i", "?"): 0x0129, + ("I", "-"): 0x012A, + ("i", "-"): 0x012B, + ("I", "("): 0x012C, + ("i", "("): 0x012D, + ("I", ";"): 0x012E, + ("i", ";"): 0x012F, + ("I", "."): 0x0130, + ("i", "."): 0x0131, + ("I", "J"): 0x0132, + ("i", "j"): 0x0133, + ("J", ">"): 0x0134, + ("j", ">"): 0x0135, + ("K", ","): 0x0136, + ("k", ","): 0x0137, + ("k", "k"): 0x0138, + ("L", "'"): 0x0139, + ("l", "'"): 0x013A, + ("L", ","): 0x013B, + ("l", ","): 0x013C, + ("L", "<"): 0x013D, + ("l", "<"): 0x013E, + ("L", "."): 0x013F, + ("l", "."): 0x0140, + ("L", "/"): 0x0141, + ("l", "/"): 0x0142, + ("N", "'"): 0x0143, + ("n", "'"): 0x0144, + ("N", ","): 0x0145, + ("n", ","): 0x0146, + ("N", "<"): 0x0147, + ("n", "<"): 0x0148, + ("'", "n"): 0x0149, + ("N", "G"): 0x014A, + ("n", "g"): 0x014B, + ("O", "-"): 0x014C, + ("o", "-"): 0x014D, + ("O", "("): 0x014E, + ("o", "("): 0x014F, + ("O", '"'): 0x0150, + ("o", '"'): 0x0151, + ("O", "E"): 0x0152, + ("o", "e"): 0x0153, + ("R", "'"): 0x0154, + ("r", "'"): 0x0155, + ("R", ","): 0x0156, + ("r", ","): 0x0157, + ("R", "<"): 0x0158, + ("r", "<"): 0x0159, + ("S", "'"): 0x015A, + ("s", "'"): 0x015B, + ("S", ">"): 0x015C, + ("s", ">"): 0x015D, + ("S", ","): 0x015E, + ("s", ","): 0x015F, + ("S", "<"): 0x0160, + ("s", "<"): 0x0161, + ("T", ","): 0x0162, + ("t", ","): 0x0163, + ("T", "<"): 0x0164, + ("t", "<"): 0x0165, + ("T", "/"): 0x0166, + ("t", "/"): 0x0167, + ("U", "?"): 0x0168, + ("u", "?"): 0x0169, + ("U", "-"): 0x016A, + ("u", "-"): 0x016B, + ("U", "("): 0x016C, + ("u", "("): 0x016D, + ("U", "0"): 0x016E, + ("u", "0"): 0x016F, + ("U", '"'): 0x0170, + ("u", '"'): 0x0171, + ("U", ";"): 0x0172, + ("u", ";"): 0x0173, + ("W", ">"): 0x0174, + ("w", ">"): 0x0175, + ("Y", ">"): 0x0176, + ("y", ">"): 0x0177, + ("Y", ":"): 0x0178, + ("Z", "'"): 0x0179, + ("z", "'"): 0x017A, + ("Z", "."): 0x017B, + ("z", "."): 0x017C, + ("Z", "<"): 0x017D, + ("z", "<"): 0x017E, + ("O", "9"): 0x01A0, + ("o", "9"): 0x01A1, + ("O", "I"): 0x01A2, + ("o", "i"): 0x01A3, + ("y", "r"): 0x01A6, + ("U", "9"): 0x01AF, + ("u", "9"): 0x01B0, + ("Z", "/"): 0x01B5, + ("z", "/"): 0x01B6, + ("E", "D"): 0x01B7, + ("A", "<"): 0x01CD, + ("a", "<"): 0x01CE, + ("I", "<"): 0x01CF, + ("i", "<"): 0x01D0, + ("O", "<"): 0x01D1, + ("o", "<"): 0x01D2, + ("U", "<"): 0x01D3, + ("u", "<"): 0x01D4, + ("A", "1"): 0x01DE, + ("a", "1"): 0x01DF, + ("A", "7"): 0x01E0, + ("a", "7"): 0x01E1, + ("A", "3"): 0x01E2, + ("a", 
"3"): 0x01E3, + ("G", "/"): 0x01E4, + ("g", "/"): 0x01E5, + ("G", "<"): 0x01E6, + ("g", "<"): 0x01E7, + ("K", "<"): 0x01E8, + ("k", "<"): 0x01E9, + ("O", ";"): 0x01EA, + ("o", ";"): 0x01EB, + ("O", "1"): 0x01EC, + ("o", "1"): 0x01ED, + ("E", "Z"): 0x01EE, + ("e", "z"): 0x01EF, + ("j", "<"): 0x01F0, + ("G", "'"): 0x01F4, + ("g", "'"): 0x01F5, + (";", "S"): 0x02BF, + ("'", "<"): 0x02C7, + ("'", "("): 0x02D8, + ("'", "."): 0x02D9, + ("'", "0"): 0x02DA, + ("'", ";"): 0x02DB, + ("'", '"'): 0x02DD, + ("A", "%"): 0x0386, + ("E", "%"): 0x0388, + ("Y", "%"): 0x0389, + ("I", "%"): 0x038A, + ("O", "%"): 0x038C, + ("U", "%"): 0x038E, + ("W", "%"): 0x038F, + ("i", "3"): 0x0390, + ("A", "*"): 0x0391, + ("B", "*"): 0x0392, + ("G", "*"): 0x0393, + ("D", "*"): 0x0394, + ("E", "*"): 0x0395, + ("Z", "*"): 0x0396, + ("Y", "*"): 0x0397, + ("H", "*"): 0x0398, + ("I", "*"): 0x0399, + ("K", "*"): 0x039A, + ("L", "*"): 0x039B, + ("M", "*"): 0x039C, + ("N", "*"): 0x039D, + ("C", "*"): 0x039E, + ("O", "*"): 0x039F, + ("P", "*"): 0x03A0, + ("R", "*"): 0x03A1, + ("S", "*"): 0x03A3, + ("T", "*"): 0x03A4, + ("U", "*"): 0x03A5, + ("F", "*"): 0x03A6, + ("X", "*"): 0x03A7, + ("Q", "*"): 0x03A8, + ("W", "*"): 0x03A9, + ("J", "*"): 0x03AA, + ("V", "*"): 0x03AB, + ("a", "%"): 0x03AC, + ("e", "%"): 0x03AD, + ("y", "%"): 0x03AE, + ("i", "%"): 0x03AF, + ("u", "3"): 0x03B0, + ("a", "*"): 0x03B1, + ("b", "*"): 0x03B2, + ("g", "*"): 0x03B3, + ("d", "*"): 0x03B4, + ("e", "*"): 0x03B5, + ("z", "*"): 0x03B6, + ("y", "*"): 0x03B7, + ("h", "*"): 0x03B8, + ("i", "*"): 0x03B9, + ("k", "*"): 0x03BA, + ("l", "*"): 0x03BB, + ("m", "*"): 0x03BC, + ("n", "*"): 0x03BD, + ("c", "*"): 0x03BE, + ("o", "*"): 0x03BF, + ("p", "*"): 0x03C0, + ("r", "*"): 0x03C1, + ("*", "s"): 0x03C2, + ("s", "*"): 0x03C3, + ("t", "*"): 0x03C4, + ("u", "*"): 0x03C5, + ("f", "*"): 0x03C6, + ("x", "*"): 0x03C7, + ("q", "*"): 0x03C8, + ("w", "*"): 0x03C9, + ("j", "*"): 0x03CA, + ("v", "*"): 0x03CB, + ("o", "%"): 0x03CC, + ("u", "%"): 0x03CD, + ("w", "%"): 0x03CE, + ("'", "G"): 0x03D8, + (",", "G"): 0x03D9, + ("T", "3"): 0x03DA, + ("t", "3"): 0x03DB, + ("M", "3"): 0x03DC, + ("m", "3"): 0x03DD, + ("K", "3"): 0x03DE, + ("k", "3"): 0x03DF, + ("P", "3"): 0x03E0, + ("p", "3"): 0x03E1, + ("'", "%"): 0x03F4, + ("j", "3"): 0x03F5, + ("I", "O"): 0x0401, + ("D", "%"): 0x0402, + ("G", "%"): 0x0403, + ("I", "E"): 0x0404, + ("D", "S"): 0x0405, + ("I", "I"): 0x0406, + ("Y", "I"): 0x0407, + ("J", "%"): 0x0408, + ("L", "J"): 0x0409, + ("N", "J"): 0x040A, + ("T", "s"): 0x040B, + ("K", "J"): 0x040C, + ("V", "%"): 0x040E, + ("D", "Z"): 0x040F, + ("A", "="): 0x0410, + ("B", "="): 0x0411, + ("V", "="): 0x0412, + ("G", "="): 0x0413, + ("D", "="): 0x0414, + ("E", "="): 0x0415, + ("Z", "%"): 0x0416, + ("Z", "="): 0x0417, + ("I", "="): 0x0418, + ("J", "="): 0x0419, + ("K", "="): 0x041A, + ("L", "="): 0x041B, + ("M", "="): 0x041C, + ("N", "="): 0x041D, + ("O", "="): 0x041E, + ("P", "="): 0x041F, + ("R", "="): 0x0420, + ("S", "="): 0x0421, + ("T", "="): 0x0422, + ("U", "="): 0x0423, + ("F", "="): 0x0424, + ("H", "="): 0x0425, + ("C", "="): 0x0426, + ("C", "%"): 0x0427, + ("S", "%"): 0x0428, + ("S", "c"): 0x0429, + ("=", '"'): 0x042A, + ("Y", "="): 0x042B, + ("%", '"'): 0x042C, + ("J", "E"): 0x042D, + ("J", "U"): 0x042E, + ("J", "A"): 0x042F, + ("a", "="): 0x0430, + ("b", "="): 0x0431, + ("v", "="): 0x0432, + ("g", "="): 0x0433, + ("d", "="): 0x0434, + ("e", "="): 0x0435, + ("z", "%"): 0x0436, + ("z", "="): 0x0437, + ("i", "="): 0x0438, + ("j", "="): 0x0439, + ("k", "="): 0x043A, + ("l", "="): 
0x043B, + ("m", "="): 0x043C, + ("n", "="): 0x043D, + ("o", "="): 0x043E, + ("p", "="): 0x043F, + ("r", "="): 0x0440, + ("s", "="): 0x0441, + ("t", "="): 0x0442, + ("u", "="): 0x0443, + ("f", "="): 0x0444, + ("h", "="): 0x0445, + ("c", "="): 0x0446, + ("c", "%"): 0x0447, + ("s", "%"): 0x0448, + ("s", "c"): 0x0449, + ("=", "'"): 0x044A, + ("y", "="): 0x044B, + ("%", "'"): 0x044C, + ("j", "e"): 0x044D, + ("j", "u"): 0x044E, + ("j", "a"): 0x044F, + ("i", "o"): 0x0451, + ("d", "%"): 0x0452, + ("g", "%"): 0x0453, + ("i", "e"): 0x0454, + ("d", "s"): 0x0455, + ("i", "i"): 0x0456, + ("y", "i"): 0x0457, + ("j", "%"): 0x0458, + ("l", "j"): 0x0459, + ("n", "j"): 0x045A, + ("t", "s"): 0x045B, + ("k", "j"): 0x045C, + ("v", "%"): 0x045E, + ("d", "z"): 0x045F, + ("Y", "3"): 0x0462, + ("y", "3"): 0x0463, + ("O", "3"): 0x046A, + ("o", "3"): 0x046B, + ("F", "3"): 0x0472, + ("f", "3"): 0x0473, + ("V", "3"): 0x0474, + ("v", "3"): 0x0475, + ("C", "3"): 0x0480, + ("c", "3"): 0x0481, + ("G", "3"): 0x0490, + ("g", "3"): 0x0491, + ("A", "+"): 0x05D0, + ("B", "+"): 0x05D1, + ("G", "+"): 0x05D2, + ("D", "+"): 0x05D3, + ("H", "+"): 0x05D4, + ("W", "+"): 0x05D5, + ("Z", "+"): 0x05D6, + ("X", "+"): 0x05D7, + ("T", "j"): 0x05D8, + ("J", "+"): 0x05D9, + ("K", "%"): 0x05DA, + ("K", "+"): 0x05DB, + ("L", "+"): 0x05DC, + ("M", "%"): 0x05DD, + ("M", "+"): 0x05DE, + ("N", "%"): 0x05DF, + ("N", "+"): 0x05E0, + ("S", "+"): 0x05E1, + ("E", "+"): 0x05E2, + ("P", "%"): 0x05E3, + ("P", "+"): 0x05E4, + ("Z", "j"): 0x05E5, + ("Z", "J"): 0x05E6, + ("Q", "+"): 0x05E7, + ("R", "+"): 0x05E8, + ("S", "h"): 0x05E9, + ("T", "+"): 0x05EA, + (",", "+"): 0x060C, + (";", "+"): 0x061B, + ("?", "+"): 0x061F, + ("H", "'"): 0x0621, + ("a", "M"): 0x0622, + ("a", "H"): 0x0623, + ("w", "H"): 0x0624, + ("a", "h"): 0x0625, + ("y", "H"): 0x0626, + ("a", "+"): 0x0627, + ("b", "+"): 0x0628, + ("t", "m"): 0x0629, + ("t", "+"): 0x062A, + ("t", "k"): 0x062B, + ("g", "+"): 0x062C, + ("h", "k"): 0x062D, + ("x", "+"): 0x062E, + ("d", "+"): 0x062F, + ("d", "k"): 0x0630, + ("r", "+"): 0x0631, + ("z", "+"): 0x0632, + ("s", "+"): 0x0633, + ("s", "n"): 0x0634, + ("c", "+"): 0x0635, + ("d", "d"): 0x0636, + ("t", "j"): 0x0637, + ("z", "H"): 0x0638, + ("e", "+"): 0x0639, + ("i", "+"): 0x063A, + ("+", "+"): 0x0640, + ("f", "+"): 0x0641, + ("q", "+"): 0x0642, + ("k", "+"): 0x0643, + ("l", "+"): 0x0644, + ("m", "+"): 0x0645, + ("n", "+"): 0x0646, + ("h", "+"): 0x0647, + ("w", "+"): 0x0648, + ("j", "+"): 0x0649, + ("y", "+"): 0x064A, + (":", "+"): 0x064B, + ('"', "+"): 0x064C, + ("=", "+"): 0x064D, + ("/", "+"): 0x064E, + ("'", "+"): 0x064F, + ("1", "+"): 0x0650, + ("3", "+"): 0x0651, + ("0", "+"): 0x0652, + ("a", "S"): 0x0670, + ("p", "+"): 0x067E, + ("v", "+"): 0x06A4, + ("g", "f"): 0x06AF, + ("0", "a"): 0x06F0, + ("1", "a"): 0x06F1, + ("2", "a"): 0x06F2, + ("3", "a"): 0x06F3, + ("4", "a"): 0x06F4, + ("5", "a"): 0x06F5, + ("6", "a"): 0x06F6, + ("7", "a"): 0x06F7, + ("8", "a"): 0x06F8, + ("9", "a"): 0x06F9, + ("B", "."): 0x1E02, + ("b", "."): 0x1E03, + ("B", "_"): 0x1E06, + ("b", "_"): 0x1E07, + ("D", "."): 0x1E0A, + ("d", "."): 0x1E0B, + ("D", "_"): 0x1E0E, + ("d", "_"): 0x1E0F, + ("D", ","): 0x1E10, + ("d", ","): 0x1E11, + ("F", "."): 0x1E1E, + ("f", "."): 0x1E1F, + ("G", "-"): 0x1E20, + ("g", "-"): 0x1E21, + ("H", "."): 0x1E22, + ("h", "."): 0x1E23, + ("H", ":"): 0x1E26, + ("h", ":"): 0x1E27, + ("H", ","): 0x1E28, + ("h", ","): 0x1E29, + ("K", "'"): 0x1E30, + ("k", "'"): 0x1E31, + ("K", "_"): 0x1E34, + ("k", "_"): 0x1E35, + ("L", "_"): 0x1E3A, + ("l", "_"): 0x1E3B, + 
("M", "'"): 0x1E3E, + ("m", "'"): 0x1E3F, + ("M", "."): 0x1E40, + ("m", "."): 0x1E41, + ("N", "."): 0x1E44, + ("n", "."): 0x1E45, + ("N", "_"): 0x1E48, + ("n", "_"): 0x1E49, + ("P", "'"): 0x1E54, + ("p", "'"): 0x1E55, + ("P", "."): 0x1E56, + ("p", "."): 0x1E57, + ("R", "."): 0x1E58, + ("r", "."): 0x1E59, + ("R", "_"): 0x1E5E, + ("r", "_"): 0x1E5F, + ("S", "."): 0x1E60, + ("s", "."): 0x1E61, + ("T", "."): 0x1E6A, + ("t", "."): 0x1E6B, + ("T", "_"): 0x1E6E, + ("t", "_"): 0x1E6F, + ("V", "?"): 0x1E7C, + ("v", "?"): 0x1E7D, + ("W", "!"): 0x1E80, + ("w", "!"): 0x1E81, + ("W", "'"): 0x1E82, + ("w", "'"): 0x1E83, + ("W", ":"): 0x1E84, + ("w", ":"): 0x1E85, + ("W", "."): 0x1E86, + ("w", "."): 0x1E87, + ("X", "."): 0x1E8A, + ("x", "."): 0x1E8B, + ("X", ":"): 0x1E8C, + ("x", ":"): 0x1E8D, + ("Y", "."): 0x1E8E, + ("y", "."): 0x1E8F, + ("Z", ">"): 0x1E90, + ("z", ">"): 0x1E91, + ("Z", "_"): 0x1E94, + ("z", "_"): 0x1E95, + ("h", "_"): 0x1E96, + ("t", ":"): 0x1E97, + ("w", "0"): 0x1E98, + ("y", "0"): 0x1E99, + ("A", "2"): 0x1EA2, + ("a", "2"): 0x1EA3, + ("E", "2"): 0x1EBA, + ("e", "2"): 0x1EBB, + ("E", "?"): 0x1EBC, + ("e", "?"): 0x1EBD, + ("I", "2"): 0x1EC8, + ("i", "2"): 0x1EC9, + ("O", "2"): 0x1ECE, + ("o", "2"): 0x1ECF, + ("U", "2"): 0x1EE6, + ("u", "2"): 0x1EE7, + ("Y", "!"): 0x1EF2, + ("y", "!"): 0x1EF3, + ("Y", "2"): 0x1EF6, + ("y", "2"): 0x1EF7, + ("Y", "?"): 0x1EF8, + ("y", "?"): 0x1EF9, + (";", "'"): 0x1F00, + (",", "'"): 0x1F01, + (";", "!"): 0x1F02, + (",", "!"): 0x1F03, + ("?", ";"): 0x1F04, + ("?", ","): 0x1F05, + ("!", ":"): 0x1F06, + ("?", ":"): 0x1F07, + ("1", "N"): 0x2002, + ("1", "M"): 0x2003, + ("3", "M"): 0x2004, + ("4", "M"): 0x2005, + ("6", "M"): 0x2006, + ("1", "T"): 0x2009, + ("1", "H"): 0x200A, + ("-", "1"): 0x2010, + ("-", "N"): 0x2013, + ("-", "M"): 0x2014, + ("-", "3"): 0x2015, + ("!", "2"): 0x2016, + ("=", "2"): 0x2017, + ("'", "6"): 0x2018, + ("'", "9"): 0x2019, + (".", "9"): 0x201A, + ("9", "'"): 0x201B, + ('"', "6"): 0x201C, + ('"', "9"): 0x201D, + (":", "9"): 0x201E, + ("9", '"'): 0x201F, + ("/", "-"): 0x2020, + ("/", "="): 0x2021, + (".", "."): 0x2025, + ("%", "0"): 0x2030, + ("1", "'"): 0x2032, + ("2", "'"): 0x2033, + ("3", "'"): 0x2034, + ("1", '"'): 0x2035, + ("2", '"'): 0x2036, + ("3", '"'): 0x2037, + ("C", "a"): 0x2038, + ("<", "1"): 0x2039, + (">", "1"): 0x203A, + (":", "X"): 0x203B, + ("'", "-"): 0x203E, + ("/", "f"): 0x2044, + ("0", "S"): 0x2070, + ("4", "S"): 0x2074, + ("5", "S"): 0x2075, + ("6", "S"): 0x2076, + ("7", "S"): 0x2077, + ("8", "S"): 0x2078, + ("9", "S"): 0x2079, + ("+", "S"): 0x207A, + ("-", "S"): 0x207B, + ("=", "S"): 0x207C, + ("(", "S"): 0x207D, + (")", "S"): 0x207E, + ("n", "S"): 0x207F, + ("0", "s"): 0x2080, + ("1", "s"): 0x2081, + ("2", "s"): 0x2082, + ("3", "s"): 0x2083, + ("4", "s"): 0x2084, + ("5", "s"): 0x2085, + ("6", "s"): 0x2086, + ("7", "s"): 0x2087, + ("8", "s"): 0x2088, + ("9", "s"): 0x2089, + ("+", "s"): 0x208A, + ("-", "s"): 0x208B, + ("=", "s"): 0x208C, + ("(", "s"): 0x208D, + (")", "s"): 0x208E, + ("L", "i"): 0x20A4, + ("P", "t"): 0x20A7, + ("W", "="): 0x20A9, + ("=", "e"): 0x20AC, # euro + ("E", "u"): 0x20AC, # euro + ("=", "R"): 0x20BD, # rouble + ("=", "P"): 0x20BD, # rouble + ("o", "C"): 0x2103, + ("c", "o"): 0x2105, + ("o", "F"): 0x2109, + ("N", "0"): 0x2116, + ("P", "O"): 0x2117, + ("R", "x"): 0x211E, + ("S", "M"): 0x2120, + ("T", "M"): 0x2122, + ("O", "m"): 0x2126, + ("A", "O"): 0x212B, + ("1", "3"): 0x2153, + ("2", "3"): 0x2154, + ("1", "5"): 0x2155, + ("2", "5"): 0x2156, + ("3", "5"): 0x2157, + ("4", "5"): 0x2158, + 
("1", "6"): 0x2159, + ("5", "6"): 0x215A, + ("1", "8"): 0x215B, + ("3", "8"): 0x215C, + ("5", "8"): 0x215D, + ("7", "8"): 0x215E, + ("1", "R"): 0x2160, + ("2", "R"): 0x2161, + ("3", "R"): 0x2162, + ("4", "R"): 0x2163, + ("5", "R"): 0x2164, + ("6", "R"): 0x2165, + ("7", "R"): 0x2166, + ("8", "R"): 0x2167, + ("9", "R"): 0x2168, + ("a", "R"): 0x2169, + ("b", "R"): 0x216A, + ("c", "R"): 0x216B, + ("1", "r"): 0x2170, + ("2", "r"): 0x2171, + ("3", "r"): 0x2172, + ("4", "r"): 0x2173, + ("5", "r"): 0x2174, + ("6", "r"): 0x2175, + ("7", "r"): 0x2176, + ("8", "r"): 0x2177, + ("9", "r"): 0x2178, + ("a", "r"): 0x2179, + ("b", "r"): 0x217A, + ("c", "r"): 0x217B, + ("<", "-"): 0x2190, + ("-", "!"): 0x2191, + ("-", ">"): 0x2192, + ("-", "v"): 0x2193, + ("<", ">"): 0x2194, + ("U", "D"): 0x2195, + ("<", "="): 0x21D0, + ("=", ">"): 0x21D2, + ("=", "="): 0x21D4, + ("F", "A"): 0x2200, + ("d", "P"): 0x2202, + ("T", "E"): 0x2203, + ("/", "0"): 0x2205, + ("D", "E"): 0x2206, + ("N", "B"): 0x2207, + ("(", "-"): 0x2208, + ("-", ")"): 0x220B, + ("*", "P"): 0x220F, + ("+", "Z"): 0x2211, + ("-", "2"): 0x2212, + ("-", "+"): 0x2213, + ("*", "-"): 0x2217, + ("O", "b"): 0x2218, + ("S", "b"): 0x2219, + ("R", "T"): 0x221A, + ("0", "("): 0x221D, + ("0", "0"): 0x221E, + ("-", "L"): 0x221F, + ("-", "V"): 0x2220, + ("P", "P"): 0x2225, + ("A", "N"): 0x2227, + ("O", "R"): 0x2228, + ("(", "U"): 0x2229, + (")", "U"): 0x222A, + ("I", "n"): 0x222B, + ("D", "I"): 0x222C, + ("I", "o"): 0x222E, + (".", ":"): 0x2234, + (":", "."): 0x2235, + (":", "R"): 0x2236, + (":", ":"): 0x2237, + ("?", "1"): 0x223C, + ("C", "G"): 0x223E, + ("?", "-"): 0x2243, + ("?", "="): 0x2245, + ("?", "2"): 0x2248, + ("=", "?"): 0x224C, + ("H", "I"): 0x2253, + ("!", "="): 0x2260, + ("=", "3"): 0x2261, + ("=", "<"): 0x2264, + (">", "="): 0x2265, + ("<", "*"): 0x226A, + ("*", ">"): 0x226B, + ("!", "<"): 0x226E, + ("!", ">"): 0x226F, + ("(", "C"): 0x2282, + (")", "C"): 0x2283, + ("(", "_"): 0x2286, + (")", "_"): 0x2287, + ("0", "."): 0x2299, + ("0", "2"): 0x229A, + ("-", "T"): 0x22A5, + (".", "P"): 0x22C5, + (":", "3"): 0x22EE, + (".", "3"): 0x22EF, + ("E", "h"): 0x2302, + ("<", "7"): 0x2308, + (">", "7"): 0x2309, + ("7", "<"): 0x230A, + ("7", ">"): 0x230B, + ("N", "I"): 0x2310, + ("(", "A"): 0x2312, + ("T", "R"): 0x2315, + ("I", "u"): 0x2320, + ("I", "l"): 0x2321, + ("<", "/"): 0x2329, + ("/", ">"): 0x232A, + ("V", "s"): 0x2423, + ("1", "h"): 0x2440, + ("3", "h"): 0x2441, + ("2", "h"): 0x2442, + ("4", "h"): 0x2443, + ("1", "j"): 0x2446, + ("2", "j"): 0x2447, + ("3", "j"): 0x2448, + ("4", "j"): 0x2449, + ("1", "."): 0x2488, + ("2", "."): 0x2489, + ("3", "."): 0x248A, + ("4", "."): 0x248B, + ("5", "."): 0x248C, + ("6", "."): 0x248D, + ("7", "."): 0x248E, + ("8", "."): 0x248F, + ("9", "."): 0x2490, + ("h", "h"): 0x2500, + ("H", "H"): 0x2501, + ("v", "v"): 0x2502, + ("V", "V"): 0x2503, + ("3", "-"): 0x2504, + ("3", "_"): 0x2505, + ("3", "!"): 0x2506, + ("3", "/"): 0x2507, + ("4", "-"): 0x2508, + ("4", "_"): 0x2509, + ("4", "!"): 0x250A, + ("4", "/"): 0x250B, + ("d", "r"): 0x250C, + ("d", "R"): 0x250D, + ("D", "r"): 0x250E, + ("D", "R"): 0x250F, + ("d", "l"): 0x2510, + ("d", "L"): 0x2511, + ("D", "l"): 0x2512, + ("L", "D"): 0x2513, + ("u", "r"): 0x2514, + ("u", "R"): 0x2515, + ("U", "r"): 0x2516, + ("U", "R"): 0x2517, + ("u", "l"): 0x2518, + ("u", "L"): 0x2519, + ("U", "l"): 0x251A, + ("U", "L"): 0x251B, + ("v", "r"): 0x251C, + ("v", "R"): 0x251D, + ("V", "r"): 0x2520, + ("V", "R"): 0x2523, + ("v", "l"): 0x2524, + ("v", "L"): 0x2525, + ("V", "l"): 0x2528, + ("V", "L"): 
0x252B, + ("d", "h"): 0x252C, + ("d", "H"): 0x252F, + ("D", "h"): 0x2530, + ("D", "H"): 0x2533, + ("u", "h"): 0x2534, + ("u", "H"): 0x2537, + ("U", "h"): 0x2538, + ("U", "H"): 0x253B, + ("v", "h"): 0x253C, + ("v", "H"): 0x253F, + ("V", "h"): 0x2542, + ("V", "H"): 0x254B, + ("F", "D"): 0x2571, + ("B", "D"): 0x2572, + ("T", "B"): 0x2580, + ("L", "B"): 0x2584, + ("F", "B"): 0x2588, + ("l", "B"): 0x258C, + ("R", "B"): 0x2590, + (".", "S"): 0x2591, + (":", "S"): 0x2592, + ("?", "S"): 0x2593, + ("f", "S"): 0x25A0, + ("O", "S"): 0x25A1, + ("R", "O"): 0x25A2, + ("R", "r"): 0x25A3, + ("R", "F"): 0x25A4, + ("R", "Y"): 0x25A5, + ("R", "H"): 0x25A6, + ("R", "Z"): 0x25A7, + ("R", "K"): 0x25A8, + ("R", "X"): 0x25A9, + ("s", "B"): 0x25AA, + ("S", "R"): 0x25AC, + ("O", "r"): 0x25AD, + ("U", "T"): 0x25B2, + ("u", "T"): 0x25B3, + ("P", "R"): 0x25B6, + ("T", "r"): 0x25B7, + ("D", "t"): 0x25BC, + ("d", "T"): 0x25BD, + ("P", "L"): 0x25C0, + ("T", "l"): 0x25C1, + ("D", "b"): 0x25C6, + ("D", "w"): 0x25C7, + ("L", "Z"): 0x25CA, + ("0", "m"): 0x25CB, + ("0", "o"): 0x25CE, + ("0", "M"): 0x25CF, + ("0", "L"): 0x25D0, + ("0", "R"): 0x25D1, + ("S", "n"): 0x25D8, + ("I", "c"): 0x25D9, + ("F", "d"): 0x25E2, + ("B", "d"): 0x25E3, + ("*", "2"): 0x2605, + ("*", "1"): 0x2606, + ("<", "H"): 0x261C, + (">", "H"): 0x261E, + ("0", "u"): 0x263A, + ("0", "U"): 0x263B, + ("S", "U"): 0x263C, + ("F", "m"): 0x2640, + ("M", "l"): 0x2642, + ("c", "S"): 0x2660, + ("c", "H"): 0x2661, + ("c", "D"): 0x2662, + ("c", "C"): 0x2663, + ("M", "d"): 0x2669, + ("M", "8"): 0x266A, + ("M", "2"): 0x266B, + ("M", "b"): 0x266D, + ("M", "x"): 0x266E, + ("M", "X"): 0x266F, + ("O", "K"): 0x2713, + ("X", "X"): 0x2717, + ("-", "X"): 0x2720, + ("I", "S"): 0x3000, + (",", "_"): 0x3001, + (".", "_"): 0x3002, + ("+", '"'): 0x3003, + ("+", "_"): 0x3004, + ("*", "_"): 0x3005, + (";", "_"): 0x3006, + ("0", "_"): 0x3007, + ("<", "+"): 0x300A, + (">", "+"): 0x300B, + ("<", "'"): 0x300C, + (">", "'"): 0x300D, + ("<", '"'): 0x300E, + (">", '"'): 0x300F, + ("(", '"'): 0x3010, + (")", '"'): 0x3011, + ("=", "T"): 0x3012, + ("=", "_"): 0x3013, + ("(", "'"): 0x3014, + (")", "'"): 0x3015, + ("(", "I"): 0x3016, + (")", "I"): 0x3017, + ("-", "?"): 0x301C, + ("A", "5"): 0x3041, + ("a", "5"): 0x3042, + ("I", "5"): 0x3043, + ("i", "5"): 0x3044, + ("U", "5"): 0x3045, + ("u", "5"): 0x3046, + ("E", "5"): 0x3047, + ("e", "5"): 0x3048, + ("O", "5"): 0x3049, + ("o", "5"): 0x304A, + ("k", "a"): 0x304B, + ("g", "a"): 0x304C, + ("k", "i"): 0x304D, + ("g", "i"): 0x304E, + ("k", "u"): 0x304F, + ("g", "u"): 0x3050, + ("k", "e"): 0x3051, + ("g", "e"): 0x3052, + ("k", "o"): 0x3053, + ("g", "o"): 0x3054, + ("s", "a"): 0x3055, + ("z", "a"): 0x3056, + ("s", "i"): 0x3057, + ("z", "i"): 0x3058, + ("s", "u"): 0x3059, + ("z", "u"): 0x305A, + ("s", "e"): 0x305B, + ("z", "e"): 0x305C, + ("s", "o"): 0x305D, + ("z", "o"): 0x305E, + ("t", "a"): 0x305F, + ("d", "a"): 0x3060, + ("t", "i"): 0x3061, + ("d", "i"): 0x3062, + ("t", "U"): 0x3063, + ("t", "u"): 0x3064, + ("d", "u"): 0x3065, + ("t", "e"): 0x3066, + ("d", "e"): 0x3067, + ("t", "o"): 0x3068, + ("d", "o"): 0x3069, + ("n", "a"): 0x306A, + ("n", "i"): 0x306B, + ("n", "u"): 0x306C, + ("n", "e"): 0x306D, + ("n", "o"): 0x306E, + ("h", "a"): 0x306F, + ("b", "a"): 0x3070, + ("p", "a"): 0x3071, + ("h", "i"): 0x3072, + ("b", "i"): 0x3073, + ("p", "i"): 0x3074, + ("h", "u"): 0x3075, + ("b", "u"): 0x3076, + ("p", "u"): 0x3077, + ("h", "e"): 0x3078, + ("b", "e"): 0x3079, + ("p", "e"): 0x307A, + ("h", "o"): 0x307B, + ("b", "o"): 0x307C, + ("p", "o"): 0x307D, + 
("m", "a"): 0x307E, + ("m", "i"): 0x307F, + ("m", "u"): 0x3080, + ("m", "e"): 0x3081, + ("m", "o"): 0x3082, + ("y", "A"): 0x3083, + ("y", "a"): 0x3084, + ("y", "U"): 0x3085, + ("y", "u"): 0x3086, + ("y", "O"): 0x3087, + ("y", "o"): 0x3088, + ("r", "a"): 0x3089, + ("r", "i"): 0x308A, + ("r", "u"): 0x308B, + ("r", "e"): 0x308C, + ("r", "o"): 0x308D, + ("w", "A"): 0x308E, + ("w", "a"): 0x308F, + ("w", "i"): 0x3090, + ("w", "e"): 0x3091, + ("w", "o"): 0x3092, + ("n", "5"): 0x3093, + ("v", "u"): 0x3094, + ('"', "5"): 0x309B, + ("0", "5"): 0x309C, + ("*", "5"): 0x309D, + ("+", "5"): 0x309E, + ("a", "6"): 0x30A1, + ("A", "6"): 0x30A2, + ("i", "6"): 0x30A3, + ("I", "6"): 0x30A4, + ("u", "6"): 0x30A5, + ("U", "6"): 0x30A6, + ("e", "6"): 0x30A7, + ("E", "6"): 0x30A8, + ("o", "6"): 0x30A9, + ("O", "6"): 0x30AA, + ("K", "a"): 0x30AB, + ("G", "a"): 0x30AC, + ("K", "i"): 0x30AD, + ("G", "i"): 0x30AE, + ("K", "u"): 0x30AF, + ("G", "u"): 0x30B0, + ("K", "e"): 0x30B1, + ("G", "e"): 0x30B2, + ("K", "o"): 0x30B3, + ("G", "o"): 0x30B4, + ("S", "a"): 0x30B5, + ("Z", "a"): 0x30B6, + ("S", "i"): 0x30B7, + ("Z", "i"): 0x30B8, + ("S", "u"): 0x30B9, + ("Z", "u"): 0x30BA, + ("S", "e"): 0x30BB, + ("Z", "e"): 0x30BC, + ("S", "o"): 0x30BD, + ("Z", "o"): 0x30BE, + ("T", "a"): 0x30BF, + ("D", "a"): 0x30C0, + ("T", "i"): 0x30C1, + ("D", "i"): 0x30C2, + ("T", "U"): 0x30C3, + ("T", "u"): 0x30C4, + ("D", "u"): 0x30C5, + ("T", "e"): 0x30C6, + ("D", "e"): 0x30C7, + ("T", "o"): 0x30C8, + ("D", "o"): 0x30C9, + ("N", "a"): 0x30CA, + ("N", "i"): 0x30CB, + ("N", "u"): 0x30CC, + ("N", "e"): 0x30CD, + ("N", "o"): 0x30CE, + ("H", "a"): 0x30CF, + ("B", "a"): 0x30D0, + ("P", "a"): 0x30D1, + ("H", "i"): 0x30D2, + ("B", "i"): 0x30D3, + ("P", "i"): 0x30D4, + ("H", "u"): 0x30D5, + ("B", "u"): 0x30D6, + ("P", "u"): 0x30D7, + ("H", "e"): 0x30D8, + ("B", "e"): 0x30D9, + ("P", "e"): 0x30DA, + ("H", "o"): 0x30DB, + ("B", "o"): 0x30DC, + ("P", "o"): 0x30DD, + ("M", "a"): 0x30DE, + ("M", "i"): 0x30DF, + ("M", "u"): 0x30E0, + ("M", "e"): 0x30E1, + ("M", "o"): 0x30E2, + ("Y", "A"): 0x30E3, + ("Y", "a"): 0x30E4, + ("Y", "U"): 0x30E5, + ("Y", "u"): 0x30E6, + ("Y", "O"): 0x30E7, + ("Y", "o"): 0x30E8, + ("R", "a"): 0x30E9, + ("R", "i"): 0x30EA, + ("R", "u"): 0x30EB, + ("R", "e"): 0x30EC, + ("R", "o"): 0x30ED, + ("W", "A"): 0x30EE, + ("W", "a"): 0x30EF, + ("W", "i"): 0x30F0, + ("W", "e"): 0x30F1, + ("W", "o"): 0x30F2, + ("N", "6"): 0x30F3, + ("V", "u"): 0x30F4, + ("K", "A"): 0x30F5, + ("K", "E"): 0x30F6, + ("V", "a"): 0x30F7, + ("V", "i"): 0x30F8, + ("V", "e"): 0x30F9, + ("V", "o"): 0x30FA, + (".", "6"): 0x30FB, + ("-", "6"): 0x30FC, + ("*", "6"): 0x30FD, + ("+", "6"): 0x30FE, + ("b", "4"): 0x3105, + ("p", "4"): 0x3106, + ("m", "4"): 0x3107, + ("f", "4"): 0x3108, + ("d", "4"): 0x3109, + ("t", "4"): 0x310A, + ("n", "4"): 0x310B, + ("l", "4"): 0x310C, + ("g", "4"): 0x310D, + ("k", "4"): 0x310E, + ("h", "4"): 0x310F, + ("j", "4"): 0x3110, + ("q", "4"): 0x3111, + ("x", "4"): 0x3112, + ("z", "h"): 0x3113, + ("c", "h"): 0x3114, + ("s", "h"): 0x3115, + ("r", "4"): 0x3116, + ("z", "4"): 0x3117, + ("c", "4"): 0x3118, + ("s", "4"): 0x3119, + ("a", "4"): 0x311A, + ("o", "4"): 0x311B, + ("e", "4"): 0x311C, + ("a", "i"): 0x311E, + ("e", "i"): 0x311F, + ("a", "u"): 0x3120, + ("o", "u"): 0x3121, + ("a", "n"): 0x3122, + ("e", "n"): 0x3123, + ("a", "N"): 0x3124, + ("e", "N"): 0x3125, + ("e", "r"): 0x3126, + ("i", "4"): 0x3127, + ("u", "4"): 0x3128, + ("i", "u"): 0x3129, + ("v", "4"): 0x312A, + ("n", "G"): 0x312B, + ("g", "n"): 0x312C, + ("1", "c"): 0x3220, + ("2", "c"): 
0x3221, + ("3", "c"): 0x3222, + ("4", "c"): 0x3223, + ("5", "c"): 0x3224, + ("6", "c"): 0x3225, + ("7", "c"): 0x3226, + ("8", "c"): 0x3227, + ("9", "c"): 0x3228, + # code points 0xe000 - 0xefff excluded, they have no assigned + # characters, only used in proposals. + ("f", "f"): 0xFB00, + ("f", "i"): 0xFB01, + ("f", "l"): 0xFB02, + ("f", "t"): 0xFB05, + ("s", "t"): 0xFB06, + # Vim 5.x compatible digraphs that don't conflict with the above + ("~", "!"): 161, + ("c", "|"): 162, + ("$", "$"): 163, + ("o", "x"): 164, # currency symbol in ISO 8859-1 + ("Y", "-"): 165, + ("|", "|"): 166, + ("c", "O"): 169, + ("-", ","): 172, + ("-", "="): 175, + ("~", "o"): 176, + ("2", "2"): 178, + ("3", "3"): 179, + ("p", "p"): 182, + ("~", "."): 183, + ("1", "1"): 185, + ("~", "?"): 191, + ("A", "`"): 192, + ("A", "^"): 194, + ("A", "~"): 195, + ("A", '"'): 196, + ("A", "@"): 197, + ("E", "`"): 200, + ("E", "^"): 202, + ("E", '"'): 203, + ("I", "`"): 204, + ("I", "^"): 206, + ("I", '"'): 207, + ("N", "~"): 209, + ("O", "`"): 210, + ("O", "^"): 212, + ("O", "~"): 213, + ("/", "\\"): 215, # multiplication symbol in ISO 8859-1 + ("U", "`"): 217, + ("U", "^"): 219, + ("I", "p"): 222, + ("a", "`"): 224, + ("a", "^"): 226, + ("a", "~"): 227, + ("a", '"'): 228, + ("a", "@"): 229, + ("e", "`"): 232, + ("e", "^"): 234, + ("e", '"'): 235, + ("i", "`"): 236, + ("i", "^"): 238, + ("n", "~"): 241, + ("o", "`"): 242, + ("o", "^"): 244, + ("o", "~"): 245, + ("u", "`"): 249, + ("u", "^"): 251, + ("y", '"'): 255, +} diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/key_binding/emacs_state.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/key_binding/emacs_state.py new file mode 100644 index 0000000000000000000000000000000000000000..6a2ebf46d803e03bfab689a27c6e48f437ab355e --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/key_binding/emacs_state.py @@ -0,0 +1,36 @@ +from __future__ import annotations + +from .key_processor import KeyPress + +__all__ = [ + "EmacsState", +] + + +class EmacsState: + """ + Mutable class to hold Emacs specific state. + """ + + def __init__(self) -> None: + # Simple macro recording. (Like Readline does.) + # (For Emacs mode.) + self.macro: list[KeyPress] | None = [] + self.current_recording: list[KeyPress] | None = None + + def reset(self) -> None: + self.current_recording = None + + @property + def is_recording(self) -> bool: + "Tell whether we are recording a macro." + return self.current_recording is not None + + def start_macro(self) -> None: + "Start recording macro." + self.current_recording = [] + + def end_macro(self) -> None: + "End recording macro." 
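+        # (The finished recording becomes the new macro; clearing
+        # `current_recording` makes `is_recording` False again.)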
+        self.macro = self.current_recording
+        self.current_recording = None
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/key_binding/vi_state.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/key_binding/vi_state.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ec552faa26bb986ad231489242e570f7dc82a1e
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/key_binding/vi_state.py
@@ -0,0 +1,107 @@
+from __future__ import annotations
+
+from enum import Enum
+from typing import TYPE_CHECKING, Callable
+
+from prompt_toolkit.clipboard import ClipboardData
+
+if TYPE_CHECKING:
+    from .key_bindings.vi import TextObject
+    from .key_processor import KeyPressEvent
+
+__all__ = [
+    "InputMode",
+    "CharacterFind",
+    "ViState",
+]
+
+
+class InputMode(str, Enum):
+    value: str
+
+    INSERT = "vi-insert"
+    INSERT_MULTIPLE = "vi-insert-multiple"
+    NAVIGATION = "vi-navigation"  # Normal mode.
+    REPLACE = "vi-replace"
+    REPLACE_SINGLE = "vi-replace-single"
+
+
+class CharacterFind:
+    def __init__(self, character: str, backwards: bool = False) -> None:
+        self.character = character
+        self.backwards = backwards
+
+
+class ViState:
+    """
+    Mutable class to hold the state of the Vi navigation.
+    """
+
+    def __init__(self) -> None:
+        #: None or CharacterFind instance. (This is used to repeat the last
+        #: search in Vi mode, by pressing the 'n' or 'N' in navigation mode.)
+        self.last_character_find: CharacterFind | None = None
+
+        # When an operator is given and we are waiting for a text object,
+        # -- e.g. in the case of 'dw', after the 'd' --, an operator callback
+        # is set here.
+        self.operator_func: None | (Callable[[KeyPressEvent, TextObject], None]) = None
+        self.operator_arg: int | None = None
+
+        #: Named registers. Maps register name (e.g. 'a') to
+        #: :class:`ClipboardData` instances.
+        self.named_registers: dict[str, ClipboardData] = {}
+
+        #: The Vi mode we're currently in.
+        self.__input_mode = InputMode.INSERT
+
+        #: Waiting for digraph.
+        self.waiting_for_digraph = False
+        self.digraph_symbol1: str | None = None  # (None or a symbol.)
+
+        #: When true, make ~ act as an operator.
+        self.tilde_operator = False
+
+        #: Register in which we are recording a macro.
+        #: `None` when not recording anything.
+        # Note that the recording is only stored in the register after the
+        # recording is stopped. So we record in a separate `current_recording`
+        # variable.
+        self.recording_register: str | None = None
+        self.current_recording: str = ""
+
+        # Temporary navigation (normal) mode.
+        # This happens when control-o has been pressed in insert or replace
+        # mode. The user can now do one navigation action and we'll return back
+        # to insert/replace.
+        self.temporary_navigation_mode = False
+
+    @property
+    def input_mode(self) -> InputMode:
+        "Get `InputMode`."
+        return self.__input_mode
+
+    @input_mode.setter
+    def input_mode(self, value: InputMode) -> None:
+        "Set `InputMode`."
+        if value == InputMode.NAVIGATION:
+            self.waiting_for_digraph = False
+            self.operator_func = None
+            self.operator_arg = None
+
+        self.__input_mode = value
+
+    def reset(self) -> None:
+        """
+        Reset state and go back to INSERT mode.
+        """
+        # Go back to insert mode.
+        self.input_mode = InputMode.INSERT
+
+        self.waiting_for_digraph = False
+        self.operator_func = None
+        self.operator_arg = None
+
+        # Reset recording state.
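+        # (Any partially recorded macro is discarded here; a `None`
+        # `recording_register` means "not recording".)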
+        self.recording_register = None
+        self.current_recording = ""
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/__init__.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cd0c7725cfada09be5b653cf1102c74be793f68
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/__init__.py
@@ -0,0 +1,147 @@
+"""
+Command line layout definitions
+-------------------------------
+
+The layout of a command line interface is defined by a Container instance.
+There are two main groups of classes here. Containers and controls:
+
+- A container can contain other containers or controls; it can have multiple
+  children and it decides about the dimensions.
+- A control is responsible for rendering the actual content to a screen.
+  A control can propose some dimensions, but it's the container that decides
+  about the dimensions -- or, when the control consumes more space, which part
+  of the control will be visible.
+
+
+Container classes::
+
+    - Container (Abstract base class)
+       |- HSplit (Horizontal split)
+       |- VSplit (Vertical split)
+       |- FloatContainer (Container which can also contain menus and other floats)
+       `- Window (Container which contains one actual control.)
+
+Control classes::
+
+    - UIControl (Abstract base class)
+       |- FormattedTextControl (Renders formatted text, or a simple list of text fragments)
+       `- BufferControl (Renders an input buffer.)
+
+
+Usually, you end up wrapping every control inside a `Window` object, because
+that's the only way to render it in a layout.
+
+There are some prepared toolbars which are ready to use:
+
+- SystemToolbar (Shows the 'system' input buffer, for entering system commands.)
+- ArgToolbar (Shows the input 'arg', for repetition of input commands.)
+- SearchToolbar (Shows the 'search' input buffer, for incremental search.)
+- CompletionsToolbar (Shows the completions of the current buffer.)
+- ValidationToolbar (Shows validation errors of the current buffer.)
+
+And one prepared menu:
+
+- CompletionsMenu
+
+"""
+
+from __future__ import annotations
+
+from .containers import (
+    AnyContainer,
+    ColorColumn,
+    ConditionalContainer,
+    Container,
+    DynamicContainer,
+    Float,
+    FloatContainer,
+    HorizontalAlign,
+    HSplit,
+    ScrollOffsets,
+    VerticalAlign,
+    VSplit,
+    Window,
+    WindowAlign,
+    WindowRenderInfo,
+    is_container,
+    to_container,
+    to_window,
+)
+from .controls import (
+    BufferControl,
+    DummyControl,
+    FormattedTextControl,
+    SearchBufferControl,
+    UIContent,
+    UIControl,
+)
+from .dimension import (
+    AnyDimension,
+    D,
+    Dimension,
+    is_dimension,
+    max_layout_dimensions,
+    sum_layout_dimensions,
+    to_dimension,
+)
+from .layout import InvalidLayoutError, Layout, walk
+from .margins import (
+    ConditionalMargin,
+    Margin,
+    NumberedMargin,
+    PromptMargin,
+    ScrollbarMargin,
+)
+from .menus import CompletionsMenu, MultiColumnCompletionsMenu
+from .scrollable_pane import ScrollablePane
+
+__all__ = [
+    # Layout.
+    "Layout",
+    "InvalidLayoutError",
+    "walk",
+    # Dimensions.
+    "AnyDimension",
+    "Dimension",
+    "D",
+    "sum_layout_dimensions",
+    "max_layout_dimensions",
+    "to_dimension",
+    "is_dimension",
+    # Containers.
+ "AnyContainer", + "Container", + "HorizontalAlign", + "VerticalAlign", + "HSplit", + "VSplit", + "FloatContainer", + "Float", + "WindowAlign", + "Window", + "WindowRenderInfo", + "ConditionalContainer", + "ScrollOffsets", + "ColorColumn", + "to_container", + "to_window", + "is_container", + "DynamicContainer", + "ScrollablePane", + # Controls. + "BufferControl", + "SearchBufferControl", + "DummyControl", + "FormattedTextControl", + "UIControl", + "UIContent", + # Margins. + "Margin", + "NumberedMargin", + "ScrollbarMargin", + "ConditionalMargin", + "PromptMargin", + # Menus. + "CompletionsMenu", + "MultiColumnCompletionsMenu", +] diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/containers.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/containers.py new file mode 100644 index 0000000000000000000000000000000000000000..99b453477c96ad7c4290e52a158cb93f951df2c3 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/containers.py @@ -0,0 +1,2738 @@ +""" +Container for the layout. +(Containers can contain other containers or user interface controls.) +""" + +from __future__ import annotations + +from abc import ABCMeta, abstractmethod +from enum import Enum +from functools import partial +from typing import TYPE_CHECKING, Callable, Sequence, Union, cast + +from prompt_toolkit.application.current import get_app +from prompt_toolkit.cache import SimpleCache +from prompt_toolkit.data_structures import Point +from prompt_toolkit.filters import ( + FilterOrBool, + emacs_insert_mode, + to_filter, + vi_insert_mode, +) +from prompt_toolkit.formatted_text import ( + AnyFormattedText, + StyleAndTextTuples, + to_formatted_text, +) +from prompt_toolkit.formatted_text.utils import ( + fragment_list_to_text, + fragment_list_width, +) +from prompt_toolkit.key_binding import KeyBindingsBase +from prompt_toolkit.mouse_events import MouseEvent, MouseEventType +from prompt_toolkit.utils import get_cwidth, take_using_weights, to_int, to_str + +from .controls import ( + DummyControl, + FormattedTextControl, + GetLinePrefixCallable, + UIContent, + UIControl, +) +from .dimension import ( + AnyDimension, + Dimension, + max_layout_dimensions, + sum_layout_dimensions, + to_dimension, +) +from .margins import Margin +from .mouse_handlers import MouseHandlers +from .screen import _CHAR_CACHE, Screen, WritePosition +from .utils import explode_text_fragments + +if TYPE_CHECKING: + from typing_extensions import Protocol, TypeGuard + + from prompt_toolkit.key_binding.key_bindings import NotImplementedOrNone + + +__all__ = [ + "AnyContainer", + "Container", + "HorizontalAlign", + "VerticalAlign", + "HSplit", + "VSplit", + "FloatContainer", + "Float", + "WindowAlign", + "Window", + "WindowRenderInfo", + "ConditionalContainer", + "ScrollOffsets", + "ColorColumn", + "to_container", + "to_window", + "is_container", + "DynamicContainer", +] + + +class Container(metaclass=ABCMeta): + """ + Base class for user interface layout. + """ + + @abstractmethod + def reset(self) -> None: + """ + Reset the state of this container and all the children. + (E.g. reset scroll offsets, etc...) + """ + + @abstractmethod + def preferred_width(self, max_available_width: int) -> Dimension: + """ + Return a :class:`~prompt_toolkit.layout.Dimension` that represents the + desired width for this container. 
+ """ + + @abstractmethod + def preferred_height(self, width: int, max_available_height: int) -> Dimension: + """ + Return a :class:`~prompt_toolkit.layout.Dimension` that represents the + desired height for this container. + """ + + @abstractmethod + def write_to_screen( + self, + screen: Screen, + mouse_handlers: MouseHandlers, + write_position: WritePosition, + parent_style: str, + erase_bg: bool, + z_index: int | None, + ) -> None: + """ + Write the actual content to the screen. + + :param screen: :class:`~prompt_toolkit.layout.screen.Screen` + :param mouse_handlers: :class:`~prompt_toolkit.layout.mouse_handlers.MouseHandlers`. + :param parent_style: Style string to pass to the :class:`.Window` + object. This will be applied to all content of the windows. + :class:`.VSplit` and :class:`.HSplit` can use it to pass their + style down to the windows that they contain. + :param z_index: Used for propagating z_index from parent to child. + """ + + def is_modal(self) -> bool: + """ + When this container is modal, key bindings from parent containers are + not taken into account if a user control in this container is focused. + """ + return False + + def get_key_bindings(self) -> KeyBindingsBase | None: + """ + Returns a :class:`.KeyBindings` object. These bindings become active when any + user control in this container has the focus, except if any containers + between this container and the focused user control is modal. + """ + return None + + @abstractmethod + def get_children(self) -> list[Container]: + """ + Return the list of child :class:`.Container` objects. + """ + return [] + + +if TYPE_CHECKING: + + class MagicContainer(Protocol): + """ + Any object that implements ``__pt_container__`` represents a container. + """ + + def __pt_container__(self) -> AnyContainer: ... + + +AnyContainer = Union[Container, "MagicContainer"] + + +def _window_too_small() -> Window: + "Create a `Window` that displays the 'Window too small' text." + return Window( + FormattedTextControl(text=[("class:window-too-small", " Window too small... ")]) + ) + + +class VerticalAlign(Enum): + "Alignment for `HSplit`." + + TOP = "TOP" + CENTER = "CENTER" + BOTTOM = "BOTTOM" + JUSTIFY = "JUSTIFY" + + +class HorizontalAlign(Enum): + "Alignment for `VSplit`." + + LEFT = "LEFT" + CENTER = "CENTER" + RIGHT = "RIGHT" + JUSTIFY = "JUSTIFY" + + +class _Split(Container): + """ + The common parts of `VSplit` and `HSplit`. + """ + + def __init__( + self, + children: Sequence[AnyContainer], + window_too_small: Container | None = None, + padding: AnyDimension = Dimension.exact(0), + padding_char: str | None = None, + padding_style: str = "", + width: AnyDimension = None, + height: AnyDimension = None, + z_index: int | None = None, + modal: bool = False, + key_bindings: KeyBindingsBase | None = None, + style: str | Callable[[], str] = "", + ) -> None: + self.children = [to_container(c) for c in children] + self.window_too_small = window_too_small or _window_too_small() + self.padding = padding + self.padding_char = padding_char + self.padding_style = padding_style + + self.width = width + self.height = height + self.z_index = z_index + + self.modal = modal + self.key_bindings = key_bindings + self.style = style + + def is_modal(self) -> bool: + return self.modal + + def get_key_bindings(self) -> KeyBindingsBase | None: + return self.key_bindings + + def get_children(self) -> list[Container]: + return self.children + + +class HSplit(_Split): + """ + Several layouts, one stacked above/under the other. 
+    ::
+
+        +--------------------+
+        |                    |
+        +--------------------+
+        |                    |
+        +--------------------+
+
+    By default, this doesn't display a horizontal line between the children,
+    but if this is something you need, then create a HSplit as follows::
+
+        HSplit(children=[ ... ], padding_char='-',
+               padding=1, padding_style='#ffff00')
+
+    :param children: List of child :class:`.Container` objects.
+    :param window_too_small: A :class:`.Container` object that is displayed if
+        there is not enough space for all the children. By default, this is a
+        "Window too small" message.
+    :param align: `VerticalAlign` value.
+    :param width: When given, use this width instead of looking at the children.
+    :param height: When given, use this height instead of looking at the children.
+    :param z_index: (int or None) When specified, this can be used to bring
+        the element in front of floating elements. `None` means: inherit from
+        parent.
+    :param style: A style string.
+    :param modal: ``True`` or ``False``.
+    :param key_bindings: ``None`` or a :class:`.KeyBindings` object.
+
+    :param padding: (`Dimension` or int), size to be used for the padding.
+    :param padding_char: Character to be used for filling in the padding.
+    :param padding_style: Style to be applied to the padding.
+    """
+
+    def __init__(
+        self,
+        children: Sequence[AnyContainer],
+        window_too_small: Container | None = None,
+        align: VerticalAlign = VerticalAlign.JUSTIFY,
+        padding: AnyDimension = 0,
+        padding_char: str | None = None,
+        padding_style: str = "",
+        width: AnyDimension = None,
+        height: AnyDimension = None,
+        z_index: int | None = None,
+        modal: bool = False,
+        key_bindings: KeyBindingsBase | None = None,
+        style: str | Callable[[], str] = "",
+    ) -> None:
+        super().__init__(
+            children=children,
+            window_too_small=window_too_small,
+            padding=padding,
+            padding_char=padding_char,
+            padding_style=padding_style,
+            width=width,
+            height=height,
+            z_index=z_index,
+            modal=modal,
+            key_bindings=key_bindings,
+            style=style,
+        )
+
+        self.align = align
+
+        self._children_cache: SimpleCache[tuple[Container, ...], list[Container]] = (
+            SimpleCache(maxsize=1)
+        )
+        self._remaining_space_window = Window()  # Dummy window.
+
+    def preferred_width(self, max_available_width: int) -> Dimension:
+        if self.width is not None:
+            return to_dimension(self.width)
+
+        if self.children:
+            dimensions = [
+                c.preferred_width(max_available_width) for c in self.children
+            ]
+            return max_layout_dimensions(dimensions)
+        else:
+            return Dimension()
+
+    def preferred_height(self, width: int, max_available_height: int) -> Dimension:
+        if self.height is not None:
+            return to_dimension(self.height)
+
+        dimensions = [
+            c.preferred_height(width, max_available_height) for c in self._all_children
+        ]
+        return sum_layout_dimensions(dimensions)
+
+    def reset(self) -> None:
+        for c in self.children:
+            c.reset()
+
+    @property
+    def _all_children(self) -> list[Container]:
+        """
+        List of child objects, including padding.
+        """
+
+        def get() -> list[Container]:
+            result: list[Container] = []
+
+            # Padding top.
+            if self.align in (VerticalAlign.CENTER, VerticalAlign.BOTTOM):
+                result.append(Window(width=Dimension(preferred=0)))
+
+            # The children with padding.
+            for child in self.children:
+                result.append(child)
+                result.append(
+                    Window(
+                        height=self.padding,
+                        char=self.padding_char,
+                        style=self.padding_style,
+                    )
+                )
+            if result:
+                result.pop()
+
+            # Padding bottom.
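+            # (These zero-preferred dummy windows act as stretchable filler:
+            # for TOP and CENTER alignment the filler below the children
+            # absorbs the leftover vertical space.)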
+ if self.align in (VerticalAlign.CENTER, VerticalAlign.TOP): + result.append(Window(width=Dimension(preferred=0))) + + return result + + return self._children_cache.get(tuple(self.children), get) + + def write_to_screen( + self, + screen: Screen, + mouse_handlers: MouseHandlers, + write_position: WritePosition, + parent_style: str, + erase_bg: bool, + z_index: int | None, + ) -> None: + """ + Render the prompt to a `Screen` instance. + + :param screen: The :class:`~prompt_toolkit.layout.screen.Screen` class + to which the output has to be written. + """ + sizes = self._divide_heights(write_position) + style = parent_style + " " + to_str(self.style) + z_index = z_index if self.z_index is None else self.z_index + + if sizes is None: + self.window_too_small.write_to_screen( + screen, mouse_handlers, write_position, style, erase_bg, z_index + ) + else: + # + ypos = write_position.ypos + xpos = write_position.xpos + width = write_position.width + + # Draw child panes. + for s, c in zip(sizes, self._all_children): + c.write_to_screen( + screen, + mouse_handlers, + WritePosition(xpos, ypos, width, s), + style, + erase_bg, + z_index, + ) + ypos += s + + # Fill in the remaining space. This happens when a child control + # refuses to take more space and we don't have any padding. Adding a + # dummy child control for this (in `self._all_children`) is not + # desired, because in some situations, it would take more space, even + # when it's not required. This is required to apply the styling. + remaining_height = write_position.ypos + write_position.height - ypos + if remaining_height > 0: + self._remaining_space_window.write_to_screen( + screen, + mouse_handlers, + WritePosition(xpos, ypos, width, remaining_height), + style, + erase_bg, + z_index, + ) + + def _divide_heights(self, write_position: WritePosition) -> list[int] | None: + """ + Return the heights for all rows. + Or None when there is not enough space. + """ + if not self.children: + return [] + + width = write_position.width + height = write_position.height + + # Calculate heights. + dimensions = [c.preferred_height(width, height) for c in self._all_children] + + # Sum dimensions + sum_dimensions = sum_layout_dimensions(dimensions) + + # If there is not enough space for both. + # Don't do anything. + if sum_dimensions.min > height: + return None + + # Find optimal sizes. (Start with minimal size, increase until we cover + # the whole height.) + sizes = [d.min for d in dimensions] + + child_generator = take_using_weights( + items=list(range(len(dimensions))), weights=[d.weight for d in dimensions] + ) + + i = next(child_generator) + + # Increase until we meet at least the 'preferred' size. + preferred_stop = min(height, sum_dimensions.preferred) + preferred_dimensions = [d.preferred for d in dimensions] + + while sum(sizes) < preferred_stop: + if sizes[i] < preferred_dimensions[i]: + sizes[i] += 1 + i = next(child_generator) + + # Increase until we use all the available space. (or until "max") + if not get_app().is_done: + max_stop = min(height, sum_dimensions.max) + max_dimensions = [d.max for d in dimensions] + + while sum(sizes) < max_stop: + if sizes[i] < max_dimensions[i]: + sizes[i] += 1 + i = next(child_generator) + + return sizes + + +class VSplit(_Split): + """ + Several layouts, one stacked left/right of the other. 
::

+        +---------+----------+
+        |         |          |
+        |         |          |
+        +---------+----------+
+
+    By default, this doesn't display a vertical line between the children, but
+    if this is something you need, then create a VSplit as follows::
+
+        VSplit(children=[ ... ], padding_char='|',
+               padding=1, padding_style='#ffff00')
+
+    :param children: List of child :class:`.Container` objects.
+    :param window_too_small: A :class:`.Container` object that is displayed if
+        there is not enough space for all the children. By default, this is a
+        "Window too small" message.
+    :param align: `HorizontalAlign` value.
+    :param width: When given, use this width instead of looking at the children.
+    :param height: When given, use this height instead of looking at the children.
+    :param z_index: (int or None) When specified, this can be used to bring an
+        element in front of floating elements. `None` means: inherit from parent.
+    :param style: A style string.
+    :param modal: ``True`` or ``False``.
+    :param key_bindings: ``None`` or a :class:`.KeyBindings` object.
+
+    :param padding: (`Dimension` or int), size to be used for the padding.
+    :param padding_char: Character to be used for filling in the padding.
+    :param padding_style: Style to be applied to the padding.
+    """
+
+    def __init__(
+        self,
+        children: Sequence[AnyContainer],
+        window_too_small: Container | None = None,
+        align: HorizontalAlign = HorizontalAlign.JUSTIFY,
+        padding: AnyDimension = 0,
+        padding_char: str | None = None,
+        padding_style: str = "",
+        width: AnyDimension = None,
+        height: AnyDimension = None,
+        z_index: int | None = None,
+        modal: bool = False,
+        key_bindings: KeyBindingsBase | None = None,
+        style: str | Callable[[], str] = "",
+    ) -> None:
+        super().__init__(
+            children=children,
+            window_too_small=window_too_small,
+            padding=padding,
+            padding_char=padding_char,
+            padding_style=padding_style,
+            width=width,
+            height=height,
+            z_index=z_index,
+            modal=modal,
+            key_bindings=key_bindings,
+            style=style,
+        )
+
+        self.align = align
+
+        self._children_cache: SimpleCache[tuple[Container, ...], list[Container]] = (
+            SimpleCache(maxsize=1)
+        )
+        self._remaining_space_window = Window()  # Dummy window.
+
+    def preferred_width(self, max_available_width: int) -> Dimension:
+        if self.width is not None:
+            return to_dimension(self.width)
+
+        dimensions = [
+            c.preferred_width(max_available_width) for c in self._all_children
+        ]
+
+        return sum_layout_dimensions(dimensions)
+
+    def preferred_height(self, width: int, max_available_height: int) -> Dimension:
+        if self.height is not None:
+            return to_dimension(self.height)
+
+        # At the point where we want to calculate the heights, the widths have
+        # already been decided. So we can trust `width` to be the actual
+        # `width` that's going to be used for the rendering. So,
+        # `divide_widths` is supposed to use all of the available width.
+        # Using only the `preferred` width caused a bug where the reported
+        # height was more than required. (We had a `BufferControl` which did
+        # wrap lines because of the smaller width returned by `_divide_widths`.)
+
+        sizes = self._divide_widths(width)
+        children = self._all_children
+
+        if sizes is None:
+            return Dimension()
+        else:
+            dimensions = [
+                c.preferred_height(s, max_available_height)
+                for s, c in zip(sizes, children)
+            ]
+            return max_layout_dimensions(dimensions)
+
+    def reset(self) -> None:
+        for c in self.children:
+            c.reset()
+
+    @property
+    def _all_children(self) -> list[Container]:
+        """
+        List of child objects, including padding.
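+
+        (Alignment is implemented by appending dummy windows on the left
+        and/or right; they absorb the leftover width and push the real
+        children towards the requested side.)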
+ """ + + def get() -> list[Container]: + result: list[Container] = [] + + # Padding left. + if self.align in (HorizontalAlign.CENTER, HorizontalAlign.RIGHT): + result.append(Window(width=Dimension(preferred=0))) + + # The children with padding. + for child in self.children: + result.append(child) + result.append( + Window( + width=self.padding, + char=self.padding_char, + style=self.padding_style, + ) + ) + if result: + result.pop() + + # Padding right. + if self.align in (HorizontalAlign.CENTER, HorizontalAlign.LEFT): + result.append(Window(width=Dimension(preferred=0))) + + return result + + return self._children_cache.get(tuple(self.children), get) + + def _divide_widths(self, width: int) -> list[int] | None: + """ + Return the widths for all columns. + Or None when there is not enough space. + """ + children = self._all_children + + if not children: + return [] + + # Calculate widths. + dimensions = [c.preferred_width(width) for c in children] + preferred_dimensions = [d.preferred for d in dimensions] + + # Sum dimensions + sum_dimensions = sum_layout_dimensions(dimensions) + + # If there is not enough space for both. + # Don't do anything. + if sum_dimensions.min > width: + return None + + # Find optimal sizes. (Start with minimal size, increase until we cover + # the whole width.) + sizes = [d.min for d in dimensions] + + child_generator = take_using_weights( + items=list(range(len(dimensions))), weights=[d.weight for d in dimensions] + ) + + i = next(child_generator) + + # Increase until we meet at least the 'preferred' size. + preferred_stop = min(width, sum_dimensions.preferred) + + while sum(sizes) < preferred_stop: + if sizes[i] < preferred_dimensions[i]: + sizes[i] += 1 + i = next(child_generator) + + # Increase until we use all the available space. + max_dimensions = [d.max for d in dimensions] + max_stop = min(width, sum_dimensions.max) + + while sum(sizes) < max_stop: + if sizes[i] < max_dimensions[i]: + sizes[i] += 1 + i = next(child_generator) + + return sizes + + def write_to_screen( + self, + screen: Screen, + mouse_handlers: MouseHandlers, + write_position: WritePosition, + parent_style: str, + erase_bg: bool, + z_index: int | None, + ) -> None: + """ + Render the prompt to a `Screen` instance. + + :param screen: The :class:`~prompt_toolkit.layout.screen.Screen` class + to which the output has to be written. + """ + if not self.children: + return + + children = self._all_children + sizes = self._divide_widths(write_position.width) + style = parent_style + " " + to_str(self.style) + z_index = z_index if self.z_index is None else self.z_index + + # If there is not enough space. + if sizes is None: + self.window_too_small.write_to_screen( + screen, mouse_handlers, write_position, style, erase_bg, z_index + ) + return + + # Calculate heights, take the largest possible, but not larger than + # write_position.height. + heights = [ + child.preferred_height(width, write_position.height).preferred + for width, child in zip(sizes, children) + ] + height = max(write_position.height, min(write_position.height, max(heights))) + + # + ypos = write_position.ypos + xpos = write_position.xpos + + # Draw all child panes. + for s, c in zip(sizes, children): + c.write_to_screen( + screen, + mouse_handlers, + WritePosition(xpos, ypos, s, height), + style, + erase_bg, + z_index, + ) + xpos += s + + # Fill in the remaining space. This happens when a child control + # refuses to take more space and we don't have any padding. 
Adding a + # dummy child control for this (in `self._all_children`) is not + # desired, because in some situations, it would take more space, even + # when it's not required. This is required to apply the styling. + remaining_width = write_position.xpos + write_position.width - xpos + if remaining_width > 0: + self._remaining_space_window.write_to_screen( + screen, + mouse_handlers, + WritePosition(xpos, ypos, remaining_width, height), + style, + erase_bg, + z_index, + ) + + +class FloatContainer(Container): + """ + Container which can contain another container for the background, as well + as a list of floating containers on top of it. + + Example Usage:: + + FloatContainer(content=Window(...), + floats=[ + Float(xcursor=True, + ycursor=True, + content=CompletionsMenu(...)) + ]) + + :param z_index: (int or None) When specified, this can be used to bring + element in front of floating elements. `None` means: inherit from parent. + This is the z_index for the whole `Float` container as a whole. + """ + + def __init__( + self, + content: AnyContainer, + floats: list[Float], + modal: bool = False, + key_bindings: KeyBindingsBase | None = None, + style: str | Callable[[], str] = "", + z_index: int | None = None, + ) -> None: + self.content = to_container(content) + self.floats = floats + + self.modal = modal + self.key_bindings = key_bindings + self.style = style + self.z_index = z_index + + def reset(self) -> None: + self.content.reset() + + for f in self.floats: + f.content.reset() + + def preferred_width(self, max_available_width: int) -> Dimension: + return self.content.preferred_width(max_available_width) + + def preferred_height(self, width: int, max_available_height: int) -> Dimension: + """ + Return the preferred height of the float container. + (We don't care about the height of the floats, they should always fit + into the dimensions provided by the container.) + """ + return self.content.preferred_height(width, max_available_height) + + def write_to_screen( + self, + screen: Screen, + mouse_handlers: MouseHandlers, + write_position: WritePosition, + parent_style: str, + erase_bg: bool, + z_index: int | None, + ) -> None: + style = parent_style + " " + to_str(self.style) + z_index = z_index if self.z_index is None else self.z_index + + self.content.write_to_screen( + screen, mouse_handlers, write_position, style, erase_bg, z_index + ) + + for number, fl in enumerate(self.floats): + # z_index of a Float is computed by summing the z_index of the + # container and the `Float`. + new_z_index = (z_index or 0) + fl.z_index + style = parent_style + " " + to_str(self.style) + + # If the float that we have here, is positioned relative to the + # cursor position, but the Window that specifies the cursor + # position is not drawn yet, because it's a Float itself, we have + # to postpone this calculation. (This is a work-around, but good + # enough for now.) + postpone = fl.xcursor is not None or fl.ycursor is not None + + if postpone: + new_z_index = ( + number + 10**8 + ) # Draw as late as possible, but keep the order. 
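+                # (10**8 is far above any realistic user-provided z-index, and
+                # adding `number` preserves the original order of the floats.)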
+ screen.draw_with_z_index( + z_index=new_z_index, + draw_func=partial( + self._draw_float, + fl, + screen, + mouse_handlers, + write_position, + style, + erase_bg, + new_z_index, + ), + ) + else: + self._draw_float( + fl, + screen, + mouse_handlers, + write_position, + style, + erase_bg, + new_z_index, + ) + + def _draw_float( + self, + fl: Float, + screen: Screen, + mouse_handlers: MouseHandlers, + write_position: WritePosition, + style: str, + erase_bg: bool, + z_index: int | None, + ) -> None: + "Draw a single Float." + # When a menu_position was given, use this instead of the cursor + # position. (These cursor positions are absolute, translate again + # relative to the write_position.) + # Note: This should be inside the for-loop, because one float could + # set the cursor position to be used for the next one. + cpos = screen.get_menu_position( + fl.attach_to_window or get_app().layout.current_window + ) + cursor_position = Point( + x=cpos.x - write_position.xpos, y=cpos.y - write_position.ypos + ) + + fl_width = fl.get_width() + fl_height = fl.get_height() + width: int + height: int + xpos: int + ypos: int + + # Left & width given. + if fl.left is not None and fl_width is not None: + xpos = fl.left + width = fl_width + # Left & right given -> calculate width. + elif fl.left is not None and fl.right is not None: + xpos = fl.left + width = write_position.width - fl.left - fl.right + # Width & right given -> calculate left. + elif fl_width is not None and fl.right is not None: + xpos = write_position.width - fl.right - fl_width + width = fl_width + # Near x position of cursor. + elif fl.xcursor: + if fl_width is None: + width = fl.content.preferred_width(write_position.width).preferred + width = min(write_position.width, width) + else: + width = fl_width + + xpos = cursor_position.x + if xpos + width > write_position.width: + xpos = max(0, write_position.width - width) + # Only width given -> center horizontally. + elif fl_width: + xpos = int((write_position.width - fl_width) / 2) + width = fl_width + # Otherwise, take preferred width from float content. + else: + width = fl.content.preferred_width(write_position.width).preferred + + if fl.left is not None: + xpos = fl.left + elif fl.right is not None: + xpos = max(0, write_position.width - width - fl.right) + else: # Center horizontally. + xpos = max(0, int((write_position.width - width) / 2)) + + # Trim. + width = min(width, write_position.width - xpos) + + # Top & height given. + if fl.top is not None and fl_height is not None: + ypos = fl.top + height = fl_height + # Top & bottom given -> calculate height. + elif fl.top is not None and fl.bottom is not None: + ypos = fl.top + height = write_position.height - fl.top - fl.bottom + # Height & bottom given -> calculate top. + elif fl_height is not None and fl.bottom is not None: + ypos = write_position.height - fl_height - fl.bottom + height = fl_height + # Near cursor. + elif fl.ycursor: + ypos = cursor_position.y + (0 if fl.allow_cover_cursor else 1) + + if fl_height is None: + height = fl.content.preferred_height( + width, write_position.height + ).preferred + else: + height = fl_height + + # Reduce height if not enough space. (We can use the height + # when the content requires it.) + if height > write_position.height - ypos: + if write_position.height - ypos + 1 >= ypos: + # When the space below the cursor is more than + # the space above, just reduce the height. + height = write_position.height - ypos + else: + # Otherwise, fit the float above the cursor. 
+ height = min(height, cursor_position.y) + ypos = cursor_position.y - height + + # Only height given -> center vertically. + elif fl_height: + ypos = int((write_position.height - fl_height) / 2) + height = fl_height + # Otherwise, take preferred height from content. + else: + height = fl.content.preferred_height(width, write_position.height).preferred + + if fl.top is not None: + ypos = fl.top + elif fl.bottom is not None: + ypos = max(0, write_position.height - height - fl.bottom) + else: # Center vertically. + ypos = max(0, int((write_position.height - height) / 2)) + + # Trim. + height = min(height, write_position.height - ypos) + + # Write float. + # (xpos and ypos can be negative: a float can be partially visible.) + if height > 0 and width > 0: + wp = WritePosition( + xpos=xpos + write_position.xpos, + ypos=ypos + write_position.ypos, + width=width, + height=height, + ) + + if not fl.hide_when_covering_content or self._area_is_empty(screen, wp): + fl.content.write_to_screen( + screen, + mouse_handlers, + wp, + style, + erase_bg=not fl.transparent(), + z_index=z_index, + ) + + def _area_is_empty(self, screen: Screen, write_position: WritePosition) -> bool: + """ + Return True when the area below the write position is still empty. + (For floats that should not hide content underneath.) + """ + wp = write_position + + for y in range(wp.ypos, wp.ypos + wp.height): + if y in screen.data_buffer: + row = screen.data_buffer[y] + + for x in range(wp.xpos, wp.xpos + wp.width): + c = row[x] + if c.char != " ": + return False + + return True + + def is_modal(self) -> bool: + return self.modal + + def get_key_bindings(self) -> KeyBindingsBase | None: + return self.key_bindings + + def get_children(self) -> list[Container]: + children = [self.content] + children.extend(f.content for f in self.floats) + return children + + +class Float: + """ + Float for use in a :class:`.FloatContainer`. + Except for the `content` parameter, all other options are optional. + + :param content: :class:`.Container` instance. + + :param width: :class:`.Dimension` or callable which returns a :class:`.Dimension`. + :param height: :class:`.Dimension` or callable which returns a :class:`.Dimension`. + + :param left: Distance to the left edge of the :class:`.FloatContainer`. + :param right: Distance to the right edge of the :class:`.FloatContainer`. + :param top: Distance to the top of the :class:`.FloatContainer`. + :param bottom: Distance to the bottom of the :class:`.FloatContainer`. + + :param attach_to_window: Attach to the cursor from this window, instead of + the current window. + :param hide_when_covering_content: Hide the float when it covers content underneath. + :param allow_cover_cursor: When `False`, make sure to display the float + below the cursor. Not on top of the indicated position. + :param z_index: Z-index position. For a Float, this needs to be at least + one. It is relative to the z_index of the parent container. + :param transparent: :class:`.Filter` indicating whether this float needs to be + drawn transparently. 
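+
+    A small usage sketch (``body`` stands for any main container defined
+    elsewhere; ``Window`` and ``FormattedTextControl`` are the classes used
+    elsewhere in this module)::
+
+        FloatContainer(
+            content=body,
+            floats=[
+                # A one-line pop-up, one cell away from the top-right corner.
+                Float(content=Window(FormattedTextControl("popup")),
+                      top=1, right=1, height=1),
+            ],
+        )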
+ """ + + def __init__( + self, + content: AnyContainer, + top: int | None = None, + right: int | None = None, + bottom: int | None = None, + left: int | None = None, + width: int | Callable[[], int] | None = None, + height: int | Callable[[], int] | None = None, + xcursor: bool = False, + ycursor: bool = False, + attach_to_window: AnyContainer | None = None, + hide_when_covering_content: bool = False, + allow_cover_cursor: bool = False, + z_index: int = 1, + transparent: bool = False, + ) -> None: + assert z_index >= 1 + + self.left = left + self.right = right + self.top = top + self.bottom = bottom + + self.width = width + self.height = height + + self.xcursor = xcursor + self.ycursor = ycursor + + self.attach_to_window = ( + to_window(attach_to_window) if attach_to_window else None + ) + + self.content = to_container(content) + self.hide_when_covering_content = hide_when_covering_content + self.allow_cover_cursor = allow_cover_cursor + self.z_index = z_index + self.transparent = to_filter(transparent) + + def get_width(self) -> int | None: + if callable(self.width): + return self.width() + return self.width + + def get_height(self) -> int | None: + if callable(self.height): + return self.height() + return self.height + + def __repr__(self) -> str: + return f"Float(content={self.content!r})" + + +class WindowRenderInfo: + """ + Render information for the last render time of this control. + It stores mapping information between the input buffers (in case of a + :class:`~prompt_toolkit.layout.controls.BufferControl`) and the actual + render position on the output screen. + + (Could be used for implementation of the Vi 'H' and 'L' key bindings as + well as implementing mouse support.) + + :param ui_content: The original :class:`.UIContent` instance that contains + the whole input, without clipping. (ui_content) + :param horizontal_scroll: The horizontal scroll of the :class:`.Window` instance. + :param vertical_scroll: The vertical scroll of the :class:`.Window` instance. + :param window_width: The width of the window that displays the content, + without the margins. + :param window_height: The height of the window that displays the content. + :param configured_scroll_offsets: The scroll offsets as configured for the + :class:`Window` instance. + :param visible_line_to_row_col: Mapping that maps the row numbers on the + displayed screen (starting from zero for the first visible line) to + (row, col) tuples pointing to the row and column of the :class:`.UIContent`. + :param rowcol_to_yx: Mapping that maps (row, column) tuples representing + coordinates of the :class:`UIContent` to (y, x) absolute coordinates at + the rendered screen. + """ + + def __init__( + self, + window: Window, + ui_content: UIContent, + horizontal_scroll: int, + vertical_scroll: int, + window_width: int, + window_height: int, + configured_scroll_offsets: ScrollOffsets, + visible_line_to_row_col: dict[int, tuple[int, int]], + rowcol_to_yx: dict[tuple[int, int], tuple[int, int]], + x_offset: int, + y_offset: int, + wrap_lines: bool, + ) -> None: + self.window = window + self.ui_content = ui_content + self.vertical_scroll = vertical_scroll + self.window_width = window_width # Width without margins. + self.window_height = window_height + + self.configured_scroll_offsets = configured_scroll_offsets + self.visible_line_to_row_col = visible_line_to_row_col + self.wrap_lines = wrap_lines + + self._rowcol_to_yx = rowcol_to_yx # row/col from input to absolute y/x + # screen coordinates. 
+ self._x_offset = x_offset + self._y_offset = y_offset + + @property + def visible_line_to_input_line(self) -> dict[int, int]: + return { + visible_line: rowcol[0] + for visible_line, rowcol in self.visible_line_to_row_col.items() + } + + @property + def cursor_position(self) -> Point: + """ + Return the cursor position coordinates, relative to the left/top corner + of the rendered screen. + """ + cpos = self.ui_content.cursor_position + try: + y, x = self._rowcol_to_yx[cpos.y, cpos.x] + except KeyError: + # For `DummyControl` for instance, the content can be empty, and so + # will `_rowcol_to_yx` be. Return 0/0 by default. + return Point(x=0, y=0) + else: + return Point(x=x - self._x_offset, y=y - self._y_offset) + + @property + def applied_scroll_offsets(self) -> ScrollOffsets: + """ + Return a :class:`.ScrollOffsets` instance that indicates the actual + offset. This can be less than or equal to what's configured. E.g, when + the cursor is completely at the top, the top offset will be zero rather + than what's configured. + """ + if self.displayed_lines[0] == 0: + top = 0 + else: + # Get row where the cursor is displayed. + y = self.input_line_to_visible_line[self.ui_content.cursor_position.y] + top = min(y, self.configured_scroll_offsets.top) + + return ScrollOffsets( + top=top, + bottom=min( + self.ui_content.line_count - self.displayed_lines[-1] - 1, + self.configured_scroll_offsets.bottom, + ), + # For left/right, it probably doesn't make sense to return something. + # (We would have to calculate the widths of all the lines and keep + # double width characters in mind.) + left=0, + right=0, + ) + + @property + def displayed_lines(self) -> list[int]: + """ + List of all the visible rows. (Line numbers of the input buffer.) + The last line may not be entirely visible. + """ + return sorted(row for row, col in self.visible_line_to_row_col.values()) + + @property + def input_line_to_visible_line(self) -> dict[int, int]: + """ + Return the dictionary mapping the line numbers of the input buffer to + the lines of the screen. When a line spans several rows at the screen, + the first row appears in the dictionary. + """ + result: dict[int, int] = {} + for k, v in self.visible_line_to_input_line.items(): + if v in result: + result[v] = min(result[v], k) + else: + result[v] = k + return result + + def first_visible_line(self, after_scroll_offset: bool = False) -> int: + """ + Return the line number (0 based) of the input document that corresponds + with the first visible line. + """ + if after_scroll_offset: + return self.displayed_lines[self.applied_scroll_offsets.top] + else: + return self.displayed_lines[0] + + def last_visible_line(self, before_scroll_offset: bool = False) -> int: + """ + Like `first_visible_line`, but for the last visible line. + """ + if before_scroll_offset: + return self.displayed_lines[-1 - self.applied_scroll_offsets.bottom] + else: + return self.displayed_lines[-1] + + def center_visible_line( + self, before_scroll_offset: bool = False, after_scroll_offset: bool = False + ) -> int: + """ + Like `first_visible_line`, but for the center visible line. + """ + return ( + self.first_visible_line(after_scroll_offset) + + ( + self.last_visible_line(before_scroll_offset) + - self.first_visible_line(after_scroll_offset) + ) + // 2 + ) + + @property + def content_height(self) -> int: + """ + The full height of the user control. 
+ """ + return self.ui_content.line_count + + @property + def full_height_visible(self) -> bool: + """ + True when the full height is visible (There is no vertical scroll.) + """ + return ( + self.vertical_scroll == 0 + and self.last_visible_line() == self.content_height + ) + + @property + def top_visible(self) -> bool: + """ + True when the top of the buffer is visible. + """ + return self.vertical_scroll == 0 + + @property + def bottom_visible(self) -> bool: + """ + True when the bottom of the buffer is visible. + """ + return self.last_visible_line() == self.content_height - 1 + + @property + def vertical_scroll_percentage(self) -> int: + """ + Vertical scroll as a percentage. (0 means: the top is visible, + 100 means: the bottom is visible.) + """ + if self.bottom_visible: + return 100 + else: + return 100 * self.vertical_scroll // self.content_height + + def get_height_for_line(self, lineno: int) -> int: + """ + Return the height of the given line. + (The height that it would take, if this line became visible.) + """ + if self.wrap_lines: + return self.ui_content.get_height_for_line( + lineno, self.window_width, self.window.get_line_prefix + ) + else: + return 1 + + +class ScrollOffsets: + """ + Scroll offsets for the :class:`.Window` class. + + Note that left/right offsets only make sense if line wrapping is disabled. + """ + + def __init__( + self, + top: int | Callable[[], int] = 0, + bottom: int | Callable[[], int] = 0, + left: int | Callable[[], int] = 0, + right: int | Callable[[], int] = 0, + ) -> None: + self._top = top + self._bottom = bottom + self._left = left + self._right = right + + @property + def top(self) -> int: + return to_int(self._top) + + @property + def bottom(self) -> int: + return to_int(self._bottom) + + @property + def left(self) -> int: + return to_int(self._left) + + @property + def right(self) -> int: + return to_int(self._right) + + def __repr__(self) -> str: + return f"ScrollOffsets(top={self._top!r}, bottom={self._bottom!r}, left={self._left!r}, right={self._right!r})" + + +class ColorColumn: + """ + Column for a :class:`.Window` to be colored. + """ + + def __init__(self, position: int, style: str = "class:color-column") -> None: + self.position = position + self.style = style + + +_in_insert_mode = vi_insert_mode | emacs_insert_mode + + +class WindowAlign(Enum): + """ + Alignment of the Window content. + + Note that this is different from `HorizontalAlign` and `VerticalAlign`, + which are used for the alignment of the child containers in respectively + `VSplit` and `HSplit`. + """ + + LEFT = "LEFT" + RIGHT = "RIGHT" + CENTER = "CENTER" + + +class Window(Container): + """ + Container that holds a control. + + :param content: :class:`.UIControl` instance. + :param width: :class:`.Dimension` instance or callable. + :param height: :class:`.Dimension` instance or callable. + :param z_index: When specified, this can be used to bring element in front + of floating elements. + :param dont_extend_width: When `True`, don't take up more width then the + preferred width reported by the control. + :param dont_extend_height: When `True`, don't take up more width then the + preferred height reported by the control. + :param ignore_content_width: A `bool` or :class:`.Filter` instance. Ignore + the :class:`.UIContent` width when calculating the dimensions. + :param ignore_content_height: A `bool` or :class:`.Filter` instance. Ignore + the :class:`.UIContent` height when calculating the dimensions. 
+    :param left_margins: A list of :class:`.Margin` instances to be displayed
+        on the left. For instance: :class:`~prompt_toolkit.layout.NumberedMargin`
+        can be one of them in order to show line numbers.
+    :param right_margins: Like `left_margins`, but on the other side.
+    :param scroll_offsets: :class:`.ScrollOffsets` instance, representing the
+        preferred amount of lines/columns to be always visible before/after the
+        cursor. When both top and bottom are a very high number, the cursor
+        will be centered vertically most of the time.
+    :param allow_scroll_beyond_bottom: A `bool` or
+        :class:`.Filter` instance. When True, allow scrolling so far that the
+        top part of the content is not visible anymore, while there is still
+        empty space available at the bottom of the window. In the Vi editor,
+        for instance, this is possible. You will see tildes while the top part
+        of the body is hidden.
+    :param wrap_lines: A `bool` or :class:`.Filter` instance. When True, don't
+        scroll horizontally, but wrap lines instead.
+    :param get_vertical_scroll: Callable that takes this window
+        instance as input and returns a preferred vertical scroll.
+        (When this is `None`, the scroll is only determined by the last and
+        current cursor position.)
+    :param get_horizontal_scroll: Callable that takes this window
+        instance as input and returns a preferred horizontal scroll.
+    :param always_hide_cursor: A `bool` or
+        :class:`.Filter` instance. When True, never display the cursor, even
+        when the user control specifies a cursor position.
+    :param cursorline: A `bool` or :class:`.Filter` instance. When True,
+        display a cursorline.
+    :param cursorcolumn: A `bool` or :class:`.Filter` instance. When True,
+        display a cursorcolumn.
+    :param colorcolumns: A list of :class:`.ColorColumn` instances that
+        describe the columns to be highlighted, or a callable that returns such
+        a list.
+    :param align: :class:`.WindowAlign` value or callable that returns a
+        :class:`.WindowAlign` value: the alignment of the content.
+    :param style: A style string. Style to be applied to all the cells in this
+        window. (This can be a callable that returns a string.)
+    :param char: (string) Character to be used for filling the background. This
+        can also be a callable that returns a character.
+    :param get_line_prefix: None or a callable that returns formatted text to
+        be inserted before a line. It takes a line number (int) and a
+        wrap_count and returns formatted text. This can be used for the
+        implementation of line continuations, things like Vim "breakindent" and
+        so on.
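+
+    A small usage sketch (it assumes a ``Buffer`` instance ``buf`` created
+    elsewhere; ``BufferControl`` and ``NumberedMargin`` are the classes
+    referenced in the parameter descriptions above)::
+
+        Window(
+            content=BufferControl(buffer=buf),
+            wrap_lines=True,
+            left_margins=[NumberedMargin()],  # Show line numbers.
+            scroll_offsets=ScrollOffsets(top=2, bottom=2),
+        )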
+ """ + + def __init__( + self, + content: UIControl | None = None, + width: AnyDimension = None, + height: AnyDimension = None, + z_index: int | None = None, + dont_extend_width: FilterOrBool = False, + dont_extend_height: FilterOrBool = False, + ignore_content_width: FilterOrBool = False, + ignore_content_height: FilterOrBool = False, + left_margins: Sequence[Margin] | None = None, + right_margins: Sequence[Margin] | None = None, + scroll_offsets: ScrollOffsets | None = None, + allow_scroll_beyond_bottom: FilterOrBool = False, + wrap_lines: FilterOrBool = False, + get_vertical_scroll: Callable[[Window], int] | None = None, + get_horizontal_scroll: Callable[[Window], int] | None = None, + always_hide_cursor: FilterOrBool = False, + cursorline: FilterOrBool = False, + cursorcolumn: FilterOrBool = False, + colorcolumns: ( + None | list[ColorColumn] | Callable[[], list[ColorColumn]] + ) = None, + align: WindowAlign | Callable[[], WindowAlign] = WindowAlign.LEFT, + style: str | Callable[[], str] = "", + char: None | str | Callable[[], str] = None, + get_line_prefix: GetLinePrefixCallable | None = None, + ) -> None: + self.allow_scroll_beyond_bottom = to_filter(allow_scroll_beyond_bottom) + self.always_hide_cursor = to_filter(always_hide_cursor) + self.wrap_lines = to_filter(wrap_lines) + self.cursorline = to_filter(cursorline) + self.cursorcolumn = to_filter(cursorcolumn) + + self.content = content or DummyControl() + self.dont_extend_width = to_filter(dont_extend_width) + self.dont_extend_height = to_filter(dont_extend_height) + self.ignore_content_width = to_filter(ignore_content_width) + self.ignore_content_height = to_filter(ignore_content_height) + self.left_margins = left_margins or [] + self.right_margins = right_margins or [] + self.scroll_offsets = scroll_offsets or ScrollOffsets() + self.get_vertical_scroll = get_vertical_scroll + self.get_horizontal_scroll = get_horizontal_scroll + self.colorcolumns = colorcolumns or [] + self.align = align + self.style = style + self.char = char + self.get_line_prefix = get_line_prefix + + self.width = width + self.height = height + self.z_index = z_index + + # Cache for the screens generated by the margin. + self._ui_content_cache: SimpleCache[tuple[int, int, int], UIContent] = ( + SimpleCache(maxsize=8) + ) + self._margin_width_cache: SimpleCache[tuple[Margin, int], int] = SimpleCache( + maxsize=1 + ) + + self.reset() + + def __repr__(self) -> str: + return f"Window(content={self.content!r})" + + def reset(self) -> None: + self.content.reset() + + #: Scrolling position of the main content. + self.vertical_scroll = 0 + self.horizontal_scroll = 0 + + # Vertical scroll 2: this is the vertical offset that a line is + # scrolled if a single line (the one that contains the cursor) consumes + # all of the vertical space. + self.vertical_scroll_2 = 0 + + #: Keep render information (mappings between buffer input and render + #: output.) + self.render_info: WindowRenderInfo | None = None + + def _get_margin_width(self, margin: Margin) -> int: + """ + Return the width for this margin. + (Calculate only once per render time.) + """ + + # Margin.get_width, needs to have a UIContent instance. + def get_ui_content() -> UIContent: + return self._get_ui_content(width=0, height=0) + + def get_width() -> int: + return margin.get_width(get_ui_content) + + key = (margin, get_app().render_counter) + return self._margin_width_cache.get(key, get_width) + + def _get_total_margin_width(self) -> int: + """ + Calculate and return the width of the margin (left + right). 
+ """ + return sum(self._get_margin_width(m) for m in self.left_margins) + sum( + self._get_margin_width(m) for m in self.right_margins + ) + + def preferred_width(self, max_available_width: int) -> Dimension: + """ + Calculate the preferred width for this window. + """ + + def preferred_content_width() -> int | None: + """Content width: is only calculated if no exact width for the + window was given.""" + if self.ignore_content_width(): + return None + + # Calculate the width of the margin. + total_margin_width = self._get_total_margin_width() + + # Window of the content. (Can be `None`.) + preferred_width = self.content.preferred_width( + max_available_width - total_margin_width + ) + + if preferred_width is not None: + # Include width of the margins. + preferred_width += total_margin_width + return preferred_width + + # Merge. + return self._merge_dimensions( + dimension=to_dimension(self.width), + get_preferred=preferred_content_width, + dont_extend=self.dont_extend_width(), + ) + + def preferred_height(self, width: int, max_available_height: int) -> Dimension: + """ + Calculate the preferred height for this window. + """ + + def preferred_content_height() -> int | None: + """Content height: is only calculated if no exact height for the + window was given.""" + if self.ignore_content_height(): + return None + + total_margin_width = self._get_total_margin_width() + wrap_lines = self.wrap_lines() + + return self.content.preferred_height( + width - total_margin_width, + max_available_height, + wrap_lines, + self.get_line_prefix, + ) + + return self._merge_dimensions( + dimension=to_dimension(self.height), + get_preferred=preferred_content_height, + dont_extend=self.dont_extend_height(), + ) + + @staticmethod + def _merge_dimensions( + dimension: Dimension | None, + get_preferred: Callable[[], int | None], + dont_extend: bool = False, + ) -> Dimension: + """ + Take the Dimension from this `Window` class and the received preferred + size from the `UIControl` and return a `Dimension` to report to the + parent container. + """ + dimension = dimension or Dimension() + + # When a preferred dimension was explicitly given to the Window, + # ignore the UIControl. + preferred: int | None + + if dimension.preferred_specified: + preferred = dimension.preferred + else: + # Otherwise, calculate the preferred dimension from the UI control + # content. + preferred = get_preferred() + + # When a 'preferred' dimension is given by the UIControl, make sure + # that it stays within the bounds of the Window. + if preferred is not None: + if dimension.max_specified: + preferred = min(preferred, dimension.max) + + if dimension.min_specified: + preferred = max(preferred, dimension.min) + + # When a `dont_extend` flag has been given, use the preferred dimension + # also as the max dimension. + max_: int | None + min_: int | None + + if dont_extend and preferred is not None: + max_ = min(dimension.max, preferred) + else: + max_ = dimension.max if dimension.max_specified else None + + min_ = dimension.min if dimension.min_specified else None + + return Dimension( + min=min_, max=max_, preferred=preferred, weight=dimension.weight + ) + + def _get_ui_content(self, width: int, height: int) -> UIContent: + """ + Create a `UIContent` instance. 
+ """ + + def get_content() -> UIContent: + return self.content.create_content(width=width, height=height) + + key = (get_app().render_counter, width, height) + return self._ui_content_cache.get(key, get_content) + + def _get_digraph_char(self) -> str | None: + "Return `False`, or the Digraph symbol to be used." + app = get_app() + if app.quoted_insert: + return "^" + if app.vi_state.waiting_for_digraph: + if app.vi_state.digraph_symbol1: + return app.vi_state.digraph_symbol1 + return "?" + return None + + def write_to_screen( + self, + screen: Screen, + mouse_handlers: MouseHandlers, + write_position: WritePosition, + parent_style: str, + erase_bg: bool, + z_index: int | None, + ) -> None: + """ + Write window to screen. This renders the user control, the margins and + copies everything over to the absolute position at the given screen. + """ + # If dont_extend_width/height was given. Then reduce width/height in + # WritePosition if the parent wanted us to paint in a bigger area. + # (This happens if this window is bundled with another window in a + # HSplit/VSplit, but with different size requirements.) + write_position = WritePosition( + xpos=write_position.xpos, + ypos=write_position.ypos, + width=write_position.width, + height=write_position.height, + ) + + if self.dont_extend_width(): + write_position.width = min( + write_position.width, + self.preferred_width(write_position.width).preferred, + ) + + if self.dont_extend_height(): + write_position.height = min( + write_position.height, + self.preferred_height( + write_position.width, write_position.height + ).preferred, + ) + + # Draw + z_index = z_index if self.z_index is None else self.z_index + + draw_func = partial( + self._write_to_screen_at_index, + screen, + mouse_handlers, + write_position, + parent_style, + erase_bg, + ) + + if z_index is None or z_index <= 0: + # When no z_index is given, draw right away. + draw_func() + else: + # Otherwise, postpone. + screen.draw_with_z_index(z_index=z_index, draw_func=draw_func) + + def _write_to_screen_at_index( + self, + screen: Screen, + mouse_handlers: MouseHandlers, + write_position: WritePosition, + parent_style: str, + erase_bg: bool, + ) -> None: + # Don't bother writing invisible windows. + # (We save some time, but also avoid applying last-line styling.) + if write_position.height <= 0 or write_position.width <= 0: + return + + # Calculate margin sizes. + left_margin_widths = [self._get_margin_width(m) for m in self.left_margins] + right_margin_widths = [self._get_margin_width(m) for m in self.right_margins] + total_margin_width = sum(left_margin_widths + right_margin_widths) + + # Render UserControl. + ui_content = self.content.create_content( + write_position.width - total_margin_width, write_position.height + ) + assert isinstance(ui_content, UIContent) + + # Scroll content. + wrap_lines = self.wrap_lines() + self._scroll( + ui_content, write_position.width - total_margin_width, write_position.height + ) + + # Erase background and fill with `char`. + self._fill_bg(screen, write_position, erase_bg) + + # Resolve `align` attribute. 
+ align = self.align() if callable(self.align) else self.align + + # Write body + visible_line_to_row_col, rowcol_to_yx = self._copy_body( + ui_content, + screen, + write_position, + sum(left_margin_widths), + write_position.width - total_margin_width, + self.vertical_scroll, + self.horizontal_scroll, + wrap_lines=wrap_lines, + highlight_lines=True, + vertical_scroll_2=self.vertical_scroll_2, + always_hide_cursor=self.always_hide_cursor(), + has_focus=get_app().layout.current_control == self.content, + align=align, + get_line_prefix=self.get_line_prefix, + ) + + # Remember render info. (Set before generating the margins. They need this.) + x_offset = write_position.xpos + sum(left_margin_widths) + y_offset = write_position.ypos + + render_info = WindowRenderInfo( + window=self, + ui_content=ui_content, + horizontal_scroll=self.horizontal_scroll, + vertical_scroll=self.vertical_scroll, + window_width=write_position.width - total_margin_width, + window_height=write_position.height, + configured_scroll_offsets=self.scroll_offsets, + visible_line_to_row_col=visible_line_to_row_col, + rowcol_to_yx=rowcol_to_yx, + x_offset=x_offset, + y_offset=y_offset, + wrap_lines=wrap_lines, + ) + self.render_info = render_info + + # Set mouse handlers. + def mouse_handler(mouse_event: MouseEvent) -> NotImplementedOrNone: + """ + Wrapper around the mouse_handler of the `UIControl` that turns + screen coordinates into line coordinates. + Returns `NotImplemented` if no UI invalidation should be done. + """ + # Don't handle mouse events outside of the current modal part of + # the UI. + if self not in get_app().layout.walk_through_modal_area(): + return NotImplemented + + # Find row/col position first. + yx_to_rowcol = {v: k for k, v in rowcol_to_yx.items()} + y = mouse_event.position.y + x = mouse_event.position.x + + # If clicked below the content area, look for a position in the + # last line instead. + max_y = write_position.ypos + len(visible_line_to_row_col) - 1 + y = min(max_y, y) + result: NotImplementedOrNone + + while x >= 0: + try: + row, col = yx_to_rowcol[y, x] + except KeyError: + # Try again. (When clicking on the right side of double + # width characters, or on the right side of the input.) + x -= 1 + else: + # Found position, call handler of UIControl. + result = self.content.mouse_handler( + MouseEvent( + position=Point(x=col, y=row), + event_type=mouse_event.event_type, + button=mouse_event.button, + modifiers=mouse_event.modifiers, + ) + ) + break + else: + # nobreak. + # (No x/y coordinate found for the content. This happens in + # case of a DummyControl, that does not have any content. + # Report (0,0) instead.) + result = self.content.mouse_handler( + MouseEvent( + position=Point(x=0, y=0), + event_type=mouse_event.event_type, + button=mouse_event.button, + modifiers=mouse_event.modifiers, + ) + ) + + # If it returns NotImplemented, handle it here. + if result == NotImplemented: + result = self._mouse_handler(mouse_event) + + return result + + mouse_handlers.set_mouse_handler_for_range( + x_min=write_position.xpos + sum(left_margin_widths), + x_max=write_position.xpos + write_position.width - total_margin_width, + y_min=write_position.ypos, + y_max=write_position.ypos + write_position.height, + handler=mouse_handler, + ) + + # Render and copy margins. + move_x = 0 + + def render_margin(m: Margin, width: int) -> UIContent: + "Render margin. Return `Screen`." + # Retrieve margin fragments. 
+            fragments = m.create_margin(render_info, width, write_position.height)
+
+            # Turn it into a UIContent object.
+            return FormattedTextControl(fragments).create_content(
+                width + 1, write_position.height
+            )
+
+        for m, width in zip(self.left_margins, left_margin_widths):
+            if width > 0:  # (ConditionalMargin returns a zero width. -- Don't render.)
+                # Create screen for margin.
+                margin_content = render_margin(m, width)
+
+                # Copy and shift X.
+                self._copy_margin(margin_content, screen, write_position, move_x, width)
+                move_x += width
+
+        move_x = write_position.width - sum(right_margin_widths)
+
+        for m, width in zip(self.right_margins, right_margin_widths):
+            # Create screen for margin.
+            margin_content = render_margin(m, width)
+
+            # Copy and shift X.
+            self._copy_margin(margin_content, screen, write_position, move_x, width)
+            move_x += width
+
+        # Apply 'self.style'
+        self._apply_style(screen, write_position, parent_style)
+
+        # Tell the screen that this user control has been painted at this
+        # position.
+        screen.visible_windows_to_write_positions[self] = write_position
+
+    def _copy_body(
+        self,
+        ui_content: UIContent,
+        new_screen: Screen,
+        write_position: WritePosition,
+        move_x: int,
+        width: int,
+        vertical_scroll: int = 0,
+        horizontal_scroll: int = 0,
+        wrap_lines: bool = False,
+        highlight_lines: bool = False,
+        vertical_scroll_2: int = 0,
+        always_hide_cursor: bool = False,
+        has_focus: bool = False,
+        align: WindowAlign = WindowAlign.LEFT,
+        get_line_prefix: Callable[[int, int], AnyFormattedText] | None = None,
+    ) -> tuple[dict[int, tuple[int, int]], dict[tuple[int, int], tuple[int, int]]]:
+        """
+        Copy the UIContent into the output screen.
+        Return (visible_line_to_row_col, rowcol_to_yx) tuple.
+
+        :param get_line_prefix: None or a callable that takes a line number
+            (int) and a wrap_count (int) and returns formatted text.
+        """
+        xpos = write_position.xpos + move_x
+        ypos = write_position.ypos
+        line_count = ui_content.line_count
+        new_buffer = new_screen.data_buffer
+        empty_char = _CHAR_CACHE["", ""]
+
+        # Map visible line number to (row, col) of input.
+        # 'col' will always be zero if line wrapping is off.
+        visible_line_to_row_col: dict[int, tuple[int, int]] = {}
+
+        # Maps (row, col) from the input to (y, x) screen coordinates.
+        rowcol_to_yx: dict[tuple[int, int], tuple[int, int]] = {}
+
+        def copy_line(
+            line: StyleAndTextTuples,
+            lineno: int,
+            x: int,
+            y: int,
+            is_input: bool = False,
+        ) -> tuple[int, int]:
+            """
+            Copy over a single line to the output screen. This can wrap over
+            multiple lines in the output. It will call the prefix (prompt)
+            function before every line.
+            """
+            if is_input:
+                current_rowcol_to_yx = rowcol_to_yx
+            else:
+                current_rowcol_to_yx = {}  # Throwaway dictionary.
+
+            # Draw line prefix.
+            if is_input and get_line_prefix:
+                prompt = to_formatted_text(get_line_prefix(lineno, 0))
+                x, y = copy_line(prompt, lineno, x, y, is_input=False)
+
+            # Scroll horizontally.
+            skipped = 0  # Characters skipped because of horizontal scrolling.
+            if horizontal_scroll and is_input:
+                h_scroll = horizontal_scroll
+                line = explode_text_fragments(line)
+                while h_scroll > 0 and line:
+                    h_scroll -= get_cwidth(line[0][1])
+                    skipped += 1
+                    del line[:1]  # Remove first character.
+
+                x -= h_scroll  # When scrolling over double width character,
+                # this can end up being negative.
+
+            # Align this line.
(Note that this doesn't work well when we use + # get_line_prefix and that function returns variable width prefixes.) + if align == WindowAlign.CENTER: + line_width = fragment_list_width(line) + if line_width < width: + x += (width - line_width) // 2 + elif align == WindowAlign.RIGHT: + line_width = fragment_list_width(line) + if line_width < width: + x += width - line_width + + col = 0 + wrap_count = 0 + for style, text, *_ in line: + new_buffer_row = new_buffer[y + ypos] + + # Remember raw VT escape sequences. (E.g. FinalTerm's + # escape sequences.) + if "[ZeroWidthEscape]" in style: + new_screen.zero_width_escapes[y + ypos][x + xpos] += text + continue + + for c in text: + char = _CHAR_CACHE[c, style] + char_width = char.width + + # Wrap when the line width is exceeded. + if wrap_lines and x + char_width > width: + visible_line_to_row_col[y + 1] = ( + lineno, + visible_line_to_row_col[y][1] + x, + ) + y += 1 + wrap_count += 1 + x = 0 + + # Insert line prefix (continuation prompt). + if is_input and get_line_prefix: + prompt = to_formatted_text( + get_line_prefix(lineno, wrap_count) + ) + x, y = copy_line(prompt, lineno, x, y, is_input=False) + + new_buffer_row = new_buffer[y + ypos] + + if y >= write_position.height: + return x, y # Break out of all for loops. + + # Set character in screen and shift 'x'. + if x >= 0 and y >= 0 and x < width: + new_buffer_row[x + xpos] = char + + # When we print a multi width character, make sure + # to erase the neighbors positions in the screen. + # (The empty string if different from everything, + # so next redraw this cell will repaint anyway.) + if char_width > 1: + for i in range(1, char_width): + new_buffer_row[x + xpos + i] = empty_char + + # If this is a zero width characters, then it's + # probably part of a decomposed unicode character. + # See: https://en.wikipedia.org/wiki/Unicode_equivalence + # Merge it in the previous cell. + elif char_width == 0: + # Handle all character widths. If the previous + # character is a multiwidth character, then + # merge it two positions back. + for pw in [2, 1]: # Previous character width. + if ( + x - pw >= 0 + and new_buffer_row[x + xpos - pw].width == pw + ): + prev_char = new_buffer_row[x + xpos - pw] + char2 = _CHAR_CACHE[ + prev_char.char + c, prev_char.style + ] + new_buffer_row[x + xpos - pw] = char2 + + # Keep track of write position for each character. + current_rowcol_to_yx[lineno, col + skipped] = ( + y + ypos, + x + xpos, + ) + + col += 1 + x += char_width + return x, y + + # Copy content. + def copy() -> int: + y = -vertical_scroll_2 + lineno = vertical_scroll + + while y < write_position.height and lineno < line_count: + # Take the next line and copy it in the real screen. + line = ui_content.get_line(lineno) + + visible_line_to_row_col[y] = (lineno, horizontal_scroll) + + # Copy margin and actual line. + x = 0 + x, y = copy_line(line, lineno, x, y, is_input=True) + + lineno += 1 + y += 1 + return y + + copy() + + def cursor_pos_to_screen_pos(row: int, col: int) -> Point: + "Translate row/col from UIContent to real Screen coordinates." + try: + y, x = rowcol_to_yx[row, col] + except KeyError: + # Normally this should never happen. (It is a bug, if it happens.) + # But to be sure, return (0, 0) + return Point(x=0, y=0) + + # raise ValueError( + # 'Invalid position. row=%r col=%r, vertical_scroll=%r, ' + # 'horizontal_scroll=%r, height=%r' % + # (row, col, vertical_scroll, horizontal_scroll, write_position.height)) + else: + return Point(x=x, y=y) + + # Set cursor and menu positions. 
+ if ui_content.cursor_position: + screen_cursor_position = cursor_pos_to_screen_pos( + ui_content.cursor_position.y, ui_content.cursor_position.x + ) + + if has_focus: + new_screen.set_cursor_position(self, screen_cursor_position) + + if always_hide_cursor: + new_screen.show_cursor = False + else: + new_screen.show_cursor = ui_content.show_cursor + + self._highlight_digraph(new_screen) + + if highlight_lines: + self._highlight_cursorlines( + new_screen, + screen_cursor_position, + xpos, + ypos, + width, + write_position.height, + ) + + # Draw input characters from the input processor queue. + if has_focus and ui_content.cursor_position: + self._show_key_processor_key_buffer(new_screen) + + # Set menu position. + if ui_content.menu_position: + new_screen.set_menu_position( + self, + cursor_pos_to_screen_pos( + ui_content.menu_position.y, ui_content.menu_position.x + ), + ) + + # Update output screen height. + new_screen.height = max(new_screen.height, ypos + write_position.height) + + return visible_line_to_row_col, rowcol_to_yx + + def _fill_bg( + self, screen: Screen, write_position: WritePosition, erase_bg: bool + ) -> None: + """ + Erase/fill the background. + (Useful for floats and when a `char` has been given.) + """ + char: str | None + if callable(self.char): + char = self.char() + else: + char = self.char + + if erase_bg or char: + wp = write_position + char_obj = _CHAR_CACHE[char or " ", ""] + + for y in range(wp.ypos, wp.ypos + wp.height): + row = screen.data_buffer[y] + for x in range(wp.xpos, wp.xpos + wp.width): + row[x] = char_obj + + def _apply_style( + self, new_screen: Screen, write_position: WritePosition, parent_style: str + ) -> None: + # Apply `self.style`. + style = parent_style + " " + to_str(self.style) + + new_screen.fill_area(write_position, style=style, after=False) + + # Apply the 'last-line' class to the last line of each Window. This can + # be used to apply an 'underline' to the user control. + wp = WritePosition( + write_position.xpos, + write_position.ypos + write_position.height - 1, + write_position.width, + 1, + ) + new_screen.fill_area(wp, "class:last-line", after=True) + + def _highlight_digraph(self, new_screen: Screen) -> None: + """ + When we are in Vi digraph mode, put a question mark underneath the + cursor. + """ + digraph_char = self._get_digraph_char() + if digraph_char: + cpos = new_screen.get_cursor_position(self) + new_screen.data_buffer[cpos.y][cpos.x] = _CHAR_CACHE[ + digraph_char, "class:digraph" + ] + + def _show_key_processor_key_buffer(self, new_screen: Screen) -> None: + """ + When the user is typing a key binding that consists of several keys, + display the last pressed key if the user is in insert mode and the key + is meaningful to be displayed. + E.g. Some people want to bind 'jj' to escape in Vi insert mode. But the + first 'j' needs to be displayed in order to get some feedback. + """ + app = get_app() + key_buffer = app.key_processor.key_buffer + + if key_buffer and _in_insert_mode() and not app.is_done: + # The textual data for the given key. (Can be a VT100 escape + # sequence.) + data = key_buffer[-1].data + + # Display only if this is a 1 cell width character. + if get_cwidth(data) == 1: + cpos = new_screen.get_cursor_position(self) + new_screen.data_buffer[cpos.y][cpos.x] = _CHAR_CACHE[ + data, "class:partial-key-binding" + ] + + def _highlight_cursorlines( + self, new_screen: Screen, cpos: Point, x: int, y: int, width: int, height: int + ) -> None: + """ + Highlight cursor row/column. 
+ """ + cursor_line_style = " class:cursor-line " + cursor_column_style = " class:cursor-column " + + data_buffer = new_screen.data_buffer + + # Highlight cursor line. + if self.cursorline(): + row = data_buffer[cpos.y] + for x in range(x, x + width): + original_char = row[x] + row[x] = _CHAR_CACHE[ + original_char.char, original_char.style + cursor_line_style + ] + + # Highlight cursor column. + if self.cursorcolumn(): + for y2 in range(y, y + height): + row = data_buffer[y2] + original_char = row[cpos.x] + row[cpos.x] = _CHAR_CACHE[ + original_char.char, original_char.style + cursor_column_style + ] + + # Highlight color columns + colorcolumns = self.colorcolumns + if callable(colorcolumns): + colorcolumns = colorcolumns() + + for cc in colorcolumns: + assert isinstance(cc, ColorColumn) + column = cc.position + + if column < x + width: # Only draw when visible. + color_column_style = " " + cc.style + + for y2 in range(y, y + height): + row = data_buffer[y2] + original_char = row[column + x] + row[column + x] = _CHAR_CACHE[ + original_char.char, original_char.style + color_column_style + ] + + def _copy_margin( + self, + margin_content: UIContent, + new_screen: Screen, + write_position: WritePosition, + move_x: int, + width: int, + ) -> None: + """ + Copy characters from the margin screen to the real screen. + """ + xpos = write_position.xpos + move_x + ypos = write_position.ypos + + margin_write_position = WritePosition(xpos, ypos, width, write_position.height) + self._copy_body(margin_content, new_screen, margin_write_position, 0, width) + + def _scroll(self, ui_content: UIContent, width: int, height: int) -> None: + """ + Scroll body. Ensure that the cursor is visible. + """ + if self.wrap_lines(): + func = self._scroll_when_linewrapping + else: + func = self._scroll_without_linewrapping + + func(ui_content, width, height) + + def _scroll_when_linewrapping( + self, ui_content: UIContent, width: int, height: int + ) -> None: + """ + Scroll to make sure the cursor position is visible and that we maintain + the requested scroll offset. + + Set `self.horizontal_scroll/vertical_scroll`. + """ + scroll_offsets_bottom = self.scroll_offsets.bottom + scroll_offsets_top = self.scroll_offsets.top + + # We don't have horizontal scrolling. + self.horizontal_scroll = 0 + + def get_line_height(lineno: int) -> int: + return ui_content.get_height_for_line(lineno, width, self.get_line_prefix) + + # When there is no space, reset `vertical_scroll_2` to zero and abort. + # This can happen if the margin is bigger than the window width. + # Otherwise the text height will become "infinite" (a big number) and + # the copy_line will spend a huge amount of iterations trying to render + # nothing. + if width <= 0: + self.vertical_scroll = ui_content.cursor_position.y + self.vertical_scroll_2 = 0 + return + + # If the current line consumes more than the whole window height, + # then we have to scroll vertically inside this line. (We don't take + # the scroll offsets into account for this.) + # Also, ignore the scroll offsets in this case. Just set the vertical + # scroll to this line. + line_height = get_line_height(ui_content.cursor_position.y) + if line_height > height - scroll_offsets_top: + # Calculate the height of the text before the cursor (including + # line prefixes). + text_before_height = ui_content.get_height_for_line( + ui_content.cursor_position.y, + width, + self.get_line_prefix, + slice_stop=ui_content.cursor_position.x, + ) + + # Adjust scroll offset. 
+                self.vertical_scroll = ui_content.cursor_position.y
+                self.vertical_scroll_2 = min(
+                    text_before_height - 1,  # Keep the cursor visible.
+                    line_height
+                    - height,  # Avoid blank lines at the bottom when scrolling up again.
+                    self.vertical_scroll_2,
+                )
+                self.vertical_scroll_2 = max(
+                    0, text_before_height - height, self.vertical_scroll_2
+                )
+                return
+            else:
+                self.vertical_scroll_2 = 0
+
+            # Current line doesn't consume the whole height. Take scroll offsets into account.
+            def get_min_vertical_scroll() -> int:
+                # Make sure that the cursor line is not below the bottom.
+                # (Calculate how many lines can be shown between the cursor and
+                # the bottom of the window.)
+                used_height = 0
+                prev_lineno = ui_content.cursor_position.y
+
+                for lineno in range(ui_content.cursor_position.y, -1, -1):
+                    used_height += get_line_height(lineno)
+
+                    if used_height > height - scroll_offsets_bottom:
+                        return prev_lineno
+                    else:
+                        prev_lineno = lineno
+                return 0
+
+            def get_max_vertical_scroll() -> int:
+                # Make sure that the cursor line is not above the top.
+                prev_lineno = ui_content.cursor_position.y
+                used_height = 0
+
+                for lineno in range(ui_content.cursor_position.y - 1, -1, -1):
+                    used_height += get_line_height(lineno)
+
+                    if used_height > scroll_offsets_top:
+                        return prev_lineno
+                    else:
+                        prev_lineno = lineno
+                return prev_lineno
+
+            def get_topmost_visible() -> int:
+                """
+                Calculate the topmost line that can be visible, while the bottom
+                is still visible. We should not allow scrolling more than this
+                if `allow_scroll_beyond_bottom` is false.
+                """
+                prev_lineno = ui_content.line_count - 1
+                used_height = 0
+                for lineno in range(ui_content.line_count - 1, -1, -1):
+                    used_height += get_line_height(lineno)
+                    if used_height > height:
+                        return prev_lineno
+                    else:
+                        prev_lineno = lineno
+                return prev_lineno
+
+            # Scroll vertically. (Make sure that the whole line which contains
+            # the cursor is visible.)
+            topmost_visible = get_topmost_visible()
+
+            # Note: the `min(topmost_visible, ...)` is to make sure that we
+            # don't require scrolling up because of the bottom scroll offset,
+            # when we are at the end of the document.
+            self.vertical_scroll = max(
+                self.vertical_scroll, min(topmost_visible, get_min_vertical_scroll())
+            )
+            self.vertical_scroll = min(self.vertical_scroll, get_max_vertical_scroll())
+
+            # Disallow scrolling beyond bottom?
+            if not self.allow_scroll_beyond_bottom():
+                self.vertical_scroll = min(self.vertical_scroll, topmost_visible)
+
+    def _scroll_without_linewrapping(
+        self, ui_content: UIContent, width: int, height: int
+    ) -> None:
+        """
+        Scroll to make sure the cursor position is visible and that we maintain
+        the requested scroll offset.
+
+        Set `self.horizontal_scroll/vertical_scroll`.
+        """
+        cursor_position = ui_content.cursor_position or Point(x=0, y=0)
+
+        # Without line wrapping, we will never have to scroll vertically inside
+        # a single line.
+        self.vertical_scroll_2 = 0
+
+        if ui_content.line_count == 0:
+            self.vertical_scroll = 0
+            self.horizontal_scroll = 0
+            return
+        else:
+            current_line_text = fragment_list_to_text(
+                ui_content.get_line(cursor_position.y)
+            )
+
+        def do_scroll(
+            current_scroll: int,
+            scroll_offset_start: int,
+            scroll_offset_end: int,
+            cursor_pos: int,
+            window_size: int,
+            content_size: int,
+        ) -> int:
+            "Scrolling algorithm. Used for both horizontal and vertical scrolling."
+            # Calculate the scroll offset to apply.
+            # This can obviously never be more than half the screen size. Also,
+            # when the cursor appears at the top or bottom, we don't apply the
+            # offset.
+            scroll_offset_start = int(
+                min(scroll_offset_start, window_size / 2, cursor_pos)
+            )
+            scroll_offset_end = int(
+                min(scroll_offset_end, window_size / 2, content_size - 1 - cursor_pos)
+            )
+
+            # Prevent negative scroll offsets.
+            if current_scroll < 0:
+                current_scroll = 0
+
+            # Scroll back if we scrolled too much and there's still space to
+            # show more of the document.
+            if (
+                not self.allow_scroll_beyond_bottom()
+                and current_scroll > content_size - window_size
+            ):
+                current_scroll = max(0, content_size - window_size)
+
+            # Scroll up if cursor is before visible part.
+            if current_scroll > cursor_pos - scroll_offset_start:
+                current_scroll = max(0, cursor_pos - scroll_offset_start)
+
+            # Scroll down if cursor is after visible part.
+            if current_scroll < (cursor_pos + 1) - window_size + scroll_offset_end:
+                current_scroll = (cursor_pos + 1) - window_size + scroll_offset_end
+
+            return current_scroll
+
+        # When a preferred scroll is given, take that first into account.
+        if self.get_vertical_scroll:
+            self.vertical_scroll = self.get_vertical_scroll(self)
+            assert isinstance(self.vertical_scroll, int)
+        if self.get_horizontal_scroll:
+            self.horizontal_scroll = self.get_horizontal_scroll(self)
+            assert isinstance(self.horizontal_scroll, int)
+
+        # Update horizontal/vertical scroll to make sure that the cursor
+        # remains visible.
+        offsets = self.scroll_offsets
+
+        self.vertical_scroll = do_scroll(
+            current_scroll=self.vertical_scroll,
+            scroll_offset_start=offsets.top,
+            scroll_offset_end=offsets.bottom,
+            cursor_pos=ui_content.cursor_position.y,
+            window_size=height,
+            content_size=ui_content.line_count,
+        )
+
+        if self.get_line_prefix:
+            current_line_prefix_width = fragment_list_width(
+                to_formatted_text(self.get_line_prefix(ui_content.cursor_position.y, 0))
+            )
+        else:
+            current_line_prefix_width = 0
+
+        self.horizontal_scroll = do_scroll(
+            current_scroll=self.horizontal_scroll,
+            scroll_offset_start=offsets.left,
+            scroll_offset_end=offsets.right,
+            cursor_pos=get_cwidth(current_line_text[: ui_content.cursor_position.x]),
+            window_size=width - current_line_prefix_width,
+            # We can only analyze the current line. Calculating the width of
+            # all the lines is too expensive.
+            content_size=max(
+                get_cwidth(current_line_text), self.horizontal_scroll + width
+            ),
+        )
+
+    def _mouse_handler(self, mouse_event: MouseEvent) -> NotImplementedOrNone:
+        """
+        Mouse handler. Called when the UI control doesn't handle this
+        particular event.
+
+        Return `NotImplemented` if nothing was done as a consequence of this
+        key binding (no UI invalidate required in that case).
+        """
+        if mouse_event.event_type == MouseEventType.SCROLL_DOWN:
+            self._scroll_down()
+            return None
+        elif mouse_event.event_type == MouseEventType.SCROLL_UP:
+            self._scroll_up()
+            return None
+
+        return NotImplemented
+
+    def _scroll_down(self) -> None:
+        "Scroll window down."
+        info = self.render_info
+
+        if info is None:
+            return
+
+        if self.vertical_scroll < info.content_height - info.window_height:
+            if info.cursor_position.y <= info.configured_scroll_offsets.top:
+                self.content.move_cursor_down()
+
+            self.vertical_scroll += 1
+
+    def _scroll_up(self) -> None:
+        "Scroll window up."
+        info = self.render_info
+
+        if info is None:
+            return
+
+        if info.vertical_scroll > 0:
+            # TODO: not entirely correct yet in case of line wrapping and long lines.
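+            # Editor's note: the `do_scroll` clamping above, as a standalone
+            # sketch (assumes scrolling beyond the bottom is disallowed;
+            # illustrative only, not part of the upstream code):
+            #
+            #     def clamp_scroll(scroll, off_start, off_end, cursor,
+            #                      window, content):
+            #         off_start = int(min(off_start, window / 2, cursor))
+            #         off_end = int(min(off_end, window / 2,
+            #                           content - 1 - cursor))
+            #         scroll = max(0, min(scroll, max(0, content - window)))
+            #         if scroll > cursor - off_start:
+            #             scroll = max(0, cursor - off_start)
+            #         if scroll < cursor + 1 - window + off_end:
+            #             scroll = cursor + 1 - window + off_end
+            #         return scroll
+            #
+            #     clamp_scroll(0, 2, 2, cursor=50, window=20, content=100)  # -> 33
+            #
+            # With scroll 33, the cursor at row 50 lands on screen row 17,
+            # leaving the requested two rows of offset below it.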
+ if ( + info.cursor_position.y + >= info.window_height - 1 - info.configured_scroll_offsets.bottom + ): + self.content.move_cursor_up() + + self.vertical_scroll -= 1 + + def get_key_bindings(self) -> KeyBindingsBase | None: + return self.content.get_key_bindings() + + def get_children(self) -> list[Container]: + return [] + + +class ConditionalContainer(Container): + """ + Wrapper around any other container that can change the visibility. The + received `filter` determines whether the given container should be + displayed or not. + + :param content: :class:`.Container` instance. + :param filter: :class:`.Filter` instance. + """ + + def __init__(self, content: AnyContainer, filter: FilterOrBool) -> None: + self.content = to_container(content) + self.filter = to_filter(filter) + + def __repr__(self) -> str: + return f"ConditionalContainer({self.content!r}, filter={self.filter!r})" + + def reset(self) -> None: + self.content.reset() + + def preferred_width(self, max_available_width: int) -> Dimension: + if self.filter(): + return self.content.preferred_width(max_available_width) + else: + return Dimension.zero() + + def preferred_height(self, width: int, max_available_height: int) -> Dimension: + if self.filter(): + return self.content.preferred_height(width, max_available_height) + else: + return Dimension.zero() + + def write_to_screen( + self, + screen: Screen, + mouse_handlers: MouseHandlers, + write_position: WritePosition, + parent_style: str, + erase_bg: bool, + z_index: int | None, + ) -> None: + if self.filter(): + return self.content.write_to_screen( + screen, mouse_handlers, write_position, parent_style, erase_bg, z_index + ) + + def get_children(self) -> list[Container]: + return [self.content] + + +class DynamicContainer(Container): + """ + Container class that dynamically returns any Container. + + :param get_container: Callable that returns a :class:`.Container` instance + or any widget with a ``__pt_container__`` method. + """ + + def __init__(self, get_container: Callable[[], AnyContainer]) -> None: + self.get_container = get_container + + def _get_container(self) -> Container: + """ + Return the current container object. + + We call `to_container`, because `get_container` can also return a + widget with a ``__pt_container__`` method. + """ + obj = self.get_container() + return to_container(obj) + + def reset(self) -> None: + self._get_container().reset() + + def preferred_width(self, max_available_width: int) -> Dimension: + return self._get_container().preferred_width(max_available_width) + + def preferred_height(self, width: int, max_available_height: int) -> Dimension: + return self._get_container().preferred_height(width, max_available_height) + + def write_to_screen( + self, + screen: Screen, + mouse_handlers: MouseHandlers, + write_position: WritePosition, + parent_style: str, + erase_bg: bool, + z_index: int | None, + ) -> None: + self._get_container().write_to_screen( + screen, mouse_handlers, write_position, parent_style, erase_bg, z_index + ) + + def is_modal(self) -> bool: + return False + + def get_key_bindings(self) -> KeyBindingsBase | None: + # Key bindings will be collected when `layout.walk()` finds the child + # container. + return None + + def get_children(self) -> list[Container]: + # Here we have to return the current active container itself, not its + # children. Otherwise, we run into issues where `layout.walk()` will + # never see an object of type `Window` if this contains a window. We + # can't/shouldn't proxy the "isinstance" check. 
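+        # Editor's sketch of how these two wrappers are typically used (the
+        # `app_state` and `current_view` names below are hypothetical):
+        #
+        #     from prompt_toolkit.filters import Condition
+        #
+        #     search_bar = ConditionalContainer(
+        #         content=Window(height=1),
+        #         filter=Condition(lambda: app_state.searching),
+        #     )
+        #     body = DynamicContainer(lambda: current_view())
+        #
+        # The search bar only takes up space while the condition holds, and
+        # `body` re-resolves `current_view()` on every redraw.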
+ return [self._get_container()] + + +def to_container(container: AnyContainer) -> Container: + """ + Make sure that the given object is a :class:`.Container`. + """ + if isinstance(container, Container): + return container + elif hasattr(container, "__pt_container__"): + return to_container(container.__pt_container__()) + else: + raise ValueError(f"Not a container object: {container!r}") + + +def to_window(container: AnyContainer) -> Window: + """ + Make sure that the given argument is a :class:`.Window`. + """ + if isinstance(container, Window): + return container + elif hasattr(container, "__pt_container__"): + return to_window(cast("MagicContainer", container).__pt_container__()) + else: + raise ValueError(f"Not a Window object: {container!r}.") + + +def is_container(value: object) -> TypeGuard[AnyContainer]: + """ + Checks whether the given value is a container object + (for use in assert statements). + """ + if isinstance(value, Container): + return True + if hasattr(value, "__pt_container__"): + return is_container(cast("MagicContainer", value).__pt_container__()) + return False diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/controls.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/controls.py new file mode 100644 index 0000000000000000000000000000000000000000..5083c8286d4cb5b204a6ca92d10a5aadcda1fec7 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/controls.py @@ -0,0 +1,956 @@ +""" +User interface Controls for the layout. +""" + +from __future__ import annotations + +import time +from abc import ABCMeta, abstractmethod +from typing import TYPE_CHECKING, Callable, Hashable, Iterable, NamedTuple + +from prompt_toolkit.application.current import get_app +from prompt_toolkit.buffer import Buffer +from prompt_toolkit.cache import SimpleCache +from prompt_toolkit.data_structures import Point +from prompt_toolkit.document import Document +from prompt_toolkit.filters import FilterOrBool, to_filter +from prompt_toolkit.formatted_text import ( + AnyFormattedText, + StyleAndTextTuples, + to_formatted_text, +) +from prompt_toolkit.formatted_text.utils import ( + fragment_list_to_text, + fragment_list_width, + split_lines, +) +from prompt_toolkit.lexers import Lexer, SimpleLexer +from prompt_toolkit.mouse_events import MouseButton, MouseEvent, MouseEventType +from prompt_toolkit.search import SearchState +from prompt_toolkit.selection import SelectionType +from prompt_toolkit.utils import get_cwidth + +from .processors import ( + DisplayMultipleCursors, + HighlightIncrementalSearchProcessor, + HighlightSearchProcessor, + HighlightSelectionProcessor, + Processor, + TransformationInput, + merge_processors, +) + +if TYPE_CHECKING: + from prompt_toolkit.key_binding.key_bindings import ( + KeyBindingsBase, + NotImplementedOrNone, + ) + from prompt_toolkit.utils import Event + + +__all__ = [ + "BufferControl", + "SearchBufferControl", + "DummyControl", + "FormattedTextControl", + "UIControl", + "UIContent", +] + +GetLinePrefixCallable = Callable[[int, int], AnyFormattedText] + + +class UIControl(metaclass=ABCMeta): + """ + Base class for all user interface controls. + """ + + def reset(self) -> None: + # Default reset. (Doesn't have to be implemented.) 
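+        # A minimal concrete subclass, as an editor's sketch (only
+        # `create_content` is abstract; everything else has usable defaults):
+        #
+        #     class HelloControl(UIControl):
+        #         def create_content(self, width: int, height: int) -> UIContent:
+        #             lines = [[("", "Hello")], [("bold", "world")]]
+        #             return UIContent(
+        #                 get_line=lambda i: lines[i],
+        #                 line_count=len(lines),
+        #             )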
+ pass + + def preferred_width(self, max_available_width: int) -> int | None: + return None + + def preferred_height( + self, + width: int, + max_available_height: int, + wrap_lines: bool, + get_line_prefix: GetLinePrefixCallable | None, + ) -> int | None: + return None + + def is_focusable(self) -> bool: + """ + Tell whether this user control is focusable. + """ + return False + + @abstractmethod + def create_content(self, width: int, height: int) -> UIContent: + """ + Generate the content for this user control. + + Returns a :class:`.UIContent` instance. + """ + + def mouse_handler(self, mouse_event: MouseEvent) -> NotImplementedOrNone: + """ + Handle mouse events. + + When `NotImplemented` is returned, it means that the given event is not + handled by the `UIControl` itself. The `Window` or key bindings can + decide to handle this event as scrolling or changing focus. + + :param mouse_event: `MouseEvent` instance. + """ + return NotImplemented + + def move_cursor_down(self) -> None: + """ + Request to move the cursor down. + This happens when scrolling down and the cursor is completely at the + top. + """ + + def move_cursor_up(self) -> None: + """ + Request to move the cursor up. + """ + + def get_key_bindings(self) -> KeyBindingsBase | None: + """ + The key bindings that are specific for this user control. + + Return a :class:`.KeyBindings` object if some key bindings are + specified, or `None` otherwise. + """ + + def get_invalidate_events(self) -> Iterable[Event[object]]: + """ + Return a list of `Event` objects. This can be a generator. + (The application collects all these events, in order to bind redraw + handlers to these events.) + """ + return [] + + +class UIContent: + """ + Content generated by a user control. This content consists of a list of + lines. + + :param get_line: Callable that takes a line number and returns the current + line. This is a list of (style_str, text) tuples. + :param line_count: The number of lines. + :param cursor_position: a :class:`.Point` for the cursor position. + :param menu_position: a :class:`.Point` for the menu position. + :param show_cursor: Make the cursor visible. + """ + + def __init__( + self, + get_line: Callable[[int], StyleAndTextTuples] = (lambda i: []), + line_count: int = 0, + cursor_position: Point | None = None, + menu_position: Point | None = None, + show_cursor: bool = True, + ): + self.get_line = get_line + self.line_count = line_count + self.cursor_position = cursor_position or Point(x=0, y=0) + self.menu_position = menu_position + self.show_cursor = show_cursor + + # Cache for line heights. Maps cache key -> height + self._line_heights_cache: dict[Hashable, int] = {} + + def __getitem__(self, lineno: int) -> StyleAndTextTuples: + "Make it iterable (iterate line by line)." + if lineno < self.line_count: + return self.get_line(lineno) + else: + raise IndexError + + def get_height_for_line( + self, + lineno: int, + width: int, + get_line_prefix: GetLinePrefixCallable | None, + slice_stop: int | None = None, + ) -> int: + """ + Return the height that a given line would need if it is rendered in a + space with the given width (using line wrapping). + + :param get_line_prefix: None or a `Window.get_line_prefix` callable + that returns the prefix to be inserted before this line. + :param slice_stop: Wrap only "line[:slice_stop]" and return that + partial result. This is needed for scrolling the window correctly + when line wrapping. + :returns: The computed height. 
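+
+        For example (editor's note): with no line prefix, a line whose
+        display width is 25, rendered in a window of width 10, needs
+        ``ceil(25 / 10) == 3`` screen rows; the fast path below computes
+        exactly this with ``divmod``.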
+ """ + # Instead of using `get_line_prefix` as key, we use render_counter + # instead. This is more reliable, because this function could still be + # the same, while the content would change over time. + key = get_app().render_counter, lineno, width, slice_stop + + try: + return self._line_heights_cache[key] + except KeyError: + if width == 0: + height = 10**8 + else: + # Calculate line width first. + line = fragment_list_to_text(self.get_line(lineno))[:slice_stop] + text_width = get_cwidth(line) + + if get_line_prefix: + # Add prefix width. + text_width += fragment_list_width( + to_formatted_text(get_line_prefix(lineno, 0)) + ) + + # Slower path: compute path when there's a line prefix. + height = 1 + + # Keep wrapping as long as the line doesn't fit. + # Keep adding new prefixes for every wrapped line. + while text_width > width: + height += 1 + text_width -= width + + fragments2 = to_formatted_text( + get_line_prefix(lineno, height - 1) + ) + prefix_width = get_cwidth(fragment_list_to_text(fragments2)) + + if prefix_width >= width: # Prefix doesn't fit. + height = 10**8 + break + + text_width += prefix_width + else: + # Fast path: compute height when there's no line prefix. + try: + quotient, remainder = divmod(text_width, width) + except ZeroDivisionError: + height = 10**8 + else: + if remainder: + quotient += 1 # Like math.ceil. + height = max(1, quotient) + + # Cache and return + self._line_heights_cache[key] = height + return height + + +class FormattedTextControl(UIControl): + """ + Control that displays formatted text. This can be either plain text, an + :class:`~prompt_toolkit.formatted_text.HTML` object an + :class:`~prompt_toolkit.formatted_text.ANSI` object, a list of ``(style_str, + text)`` tuples or a callable that takes no argument and returns one of + those, depending on how you prefer to do the formatting. See + ``prompt_toolkit.layout.formatted_text`` for more information. + + (It's mostly optimized for rather small widgets, like toolbars, menus, etc...) + + When this UI control has the focus, the cursor will be shown in the upper + left corner of this control by default. There are two ways for specifying + the cursor position: + + - Pass a `get_cursor_position` function which returns a `Point` instance + with the current cursor position. + + - If the (formatted) text is passed as a list of ``(style, text)`` tuples + and there is one that looks like ``('[SetCursorPosition]', '')``, then + this will specify the cursor position. + + Mouse support: + + The list of fragments can also contain tuples of three items, looking like: + (style_str, text, handler). When mouse support is enabled and the user + clicks on this fragment, then the given handler is called. That handler + should accept two inputs: (Application, MouseEvent) and it should + either handle the event or return `NotImplemented` in case we want the + containing Window to handle this event. + + :param focusable: `bool` or :class:`.Filter`: Tell whether this control is + focusable. + + :param text: Text or formatted text to be displayed. + :param style: Style string applied to the content. (If you want to style + the whole :class:`~prompt_toolkit.layout.Window`, pass the style to the + :class:`~prompt_toolkit.layout.Window` instead.) + :param key_bindings: a :class:`.KeyBindings` object. + :param get_cursor_position: A callable that returns the cursor position as + a `Point` instance. 
+ """ + + def __init__( + self, + text: AnyFormattedText = "", + style: str = "", + focusable: FilterOrBool = False, + key_bindings: KeyBindingsBase | None = None, + show_cursor: bool = True, + modal: bool = False, + get_cursor_position: Callable[[], Point | None] | None = None, + ) -> None: + self.text = text # No type check on 'text'. This is done dynamically. + self.style = style + self.focusable = to_filter(focusable) + + # Key bindings. + self.key_bindings = key_bindings + self.show_cursor = show_cursor + self.modal = modal + self.get_cursor_position = get_cursor_position + + #: Cache for the content. + self._content_cache: SimpleCache[Hashable, UIContent] = SimpleCache(maxsize=18) + self._fragment_cache: SimpleCache[int, StyleAndTextTuples] = SimpleCache( + maxsize=1 + ) + # Only cache one fragment list. We don't need the previous item. + + # Render info for the mouse support. + self._fragments: StyleAndTextTuples | None = None + + def reset(self) -> None: + self._fragments = None + + def is_focusable(self) -> bool: + return self.focusable() + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.text!r})" + + def _get_formatted_text_cached(self) -> StyleAndTextTuples: + """ + Get fragments, but only retrieve fragments once during one render run. + (This function is called several times during one rendering, because + we also need those for calculating the dimensions.) + """ + return self._fragment_cache.get( + get_app().render_counter, lambda: to_formatted_text(self.text, self.style) + ) + + def preferred_width(self, max_available_width: int) -> int: + """ + Return the preferred width for this control. + That is the width of the longest line. + """ + text = fragment_list_to_text(self._get_formatted_text_cached()) + line_lengths = [get_cwidth(l) for l in text.split("\n")] + return max(line_lengths) + + def preferred_height( + self, + width: int, + max_available_height: int, + wrap_lines: bool, + get_line_prefix: GetLinePrefixCallable | None, + ) -> int | None: + """ + Return the preferred height for this control. + """ + content = self.create_content(width, None) + if wrap_lines: + height = 0 + for i in range(content.line_count): + height += content.get_height_for_line(i, width, get_line_prefix) + if height >= max_available_height: + return max_available_height + return height + else: + return content.line_count + + def create_content(self, width: int, height: int | None) -> UIContent: + # Get fragments + fragments_with_mouse_handlers = self._get_formatted_text_cached() + fragment_lines_with_mouse_handlers = list( + split_lines(fragments_with_mouse_handlers) + ) + + # Strip mouse handlers from fragments. + fragment_lines: list[StyleAndTextTuples] = [ + [(item[0], item[1]) for item in line] + for line in fragment_lines_with_mouse_handlers + ] + + # Keep track of the fragments with mouse handler, for later use in + # `mouse_handler`. + self._fragments = fragments_with_mouse_handlers + + # If there is a `[SetCursorPosition]` in the fragment list, set the + # cursor position here. + def get_cursor_position( + fragment: str = "[SetCursorPosition]", + ) -> Point | None: + for y, line in enumerate(fragment_lines): + x = 0 + for style_str, text, *_ in line: + if fragment in style_str: + return Point(x=x, y=y) + x += len(text) + return None + + # If there is a `[SetMenuPosition]`, set the menu over here. 
+        def get_menu_position() -> Point | None:
+            return get_cursor_position("[SetMenuPosition]")
+
+        cursor_position = (self.get_cursor_position or get_cursor_position)()
+
+        # Create content, or take it from the cache.
+        key = (tuple(fragments_with_mouse_handlers), width, cursor_position)
+
+        def get_content() -> UIContent:
+            return UIContent(
+                get_line=lambda i: fragment_lines[i],
+                line_count=len(fragment_lines),
+                show_cursor=self.show_cursor,
+                cursor_position=cursor_position,
+                menu_position=get_menu_position(),
+            )
+
+        return self._content_cache.get(key, get_content)
+
+    def mouse_handler(self, mouse_event: MouseEvent) -> NotImplementedOrNone:
+        """
+        Handle mouse events.
+
+        (When the fragment list contained mouse handlers and the user clicked
+        on any of these, the matching handler is called. This handler can
+        still return `NotImplemented` in case we want the
+        :class:`~prompt_toolkit.layout.Window` to handle this particular
+        event.)
+        """
+        if self._fragments:
+            # Read the generator.
+            fragments_for_line = list(split_lines(self._fragments))
+
+            try:
+                fragments = fragments_for_line[mouse_event.position.y]
+            except IndexError:
+                return NotImplemented
+            else:
+                # Find position in the fragment list.
+                xpos = mouse_event.position.x
+
+                # Find mouse handler for this character.
+                count = 0
+                for item in fragments:
+                    count += len(item[1])
+                    if count > xpos:
+                        if len(item) >= 3:
+                            # Handler found. Call it.
+                            # (Handler can return NotImplemented, so return
+                            # that result.)
+                            handler = item[2]
+                            return handler(mouse_event)
+                        else:
+                            break
+
+        # Otherwise, don't handle here.
+        return NotImplemented
+
+    def is_modal(self) -> bool:
+        return self.modal
+
+    def get_key_bindings(self) -> KeyBindingsBase | None:
+        return self.key_bindings
+
+
+class DummyControl(UIControl):
+    """
+    A dummy control object that doesn't paint any content.
+
+    Useful for filling a :class:`~prompt_toolkit.layout.Window`. (The
+    `fragment` and `char` attributes of the `Window` class can be used to
+    define the filling.)
+    """
+
+    def create_content(self, width: int, height: int) -> UIContent:
+        def get_line(i: int) -> StyleAndTextTuples:
+            return []
+
+        return UIContent(get_line=get_line, line_count=100**100)  # Something very big.
+
+    def is_focusable(self) -> bool:
+        return False
+
+
+class _ProcessedLine(NamedTuple):
+    fragments: StyleAndTextTuples
+    source_to_display: Callable[[int], int]
+    display_to_source: Callable[[int], int]
+
+
+class BufferControl(UIControl):
+    """
+    Control for visualizing the content of a :class:`.Buffer`.
+
+    :param buffer: The :class:`.Buffer` object to be displayed.
+    :param input_processors: A list of
+        :class:`~prompt_toolkit.layout.processors.Processor` objects.
+    :param include_default_input_processors: When True, include the default
+        processors for highlighting of selection, search and displaying of
+        multiple cursors.
+    :param lexer: :class:`.Lexer` instance for syntax highlighting.
+    :param preview_search: `bool` or :class:`.Filter`: Show search while
+        typing. When this is `True`, probably you want to add a
+        ``HighlightIncrementalSearchProcessor`` as well. Otherwise only the
+        cursor position will move, but the text won't be highlighted.
+    :param focusable: `bool` or :class:`.Filter`: Tell whether this control is focusable.
+    :param focus_on_click: Focus this buffer when it's clicked, but not yet focused.
+    :param key_bindings: a :class:`.KeyBindings` object.
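+
+    A minimal usage sketch (editor's note; names are illustrative)::
+
+        buf = Buffer()
+        control = BufferControl(buffer=buf)
+        window = Window(content=control)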
+ """ + + def __init__( + self, + buffer: Buffer | None = None, + input_processors: list[Processor] | None = None, + include_default_input_processors: bool = True, + lexer: Lexer | None = None, + preview_search: FilterOrBool = False, + focusable: FilterOrBool = True, + search_buffer_control: ( + None | SearchBufferControl | Callable[[], SearchBufferControl] + ) = None, + menu_position: Callable[[], int | None] | None = None, + focus_on_click: FilterOrBool = False, + key_bindings: KeyBindingsBase | None = None, + ): + self.input_processors = input_processors + self.include_default_input_processors = include_default_input_processors + + self.default_input_processors = [ + HighlightSearchProcessor(), + HighlightIncrementalSearchProcessor(), + HighlightSelectionProcessor(), + DisplayMultipleCursors(), + ] + + self.preview_search = to_filter(preview_search) + self.focusable = to_filter(focusable) + self.focus_on_click = to_filter(focus_on_click) + + self.buffer = buffer or Buffer() + self.menu_position = menu_position + self.lexer = lexer or SimpleLexer() + self.key_bindings = key_bindings + self._search_buffer_control = search_buffer_control + + #: Cache for the lexer. + #: Often, due to cursor movement, undo/redo and window resizing + #: operations, it happens that a short time, the same document has to be + #: lexed. This is a fairly easy way to cache such an expensive operation. + self._fragment_cache: SimpleCache[ + Hashable, Callable[[int], StyleAndTextTuples] + ] = SimpleCache(maxsize=8) + + self._last_click_timestamp: float | None = None + self._last_get_processed_line: Callable[[int], _ProcessedLine] | None = None + + def __repr__(self) -> str: + return f"<{self.__class__.__name__} buffer={self.buffer!r} at {id(self)!r}>" + + @property + def search_buffer_control(self) -> SearchBufferControl | None: + result: SearchBufferControl | None + + if callable(self._search_buffer_control): + result = self._search_buffer_control() + else: + result = self._search_buffer_control + + assert result is None or isinstance(result, SearchBufferControl) + return result + + @property + def search_buffer(self) -> Buffer | None: + control = self.search_buffer_control + if control is not None: + return control.buffer + return None + + @property + def search_state(self) -> SearchState: + """ + Return the `SearchState` for searching this `BufferControl`. This is + always associated with the search control. If one search bar is used + for searching multiple `BufferControls`, then they share the same + `SearchState`. + """ + search_buffer_control = self.search_buffer_control + if search_buffer_control: + return search_buffer_control.searcher_search_state + else: + return SearchState() + + def is_focusable(self) -> bool: + return self.focusable() + + def preferred_width(self, max_available_width: int) -> int | None: + """ + This should return the preferred width. + + Note: We don't specify a preferred width according to the content, + because it would be too expensive. Calculating the preferred + width can be done by calculating the longest line, but this would + require applying all the processors to each line. This is + unfeasible for a larger document, and doing it for small + documents only would result in inconsistent behavior. + """ + return None + + def preferred_height( + self, + width: int, + max_available_height: int, + wrap_lines: bool, + get_line_prefix: GetLinePrefixCallable | None, + ) -> int | None: + # Calculate the content height, if it was drawn on a screen with the + # given width. 
+ height = 0 + content = self.create_content(width, height=1) # Pass a dummy '1' as height. + + # When line wrapping is off, the height should be equal to the amount + # of lines. + if not wrap_lines: + return content.line_count + + # When the number of lines exceeds the max_available_height, just + # return max_available_height. No need to calculate anything. + if content.line_count >= max_available_height: + return max_available_height + + for i in range(content.line_count): + height += content.get_height_for_line(i, width, get_line_prefix) + + if height >= max_available_height: + return max_available_height + + return height + + def _get_formatted_text_for_line_func( + self, document: Document + ) -> Callable[[int], StyleAndTextTuples]: + """ + Create a function that returns the fragments for a given line. + """ + + # Cache using `document.text`. + def get_formatted_text_for_line() -> Callable[[int], StyleAndTextTuples]: + return self.lexer.lex_document(document) + + key = (document.text, self.lexer.invalidation_hash()) + return self._fragment_cache.get(key, get_formatted_text_for_line) + + def _create_get_processed_line_func( + self, document: Document, width: int, height: int + ) -> Callable[[int], _ProcessedLine]: + """ + Create a function that takes a line number of the current document and + returns a _ProcessedLine(processed_fragments, source_to_display, display_to_source) + tuple. + """ + # Merge all input processors together. + input_processors = self.input_processors or [] + if self.include_default_input_processors: + input_processors = self.default_input_processors + input_processors + + merged_processor = merge_processors(input_processors) + + def transform( + lineno: int, + fragments: StyleAndTextTuples, + get_line: Callable[[int], StyleAndTextTuples], + ) -> _ProcessedLine: + "Transform the fragments for a given line number." + + # Get cursor position at this line. + def source_to_display(i: int) -> int: + """X position from the buffer to the x position in the + processed fragment list. By default, we start from the 'identity' + operation.""" + return i + + transformation = merged_processor.apply_transformation( + TransformationInput( + self, + document, + lineno, + source_to_display, + fragments, + width, + height, + get_line, + ) + ) + + return _ProcessedLine( + transformation.fragments, + transformation.source_to_display, + transformation.display_to_source, + ) + + def create_func() -> Callable[[int], _ProcessedLine]: + get_line = self._get_formatted_text_for_line_func(document) + cache: dict[int, _ProcessedLine] = {} + + def get_processed_line(i: int) -> _ProcessedLine: + try: + return cache[i] + except KeyError: + processed_line = transform(i, get_line(i), get_line) + cache[i] = processed_line + return processed_line + + return get_processed_line + + return create_func() + + def create_content( + self, width: int, height: int, preview_search: bool = False + ) -> UIContent: + """ + Create a UIContent. + """ + buffer = self.buffer + + # Trigger history loading of the buffer. We do this during the + # rendering of the UI here, because it needs to happen when an + # `Application` with its event loop is running. During the rendering of + # the buffer control is the earliest place we can achieve this, where + # we're sure the right event loop is active, and don't require user + # interaction (like in a key binding). + buffer.load_history_if_not_yet_loaded() + + # Get the document to be shown. 
If we are currently searching (the + # search buffer has focus, and the preview_search filter is enabled), + # then use the search document, which has possibly a different + # text/cursor position.) + search_control = self.search_buffer_control + preview_now = preview_search or bool( + # Only if this feature is enabled. + self.preview_search() + and + # And something was typed in the associated search field. + search_control + and search_control.buffer.text + and + # And we are searching in this control. (Many controls can point to + # the same search field, like in Pyvim.) + get_app().layout.search_target_buffer_control == self + ) + + if preview_now and search_control is not None: + ss = self.search_state + + document = buffer.document_for_search( + SearchState( + text=search_control.buffer.text, + direction=ss.direction, + ignore_case=ss.ignore_case, + ) + ) + else: + document = buffer.document + + get_processed_line = self._create_get_processed_line_func( + document, width, height + ) + self._last_get_processed_line = get_processed_line + + def translate_rowcol(row: int, col: int) -> Point: + "Return the content column for this coordinate." + return Point(x=get_processed_line(row).source_to_display(col), y=row) + + def get_line(i: int) -> StyleAndTextTuples: + "Return the fragments for a given line number." + fragments = get_processed_line(i).fragments + + # Add a space at the end, because that is a possible cursor + # position. (When inserting after the input.) We should do this on + # all the lines, not just the line containing the cursor. (Because + # otherwise, line wrapping/scrolling could change when moving the + # cursor around.) + fragments = fragments + [("", " ")] + return fragments + + content = UIContent( + get_line=get_line, + line_count=document.line_count, + cursor_position=translate_rowcol( + document.cursor_position_row, document.cursor_position_col + ), + ) + + # If there is an auto completion going on, use that start point for a + # pop-up menu position. (But only when this buffer has the focus -- + # there is only one place for a menu, determined by the focused buffer.) + if get_app().layout.current_control == self: + menu_position = self.menu_position() if self.menu_position else None + if menu_position is not None: + assert isinstance(menu_position, int) + menu_row, menu_col = buffer.document.translate_index_to_position( + menu_position + ) + content.menu_position = translate_rowcol(menu_row, menu_col) + elif buffer.complete_state: + # Position for completion menu. + # Note: We use 'min', because the original cursor position could be + # behind the input string when the actual completion is for + # some reason shorter than the text we had before. (A completion + # can change and shorten the input.) + menu_row, menu_col = buffer.document.translate_index_to_position( + min( + buffer.cursor_position, + buffer.complete_state.original_document.cursor_position, + ) + ) + content.menu_position = translate_rowcol(menu_row, menu_col) + else: + content.menu_position = None + + return content + + def mouse_handler(self, mouse_event: MouseEvent) -> NotImplementedOrNone: + """ + Mouse handler for this control. + """ + buffer = self.buffer + position = mouse_event.position + + # Focus buffer when clicked. + if get_app().layout.current_control == self: + if self._last_get_processed_line: + processed_line = self._last_get_processed_line(position.y) + + # Translate coordinates back to the cursor position of the + # original input. 
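+                # For example (editor's note, hypothetical numbers): if a
+                # processor inserted extra display-only cells before this
+                # column, the display x-position will be larger than the
+                # source column, and `display_to_source` maps it back:
+                #
+                #     processed_line.display_to_source(7)  # -> e.g. 4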
+ xpos = processed_line.display_to_source(position.x) + index = buffer.document.translate_row_col_to_index(position.y, xpos) + + # Set the cursor position. + if mouse_event.event_type == MouseEventType.MOUSE_DOWN: + buffer.exit_selection() + buffer.cursor_position = index + + elif ( + mouse_event.event_type == MouseEventType.MOUSE_MOVE + and mouse_event.button != MouseButton.NONE + ): + # Click and drag to highlight a selection + if ( + buffer.selection_state is None + and abs(buffer.cursor_position - index) > 0 + ): + buffer.start_selection(selection_type=SelectionType.CHARACTERS) + buffer.cursor_position = index + + elif mouse_event.event_type == MouseEventType.MOUSE_UP: + # When the cursor was moved to another place, select the text. + # (The >1 is actually a small but acceptable workaround for + # selecting text in Vi navigation mode. In navigation mode, + # the cursor can never be after the text, so the cursor + # will be repositioned automatically.) + if abs(buffer.cursor_position - index) > 1: + if buffer.selection_state is None: + buffer.start_selection( + selection_type=SelectionType.CHARACTERS + ) + buffer.cursor_position = index + + # Select word around cursor on double click. + # Two MOUSE_UP events in a short timespan are considered a double click. + double_click = ( + self._last_click_timestamp + and time.time() - self._last_click_timestamp < 0.3 + ) + self._last_click_timestamp = time.time() + + if double_click: + start, end = buffer.document.find_boundaries_of_current_word() + buffer.cursor_position += start + buffer.start_selection(selection_type=SelectionType.CHARACTERS) + buffer.cursor_position += end - start + else: + # Don't handle scroll events here. + return NotImplemented + + # Not focused, but focusing on click events. + else: + if ( + self.focus_on_click() + and mouse_event.event_type == MouseEventType.MOUSE_UP + ): + # Focus happens on mouseup. (If we did this on mousedown, the + # up event will be received at the point where this widget is + # focused and be handled anyway.) + get_app().layout.current_control = self + else: + return NotImplemented + + return None + + def move_cursor_down(self) -> None: + b = self.buffer + b.cursor_position += b.document.get_cursor_down_position() + + def move_cursor_up(self) -> None: + b = self.buffer + b.cursor_position += b.document.get_cursor_up_position() + + def get_key_bindings(self) -> KeyBindingsBase | None: + """ + When additional key bindings are given. Return these. + """ + return self.key_bindings + + def get_invalidate_events(self) -> Iterable[Event[object]]: + """ + Return the Window invalidate events. + """ + # Whenever the buffer changes, the UI has to be updated. + yield self.buffer.on_text_changed + yield self.buffer.on_cursor_position_changed + + yield self.buffer.on_completions_changed + yield self.buffer.on_suggestion_set + + +class SearchBufferControl(BufferControl): + """ + :class:`.BufferControl` which is used for searching another + :class:`.BufferControl`. + + :param ignore_case: Search case insensitive. 
+ """ + + def __init__( + self, + buffer: Buffer | None = None, + input_processors: list[Processor] | None = None, + lexer: Lexer | None = None, + focus_on_click: FilterOrBool = False, + key_bindings: KeyBindingsBase | None = None, + ignore_case: FilterOrBool = False, + ): + super().__init__( + buffer=buffer, + input_processors=input_processors, + lexer=lexer, + focus_on_click=focus_on_click, + key_bindings=key_bindings, + ) + + # If this BufferControl is used as a search field for one or more other + # BufferControls, then represents the search state. + self.searcher_search_state = SearchState(ignore_case=ignore_case) diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/dimension.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/dimension.py new file mode 100644 index 0000000000000000000000000000000000000000..2e6f5dd4eb1f55ed3fd0ab9821e6d6e60e380a07 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/dimension.py @@ -0,0 +1,220 @@ +""" +Layout dimensions are used to give the minimum, maximum and preferred +dimensions for containers and controls. +""" + +from __future__ import annotations + +from typing import TYPE_CHECKING, Any, Callable, Union + +__all__ = [ + "Dimension", + "D", + "sum_layout_dimensions", + "max_layout_dimensions", + "AnyDimension", + "to_dimension", + "is_dimension", +] + +if TYPE_CHECKING: + from typing_extensions import TypeGuard + + +class Dimension: + """ + Specified dimension (width/height) of a user control or window. + + The layout engine tries to honor the preferred size. If that is not + possible, because the terminal is larger or smaller, it tries to keep in + between min and max. + + :param min: Minimum size. + :param max: Maximum size. + :param weight: For a VSplit/HSplit, the actual size will be determined + by taking the proportion of weights from all the children. + E.g. When there are two children, one with a weight of 1, + and the other with a weight of 2, the second will always be + twice as big as the first, if the min/max values allow it. + :param preferred: Preferred size. + """ + + def __init__( + self, + min: int | None = None, + max: int | None = None, + weight: int | None = None, + preferred: int | None = None, + ) -> None: + if weight is not None: + assert weight >= 0 # Also cannot be a float. + + assert min is None or min >= 0 + assert max is None or max >= 0 + assert preferred is None or preferred >= 0 + + self.min_specified = min is not None + self.max_specified = max is not None + self.preferred_specified = preferred is not None + self.weight_specified = weight is not None + + if min is None: + min = 0 # Smallest possible value. + if max is None: # 0-values are allowed, so use "is None" + max = 1000**10 # Something huge. + if preferred is None: + preferred = min + if weight is None: + weight = 1 + + self.min = min + self.max = max + self.preferred = preferred + self.weight = weight + + # Don't allow situations where max < min. (This would be a bug.) + if max < min: + raise ValueError("Invalid Dimension: max < min.") + + # Make sure that the 'preferred' size is always in the min..max range. + if self.preferred < self.min: + self.preferred = self.min + + if self.preferred > self.max: + self.preferred = self.max + + @classmethod + def exact(cls, amount: int) -> Dimension: + """ + Return a :class:`.Dimension` with an exact size. (min, max and + preferred set to ``amount``). 
+ """ + return cls(min=amount, max=amount, preferred=amount) + + @classmethod + def zero(cls) -> Dimension: + """ + Create a dimension that represents a zero size. (Used for 'invisible' + controls.) + """ + return cls.exact(amount=0) + + def is_zero(self) -> bool: + "True if this `Dimension` represents a zero size." + return self.preferred == 0 or self.max == 0 + + def __repr__(self) -> str: + fields = [] + if self.min_specified: + fields.append(f"min={self.min!r}") + if self.max_specified: + fields.append(f"max={self.max!r}") + if self.preferred_specified: + fields.append(f"preferred={self.preferred!r}") + if self.weight_specified: + fields.append(f"weight={self.weight!r}") + + return "Dimension({})".format(", ".join(fields)) + + +def sum_layout_dimensions(dimensions: list[Dimension]) -> Dimension: + """ + Sum a list of :class:`.Dimension` instances. + """ + min = sum(d.min for d in dimensions) + max = sum(d.max for d in dimensions) + preferred = sum(d.preferred for d in dimensions) + + return Dimension(min=min, max=max, preferred=preferred) + + +def max_layout_dimensions(dimensions: list[Dimension]) -> Dimension: + """ + Take the maximum of a list of :class:`.Dimension` instances. + Used when we have a HSplit/VSplit, and we want to get the best width/height.) + """ + if not len(dimensions): + return Dimension.zero() + + # If all dimensions are size zero. Return zero. + # (This is important for HSplit/VSplit, to report the right values to their + # parent when all children are invisible.) + if all(d.is_zero() for d in dimensions): + return dimensions[0] + + # Ignore empty dimensions. (They should not reduce the size of others.) + dimensions = [d for d in dimensions if not d.is_zero()] + + if dimensions: + # Take the highest minimum dimension. + min_ = max(d.min for d in dimensions) + + # For the maximum, we would prefer not to go larger than then smallest + # 'max' value, unless other dimensions have a bigger preferred value. + # This seems to work best: + # - We don't want that a widget with a small height in a VSplit would + # shrink other widgets in the split. + # If it doesn't work well enough, then it's up to the UI designer to + # explicitly pass dimensions. + max_ = min(d.max for d in dimensions) + max_ = max(max_, max(d.preferred for d in dimensions)) + + # Make sure that min>=max. In some scenarios, when certain min..max + # ranges don't have any overlap, we can end up in such an impossible + # situation. In that case, give priority to the max value. + # E.g. taking (1..5) and (8..9) would return (8..5). Instead take (8..8). + if min_ > max_: + max_ = min_ + + preferred = max(d.preferred for d in dimensions) + + return Dimension(min=min_, max=max_, preferred=preferred) + else: + return Dimension() + + +# Anything that can be converted to a dimension. +AnyDimension = Union[ + None, # None is a valid dimension that will fit anything. + int, + Dimension, + # Callable[[], 'AnyDimension'] # Recursive definition not supported by mypy. + Callable[[], Any], +] + + +def to_dimension(value: AnyDimension) -> Dimension: + """ + Turn the given object into a `Dimension` object. + """ + if value is None: + return Dimension() + if isinstance(value, int): + return Dimension.exact(value) + if isinstance(value, Dimension): + return value + if callable(value): + return to_dimension(value()) + + raise ValueError("Not an integer or Dimension object.") + + +def is_dimension(value: object) -> TypeGuard[AnyDimension]: + """ + Test whether the given value could be a valid dimension. 
+ (For usage in an assertion. It's not guaranteed in case of a callable.) + """ + if value is None: + return True + if callable(value): + return True # Assume it's a callable that doesn't take arguments. + if isinstance(value, (int, Dimension)): + return True + return False + + +# Common alias. +D = Dimension + +# For backward-compatibility. +LayoutDimension = Dimension diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/dummy.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/dummy.py new file mode 100644 index 0000000000000000000000000000000000000000..1ee3e6c9bdd3474a2135522acd8e62b55d2bcfa1 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/dummy.py @@ -0,0 +1,40 @@ +""" +Dummy layout. Used when somebody creates an `Application` without specifying a +`Layout`. +""" + +from __future__ import annotations + +from prompt_toolkit.formatted_text import HTML +from prompt_toolkit.key_binding import KeyBindings +from prompt_toolkit.key_binding.key_processor import KeyPressEvent + +from .containers import Window +from .controls import FormattedTextControl +from .dimension import D +from .layout import Layout + +__all__ = [ + "create_dummy_layout", +] + +E = KeyPressEvent + + +def create_dummy_layout() -> Layout: + """ + Create a dummy layout for use in an 'Application' that doesn't have a + layout specified. When ENTER is pressed, the application quits. + """ + kb = KeyBindings() + + @kb.add("enter") + def enter(event: E) -> None: + event.app.exit() + + control = FormattedTextControl( + HTML("No layout specified. Press ENTER to quit."), + key_bindings=kb, + ) + window = Window(content=control, height=D(min=1)) + return Layout(container=window, focused_element=window) diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/layout.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/layout.py new file mode 100644 index 0000000000000000000000000000000000000000..f9b7110925d9e79475bac08415f99d16fd99fd93 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/layout.py @@ -0,0 +1,412 @@ +""" +Wrapper for the layout. +""" + +from __future__ import annotations + +from typing import Generator, Iterable, Union + +from prompt_toolkit.buffer import Buffer + +from .containers import ( + AnyContainer, + ConditionalContainer, + Container, + Window, + to_container, +) +from .controls import BufferControl, SearchBufferControl, UIControl + +__all__ = [ + "Layout", + "InvalidLayoutError", + "walk", +] + +FocusableElement = Union[str, Buffer, UIControl, AnyContainer] + + +class Layout: + """ + The layout for a prompt_toolkit + :class:`~prompt_toolkit.application.Application`. + This also keeps track of which user control is focused. + + :param container: The "root" container for the layout. + :param focused_element: element to be focused initially. (Can be anything + the `focus` function accepts.) + """ + + def __init__( + self, + container: AnyContainer, + focused_element: FocusableElement | None = None, + ) -> None: + self.container = to_container(container) + self._stack: list[Window] = [] + + # Map search BufferControl back to the original BufferControl. + # This is used to keep track of when exactly we are searching, and for + # applying the search. + # When a link exists in this dictionary, that means the search is + # currently active. + # Map: search_buffer_control -> original buffer control. 
+ self.search_links: dict[SearchBufferControl, BufferControl] = {} + + # Mapping that maps the children in the layout to their parent. + # This relationship is calculated dynamically, each time when the UI + # is rendered. (UI elements have only references to their children.) + self._child_to_parent: dict[Container, Container] = {} + + if focused_element is None: + try: + self._stack.append(next(self.find_all_windows())) + except StopIteration as e: + raise InvalidLayoutError( + "Invalid layout. The layout does not contain any Window object." + ) from e + else: + self.focus(focused_element) + + # List of visible windows. + self.visible_windows: list[Window] = [] # List of `Window` objects. + + def __repr__(self) -> str: + return f"Layout({self.container!r}, current_window={self.current_window!r})" + + def find_all_windows(self) -> Generator[Window, None, None]: + """ + Find all the :class:`.UIControl` objects in this layout. + """ + for item in self.walk(): + if isinstance(item, Window): + yield item + + def find_all_controls(self) -> Iterable[UIControl]: + for container in self.find_all_windows(): + yield container.content + + def focus(self, value: FocusableElement) -> None: + """ + Focus the given UI element. + + `value` can be either: + + - a :class:`.UIControl` + - a :class:`.Buffer` instance or the name of a :class:`.Buffer` + - a :class:`.Window` + - Any container object. In this case we will focus the :class:`.Window` + from this container that was focused most recent, or the very first + focusable :class:`.Window` of the container. + """ + # BufferControl by buffer name. + if isinstance(value, str): + for control in self.find_all_controls(): + if isinstance(control, BufferControl) and control.buffer.name == value: + self.focus(control) + return + raise ValueError(f"Couldn't find Buffer in the current layout: {value!r}.") + + # BufferControl by buffer object. + elif isinstance(value, Buffer): + for control in self.find_all_controls(): + if isinstance(control, BufferControl) and control.buffer == value: + self.focus(control) + return + raise ValueError(f"Couldn't find Buffer in the current layout: {value!r}.") + + # Focus UIControl. + elif isinstance(value, UIControl): + if value not in self.find_all_controls(): + raise ValueError( + "Invalid value. Container does not appear in the layout." + ) + if not value.is_focusable(): + raise ValueError("Invalid value. UIControl is not focusable.") + + self.current_control = value + + # Otherwise, expecting any Container object. + else: + value = to_container(value) + + if isinstance(value, Window): + # This is a `Window`: focus that. + if value not in self.find_all_windows(): + raise ValueError( + f"Invalid value. Window does not appear in the layout: {value!r}" + ) + + self.current_window = value + else: + # Focus a window in this container. + # If we have many windows as part of this container, and some + # of them have been focused before, take the last focused + # item. (This is very useful when the UI is composed of more + # complex sub components.) + windows = [] + for c in walk(value, skip_hidden=True): + if isinstance(c, Window) and c.content.is_focusable(): + windows.append(c) + + # Take the first one that was focused before. + for w in reversed(self._stack): + if w in windows: + self.current_window = w + return + + # None was focused before: take the very first focusable window. + if windows: + self.current_window = windows[0] + return + + raise ValueError( + f"Invalid value. 
Container cannot be focused: {value!r}" + ) + + def has_focus(self, value: FocusableElement) -> bool: + """ + Check whether the given control has the focus. + :param value: :class:`.UIControl` or :class:`.Window` instance. + """ + if isinstance(value, str): + if self.current_buffer is None: + return False + return self.current_buffer.name == value + if isinstance(value, Buffer): + return self.current_buffer == value + if isinstance(value, UIControl): + return self.current_control == value + else: + value = to_container(value) + if isinstance(value, Window): + return self.current_window == value + else: + # Check whether this "container" is focused. This is true if + # one of the elements inside is focused. + for element in walk(value): + if element == self.current_window: + return True + return False + + @property + def current_control(self) -> UIControl: + """ + Get the :class:`.UIControl` to currently has the focus. + """ + return self._stack[-1].content + + @current_control.setter + def current_control(self, control: UIControl) -> None: + """ + Set the :class:`.UIControl` to receive the focus. + """ + for window in self.find_all_windows(): + if window.content == control: + self.current_window = window + return + + raise ValueError("Control not found in the user interface.") + + @property + def current_window(self) -> Window: + "Return the :class:`.Window` object that is currently focused." + return self._stack[-1] + + @current_window.setter + def current_window(self, value: Window) -> None: + "Set the :class:`.Window` object to be currently focused." + self._stack.append(value) + + @property + def is_searching(self) -> bool: + "True if we are searching right now." + return self.current_control in self.search_links + + @property + def search_target_buffer_control(self) -> BufferControl | None: + """ + Return the :class:`.BufferControl` in which we are searching or `None`. + """ + # Not every `UIControl` is a `BufferControl`. This only applies to + # `BufferControl`. + control = self.current_control + + if isinstance(control, SearchBufferControl): + return self.search_links.get(control) + else: + return None + + def get_focusable_windows(self) -> Iterable[Window]: + """ + Return all the :class:`.Window` objects which are focusable (in the + 'modal' area). + """ + for w in self.walk_through_modal_area(): + if isinstance(w, Window) and w.content.is_focusable(): + yield w + + def get_visible_focusable_windows(self) -> list[Window]: + """ + Return a list of :class:`.Window` objects that are focusable. + """ + # focusable windows are windows that are visible, but also part of the + # modal container. Make sure to keep the ordering. + visible_windows = self.visible_windows + return [w for w in self.get_focusable_windows() if w in visible_windows] + + @property + def current_buffer(self) -> Buffer | None: + """ + The currently focused :class:`~.Buffer` or `None`. + """ + ui_control = self.current_control + if isinstance(ui_control, BufferControl): + return ui_control.buffer + return None + + def get_buffer_by_name(self, buffer_name: str) -> Buffer | None: + """ + Look in the layout for a buffer with the given name. + Return `None` when nothing was found. + """ + for w in self.walk(): + if isinstance(w, Window) and isinstance(w.content, BufferControl): + if w.content.buffer.name == buffer_name: + return w.content.buffer + return None + + @property + def buffer_has_focus(self) -> bool: + """ + Return `True` if the currently focused control is a + :class:`.BufferControl`. 
(For instance, used to determine whether the + default key bindings should be active or not.) + """ + ui_control = self.current_control + return isinstance(ui_control, BufferControl) + + @property + def previous_control(self) -> UIControl: + """ + Get the :class:`.UIControl` to previously had the focus. + """ + try: + return self._stack[-2].content + except IndexError: + return self._stack[-1].content + + def focus_last(self) -> None: + """ + Give the focus to the last focused control. + """ + if len(self._stack) > 1: + self._stack = self._stack[:-1] + + def focus_next(self) -> None: + """ + Focus the next visible/focusable Window. + """ + windows = self.get_visible_focusable_windows() + + if len(windows) > 0: + try: + index = windows.index(self.current_window) + except ValueError: + index = 0 + else: + index = (index + 1) % len(windows) + + self.focus(windows[index]) + + def focus_previous(self) -> None: + """ + Focus the previous visible/focusable Window. + """ + windows = self.get_visible_focusable_windows() + + if len(windows) > 0: + try: + index = windows.index(self.current_window) + except ValueError: + index = 0 + else: + index = (index - 1) % len(windows) + + self.focus(windows[index]) + + def walk(self) -> Iterable[Container]: + """ + Walk through all the layout nodes (and their children) and yield them. + """ + yield from walk(self.container) + + def walk_through_modal_area(self) -> Iterable[Container]: + """ + Walk through all the containers which are in the current 'modal' part + of the layout. + """ + # Go up in the tree, and find the root. (it will be a part of the + # layout, if the focus is in a modal part.) + root: Container = self.current_window + while not root.is_modal() and root in self._child_to_parent: + root = self._child_to_parent[root] + + yield from walk(root) + + def update_parents_relations(self) -> None: + """ + Update child->parent relationships mapping. + """ + parents = {} + + def walk(e: Container) -> None: + for c in e.get_children(): + parents[c] = e + walk(c) + + walk(self.container) + + self._child_to_parent = parents + + def reset(self) -> None: + # Remove all search links when the UI starts. + # (Important, for instance when control-c is been pressed while + # searching. The prompt cancels, but next `run()` call the search + # links are still there.) + self.search_links.clear() + + self.container.reset() + + def get_parent(self, container: Container) -> Container | None: + """ + Return the parent container for the given container, or ``None``, if it + wasn't found. + """ + try: + return self._child_to_parent[container] + except KeyError: + return None + + +class InvalidLayoutError(Exception): + pass + + +def walk(container: Container, skip_hidden: bool = False) -> Iterable[Container]: + """ + Walk through layout, starting at this container. + """ + # When `skip_hidden` is set, don't go into disabled ConditionalContainer containers. 
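+ # (A ConditionalContainer wraps another container behind a `Filter`;
+ # evaluating `container.filter()` for the current application state
+ # tells us whether that subtree is currently visible.)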
+ if ( + skip_hidden + and isinstance(container, ConditionalContainer) + and not container.filter() + ): + return + + yield container + + for c in container.get_children(): + # yield from walk(c) + yield from walk(c, skip_hidden=skip_hidden) diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/margins.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/margins.py new file mode 100644 index 0000000000000000000000000000000000000000..737a74d29bc2648688679078bc0d53331d4aadec --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/margins.py @@ -0,0 +1,304 @@ +""" +Margin implementations for a :class:`~prompt_toolkit.layout.containers.Window`. +""" + +from __future__ import annotations + +from abc import ABCMeta, abstractmethod +from typing import TYPE_CHECKING, Callable + +from prompt_toolkit.filters import FilterOrBool, to_filter +from prompt_toolkit.formatted_text import ( + StyleAndTextTuples, + fragment_list_to_text, + to_formatted_text, +) +from prompt_toolkit.utils import get_cwidth + +from .controls import UIContent + +if TYPE_CHECKING: + from .containers import WindowRenderInfo + +__all__ = [ + "Margin", + "NumberedMargin", + "ScrollbarMargin", + "ConditionalMargin", + "PromptMargin", +] + + +class Margin(metaclass=ABCMeta): + """ + Base interface for a margin. + """ + + @abstractmethod + def get_width(self, get_ui_content: Callable[[], UIContent]) -> int: + """ + Return the width that this margin is going to consume. + + :param get_ui_content: Callable that asks the user control to create + a :class:`.UIContent` instance. This can be used for instance to + obtain the number of lines. + """ + return 0 + + @abstractmethod + def create_margin( + self, window_render_info: WindowRenderInfo, width: int, height: int + ) -> StyleAndTextTuples: + """ + Creates a margin. + This should return a list of (style_str, text) tuples. + + :param window_render_info: + :class:`~prompt_toolkit.layout.containers.WindowRenderInfo` + instance, generated after rendering and copying the visible part of + the :class:`~prompt_toolkit.layout.controls.UIControl` into the + :class:`~prompt_toolkit.layout.containers.Window`. + :param width: The width that's available for this margin. (As reported + by :meth:`.get_width`.) + :param height: The height that's available for this margin. (The height + of the :class:`~prompt_toolkit.layout.containers.Window`.) + """ + return [] + + +class NumberedMargin(Margin): + """ + Margin that displays the line numbers. + + :param relative: Number relative to the cursor position. Similar to the Vi + 'relativenumber' option. + :param display_tildes: Display tildes after the end of the document, just + like Vi does. + """ + + def __init__( + self, relative: FilterOrBool = False, display_tildes: FilterOrBool = False + ) -> None: + self.relative = to_filter(relative) + self.display_tildes = to_filter(display_tildes) + + def get_width(self, get_ui_content: Callable[[], UIContent]) -> int: + line_count = get_ui_content().line_count + return max(3, len(f"{line_count}") + 1) + + def create_margin( + self, window_render_info: WindowRenderInfo, width: int, height: int + ) -> StyleAndTextTuples: + relative = self.relative() + + style = "class:line-number" + style_current = "class:line-number.current" + + # Get current line number. + current_lineno = window_render_info.ui_content.cursor_position.y + + # Construct margin. 
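+ # (The margin is a flat fragment list: each visible row appends its
+ # line number, omitted for soft-wrap continuation rows, followed by
+ # a "\n" fragment.)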
+ result: StyleAndTextTuples = [] + last_lineno = None + + for y, lineno in enumerate(window_render_info.displayed_lines): + # Only display line number if this line is not a continuation of the previous line. + if lineno != last_lineno: + if lineno is None: + pass + elif lineno == current_lineno: + # Current line. + if relative: + # Left align current number in relative mode. + result.append((style_current, "%i" % (lineno + 1))) + else: + result.append( + (style_current, ("%i " % (lineno + 1)).rjust(width)) + ) + else: + # Other lines. + if relative: + lineno = abs(lineno - current_lineno) - 1 + + result.append((style, ("%i " % (lineno + 1)).rjust(width))) + + last_lineno = lineno + result.append(("", "\n")) + + # Fill with tildes. + if self.display_tildes(): + while y < window_render_info.window_height: + result.append(("class:tilde", "~\n")) + y += 1 + + return result + + +class ConditionalMargin(Margin): + """ + Wrapper around other :class:`.Margin` classes to show/hide them. + """ + + def __init__(self, margin: Margin, filter: FilterOrBool) -> None: + self.margin = margin + self.filter = to_filter(filter) + + def get_width(self, get_ui_content: Callable[[], UIContent]) -> int: + if self.filter(): + return self.margin.get_width(get_ui_content) + else: + return 0 + + def create_margin( + self, window_render_info: WindowRenderInfo, width: int, height: int + ) -> StyleAndTextTuples: + if width and self.filter(): + return self.margin.create_margin(window_render_info, width, height) + else: + return [] + + +class ScrollbarMargin(Margin): + """ + Margin displaying a scrollbar. + + :param display_arrows: Display scroll up/down arrows. + """ + + def __init__( + self, + display_arrows: FilterOrBool = False, + up_arrow_symbol: str = "^", + down_arrow_symbol: str = "v", + ) -> None: + self.display_arrows = to_filter(display_arrows) + self.up_arrow_symbol = up_arrow_symbol + self.down_arrow_symbol = down_arrow_symbol + + def get_width(self, get_ui_content: Callable[[], UIContent]) -> int: + return 1 + + def create_margin( + self, window_render_info: WindowRenderInfo, width: int, height: int + ) -> StyleAndTextTuples: + content_height = window_render_info.content_height + window_height = window_render_info.window_height + display_arrows = self.display_arrows() + + if display_arrows: + window_height -= 2 + + try: + fraction_visible = len(window_render_info.displayed_lines) / float( + content_height + ) + fraction_above = window_render_info.vertical_scroll / float(content_height) + + scrollbar_height = int( + min(window_height, max(1, window_height * fraction_visible)) + ) + scrollbar_top = int(window_height * fraction_above) + except ZeroDivisionError: + return [] + else: + + def is_scroll_button(row: int) -> bool: + "True if we should display a button on this row." + return scrollbar_top <= row <= scrollbar_top + scrollbar_height + + # Up arrow. + result: StyleAndTextTuples = [] + if display_arrows: + result.extend( + [ + ("class:scrollbar.arrow", self.up_arrow_symbol), + ("class:scrollbar", "\n"), + ] + ) + + # Scrollbar body. + scrollbar_background = "class:scrollbar.background" + scrollbar_background_start = "class:scrollbar.background,scrollbar.start" + scrollbar_button = "class:scrollbar.button" + scrollbar_button_end = "class:scrollbar.button,scrollbar.end" + + for i in range(window_height): + if is_scroll_button(i): + if not is_scroll_button(i + 1): + # Give the last cell a different style, because we + # want to underline this. 
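+ # (Styles "scrollbar.start" and "scrollbar.end" mark the cells where
+ # background meets button, so a theme can draw that boundary, e.g.
+ # with an underline.)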
+ result.append((scrollbar_button_end, " ")) + else: + result.append((scrollbar_button, " ")) + else: + if is_scroll_button(i + 1): + result.append((scrollbar_background_start, " ")) + else: + result.append((scrollbar_background, " ")) + result.append(("", "\n")) + + # Down arrow + if display_arrows: + result.append(("class:scrollbar.arrow", self.down_arrow_symbol)) + + return result + + +class PromptMargin(Margin): + """ + [Deprecated] + + Create margin that displays a prompt. + This can display one prompt at the first line, and a continuation prompt + (e.g, just dots) on all the following lines. + + This `PromptMargin` implementation has been largely superseded in favor of + the `get_line_prefix` attribute of `Window`. The reason is that a margin is + always a fixed width, while `get_line_prefix` can return a variable width + prefix in front of every line, making it more powerful, especially for line + continuations. + + :param get_prompt: Callable returns formatted text or a list of + `(style_str, type)` tuples to be shown as the prompt at the first line. + :param get_continuation: Callable that takes three inputs. The width (int), + line_number (int), and is_soft_wrap (bool). It should return formatted + text or a list of `(style_str, type)` tuples for the next lines of the + input. + """ + + def __init__( + self, + get_prompt: Callable[[], StyleAndTextTuples], + get_continuation: None + | (Callable[[int, int, bool], StyleAndTextTuples]) = None, + ) -> None: + self.get_prompt = get_prompt + self.get_continuation = get_continuation + + def get_width(self, get_ui_content: Callable[[], UIContent]) -> int: + "Width to report to the `Window`." + # Take the width from the first line. + text = fragment_list_to_text(self.get_prompt()) + return get_cwidth(text) + + def create_margin( + self, window_render_info: WindowRenderInfo, width: int, height: int + ) -> StyleAndTextTuples: + get_continuation = self.get_continuation + result: StyleAndTextTuples = [] + + # First line. + result.extend(to_formatted_text(self.get_prompt())) + + # Next lines. 
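+ # (`get_continuation` is called as `get_continuation(width, y,
+ # y == last_y)`; a repeated source line number means the row is a
+ # soft-wrap continuation of the previous one.)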
+ if get_continuation: + last_y = None + + for y in window_render_info.displayed_lines[1:]: + result.append(("", "\n")) + result.extend( + to_formatted_text(get_continuation(width, y, y == last_y)) + ) + last_y = y + + return result diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/menus.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/menus.py new file mode 100644 index 0000000000000000000000000000000000000000..612e8ab6a31fccc76ca06f8aca01f0a0b742103f --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/menus.py @@ -0,0 +1,748 @@ +from __future__ import annotations + +import math +from itertools import zip_longest +from typing import TYPE_CHECKING, Callable, Iterable, Sequence, TypeVar, cast +from weakref import WeakKeyDictionary + +from prompt_toolkit.application.current import get_app +from prompt_toolkit.buffer import CompletionState +from prompt_toolkit.completion import Completion +from prompt_toolkit.data_structures import Point +from prompt_toolkit.filters import ( + Condition, + FilterOrBool, + has_completions, + is_done, + to_filter, +) +from prompt_toolkit.formatted_text import ( + StyleAndTextTuples, + fragment_list_width, + to_formatted_text, +) +from prompt_toolkit.key_binding.key_processor import KeyPressEvent +from prompt_toolkit.layout.utils import explode_text_fragments +from prompt_toolkit.mouse_events import MouseEvent, MouseEventType +from prompt_toolkit.utils import get_cwidth + +from .containers import ConditionalContainer, HSplit, ScrollOffsets, Window +from .controls import GetLinePrefixCallable, UIContent, UIControl +from .dimension import Dimension +from .margins import ScrollbarMargin + +if TYPE_CHECKING: + from prompt_toolkit.key_binding.key_bindings import ( + KeyBindings, + NotImplementedOrNone, + ) + + +__all__ = [ + "CompletionsMenu", + "MultiColumnCompletionsMenu", +] + +E = KeyPressEvent + + +class CompletionsMenuControl(UIControl): + """ + Helper for drawing the complete menu to the screen. + + :param scroll_offset: Number (integer) representing the preferred amount of + completions to be displayed before and after the current one. When this + is a very high number, the current completion will be shown in the + middle most of the time. + """ + + # Preferred minimum size of the menu control. + # The CompletionsMenu class defines a width of 8, and there is a scrollbar + # of 1.) + MIN_WIDTH = 7 + + def has_focus(self) -> bool: + return False + + def preferred_width(self, max_available_width: int) -> int | None: + complete_state = get_app().current_buffer.complete_state + if complete_state: + menu_width = self._get_menu_width(500, complete_state) + menu_meta_width = self._get_menu_meta_width(500, complete_state) + + return menu_width + menu_meta_width + else: + return 0 + + def preferred_height( + self, + width: int, + max_available_height: int, + wrap_lines: bool, + get_line_prefix: GetLinePrefixCallable | None, + ) -> int | None: + complete_state = get_app().current_buffer.complete_state + if complete_state: + return len(complete_state.completions) + else: + return 0 + + def create_content(self, width: int, height: int) -> UIContent: + """ + Create a UIContent object for this control. + """ + complete_state = get_app().current_buffer.complete_state + if complete_state: + completions = complete_state.completions + index = complete_state.complete_index # Can be None! + + # Calculate width of completions menu. 
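+ # (The main column is measured first; the meta column only gets
+ # whatever width is left over.)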
+ menu_width = self._get_menu_width(width, complete_state) + menu_meta_width = self._get_menu_meta_width( + width - menu_width, complete_state + ) + show_meta = self._show_meta(complete_state) + + def get_line(i: int) -> StyleAndTextTuples: + c = completions[i] + is_current_completion = i == index + result = _get_menu_item_fragments( + c, is_current_completion, menu_width, space_after=True + ) + + if show_meta: + result += self._get_menu_item_meta_fragments( + c, is_current_completion, menu_meta_width + ) + return result + + return UIContent( + get_line=get_line, + cursor_position=Point(x=0, y=index or 0), + line_count=len(completions), + ) + + return UIContent() + + def _show_meta(self, complete_state: CompletionState) -> bool: + """ + Return ``True`` if we need to show a column with meta information. + """ + return any(c.display_meta_text for c in complete_state.completions) + + def _get_menu_width(self, max_width: int, complete_state: CompletionState) -> int: + """ + Return the width of the main column. + """ + return min( + max_width, + max( + self.MIN_WIDTH, + max(get_cwidth(c.display_text) for c in complete_state.completions) + 2, + ), + ) + + def _get_menu_meta_width( + self, max_width: int, complete_state: CompletionState + ) -> int: + """ + Return the width of the meta column. + """ + + def meta_width(completion: Completion) -> int: + return get_cwidth(completion.display_meta_text) + + if self._show_meta(complete_state): + # If the amount of completions is over 200, compute the width based + # on the first 200 completions, otherwise this can be very slow. + completions = complete_state.completions + if len(completions) > 200: + completions = completions[:200] + + return min(max_width, max(meta_width(c) for c in completions) + 2) + else: + return 0 + + def _get_menu_item_meta_fragments( + self, completion: Completion, is_current_completion: bool, width: int + ) -> StyleAndTextTuples: + if is_current_completion: + style_str = "class:completion-menu.meta.completion.current" + else: + style_str = "class:completion-menu.meta.completion" + + text, tw = _trim_formatted_text(completion.display_meta, width - 2) + padding = " " * (width - 1 - tw) + + return to_formatted_text( + cast(StyleAndTextTuples, []) + [("", " ")] + text + [("", padding)], + style=style_str, + ) + + def mouse_handler(self, mouse_event: MouseEvent) -> NotImplementedOrNone: + """ + Handle mouse events: clicking and scrolling. + """ + b = get_app().current_buffer + + if mouse_event.event_type == MouseEventType.MOUSE_UP: + # Select completion. + b.go_to_completion(mouse_event.position.y) + b.complete_state = None + + elif mouse_event.event_type == MouseEventType.SCROLL_DOWN: + # Scroll up. + b.complete_next(count=3, disable_wrap_around=True) + + elif mouse_event.event_type == MouseEventType.SCROLL_UP: + # Scroll down. + b.complete_previous(count=3, disable_wrap_around=True) + + return None + + +def _get_menu_item_fragments( + completion: Completion, + is_current_completion: bool, + width: int, + space_after: bool = False, +) -> StyleAndTextTuples: + """ + Get the style/text tuples for a menu item, styled and trimmed to the given + width. 
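+
+ :param completion: The `Completion` to render.
+ :param is_current_completion: True for the selected item; it then gets
+     the 'completion-menu.completion.current' style.
+ :param width: Total width available for the item, including padding.
+ :param space_after: When True, keep one extra trailing cell free
+     (e.g. when a meta column follows the main column).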
+ """ + if is_current_completion: + style_str = f"class:completion-menu.completion.current {completion.style} {completion.selected_style}" + else: + style_str = "class:completion-menu.completion " + completion.style + + text, tw = _trim_formatted_text( + completion.display, (width - 2 if space_after else width - 1) + ) + + padding = " " * (width - 1 - tw) + + return to_formatted_text( + cast(StyleAndTextTuples, []) + [("", " ")] + text + [("", padding)], + style=style_str, + ) + + +def _trim_formatted_text( + formatted_text: StyleAndTextTuples, max_width: int +) -> tuple[StyleAndTextTuples, int]: + """ + Trim the text to `max_width`, append dots when the text is too long. + Returns (text, width) tuple. + """ + width = fragment_list_width(formatted_text) + + # When the text is too wide, trim it. + if width > max_width: + result = [] # Text fragments. + remaining_width = max_width - 3 + + for style_and_ch in explode_text_fragments(formatted_text): + ch_width = get_cwidth(style_and_ch[1]) + + if ch_width <= remaining_width: + result.append(style_and_ch) + remaining_width -= ch_width + else: + break + + result.append(("", "...")) + + return result, max_width - remaining_width + else: + return formatted_text, width + + +class CompletionsMenu(ConditionalContainer): + # NOTE: We use a pretty big z_index by default. Menus are supposed to be + # above anything else. We also want to make sure that the content is + # visible at the point where we draw this menu. + def __init__( + self, + max_height: int | None = None, + scroll_offset: int | Callable[[], int] = 0, + extra_filter: FilterOrBool = True, + display_arrows: FilterOrBool = False, + z_index: int = 10**8, + ) -> None: + extra_filter = to_filter(extra_filter) + display_arrows = to_filter(display_arrows) + + super().__init__( + content=Window( + content=CompletionsMenuControl(), + width=Dimension(min=8), + height=Dimension(min=1, max=max_height), + scroll_offsets=ScrollOffsets(top=scroll_offset, bottom=scroll_offset), + right_margins=[ScrollbarMargin(display_arrows=display_arrows)], + dont_extend_width=True, + style="class:completion-menu", + z_index=z_index, + ), + # Show when there are completions but not at the point we are + # returning the input. + filter=extra_filter & has_completions & ~is_done, + ) + + +class MultiColumnCompletionMenuControl(UIControl): + """ + Completion menu that displays all the completions in several columns. + When there are more completions than space for them to be displayed, an + arrow is shown on the left or right side. + + `min_rows` indicates how many rows will be available in any possible case. + When this is larger than one, it will try to use less columns and more + rows until this value is reached. + Be careful passing in a too big value, if less than the given amount of + rows are available, more columns would have been required, but + `preferred_width` doesn't know about that and reports a too small value. + This results in less completions displayed and additional scrolling. + (It's a limitation of how the layout engine currently works: first the + widths are calculated, then the heights.) + + :param suggested_max_column_width: The suggested max width of a column. + The column can still be bigger than this, but if there is place for two + columns of this width, we will display two columns. This to avoid that + if there is one very wide completion, that it doesn't significantly + reduce the amount of columns. + """ + + _required_margin = 3 # One extra padding on the right + space for arrows. 
+ + def __init__(self, min_rows: int = 3, suggested_max_column_width: int = 30) -> None: + assert min_rows >= 1 + + self.min_rows = min_rows + self.suggested_max_column_width = suggested_max_column_width + self.scroll = 0 + + # Cache for column width computations. This computation is not cheap, + # so we don't want to do it over and over again while the user + # navigates through the completions. + # (map `completion_state` to `(completion_count, width)`. We remember + # the count, because a completer can add new completions to the + # `CompletionState` while loading.) + self._column_width_for_completion_state: WeakKeyDictionary[ + CompletionState, tuple[int, int] + ] = WeakKeyDictionary() + + # Info of last rendering. + self._rendered_rows = 0 + self._rendered_columns = 0 + self._total_columns = 0 + self._render_pos_to_completion: dict[tuple[int, int], Completion] = {} + self._render_left_arrow = False + self._render_right_arrow = False + self._render_width = 0 + + def reset(self) -> None: + self.scroll = 0 + + def has_focus(self) -> bool: + return False + + def preferred_width(self, max_available_width: int) -> int | None: + """ + Preferred width: prefer to use at least min_rows, but otherwise as much + as possible horizontally. + """ + complete_state = get_app().current_buffer.complete_state + if complete_state is None: + return 0 + + column_width = self._get_column_width(complete_state) + result = int( + column_width + * math.ceil(len(complete_state.completions) / float(self.min_rows)) + ) + + # When the desired width is still more than the maximum available, + # reduce by removing columns until we are less than the available + # width. + while ( + result > column_width + and result > max_available_width - self._required_margin + ): + result -= column_width + return result + self._required_margin + + def preferred_height( + self, + width: int, + max_available_height: int, + wrap_lines: bool, + get_line_prefix: GetLinePrefixCallable | None, + ) -> int | None: + """ + Preferred height: as much as needed in order to display all the completions. + """ + complete_state = get_app().current_buffer.complete_state + if complete_state is None: + return 0 + + column_width = self._get_column_width(complete_state) + column_count = max(1, (width - self._required_margin) // column_width) + + return int(math.ceil(len(complete_state.completions) / float(column_count))) + + def create_content(self, width: int, height: int) -> UIContent: + """ + Create a UIContent object for this menu. + """ + complete_state = get_app().current_buffer.complete_state + if complete_state is None: + return UIContent() + + column_width = self._get_column_width(complete_state) + self._render_pos_to_completion = {} + + _T = TypeVar("_T") + + def grouper( + n: int, iterable: Iterable[_T], fillvalue: _T | None = None + ) -> Iterable[Sequence[_T | None]]: + "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx" + args = [iter(iterable)] * n + return zip_longest(fillvalue=fillvalue, *args) + + def is_current_completion(completion: Completion) -> bool: + "Returns True when this completion is the currently selected one." + return ( + complete_state is not None + and complete_state.complete_index is not None + and c == complete_state.current_completion + ) + + # Space required outside of the regular columns, for displaying the + # left and right arrow. + HORIZONTAL_MARGIN_REQUIRED = 3 + + # There should be at least one column, but it cannot be wider than + # the available width. 
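+ # (For example: with width=20 and a longest completion of 40 cells,
+ # the column is clamped to 20 - 3 = 17 cells.)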
+ column_width = min(width - HORIZONTAL_MARGIN_REQUIRED, column_width) + + # However, when the columns tend to be very wide, because there are + # some very wide entries, shrink it anyway. + if column_width > self.suggested_max_column_width: + # `column_width` can still be bigger that `suggested_max_column_width`, + # but if there is place for two columns, we divide by two. + column_width //= column_width // self.suggested_max_column_width + + visible_columns = max(1, (width - self._required_margin) // column_width) + + columns_ = list(grouper(height, complete_state.completions)) + rows_ = list(zip(*columns_)) + + # Make sure the current completion is always visible: update scroll offset. + selected_column = (complete_state.complete_index or 0) // height + self.scroll = min( + selected_column, max(self.scroll, selected_column - visible_columns + 1) + ) + + render_left_arrow = self.scroll > 0 + render_right_arrow = self.scroll < len(rows_[0]) - visible_columns + + # Write completions to screen. + fragments_for_line = [] + + for row_index, row in enumerate(rows_): + fragments: StyleAndTextTuples = [] + middle_row = row_index == len(rows_) // 2 + + # Draw left arrow if we have hidden completions on the left. + if render_left_arrow: + fragments.append(("class:scrollbar", "<" if middle_row else " ")) + elif render_right_arrow: + # Reserve one column empty space. (If there is a right + # arrow right now, there can be a left arrow as well.) + fragments.append(("", " ")) + + # Draw row content. + for column_index, c in enumerate(row[self.scroll :][:visible_columns]): + if c is not None: + fragments += _get_menu_item_fragments( + c, is_current_completion(c), column_width, space_after=False + ) + + # Remember render position for mouse click handler. + for x in range(column_width): + self._render_pos_to_completion[ + (column_index * column_width + x, row_index) + ] = c + else: + fragments.append(("class:completion", " " * column_width)) + + # Draw trailing padding for this row. + # (_get_menu_item_fragments only returns padding on the left.) + if render_left_arrow or render_right_arrow: + fragments.append(("class:completion", " ")) + + # Draw right arrow if we have hidden completions on the right. + if render_right_arrow: + fragments.append(("class:scrollbar", ">" if middle_row else " ")) + elif render_left_arrow: + fragments.append(("class:completion", " ")) + + # Add line. + fragments_for_line.append( + to_formatted_text(fragments, style="class:completion-menu") + ) + + self._rendered_rows = height + self._rendered_columns = visible_columns + self._total_columns = len(columns_) + self._render_left_arrow = render_left_arrow + self._render_right_arrow = render_right_arrow + self._render_width = ( + column_width * visible_columns + render_left_arrow + render_right_arrow + 1 + ) + + def get_line(i: int) -> StyleAndTextTuples: + return fragments_for_line[i] + + return UIContent(get_line=get_line, line_count=len(rows_)) + + def _get_column_width(self, completion_state: CompletionState) -> int: + """ + Return the width of each column. + """ + try: + count, width = self._column_width_for_completion_state[completion_state] + if count != len(completion_state.completions): + # Number of completions changed, recompute. 
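+ # (Raising KeyError falls through to the except branch below,
+ # which recomputes the width and refreshes the cache.)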
+ raise KeyError + return width + except KeyError: + result = ( + max(get_cwidth(c.display_text) for c in completion_state.completions) + + 1 + ) + self._column_width_for_completion_state[completion_state] = ( + len(completion_state.completions), + result, + ) + return result + + def mouse_handler(self, mouse_event: MouseEvent) -> NotImplementedOrNone: + """ + Handle scroll and click events. + """ + b = get_app().current_buffer + + def scroll_left() -> None: + b.complete_previous(count=self._rendered_rows, disable_wrap_around=True) + self.scroll = max(0, self.scroll - 1) + + def scroll_right() -> None: + b.complete_next(count=self._rendered_rows, disable_wrap_around=True) + self.scroll = min( + self._total_columns - self._rendered_columns, self.scroll + 1 + ) + + if mouse_event.event_type == MouseEventType.SCROLL_DOWN: + scroll_right() + + elif mouse_event.event_type == MouseEventType.SCROLL_UP: + scroll_left() + + elif mouse_event.event_type == MouseEventType.MOUSE_UP: + x = mouse_event.position.x + y = mouse_event.position.y + + # Mouse click on left arrow. + if x == 0: + if self._render_left_arrow: + scroll_left() + + # Mouse click on right arrow. + elif x == self._render_width - 1: + if self._render_right_arrow: + scroll_right() + + # Mouse click on completion. + else: + completion = self._render_pos_to_completion.get((x, y)) + if completion: + b.apply_completion(completion) + + return None + + def get_key_bindings(self) -> KeyBindings: + """ + Expose key bindings that handle the left/right arrow keys when the menu + is displayed. + """ + from prompt_toolkit.key_binding.key_bindings import KeyBindings + + kb = KeyBindings() + + @Condition + def filter() -> bool: + "Only handle key bindings if this menu is visible." + app = get_app() + complete_state = app.current_buffer.complete_state + + # There need to be completions, and one needs to be selected. + if complete_state is None or complete_state.complete_index is None: + return False + + # This menu needs to be visible. + return any(window.content == self for window in app.layout.visible_windows) + + def move(right: bool = False) -> None: + buff = get_app().current_buffer + complete_state = buff.complete_state + + if complete_state is not None and complete_state.complete_index is not None: + # Calculate new complete index. + new_index = complete_state.complete_index + if right: + new_index += self._rendered_rows + else: + new_index -= self._rendered_rows + + if 0 <= new_index < len(complete_state.completions): + buff.go_to_completion(new_index) + + # NOTE: the is_global is required because the completion menu will + # never be focussed. + + @kb.add("left", is_global=True, filter=filter) + def _left(event: E) -> None: + move() + + @kb.add("right", is_global=True, filter=filter) + def _right(event: E) -> None: + move(True) + + return kb + + +class MultiColumnCompletionsMenu(HSplit): + """ + Container that displays the completions in several columns. + When `show_meta` (a :class:`~prompt_toolkit.filters.Filter`) evaluates + to True, it shows the meta information at the bottom. + """ + + def __init__( + self, + min_rows: int = 3, + suggested_max_column_width: int = 30, + show_meta: FilterOrBool = True, + extra_filter: FilterOrBool = True, + z_index: int = 10**8, + ) -> None: + show_meta = to_filter(show_meta) + extra_filter = to_filter(extra_filter) + + # Display filter: show when there are completions but not at the point + # we are returning the input. 
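+ # (`&` and `~` combine `Filter` objects into a new lazy filter; it is
+ # re-evaluated every time the container is rendered.)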
+ full_filter = extra_filter & has_completions & ~is_done + + @Condition + def any_completion_has_meta() -> bool: + complete_state = get_app().current_buffer.complete_state + return complete_state is not None and any( + c.display_meta for c in complete_state.completions + ) + + # Create child windows. + # NOTE: We don't set style='class:completion-menu' to the + # `MultiColumnCompletionMenuControl`, because this is used in a + # Float that is made transparent, and the size of the control + # doesn't always correspond exactly with the size of the + # generated content. + completions_window = ConditionalContainer( + content=Window( + content=MultiColumnCompletionMenuControl( + min_rows=min_rows, + suggested_max_column_width=suggested_max_column_width, + ), + width=Dimension(min=8), + height=Dimension(min=1), + ), + filter=full_filter, + ) + + meta_window = ConditionalContainer( + content=Window(content=_SelectedCompletionMetaControl()), + filter=full_filter & show_meta & any_completion_has_meta, + ) + + # Initialize split. + super().__init__([completions_window, meta_window], z_index=z_index) + + +class _SelectedCompletionMetaControl(UIControl): + """ + Control that shows the meta information of the selected completion. + """ + + def preferred_width(self, max_available_width: int) -> int | None: + """ + Report the width of the longest meta text as the preferred width of this control. + + It could be that we use less width, but this way, we're sure that the + layout doesn't change when we select another completion (E.g. that + completions are suddenly shown in more or fewer columns.) + """ + app = get_app() + if app.current_buffer.complete_state: + state = app.current_buffer.complete_state + + if len(state.completions) >= 30: + # When there are many completions, calling `get_cwidth` for + # every `display_meta_text` is too expensive. In this case, + # just return the max available width. There will be enough + # columns anyway so that the whole screen is filled with + # completions and `create_content` will then take up as much + # space as needed. 
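+ # (Because of this early return for >= 30 completions, the [:100]
+ # slice in the branch below only acts as an extra safeguard.)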
+ return max_available_width + + return 2 + max( + get_cwidth(c.display_meta_text) for c in state.completions[:100] + ) + else: + return 0 + + def preferred_height( + self, + width: int, + max_available_height: int, + wrap_lines: bool, + get_line_prefix: GetLinePrefixCallable | None, + ) -> int | None: + return 1 + + def create_content(self, width: int, height: int) -> UIContent: + fragments = self._get_text_fragments() + + def get_line(i: int) -> StyleAndTextTuples: + return fragments + + return UIContent(get_line=get_line, line_count=1 if fragments else 0) + + def _get_text_fragments(self) -> StyleAndTextTuples: + style = "class:completion-menu.multi-column-meta" + state = get_app().current_buffer.complete_state + + if ( + state + and state.current_completion + and state.current_completion.display_meta_text + ): + return to_formatted_text( + cast(StyleAndTextTuples, [("", " ")]) + + state.current_completion.display_meta + + [("", " ")], + style=style, + ) + + return [] diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/mouse_handlers.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/mouse_handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..52deac14560168f53d11d0af39dc1d1f93fb3724 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/mouse_handlers.py @@ -0,0 +1,56 @@ +from __future__ import annotations + +from collections import defaultdict +from typing import TYPE_CHECKING, Callable + +from prompt_toolkit.mouse_events import MouseEvent + +if TYPE_CHECKING: + from prompt_toolkit.key_binding.key_bindings import NotImplementedOrNone + +__all__ = [ + "MouseHandler", + "MouseHandlers", +] + + +MouseHandler = Callable[[MouseEvent], "NotImplementedOrNone"] + + +class MouseHandlers: + """ + Two dimensional raster of callbacks for mouse events. + """ + + def __init__(self) -> None: + def dummy_callback(mouse_event: MouseEvent) -> NotImplementedOrNone: + """ + :param mouse_event: `MouseEvent` instance. + """ + return NotImplemented + + # NOTE: Previously, the data structure was a dictionary mapping (x,y) + # to the handlers. This however would be more inefficient when copying + # over the mouse handlers of the visible region in the scrollable pane. + + # Map y (row) to x (column) to handlers. + self.mouse_handlers: defaultdict[int, defaultdict[int, MouseHandler]] = ( + defaultdict(lambda: defaultdict(lambda: dummy_callback)) + ) + + def set_mouse_handler_for_range( + self, + x_min: int, + x_max: int, + y_min: int, + y_max: int, + handler: Callable[[MouseEvent], NotImplementedOrNone], + ) -> None: + """ + Set mouse handler for a region. + """ + for y in range(y_min, y_max): + row = self.mouse_handlers[y] + + for x in range(x_min, x_max): + row[x] = handler diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/processors.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/processors.py new file mode 100644 index 0000000000000000000000000000000000000000..666e79c66da174152a616660d3b4976cdbc18fec --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/processors.py @@ -0,0 +1,1016 @@ +""" +Processors are little transformation blocks that transform the fragments list +from a buffer before the BufferControl will render it to the screen. + +They can insert fragments before or after, or highlight fragments by replacing the +fragment types. 
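+
+ For instance, processors are attached to a ``BufferControl`` (see
+ :class:`.ConditionalProcessor` below for the complete example)::
+
+     BufferControl(input_processors=[
+         ConditionalProcessor(HighlightSearchProcessor(),
+                              Condition(highlight_enabled))])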
+""" + +from __future__ import annotations + +import re +from abc import ABCMeta, abstractmethod +from typing import TYPE_CHECKING, Callable, Hashable, cast + +from prompt_toolkit.application.current import get_app +from prompt_toolkit.cache import SimpleCache +from prompt_toolkit.document import Document +from prompt_toolkit.filters import FilterOrBool, to_filter, vi_insert_multiple_mode +from prompt_toolkit.formatted_text import ( + AnyFormattedText, + StyleAndTextTuples, + to_formatted_text, +) +from prompt_toolkit.formatted_text.utils import fragment_list_len, fragment_list_to_text +from prompt_toolkit.search import SearchDirection +from prompt_toolkit.utils import to_int, to_str + +from .utils import explode_text_fragments + +if TYPE_CHECKING: + from .controls import BufferControl, UIContent + +__all__ = [ + "Processor", + "TransformationInput", + "Transformation", + "DummyProcessor", + "HighlightSearchProcessor", + "HighlightIncrementalSearchProcessor", + "HighlightSelectionProcessor", + "PasswordProcessor", + "HighlightMatchingBracketProcessor", + "DisplayMultipleCursors", + "BeforeInput", + "ShowArg", + "AfterInput", + "AppendAutoSuggestion", + "ConditionalProcessor", + "ShowLeadingWhiteSpaceProcessor", + "ShowTrailingWhiteSpaceProcessor", + "TabsProcessor", + "ReverseSearchProcessor", + "DynamicProcessor", + "merge_processors", +] + + +class Processor(metaclass=ABCMeta): + """ + Manipulate the fragments for a given line in a + :class:`~prompt_toolkit.layout.controls.BufferControl`. + """ + + @abstractmethod + def apply_transformation( + self, transformation_input: TransformationInput + ) -> Transformation: + """ + Apply transformation. Returns a :class:`.Transformation` instance. + + :param transformation_input: :class:`.TransformationInput` object. + """ + return Transformation(transformation_input.fragments) + + +SourceToDisplay = Callable[[int], int] +DisplayToSource = Callable[[int], int] + + +class TransformationInput: + """ + :param buffer_control: :class:`.BufferControl` instance. + :param lineno: The number of the line to which we apply the processor. + :param source_to_display: A function that returns the position in the + `fragments` for any position in the source string. (This takes + previous processors into account.) + :param fragments: List of fragments that we can transform. (Received from the + previous processor.) + :param get_line: Optional ; a callable that returns the fragments of another + line in the current buffer; This can be used to create processors capable + of affecting transforms across multiple lines. + """ + + def __init__( + self, + buffer_control: BufferControl, + document: Document, + lineno: int, + source_to_display: SourceToDisplay, + fragments: StyleAndTextTuples, + width: int, + height: int, + get_line: Callable[[int], StyleAndTextTuples] | None = None, + ) -> None: + self.buffer_control = buffer_control + self.document = document + self.lineno = lineno + self.source_to_display = source_to_display + self.fragments = fragments + self.width = width + self.height = height + self.get_line = get_line + + def unpack( + self, + ) -> tuple[ + BufferControl, Document, int, SourceToDisplay, StyleAndTextTuples, int, int + ]: + return ( + self.buffer_control, + self.document, + self.lineno, + self.source_to_display, + self.fragments, + self.width, + self.height, + ) + + +class Transformation: + """ + Transformation result, as returned by :meth:`.Processor.apply_transformation`. 
+ + Important: Always make sure that the length of `document.text` is equal to + the length of all the text in `fragments`! + + :param fragments: The transformed fragments. To be displayed, or to pass to + the next processor. + :param source_to_display: Cursor position transformation from original + string to transformed string. + :param display_to_source: Cursor position transformed from source string to + original string. + """ + + def __init__( + self, + fragments: StyleAndTextTuples, + source_to_display: SourceToDisplay | None = None, + display_to_source: DisplayToSource | None = None, + ) -> None: + self.fragments = fragments + self.source_to_display = source_to_display or (lambda i: i) + self.display_to_source = display_to_source or (lambda i: i) + + +class DummyProcessor(Processor): + """ + A `Processor` that doesn't do anything. + """ + + def apply_transformation( + self, transformation_input: TransformationInput + ) -> Transformation: + return Transformation(transformation_input.fragments) + + +class HighlightSearchProcessor(Processor): + """ + Processor that highlights search matches in the document. + Note that this doesn't support multiline search matches yet. + + The style classes 'search' and 'search.current' will be applied to the + content. + """ + + _classname = "search" + _classname_current = "search.current" + + def _get_search_text(self, buffer_control: BufferControl) -> str: + """ + The text we are searching for. + """ + return buffer_control.search_state.text + + def apply_transformation( + self, transformation_input: TransformationInput + ) -> Transformation: + ( + buffer_control, + document, + lineno, + source_to_display, + fragments, + _, + _, + ) = transformation_input.unpack() + + search_text = self._get_search_text(buffer_control) + searchmatch_fragment = f" class:{self._classname} " + searchmatch_current_fragment = f" class:{self._classname_current} " + + if search_text and not get_app().is_done: + # For each search match, replace the style string. + line_text = fragment_list_to_text(fragments) + fragments = explode_text_fragments(fragments) + + if buffer_control.search_state.ignore_case(): + flags = re.IGNORECASE + else: + flags = re.RegexFlag(0) + + # Get cursor column. + cursor_column: int | None + if document.cursor_position_row == lineno: + cursor_column = source_to_display(document.cursor_position_col) + else: + cursor_column = None + + for match in re.finditer(re.escape(search_text), line_text, flags=flags): + if cursor_column is not None: + on_cursor = match.start() <= cursor_column < match.end() + else: + on_cursor = False + + for i in range(match.start(), match.end()): + old_fragment, text, *_ = fragments[i] + if on_cursor: + fragments[i] = ( + old_fragment + searchmatch_current_fragment, + fragments[i][1], + ) + else: + fragments[i] = ( + old_fragment + searchmatch_fragment, + fragments[i][1], + ) + + return Transformation(fragments) + + +class HighlightIncrementalSearchProcessor(HighlightSearchProcessor): + """ + Highlight the search terms that are used for highlighting the incremental + search. The style class 'incsearch' will be applied to the content. + + Important: this requires the `preview_search=True` flag to be set for the + `BufferControl`. Otherwise, the cursor position won't be set to the search + match while searching, and nothing happens. + """ + + _classname = "incsearch" + _classname_current = "incsearch.current" + + def _get_search_text(self, buffer_control: BufferControl) -> str: + """ + The text we are searching for. 
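+
+ Unlike the parent class, this prefers the live (not yet applied) text
+ of the search buffer and falls back to the empty string.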
+ """ + # When the search buffer has focus, take that text. + search_buffer = buffer_control.search_buffer + if search_buffer is not None and search_buffer.text: + return search_buffer.text + return "" + + +class HighlightSelectionProcessor(Processor): + """ + Processor that highlights the selection in the document. + """ + + def apply_transformation( + self, transformation_input: TransformationInput + ) -> Transformation: + ( + buffer_control, + document, + lineno, + source_to_display, + fragments, + _, + _, + ) = transformation_input.unpack() + + selected_fragment = " class:selected " + + # In case of selection, highlight all matches. + selection_at_line = document.selection_range_at_line(lineno) + + if selection_at_line: + from_, to = selection_at_line + from_ = source_to_display(from_) + to = source_to_display(to) + + fragments = explode_text_fragments(fragments) + + if from_ == 0 and to == 0 and len(fragments) == 0: + # When this is an empty line, insert a space in order to + # visualize the selection. + return Transformation([(selected_fragment, " ")]) + else: + for i in range(from_, to): + if i < len(fragments): + old_fragment, old_text, *_ = fragments[i] + fragments[i] = (old_fragment + selected_fragment, old_text) + elif i == len(fragments): + fragments.append((selected_fragment, " ")) + + return Transformation(fragments) + + +class PasswordProcessor(Processor): + """ + Processor that masks the input. (For passwords.) + + :param char: (string) Character to be used. "*" by default. + """ + + def __init__(self, char: str = "*") -> None: + self.char = char + + def apply_transformation(self, ti: TransformationInput) -> Transformation: + fragments: StyleAndTextTuples = cast( + StyleAndTextTuples, + [ + (style, self.char * len(text), *handler) + for style, text, *handler in ti.fragments + ], + ) + + return Transformation(fragments) + + +class HighlightMatchingBracketProcessor(Processor): + """ + When the cursor is on or right after a bracket, it highlights the matching + bracket. + + :param max_cursor_distance: Only highlight matching brackets when the + cursor is within this distance. (From inside a `Processor`, we can't + know which lines will be visible on the screen. But we also don't want + to scan the whole document for matching brackets on each key press, so + we limit to this value.) + """ + + _closing_braces = "])}>" + + def __init__( + self, chars: str = "[](){}<>", max_cursor_distance: int = 1000 + ) -> None: + self.chars = chars + self.max_cursor_distance = max_cursor_distance + + self._positions_cache: SimpleCache[Hashable, list[tuple[int, int]]] = ( + SimpleCache(maxsize=8) + ) + + def _get_positions_to_highlight(self, document: Document) -> list[tuple[int, int]]: + """ + Return a list of (row, col) tuples that need to be highlighted. + """ + pos: int | None + + # Try for the character under the cursor. + if document.current_char and document.current_char in self.chars: + pos = document.find_matching_bracket_position( + start_pos=document.cursor_position - self.max_cursor_distance, + end_pos=document.cursor_position + self.max_cursor_distance, + ) + + # Try for the character before the cursor. 
+ elif ( + document.char_before_cursor + and document.char_before_cursor in self._closing_braces + and document.char_before_cursor in self.chars + ): + document = Document(document.text, document.cursor_position - 1) + + pos = document.find_matching_bracket_position( + start_pos=document.cursor_position - self.max_cursor_distance, + end_pos=document.cursor_position + self.max_cursor_distance, + ) + else: + pos = None + + # Return a list of (row, col) tuples that need to be highlighted. + if pos: + pos += document.cursor_position # pos is relative. + row, col = document.translate_index_to_position(pos) + return [ + (row, col), + (document.cursor_position_row, document.cursor_position_col), + ] + else: + return [] + + def apply_transformation( + self, transformation_input: TransformationInput + ) -> Transformation: + ( + buffer_control, + document, + lineno, + source_to_display, + fragments, + _, + _, + ) = transformation_input.unpack() + + # When the application is in the 'done' state, don't highlight. + if get_app().is_done: + return Transformation(fragments) + + # Get the highlight positions. + key = (get_app().render_counter, document.text, document.cursor_position) + positions = self._positions_cache.get( + key, lambda: self._get_positions_to_highlight(document) + ) + + # Apply if positions were found at this line. + if positions: + for row, col in positions: + if row == lineno: + col = source_to_display(col) + fragments = explode_text_fragments(fragments) + style, text, *_ = fragments[col] + + if col == document.cursor_position_col: + style += " class:matching-bracket.cursor " + else: + style += " class:matching-bracket.other " + + fragments[col] = (style, text) + + return Transformation(fragments) + + +class DisplayMultipleCursors(Processor): + """ + When we're in Vi block insert mode, display all the cursors. + """ + + def apply_transformation( + self, transformation_input: TransformationInput + ) -> Transformation: + ( + buffer_control, + document, + lineno, + source_to_display, + fragments, + _, + _, + ) = transformation_input.unpack() + + buff = buffer_control.buffer + + if vi_insert_multiple_mode(): + cursor_positions = buff.multiple_cursor_positions + fragments = explode_text_fragments(fragments) + + # If any cursor appears on the current line, highlight that. + start_pos = document.translate_row_col_to_index(lineno, 0) + end_pos = start_pos + len(document.lines[lineno]) + + fragment_suffix = " class:multiple-cursors" + + for p in cursor_positions: + if start_pos <= p <= end_pos: + column = source_to_display(p - start_pos) + + # Replace fragment. + try: + style, text, *_ = fragments[column] + except IndexError: + # Cursor needs to be displayed after the current text. + fragments.append((fragment_suffix, " ")) + else: + style += fragment_suffix + fragments[column] = (style, text) + + return Transformation(fragments) + else: + return Transformation(fragments) + + +class BeforeInput(Processor): + """ + Insert text before the input. + + :param text: This can be either plain text or formatted text + (or a callable that returns any of those). + :param style: style to be applied to this prompt/prefix. + """ + + def __init__(self, text: AnyFormattedText, style: str = "") -> None: + self.text = text + self.style = style + + def apply_transformation(self, ti: TransformationInput) -> Transformation: + source_to_display: SourceToDisplay | None + display_to_source: DisplayToSource | None + + if ti.lineno == 0: + # Get fragments. 
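+ # (The inserted prefix shifts every position on the first line by
+ # its fragment length; the two lambdas below keep the source and
+ # display cursor positions in sync.)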
+ fragments_before = to_formatted_text(self.text, self.style) + fragments = fragments_before + ti.fragments + + shift_position = fragment_list_len(fragments_before) + source_to_display = lambda i: i + shift_position + display_to_source = lambda i: i - shift_position + else: + fragments = ti.fragments + source_to_display = None + display_to_source = None + + return Transformation( + fragments, + source_to_display=source_to_display, + display_to_source=display_to_source, + ) + + def __repr__(self) -> str: + return f"BeforeInput({self.text!r}, {self.style!r})" + + +class ShowArg(BeforeInput): + """ + Display the 'arg' in front of the input. + + This was used by the `PromptSession`, but now it uses the + `Window.get_line_prefix` function instead. + """ + + def __init__(self) -> None: + super().__init__(self._get_text_fragments) + + def _get_text_fragments(self) -> StyleAndTextTuples: + app = get_app() + if app.key_processor.arg is None: + return [] + else: + arg = app.key_processor.arg + + return [ + ("class:prompt.arg", "(arg: "), + ("class:prompt.arg.text", str(arg)), + ("class:prompt.arg", ") "), + ] + + def __repr__(self) -> str: + return "ShowArg()" + + +class AfterInput(Processor): + """ + Insert text after the input. + + :param text: This can be either plain text or formatted text + (or a callable that returns any of those). + :param style: style to be applied to this prompt/prefix. + """ + + def __init__(self, text: AnyFormattedText, style: str = "") -> None: + self.text = text + self.style = style + + def apply_transformation(self, ti: TransformationInput) -> Transformation: + # Insert fragments after the last line. + if ti.lineno == ti.document.line_count - 1: + # Get fragments. + fragments_after = to_formatted_text(self.text, self.style) + return Transformation(fragments=ti.fragments + fragments_after) + else: + return Transformation(fragments=ti.fragments) + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.text!r}, style={self.style!r})" + + +class AppendAutoSuggestion(Processor): + """ + Append the auto suggestion to the input. + (The user can then press the right arrow the insert the suggestion.) + """ + + def __init__(self, style: str = "class:auto-suggestion") -> None: + self.style = style + + def apply_transformation(self, ti: TransformationInput) -> Transformation: + # Insert fragments after the last line. + if ti.lineno == ti.document.line_count - 1: + buffer = ti.buffer_control.buffer + + if buffer.suggestion and ti.document.is_cursor_at_the_end: + suggestion = buffer.suggestion.text + else: + suggestion = "" + + return Transformation(fragments=ti.fragments + [(self.style, suggestion)]) + else: + return Transformation(fragments=ti.fragments) + + +class ShowLeadingWhiteSpaceProcessor(Processor): + """ + Make leading whitespace visible. + + :param get_char: Callable that returns one character. + """ + + def __init__( + self, + get_char: Callable[[], str] | None = None, + style: str = "class:leading-whitespace", + ) -> None: + def default_get_char() -> str: + if "\xb7".encode(get_app().output.encoding(), "replace") == b"?": + return "." + else: + return "\xb7" + + self.style = style + self.get_char = get_char or default_get_char + + def apply_transformation(self, ti: TransformationInput) -> Transformation: + fragments = ti.fragments + + # Walk through all te fragments. 
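+ # (`explode_text_fragments` splits the fragments into one tuple per
+ # character, so each leading space can be replaced in place until
+ # the first non-space character.)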
+ if fragments and fragment_list_to_text(fragments).startswith(" "): + t = (self.style, self.get_char()) + fragments = explode_text_fragments(fragments) + + for i in range(len(fragments)): + if fragments[i][1] == " ": + fragments[i] = t + else: + break + + return Transformation(fragments) + + +class ShowTrailingWhiteSpaceProcessor(Processor): + """ + Make trailing whitespace visible. + + :param get_char: Callable that returns one character. + """ + + def __init__( + self, + get_char: Callable[[], str] | None = None, + style: str = "class:training-whitespace", + ) -> None: + def default_get_char() -> str: + if "\xb7".encode(get_app().output.encoding(), "replace") == b"?": + return "." + else: + return "\xb7" + + self.style = style + self.get_char = get_char or default_get_char + + def apply_transformation(self, ti: TransformationInput) -> Transformation: + fragments = ti.fragments + + if fragments and fragments[-1][1].endswith(" "): + t = (self.style, self.get_char()) + fragments = explode_text_fragments(fragments) + + # Walk backwards through all te fragments and replace whitespace. + for i in range(len(fragments) - 1, -1, -1): + char = fragments[i][1] + if char == " ": + fragments[i] = t + else: + break + + return Transformation(fragments) + + +class TabsProcessor(Processor): + """ + Render tabs as spaces (instead of ^I) or make them visible (for instance, + by replacing them with dots.) + + :param tabstop: Horizontal space taken by a tab. (`int` or callable that + returns an `int`). + :param char1: Character or callable that returns a character (text of + length one). This one is used for the first space taken by the tab. + :param char2: Like `char1`, but for the rest of the space. + """ + + def __init__( + self, + tabstop: int | Callable[[], int] = 4, + char1: str | Callable[[], str] = "|", + char2: str | Callable[[], str] = "\u2508", + style: str = "class:tab", + ) -> None: + self.char1 = char1 + self.char2 = char2 + self.tabstop = tabstop + self.style = style + + def apply_transformation(self, ti: TransformationInput) -> Transformation: + tabstop = to_int(self.tabstop) + style = self.style + + # Create separator for tabs. + separator1 = to_str(self.char1) + separator2 = to_str(self.char2) + + # Transform fragments. + fragments = explode_text_fragments(ti.fragments) + + position_mappings = {} + result_fragments: StyleAndTextTuples = [] + pos = 0 + + for i, fragment_and_text in enumerate(fragments): + position_mappings[i] = pos + + if fragment_and_text[1] == "\t": + # Calculate how many characters we have to insert. + count = tabstop - (pos % tabstop) + if count == 0: + count = tabstop + + # Insert tab. + result_fragments.append((style, separator1)) + result_fragments.append((style, separator2 * (count - 1))) + pos += count + else: + result_fragments.append(fragment_and_text) + pos += 1 + + position_mappings[len(fragments)] = pos + # Add `pos+1` to mapping, because the cursor can be right after the + # line as well. + position_mappings[len(fragments) + 1] = pos + 1 + + def source_to_display(from_position: int) -> int: + "Maps original cursor position to the new one." + return position_mappings[from_position] + + def display_to_source(display_pos: int) -> int: + "Maps display cursor position to the original one." 
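+ # (Columns inside an expanded tab have no direct source position;
+ # the loop below walks left until it finds one that does.)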
+ position_mappings_reversed = {v: k for k, v in position_mappings.items()} + + while display_pos >= 0: + try: + return position_mappings_reversed[display_pos] + except KeyError: + display_pos -= 1 + return 0 + + return Transformation( + result_fragments, + source_to_display=source_to_display, + display_to_source=display_to_source, + ) + + +class ReverseSearchProcessor(Processor): + """ + Process to display the "(reverse-i-search)`...`:..." stuff around + the search buffer. + + Note: This processor is meant to be applied to the BufferControl that + contains the search buffer, it's not meant for the original input. + """ + + _excluded_input_processors: list[type[Processor]] = [ + HighlightSearchProcessor, + HighlightSelectionProcessor, + BeforeInput, + AfterInput, + ] + + def _get_main_buffer(self, buffer_control: BufferControl) -> BufferControl | None: + from prompt_toolkit.layout.controls import BufferControl + + prev_control = get_app().layout.search_target_buffer_control + if ( + isinstance(prev_control, BufferControl) + and prev_control.search_buffer_control == buffer_control + ): + return prev_control + return None + + def _content( + self, main_control: BufferControl, ti: TransformationInput + ) -> UIContent: + from prompt_toolkit.layout.controls import BufferControl + + # Emulate the BufferControl through which we are searching. + # For this we filter out some of the input processors. + excluded_processors = tuple(self._excluded_input_processors) + + def filter_processor(item: Processor) -> Processor | None: + """Filter processors from the main control that we want to disable + here. This returns either an accepted processor or None.""" + # For a `_MergedProcessor`, check each individual processor, recursively. + if isinstance(item, _MergedProcessor): + accepted_processors = [filter_processor(p) for p in item.processors] + return merge_processors( + [p for p in accepted_processors if p is not None] + ) + + # For a `ConditionalProcessor`, check the body. + elif isinstance(item, ConditionalProcessor): + p = filter_processor(item.processor) + if p: + return ConditionalProcessor(p, item.filter) + + # Otherwise, check the processor itself. + else: + if not isinstance(item, excluded_processors): + return item + + return None + + filtered_processor = filter_processor( + merge_processors(main_control.input_processors or []) + ) + highlight_processor = HighlightIncrementalSearchProcessor() + + if filtered_processor: + new_processors = [filtered_processor, highlight_processor] + else: + new_processors = [highlight_processor] + + from .controls import SearchBufferControl + + assert isinstance(ti.buffer_control, SearchBufferControl) + + buffer_control = BufferControl( + buffer=main_control.buffer, + input_processors=new_processors, + include_default_input_processors=False, + lexer=main_control.lexer, + preview_search=True, + search_buffer_control=ti.buffer_control, + ) + + return buffer_control.create_content(ti.width, ti.height, preview_search=True) + + def apply_transformation(self, ti: TransformationInput) -> Transformation: + from .controls import SearchBufferControl + + assert isinstance(ti.buffer_control, SearchBufferControl), ( + "`ReverseSearchProcessor` should be applied to a `SearchBufferControl` only." 
+        )
+
+        source_to_display: SourceToDisplay | None
+        display_to_source: DisplayToSource | None
+
+        main_control = self._get_main_buffer(ti.buffer_control)
+
+        if ti.lineno == 0 and main_control:
+            content = self._content(main_control, ti)
+
+            # Get the line from the original document for this search.
+            line_fragments = content.get_line(content.cursor_position.y)
+
+            if main_control.search_state.direction == SearchDirection.FORWARD:
+                direction_text = "i-search"
+            else:
+                direction_text = "reverse-i-search"
+
+            fragments_before: StyleAndTextTuples = [
+                ("class:prompt.search", "("),
+                ("class:prompt.search", direction_text),
+                ("class:prompt.search", ")`"),
+            ]
+
+            fragments = (
+                fragments_before
+                + [
+                    ("class:prompt.search.text", fragment_list_to_text(ti.fragments)),
+                    ("", "': "),
+                ]
+                + line_fragments
+            )
+
+            shift_position = fragment_list_len(fragments_before)
+            source_to_display = lambda i: i + shift_position
+            display_to_source = lambda i: i - shift_position
+        else:
+            source_to_display = None
+            display_to_source = None
+            fragments = ti.fragments
+
+        return Transformation(
+            fragments,
+            source_to_display=source_to_display,
+            display_to_source=display_to_source,
+        )
+
+
+class ConditionalProcessor(Processor):
+    """
+    Processor that applies another processor, according to a certain condition.
+    Example::
+
+        # Create a function that returns whether or not the processor should
+        # currently be applied.
+        def highlight_enabled():
+            return true_or_false
+
+        # Wrap it in a `ConditionalProcessor` for usage in a `BufferControl`.
+        BufferControl(input_processors=[
+            ConditionalProcessor(HighlightSearchProcessor(),
+                                 Condition(highlight_enabled))])
+
+    :param processor: :class:`.Processor` instance.
+    :param filter: :class:`~prompt_toolkit.filters.Filter` instance.
+    """
+
+    def __init__(self, processor: Processor, filter: FilterOrBool) -> None:
+        self.processor = processor
+        self.filter = to_filter(filter)
+
+    def apply_transformation(
+        self, transformation_input: TransformationInput
+    ) -> Transformation:
+        # Run processor when enabled.
+        if self.filter():
+            return self.processor.apply_transformation(transformation_input)
+        else:
+            return Transformation(transformation_input.fragments)
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(processor={self.processor!r}, filter={self.filter!r})"
+
+
+class DynamicProcessor(Processor):
+    """
+    Processor class that dynamically returns any Processor.
+
+    :param get_processor: Callable that returns a :class:`.Processor` instance.
+    """
+
+    def __init__(self, get_processor: Callable[[], Processor | None]) -> None:
+        self.get_processor = get_processor
+
+    def apply_transformation(self, ti: TransformationInput) -> Transformation:
+        processor = self.get_processor() or DummyProcessor()
+        return processor.apply_transformation(ti)
+
+
+def merge_processors(processors: list[Processor]) -> Processor:
+    """
+    Merge multiple `Processor` objects into one.
+    """
+    if len(processors) == 0:
+        return DummyProcessor()
+
+    if len(processors) == 1:
+        return processors[0]  # Nothing to merge.
+
+    return _MergedProcessor(processors)
+
+
+class _MergedProcessor(Processor):
+    """
+    Processor that groups multiple other `Processor` objects, but exposes an
+    API as if it is one `Processor`.
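+
+    A minimal sketch of how this is normally obtained (through
+    `merge_processors`, rather than by instantiating it directly)::
+
+        merged = merge_processors([
+            TabsProcessor(),
+            ShowTrailingWhiteSpaceProcessor(),
+        ])
+        # `merged` now behaves like one `Processor`: the transformations are
+        # chained, and the position mappings are composed in order.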
+ """ + + def __init__(self, processors: list[Processor]): + self.processors = processors + + def apply_transformation(self, ti: TransformationInput) -> Transformation: + source_to_display_functions = [ti.source_to_display] + display_to_source_functions = [] + fragments = ti.fragments + + def source_to_display(i: int) -> int: + """Translate x position from the buffer to the x position in the + processor fragments list.""" + for f in source_to_display_functions: + i = f(i) + return i + + for p in self.processors: + transformation = p.apply_transformation( + TransformationInput( + ti.buffer_control, + ti.document, + ti.lineno, + source_to_display, + fragments, + ti.width, + ti.height, + ti.get_line, + ) + ) + fragments = transformation.fragments + display_to_source_functions.append(transformation.display_to_source) + source_to_display_functions.append(transformation.source_to_display) + + def display_to_source(i: int) -> int: + for f in reversed(display_to_source_functions): + i = f(i) + return i + + # In the case of a nested _MergedProcessor, each processor wants to + # receive a 'source_to_display' function (as part of the + # TransformationInput) that has everything in the chain before + # included, because it can be called as part of the + # `apply_transformation` function. However, this first + # `source_to_display` should not be part of the output that we are + # returning. (This is the most consistent with `display_to_source`.) + del source_to_display_functions[:1] + + return Transformation(fragments, source_to_display, display_to_source) diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/screen.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/screen.py new file mode 100644 index 0000000000000000000000000000000000000000..475f540d11d8e5b8ff1567d5120e25c9667cc33e --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/screen.py @@ -0,0 +1,323 @@ +from __future__ import annotations + +from collections import defaultdict +from typing import TYPE_CHECKING, Callable + +from prompt_toolkit.cache import FastDictCache +from prompt_toolkit.data_structures import Point +from prompt_toolkit.utils import get_cwidth + +if TYPE_CHECKING: + from .containers import Window + + +__all__ = [ + "Screen", + "Char", +] + + +class Char: + """ + Represent a single character in a :class:`.Screen`. + + This should be considered immutable. + + :param char: A single character (can be a double-width character). + :param style: A style string. (Can contain classnames.) + """ + + __slots__ = ("char", "style", "width") + + # If we end up having one of these special control sequences in the input string, + # we should display them as follows: + # Usually this happens after a "quoted insert". + display_mappings: dict[str, str] = { + "\x00": "^@", # Control space + "\x01": "^A", + "\x02": "^B", + "\x03": "^C", + "\x04": "^D", + "\x05": "^E", + "\x06": "^F", + "\x07": "^G", + "\x08": "^H", + "\x09": "^I", + "\x0a": "^J", + "\x0b": "^K", + "\x0c": "^L", + "\x0d": "^M", + "\x0e": "^N", + "\x0f": "^O", + "\x10": "^P", + "\x11": "^Q", + "\x12": "^R", + "\x13": "^S", + "\x14": "^T", + "\x15": "^U", + "\x16": "^V", + "\x17": "^W", + "\x18": "^X", + "\x19": "^Y", + "\x1a": "^Z", + "\x1b": "^[", # Escape + "\x1c": "^\\", + "\x1d": "^]", + "\x1e": "^^", + "\x1f": "^_", + "\x7f": "^?", # ASCII Delete (backspace). + # Special characters. All visualized like Vim does. 
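+        # (These C1 codes have no printable form of their own, so they are
+        # shown as a hex escape, the same way Vim renders them.)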
+ "\x80": "<80>", + "\x81": "<81>", + "\x82": "<82>", + "\x83": "<83>", + "\x84": "<84>", + "\x85": "<85>", + "\x86": "<86>", + "\x87": "<87>", + "\x88": "<88>", + "\x89": "<89>", + "\x8a": "<8a>", + "\x8b": "<8b>", + "\x8c": "<8c>", + "\x8d": "<8d>", + "\x8e": "<8e>", + "\x8f": "<8f>", + "\x90": "<90>", + "\x91": "<91>", + "\x92": "<92>", + "\x93": "<93>", + "\x94": "<94>", + "\x95": "<95>", + "\x96": "<96>", + "\x97": "<97>", + "\x98": "<98>", + "\x99": "<99>", + "\x9a": "<9a>", + "\x9b": "<9b>", + "\x9c": "<9c>", + "\x9d": "<9d>", + "\x9e": "<9e>", + "\x9f": "<9f>", + # For the non-breaking space: visualize like Emacs does by default. + # (Print a space, but attach the 'nbsp' class that applies the + # underline style.) + "\xa0": " ", + } + + def __init__(self, char: str = " ", style: str = "") -> None: + # If this character has to be displayed otherwise, take that one. + if char in self.display_mappings: + if char == "\xa0": + style += " class:nbsp " # Will be underlined. + else: + style += " class:control-character " + + char = self.display_mappings[char] + + self.char = char + self.style = style + + # Calculate width. (We always need this, so better to store it directly + # as a member for performance.) + self.width = get_cwidth(char) + + # In theory, `other` can be any type of object, but because of performance + # we don't want to do an `isinstance` check every time. We assume "other" + # is always a "Char". + def _equal(self, other: Char) -> bool: + return self.char == other.char and self.style == other.style + + def _not_equal(self, other: Char) -> bool: + # Not equal: We don't do `not char.__eq__` here, because of the + # performance of calling yet another function. + return self.char != other.char or self.style != other.style + + if not TYPE_CHECKING: + __eq__ = _equal + __ne__ = _not_equal + + def __repr__(self) -> str: + return f"{self.__class__.__name__}({self.char!r}, {self.style!r})" + + +_CHAR_CACHE: FastDictCache[tuple[str, str], Char] = FastDictCache( + Char, size=1000 * 1000 +) +Transparent = "[transparent]" + + +class Screen: + """ + Two dimensional buffer of :class:`.Char` instances. + """ + + def __init__( + self, + default_char: Char | None = None, + initial_width: int = 0, + initial_height: int = 0, + ) -> None: + if default_char is None: + default_char2 = _CHAR_CACHE[" ", Transparent] + else: + default_char2 = default_char + + self.data_buffer: defaultdict[int, defaultdict[int, Char]] = defaultdict( + lambda: defaultdict(lambda: default_char2) + ) + + #: Escape sequences to be injected. + self.zero_width_escapes: defaultdict[int, defaultdict[int, str]] = defaultdict( + lambda: defaultdict(str) + ) + + #: Position of the cursor. + self.cursor_positions: dict[ + Window, Point + ] = {} # Map `Window` objects to `Point` objects. + + #: Visibility of the cursor. + self.show_cursor = True + + #: (Optional) Where to position the menu. E.g. at the start of a completion. + #: (We can't use the cursor position, because we don't want the + #: completion menu to change its position when we browse through all the + #: completions.) + self.menu_positions: dict[ + Window, Point + ] = {} # Map `Window` objects to `Point` objects. + + #: Currently used width/height of the screen. This will increase when + #: data is written to the screen. + self.width = initial_width or 0 + self.height = initial_height or 0 + + # Windows that have been drawn. (Each `Window` class will add itself to + # this list.) 
+        self.visible_windows_to_write_positions: dict[Window, WritePosition] = {}
+
+        # List of (z_index, draw_func)
+        self._draw_float_functions: list[tuple[int, Callable[[], None]]] = []
+
+    @property
+    def visible_windows(self) -> list[Window]:
+        return list(self.visible_windows_to_write_positions.keys())
+
+    def set_cursor_position(self, window: Window, position: Point) -> None:
+        """
+        Set the cursor position for a given window.
+        """
+        self.cursor_positions[window] = position
+
+    def set_menu_position(self, window: Window, position: Point) -> None:
+        """
+        Set the menu position for a given window.
+        """
+        self.menu_positions[window] = position
+
+    def get_cursor_position(self, window: Window) -> Point:
+        """
+        Get the cursor position for a given window.
+        Returns a `Point`.
+        """
+        try:
+            return self.cursor_positions[window]
+        except KeyError:
+            return Point(x=0, y=0)
+
+    def get_menu_position(self, window: Window) -> Point:
+        """
+        Get the menu position for a given window.
+        (This falls back to the cursor position if no menu position was set.)
+        """
+        try:
+            return self.menu_positions[window]
+        except KeyError:
+            try:
+                return self.cursor_positions[window]
+            except KeyError:
+                return Point(x=0, y=0)
+
+    def draw_with_z_index(self, z_index: int, draw_func: Callable[[], None]) -> None:
+        """
+        Add a draw-function for a `Window` which has a >= 0 z_index.
+        This will be postponed until `draw_all_floats` is called.
+        """
+        self._draw_float_functions.append((z_index, draw_func))
+
+    def draw_all_floats(self) -> None:
+        """
+        Draw all float functions in order of z-index.
+        """
+        # We keep looping because some draw functions could add new functions
+        # to this list. See `FloatContainer`.
+        while self._draw_float_functions:
+            # Sort the floats that we have so far by z_index.
+            functions = sorted(self._draw_float_functions, key=lambda item: item[0])
+
+            # Draw only one at a time, then sort everything again. Now floats
+            # might have been added.
+            self._draw_float_functions = functions[1:]
+            functions[0][1]()
+
+    def append_style_to_content(self, style_str: str) -> None:
+        """
+        Append the given `style_str` to the style of every character in the
+        screen.
+        """
+        b = self.data_buffer
+        char_cache = _CHAR_CACHE
+
+        append_style = " " + style_str
+
+        for y, row in b.items():
+            for x, char in row.items():
+                row[x] = char_cache[char.char, char.style + append_style]
+
+    def fill_area(
+        self, write_position: WritePosition, style: str = "", after: bool = False
+    ) -> None:
+        """
+        Fill the content of this area, using the given `style`.
+        The style is prepended to (or, when `after` is set, appended after)
+        whatever was here before.
+        """
+        if not style.strip():
+            return
+
+        xmin = write_position.xpos
+        xmax = write_position.xpos + write_position.width
+        char_cache = _CHAR_CACHE
+        data_buffer = self.data_buffer
+
+        if after:
+            append_style = " " + style
+            prepend_style = ""
+        else:
+            append_style = ""
+            prepend_style = style + " "
+
+        for y in range(
+            write_position.ypos, write_position.ypos + write_position.height
+        ):
+            row = data_buffer[y]
+            for x in range(xmin, xmax):
+                cell = row[x]
+                row[x] = char_cache[
+                    cell.char, prepend_style + cell.style + append_style
+                ]
+
+
+class WritePosition:
+    def __init__(self, xpos: int, ypos: int, width: int, height: int) -> None:
+        assert height >= 0
+        assert width >= 0
+        # xpos and ypos can be negative. (A float can be partially visible.)
+
+        self.xpos = xpos
+        self.ypos = ypos
+        self.width = width
+        self.height = height
+
+    def __repr__(self) -> str:
+        return f"{self.__class__.__name__}(x={self.xpos!r}, y={self.ypos!r}, width={self.width!r}, height={self.height!r})"
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/scrollable_pane.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/scrollable_pane.py
new file mode 100644
index 0000000000000000000000000000000000000000..e38fd761951a9c379eaccb832eb8ac6c0f1d5852
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/scrollable_pane.py
@@ -0,0 +1,494 @@
+from __future__ import annotations
+
+from prompt_toolkit.data_structures import Point
+from prompt_toolkit.filters import FilterOrBool, to_filter
+from prompt_toolkit.key_binding import KeyBindingsBase
+from prompt_toolkit.mouse_events import MouseEvent
+
+from .containers import Container, ScrollOffsets
+from .dimension import AnyDimension, Dimension, sum_layout_dimensions, to_dimension
+from .mouse_handlers import MouseHandler, MouseHandlers
+from .screen import Char, Screen, WritePosition
+
+__all__ = ["ScrollablePane"]
+
+# Never go beyond this height, because performance will degrade.
+MAX_AVAILABLE_HEIGHT = 10_000
+
+
+class ScrollablePane(Container):
+    """
+    Container widget that exposes a larger virtual screen to its content and
+    displays it in a vertical scrollable region.
+
+    Typically this is wrapped in a large `HSplit` container. Make sure in that
+    case to not specify a `height` dimension of the `HSplit`, so that it will
+    scale according to the content.
+
+    .. note::
+
+        If you want to display a completion menu for widgets in this
+        `ScrollablePane`, then it's still a good practice to use a
+        `FloatContainer` with a `CompletionsMenu` in a `Float` at the top-level
+        of the layout hierarchy, rather than nesting a `FloatContainer` in this
+        `ScrollablePane`. (Otherwise, it's possible that the completion menu
+        is clipped.)
+
+    :param content: The content container.
+    :param scroll_offsets: Try to keep the cursor within this distance from the
+        top/bottom (left/right offset is not used).
+    :param keep_cursor_visible: When `True`, automatically scroll the pane so
+        that the cursor (of the focused window) is always visible.
+    :param keep_focused_window_visible: When `True`, automatically scroll the
+        pane so that the focused window is visible, or as much of it as
+        possible if it doesn't completely fit the screen.
+    :param max_available_height: Always constrain the height to this amount
+        for performance reasons.
+    :param width: When given, use this width instead of looking at the children.
+    :param height: When given, use this height instead of looking at the children.
+    :param show_scrollbar: When `True` display a scrollbar on the right.
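+
+    Example (a minimal sketch, assuming the standard widget API)::
+
+        from prompt_toolkit.layout import HSplit, ScrollablePane
+        from prompt_toolkit.widgets import TextArea
+
+        body = ScrollablePane(
+            HSplit([TextArea(text=f"Field {i}") for i in range(50)])
+        )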
+    """
+
+    def __init__(
+        self,
+        content: Container,
+        scroll_offsets: ScrollOffsets | None = None,
+        keep_cursor_visible: FilterOrBool = True,
+        keep_focused_window_visible: FilterOrBool = True,
+        max_available_height: int = MAX_AVAILABLE_HEIGHT,
+        width: AnyDimension = None,
+        height: AnyDimension = None,
+        show_scrollbar: FilterOrBool = True,
+        display_arrows: FilterOrBool = True,
+        up_arrow_symbol: str = "^",
+        down_arrow_symbol: str = "v",
+    ) -> None:
+        self.content = content
+        self.scroll_offsets = scroll_offsets or ScrollOffsets(top=1, bottom=1)
+        self.keep_cursor_visible = to_filter(keep_cursor_visible)
+        self.keep_focused_window_visible = to_filter(keep_focused_window_visible)
+        self.max_available_height = max_available_height
+        self.width = width
+        self.height = height
+        self.show_scrollbar = to_filter(show_scrollbar)
+        self.display_arrows = to_filter(display_arrows)
+        self.up_arrow_symbol = up_arrow_symbol
+        self.down_arrow_symbol = down_arrow_symbol
+
+        self.vertical_scroll = 0
+
+    def __repr__(self) -> str:
+        return f"ScrollablePane({self.content!r})"
+
+    def reset(self) -> None:
+        self.content.reset()
+
+    def preferred_width(self, max_available_width: int) -> Dimension:
+        if self.width is not None:
+            return to_dimension(self.width)
+
+        # We're only scrolling vertically. So the preferred width is equal to
+        # that of the content.
+        content_width = self.content.preferred_width(max_available_width)
+
+        # If a scrollbar needs to be displayed, add +1 to the content width.
+        if self.show_scrollbar():
+            return sum_layout_dimensions([Dimension.exact(1), content_width])
+
+        return content_width
+
+    def preferred_height(self, width: int, max_available_height: int) -> Dimension:
+        if self.height is not None:
+            return to_dimension(self.height)
+
+        # Prefer a height large enough so that it fits all the content. If not,
+        # we'll make the pane scrollable.
+        if self.show_scrollbar():
+            # If `show_scrollbar` is set, always reserve space for the scrollbar.
+            width -= 1
+
+        dimension = self.content.preferred_height(width, self.max_available_height)
+
+        # Only take 'preferred' into account. Min/max can be anything.
+        return Dimension(min=0, preferred=dimension.preferred)
+
+    def write_to_screen(
+        self,
+        screen: Screen,
+        mouse_handlers: MouseHandlers,
+        write_position: WritePosition,
+        parent_style: str,
+        erase_bg: bool,
+        z_index: int | None,
+    ) -> None:
+        """
+        Render scrollable pane content.
+
+        This works by rendering on an off-screen canvas, and copying over the
+        visible region.
+        """
+        show_scrollbar = self.show_scrollbar()
+
+        if show_scrollbar:
+            virtual_width = write_position.width - 1
+        else:
+            virtual_width = write_position.width
+
+        # Compute preferred height again.
+        virtual_height = self.content.preferred_height(
+            virtual_width, self.max_available_height
+        ).preferred
+
+        # Ensure virtual height is at least the available height.
+        virtual_height = max(virtual_height, write_position.height)
+        virtual_height = min(virtual_height, self.max_available_height)
+
+        # First, write the content to a virtual screen, then copy over the
+        # visible part to the real screen.
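+        # (Conceptually: render the content at its full virtual height
+        # off-screen, then blit the row range
+        # [vertical_scroll, vertical_scroll + height) onto the real screen.)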
+        temp_screen = Screen(default_char=Char(char=" ", style=parent_style))
+        temp_screen.show_cursor = screen.show_cursor
+        temp_write_position = WritePosition(
+            xpos=0, ypos=0, width=virtual_width, height=virtual_height
+        )
+
+        temp_mouse_handlers = MouseHandlers()
+
+        self.content.write_to_screen(
+            temp_screen,
+            temp_mouse_handlers,
+            temp_write_position,
+            parent_style,
+            erase_bg,
+            z_index,
+        )
+        temp_screen.draw_all_floats()
+
+        # If anything in the virtual screen is focused, move the vertical
+        # scroll so that the focused window becomes visible.
+        from prompt_toolkit.application import get_app
+
+        focused_window = get_app().layout.current_window
+
+        try:
+            visible_win_write_pos = temp_screen.visible_windows_to_write_positions[
+                focused_window
+            ]
+        except KeyError:
+            pass  # No window focused here. Don't scroll.
+        else:
+            # Make sure this window is visible.
+            self._make_window_visible(
+                write_position.height,
+                virtual_height,
+                visible_win_write_pos,
+                temp_screen.cursor_positions.get(focused_window),
+            )
+
+        # Copy over virtual screen and zero width escapes to real screen.
+        self._copy_over_screen(screen, temp_screen, write_position, virtual_width)
+
+        # Copy over mouse handlers.
+        self._copy_over_mouse_handlers(
+            mouse_handlers, temp_mouse_handlers, write_position, virtual_width
+        )
+
+        # Set screen.width/height.
+        ypos = write_position.ypos
+        xpos = write_position.xpos
+
+        screen.width = max(screen.width, xpos + virtual_width)
+        screen.height = max(screen.height, ypos + write_position.height)
+
+        # Copy over window write positions.
+        self._copy_over_write_positions(screen, temp_screen, write_position)
+
+        if temp_screen.show_cursor:
+            screen.show_cursor = True
+
+        # Copy over cursor positions, if they are visible.
+        for window, point in temp_screen.cursor_positions.items():
+            if (
+                0 <= point.x < write_position.width
+                and self.vertical_scroll
+                <= point.y
+                < write_position.height + self.vertical_scroll
+            ):
+                screen.cursor_positions[window] = Point(
+                    x=point.x + xpos, y=point.y + ypos - self.vertical_scroll
+                )
+
+        # Copy over menu positions, but clip them to the visible area.
+        for window, point in temp_screen.menu_positions.items():
+            screen.menu_positions[window] = self._clip_point_to_visible_area(
+                Point(x=point.x + xpos, y=point.y + ypos - self.vertical_scroll),
+                write_position,
+            )
+
+        # Draw scrollbar.
+        if show_scrollbar:
+            self._draw_scrollbar(
+                write_position,
+                virtual_height,
+                screen,
+            )
+
+    def _clip_point_to_visible_area(
+        self, point: Point, write_position: WritePosition
+    ) -> Point:
+        """
+        Ensure that the cursor and menu positions are always reported within
+        the visible write position.
+        """
+        if point.x < write_position.xpos:
+            point = point._replace(x=write_position.xpos)
+        if point.y < write_position.ypos:
+            point = point._replace(y=write_position.ypos)
+        if point.x >= write_position.xpos + write_position.width:
+            point = point._replace(x=write_position.xpos + write_position.width - 1)
+        if point.y >= write_position.ypos + write_position.height:
+            point = point._replace(y=write_position.ypos + write_position.height - 1)
+
+        return point
+
+    def _copy_over_screen(
+        self,
+        screen: Screen,
+        temp_screen: Screen,
+        write_position: WritePosition,
+        virtual_width: int,
+    ) -> None:
+        """
+        Copy over visible screen content and "zero width escape sequences".
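+
+        Rows are shifted by `self.vertical_scroll`: virtual row
+        `y + vertical_scroll` ends up at real row `y + write_position.ypos`.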
+ """ + ypos = write_position.ypos + xpos = write_position.xpos + + for y in range(write_position.height): + temp_row = temp_screen.data_buffer[y + self.vertical_scroll] + row = screen.data_buffer[y + ypos] + temp_zero_width_escapes = temp_screen.zero_width_escapes[ + y + self.vertical_scroll + ] + zero_width_escapes = screen.zero_width_escapes[y + ypos] + + for x in range(virtual_width): + row[x + xpos] = temp_row[x] + + if x in temp_zero_width_escapes: + zero_width_escapes[x + xpos] = temp_zero_width_escapes[x] + + def _copy_over_mouse_handlers( + self, + mouse_handlers: MouseHandlers, + temp_mouse_handlers: MouseHandlers, + write_position: WritePosition, + virtual_width: int, + ) -> None: + """ + Copy over mouse handlers from virtual screen to real screen. + + Note: we take `virtual_width` because we don't want to copy over mouse + handlers that we possibly have behind the scrollbar. + """ + ypos = write_position.ypos + xpos = write_position.xpos + + # Cache mouse handlers when wrapping them. Very often the same mouse + # handler is registered for many positions. + mouse_handler_wrappers: dict[MouseHandler, MouseHandler] = {} + + def wrap_mouse_handler(handler: MouseHandler) -> MouseHandler: + "Wrap mouse handler. Translate coordinates in `MouseEvent`." + if handler not in mouse_handler_wrappers: + + def new_handler(event: MouseEvent) -> None: + new_event = MouseEvent( + position=Point( + x=event.position.x - xpos, + y=event.position.y + self.vertical_scroll - ypos, + ), + event_type=event.event_type, + button=event.button, + modifiers=event.modifiers, + ) + handler(new_event) + + mouse_handler_wrappers[handler] = new_handler + return mouse_handler_wrappers[handler] + + # Copy handlers. + mouse_handlers_dict = mouse_handlers.mouse_handlers + temp_mouse_handlers_dict = temp_mouse_handlers.mouse_handlers + + for y in range(write_position.height): + if y in temp_mouse_handlers_dict: + temp_mouse_row = temp_mouse_handlers_dict[y + self.vertical_scroll] + mouse_row = mouse_handlers_dict[y + ypos] + for x in range(virtual_width): + if x in temp_mouse_row: + mouse_row[x + xpos] = wrap_mouse_handler(temp_mouse_row[x]) + + def _copy_over_write_positions( + self, screen: Screen, temp_screen: Screen, write_position: WritePosition + ) -> None: + """ + Copy over window write positions. + """ + ypos = write_position.ypos + xpos = write_position.xpos + + for win, write_pos in temp_screen.visible_windows_to_write_positions.items(): + screen.visible_windows_to_write_positions[win] = WritePosition( + xpos=write_pos.xpos + xpos, + ypos=write_pos.ypos + ypos - self.vertical_scroll, + # TODO: if the window is only partly visible, then truncate width/height. + # This could be important if we have nested ScrollablePanes. + height=write_pos.height, + width=write_pos.width, + ) + + def is_modal(self) -> bool: + return self.content.is_modal() + + def get_key_bindings(self) -> KeyBindingsBase | None: + return self.content.get_key_bindings() + + def get_children(self) -> list[Container]: + return [self.content] + + def _make_window_visible( + self, + visible_height: int, + virtual_height: int, + visible_win_write_pos: WritePosition, + cursor_position: Point | None, + ) -> None: + """ + Scroll the scrollable pane, so that this window becomes visible. + + :param visible_height: Height of this `ScrollablePane` that is rendered. + :param virtual_height: Height of the virtual, temp screen. + :param visible_win_write_pos: `WritePosition` of the nested window on the + temp screen. 
+        :param cursor_position: The location of the cursor position of this
+            window on the temp screen.
+        """
+        # Start with maximum allowed scroll range, and then reduce according to
+        # the focused window and cursor position.
+        min_scroll = 0
+        max_scroll = virtual_height - visible_height
+
+        if self.keep_cursor_visible():
+            # Reduce min/max scroll according to the cursor in the focused window.
+            if cursor_position is not None:
+                offsets = self.scroll_offsets
+                cpos_min_scroll = (
+                    cursor_position.y - visible_height + 1 + offsets.bottom
+                )
+                cpos_max_scroll = cursor_position.y - offsets.top
+                min_scroll = max(min_scroll, cpos_min_scroll)
+                max_scroll = max(0, min(max_scroll, cpos_max_scroll))
+
+        if self.keep_focused_window_visible():
+            # Reduce min/max scroll according to focused window position.
+            # If the window is small enough, both the top and bottom of the window
+            # should be visible.
+            if visible_win_write_pos.height <= visible_height:
+                window_min_scroll = (
+                    visible_win_write_pos.ypos
+                    + visible_win_write_pos.height
+                    - visible_height
+                )
+                window_max_scroll = visible_win_write_pos.ypos
+            else:
+                # Window does not fit on the screen. Make sure at least the whole
+                # screen is occupied with this window, and nothing else is shown.
+                window_min_scroll = visible_win_write_pos.ypos
+                window_max_scroll = (
+                    visible_win_write_pos.ypos
+                    + visible_win_write_pos.height
+                    - visible_height
+                )
+
+            min_scroll = max(min_scroll, window_min_scroll)
+            max_scroll = min(max_scroll, window_max_scroll)
+
+        if min_scroll > max_scroll:
+            min_scroll = max_scroll  # Should not happen.
+
+        # Finally, properly clip the vertical scroll.
+        if self.vertical_scroll > max_scroll:
+            self.vertical_scroll = max_scroll
+        if self.vertical_scroll < min_scroll:
+            self.vertical_scroll = min_scroll
+
+    def _draw_scrollbar(
+        self, write_position: WritePosition, content_height: int, screen: Screen
+    ) -> None:
+        """
+        Draw the scrollbar on the screen.
+
+        Note: There is some code duplication with the `ScrollbarMargin`
+        implementation.
+        """
+
+        window_height = write_position.height
+        display_arrows = self.display_arrows()
+
+        if display_arrows:
+            window_height -= 2
+
+        try:
+            fraction_visible = write_position.height / float(content_height)
+            fraction_above = self.vertical_scroll / float(content_height)
+
+            scrollbar_height = int(
+                min(window_height, max(1, window_height * fraction_visible))
+            )
+            scrollbar_top = int(window_height * fraction_above)
+        except ZeroDivisionError:
+            return
+        else:
+
+            def is_scroll_button(row: int) -> bool:
+                "True if we should display a button on this row."
+                return scrollbar_top <= row <= scrollbar_top + scrollbar_height
+
+            xpos = write_position.xpos + write_position.width - 1
+            ypos = write_position.ypos
+            data_buffer = screen.data_buffer
+
+            # Up arrow.
+            if display_arrows:
+                data_buffer[ypos][xpos] = Char(
+                    self.up_arrow_symbol, "class:scrollbar.arrow"
+                )
+                ypos += 1
+
+            # Scrollbar body.
+            scrollbar_background = "class:scrollbar.background"
+            scrollbar_background_start = "class:scrollbar.background,scrollbar.start"
+            scrollbar_button = "class:scrollbar.button"
+            scrollbar_button_end = "class:scrollbar.button,scrollbar.end"
+
+            for i in range(window_height):
+                style = ""
+                if is_scroll_button(i):
+                    if not is_scroll_button(i + 1):
+                        # Give the last cell a different style, because we want
+                        # to underline this.
+                        style = scrollbar_button_end
+                    else:
+                        style = scrollbar_button
+                else:
+                    if is_scroll_button(i + 1):
+                        style = scrollbar_background_start
+                    else:
+                        style = scrollbar_background
+
+                data_buffer[ypos][xpos] = Char(" ", style)
+                ypos += 1
+
+            # Down arrow
+            if display_arrows:
+                data_buffer[ypos][xpos] = Char(
+                    self.down_arrow_symbol, "class:scrollbar.arrow"
+                )
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/utils.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..373fe52a5ae5ab927faad014cfcdeb50b401ffae
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/layout/utils.py
@@ -0,0 +1,80 @@
+from __future__ import annotations
+
+from typing import TYPE_CHECKING, Iterable, List, TypeVar, cast, overload
+
+from prompt_toolkit.formatted_text.base import OneStyleAndTextTuple
+
+if TYPE_CHECKING:
+    from typing_extensions import SupportsIndex
+
+__all__ = [
+    "explode_text_fragments",
+]
+
+_T = TypeVar("_T", bound=OneStyleAndTextTuple)
+
+
+class _ExplodedList(List[_T]):
+    """
+    Wrapper around a list, that marks it as 'exploded'.
+
+    As soon as items are added or the list is extended, the new items are
+    automatically exploded as well.
+    """
+
+    exploded = True
+
+    def append(self, item: _T) -> None:
+        self.extend([item])
+
+    def extend(self, lst: Iterable[_T]) -> None:
+        super().extend(explode_text_fragments(lst))
+
+    def insert(self, index: SupportsIndex, item: _T) -> None:
+        raise NotImplementedError  # TODO
+
+    # TODO: When creating a copy() or [:], return also an _ExplodedList.
+
+    @overload
+    def __setitem__(self, index: SupportsIndex, value: _T) -> None: ...
+
+    @overload
+    def __setitem__(self, index: slice, value: Iterable[_T]) -> None: ...
+
+    def __setitem__(
+        self, index: SupportsIndex | slice, value: _T | Iterable[_T]
+    ) -> None:
+        """
+        Ensure that when `(style_str, 'long string')` is set, the string will be
+        exploded.
+        """
+        if not isinstance(index, slice):
+            int_index = index.__index__()
+            index = slice(int_index, int_index + 1)
+        if isinstance(value, tuple):  # In case of `OneStyleAndTextTuple`.
+            value = cast("List[_T]", [value])
+
+        super().__setitem__(index, explode_text_fragments(value))
+
+
+def explode_text_fragments(fragments: Iterable[_T]) -> _ExplodedList[_T]:
+    """
+    Turn a list of (style_str, text) tuples into another list where each string is
+    exactly one character.
+
+    It is safe to call this function several times; calling it on a list that
+    is already exploded is a no-op.
+
+    :param fragments: List of (style, text) tuples.
+    """
+    # When the fragments are already exploded, don't explode again.
+    if isinstance(fragments, _ExplodedList):
+        return fragments
+
+    result: list[_T] = []
+
+    for style, string, *rest in fragments:
+        for c in string:
+            result.append((style, c, *rest))  # type: ignore
+
+    return _ExplodedList(result)
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/__init__.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8f72d07ff2393813ec14eb432a4ad0d54cb4fac8
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/__init__.py
@@ -0,0 +1,21 @@
+"""
+Lexer interface and implementations.
+Used for syntax highlighting.
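+
+Example (a minimal sketch; the Pygments-based lexer requires the optional
+Pygments dependency)::
+
+    from prompt_toolkit import PromptSession
+    from prompt_toolkit.lexers import PygmentsLexer
+    from pygments.lexers.python import PythonLexer
+
+    session = PromptSession(lexer=PygmentsLexer(PythonLexer))
+    text = session.prompt("python> ")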
+""" + +from __future__ import annotations + +from .base import DynamicLexer, Lexer, SimpleLexer +from .pygments import PygmentsLexer, RegexSync, SyncFromStart, SyntaxSync + +__all__ = [ + # Base. + "Lexer", + "SimpleLexer", + "DynamicLexer", + # Pygments. + "PygmentsLexer", + "RegexSync", + "SyncFromStart", + "SyntaxSync", +] diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/__pycache__/__init__.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6ea75c4eac10e1544db18b0eaff9df5bba8ebf1b Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/__pycache__/__init__.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/__pycache__/base.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/__pycache__/base.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b0bb1565a513d32abd9bdbdd3e121b3d2fd9231 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/__pycache__/base.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/__pycache__/pygments.cpython-313.pyc b/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/__pycache__/pygments.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9cb6aa12aafd6c89600c815db687c91ad7e74a61 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/__pycache__/pygments.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/base.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/base.py new file mode 100644 index 0000000000000000000000000000000000000000..c61e2b9d32ef990f49308b40376792c0996d3433 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/base.py @@ -0,0 +1,85 @@ +""" +Base classes for prompt_toolkit lexers. +""" + +from __future__ import annotations + +from abc import ABCMeta, abstractmethod +from typing import Callable, Hashable + +from prompt_toolkit.document import Document +from prompt_toolkit.formatted_text.base import StyleAndTextTuples + +__all__ = [ + "Lexer", + "SimpleLexer", + "DynamicLexer", +] + + +class Lexer(metaclass=ABCMeta): + """ + Base class for all lexers. + """ + + @abstractmethod + def lex_document(self, document: Document) -> Callable[[int], StyleAndTextTuples]: + """ + Takes a :class:`~prompt_toolkit.document.Document` and returns a + callable that takes a line number and returns a list of + ``(style_str, text)`` tuples for that line. + + XXX: Note that in the past, this was supposed to return a list + of ``(Token, text)`` tuples, just like a Pygments lexer. + """ + + def invalidation_hash(self) -> Hashable: + """ + When this changes, `lex_document` could give a different output. + (Only used for `DynamicLexer`.) + """ + return id(self) + + +class SimpleLexer(Lexer): + """ + Lexer that doesn't do any tokenizing and returns the whole input as one + token. + + :param style: The style string for this lexer. + """ + + def __init__(self, style: str = "") -> None: + self.style = style + + def lex_document(self, document: Document) -> Callable[[int], StyleAndTextTuples]: + lines = document.lines + + def get_line(lineno: int) -> StyleAndTextTuples: + "Return the tokens for the given line." 
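+            # For instance (sketch): with SimpleLexer("class:text") and the
+            # document "hello\nworld", this returns [("class:text", "hello")]
+            # for line 0 and [] for any out-of-range line number.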
+            try:
+                return [(self.style, lines[lineno])]
+            except IndexError:
+                return []
+
+        return get_line
+
+
+class DynamicLexer(Lexer):
+    """
+    Lexer class that can dynamically return any Lexer.
+
+    :param get_lexer: Callable that returns a :class:`.Lexer` instance.
+    """
+
+    def __init__(self, get_lexer: Callable[[], Lexer | None]) -> None:
+        self.get_lexer = get_lexer
+        self._dummy = SimpleLexer()
+
+    def lex_document(self, document: Document) -> Callable[[int], StyleAndTextTuples]:
+        lexer = self.get_lexer() or self._dummy
+        return lexer.lex_document(document)
+
+    def invalidation_hash(self) -> Hashable:
+        lexer = self.get_lexer() or self._dummy
+        return id(lexer)
diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/pygments.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/pygments.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5a39c4993f1514c34770fe08f25e8c998140ee5
--- /dev/null
+++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/lexers/pygments.py
@@ -0,0 +1,328 @@
+"""
+Adaptor classes for using Pygments lexers within prompt_toolkit.
+
+This includes syntax synchronization code, so that we don't have to start
+lexing at the beginning of a document, when displaying a very large text.
+"""
+
+from __future__ import annotations
+
+import re
+from abc import ABCMeta, abstractmethod
+from typing import TYPE_CHECKING, Callable, Dict, Generator, Iterable, Tuple
+
+from prompt_toolkit.document import Document
+from prompt_toolkit.filters import FilterOrBool, to_filter
+from prompt_toolkit.formatted_text.base import StyleAndTextTuples
+from prompt_toolkit.formatted_text.utils import split_lines
+from prompt_toolkit.styles.pygments import pygments_token_to_classname
+
+from .base import Lexer, SimpleLexer
+
+if TYPE_CHECKING:
+    from pygments.lexer import Lexer as PygmentsLexerCls
+
+__all__ = [
+    "PygmentsLexer",
+    "SyntaxSync",
+    "SyncFromStart",
+    "RegexSync",
+]
+
+
+class SyntaxSync(metaclass=ABCMeta):
+    """
+    Syntax synchronizer. This is a tool that finds a start position for the
+    lexer. This is especially important when editing big documents; we don't
+    want to start the highlighting by running the lexer from the beginning of
+    the file. That is very slow when editing.
+    """
+
+    @abstractmethod
+    def get_sync_start_position(
+        self, document: Document, lineno: int
+    ) -> tuple[int, int]:
+        """
+        Return the position from where we can start lexing as a (row, column)
+        tuple.
+
+        :param document: `Document` instance that contains all the lines.
+        :param lineno: The line that we want to highlight. (We need to return
+            this line, or an earlier position.)
+        """
+
+
+class SyncFromStart(SyntaxSync):
+    """
+    Always start the syntax highlighting from the beginning.
+    """
+
+    def get_sync_start_position(
+        self, document: Document, lineno: int
+    ) -> tuple[int, int]:
+        return 0, 0
+
+
+class RegexSync(SyntaxSync):
+    """
+    Synchronize by starting at a line that matches the given regex pattern.
+    """
+
+    # Never go more than this amount of lines backwards for synchronization.
+    # That would be too CPU intensive.
+    MAX_BACKWARDS = 500
+
+    # Start lexing at the start, if we are in the first 'n' lines and no
+    # synchronization position was found.
+    FROM_START_IF_NO_SYNC_POS_FOUND = 100
+
+    def __init__(self, pattern: str) -> None:
+        self._compiled_pattern = re.compile(pattern)
+
+    def get_sync_start_position(
+        self, document: Document, lineno: int
+    ) -> tuple[int, int]:
+        """
+        Scan backwards, and find a possible position to start.
+        """
+        pattern = self._compiled_pattern
+        lines = document.lines
+
+        # Scan upwards, until we find a point where we can start the syntax
+        # synchronization.
+        for i in range(lineno, max(-1, lineno - self.MAX_BACKWARDS), -1):
+            match = pattern.match(lines[i])
+            if match:
+                return i, match.start()
+
+        # No synchronization point found. If we aren't that far from the
+        # beginning, start at the very beginning, otherwise, just try to start
+        # at the current line.
+        if lineno < self.FROM_START_IF_NO_SYNC_POS_FOUND:
+            return 0, 0
+        else:
+            return lineno, 0
+
+    @classmethod
+    def from_pygments_lexer_cls(cls, lexer_cls: PygmentsLexerCls) -> RegexSync:
+        """
+        Create a :class:`.RegexSync` instance for this Pygments lexer class.
+        """
+        patterns = {
+            # For Python, start highlighting at any class/def block.
+            "Python": r"^\s*(class|def)\s+",
+            "Python 3": r"^\s*(class|def)\s+",
+            # For HTML, start at any open/close tag definition.
+            "HTML": r"<[/a-zA-Z]",
+            # For javascript, start at a function.
+            "JavaScript": r"\bfunction\b",
+            # TODO: Add definitions for other languages.
+            # By default, we start at every possible line.
+        }
+        p = patterns.get(lexer_cls.name, "^")
+        return cls(p)
+
+
+class _TokenCache(Dict[Tuple[str, ...], str]):
+    """
+    Cache that converts Pygments tokens into `prompt_toolkit` style objects.
+
+    ``Token.A.B.C`` will be converted into:
+    ``class:pygments,pygments.A,pygments.A.B,pygments.A.B.C``
+    """
+
+    def __missing__(self, key: tuple[str, ...]) -> str:
+        result = "class:" + pygments_token_to_classname(key)
+        self[key] = result
+        return result
+
+
+_token_cache = _TokenCache()
+
+
+class PygmentsLexer(Lexer):
+    """
+    Lexer that calls a pygments lexer.
+
+    Example::
+
+        from pygments.lexers.html import HtmlLexer
+        lexer = PygmentsLexer(HtmlLexer)
+
+    Note: Don't forget to also load a Pygments compatible style. E.g.::
+
+        from prompt_toolkit.styles.from_pygments import style_from_pygments_cls
+        from pygments.styles import get_style_by_name
+        style = style_from_pygments_cls(get_style_by_name('monokai'))
+
+    :param pygments_lexer_cls: A `Lexer` from Pygments.
+    :param sync_from_start: Start lexing at the start of the document. This
+        will always give the best results, but it will be slow for bigger
+        documents. (When the last part of the document is displayed, then the
+        whole document will be lexed by Pygments on every key stroke.) It is
+        recommended to disable this for inputs that are expected to be more
+        than 1,000 lines.
+    :param syntax_sync: `SyntaxSync` object.
+    """
+
+    # Minimum amount of lines to go backwards when starting the parser.
+    # This is important when the lines are retrieved in reverse order, or when
+    # scrolling upwards. (Due to the complexity of calculating the vertical
+    # scroll offset in the `Window` class, lines are not always retrieved in
+    # order.)
+    MIN_LINES_BACKWARDS = 50
+
+    # When a parser was started this amount of lines back, read the parser
+    # until we get the current line. Otherwise, start a new parser.
+    # (This should probably be bigger than MIN_LINES_BACKWARDS.)
+    REUSE_GENERATOR_MAX_DISTANCE = 100
+
+    def __init__(
+        self,
+        pygments_lexer_cls: type[PygmentsLexerCls],
+        sync_from_start: FilterOrBool = True,
+        syntax_sync: SyntaxSync | None = None,
+    ) -> None:
+        self.pygments_lexer_cls = pygments_lexer_cls
+        self.sync_from_start = to_filter(sync_from_start)
+
+        # Instantiate the Pygments lexer.
+        self.pygments_lexer = pygments_lexer_cls(
+            stripnl=False, stripall=False, ensurenl=False
+        )
+
+        # Create syntax sync instance.
+        self.syntax_sync = syntax_sync or RegexSync.from_pygments_lexer_cls(
+            pygments_lexer_cls
+        )
+
+    @classmethod
+    def from_filename(
+        cls, filename: str, sync_from_start: FilterOrBool = True
+    ) -> Lexer:
+        """
+        Create a `Lexer` from a filename.
+        """
+        # Inline imports: the Pygments dependency is optional!
+        from pygments.lexers import get_lexer_for_filename
+        from pygments.util import ClassNotFound
+
+        try:
+            pygments_lexer = get_lexer_for_filename(filename)
+        except ClassNotFound:
+            return SimpleLexer()
+        else:
+            return cls(pygments_lexer.__class__, sync_from_start=sync_from_start)
+
+    def lex_document(self, document: Document) -> Callable[[int], StyleAndTextTuples]:
+        """
+        Create a lexer function that takes a line number and returns the list
+        of (style_str, text) tuples as the Pygments lexer returns for that line.
+        """
+        LineGenerator = Generator[Tuple[int, StyleAndTextTuples], None, None]
+
+        # Cache of already lexed lines.
+        cache: dict[int, StyleAndTextTuples] = {}
+
+        # Pygments generators that are currently lexing.
+        # Map lexer generator to the line number.
+        line_generators: dict[LineGenerator, int] = {}
+
+        def get_syntax_sync() -> SyntaxSync:
+            "The Syntax synchronization object that we currently use."
+            if self.sync_from_start():
+                return SyncFromStart()
+            else:
+                return self.syntax_sync
+
+        def find_closest_generator(i: int) -> LineGenerator | None:
+            "Return a generator close to line 'i', or None if none was found."
+            for generator, lineno in line_generators.items():
+                if lineno < i and i - lineno < self.REUSE_GENERATOR_MAX_DISTANCE:
+                    return generator
+            return None
+
+        def create_line_generator(start_lineno: int, column: int = 0) -> LineGenerator:
+            """
+            Create a generator that yields the lexed lines.
+            Each iteration it yields a (line_number, [(style_str, text), ...]) tuple.
+            """
+
+            def get_text_fragments() -> Iterable[tuple[str, str]]:
+                text = "\n".join(document.lines[start_lineno:])[column:]
+
+                # We call `get_tokens_unprocessed`, because `get_tokens` will
+                # still replace \r\n and \r by \n. (We don't want that,
+                # Pygments should return exactly the same amount of text, as we
+                # have given as input.)
+                for _, t, v in self.pygments_lexer.get_tokens_unprocessed(text):
+                    # Turn Pygments `Token` object into prompt_toolkit style
+                    # strings.
+                    yield _token_cache[t], v
+
+            yield from enumerate(split_lines(list(get_text_fragments())), start_lineno)
+
+        def get_generator(i: int) -> LineGenerator:
+            """
+            Find an already started generator that is close, or create a new one.
+            """
+            # Find closest line generator.
+            generator = find_closest_generator(i)
+            if generator:
+                return generator
+
+            # No generator found. Determine starting point for the syntax
+            # synchronization first.
+
+            # Go at least x lines back. (Make scrolling upwards more
+            # efficient.)
+            i = max(0, i - self.MIN_LINES_BACKWARDS)
+
+            if i == 0:
+                row = 0
+                column = 0
+            else:
+                row, column = get_syntax_sync().get_sync_start_position(document, i)
+
+            # Find generator close to this point, or otherwise create a new one.
+            generator = find_closest_generator(i)
+            if generator:
+                return generator
+            else:
+                generator = create_line_generator(row, column)
+
+            # If the column is not 0, ignore the first line. (Which is
+            # incomplete. This happens when the synchronization algorithm tells
+            # us to start parsing in the middle of a line.)
+ if column: + next(generator) + row += 1 + + line_generators[generator] = row + return generator + + def get_line(i: int) -> StyleAndTextTuples: + "Return the tokens for a given line number." + try: + return cache[i] + except KeyError: + generator = get_generator(i) + + # Exhaust the generator, until we find the requested line. + for num, line in generator: + cache[num] = line + if num == i: + line_generators[generator] = i + + # Remove the next item from the cache. + # (It could happen that it's already there, because of + # another generator that started filling these lines, + # but we want to synchronize these lines with the + # current lexer's state.) + if num + 1 in cache: + del cache[num + 1] + + return cache[num] + return [] + + return get_line diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/output/__init__.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6b4c5f3b66c0032074d35edf3edec896f340888d --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/__init__.py @@ -0,0 +1,15 @@ +from __future__ import annotations + +from .base import DummyOutput, Output +from .color_depth import ColorDepth +from .defaults import create_output + +__all__ = [ + # Base. + "Output", + "DummyOutput", + # Color depth. + "ColorDepth", + # Defaults. + "create_output", +] diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/output/base.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/base.py new file mode 100644 index 0000000000000000000000000000000000000000..6ba09fdd089f02e047d95e2ea77a9d7d3e835728 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/base.py @@ -0,0 +1,332 @@ +""" +Interface for an output. +""" + +from __future__ import annotations + +from abc import ABCMeta, abstractmethod +from typing import TextIO + +from prompt_toolkit.cursor_shapes import CursorShape +from prompt_toolkit.data_structures import Size +from prompt_toolkit.styles import Attrs + +from .color_depth import ColorDepth + +__all__ = [ + "Output", + "DummyOutput", +] + + +class Output(metaclass=ABCMeta): + """ + Base class defining the output interface for a + :class:`~prompt_toolkit.renderer.Renderer`. + + Actual implementations are + :class:`~prompt_toolkit.output.vt100.Vt100_Output` and + :class:`~prompt_toolkit.output.win32.Win32Output`. + """ + + stdout: TextIO | None = None + + @abstractmethod + def fileno(self) -> int: + "Return the file descriptor to which we can write for the output." + + @abstractmethod + def encoding(self) -> str: + """ + Return the encoding for this output, e.g. 'utf-8'. + (This is used mainly to know which characters are supported by the + output the data, so that the UI can provide alternatives, when + required.) + """ + + @abstractmethod + def write(self, data: str) -> None: + "Write text (Terminal escape sequences will be removed/escaped.)" + + @abstractmethod + def write_raw(self, data: str) -> None: + "Write text." + + @abstractmethod + def set_title(self, title: str) -> None: + "Set terminal title." + + @abstractmethod + def clear_title(self) -> None: + "Clear title again. (or restore previous title.)" + + @abstractmethod + def flush(self) -> None: + "Write to output stream and flush." + + @abstractmethod + def erase_screen(self) -> None: + """ + Erases the screen with the background color and moves the cursor to + home. 
+ """ + + @abstractmethod + def enter_alternate_screen(self) -> None: + "Go to the alternate screen buffer. (For full screen applications)." + + @abstractmethod + def quit_alternate_screen(self) -> None: + "Leave the alternate screen buffer." + + @abstractmethod + def enable_mouse_support(self) -> None: + "Enable mouse." + + @abstractmethod + def disable_mouse_support(self) -> None: + "Disable mouse." + + @abstractmethod + def erase_end_of_line(self) -> None: + """ + Erases from the current cursor position to the end of the current line. + """ + + @abstractmethod + def erase_down(self) -> None: + """ + Erases the screen from the current line down to the bottom of the + screen. + """ + + @abstractmethod + def reset_attributes(self) -> None: + "Reset color and styling attributes." + + @abstractmethod + def set_attributes(self, attrs: Attrs, color_depth: ColorDepth) -> None: + "Set new color and styling attributes." + + @abstractmethod + def disable_autowrap(self) -> None: + "Disable auto line wrapping." + + @abstractmethod + def enable_autowrap(self) -> None: + "Enable auto line wrapping." + + @abstractmethod + def cursor_goto(self, row: int = 0, column: int = 0) -> None: + "Move cursor position." + + @abstractmethod + def cursor_up(self, amount: int) -> None: + "Move cursor `amount` place up." + + @abstractmethod + def cursor_down(self, amount: int) -> None: + "Move cursor `amount` place down." + + @abstractmethod + def cursor_forward(self, amount: int) -> None: + "Move cursor `amount` place forward." + + @abstractmethod + def cursor_backward(self, amount: int) -> None: + "Move cursor `amount` place backward." + + @abstractmethod + def hide_cursor(self) -> None: + "Hide cursor." + + @abstractmethod + def show_cursor(self) -> None: + "Show cursor." + + @abstractmethod + def set_cursor_shape(self, cursor_shape: CursorShape) -> None: + "Set cursor shape to block, beam or underline." + + @abstractmethod + def reset_cursor_shape(self) -> None: + "Reset cursor shape." + + def ask_for_cpr(self) -> None: + """ + Asks for a cursor position report (CPR). + (VT100 only.) + """ + + @property + def responds_to_cpr(self) -> bool: + """ + `True` if the `Application` can expect to receive a CPR response after + calling `ask_for_cpr` (this will come back through the corresponding + `Input`). + + This is used to determine the amount of available rows we have below + the cursor position. In the first place, we have this so that the drop + down autocompletion menus are sized according to the available space. + + On Windows, we don't need this, there we have + `get_rows_below_cursor_position`. + """ + return False + + @abstractmethod + def get_size(self) -> Size: + "Return the size of the output window." + + def bell(self) -> None: + "Sound bell." + + def enable_bracketed_paste(self) -> None: + "For vt100 only." + + def disable_bracketed_paste(self) -> None: + "For vt100 only." + + def reset_cursor_key_mode(self) -> None: + """ + For vt100 only. + Put the terminal in normal cursor mode (instead of application mode). + + See: https://vt100.net/docs/vt100-ug/chapter3.html + """ + + def scroll_buffer_to_prompt(self) -> None: + "For Win32 only." + + def get_rows_below_cursor_position(self) -> int: + "For Windows only." + raise NotImplementedError + + @abstractmethod + def get_default_color_depth(self) -> ColorDepth: + """ + Get default color depth for this output. + + This value will be used if no color depth was explicitly passed to the + `Application`. + + .. 
note:: + + If the `$PROMPT_TOOLKIT_COLOR_DEPTH` environment variable has been + set, then `outputs.defaults.create_output` will pass this value to + the implementation as the default_color_depth, which is returned + here. (This is not used when the output corresponds to a + prompt_toolkit SSH/Telnet session.) + """ + + +class DummyOutput(Output): + """ + For testing. An output class that doesn't render anything. + """ + + def fileno(self) -> int: + "There is no sensible default for fileno()." + raise NotImplementedError + + def encoding(self) -> str: + return "utf-8" + + def write(self, data: str) -> None: + pass + + def write_raw(self, data: str) -> None: + pass + + def set_title(self, title: str) -> None: + pass + + def clear_title(self) -> None: + pass + + def flush(self) -> None: + pass + + def erase_screen(self) -> None: + pass + + def enter_alternate_screen(self) -> None: + pass + + def quit_alternate_screen(self) -> None: + pass + + def enable_mouse_support(self) -> None: + pass + + def disable_mouse_support(self) -> None: + pass + + def erase_end_of_line(self) -> None: + pass + + def erase_down(self) -> None: + pass + + def reset_attributes(self) -> None: + pass + + def set_attributes(self, attrs: Attrs, color_depth: ColorDepth) -> None: + pass + + def disable_autowrap(self) -> None: + pass + + def enable_autowrap(self) -> None: + pass + + def cursor_goto(self, row: int = 0, column: int = 0) -> None: + pass + + def cursor_up(self, amount: int) -> None: + pass + + def cursor_down(self, amount: int) -> None: + pass + + def cursor_forward(self, amount: int) -> None: + pass + + def cursor_backward(self, amount: int) -> None: + pass + + def hide_cursor(self) -> None: + pass + + def show_cursor(self) -> None: + pass + + def set_cursor_shape(self, cursor_shape: CursorShape) -> None: + pass + + def reset_cursor_shape(self) -> None: + pass + + def ask_for_cpr(self) -> None: + pass + + def bell(self) -> None: + pass + + def enable_bracketed_paste(self) -> None: + pass + + def disable_bracketed_paste(self) -> None: + pass + + def scroll_buffer_to_prompt(self) -> None: + pass + + def get_size(self) -> Size: + return Size(rows=40, columns=80) + + def get_rows_below_cursor_position(self) -> int: + return 40 + + def get_default_color_depth(self) -> ColorDepth: + return ColorDepth.DEPTH_1_BIT diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/output/color_depth.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/color_depth.py new file mode 100644 index 0000000000000000000000000000000000000000..f66d2bea301da9963918c28ba5ee277b755ceb5e --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/color_depth.py @@ -0,0 +1,64 @@ +from __future__ import annotations + +import os +from enum import Enum + +__all__ = [ + "ColorDepth", +] + + +class ColorDepth(str, Enum): + """ + Possible color depth values for the output. + """ + + value: str + + #: One color only. + DEPTH_1_BIT = "DEPTH_1_BIT" + + #: ANSI Colors. + DEPTH_4_BIT = "DEPTH_4_BIT" + + #: The default. + DEPTH_8_BIT = "DEPTH_8_BIT" + + #: 24 bit True color. + DEPTH_24_BIT = "DEPTH_24_BIT" + + # Aliases. + MONOCHROME = DEPTH_1_BIT + ANSI_COLORS_ONLY = DEPTH_4_BIT + DEFAULT = DEPTH_8_BIT + TRUE_COLOR = DEPTH_24_BIT + + @classmethod + def from_env(cls) -> ColorDepth | None: + """ + Return the color depth if the $PROMPT_TOOLKIT_COLOR_DEPTH environment + variable has been set. + + This is a way to enforce a certain color depth in all prompt_toolkit + applications. 
+ """ + # Disable color if a `NO_COLOR` environment variable is set. + # See: https://no-color.org/ + if os.environ.get("NO_COLOR"): + return cls.DEPTH_1_BIT + + # Check the `PROMPT_TOOLKIT_COLOR_DEPTH` environment variable. + all_values = [i.value for i in ColorDepth] + if os.environ.get("PROMPT_TOOLKIT_COLOR_DEPTH") in all_values: + return cls(os.environ["PROMPT_TOOLKIT_COLOR_DEPTH"]) + + return None + + @classmethod + def default(cls) -> ColorDepth: + """ + Return the default color depth for the default output. + """ + from .defaults import create_output + + return create_output().get_default_color_depth() diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/output/conemu.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/conemu.py new file mode 100644 index 0000000000000000000000000000000000000000..636994494544ba223647bd1abbf33baaafedd89f --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/conemu.py @@ -0,0 +1,65 @@ +from __future__ import annotations + +import sys + +assert sys.platform == "win32" + +from typing import Any, TextIO + +from prompt_toolkit.data_structures import Size + +from .base import Output +from .color_depth import ColorDepth +from .vt100 import Vt100_Output +from .win32 import Win32Output + +__all__ = [ + "ConEmuOutput", +] + + +class ConEmuOutput: + """ + ConEmu (Windows) output abstraction. + + ConEmu is a Windows console application, but it also supports ANSI escape + sequences. This output class is actually a proxy to both `Win32Output` and + `Vt100_Output`. It uses `Win32Output` for console sizing and scrolling, but + all cursor movements and scrolling happens through the `Vt100_Output`. + + This way, we can have 256 colors in ConEmu and Cmder. Rendering will be + even a little faster as well. + + http://conemu.github.io/ + http://gooseberrycreative.com/cmder/ + """ + + def __init__( + self, stdout: TextIO, default_color_depth: ColorDepth | None = None + ) -> None: + self.win32_output = Win32Output(stdout, default_color_depth=default_color_depth) + self.vt100_output = Vt100_Output( + stdout, lambda: Size(0, 0), default_color_depth=default_color_depth + ) + + @property + def responds_to_cpr(self) -> bool: + return False # We don't need this on Windows. 
+ + def __getattr__(self, name: str) -> Any: + if name in ( + "get_size", + "get_rows_below_cursor_position", + "enable_mouse_support", + "disable_mouse_support", + "scroll_buffer_to_prompt", + "get_win32_screen_buffer_info", + "enable_bracketed_paste", + "disable_bracketed_paste", + ): + return getattr(self.win32_output, name) + else: + return getattr(self.vt100_output, name) + + +Output.register(ConEmuOutput) diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/output/defaults.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/defaults.py new file mode 100644 index 0000000000000000000000000000000000000000..6b06ed43c884e017bc01d53d8a4bd88617d2a741 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/defaults.py @@ -0,0 +1,106 @@ +from __future__ import annotations + +import sys +from typing import TYPE_CHECKING, TextIO, cast + +from prompt_toolkit.utils import ( + get_bell_environment_variable, + get_term_environment_variable, + is_conemu_ansi, +) + +from .base import DummyOutput, Output +from .color_depth import ColorDepth +from .plain_text import PlainTextOutput + +if TYPE_CHECKING: + from prompt_toolkit.patch_stdout import StdoutProxy + + +__all__ = [ + "create_output", +] + + +def create_output( + stdout: TextIO | StdoutProxy | None = None, always_prefer_tty: bool = False +) -> Output: + """ + Return an :class:`~prompt_toolkit.output.Output` instance for the command + line. + + :param stdout: The stdout object + :param always_prefer_tty: When set, look for `sys.stderr` if `sys.stdout` + is not a TTY. Useful if `sys.stdout` is redirected to a file, but we + still want user input and output on the terminal. + + By default, this is `False`. If `sys.stdout` is not a terminal (maybe + it's redirected to a file), then a `PlainTextOutput` will be returned. + That way, tools like `print_formatted_text` will write plain text into + that file. + """ + # Consider TERM, PROMPT_TOOLKIT_BELL, and PROMPT_TOOLKIT_COLOR_DEPTH + # environment variables. Notice that PROMPT_TOOLKIT_COLOR_DEPTH value is + # the default that's used if the Application doesn't override it. + term_from_env = get_term_environment_variable() + bell_from_env = get_bell_environment_variable() + color_depth_from_env = ColorDepth.from_env() + + if stdout is None: + # By default, render to stdout. If the output is piped somewhere else, + # render to stderr. + stdout = sys.stdout + + if always_prefer_tty: + for io in [sys.stdout, sys.stderr]: + if io is not None and io.isatty(): + # (This is `None` when using `pythonw.exe` on Windows.) + stdout = io + break + + # If the patch_stdout context manager has been used, then sys.stdout is + # replaced by this proxy. For prompt_toolkit applications, we want to use + # the real stdout. + from prompt_toolkit.patch_stdout import StdoutProxy + + while isinstance(stdout, StdoutProxy): + stdout = stdout.original_stdout + + # If the output is still `None`, use a DummyOutput. + # This happens for instance on Windows, when running the application under + # `pythonw.exe`. In that case, there won't be a terminal Window, and + # stdin/stdout/stderr are `None`. 
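+ # (Illustrative summary, not part of the upstream file.) The cascade below
+ # resolves, in order: no usable stdout -> DummyOutput; Windows with vt100
+ # support -> Windows10_Output; ConEmu/Cmder -> ConEmuOutput; any other
+ # Windows console -> Win32Output; a non-tty stdout elsewhere ->
+ # PlainTextOutput; otherwise -> Vt100_Output.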
+ if stdout is None: + return DummyOutput() + + if sys.platform == "win32": + from .conemu import ConEmuOutput + from .win32 import Win32Output + from .windows10 import Windows10_Output, is_win_vt100_enabled + + if is_win_vt100_enabled(): + return cast( + Output, + Windows10_Output(stdout, default_color_depth=color_depth_from_env), + ) + if is_conemu_ansi(): + return cast( + Output, ConEmuOutput(stdout, default_color_depth=color_depth_from_env) + ) + else: + return Win32Output(stdout, default_color_depth=color_depth_from_env) + else: + from .vt100 import Vt100_Output + + # Stdout is not a TTY? Render as plain text. + # This is mostly useful if stdout is redirected to a file, and + # `print_formatted_text` is used. + if not stdout.isatty(): + return PlainTextOutput(stdout) + + return Vt100_Output.from_pty( + stdout, + term=term_from_env, + default_color_depth=color_depth_from_env, + enable_bell=bell_from_env, + ) diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/output/flush_stdout.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/flush_stdout.py new file mode 100644 index 0000000000000000000000000000000000000000..daf58efee63cc6cbe10e9b522a2dc50394acd83f --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/flush_stdout.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +import errno +import os +import sys +from contextlib import contextmanager +from typing import IO, Iterator, TextIO + +__all__ = ["flush_stdout"] + + +def flush_stdout(stdout: TextIO, data: str) -> None: + # If the IO object has an `encoding` and `buffer` attribute, it means that + # we can access the underlying BinaryIO object and write into it in binary + # mode. This is preferred if possible. + # NOTE: When used in a Jupyter notebook, don't write binary. + # `ipykernel.iostream.OutStream` has an `encoding` attribute, but not + # a `buffer` attribute, so we can't write binary in it. + has_binary_io = hasattr(stdout, "encoding") and hasattr(stdout, "buffer") + + try: + # Ensure that `stdout` is made blocking when writing into it. + # Otherwise, when uvloop is activated (which makes stdout + # non-blocking), and we write big amounts of text, then we get a + # `BlockingIOError` here. + with _blocking_io(stdout): + # (We try to encode ourself, because that way we can replace + # characters that don't exist in the character set, avoiding + # UnicodeEncodeError crashes. E.g. u'\xb7' does not appear in 'ascii'.) + # My Arch Linux installation of july 2015 reported 'ANSI_X3.4-1968' + # for sys.stdout.encoding in xterm. + if has_binary_io: + stdout.buffer.write(data.encode(stdout.encoding or "utf-8", "replace")) + else: + stdout.write(data) + + stdout.flush() + except OSError as e: + if e.args and e.args[0] == errno.EINTR: + # Interrupted system call. Can happen in case of a window + # resize signal. (Just ignore. The resize handler will render + # again anyway.) + pass + elif e.args and e.args[0] == 0: + # This can happen when there is a lot of output and the user + # sends a KeyboardInterrupt by pressing Control-C. E.g. in + # a Python REPL when we execute "while True: print('test')". + # (The `ptpython` REPL uses this `Output` class instead of + # `stdout` directly -- in order to be network transparent.) + # So, just ignore. + pass + else: + raise + + +@contextmanager +def _blocking_io(io: IO[str]) -> Iterator[None]: + """ + Ensure that the FD for `io` is set to blocking in here. 
+ """ + if sys.platform == "win32": + # On Windows, the `os` module doesn't have a `get/set_blocking` + # function. + yield + return + + try: + fd = io.fileno() + blocking = os.get_blocking(fd) + except: # noqa + # Failed somewhere. + # `get_blocking` can raise `OSError`. + # The io object can raise `AttributeError` when no `fileno()` method is + # present if we're not a real file object. + blocking = True # Assume we're good, and don't do anything. + + try: + # Make blocking if we weren't blocking yet. + if not blocking: + os.set_blocking(fd, True) + + yield + + finally: + # Restore original blocking mode. + if not blocking: + os.set_blocking(fd, blocking) diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/output/plain_text.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/plain_text.py new file mode 100644 index 0000000000000000000000000000000000000000..4b24ad960e4f2359e0363a216ad91a00d2910d24 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/plain_text.py @@ -0,0 +1,143 @@ +from __future__ import annotations + +from typing import TextIO + +from prompt_toolkit.cursor_shapes import CursorShape +from prompt_toolkit.data_structures import Size +from prompt_toolkit.styles import Attrs + +from .base import Output +from .color_depth import ColorDepth +from .flush_stdout import flush_stdout + +__all__ = ["PlainTextOutput"] + + +class PlainTextOutput(Output): + """ + Output that won't include any ANSI escape sequences. + + Useful when stdout is not a terminal. Maybe stdout is redirected to a file. + In this case, if `print_formatted_text` is used, for instance, we don't + want to include formatting. + + (The code is mostly identical to `Vt100_Output`, but without the + formatting.) + """ + + def __init__(self, stdout: TextIO) -> None: + assert all(hasattr(stdout, a) for a in ("write", "flush")) + + self.stdout: TextIO = stdout + self._buffer: list[str] = [] + + def fileno(self) -> int: + "There is no sensible default for fileno()." 
+ return self.stdout.fileno() + + def encoding(self) -> str: + return "utf-8" + + def write(self, data: str) -> None: + self._buffer.append(data) + + def write_raw(self, data: str) -> None: + self._buffer.append(data) + + def set_title(self, title: str) -> None: + pass + + def clear_title(self) -> None: + pass + + def flush(self) -> None: + if not self._buffer: + return + + data = "".join(self._buffer) + self._buffer = [] + flush_stdout(self.stdout, data) + + def erase_screen(self) -> None: + pass + + def enter_alternate_screen(self) -> None: + pass + + def quit_alternate_screen(self) -> None: + pass + + def enable_mouse_support(self) -> None: + pass + + def disable_mouse_support(self) -> None: + pass + + def erase_end_of_line(self) -> None: + pass + + def erase_down(self) -> None: + pass + + def reset_attributes(self) -> None: + pass + + def set_attributes(self, attrs: Attrs, color_depth: ColorDepth) -> None: + pass + + def disable_autowrap(self) -> None: + pass + + def enable_autowrap(self) -> None: + pass + + def cursor_goto(self, row: int = 0, column: int = 0) -> None: + pass + + def cursor_up(self, amount: int) -> None: + pass + + def cursor_down(self, amount: int) -> None: + self._buffer.append("\n") + + def cursor_forward(self, amount: int) -> None: + self._buffer.append(" " * amount) + + def cursor_backward(self, amount: int) -> None: + pass + + def hide_cursor(self) -> None: + pass + + def show_cursor(self) -> None: + pass + + def set_cursor_shape(self, cursor_shape: CursorShape) -> None: + pass + + def reset_cursor_shape(self) -> None: + pass + + def ask_for_cpr(self) -> None: + pass + + def bell(self) -> None: + pass + + def enable_bracketed_paste(self) -> None: + pass + + def disable_bracketed_paste(self) -> None: + pass + + def scroll_buffer_to_prompt(self) -> None: + pass + + def get_size(self) -> Size: + return Size(rows=40, columns=80) + + def get_rows_below_cursor_position(self) -> int: + return 8 + + def get_default_color_depth(self) -> ColorDepth: + return ColorDepth.DEPTH_1_BIT diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/output/vt100.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/vt100.py new file mode 100644 index 0000000000000000000000000000000000000000..90df21e558ce63e2da3f05c4e9e8c418eae323da --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/vt100.py @@ -0,0 +1,757 @@ +""" +Output for vt100 terminals. + +A lot of thanks, regarding outputting of colors, goes to the Pygments project: +(We don't rely on Pygments anymore, because many things are very custom, and +everything has been highly optimized.) +http://pygments.org/ +""" + +from __future__ import annotations + +import io +import os +import sys +from typing import Callable, Dict, Hashable, Iterable, Sequence, TextIO, Tuple + +from prompt_toolkit.cursor_shapes import CursorShape +from prompt_toolkit.data_structures import Size +from prompt_toolkit.output import Output +from prompt_toolkit.styles import ANSI_COLOR_NAMES, Attrs +from prompt_toolkit.utils import is_dumb_terminal + +from .color_depth import ColorDepth +from .flush_stdout import flush_stdout + +__all__ = [ + "Vt100_Output", +] + + +FG_ANSI_COLORS = { + "ansidefault": 39, + # Low intensity. + "ansiblack": 30, + "ansired": 31, + "ansigreen": 32, + "ansiyellow": 33, + "ansiblue": 34, + "ansimagenta": 35, + "ansicyan": 36, + "ansigray": 37, + # High intensity. 
+ "ansibrightblack": 90, + "ansibrightred": 91, + "ansibrightgreen": 92, + "ansibrightyellow": 93, + "ansibrightblue": 94, + "ansibrightmagenta": 95, + "ansibrightcyan": 96, + "ansiwhite": 97, +} + +BG_ANSI_COLORS = { + "ansidefault": 49, + # Low intensity. + "ansiblack": 40, + "ansired": 41, + "ansigreen": 42, + "ansiyellow": 43, + "ansiblue": 44, + "ansimagenta": 45, + "ansicyan": 46, + "ansigray": 47, + # High intensity. + "ansibrightblack": 100, + "ansibrightred": 101, + "ansibrightgreen": 102, + "ansibrightyellow": 103, + "ansibrightblue": 104, + "ansibrightmagenta": 105, + "ansibrightcyan": 106, + "ansiwhite": 107, +} + + +ANSI_COLORS_TO_RGB = { + "ansidefault": ( + 0x00, + 0x00, + 0x00, + ), # Don't use, 'default' doesn't really have a value. + "ansiblack": (0x00, 0x00, 0x00), + "ansigray": (0xE5, 0xE5, 0xE5), + "ansibrightblack": (0x7F, 0x7F, 0x7F), + "ansiwhite": (0xFF, 0xFF, 0xFF), + # Low intensity. + "ansired": (0xCD, 0x00, 0x00), + "ansigreen": (0x00, 0xCD, 0x00), + "ansiyellow": (0xCD, 0xCD, 0x00), + "ansiblue": (0x00, 0x00, 0xCD), + "ansimagenta": (0xCD, 0x00, 0xCD), + "ansicyan": (0x00, 0xCD, 0xCD), + # High intensity. + "ansibrightred": (0xFF, 0x00, 0x00), + "ansibrightgreen": (0x00, 0xFF, 0x00), + "ansibrightyellow": (0xFF, 0xFF, 0x00), + "ansibrightblue": (0x00, 0x00, 0xFF), + "ansibrightmagenta": (0xFF, 0x00, 0xFF), + "ansibrightcyan": (0x00, 0xFF, 0xFF), +} + + +assert set(FG_ANSI_COLORS) == set(ANSI_COLOR_NAMES) +assert set(BG_ANSI_COLORS) == set(ANSI_COLOR_NAMES) +assert set(ANSI_COLORS_TO_RGB) == set(ANSI_COLOR_NAMES) + + +def _get_closest_ansi_color(r: int, g: int, b: int, exclude: Sequence[str] = ()) -> str: + """ + Find closest ANSI color. Return it by name. + + :param r: Red (Between 0 and 255.) + :param g: Green (Between 0 and 255.) + :param b: Blue (Between 0 and 255.) + :param exclude: A tuple of color names to exclude. (E.g. ``('ansired', )``.) + """ + exclude = list(exclude) + + # When we have a bit of saturation, avoid the gray-like colors, otherwise, + # too often the distance to the gray color is less. + saturation = abs(r - g) + abs(g - b) + abs(b - r) # Between 0..510 + + if saturation > 30: + exclude.extend(["ansilightgray", "ansidarkgray", "ansiwhite", "ansiblack"]) + + # Take the closest color. + # (Thanks to Pygments for this part.) + distance = 257 * 257 * 3 # "infinity" (>distance from #000000 to #ffffff) + match = "ansidefault" + + for name, (r2, g2, b2) in ANSI_COLORS_TO_RGB.items(): + if name != "ansidefault" and name not in exclude: + d = (r - r2) ** 2 + (g - g2) ** 2 + (b - b2) ** 2 + + if d < distance: + match = name + distance = d + + return match + + +_ColorCodeAndName = Tuple[int, str] + + +class _16ColorCache: + """ + Cache which maps (r, g, b) tuples to 16 ansi colors. + + :param bg: Cache for background colors, instead of foreground. + """ + + def __init__(self, bg: bool = False) -> None: + self.bg = bg + self._cache: dict[Hashable, _ColorCodeAndName] = {} + + def get_code( + self, value: tuple[int, int, int], exclude: Sequence[str] = () + ) -> _ColorCodeAndName: + """ + Return a (ansi_code, ansi_name) tuple. (E.g. ``(44, 'ansiblue')``.) for + a given (r,g,b) value. + """ + key: Hashable = (value, tuple(exclude)) + cache = self._cache + + if key not in cache: + cache[key] = self._get(value, exclude) + + return cache[key] + + def _get( + self, value: tuple[int, int, int], exclude: Sequence[str] = () + ) -> _ColorCodeAndName: + r, g, b = value + match = _get_closest_ansi_color(r, g, b, exclude=exclude) + + # Turn color name into code. 
+ if self.bg: + code = BG_ANSI_COLORS[match] + else: + code = FG_ANSI_COLORS[match] + + return code, match + + +class _256ColorCache(Dict[Tuple[int, int, int], int]): + """ + Cache which maps (r, g, b) tuples to 256 colors. + """ + + def __init__(self) -> None: + # Build color table. + colors: list[tuple[int, int, int]] = [] + + # colors 0..15: 16 basic colors + colors.append((0x00, 0x00, 0x00)) # 0 + colors.append((0xCD, 0x00, 0x00)) # 1 + colors.append((0x00, 0xCD, 0x00)) # 2 + colors.append((0xCD, 0xCD, 0x00)) # 3 + colors.append((0x00, 0x00, 0xEE)) # 4 + colors.append((0xCD, 0x00, 0xCD)) # 5 + colors.append((0x00, 0xCD, 0xCD)) # 6 + colors.append((0xE5, 0xE5, 0xE5)) # 7 + colors.append((0x7F, 0x7F, 0x7F)) # 8 + colors.append((0xFF, 0x00, 0x00)) # 9 + colors.append((0x00, 0xFF, 0x00)) # 10 + colors.append((0xFF, 0xFF, 0x00)) # 11 + colors.append((0x5C, 0x5C, 0xFF)) # 12 + colors.append((0xFF, 0x00, 0xFF)) # 13 + colors.append((0x00, 0xFF, 0xFF)) # 14 + colors.append((0xFF, 0xFF, 0xFF)) # 15 + + # colors 16..232: the 6x6x6 color cube + valuerange = (0x00, 0x5F, 0x87, 0xAF, 0xD7, 0xFF) + + for i in range(217): + r = valuerange[(i // 36) % 6] + g = valuerange[(i // 6) % 6] + b = valuerange[i % 6] + colors.append((r, g, b)) + + # colors 233..253: grayscale + for i in range(1, 22): + v = 8 + i * 10 + colors.append((v, v, v)) + + self.colors = colors + + def __missing__(self, value: tuple[int, int, int]) -> int: + r, g, b = value + + # Find closest color. + # (Thanks to Pygments for this!) + distance = 257 * 257 * 3 # "infinity" (>distance from #000000 to #ffffff) + match = 0 + + for i, (r2, g2, b2) in enumerate(self.colors): + if i >= 16: # XXX: We ignore the 16 ANSI colors when mapping RGB + # to the 256 colors, because these highly depend on + # the color scheme of the terminal. + d = (r - r2) ** 2 + (g - g2) ** 2 + (b - b2) ** 2 + + if d < distance: + match = i + distance = d + + # Turn color name into code. + self[value] = match + return match + + +_16_fg_colors = _16ColorCache(bg=False) +_16_bg_colors = _16ColorCache(bg=True) +_256_colors = _256ColorCache() + + +class _EscapeCodeCache(Dict[Attrs, str]): + """ + Cache for VT100 escape codes. It maps + (fgcolor, bgcolor, bold, underline, strike, reverse) tuples to VT100 + escape sequences. + + :param true_color: When True, use 24bit colors instead of 256 colors. + """ + + def __init__(self, color_depth: ColorDepth) -> None: + self.color_depth = color_depth + + def __missing__(self, attrs: Attrs) -> str: + ( + fgcolor, + bgcolor, + bold, + underline, + strike, + italic, + blink, + reverse, + hidden, + ) = attrs + parts: list[str] = [] + + parts.extend(self._colors_to_code(fgcolor or "", bgcolor or "")) + + if bold: + parts.append("1") + if italic: + parts.append("3") + if blink: + parts.append("5") + if underline: + parts.append("4") + if reverse: + parts.append("7") + if hidden: + parts.append("8") + if strike: + parts.append("9") + + if parts: + result = "\x1b[0;" + ";".join(parts) + "m" + else: + result = "\x1b[0m" + + self[attrs] = result + return result + + def _color_name_to_rgb(self, color: str) -> tuple[int, int, int]: + "Turn 'ffffff', into (0xff, 0xff, 0xff)." + try: + rgb = int(color, 16) + except ValueError: + raise + else: + r = (rgb >> 16) & 0xFF + g = (rgb >> 8) & 0xFF + b = rgb & 0xFF + return r, g, b + + def _colors_to_code(self, fg_color: str, bg_color: str) -> Iterable[str]: + """ + Return a tuple with the vt100 values that represent this color. 
+ """ + # When requesting ANSI colors only, and both fg/bg color were converted + # to ANSI, ensure that the foreground and background color are not the + # same. (Unless they were explicitly defined to be the same color.) + fg_ansi = "" + + def get(color: str, bg: bool) -> list[int]: + nonlocal fg_ansi + + table = BG_ANSI_COLORS if bg else FG_ANSI_COLORS + + if not color or self.color_depth == ColorDepth.DEPTH_1_BIT: + return [] + + # 16 ANSI colors. (Given by name.) + elif color in table: + return [table[color]] + + # RGB colors. (Defined as 'ffffff'.) + else: + try: + rgb = self._color_name_to_rgb(color) + except ValueError: + return [] + + # When only 16 colors are supported, use that. + if self.color_depth == ColorDepth.DEPTH_4_BIT: + if bg: # Background. + if fg_color != bg_color: + exclude = [fg_ansi] + else: + exclude = [] + code, name = _16_bg_colors.get_code(rgb, exclude=exclude) + return [code] + else: # Foreground. + code, name = _16_fg_colors.get_code(rgb) + fg_ansi = name + return [code] + + # True colors. (Only when this feature is enabled.) + elif self.color_depth == ColorDepth.DEPTH_24_BIT: + r, g, b = rgb + return [(48 if bg else 38), 2, r, g, b] + + # 256 RGB colors. + else: + return [(48 if bg else 38), 5, _256_colors[rgb]] + + result: list[int] = [] + result.extend(get(fg_color, False)) + result.extend(get(bg_color, True)) + + return map(str, result) + + +def _get_size(fileno: int) -> tuple[int, int]: + """ + Get the size of this pseudo terminal. + + :param fileno: stdout.fileno() + :returns: A (rows, cols) tuple. + """ + size = os.get_terminal_size(fileno) + return size.lines, size.columns + + +class Vt100_Output(Output): + """ + :param get_size: A callable which returns the `Size` of the output terminal. + :param stdout: Any object with has a `write` and `flush` method + an 'encoding' property. + :param term: The terminal environment variable. (xterm, xterm-256color, linux, ...) + :param enable_cpr: When `True` (the default), send "cursor position + request" escape sequences to the output in order to detect the cursor + position. That way, we can properly determine how much space there is + available for the UI (especially for drop down menus) to render. The + `Renderer` will still try to figure out whether the current terminal + does respond to CPR escapes. When `False`, never attempt to send CPR + requests. + """ + + # For the error messages. Only display "Output is not a terminal" once per + # file descriptor. + _fds_not_a_terminal: set[int] = set() + + def __init__( + self, + stdout: TextIO, + get_size: Callable[[], Size], + term: str | None = None, + default_color_depth: ColorDepth | None = None, + enable_bell: bool = True, + enable_cpr: bool = True, + ) -> None: + assert all(hasattr(stdout, a) for a in ("write", "flush")) + + self._buffer: list[str] = [] + self.stdout: TextIO = stdout + self.default_color_depth = default_color_depth + self._get_size = get_size + self.term = term + self.enable_bell = enable_bell + self.enable_cpr = enable_cpr + + # Cache for escape codes. + self._escape_code_caches: dict[ColorDepth, _EscapeCodeCache] = { + ColorDepth.DEPTH_1_BIT: _EscapeCodeCache(ColorDepth.DEPTH_1_BIT), + ColorDepth.DEPTH_4_BIT: _EscapeCodeCache(ColorDepth.DEPTH_4_BIT), + ColorDepth.DEPTH_8_BIT: _EscapeCodeCache(ColorDepth.DEPTH_8_BIT), + ColorDepth.DEPTH_24_BIT: _EscapeCodeCache(ColorDepth.DEPTH_24_BIT), + } + + # Keep track of whether the cursor shape was ever changed. 
+ # (We don't restore the cursor shape if it was never changed - by + # default, we don't change them.) + self._cursor_shape_changed = False + + # Don't hide/show the cursor when this was already done. + # (`None` means that we don't know whether the cursor is visible or + # not.) + self._cursor_visible: bool | None = None + + @classmethod + def from_pty( + cls, + stdout: TextIO, + term: str | None = None, + default_color_depth: ColorDepth | None = None, + enable_bell: bool = True, + ) -> Vt100_Output: + """ + Create an Output class from a pseudo terminal. + (This will take the dimensions by reading the pseudo + terminal attributes.) + """ + fd: int | None + # Normally, this requires a real TTY device, but people instantiate + # this class often during unit tests as well. For convenience, we print + # an error message, use standard dimensions, and go on. + try: + fd = stdout.fileno() + except io.UnsupportedOperation: + fd = None + + if not stdout.isatty() and (fd is None or fd not in cls._fds_not_a_terminal): + msg = "Warning: Output is not a terminal (fd=%r).\n" + sys.stderr.write(msg % fd) + sys.stderr.flush() + if fd is not None: + cls._fds_not_a_terminal.add(fd) + + def get_size() -> Size: + # If terminal (incorrectly) reports its size as 0, pick a + # reasonable default. See + # https://github.com/ipython/ipython/issues/10071 + rows, columns = (None, None) + + # It is possible that `stdout` is no longer a TTY device at this + # point. In that case we get an `OSError` in the ioctl call in + # `get_size`. See: + # https://github.com/prompt-toolkit/python-prompt-toolkit/pull/1021 + try: + rows, columns = _get_size(stdout.fileno()) + except OSError: + pass + return Size(rows=rows or 24, columns=columns or 80) + + return cls( + stdout, + get_size, + term=term, + default_color_depth=default_color_depth, + enable_bell=enable_bell, + ) + + def get_size(self) -> Size: + return self._get_size() + + def fileno(self) -> int: + "Return file descriptor." + return self.stdout.fileno() + + def encoding(self) -> str: + "Return encoding used for stdout." + return self.stdout.encoding + + def write_raw(self, data: str) -> None: + """ + Write raw data to output. + """ + self._buffer.append(data) + + def write(self, data: str) -> None: + """ + Write text to output. + (Removes vt100 escape codes. -- used for safely writing text.) + """ + self._buffer.append(data.replace("\x1b", "?")) + + def set_title(self, title: str) -> None: + """ + Set terminal title. + """ + if self.term not in ( + "linux", + "eterm-color", + ): # Not supported by the Linux console. + self.write_raw( + "\x1b]2;{}\x07".format(title.replace("\x1b", "").replace("\x07", "")) + ) + + def clear_title(self) -> None: + self.set_title("") + + def erase_screen(self) -> None: + """ + Erases the screen with the background color and moves the cursor to + home. + """ + self.write_raw("\x1b[2J") + + def enter_alternate_screen(self) -> None: + self.write_raw("\x1b[?1049h\x1b[H") + + def quit_alternate_screen(self) -> None: + self.write_raw("\x1b[?1049l") + + def enable_mouse_support(self) -> None: + self.write_raw("\x1b[?1000h") + + # Enable mouse-drag support. + self.write_raw("\x1b[?1003h") + + # Enable urxvt Mouse mode. (For terminals that understand this.) + self.write_raw("\x1b[?1015h") + + # Also enable Xterm SGR mouse mode. (For terminals that understand this.) + self.write_raw("\x1b[?1006h") + + # Note: E.g. lxterminal understands 1000h, but not the urxvt or sgr + # extensions. 
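+ # (Illustrative note, not part of the upstream file.) The four sequences
+ # above follow the DEC private-mode convention: "\x1b[?<mode>h" sets a
+ # mode and "\x1b[?<mode>l" resets it; `disable_mouse_support` below resets
+ # the same four modes with 'l'. A minimal sketch of the shared pattern
+ # (hypothetical helper, not in prompt_toolkit):
+ #
+ #     def _set_private_mode(write, mode: int, on: bool) -> None:
+ #         write("\x1b[?%i%s" % (mode, "h" if on else "l"))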
+ + def disable_mouse_support(self) -> None: + self.write_raw("\x1b[?1000l") + self.write_raw("\x1b[?1015l") + self.write_raw("\x1b[?1006l") + self.write_raw("\x1b[?1003l") + + def erase_end_of_line(self) -> None: + """ + Erases from the current cursor position to the end of the current line. + """ + self.write_raw("\x1b[K") + + def erase_down(self) -> None: + """ + Erases the screen from the current line down to the bottom of the + screen. + """ + self.write_raw("\x1b[J") + + def reset_attributes(self) -> None: + self.write_raw("\x1b[0m") + + def set_attributes(self, attrs: Attrs, color_depth: ColorDepth) -> None: + """ + Create new style and output. + + :param attrs: `Attrs` instance. + """ + # Get current depth. + escape_code_cache = self._escape_code_caches[color_depth] + + # Write escape character. + self.write_raw(escape_code_cache[attrs]) + + def disable_autowrap(self) -> None: + self.write_raw("\x1b[?7l") + + def enable_autowrap(self) -> None: + self.write_raw("\x1b[?7h") + + def enable_bracketed_paste(self) -> None: + self.write_raw("\x1b[?2004h") + + def disable_bracketed_paste(self) -> None: + self.write_raw("\x1b[?2004l") + + def reset_cursor_key_mode(self) -> None: + """ + For vt100 only. + Put the terminal in cursor mode (instead of application mode). + """ + # Put the terminal in cursor mode. (Instead of application mode.) + self.write_raw("\x1b[?1l") + + def cursor_goto(self, row: int = 0, column: int = 0) -> None: + """ + Move cursor position. + """ + self.write_raw("\x1b[%i;%iH" % (row, column)) + + def cursor_up(self, amount: int) -> None: + if amount == 0: + pass + elif amount == 1: + self.write_raw("\x1b[A") + else: + self.write_raw("\x1b[%iA" % amount) + + def cursor_down(self, amount: int) -> None: + if amount == 0: + pass + elif amount == 1: + # Note: Not the same as '\n', '\n' can cause the window content to + # scroll. + self.write_raw("\x1b[B") + else: + self.write_raw("\x1b[%iB" % amount) + + def cursor_forward(self, amount: int) -> None: + if amount == 0: + pass + elif amount == 1: + self.write_raw("\x1b[C") + else: + self.write_raw("\x1b[%iC" % amount) + + def cursor_backward(self, amount: int) -> None: + if amount == 0: + pass + elif amount == 1: + self.write_raw("\b") # '\x1b[D' + else: + self.write_raw("\x1b[%iD" % amount) + + def hide_cursor(self) -> None: + if self._cursor_visible in (True, None): + self._cursor_visible = False + self.write_raw("\x1b[?25l") + + def show_cursor(self) -> None: + if self._cursor_visible in (False, None): + self._cursor_visible = True + self.write_raw("\x1b[?12l\x1b[?25h") # Stop blinking cursor and show. + + def set_cursor_shape(self, cursor_shape: CursorShape) -> None: + if cursor_shape == CursorShape._NEVER_CHANGE: + return + + self._cursor_shape_changed = True + self.write_raw( + { + CursorShape.BLOCK: "\x1b[2 q", + CursorShape.BEAM: "\x1b[6 q", + CursorShape.UNDERLINE: "\x1b[4 q", + CursorShape.BLINKING_BLOCK: "\x1b[1 q", + CursorShape.BLINKING_BEAM: "\x1b[5 q", + CursorShape.BLINKING_UNDERLINE: "\x1b[3 q", + }.get(cursor_shape, "") + ) + + def reset_cursor_shape(self) -> None: + "Reset cursor shape." + # (Only reset cursor shape, if we ever changed it.) + if self._cursor_shape_changed: + self._cursor_shape_changed = False + + # Reset cursor shape. + self.write_raw("\x1b[0 q") + + def flush(self) -> None: + """ + Write to output stream and flush. 
+ """ + if not self._buffer: + return + + data = "".join(self._buffer) + self._buffer = [] + + flush_stdout(self.stdout, data) + + def ask_for_cpr(self) -> None: + """ + Asks for a cursor position report (CPR). + """ + self.write_raw("\x1b[6n") + self.flush() + + @property + def responds_to_cpr(self) -> bool: + if not self.enable_cpr: + return False + + # When the input is a tty, we assume that CPR is supported. + # It's not when the input is piped from Pexpect. + if os.environ.get("PROMPT_TOOLKIT_NO_CPR", "") == "1": + return False + + if is_dumb_terminal(self.term): + return False + try: + return self.stdout.isatty() + except ValueError: + return False # ValueError: I/O operation on closed file + + def bell(self) -> None: + "Sound bell." + if self.enable_bell: + self.write_raw("\a") + self.flush() + + def get_default_color_depth(self) -> ColorDepth: + """ + Return the default color depth for a vt100 terminal, according to the + our term value. + + We prefer 256 colors almost always, because this is what most terminals + support these days, and is a good default. + """ + if self.default_color_depth is not None: + return self.default_color_depth + + term = self.term + + if term is None: + return ColorDepth.DEFAULT + + if is_dumb_terminal(term): + return ColorDepth.DEPTH_1_BIT + + if term in ("linux", "eterm-color"): + return ColorDepth.DEPTH_4_BIT + + return ColorDepth.DEFAULT diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/output/win32.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/win32.py new file mode 100644 index 0000000000000000000000000000000000000000..83ccea43fcfd36ca2000ff6dfe12c428907ed7bd --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/win32.py @@ -0,0 +1,683 @@ +from __future__ import annotations + +import sys + +assert sys.platform == "win32" + +import os +from ctypes import ArgumentError, byref, c_char, c_long, c_uint, c_ulong, pointer +from ctypes.wintypes import DWORD, HANDLE +from typing import Callable, TextIO, TypeVar + +from prompt_toolkit.cursor_shapes import CursorShape +from prompt_toolkit.data_structures import Size +from prompt_toolkit.styles import ANSI_COLOR_NAMES, Attrs +from prompt_toolkit.utils import get_cwidth +from prompt_toolkit.win32_types import ( + CONSOLE_SCREEN_BUFFER_INFO, + COORD, + SMALL_RECT, + STD_INPUT_HANDLE, + STD_OUTPUT_HANDLE, +) + +from ..utils import SPHINX_AUTODOC_RUNNING +from .base import Output +from .color_depth import ColorDepth + +# Do not import win32-specific stuff when generating documentation. +# Otherwise RTD would be unable to generate docs for this module. +if not SPHINX_AUTODOC_RUNNING: + from ctypes import windll + + +__all__ = [ + "Win32Output", +] + + +def _coord_byval(coord: COORD) -> c_long: + """ + Turns a COORD object into a c_long. + This will cause it to be passed by value instead of by reference. (That is what I think at least.) + + When running ``ptipython`` is run (only with IPython), we often got the following error:: + + Error in 'SetConsoleCursorPosition'. + ArgumentError("argument 2: : wrong type",) + argument 2: : wrong type + + It was solved by turning ``COORD`` parameters into a ``c_long`` like this. + + More info: http://msdn.microsoft.com/en-us/library/windows/desktop/ms686025(v=vs.85).aspx + """ + return c_long(coord.Y * 0x10000 | coord.X & 0xFFFF) + + +#: If True: write the output of the renderer also to the following file. This +#: is very useful for debugging. (e.g.: to see that we don't write more bytes +#: than required.) 
+_DEBUG_RENDER_OUTPUT = False +_DEBUG_RENDER_OUTPUT_FILENAME = r"prompt-toolkit-windows-output.log" + + +class NoConsoleScreenBufferError(Exception): + """ + Raised when the application is not running inside a Windows Console, but + the user tries to instantiate Win32Output. + """ + + def __init__(self) -> None: + # Are we running in 'xterm' on Windows, like git-bash for instance? + xterm = "xterm" in os.environ.get("TERM", "") + + if xterm: + message = ( + "Found {}, while expecting a Windows console. " + 'Maybe try to run this program using "winpty" ' + "or run it in cmd.exe instead. Or otherwise, " + "in case of Cygwin, use the Python executable " + "that is compiled for Cygwin.".format(os.environ["TERM"]) + ) + else: + message = "No Windows console found. Are you running cmd.exe?" + super().__init__(message) + + +_T = TypeVar("_T") + + +class Win32Output(Output): + """ + I/O abstraction for rendering to Windows consoles. + (cmd.exe and similar.) + """ + + def __init__( + self, + stdout: TextIO, + use_complete_width: bool = False, + default_color_depth: ColorDepth | None = None, + ) -> None: + self.use_complete_width = use_complete_width + self.default_color_depth = default_color_depth + + self._buffer: list[str] = [] + self.stdout: TextIO = stdout + self.hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)) + + self._in_alternate_screen = False + self._hidden = False + + self.color_lookup_table = ColorLookupTable() + + # Remember the default console colors. + info = self.get_win32_screen_buffer_info() + self.default_attrs = info.wAttributes if info else 15 + + if _DEBUG_RENDER_OUTPUT: + self.LOG = open(_DEBUG_RENDER_OUTPUT_FILENAME, "ab") + + def fileno(self) -> int: + "Return file descriptor." + return self.stdout.fileno() + + def encoding(self) -> str: + "Return encoding used for stdout." + return self.stdout.encoding + + def write(self, data: str) -> None: + if self._hidden: + data = " " * get_cwidth(data) + + self._buffer.append(data) + + def write_raw(self, data: str) -> None: + "For win32, there is no difference between write and write_raw." + self.write(data) + + def get_size(self) -> Size: + info = self.get_win32_screen_buffer_info() + + # We take the width of the *visible* region as the size. Not the width + # of the complete screen buffer. (Unless use_complete_width has been + # set.) + if self.use_complete_width: + width = info.dwSize.X + else: + width = info.srWindow.Right - info.srWindow.Left + + height = info.srWindow.Bottom - info.srWindow.Top + 1 + + # We avoid the right margin, windows will wrap otherwise. + maxwidth = info.dwSize.X - 1 + width = min(maxwidth, width) + + # Create `Size` object. + return Size(rows=height, columns=width) + + def _winapi(self, func: Callable[..., _T], *a: object, **kw: object) -> _T: + """ + Flush and call win API function. + """ + self.flush() + + if _DEBUG_RENDER_OUTPUT: + self.LOG.write((f"{func.__name__!r}").encode() + b"\n") + self.LOG.write( + b" " + ", ".join([f"{i!r}" for i in a]).encode("utf-8") + b"\n" + ) + self.LOG.write( + b" " + + ", ".join([f"{type(i)!r}" for i in a]).encode("utf-8") + + b"\n" + ) + self.LOG.flush() + + try: + return func(*a, **kw) + except ArgumentError as e: + if _DEBUG_RENDER_OUTPUT: + self.LOG.write((f" Error in {func.__name__!r} {e!r} {e}\n").encode()) + + raise + + def get_win32_screen_buffer_info(self) -> CONSOLE_SCREEN_BUFFER_INFO: + """ + Return Screen buffer info. + """ + # NOTE: We don't call the `GetConsoleScreenBufferInfo` API through + # `self._winapi`. 
Doing so causes Python to crash on certain 64bit + # Python versions. (Reproduced with 64bit Python 2.7.6, on Windows + # 10). It is not clear why. Possibly, it has to do with passing + # these objects as an argument, or through *args. + + # The Python documentation contains the following - possibly related - warning: + # ctypes does not support passing unions or structures with + # bit-fields to functions by value. While this may work on 32-bit + # x86, it's not guaranteed by the library to work in the general + # case. Unions and structures with bit-fields should always be + # passed to functions by pointer. + + # Also see: + # - https://github.com/ipython/ipython/issues/10070 + # - https://github.com/jonathanslenders/python-prompt-toolkit/issues/406 + # - https://github.com/jonathanslenders/python-prompt-toolkit/issues/86 + + self.flush() + sbinfo = CONSOLE_SCREEN_BUFFER_INFO() + success = windll.kernel32.GetConsoleScreenBufferInfo( + self.hconsole, byref(sbinfo) + ) + + # success = self._winapi(windll.kernel32.GetConsoleScreenBufferInfo, + # self.hconsole, byref(sbinfo)) + + if success: + return sbinfo + else: + raise NoConsoleScreenBufferError + + def set_title(self, title: str) -> None: + """ + Set terminal title. + """ + self._winapi(windll.kernel32.SetConsoleTitleW, title) + + def clear_title(self) -> None: + self._winapi(windll.kernel32.SetConsoleTitleW, "") + + def erase_screen(self) -> None: + start = COORD(0, 0) + sbinfo = self.get_win32_screen_buffer_info() + length = sbinfo.dwSize.X * sbinfo.dwSize.Y + + self.cursor_goto(row=0, column=0) + self._erase(start, length) + + def erase_down(self) -> None: + sbinfo = self.get_win32_screen_buffer_info() + size = sbinfo.dwSize + + start = sbinfo.dwCursorPosition + length = (size.X - size.X) + size.X * (size.Y - sbinfo.dwCursorPosition.Y) + + self._erase(start, length) + + def erase_end_of_line(self) -> None: + """""" + sbinfo = self.get_win32_screen_buffer_info() + start = sbinfo.dwCursorPosition + length = sbinfo.dwSize.X - sbinfo.dwCursorPosition.X + + self._erase(start, length) + + def _erase(self, start: COORD, length: int) -> None: + chars_written = c_ulong() + + self._winapi( + windll.kernel32.FillConsoleOutputCharacterA, + self.hconsole, + c_char(b" "), + DWORD(length), + _coord_byval(start), + byref(chars_written), + ) + + # Reset attributes. + sbinfo = self.get_win32_screen_buffer_info() + self._winapi( + windll.kernel32.FillConsoleOutputAttribute, + self.hconsole, + sbinfo.wAttributes, + length, + _coord_byval(start), + byref(chars_written), + ) + + def reset_attributes(self) -> None: + "Reset the console foreground/background color." + self._winapi( + windll.kernel32.SetConsoleTextAttribute, self.hconsole, self.default_attrs + ) + self._hidden = False + + def set_attributes(self, attrs: Attrs, color_depth: ColorDepth) -> None: + ( + fgcolor, + bgcolor, + bold, + underline, + strike, + italic, + blink, + reverse, + hidden, + ) = attrs + self._hidden = bool(hidden) + + # Start from the default attributes. + win_attrs: int = self.default_attrs + + if color_depth != ColorDepth.DEPTH_1_BIT: + # Override the last four bits: foreground color. + if fgcolor: + win_attrs = win_attrs & ~0xF + win_attrs |= self.color_lookup_table.lookup_fg_color(fgcolor) + + # Override the next four bits: background color. + if bgcolor: + win_attrs = win_attrs & ~0xF0 + win_attrs |= self.color_lookup_table.lookup_bg_color(bgcolor) + + # Reverse: swap these four bits groups. 
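+ # (Worked example, not part of the upstream file: the default
+ # white-on-black attribute 0x07 becomes black-on-white 0x70, because
+ # (0x07 & 0xF) << 4 == 0x70 and (0x07 & 0xF0) >> 4 == 0x00.)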
+ if reverse: + win_attrs = ( + (win_attrs & ~0xFF) + | ((win_attrs & 0xF) << 4) + | ((win_attrs & 0xF0) >> 4) + ) + + self._winapi(windll.kernel32.SetConsoleTextAttribute, self.hconsole, win_attrs) + + def disable_autowrap(self) -> None: + # Not supported by Windows. + pass + + def enable_autowrap(self) -> None: + # Not supported by Windows. + pass + + def cursor_goto(self, row: int = 0, column: int = 0) -> None: + pos = COORD(X=column, Y=row) + self._winapi( + windll.kernel32.SetConsoleCursorPosition, self.hconsole, _coord_byval(pos) + ) + + def cursor_up(self, amount: int) -> None: + sr = self.get_win32_screen_buffer_info().dwCursorPosition + pos = COORD(X=sr.X, Y=sr.Y - amount) + self._winapi( + windll.kernel32.SetConsoleCursorPosition, self.hconsole, _coord_byval(pos) + ) + + def cursor_down(self, amount: int) -> None: + self.cursor_up(-amount) + + def cursor_forward(self, amount: int) -> None: + sr = self.get_win32_screen_buffer_info().dwCursorPosition + # assert sr.X + amount >= 0, 'Negative cursor position: x=%r amount=%r' % (sr.X, amount) + + pos = COORD(X=max(0, sr.X + amount), Y=sr.Y) + self._winapi( + windll.kernel32.SetConsoleCursorPosition, self.hconsole, _coord_byval(pos) + ) + + def cursor_backward(self, amount: int) -> None: + self.cursor_forward(-amount) + + def flush(self) -> None: + """ + Write to output stream and flush. + """ + if not self._buffer: + # Only flush stdout buffer. (It could be that Python still has + # something in its buffer. -- We want to be sure to print that in + # the correct color.) + self.stdout.flush() + return + + data = "".join(self._buffer) + + if _DEBUG_RENDER_OUTPUT: + self.LOG.write((f"{data!r}").encode() + b"\n") + self.LOG.flush() + + # Print characters one by one. This appears to be the best solution + # in order to avoid traces of vertical lines when the completion + # menu disappears. + for b in data: + written = DWORD() + + retval = windll.kernel32.WriteConsoleW( + self.hconsole, b, 1, byref(written), None + ) + assert retval != 0 + + self._buffer = [] + + def get_rows_below_cursor_position(self) -> int: + info = self.get_win32_screen_buffer_info() + return info.srWindow.Bottom - info.dwCursorPosition.Y + 1 + + def scroll_buffer_to_prompt(self) -> None: + """ + To be called before drawing the prompt. This should scroll the console + to left, with the cursor at the bottom (if possible). + """ + # Get current window size + info = self.get_win32_screen_buffer_info() + sr = info.srWindow + cursor_pos = info.dwCursorPosition + + result = SMALL_RECT() + + # Scroll to the left. + result.Left = 0 + result.Right = sr.Right - sr.Left + + # Scroll vertical + win_height = sr.Bottom - sr.Top + if 0 < sr.Bottom - cursor_pos.Y < win_height - 1: + # no vertical scroll if cursor already on the screen + result.Bottom = sr.Bottom + else: + result.Bottom = max(win_height, cursor_pos.Y) + result.Top = result.Bottom - win_height + + # Scroll API + self._winapi( + windll.kernel32.SetConsoleWindowInfo, self.hconsole, True, byref(result) + ) + + def enter_alternate_screen(self) -> None: + """ + Go to alternate screen buffer. + """ + if not self._in_alternate_screen: + GENERIC_READ = 0x80000000 + GENERIC_WRITE = 0x40000000 + + # Create a new console buffer and activate that one. 
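+ # (Descriptive note, not part of the upstream file: per the Win32
+ # `CreateConsoleScreenBuffer` documentation, the arguments below are
+ # dwDesiredAccess=GENERIC_READ|GENERIC_WRITE, dwShareMode=0,
+ # lpSecurityAttributes=None, dwFlags=1 (CONSOLE_TEXTMODE_BUFFER, the
+ # only documented flag) and lpScreenBufferData=None.)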
+ handle = HANDLE( + self._winapi( + windll.kernel32.CreateConsoleScreenBuffer, + GENERIC_READ | GENERIC_WRITE, + DWORD(0), + None, + DWORD(1), + None, + ) + ) + + self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, handle) + self.hconsole = handle + self._in_alternate_screen = True + + def quit_alternate_screen(self) -> None: + """ + Make stdout again the active buffer. + """ + if self._in_alternate_screen: + stdout = HANDLE( + self._winapi(windll.kernel32.GetStdHandle, STD_OUTPUT_HANDLE) + ) + self._winapi(windll.kernel32.SetConsoleActiveScreenBuffer, stdout) + self._winapi(windll.kernel32.CloseHandle, self.hconsole) + self.hconsole = stdout + self._in_alternate_screen = False + + def enable_mouse_support(self) -> None: + ENABLE_MOUSE_INPUT = 0x10 + + # This `ENABLE_QUICK_EDIT_MODE` flag needs to be cleared for mouse + # support to work, but it's possible that it was already cleared + # before. + ENABLE_QUICK_EDIT_MODE = 0x0040 + + handle = HANDLE(windll.kernel32.GetStdHandle(STD_INPUT_HANDLE)) + + original_mode = DWORD() + self._winapi(windll.kernel32.GetConsoleMode, handle, pointer(original_mode)) + self._winapi( + windll.kernel32.SetConsoleMode, + handle, + (original_mode.value | ENABLE_MOUSE_INPUT) & ~ENABLE_QUICK_EDIT_MODE, + ) + + def disable_mouse_support(self) -> None: + ENABLE_MOUSE_INPUT = 0x10 + handle = HANDLE(windll.kernel32.GetStdHandle(STD_INPUT_HANDLE)) + + original_mode = DWORD() + self._winapi(windll.kernel32.GetConsoleMode, handle, pointer(original_mode)) + self._winapi( + windll.kernel32.SetConsoleMode, + handle, + original_mode.value & ~ENABLE_MOUSE_INPUT, + ) + + def hide_cursor(self) -> None: + pass + + def show_cursor(self) -> None: + pass + + def set_cursor_shape(self, cursor_shape: CursorShape) -> None: + pass + + def reset_cursor_shape(self) -> None: + pass + + @classmethod + def win32_refresh_window(cls) -> None: + """ + Call win32 API to refresh the whole Window. + + This is sometimes necessary when the application paints background + for completion menus. When the menu disappears, it leaves traces due + to a bug in the Windows Console. Sending a repaint request solves it. + """ + # Get console handle + handle = HANDLE(windll.kernel32.GetConsoleWindow()) + + RDW_INVALIDATE = 0x0001 + windll.user32.RedrawWindow(handle, None, None, c_uint(RDW_INVALIDATE)) + + def get_default_color_depth(self) -> ColorDepth: + """ + Return the default color depth for a windows terminal. + + Contrary to the Vt100 implementation, this doesn't depend on a $TERM + variable. + """ + if self.default_color_depth is not None: + return self.default_color_depth + + return ColorDepth.DEPTH_4_BIT + + +class FOREGROUND_COLOR: + BLACK = 0x0000 + BLUE = 0x0001 + GREEN = 0x0002 + CYAN = 0x0003 + RED = 0x0004 + MAGENTA = 0x0005 + YELLOW = 0x0006 + GRAY = 0x0007 + INTENSITY = 0x0008 # Foreground color is intensified. + + +class BACKGROUND_COLOR: + BLACK = 0x0000 + BLUE = 0x0010 + GREEN = 0x0020 + CYAN = 0x0030 + RED = 0x0040 + MAGENTA = 0x0050 + YELLOW = 0x0060 + GRAY = 0x0070 + INTENSITY = 0x0080 # Background color is intensified. + + +def _create_ansi_color_dict( + color_cls: type[FOREGROUND_COLOR] | type[BACKGROUND_COLOR], +) -> dict[str, int]: + "Create a table that maps the 16 named ansi colors to their Windows code." + return { + "ansidefault": color_cls.BLACK, + "ansiblack": color_cls.BLACK, + "ansigray": color_cls.GRAY, + "ansibrightblack": color_cls.BLACK | color_cls.INTENSITY, + "ansiwhite": color_cls.GRAY | color_cls.INTENSITY, + # Low intensity. 
+ "ansired": color_cls.RED, + "ansigreen": color_cls.GREEN, + "ansiyellow": color_cls.YELLOW, + "ansiblue": color_cls.BLUE, + "ansimagenta": color_cls.MAGENTA, + "ansicyan": color_cls.CYAN, + # High intensity. + "ansibrightred": color_cls.RED | color_cls.INTENSITY, + "ansibrightgreen": color_cls.GREEN | color_cls.INTENSITY, + "ansibrightyellow": color_cls.YELLOW | color_cls.INTENSITY, + "ansibrightblue": color_cls.BLUE | color_cls.INTENSITY, + "ansibrightmagenta": color_cls.MAGENTA | color_cls.INTENSITY, + "ansibrightcyan": color_cls.CYAN | color_cls.INTENSITY, + } + + +FG_ANSI_COLORS = _create_ansi_color_dict(FOREGROUND_COLOR) +BG_ANSI_COLORS = _create_ansi_color_dict(BACKGROUND_COLOR) + +assert set(FG_ANSI_COLORS) == set(ANSI_COLOR_NAMES) +assert set(BG_ANSI_COLORS) == set(ANSI_COLOR_NAMES) + + +class ColorLookupTable: + """ + Inspired by pygments/formatters/terminal256.py + """ + + def __init__(self) -> None: + self._win32_colors = self._build_color_table() + + # Cache (map color string to foreground and background code). + self.best_match: dict[str, tuple[int, int]] = {} + + @staticmethod + def _build_color_table() -> list[tuple[int, int, int, int, int]]: + """ + Build an RGB-to-256 color conversion table + """ + FG = FOREGROUND_COLOR + BG = BACKGROUND_COLOR + + return [ + (0x00, 0x00, 0x00, FG.BLACK, BG.BLACK), + (0x00, 0x00, 0xAA, FG.BLUE, BG.BLUE), + (0x00, 0xAA, 0x00, FG.GREEN, BG.GREEN), + (0x00, 0xAA, 0xAA, FG.CYAN, BG.CYAN), + (0xAA, 0x00, 0x00, FG.RED, BG.RED), + (0xAA, 0x00, 0xAA, FG.MAGENTA, BG.MAGENTA), + (0xAA, 0xAA, 0x00, FG.YELLOW, BG.YELLOW), + (0x88, 0x88, 0x88, FG.GRAY, BG.GRAY), + (0x44, 0x44, 0xFF, FG.BLUE | FG.INTENSITY, BG.BLUE | BG.INTENSITY), + (0x44, 0xFF, 0x44, FG.GREEN | FG.INTENSITY, BG.GREEN | BG.INTENSITY), + (0x44, 0xFF, 0xFF, FG.CYAN | FG.INTENSITY, BG.CYAN | BG.INTENSITY), + (0xFF, 0x44, 0x44, FG.RED | FG.INTENSITY, BG.RED | BG.INTENSITY), + (0xFF, 0x44, 0xFF, FG.MAGENTA | FG.INTENSITY, BG.MAGENTA | BG.INTENSITY), + (0xFF, 0xFF, 0x44, FG.YELLOW | FG.INTENSITY, BG.YELLOW | BG.INTENSITY), + (0x44, 0x44, 0x44, FG.BLACK | FG.INTENSITY, BG.BLACK | BG.INTENSITY), + (0xFF, 0xFF, 0xFF, FG.GRAY | FG.INTENSITY, BG.GRAY | BG.INTENSITY), + ] + + def _closest_color(self, r: int, g: int, b: int) -> tuple[int, int]: + distance = 257 * 257 * 3 # "infinity" (>distance from #000000 to #ffffff) + fg_match = 0 + bg_match = 0 + + for r_, g_, b_, fg_, bg_ in self._win32_colors: + rd = r - r_ + gd = g - g_ + bd = b - b_ + + d = rd * rd + gd * gd + bd * bd + + if d < distance: + fg_match = fg_ + bg_match = bg_ + distance = d + return fg_match, bg_match + + def _color_indexes(self, color: str) -> tuple[int, int]: + indexes = self.best_match.get(color, None) + if indexes is None: + try: + rgb = int(str(color), 16) + except ValueError: + rgb = 0 + + r = (rgb >> 16) & 0xFF + g = (rgb >> 8) & 0xFF + b = rgb & 0xFF + indexes = self._closest_color(r, g, b) + self.best_match[color] = indexes + return indexes + + def lookup_fg_color(self, fg_color: str) -> int: + """ + Return the color for use in the + `windll.kernel32.SetConsoleTextAttribute` API call. + + :param fg_color: Foreground as text. E.g. 'ffffff' or 'red' + """ + # Foreground. + if fg_color in FG_ANSI_COLORS: + return FG_ANSI_COLORS[fg_color] + else: + return self._color_indexes(fg_color)[0] + + def lookup_bg_color(self, bg_color: str) -> int: + """ + Return the color for use in the + `windll.kernel32.SetConsoleTextAttribute` API call. + + :param bg_color: Background as text. E.g. 'ffffff' or 'red' + """ + # Background. 
+ if bg_color in BG_ANSI_COLORS: + return BG_ANSI_COLORS[bg_color] + else: + return self._color_indexes(bg_color)[1] diff --git a/.venv/lib/python3.13/site-packages/prompt_toolkit/output/windows10.py b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/windows10.py new file mode 100644 index 0000000000000000000000000000000000000000..2b7e596e468a63b8a763716a2472560466faf74a --- /dev/null +++ b/.venv/lib/python3.13/site-packages/prompt_toolkit/output/windows10.py @@ -0,0 +1,133 @@ +from __future__ import annotations + +import sys + +assert sys.platform == "win32" + +from ctypes import byref, windll +from ctypes.wintypes import DWORD, HANDLE +from typing import Any, TextIO + +from prompt_toolkit.data_structures import Size +from prompt_toolkit.win32_types import STD_OUTPUT_HANDLE + +from .base import Output +from .color_depth import ColorDepth +from .vt100 import Vt100_Output +from .win32 import Win32Output + +__all__ = [ + "Windows10_Output", +] + +# See: https://msdn.microsoft.com/pl-pl/library/windows/desktop/ms686033(v=vs.85).aspx +ENABLE_PROCESSED_INPUT = 0x0001 +ENABLE_VIRTUAL_TERMINAL_PROCESSING = 0x0004 + + +class Windows10_Output: + """ + Windows 10 output abstraction. This enables and uses vt100 escape sequences. + """ + + def __init__( + self, stdout: TextIO, default_color_depth: ColorDepth | None = None + ) -> None: + self.default_color_depth = default_color_depth + self.win32_output = Win32Output(stdout, default_color_depth=default_color_depth) + self.vt100_output = Vt100_Output( + stdout, lambda: Size(0, 0), default_color_depth=default_color_depth + ) + self._hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)) + + def flush(self) -> None: + """ + Write to output stream and flush. + """ + original_mode = DWORD(0) + + # Remember the previous console mode. + windll.kernel32.GetConsoleMode(self._hconsole, byref(original_mode)) + + # Enable processing of vt100 sequences. + windll.kernel32.SetConsoleMode( + self._hconsole, + DWORD(ENABLE_PROCESSED_INPUT | ENABLE_VIRTUAL_TERMINAL_PROCESSING), + ) + + try: + self.vt100_output.flush() + finally: + # Restore console mode. + windll.kernel32.SetConsoleMode(self._hconsole, original_mode) + + @property + def responds_to_cpr(self) -> bool: + return False # We don't need this on Windows. + + def __getattr__(self, name: str) -> Any: + # NOTE: Now that we use "virtual terminal input" on + # Windows, both input and output are done through + # ANSI escape sequences on Windows. This means, we + # should enable bracketed paste like on Linux, and + # enable mouse support by calling the vt100_output. + if name in ( + "get_size", + "get_rows_below_cursor_position", + "scroll_buffer_to_prompt", + "get_win32_screen_buffer_info", + # "enable_mouse_support", + # "disable_mouse_support", + # "enable_bracketed_paste", + # "disable_bracketed_paste", + ): + return getattr(self.win32_output, name) + else: + return getattr(self.vt100_output, name) + + def get_default_color_depth(self) -> ColorDepth: + """ + Return the default color depth for a windows terminal. + + Contrary to the Vt100 implementation, this doesn't depend on a $TERM + variable. + """ + if self.default_color_depth is not None: + return self.default_color_depth + + # Previously, we used `DEPTH_4_BIT`, even on Windows 10. This was + # because true color support was added after "Console Virtual Terminal + # Sequences" support was added, and there was no good way to detect + # what support was given. 
+ # 24bit color support was added in 2016, so let's assume it's safe to + # take that as a default: + # https://devblogs.microsoft.com/commandline/24-bit-color-in-the-windows-console/ + return ColorDepth.TRUE_COLOR + + +Output.register(Windows10_Output) + + +def is_win_vt100_enabled() -> bool: + """ + Returns True when we're running Windows and VT100 escape sequences are + supported. + """ + if sys.platform != "win32": + return False + + hconsole = HANDLE(windll.kernel32.GetStdHandle(STD_OUTPUT_HANDLE)) + + # Get original console mode. + original_mode = DWORD(0) + windll.kernel32.GetConsoleMode(hconsole, byref(original_mode)) + + try: + # Try to enable VT100 sequences. + result: int = windll.kernel32.SetConsoleMode( + hconsole, DWORD(ENABLE_PROCESSED_INPUT | ENABLE_VIRTUAL_TERMINAL_PROCESSING) + ) + + return result == 1 + finally: + windll.kernel32.SetConsoleMode(hconsole, original_mode) diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/__init__.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..017f602a16a6c997ba553f926bcaa93b5b268a12 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/__init__.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/__version__.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/__version__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e736938a1a911cbab4dd03b83f27018c3d5b0e0 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/__version__.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/_internal_utils.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/_internal_utils.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b68b12e0c26bf8bbf5a4914fab35e5bd4edd01e Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/_internal_utils.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/adapters.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/adapters.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a2fbd5c170f89f77413dc6e0df62aa2cbd4215f Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/adapters.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/api.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/api.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c30dd82b3cef5f37dfcec831bed1b592e5b2924c Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/api.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/auth.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/auth.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d66f426b036610d625149e399b223ceb5e2ed84 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/auth.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/certs.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/certs.cpython-313.pyc new file mode 100644 index 
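A usage sketch tying this module together: `is_win_vt100_enabled()` probes support by attempting `SetConsoleMode` and restoring the original mode, so it can safely drive the choice between the vt100-capable and legacy outputs. The `make_output` helper below is our own simplified stand-in for prompt_toolkit's real output factory, not a replacement for it; the `Size(0, 0)` callback is the same placeholder `Windows10_Output.__init__` uses.

```python
# Sketch: pick an Output implementation using the probe defined above.
import sys

def make_output(stdout):
    if sys.platform == "win32":
        from prompt_toolkit.output.win32 import Win32Output
        from prompt_toolkit.output.windows10 import (
            Windows10_Output, is_win_vt100_enabled)
        if is_win_vt100_enabled():
            return Windows10_Output(stdout)  # vt100 escapes, true color default
        return Win32Output(stdout)           # legacy console API calls only
    from prompt_toolkit.data_structures import Size
    from prompt_toolkit.output.vt100 import Vt100_Output
    return Vt100_Output(stdout, lambda: Size(0, 0))
```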
0000000000000000000000000000000000000000..d55ff1086842bcc9ab5d489ff68f3e535660efd4 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/certs.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/compat.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/compat.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..efd918e0dbcfa6decf3a99310c21829757733d84 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/compat.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/cookies.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/cookies.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dab8f89fdca6daab6ee15938bb98ff30d8363205 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/cookies.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/exceptions.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/exceptions.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..46a8b65e42602eeb713bf9b142c6b0d45fb2f169 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/exceptions.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/hooks.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/hooks.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4bf9ea65c245b1eb280efa75f2051d69c043fee2 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/hooks.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/models.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/models.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4b1bec13ccc6b32f21176484db22ffbc1889f24 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/models.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/packages.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/packages.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d1b3b5a80e8187d0311aba2fb0dd214a3473e9c1 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/packages.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/sessions.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/sessions.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..64dc15578b5dc2a92a3391bd79ab8321a5bc3543 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/sessions.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/status_codes.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/status_codes.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e19714477c89be85bd9d60e643f16887ef148c22 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/status_codes.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/structures.cpython-313.pyc 
b/.venv/lib/python3.13/site-packages/requests/__pycache__/structures.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..05fcebc6cd699458c6cc3c2057799eff1af086f9 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/structures.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/requests/__pycache__/utils.cpython-313.pyc b/.venv/lib/python3.13/site-packages/requests/__pycache__/utils.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..aa430e89e1a97cf1e514ee978174d800e0f2c4b7 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/requests/__pycache__/utils.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/__pycache__/__init__.cpython-313.pyc b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1b43389de2a77f1959a7791fd3ee2a656d2e804 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/__init__.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/__pycache__/_dist_ver.cpython-313.pyc b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/_dist_ver.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb81ea6d1d1cbf429a94a5469f228752e2273130 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/_dist_ver.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/__pycache__/_monitor.cpython-313.pyc b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/_monitor.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9b7050d741b51e14cd602e8c54c862b0f5240fcf Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/_monitor.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/__pycache__/_tqdm_pandas.cpython-313.pyc b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/_tqdm_pandas.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0df22d3d3f5fc15b3887410eadebdd5e293fe0d5 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/_tqdm_pandas.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/__pycache__/asyncio.cpython-313.pyc b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/asyncio.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9eb5b629b154b724ddf653258283e7bf2dbf3b3a Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/asyncio.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/__pycache__/auto.cpython-313.pyc b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/auto.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1d90b8e682af6daf2882bc147e2b4747402a14a5 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/auto.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/__pycache__/autonotebook.cpython-313.pyc b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/autonotebook.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cc4de9580c2bd6e164312192dffa9f9037cfa2e2 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/autonotebook.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/__pycache__/cli.cpython-313.pyc 
b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/cli.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a737c3d8c9b2151db2b35637ddce9d81732f1e9 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/cli.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/__pycache__/gui.cpython-313.pyc b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/gui.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2970a99b0543c2d62acc8137f2fddd6b06734e4 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/gui.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/__pycache__/std.cpython-313.pyc b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/std.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e7cf877567f49b2de10fc68fbde5be1877c5bb89 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/std.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/__pycache__/utils.cpython-313.pyc b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/utils.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..952f2fa3581d0cff0f558da54cb362a868c6b7ec Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/utils.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/__pycache__/version.cpython-313.pyc b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/version.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a2717eac8d216cbe722b6d950c699425e60f1734 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/__pycache__/version.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/contrib/__init__.py b/.venv/lib/python3.13/site-packages/tqdm/contrib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d059461f91fb79115263c16314c3487e16ab98c2 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/tqdm/contrib/__init__.py @@ -0,0 +1,92 @@ +""" +Thin wrappers around common functions. + +Subpackages contain potentially unstable extensions. +""" +from warnings import warn + +from ..auto import tqdm as tqdm_auto +from ..std import TqdmDeprecationWarning, tqdm +from ..utils import ObjectWrapper + +__author__ = {"github.com/": ["casperdcl"]} +__all__ = ['tenumerate', 'tzip', 'tmap'] + + +class DummyTqdmFile(ObjectWrapper): + """Dummy file-like that will write to tqdm""" + + def __init__(self, wrapped): + super().__init__(wrapped) + self._buf = [] + + def write(self, x, nolock=False): + nl = b"\n" if isinstance(x, bytes) else "\n" + pre, sep, post = x.rpartition(nl) + if sep: + blank = type(nl)() + tqdm.write(blank.join(self._buf + [pre, sep]), + end=blank, file=self._wrapped, nolock=nolock) + self._buf = [post] + else: + self._buf.append(x) + + def __del__(self): + if self._buf: + blank = type(self._buf[0])() + try: + tqdm.write(blank.join(self._buf), end=blank, file=self._wrapped) + except (OSError, ValueError): + pass + + +def builtin_iterable(func): + """Returns `func`""" + warn("This function has no effect, and will be removed in tqdm==5.0.0", + TqdmDeprecationWarning, stacklevel=2) + return func + + +def tenumerate(iterable, start=0, total=None, tqdm_class=tqdm_auto, **tqdm_kwargs): + """ + Equivalent of `numpy.ndenumerate` or builtin `enumerate`. 
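`DummyTqdmFile` above buffers partial writes and flushes complete lines through `tqdm.write()`, so ordinary `print()` output does not clobber an active bar. A hedged sketch of the classic redirection recipe built on it; the `stdout_via_tqdm` context manager is our own glue, not part of `tqdm.contrib`.

```python
import contextlib
import sys

from tqdm import tqdm
from tqdm.contrib import DummyTqdmFile

@contextlib.contextmanager
def stdout_via_tqdm():
    orig = sys.stdout
    try:
        sys.stdout = DummyTqdmFile(orig)  # line-buffered tqdm.write() proxy
        yield orig
    finally:
        sys.stdout = orig  # always restore, even on error

with stdout_via_tqdm() as real_stdout:
    for i in tqdm(range(3), file=real_stdout):
        print(f"step {i}")  # routed through tqdm.write(); bar stays intact
```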
+ + Parameters + ---------- + tqdm_class : [default: tqdm.auto.tqdm]. + """ + try: + import numpy as np + except ImportError: + pass + else: + if isinstance(iterable, np.ndarray): + return tqdm_class(np.ndenumerate(iterable), total=total or iterable.size, + **tqdm_kwargs) + return enumerate(tqdm_class(iterable, total=total, **tqdm_kwargs), start) + + +def tzip(iter1, *iter2plus, **tqdm_kwargs): + """ + Equivalent of builtin `zip`. + + Parameters + ---------- + tqdm_class : [default: tqdm.auto.tqdm]. + """ + kwargs = tqdm_kwargs.copy() + tqdm_class = kwargs.pop("tqdm_class", tqdm_auto) + for i in zip(tqdm_class(iter1, **kwargs), *iter2plus): + yield i + + +def tmap(function, *sequences, **tqdm_kwargs): + """ + Equivalent of builtin `map`. + + Parameters + ---------- + tqdm_class : [default: tqdm.auto.tqdm]. + """ + for i in tzip(*sequences, **tqdm_kwargs): + yield function(*i) diff --git a/.venv/lib/python3.13/site-packages/tqdm/contrib/__pycache__/__init__.cpython-313.pyc b/.venv/lib/python3.13/site-packages/tqdm/contrib/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..73f143762083f1e4b3175acee004671884c4e966 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/contrib/__pycache__/__init__.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/contrib/__pycache__/concurrent.cpython-313.pyc b/.venv/lib/python3.13/site-packages/tqdm/contrib/__pycache__/concurrent.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..273be86599469856d66ba249f6c30a878ce0200d Binary files /dev/null and b/.venv/lib/python3.13/site-packages/tqdm/contrib/__pycache__/concurrent.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/tqdm/contrib/bells.py b/.venv/lib/python3.13/site-packages/tqdm/contrib/bells.py new file mode 100644 index 0000000000000000000000000000000000000000..5b8f4b9ecd894f1edfaa08d9fe730b8d7c8b93e0 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/tqdm/contrib/bells.py @@ -0,0 +1,26 @@ +""" +Even more features than `tqdm.auto` (all the bells & whistles): + +- `tqdm.auto` +- `tqdm.tqdm.pandas` +- `tqdm.contrib.telegram` + + uses `${TQDM_TELEGRAM_TOKEN}` and `${TQDM_TELEGRAM_CHAT_ID}` +- `tqdm.contrib.discord` + + uses `${TQDM_DISCORD_TOKEN}` and `${TQDM_DISCORD_CHANNEL_ID}` +""" +__all__ = ['tqdm', 'trange'] +import warnings +from os import getenv + +if getenv("TQDM_SLACK_TOKEN") and getenv("TQDM_SLACK_CHANNEL"): + from .slack import tqdm, trange +elif getenv("TQDM_TELEGRAM_TOKEN") and getenv("TQDM_TELEGRAM_CHAT_ID"): + from .telegram import tqdm, trange +elif getenv("TQDM_DISCORD_TOKEN") and getenv("TQDM_DISCORD_CHANNEL_ID"): + from .discord import tqdm, trange +else: + from ..auto import tqdm, trange + +with warnings.catch_warnings(): + warnings.simplefilter("ignore", category=FutureWarning) + tqdm.pandas() diff --git a/.venv/lib/python3.13/site-packages/tqdm/contrib/concurrent.py b/.venv/lib/python3.13/site-packages/tqdm/contrib/concurrent.py new file mode 100644 index 0000000000000000000000000000000000000000..cd81d622a1309df179042159a56cef4f8c309224 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/tqdm/contrib/concurrent.py @@ -0,0 +1,105 @@ +""" +Thin wrappers around `concurrent.futures`. 
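A quick usage sketch for the three wrappers above; each returns the same values as its builtin counterpart, with a progress bar as a side effect (variable names here are illustrative).

```python
from tqdm.contrib import tenumerate, tmap, tzip

letters = ["a", "b", "c"]

for i, ch in tenumerate(letters, start=1):
    pass                                      # (1, 'a'), (2, 'b'), (3, 'c')

pairs = list(tzip(letters, range(3)))         # [('a', 0), ('b', 1), ('c', 2)]
lengths = list(tmap(len, ["xx", "yyy"]))      # [2, 3]
```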
+""" +from contextlib import contextmanager +from operator import length_hint +from os import cpu_count + +from ..auto import tqdm as tqdm_auto +from ..std import TqdmWarning + +__author__ = {"github.com/": ["casperdcl"]} +__all__ = ['thread_map', 'process_map'] + + +@contextmanager +def ensure_lock(tqdm_class, lock_name=""): + """get (create if necessary) and then restore `tqdm_class`'s lock""" + old_lock = getattr(tqdm_class, '_lock', None) # don't create a new lock + lock = old_lock or tqdm_class.get_lock() # maybe create a new lock + lock = getattr(lock, lock_name, lock) # maybe subtype + tqdm_class.set_lock(lock) + yield lock + if old_lock is None: + del tqdm_class._lock + else: + tqdm_class.set_lock(old_lock) + + +def _executor_map(PoolExecutor, fn, *iterables, **tqdm_kwargs): + """ + Implementation of `thread_map` and `process_map`. + + Parameters + ---------- + tqdm_class : [default: tqdm.auto.tqdm]. + max_workers : [default: min(32, cpu_count() + 4)]. + chunksize : [default: 1]. + lock_name : [default: "":str]. + """ + kwargs = tqdm_kwargs.copy() + if "total" not in kwargs: + kwargs["total"] = length_hint(iterables[0]) + tqdm_class = kwargs.pop("tqdm_class", tqdm_auto) + max_workers = kwargs.pop("max_workers", min(32, cpu_count() + 4)) + chunksize = kwargs.pop("chunksize", 1) + lock_name = kwargs.pop("lock_name", "") + with ensure_lock(tqdm_class, lock_name=lock_name) as lk: + # share lock in case workers are already using `tqdm` + with PoolExecutor(max_workers=max_workers, initializer=tqdm_class.set_lock, + initargs=(lk,)) as ex: + return list(tqdm_class(ex.map(fn, *iterables, chunksize=chunksize), **kwargs)) + + +def thread_map(fn, *iterables, **tqdm_kwargs): + """ + Equivalent of `list(map(fn, *iterables))` + driven by `concurrent.futures.ThreadPoolExecutor`. + + Parameters + ---------- + tqdm_class : optional + `tqdm` class to use for bars [default: tqdm.auto.tqdm]. + max_workers : int, optional + Maximum number of workers to spawn; passed to + `concurrent.futures.ThreadPoolExecutor.__init__`. + [default: max(32, cpu_count() + 4)]. + """ + from concurrent.futures import ThreadPoolExecutor + return _executor_map(ThreadPoolExecutor, fn, *iterables, **tqdm_kwargs) + + +def process_map(fn, *iterables, **tqdm_kwargs): + """ + Equivalent of `list(map(fn, *iterables))` + driven by `concurrent.futures.ProcessPoolExecutor`. + + Parameters + ---------- + tqdm_class : optional + `tqdm` class to use for bars [default: tqdm.auto.tqdm]. + max_workers : int, optional + Maximum number of workers to spawn; passed to + `concurrent.futures.ProcessPoolExecutor.__init__`. + [default: min(32, cpu_count() + 4)]. + chunksize : int, optional + Size of chunks sent to worker processes; passed to + `concurrent.futures.ProcessPoolExecutor.map`. [default: 1]. + lock_name : str, optional + Member of `tqdm_class.get_lock()` to use [default: mp_lock]. + """ + from concurrent.futures import ProcessPoolExecutor + if iterables and "chunksize" not in tqdm_kwargs: + # default `chunksize=1` has poor performance for large iterables + # (most time spent dispatching items to workers). + longest_iterable_len = max(map(length_hint, iterables)) + if longest_iterable_len > 1000: + from warnings import warn + warn("Iterable length %d > 1000 but `chunksize` is not set." + " This may seriously degrade multiprocess performance." + " Set `chunksize=1` or more." 
% longest_iterable_len, + TqdmWarning, stacklevel=2) + if "lock_name" not in tqdm_kwargs: + tqdm_kwargs = tqdm_kwargs.copy() + tqdm_kwargs["lock_name"] = "mp_lock" + return _executor_map(ProcessPoolExecutor, fn, *iterables, **tqdm_kwargs) diff --git a/.venv/lib/python3.13/site-packages/tqdm/contrib/discord.py b/.venv/lib/python3.13/site-packages/tqdm/contrib/discord.py new file mode 100644 index 0000000000000000000000000000000000000000..574baa84bbbeb5afce4a49f23edac894d680ca82 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/tqdm/contrib/discord.py @@ -0,0 +1,156 @@ +""" +Sends updates to a Discord bot. + +Usage: +>>> from tqdm.contrib.discord import tqdm, trange +>>> for i in trange(10, token='{token}', channel_id='{channel_id}'): +... ... + +![screenshot](https://tqdm.github.io/img/screenshot-discord.png) +""" +from os import getenv +from warnings import warn + +from requests import Session +from requests.utils import default_user_agent + +from ..auto import tqdm as tqdm_auto +from ..std import TqdmWarning +from ..version import __version__ +from .utils_worker import MonoWorker + +__author__ = {"github.com/": ["casperdcl", "guigoruiz1"]} +__all__ = ['DiscordIO', 'tqdm_discord', 'tdrange', 'tqdm', 'trange'] + + +class DiscordIO(MonoWorker): + """Non-blocking file-like IO using a Discord Bot.""" + API = "https://discord.com/api/v10" + UA = f"tqdm (https://tqdm.github.io, {__version__}) {default_user_agent()}" + + def __init__(self, token, channel_id): + """Creates a new message in the given `channel_id`.""" + super().__init__() + self.token = token + self.channel_id = channel_id + self.session = Session() + self.text = self.__class__.__name__ + self.message_id + + @property + def message_id(self): + if hasattr(self, '_message_id'): + return self._message_id + try: + res = self.session.post( + f'{self.API}/channels/{self.channel_id}/messages', + headers={'Authorization': f'Bot {self.token}', 'User-Agent': self.UA}, + json={'content': f"`{self.text}`"}).json() + except Exception as e: + tqdm_auto.write(str(e)) + else: + if res.get('error_code') == 429: + warn("Creation rate limit: try increasing `mininterval`.", + TqdmWarning, stacklevel=2) + else: + self._message_id = res['id'] + return self._message_id + + def write(self, s): + """Replaces internal `message_id`'s text with `s`.""" + if not s: + s = "..." + s = s.replace('\r', '').strip() + if s == self.text: + return # avoid duplicate message Bot error + message_id = self.message_id + if message_id is None: + return + self.text = s + try: + future = self.submit( + self.session.patch, + f'{self.API}/channels/{self.channel_id}/messages/{message_id}', + headers={'Authorization': f'Bot {self.token}', 'User-Agent': self.UA}, + json={'content': f"`{self.text}`"}) + except Exception as e: + tqdm_auto.write(str(e)) + else: + return future + + def delete(self): + """Deletes internal `message_id`.""" + try: + future = self.submit( + self.session.delete, + f'{self.API}/channels/{self.channel_id}/messages/{self.message_id}', + headers={'Authorization': f'Bot {self.token}', 'User-Agent': self.UA}) + except Exception as e: + tqdm_auto.write(str(e)) + else: + return future + + +class tqdm_discord(tqdm_auto): + """ + Standard `tqdm.auto.tqdm` but also sends updates to a Discord Bot. + May take a few seconds to create (`__init__`). 
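Returning to `tqdm.contrib.concurrent`, which closed just above: both helpers are drop-in replacements for `list(map(...))`, and the `chunksize` warning fires exactly when `process_map` is fed a long sized iterable without an explicit value. A usage sketch (`square` is an illustrative function; the `__main__` guard matters because `ProcessPoolExecutor` re-imports the calling module on spawn-based platforms):

```python
from tqdm.contrib.concurrent import process_map, thread_map

def square(x):
    return x * x

if __name__ == "__main__":
    # I/O-bound work: threads share one interpreter, low overhead.
    results = thread_map(square, range(100), max_workers=4)
    # CPU-bound work: processes; set chunksize to amortise dispatch cost.
    results = process_map(square, range(10_000), chunksize=64)
```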
+ + - create a discord bot (not public, no requirement of OAuth2 code + grant, only send message permissions) & invite it to a channel: + <https://discordpy.readthedocs.io/en/latest/discord.html> + - copy the bot `{token}` & `{channel_id}` and paste below + + >>> from tqdm.contrib.discord import tqdm, trange + >>> for i in tqdm(iterable, token='{token}', channel_id='{channel_id}'): + ... ... + """ + def __init__(self, *args, **kwargs): + """ + Parameters + ---------- + token : str, required. Discord bot token + [default: ${TQDM_DISCORD_TOKEN}]. + channel_id : int, required. Discord channel ID + [default: ${TQDM_DISCORD_CHANNEL_ID}]. + + See `tqdm.auto.tqdm.__init__` for other parameters. + """ + if not kwargs.get('disable'): + kwargs = kwargs.copy() + self.dio = DiscordIO( + kwargs.pop('token', getenv('TQDM_DISCORD_TOKEN')), + kwargs.pop('channel_id', getenv('TQDM_DISCORD_CHANNEL_ID'))) + super().__init__(*args, **kwargs) + + def display(self, **kwargs): + super().display(**kwargs) + fmt = self.format_dict + if fmt.get('bar_format', None): + fmt['bar_format'] = fmt['bar_format'].replace( + '<bar/>', '{bar:10u}').replace('{bar}', '{bar:10u}') + else: + fmt['bar_format'] = '{l_bar}{bar:10u}{r_bar}' + self.dio.write(self.format_meter(**fmt)) + + def clear(self, *args, **kwargs): + super().clear(*args, **kwargs) + if not self.disable: + self.dio.write("") + + def close(self): + if self.disable: + return + super().close() + if not (self.leave or (self.leave is None and self.pos == 0)): + self.dio.delete() + + +def tdrange(*args, **kwargs): + """Shortcut for `tqdm.contrib.discord.tqdm(range(*args), **kwargs)`.""" + return tqdm_discord(range(*args), **kwargs) + + +# Aliases +tqdm = tqdm_discord +trange = tdrange diff --git a/.venv/lib/python3.13/site-packages/tqdm/contrib/itertools.py b/.venv/lib/python3.13/site-packages/tqdm/contrib/itertools.py new file mode 100644 index 0000000000000000000000000000000000000000..e67651a41a6b8760d9b928ea48239e4611d70315 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/tqdm/contrib/itertools.py @@ -0,0 +1,35 @@ +""" +Thin wrappers around `itertools`. +""" +import itertools + +from ..auto import tqdm as tqdm_auto + +__author__ = {"github.com/": ["casperdcl"]} +__all__ = ['product'] + + +def product(*iterables, **tqdm_kwargs): + """ + Equivalent of `itertools.product`. + + Parameters + ---------- + tqdm_class : [default: tqdm.auto.tqdm]. + """ + kwargs = tqdm_kwargs.copy() + tqdm_class = kwargs.pop("tqdm_class", tqdm_auto) + try: + lens = list(map(len, iterables)) + except TypeError: + total = None + else: + total = 1 + for i in lens: + total *= i + kwargs.setdefault("total", total) + with tqdm_class(**kwargs) as t: + it = itertools.product(*iterables) + for i in it: + yield i + t.update() diff --git a/.venv/lib/python3.13/site-packages/tqdm/contrib/logging.py b/.venv/lib/python3.13/site-packages/tqdm/contrib/logging.py new file mode 100644 index 0000000000000000000000000000000000000000..e06febe37b5d70b5296804c55dca48a397c250e3 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/tqdm/contrib/logging.py @@ -0,0 +1,126 @@ +""" +Helper functionality for interoperability with stdlib `logging`.
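A usage sketch for the `product` wrapper defined above: when every operand is sized, the bar's total is inferred as the product of their lengths; otherwise it falls back to an unbounded bar.

```python
from tqdm.contrib.itertools import product

# 3 * 2 = 6 combinations, tracked by a single bar.
for x, y in product([1, 2, 3], "ab"):
    pass
```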
+""" +import logging +import sys +from contextlib import contextmanager + +try: + from typing import Iterator, List, Optional, Type # noqa: F401 +except ImportError: + pass + +from ..std import tqdm as std_tqdm + + +class _TqdmLoggingHandler(logging.StreamHandler): + def __init__( + self, + tqdm_class=std_tqdm # type: Type[std_tqdm] + ): + super().__init__() + self.tqdm_class = tqdm_class + + def emit(self, record): + try: + msg = self.format(record) + self.tqdm_class.write(msg, file=self.stream) + self.flush() + except (KeyboardInterrupt, SystemExit): + raise + except: # noqa pylint: disable=bare-except + self.handleError(record) + + +def _is_console_logging_handler(handler): + return (isinstance(handler, logging.StreamHandler) + and handler.stream in {sys.stdout, sys.stderr}) + + +def _get_first_found_console_logging_handler(handlers): + for handler in handlers: + if _is_console_logging_handler(handler): + return handler + + +@contextmanager +def logging_redirect_tqdm( + loggers=None, # type: Optional[List[logging.Logger]], + tqdm_class=std_tqdm # type: Type[std_tqdm] +): + # type: (...) -> Iterator[None] + """ + Context manager redirecting console logging to `tqdm.write()`, leaving + other logging handlers (e.g. log files) unaffected. + + Parameters + ---------- + loggers : list, optional + Which handlers to redirect (default: [logging.root]). + tqdm_class : optional + + Example + ------- + ```python + import logging + from tqdm import trange + from tqdm.contrib.logging import logging_redirect_tqdm + + LOG = logging.getLogger(__name__) + + if __name__ == '__main__': + logging.basicConfig(level=logging.INFO) + with logging_redirect_tqdm(): + for i in trange(9): + if i == 4: + LOG.info("console logging redirected to `tqdm.write()`") + # logging restored + ``` + """ + if loggers is None: + loggers = [logging.root] + original_handlers_list = [logger.handlers for logger in loggers] + try: + for logger in loggers: + tqdm_handler = _TqdmLoggingHandler(tqdm_class) + orig_handler = _get_first_found_console_logging_handler(logger.handlers) + if orig_handler is not None: + tqdm_handler.setFormatter(orig_handler.formatter) + tqdm_handler.stream = orig_handler.stream + logger.handlers = [ + handler for handler in logger.handlers + if not _is_console_logging_handler(handler)] + [tqdm_handler] + yield + finally: + for logger, original_handlers in zip(loggers, original_handlers_list): + logger.handlers = original_handlers + + +@contextmanager +def tqdm_logging_redirect( + *args, + # loggers=None, # type: Optional[List[logging.Logger]] + # tqdm=None, # type: Optional[Type[tqdm.tqdm]] + **kwargs +): + # type: (...) -> Iterator[None] + """ + Convenience shortcut for: + ```python + with tqdm_class(*args, **tqdm_kwargs) as pbar: + with logging_redirect_tqdm(loggers=loggers, tqdm_class=tqdm_class): + yield pbar + ``` + + Parameters + ---------- + tqdm_class : optional, (default: tqdm.std.tqdm). + loggers : optional, list. + **tqdm_kwargs : passed to `tqdm_class`. 
+ """ + tqdm_kwargs = kwargs.copy() + loggers = tqdm_kwargs.pop('loggers', None) + tqdm_class = tqdm_kwargs.pop('tqdm_class', std_tqdm) + with tqdm_class(*args, **tqdm_kwargs) as pbar: + with logging_redirect_tqdm(loggers=loggers, tqdm_class=tqdm_class): + yield pbar diff --git a/.venv/lib/python3.13/site-packages/tqdm/contrib/slack.py b/.venv/lib/python3.13/site-packages/tqdm/contrib/slack.py new file mode 100644 index 0000000000000000000000000000000000000000..9bca8ee98904ce869a4f8d6417bbdc4f00b38751 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/tqdm/contrib/slack.py @@ -0,0 +1,120 @@ +""" +Sends updates to a Slack app. + +Usage: +>>> from tqdm.contrib.slack import tqdm, trange +>>> for i in trange(10, token='{token}', channel='{channel}'): +... ... + +![screenshot](https://tqdm.github.io/img/screenshot-slack.png) +""" +import logging +from os import getenv + +try: + from slack_sdk import WebClient +except ImportError: + raise ImportError("Please `pip install slack-sdk`") + +from ..auto import tqdm as tqdm_auto +from .utils_worker import MonoWorker + +__author__ = {"github.com/": ["0x2b3bfa0", "casperdcl"]} +__all__ = ['SlackIO', 'tqdm_slack', 'tsrange', 'tqdm', 'trange'] + + +class SlackIO(MonoWorker): + """Non-blocking file-like IO using a Slack app.""" + def __init__(self, token, channel): + """Creates a new message in the given `channel`.""" + super().__init__() + self.client = WebClient(token=token) + self.text = self.__class__.__name__ + try: + self.message = self.client.chat_postMessage(channel=channel, text=self.text) + except Exception as e: + tqdm_auto.write(str(e)) + self.message = None + + def write(self, s): + """Replaces internal `message`'s text with `s`.""" + if not s: + s = "..." + s = s.replace('\r', '').strip() + if s == self.text: + return # skip duplicate message + message = self.message + if message is None: + return + self.text = s + try: + future = self.submit(self.client.chat_update, channel=message['channel'], + ts=message['ts'], text='`' + s + '`') + except Exception as e: + tqdm_auto.write(str(e)) + else: + return future + + +class tqdm_slack(tqdm_auto): + """ + Standard `tqdm.auto.tqdm` but also sends updates to a Slack app. + May take a few seconds to create (`__init__`). + + - create a Slack app with the `chat:write` scope & invite it to a + channel: + - copy the bot `{token}` & `{channel}` and paste below + >>> from tqdm.contrib.slack import tqdm, trange + >>> for i in tqdm(iterable, token='{token}', channel='{channel}'): + ... ... + """ + def __init__(self, *args, **kwargs): + """ + Parameters + ---------- + token : str, required. Slack token + [default: ${TQDM_SLACK_TOKEN}]. + channel : int, required. Slack channel + [default: ${TQDM_SLACK_CHANNEL}]. + mininterval : float, optional. + Minimum of [default: 1.5] to avoid rate limit. + + See `tqdm.auto.tqdm.__init__` for other parameters. 
+ """ + if not kwargs.get('disable'): + kwargs = kwargs.copy() + logging.getLogger("HTTPClient").setLevel(logging.WARNING) + self.sio = SlackIO( + kwargs.pop('token', getenv("TQDM_SLACK_TOKEN")), + kwargs.pop('channel', getenv("TQDM_SLACK_CHANNEL"))) + kwargs['mininterval'] = max(1.5, kwargs.get('mininterval', 1.5)) + super().__init__(*args, **kwargs) + + def display(self, **kwargs): + super().display(**kwargs) + fmt = self.format_dict + if fmt.get('bar_format', None): + fmt['bar_format'] = fmt['bar_format'].replace( + '', '`{bar:10}`').replace('{bar}', '`{bar:10u}`') + else: + fmt['bar_format'] = '{l_bar}`{bar:10}`{r_bar}' + if fmt['ascii'] is False: + fmt['ascii'] = [":black_square:", ":small_blue_diamond:", ":large_blue_diamond:", + ":large_blue_square:"] + fmt['ncols'] = 336 + self.sio.write(self.format_meter(**fmt)) + + def clear(self, *args, **kwargs): + super().clear(*args, **kwargs) + if not self.disable: + self.sio.write("") + + +def tsrange(*args, **kwargs): + """Shortcut for `tqdm.contrib.slack.tqdm(range(*args), **kwargs)`.""" + return tqdm_slack(range(*args), **kwargs) + + +# Aliases +tqdm = tqdm_slack +trange = tsrange diff --git a/.venv/lib/python3.13/site-packages/tqdm/contrib/telegram.py b/.venv/lib/python3.13/site-packages/tqdm/contrib/telegram.py new file mode 100644 index 0000000000000000000000000000000000000000..019151800bc0c4c4fc543314b6398aa602b0692a --- /dev/null +++ b/.venv/lib/python3.13/site-packages/tqdm/contrib/telegram.py @@ -0,0 +1,153 @@ +""" +Sends updates to a Telegram bot. + +Usage: +>>> from tqdm.contrib.telegram import tqdm, trange +>>> for i in trange(10, token='{token}', chat_id='{chat_id}'): +... ... + +![screenshot](https://tqdm.github.io/img/screenshot-telegram.gif) +""" +from os import getenv +from warnings import warn + +from requests import Session + +from ..auto import tqdm as tqdm_auto +from ..std import TqdmWarning +from .utils_worker import MonoWorker + +__author__ = {"github.com/": ["casperdcl"]} +__all__ = ['TelegramIO', 'tqdm_telegram', 'ttgrange', 'tqdm', 'trange'] + + +class TelegramIO(MonoWorker): + """Non-blocking file-like IO using a Telegram Bot.""" + API = 'https://api.telegram.org/bot' + + def __init__(self, token, chat_id): + """Creates a new message in the given `chat_id`.""" + super().__init__() + self.token = token + self.chat_id = chat_id + self.session = Session() + self.text = self.__class__.__name__ + self.message_id + + @property + def message_id(self): + if hasattr(self, '_message_id'): + return self._message_id + try: + res = self.session.post( + self.API + '%s/sendMessage' % self.token, + data={'text': '`' + self.text + '`', 'chat_id': self.chat_id, + 'parse_mode': 'MarkdownV2'}).json() + except Exception as e: + tqdm_auto.write(str(e)) + else: + if res.get('error_code') == 429: + warn("Creation rate limit: try increasing `mininterval`.", + TqdmWarning, stacklevel=2) + else: + self._message_id = res['result']['message_id'] + return self._message_id + + def write(self, s): + """Replaces internal `message_id`'s text with `s`.""" + if not s: + s = "..." 
+ s = s.replace('\r', '').strip() + if s == self.text: + return # avoid duplicate message Bot error + message_id = self.message_id + if message_id is None: + return + self.text = s + try: + future = self.submit( + self.session.post, self.API + '%s/editMessageText' % self.token, + data={'text': '`' + s + '`', 'chat_id': self.chat_id, + 'message_id': message_id, 'parse_mode': 'MarkdownV2'}) + except Exception as e: + tqdm_auto.write(str(e)) + else: + return future + + def delete(self): + """Deletes internal `message_id`.""" + try: + future = self.submit( + self.session.post, self.API + '%s/deleteMessage' % self.token, + data={'chat_id': self.chat_id, 'message_id': self.message_id}) + except Exception as e: + tqdm_auto.write(str(e)) + else: + return future + + +class tqdm_telegram(tqdm_auto): + """ + Standard `tqdm.auto.tqdm` but also sends updates to a Telegram Bot. + May take a few seconds to create (`__init__`). + + - create a bot <https://core.telegram.org/bots#6-botfather> + - copy its `{token}` + - add the bot to a chat and send it a message such as `/start` + - go to <https://api.telegram.org/bot{token}/getUpdates> to find out + the `{chat_id}` + - paste the `{token}` & `{chat_id}` below + + >>> from tqdm.contrib.telegram import tqdm, trange + >>> for i in tqdm(iterable, token='{token}', chat_id='{chat_id}'): + ... ... + """ + def __init__(self, *args, **kwargs): + """ + Parameters + ---------- + token : str, required. Telegram token + [default: ${TQDM_TELEGRAM_TOKEN}]. + chat_id : str, required. Telegram chat ID + [default: ${TQDM_TELEGRAM_CHAT_ID}]. + + See `tqdm.auto.tqdm.__init__` for other parameters. + """ + if not kwargs.get('disable'): + kwargs = kwargs.copy() + self.tgio = TelegramIO( + kwargs.pop('token', getenv('TQDM_TELEGRAM_TOKEN')), + kwargs.pop('chat_id', getenv('TQDM_TELEGRAM_CHAT_ID'))) + super().__init__(*args, **kwargs) + + def display(self, **kwargs): + super().display(**kwargs) + fmt = self.format_dict + if fmt.get('bar_format', None): + fmt['bar_format'] = fmt['bar_format'].replace( + '<bar/>', '{bar:10u}').replace('{bar}', '{bar:10u}') + else: + fmt['bar_format'] = '{l_bar}{bar:10u}{r_bar}' + self.tgio.write(self.format_meter(**fmt)) + + def clear(self, *args, **kwargs): + super().clear(*args, **kwargs) + if not self.disable: + self.tgio.write("") + + def close(self): + if self.disable: + return + super().close() + if not (self.leave or (self.leave is None and self.pos == 0)): + self.tgio.delete() + + +def ttgrange(*args, **kwargs): + """Shortcut for `tqdm.contrib.telegram.tqdm(range(*args), **kwargs)`.""" + return tqdm_telegram(range(*args), **kwargs) + + +# Aliases +tqdm = tqdm_telegram +trange = ttgrange diff --git a/.venv/lib/python3.13/site-packages/tqdm/contrib/utils_worker.py b/.venv/lib/python3.13/site-packages/tqdm/contrib/utils_worker.py new file mode 100644 index 0000000000000000000000000000000000000000..2a03a2a8930001e37938836196e0d15b649b07a8 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/tqdm/contrib/utils_worker.py @@ -0,0 +1,38 @@ +""" +IO/concurrency helpers for `tqdm.contrib`. +""" +from collections import deque +from concurrent.futures import ThreadPoolExecutor + +from ..auto import tqdm as tqdm_auto + +__author__ = {"github.com/": ["casperdcl"]} +__all__ = ['MonoWorker'] + + +class MonoWorker(object): + """ + Supports one running task and one waiting task. + The waiting task is the most recent submitted (others are discarded).
+ """ + def __init__(self): + self.pool = ThreadPoolExecutor(max_workers=1) + self.futures = deque([], 2) + + def submit(self, func, *args, **kwargs): + """`func(*args, **kwargs)` may replace currently waiting task.""" + futures = self.futures + if len(futures) == futures.maxlen: + running = futures.popleft() + if not running.done(): + if len(futures): # clear waiting + waiting = futures.pop() + waiting.cancel() + futures.appendleft(running) # re-insert running + try: + waiting = self.pool.submit(func, *args, **kwargs) + except Exception as e: + tqdm_auto.write(str(e)) + else: + futures.append(waiting) + return waiting diff --git a/.venv/lib/python3.13/site-packages/urllib3-2.5.0.dist-info/licenses/LICENSE.txt b/.venv/lib/python3.13/site-packages/urllib3-2.5.0.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..e6183d0276b26c5b87aecccf8d0d5bcd7b1148d4 --- /dev/null +++ b/.venv/lib/python3.13/site-packages/urllib3-2.5.0.dist-info/licenses/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2008-2020 Andrey Petrov and contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/__init__.cpython-313.pyc b/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/__init__.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..94d98ceec51086e093b90b6e842e8067a8ed365c Binary files /dev/null and b/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/__init__.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/table_vs16.cpython-313.pyc b/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/table_vs16.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c42cb001852d21b2ad39acefafc7f309ec7a320d Binary files /dev/null and b/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/table_vs16.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/table_wide.cpython-313.pyc b/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/table_wide.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c94832363fb1755ad95b17f864037f0a2f0833f1 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/table_wide.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/table_zero.cpython-313.pyc b/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/table_zero.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1ab2540c02fd72c8dd77156a38ca43ecbb580f0 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/table_zero.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/unicode_versions.cpython-313.pyc b/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/unicode_versions.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d13bfa380df5806db6260657b1e34e31b1deaaf2 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/unicode_versions.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/wcwidth.cpython-313.pyc b/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/wcwidth.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f494215f032b49236297b6abb360b7e6a1a12554 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/wcwidth/__pycache__/wcwidth.cpython-313.pyc differ diff --git a/.venv/lib/python3.13/site-packages/yaml/__pycache__/parser.cpython-313.pyc b/.venv/lib/python3.13/site-packages/yaml/__pycache__/parser.cpython-313.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8012dcea71b6f6af01a8d67abc49db16fc0ccaf2 Binary files /dev/null and b/.venv/lib/python3.13/site-packages/yaml/__pycache__/parser.cpython-313.pyc differ