"""
A thin, practical wrapper around terminal capabilities in Python.

http://pypi.python.org/pypi/blessed
"""
# std imports
import sys as _sys
import platform as _platform

# isort: off
# Windows needs its own Terminal implementation; all other platforms share
# the curses-based one.
_IS_WINDOWS = _platform.system() == 'Windows'
if _IS_WINDOWS:
    from blessed.win_terminal import Terminal
else:
    from blessed.terminal import Terminal  # type: ignore

# Refuse to import on early Python 3 releases whose curses.tparm() is broken.
_BAD_TPARM = (3, 0, 0) <= _sys.version_info[:3] < (3, 2, 3)
if _BAD_TPARM:
    # Good till 3.2.10
    # Python 3.x < 3.2.3 has a bug in which tparm() erroneously takes a string.
    raise ImportError('Blessed needs Python 3.2.3 or greater for Python 3 '
                      'support due to http://bugs.python.org/issue10570.')

__all__ = ('Terminal',)
__version__ = "1.20.0"
index 0000000000000000000000000000000000000000..5f564071e86509cb646c0d603650ec9f63e4bc1c Binary files /dev/null and b/vllm/lib/python3.10/site-packages/blessed/__pycache__/colorspace.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/blessed/__pycache__/formatters.cpython-310.pyc b/vllm/lib/python3.10/site-packages/blessed/__pycache__/formatters.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8faff38a9dae806545a46358ea2a8ffff02cd75e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/blessed/__pycache__/formatters.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/blessed/__pycache__/keyboard.cpython-310.pyc b/vllm/lib/python3.10/site-packages/blessed/__pycache__/keyboard.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0f8ca5d218c0081488bd7f4c39809a48b505176a Binary files /dev/null and b/vllm/lib/python3.10/site-packages/blessed/__pycache__/keyboard.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/blessed/__pycache__/sequences.cpython-310.pyc b/vllm/lib/python3.10/site-packages/blessed/__pycache__/sequences.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..37f3a61093fa3621b157ef933736793b827e9122 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/blessed/__pycache__/sequences.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/blessed/__pycache__/terminal.cpython-310.pyc b/vllm/lib/python3.10/site-packages/blessed/__pycache__/terminal.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfaf559ef93427b4299804cba2d995cef5860edd Binary files /dev/null and b/vllm/lib/python3.10/site-packages/blessed/__pycache__/terminal.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/blessed/__pycache__/win_terminal.cpython-310.pyc b/vllm/lib/python3.10/site-packages/blessed/__pycache__/win_terminal.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..6b7ba3d2e0fe6b7117d5b5b34b395aaa1f91984e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/blessed/__pycache__/win_terminal.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/blessed/_capabilities.py b/vllm/lib/python3.10/site-packages/blessed/_capabilities.py new file mode 100644 index 0000000000000000000000000000000000000000..c4df54bca6ce4457131eaebcfc77b4e382f0b0dc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/_capabilities.py @@ -0,0 +1,168 @@ +"""Terminal capability builder patterns.""" +# std imports +import re +from collections import OrderedDict + +__all__ = ( + 'CAPABILITY_DATABASE', + 'CAPABILITIES_RAW_MIXIN', + 'CAPABILITIES_ADDITIVES', + 'CAPABILITIES_CAUSE_MOVEMENT', +) + +CAPABILITY_DATABASE = OrderedDict(( + ('bell', ('bel', {})), + ('carriage_return', ('cr', {})), + ('change_scroll_region', ('csr', {'nparams': 2})), + ('clear_all_tabs', ('tbc', {})), + ('clear_screen', ('clear', {})), + ('clr_bol', ('el1', {})), + ('clr_eol', ('el', {})), + ('clr_eos', ('clear_eos', {})), + ('column_address', ('hpa', {'nparams': 1})), + ('cursor_address', ('cup', {'nparams': 2, 'match_grouped': True})), + ('cursor_down', ('cud1', {})), + ('cursor_home', ('home', {})), + ('cursor_invisible', ('civis', {})), + ('cursor_left', ('cub1', {})), + ('cursor_normal', ('cnorm', {})), + ('cursor_report', ('u6', {'nparams': 2, 'match_grouped': True})), + ('cursor_right', ('cuf1', {})), + ('cursor_up', ('cuu1', {})), + ('cursor_visible', ('cvvis', {})), + ('delete_character', ('dch1', {})), + ('delete_line', ('dl1', {})), + ('enter_blink_mode', ('blink', {})), + ('enter_bold_mode', ('bold', {})), + ('enter_dim_mode', ('dim', {})), + ('enter_fullscreen', ('smcup', {})), + ('enter_standout_mode', ('standout', {})), + ('enter_superscript_mode', ('superscript', {})), + ('enter_susimpleript_mode', ('susimpleript', {})), + ('enter_underline_mode', ('underline', {})), + 
('erase_chars', ('ech', {'nparams': 1})), + ('exit_alt_charset_mode', ('rmacs', {})), + ('exit_am_mode', ('rmam', {})), + ('exit_attribute_mode', ('sgr0', {})), + ('exit_ca_mode', ('rmcup', {})), + ('exit_fullscreen', ('rmcup', {})), + ('exit_insert_mode', ('rmir', {})), + ('exit_standout_mode', ('rmso', {})), + ('exit_underline_mode', ('rmul', {})), + ('flash_hook', ('hook', {})), + ('flash_screen', ('flash', {})), + ('insert_line', ('il1', {})), + ('keypad_local', ('rmkx', {})), + ('keypad_xmit', ('smkx', {})), + ('meta_off', ('rmm', {})), + ('meta_on', ('smm', {})), + ('orig_pair', ('op', {})), + ('parm_down_cursor', ('cud', {'nparams': 1})), + ('parm_left_cursor', ('cub', {'nparams': 1, 'match_grouped': True})), + ('parm_dch', ('dch', {'nparams': 1})), + ('parm_delete_line', ('dl', {'nparams': 1})), + ('parm_ich', ('ich', {'nparams': 1})), + ('parm_index', ('indn', {'nparams': 1})), + ('parm_insert_line', ('il', {'nparams': 1})), + ('parm_right_cursor', ('cuf', {'nparams': 1, 'match_grouped': True})), + ('parm_rindex', ('rin', {'nparams': 1})), + ('parm_up_cursor', ('cuu', {'nparams': 1})), + ('print_screen', ('mc0', {})), + ('prtr_off', ('mc4', {})), + ('prtr_on', ('mc5', {})), + ('reset_1string', ('r1', {})), + ('reset_2string', ('r2', {})), + ('reset_3string', ('r3', {})), + ('restore_cursor', ('rc', {})), + ('row_address', ('vpa', {'nparams': 1})), + ('save_cursor', ('sc', {})), + ('scroll_forward', ('ind', {})), + ('scroll_reverse', ('rev', {})), + ('set0_des_seq', ('s0ds', {})), + ('set1_des_seq', ('s1ds', {})), + ('set2_des_seq', ('s2ds', {})), + ('set3_des_seq', ('s3ds', {})), + # this 'color' is deceiving, but often matching, and a better match + # than set_a_attributes1 or set_a_foreground. 
+ ('color', ('_foreground_color', {'nparams': 1, 'match_any': True, + 'numeric': 1})), + ('set_a_foreground', ('color', {'nparams': 1, 'match_any': True, + 'numeric': 1})), + ('set_a_background', ('on_color', {'nparams': 1, 'match_any': True, + 'numeric': 1})), + ('set_tab', ('hts', {})), + ('tab', ('ht', {})), + ('italic', ('sitm', {})), + ('no_italic', ('sitm', {})), +)) + +CAPABILITIES_RAW_MIXIN = { + 'bell': re.escape('\a'), + 'carriage_return': re.escape('\r'), + 'cursor_left': re.escape('\b'), + 'cursor_report': re.escape('\x1b') + r'\[(\d+)\;(\d+)R', + 'cursor_right': re.escape('\x1b') + r'\[C', + 'exit_attribute_mode': re.escape('\x1b') + r'\[m', + 'parm_left_cursor': re.escape('\x1b') + r'\[(\d+)D', + 'parm_right_cursor': re.escape('\x1b') + r'\[(\d+)C', + 'restore_cursor': re.escape(r'\x1b\[u'), + 'save_cursor': re.escape(r'\x1b\[s'), + 'scroll_forward': re.escape('\n'), + 'set0_des_seq': re.escape('\x1b(B'), + 'tab': re.escape('\t'), +} +_ANY_NOTESC = '[^' + re.escape('\x1b') + ']*' + +CAPABILITIES_ADDITIVES = { + 'link': ('link', + re.escape('\x1b') + r'\]8;' + _ANY_NOTESC + ';' + + _ANY_NOTESC + re.escape('\x1b') + '\\\\'), + 'color256': ('color', re.escape('\x1b') + r'\[38;5;\d+m'), + 'on_color256': ('on_color', re.escape('\x1b') + r'\[48;5;\d+m'), + 'color_rgb': ('color_rgb', re.escape('\x1b') + r'\[38;2;\d+;\d+;\d+m'), + 'on_color_rgb': ('on_color_rgb', re.escape('\x1b') + r'\[48;2;\d+;\d+;\d+m'), + 'shift_in': ('', re.escape('\x0f')), + 'shift_out': ('', re.escape('\x0e')), + # sgr(...) outputs strangely, use the basic ANSI/EMCA-48 codes here. + 'set_a_attributes1': ( + 'sgr', re.escape('\x1b') + r'\[\d+m'), + 'set_a_attributes2': ( + 'sgr', re.escape('\x1b') + r'\[\d+\;\d+m'), + 'set_a_attributes3': ( + 'sgr', re.escape('\x1b') + r'\[\d+\;\d+\;\d+m'), + 'set_a_attributes4': ( + 'sgr', re.escape('\x1b') + r'\[\d+\;\d+\;\d+\;\d+m'), + # this helps where xterm's sgr0 includes set0_des_seq, we'd + # rather like to also match this immediate substring. 
+ 'sgr0': ('sgr0', re.escape('\x1b') + r'\[m'), + 'backspace': ('', re.escape('\b')), + 'ascii_tab': ('', re.escape('\t')), + 'clr_eol': ('', re.escape('\x1b[K')), + 'clr_eol0': ('', re.escape('\x1b[0K')), + 'clr_bol': ('', re.escape('\x1b[1K')), + 'clr_eosK': ('', re.escape('\x1b[2K')), +} + +CAPABILITIES_CAUSE_MOVEMENT = ( + 'ascii_tab', + 'backspace', + 'carriage_return', + 'clear_screen', + 'column_address', + 'cursor_address', + 'cursor_down', + 'cursor_home', + 'cursor_left', + 'cursor_right', + 'cursor_up', + 'enter_fullscreen', + 'exit_fullscreen', + 'parm_down_cursor', + 'parm_left_cursor', + 'parm_right_cursor', + 'parm_up_cursor', + 'restore_cursor', + 'row_address', + 'scroll_forward', + 'tab', +) diff --git a/vllm/lib/python3.10/site-packages/blessed/_capabilities.pyi b/vllm/lib/python3.10/site-packages/blessed/_capabilities.pyi new file mode 100644 index 0000000000000000000000000000000000000000..04c59c35fa4944cbdee0c03b2ce3b226718ba997 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/_capabilities.pyi @@ -0,0 +1,7 @@ +# std imports +from typing import Any, Dict, Tuple, OrderedDict + +CAPABILITY_DATABASE: OrderedDict[str, Tuple[str, Dict[str, Any]]] +CAPABILITIES_RAW_MIXIN: Dict[str, str] +CAPABILITIES_ADDITIVES: Dict[str, Tuple[str, str]] +CAPABILITIES_CAUSE_MOVEMENT: Tuple[str, ...] diff --git a/vllm/lib/python3.10/site-packages/blessed/color.py b/vllm/lib/python3.10/site-packages/blessed/color.py new file mode 100644 index 0000000000000000000000000000000000000000..482fc0e11eddff8d0c127e271ee7de2d4299465d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/color.py @@ -0,0 +1,258 @@ +# -*- coding: utf-8 -*- +""" +Sub-module providing color functions. + +References, + +- https://en.wikipedia.org/wiki/Color_difference +- http://www.easyrgb.com/en/math.php +- Measuring Colour by R.W.G. Hunt and M.R. 
# -*- coding: utf-8 -*-
"""
Sub-module providing color functions.

References,

- https://en.wikipedia.org/wiki/Color_difference
- http://www.easyrgb.com/en/math.php
- Measuring Colour by R.W.G. Hunt and M.R. Pointer
"""

# std imports
from math import cos, exp, sin, sqrt, atan2

# isort: off
try:
    from functools import lru_cache
except ImportError:
    # lru_cache was added in Python 3.2
    from backports.functools_lru_cache import lru_cache


def rgb_to_xyz(red, green, blue):
    """
    Convert standard RGB color to XYZ color.

    :arg int red: RGB value of Red.
    :arg int green: RGB value of Green.
    :arg int blue: RGB value of Blue.
    :returns: Tuple (X, Y, Z) representing XYZ color
    :rtype: tuple

    D65/2° standard illuminant
    """
    rgb = []
    for val in red, green, blue:
        val /= 255.0
        # inverse sRGB companding: undo the gamma encoding of each channel
        if val > 0.04045:
            val = pow((val + 0.055) / 1.055, 2.4)
        else:
            val /= 12.92
        val *= 100
        rgb.append(val)

    red, green, blue = rgb  # pylint: disable=unbalanced-tuple-unpacking
    # linear RGB -> XYZ via the sRGB/D65 transformation matrix
    x_val = red * 0.4124 + green * 0.3576 + blue * 0.1805
    y_val = red * 0.2126 + green * 0.7152 + blue * 0.0722
    z_val = red * 0.0193 + green * 0.1192 + blue * 0.9505

    return x_val, y_val, z_val


def xyz_to_lab(x_val, y_val, z_val):
    """
    Convert XYZ color to CIE-Lab color.

    :arg float x_val: XYZ value of X.
    :arg float y_val: XYZ value of Y.
    :arg float z_val: XYZ value of Z.
    :returns: Tuple (L, a, b) representing CIE-Lab color
    :rtype: tuple

    D65/2° standard illuminant
    """
    xyz = []
    # normalize against the D65/2° white-point reference values
    for val, ref in (x_val, 95.047), (y_val, 100.0), (z_val, 108.883):
        val /= ref
        val = pow(val, 1 / 3.0) if val > 0.008856 else 7.787 * val + 16 / 116.0
        xyz.append(val)

    x_val, y_val, z_val = xyz  # pylint: disable=unbalanced-tuple-unpacking
    cie_l = 116 * y_val - 16
    cie_a = 500 * (x_val - y_val)
    cie_b = 200 * (y_val - z_val)

    return cie_l, cie_a, cie_b


@lru_cache(maxsize=256)
def rgb_to_lab(red, green, blue):
    """
    Convert RGB color to CIE-Lab color.

    :arg int red: RGB value of Red.
    :arg int green: RGB value of Green.
    :arg int blue: RGB value of Blue.
    :returns: Tuple (L, a, b) representing CIE-Lab color
    :rtype: tuple

    D65/2° standard illuminant
    """
    return xyz_to_lab(*rgb_to_xyz(red, green, blue))


def dist_rgb(rgb1, rgb2):
    """
    Determine distance between two rgb colors.

    :arg tuple rgb1: RGB color definition
    :arg tuple rgb2: RGB color definition
    :returns: Square of the distance between provided colors
    :rtype: float

    This works by treating RGB colors as coordinates in three dimensional
    space and finding the closest point within the configured color range
    using the formula::

        d^2 = (r2 - r1)^2 + (g2 - g1)^2 + (b2 - b1)^2

    For efficiency, the square of the distance is returned
    which is sufficient for comparisons
    """
    return sum(pow(rgb1[idx] - rgb2[idx], 2) for idx in (0, 1, 2))


def dist_rgb_weighted(rgb1, rgb2):
    """
    Determine the weighted distance between two rgb colors.

    :arg tuple rgb1: RGB color definition
    :arg tuple rgb2: RGB color definition
    :returns: Square of the distance between provided colors
    :rtype: float

    Similar to a standard distance formula, the values are weighted
    to approximate human perception of color differences

    For efficiency, the square of the distance is returned
    which is sufficient for comparisons
    """
    # "redmean" approximation: weight R/B by where the colors sit on the
    # red axis, weight G most heavily (the eye is most sensitive to green)
    red_mean = (rgb1[0] + rgb2[0]) / 2.0

    return ((2 + red_mean / 256) * pow(rgb1[0] - rgb2[0], 2) +
            4 * pow(rgb1[1] - rgb2[1], 2) +
            (2 + (255 - red_mean) / 256) * pow(rgb1[2] - rgb2[2], 2))


def dist_cie76(rgb1, rgb2):
    """
    Determine distance between two rgb colors using the CIE76 algorithm.

    :arg tuple rgb1: RGB color definition
    :arg tuple rgb2: RGB color definition
    :returns: Square of the distance between provided colors
    :rtype: float

    For efficiency, the square of the distance is returned
    which is sufficient for comparisons
    """
    # FIX: docstring previously claimed "the CIE94 algorithm"; this is the
    # plain CIE76 delta-E — a Euclidean distance in Lab space.
    l_1, a_1, b_1 = rgb_to_lab(*rgb1)
    l_2, a_2, b_2 = rgb_to_lab(*rgb2)
    return pow(l_1 - l_2, 2) + pow(a_1 - a_2, 2) + pow(b_1 - b_2, 2)


def dist_cie94(rgb1, rgb2):
    # pylint: disable=too-many-locals
    """
    Determine distance between two rgb colors using the CIE94 algorithm.

    :arg tuple rgb1: RGB color definition
    :arg tuple rgb2: RGB color definition
    :returns: Square of the distance between provided colors
    :rtype: float

    For efficiency, the square of the distance is returned
    which is sufficient for comparisons
    """
    l_1, a_1, b_1 = rgb_to_lab(*rgb1)
    l_2, a_2, b_2 = rgb_to_lab(*rgb2)

    # graphic-arts constants of the CIE94 formula
    s_l = k_l = k_c = k_h = 1
    k_1 = 0.045
    k_2 = 0.015

    delta_l = l_1 - l_2
    delta_a = a_1 - a_2
    delta_b = b_1 - b_2
    c_1 = sqrt(a_1 ** 2 + b_1 ** 2)
    c_2 = sqrt(a_2 ** 2 + b_2 ** 2)
    delta_c = c_1 - c_2
    delta_h = sqrt(delta_a ** 2 + delta_b ** 2 + delta_c ** 2)
    s_c = 1 + k_1 * c_1
    s_h = 1 + k_2 * c_1

    return ((delta_l / (k_l * s_l)) ** 2 +  # pylint: disable=superfluous-parens
            (delta_c / (k_c * s_c)) ** 2 +
            (delta_h / (k_h * s_h)) ** 2)


def dist_cie2000(rgb1, rgb2):
    # pylint: disable=too-many-locals
    """
    Determine distance between two rgb colors using the CIE2000 algorithm.

    :arg tuple rgb1: RGB color definition
    :arg tuple rgb2: RGB color definition
    :returns: Square of the distance between provided colors
    :rtype: float

    For efficiency, the square of the distance is returned
    which is sufficient for comparisons
    """
    # NOTE(review): this implementation mixes atan2's radian output with
    # degree-based constants (% 360, cos(h_mean - 30), ...) — an apparently
    # deliberate approximation kept unchanged for compatibility; it remains
    # deterministic and monotone enough for nearest-color comparisons.
    s_l = k_l = k_c = k_h = 1

    l_1, a_1, b_1 = rgb_to_lab(*rgb1)
    l_2, a_2, b_2 = rgb_to_lab(*rgb2)

    delta_l = l_2 - l_1
    l_mean = (l_1 + l_2) / 2

    c_1 = sqrt(a_1 ** 2 + b_1 ** 2)
    c_2 = sqrt(a_2 ** 2 + b_2 ** 2)
    c_mean = (c_1 + c_2) / 2
    delta_c = c_1 - c_2

    g_x = sqrt(c_mean ** 7 / (c_mean ** 7 + 25 ** 7))
    h_1 = atan2(b_1, a_1 + (a_1 / 2) * (1 - g_x)) % 360
    h_2 = atan2(b_2, a_2 + (a_2 / 2) * (1 - g_x)) % 360

    if 0 in (c_1, c_2):
        # an achromatic color has no defined hue angle
        delta_h_prime = 0
        h_mean = h_1 + h_2
    else:
        delta_h_prime = h_2 - h_1
        if abs(delta_h_prime) <= 180:
            h_mean = (h_1 + h_2) / 2
        else:
            # hue angles wrap; take the short way around the circle
            if h_2 <= h_1:
                delta_h_prime += 360
            else:
                delta_h_prime -= 360
            h_mean = (h_1 + h_2 + 360) / 2 if h_1 + h_2 < 360 else (h_1 + h_2 - 360) / 2

    delta_h = 2 * sqrt(c_1 * c_2) * sin(delta_h_prime / 2)

    t_x = (1 -
           0.17 * cos(h_mean - 30) +
           0.24 * cos(2 * h_mean) +
           0.32 * cos(3 * h_mean + 6) -
           0.20 * cos(4 * h_mean - 63))

    s_l = 1 + (0.015 * (l_mean - 50) ** 2) / sqrt(20 + (l_mean - 50) ** 2)
    s_c = 1 + 0.045 * c_mean
    s_h = 1 + 0.015 * c_mean * t_x
    r_t = -2 * g_x * sin(abs(60 * exp(-1 * abs((delta_h - 275) / 25) ** 2)))

    delta_l = delta_l / (k_l * s_l)
    delta_c = delta_c / (k_c * s_c)
    delta_h = delta_h / (k_h * s_h)

    return delta_l ** 2 + delta_c ** 2 + delta_h ** 2 + r_t * delta_c * delta_h


#: Dispatch table of selectable color-distance algorithms, keyed by the
#: string names accepted by blessed's color-matching configuration.
COLOR_DISTANCE_ALGORITHMS = {'rgb': dist_rgb,
                             'rgb-weighted': dist_rgb_weighted,
                             'cie76': dist_cie76,
                             'cie94': dist_cie94,
                             'cie2000': dist_cie2000}
/dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/color.pyi @@ -0,0 +1,17 @@ +# std imports +from typing import Dict, Tuple, Callable + +_RGB = Tuple[int, int, int] + +def rgb_to_xyz(red: int, green: int, blue: int) -> Tuple[float, float, float]: ... +def xyz_to_lab( + x_val: float, y_val: float, z_val: float +) -> Tuple[float, float, float]: ... +def rgb_to_lab(red: int, green: int, blue: int) -> Tuple[float, float, float]: ... +def dist_rgb(rgb1: _RGB, rgb2: _RGB) -> float: ... +def dist_rgb_weighted(rgb1: _RGB, rgb2: _RGB) -> float: ... +def dist_cie76(rgb1: _RGB, rgb2: _RGB) -> float: ... +def dist_cie94(rgb1: _RGB, rgb2: _RGB) -> float: ... +def dist_cie2000(rgb1: _RGB, rgb2: _RGB) -> float: ... + +COLOR_DISTANCE_ALGORITHMS: Dict[str, Callable[[_RGB, _RGB], float]] diff --git a/vllm/lib/python3.10/site-packages/blessed/colorspace.py b/vllm/lib/python3.10/site-packages/blessed/colorspace.py new file mode 100644 index 0000000000000000000000000000000000000000..f95bfd9f75df6a58b4f650fcc090554229d717f8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/colorspace.py @@ -0,0 +1,973 @@ +""" +Color reference data. 
+ +References, + +- https://github.com/freedesktop/xorg-rgb/blob/master/rgb.txt +- https://github.com/ThomasDickey/xterm-snapshots/blob/master/256colres.h +- https://github.com/ThomasDickey/xterm-snapshots/blob/master/XTerm-col.ad +- https://en.wikipedia.org/wiki/ANSI_escape_code#Colors +- https://gist.github.com/XVilka/8346728 +- https://devblogs.microsoft.com/commandline/24-bit-color-in-the-windows-console/ +- http://jdebp.uk/Softwares/nosh/guide/TerminalCapabilities.html +""" + +# std imports +import collections + +__all__ = ( + 'CGA_COLORS', + 'RGBColor', + 'RGB_256TABLE', + 'X11_COLORNAMES_TO_RGB', +) + +CGA_COLORS = {'black', 'red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white'} + + +class RGBColor(collections.namedtuple("RGBColor", ["red", "green", "blue"])): + """Named tuple for an RGB color definition.""" + + def __str__(self): + return '#{0:02x}{1:02x}{2:02x}'.format(*self) + + +#: X11 Color names to (XTerm-defined) RGB values from xorg-rgb/rgb.txt +X11_COLORNAMES_TO_RGB = { + 'aliceblue': RGBColor(240, 248, 255), + 'antiquewhite': RGBColor(250, 235, 215), + 'antiquewhite1': RGBColor(255, 239, 219), + 'antiquewhite2': RGBColor(238, 223, 204), + 'antiquewhite3': RGBColor(205, 192, 176), + 'antiquewhite4': RGBColor(139, 131, 120), + 'aqua': RGBColor(0, 255, 255), + 'aquamarine': RGBColor(127, 255, 212), + 'aquamarine1': RGBColor(127, 255, 212), + 'aquamarine2': RGBColor(118, 238, 198), + 'aquamarine3': RGBColor(102, 205, 170), + 'aquamarine4': RGBColor(69, 139, 116), + 'azure': RGBColor(240, 255, 255), + 'azure1': RGBColor(240, 255, 255), + 'azure2': RGBColor(224, 238, 238), + 'azure3': RGBColor(193, 205, 205), + 'azure4': RGBColor(131, 139, 139), + 'beige': RGBColor(245, 245, 220), + 'bisque': RGBColor(255, 228, 196), + 'bisque1': RGBColor(255, 228, 196), + 'bisque2': RGBColor(238, 213, 183), + 'bisque3': RGBColor(205, 183, 158), + 'bisque4': RGBColor(139, 125, 107), + 'black': RGBColor(0, 0, 0), + 'blanchedalmond': RGBColor(255, 235, 205), + 
'blue': RGBColor(0, 0, 255), + 'blue1': RGBColor(0, 0, 255), + 'blue2': RGBColor(0, 0, 238), + 'blue3': RGBColor(0, 0, 205), + 'blue4': RGBColor(0, 0, 139), + 'blueviolet': RGBColor(138, 43, 226), + 'brown': RGBColor(165, 42, 42), + 'brown1': RGBColor(255, 64, 64), + 'brown2': RGBColor(238, 59, 59), + 'brown3': RGBColor(205, 51, 51), + 'brown4': RGBColor(139, 35, 35), + 'burlywood': RGBColor(222, 184, 135), + 'burlywood1': RGBColor(255, 211, 155), + 'burlywood2': RGBColor(238, 197, 145), + 'burlywood3': RGBColor(205, 170, 125), + 'burlywood4': RGBColor(139, 115, 85), + 'cadetblue': RGBColor(95, 158, 160), + 'cadetblue1': RGBColor(152, 245, 255), + 'cadetblue2': RGBColor(142, 229, 238), + 'cadetblue3': RGBColor(122, 197, 205), + 'cadetblue4': RGBColor(83, 134, 139), + 'chartreuse': RGBColor(127, 255, 0), + 'chartreuse1': RGBColor(127, 255, 0), + 'chartreuse2': RGBColor(118, 238, 0), + 'chartreuse3': RGBColor(102, 205, 0), + 'chartreuse4': RGBColor(69, 139, 0), + 'chocolate': RGBColor(210, 105, 30), + 'chocolate1': RGBColor(255, 127, 36), + 'chocolate2': RGBColor(238, 118, 33), + 'chocolate3': RGBColor(205, 102, 29), + 'chocolate4': RGBColor(139, 69, 19), + 'coral': RGBColor(255, 127, 80), + 'coral1': RGBColor(255, 114, 86), + 'coral2': RGBColor(238, 106, 80), + 'coral3': RGBColor(205, 91, 69), + 'coral4': RGBColor(139, 62, 47), + 'cornflowerblue': RGBColor(100, 149, 237), + 'cornsilk': RGBColor(255, 248, 220), + 'cornsilk1': RGBColor(255, 248, 220), + 'cornsilk2': RGBColor(238, 232, 205), + 'cornsilk3': RGBColor(205, 200, 177), + 'cornsilk4': RGBColor(139, 136, 120), + 'crimson': RGBColor(220, 20, 60), + 'cyan': RGBColor(0, 255, 255), + 'cyan1': RGBColor(0, 255, 255), + 'cyan2': RGBColor(0, 238, 238), + 'cyan3': RGBColor(0, 205, 205), + 'cyan4': RGBColor(0, 139, 139), + 'darkblue': RGBColor(0, 0, 139), + 'darkcyan': RGBColor(0, 139, 139), + 'darkgoldenrod': RGBColor(184, 134, 11), + 'darkgoldenrod1': RGBColor(255, 185, 15), + 'darkgoldenrod2': RGBColor(238, 173, 
14), + 'darkgoldenrod3': RGBColor(205, 149, 12), + 'darkgoldenrod4': RGBColor(139, 101, 8), + 'darkgray': RGBColor(169, 169, 169), + 'darkgreen': RGBColor(0, 100, 0), + 'darkgrey': RGBColor(169, 169, 169), + 'darkkhaki': RGBColor(189, 183, 107), + 'darkmagenta': RGBColor(139, 0, 139), + 'darkolivegreen': RGBColor(85, 107, 47), + 'darkolivegreen1': RGBColor(202, 255, 112), + 'darkolivegreen2': RGBColor(188, 238, 104), + 'darkolivegreen3': RGBColor(162, 205, 90), + 'darkolivegreen4': RGBColor(110, 139, 61), + 'darkorange': RGBColor(255, 140, 0), + 'darkorange1': RGBColor(255, 127, 0), + 'darkorange2': RGBColor(238, 118, 0), + 'darkorange3': RGBColor(205, 102, 0), + 'darkorange4': RGBColor(139, 69, 0), + 'darkorchid': RGBColor(153, 50, 204), + 'darkorchid1': RGBColor(191, 62, 255), + 'darkorchid2': RGBColor(178, 58, 238), + 'darkorchid3': RGBColor(154, 50, 205), + 'darkorchid4': RGBColor(104, 34, 139), + 'darkred': RGBColor(139, 0, 0), + 'darksalmon': RGBColor(233, 150, 122), + 'darkseagreen': RGBColor(143, 188, 143), + 'darkseagreen1': RGBColor(193, 255, 193), + 'darkseagreen2': RGBColor(180, 238, 180), + 'darkseagreen3': RGBColor(155, 205, 155), + 'darkseagreen4': RGBColor(105, 139, 105), + 'darkslateblue': RGBColor(72, 61, 139), + 'darkslategray': RGBColor(47, 79, 79), + 'darkslategray1': RGBColor(151, 255, 255), + 'darkslategray2': RGBColor(141, 238, 238), + 'darkslategray3': RGBColor(121, 205, 205), + 'darkslategray4': RGBColor(82, 139, 139), + 'darkslategrey': RGBColor(47, 79, 79), + 'darkturquoise': RGBColor(0, 206, 209), + 'darkviolet': RGBColor(148, 0, 211), + 'deeppink': RGBColor(255, 20, 147), + 'deeppink1': RGBColor(255, 20, 147), + 'deeppink2': RGBColor(238, 18, 137), + 'deeppink3': RGBColor(205, 16, 118), + 'deeppink4': RGBColor(139, 10, 80), + 'deepskyblue': RGBColor(0, 191, 255), + 'deepskyblue1': RGBColor(0, 191, 255), + 'deepskyblue2': RGBColor(0, 178, 238), + 'deepskyblue3': RGBColor(0, 154, 205), + 'deepskyblue4': RGBColor(0, 104, 139), + 
'dimgray': RGBColor(105, 105, 105), + 'dimgrey': RGBColor(105, 105, 105), + 'dodgerblue': RGBColor(30, 144, 255), + 'dodgerblue1': RGBColor(30, 144, 255), + 'dodgerblue2': RGBColor(28, 134, 238), + 'dodgerblue3': RGBColor(24, 116, 205), + 'dodgerblue4': RGBColor(16, 78, 139), + 'firebrick': RGBColor(178, 34, 34), + 'firebrick1': RGBColor(255, 48, 48), + 'firebrick2': RGBColor(238, 44, 44), + 'firebrick3': RGBColor(205, 38, 38), + 'firebrick4': RGBColor(139, 26, 26), + 'floralwhite': RGBColor(255, 250, 240), + 'forestgreen': RGBColor(34, 139, 34), + 'fuchsia': RGBColor(255, 0, 255), + 'gainsboro': RGBColor(220, 220, 220), + 'ghostwhite': RGBColor(248, 248, 255), + 'gold': RGBColor(255, 215, 0), + 'gold1': RGBColor(255, 215, 0), + 'gold2': RGBColor(238, 201, 0), + 'gold3': RGBColor(205, 173, 0), + 'gold4': RGBColor(139, 117, 0), + 'goldenrod': RGBColor(218, 165, 32), + 'goldenrod1': RGBColor(255, 193, 37), + 'goldenrod2': RGBColor(238, 180, 34), + 'goldenrod3': RGBColor(205, 155, 29), + 'goldenrod4': RGBColor(139, 105, 20), + 'gray': RGBColor(190, 190, 190), + 'gray0': RGBColor(0, 0, 0), + 'gray1': RGBColor(3, 3, 3), + 'gray10': RGBColor(26, 26, 26), + 'gray100': RGBColor(255, 255, 255), + 'gray11': RGBColor(28, 28, 28), + 'gray12': RGBColor(31, 31, 31), + 'gray13': RGBColor(33, 33, 33), + 'gray14': RGBColor(36, 36, 36), + 'gray15': RGBColor(38, 38, 38), + 'gray16': RGBColor(41, 41, 41), + 'gray17': RGBColor(43, 43, 43), + 'gray18': RGBColor(46, 46, 46), + 'gray19': RGBColor(48, 48, 48), + 'gray2': RGBColor(5, 5, 5), + 'gray20': RGBColor(51, 51, 51), + 'gray21': RGBColor(54, 54, 54), + 'gray22': RGBColor(56, 56, 56), + 'gray23': RGBColor(59, 59, 59), + 'gray24': RGBColor(61, 61, 61), + 'gray25': RGBColor(64, 64, 64), + 'gray26': RGBColor(66, 66, 66), + 'gray27': RGBColor(69, 69, 69), + 'gray28': RGBColor(71, 71, 71), + 'gray29': RGBColor(74, 74, 74), + 'gray3': RGBColor(8, 8, 8), + 'gray30': RGBColor(77, 77, 77), + 'gray31': RGBColor(79, 79, 79), + 'gray32': 
RGBColor(82, 82, 82), + 'gray33': RGBColor(84, 84, 84), + 'gray34': RGBColor(87, 87, 87), + 'gray35': RGBColor(89, 89, 89), + 'gray36': RGBColor(92, 92, 92), + 'gray37': RGBColor(94, 94, 94), + 'gray38': RGBColor(97, 97, 97), + 'gray39': RGBColor(99, 99, 99), + 'gray4': RGBColor(10, 10, 10), + 'gray40': RGBColor(102, 102, 102), + 'gray41': RGBColor(105, 105, 105), + 'gray42': RGBColor(107, 107, 107), + 'gray43': RGBColor(110, 110, 110), + 'gray44': RGBColor(112, 112, 112), + 'gray45': RGBColor(115, 115, 115), + 'gray46': RGBColor(117, 117, 117), + 'gray47': RGBColor(120, 120, 120), + 'gray48': RGBColor(122, 122, 122), + 'gray49': RGBColor(125, 125, 125), + 'gray5': RGBColor(13, 13, 13), + 'gray50': RGBColor(127, 127, 127), + 'gray51': RGBColor(130, 130, 130), + 'gray52': RGBColor(133, 133, 133), + 'gray53': RGBColor(135, 135, 135), + 'gray54': RGBColor(138, 138, 138), + 'gray55': RGBColor(140, 140, 140), + 'gray56': RGBColor(143, 143, 143), + 'gray57': RGBColor(145, 145, 145), + 'gray58': RGBColor(148, 148, 148), + 'gray59': RGBColor(150, 150, 150), + 'gray6': RGBColor(15, 15, 15), + 'gray60': RGBColor(153, 153, 153), + 'gray61': RGBColor(156, 156, 156), + 'gray62': RGBColor(158, 158, 158), + 'gray63': RGBColor(161, 161, 161), + 'gray64': RGBColor(163, 163, 163), + 'gray65': RGBColor(166, 166, 166), + 'gray66': RGBColor(168, 168, 168), + 'gray67': RGBColor(171, 171, 171), + 'gray68': RGBColor(173, 173, 173), + 'gray69': RGBColor(176, 176, 176), + 'gray7': RGBColor(18, 18, 18), + 'gray70': RGBColor(179, 179, 179), + 'gray71': RGBColor(181, 181, 181), + 'gray72': RGBColor(184, 184, 184), + 'gray73': RGBColor(186, 186, 186), + 'gray74': RGBColor(189, 189, 189), + 'gray75': RGBColor(191, 191, 191), + 'gray76': RGBColor(194, 194, 194), + 'gray77': RGBColor(196, 196, 196), + 'gray78': RGBColor(199, 199, 199), + 'gray79': RGBColor(201, 201, 201), + 'gray8': RGBColor(20, 20, 20), + 'gray80': RGBColor(204, 204, 204), + 'gray81': RGBColor(207, 207, 207), + 'gray82': 
RGBColor(209, 209, 209), + 'gray83': RGBColor(212, 212, 212), + 'gray84': RGBColor(214, 214, 214), + 'gray85': RGBColor(217, 217, 217), + 'gray86': RGBColor(219, 219, 219), + 'gray87': RGBColor(222, 222, 222), + 'gray88': RGBColor(224, 224, 224), + 'gray89': RGBColor(227, 227, 227), + 'gray9': RGBColor(23, 23, 23), + 'gray90': RGBColor(229, 229, 229), + 'gray91': RGBColor(232, 232, 232), + 'gray92': RGBColor(235, 235, 235), + 'gray93': RGBColor(237, 237, 237), + 'gray94': RGBColor(240, 240, 240), + 'gray95': RGBColor(242, 242, 242), + 'gray96': RGBColor(245, 245, 245), + 'gray97': RGBColor(247, 247, 247), + 'gray98': RGBColor(250, 250, 250), + 'gray99': RGBColor(252, 252, 252), + 'green': RGBColor(0, 255, 0), + 'green1': RGBColor(0, 255, 0), + 'green2': RGBColor(0, 238, 0), + 'green3': RGBColor(0, 205, 0), + 'green4': RGBColor(0, 139, 0), + 'greenyellow': RGBColor(173, 255, 47), + 'grey': RGBColor(190, 190, 190), + 'grey0': RGBColor(0, 0, 0), + 'grey1': RGBColor(3, 3, 3), + 'grey10': RGBColor(26, 26, 26), + 'grey100': RGBColor(255, 255, 255), + 'grey11': RGBColor(28, 28, 28), + 'grey12': RGBColor(31, 31, 31), + 'grey13': RGBColor(33, 33, 33), + 'grey14': RGBColor(36, 36, 36), + 'grey15': RGBColor(38, 38, 38), + 'grey16': RGBColor(41, 41, 41), + 'grey17': RGBColor(43, 43, 43), + 'grey18': RGBColor(46, 46, 46), + 'grey19': RGBColor(48, 48, 48), + 'grey2': RGBColor(5, 5, 5), + 'grey20': RGBColor(51, 51, 51), + 'grey21': RGBColor(54, 54, 54), + 'grey22': RGBColor(56, 56, 56), + 'grey23': RGBColor(59, 59, 59), + 'grey24': RGBColor(61, 61, 61), + 'grey25': RGBColor(64, 64, 64), + 'grey26': RGBColor(66, 66, 66), + 'grey27': RGBColor(69, 69, 69), + 'grey28': RGBColor(71, 71, 71), + 'grey29': RGBColor(74, 74, 74), + 'grey3': RGBColor(8, 8, 8), + 'grey30': RGBColor(77, 77, 77), + 'grey31': RGBColor(79, 79, 79), + 'grey32': RGBColor(82, 82, 82), + 'grey33': RGBColor(84, 84, 84), + 'grey34': RGBColor(87, 87, 87), + 'grey35': RGBColor(89, 89, 89), + 'grey36': RGBColor(92, 92, 
92), + 'grey37': RGBColor(94, 94, 94), + 'grey38': RGBColor(97, 97, 97), + 'grey39': RGBColor(99, 99, 99), + 'grey4': RGBColor(10, 10, 10), + 'grey40': RGBColor(102, 102, 102), + 'grey41': RGBColor(105, 105, 105), + 'grey42': RGBColor(107, 107, 107), + 'grey43': RGBColor(110, 110, 110), + 'grey44': RGBColor(112, 112, 112), + 'grey45': RGBColor(115, 115, 115), + 'grey46': RGBColor(117, 117, 117), + 'grey47': RGBColor(120, 120, 120), + 'grey48': RGBColor(122, 122, 122), + 'grey49': RGBColor(125, 125, 125), + 'grey5': RGBColor(13, 13, 13), + 'grey50': RGBColor(127, 127, 127), + 'grey51': RGBColor(130, 130, 130), + 'grey52': RGBColor(133, 133, 133), + 'grey53': RGBColor(135, 135, 135), + 'grey54': RGBColor(138, 138, 138), + 'grey55': RGBColor(140, 140, 140), + 'grey56': RGBColor(143, 143, 143), + 'grey57': RGBColor(145, 145, 145), + 'grey58': RGBColor(148, 148, 148), + 'grey59': RGBColor(150, 150, 150), + 'grey6': RGBColor(15, 15, 15), + 'grey60': RGBColor(153, 153, 153), + 'grey61': RGBColor(156, 156, 156), + 'grey62': RGBColor(158, 158, 158), + 'grey63': RGBColor(161, 161, 161), + 'grey64': RGBColor(163, 163, 163), + 'grey65': RGBColor(166, 166, 166), + 'grey66': RGBColor(168, 168, 168), + 'grey67': RGBColor(171, 171, 171), + 'grey68': RGBColor(173, 173, 173), + 'grey69': RGBColor(176, 176, 176), + 'grey7': RGBColor(18, 18, 18), + 'grey70': RGBColor(179, 179, 179), + 'grey71': RGBColor(181, 181, 181), + 'grey72': RGBColor(184, 184, 184), + 'grey73': RGBColor(186, 186, 186), + 'grey74': RGBColor(189, 189, 189), + 'grey75': RGBColor(191, 191, 191), + 'grey76': RGBColor(194, 194, 194), + 'grey77': RGBColor(196, 196, 196), + 'grey78': RGBColor(199, 199, 199), + 'grey79': RGBColor(201, 201, 201), + 'grey8': RGBColor(20, 20, 20), + 'grey80': RGBColor(204, 204, 204), + 'grey81': RGBColor(207, 207, 207), + 'grey82': RGBColor(209, 209, 209), + 'grey83': RGBColor(212, 212, 212), + 'grey84': RGBColor(214, 214, 214), + 'grey85': RGBColor(217, 217, 217), + 'grey86': RGBColor(219, 
219, 219), + 'grey87': RGBColor(222, 222, 222), + 'grey88': RGBColor(224, 224, 224), + 'grey89': RGBColor(227, 227, 227), + 'grey9': RGBColor(23, 23, 23), + 'grey90': RGBColor(229, 229, 229), + 'grey91': RGBColor(232, 232, 232), + 'grey92': RGBColor(235, 235, 235), + 'grey93': RGBColor(237, 237, 237), + 'grey94': RGBColor(240, 240, 240), + 'grey95': RGBColor(242, 242, 242), + 'grey96': RGBColor(245, 245, 245), + 'grey97': RGBColor(247, 247, 247), + 'grey98': RGBColor(250, 250, 250), + 'grey99': RGBColor(252, 252, 252), + 'honeydew': RGBColor(240, 255, 240), + 'honeydew1': RGBColor(240, 255, 240), + 'honeydew2': RGBColor(224, 238, 224), + 'honeydew3': RGBColor(193, 205, 193), + 'honeydew4': RGBColor(131, 139, 131), + 'hotpink': RGBColor(255, 105, 180), + 'hotpink1': RGBColor(255, 110, 180), + 'hotpink2': RGBColor(238, 106, 167), + 'hotpink3': RGBColor(205, 96, 144), + 'hotpink4': RGBColor(139, 58, 98), + 'indianred': RGBColor(205, 92, 92), + 'indianred1': RGBColor(255, 106, 106), + 'indianred2': RGBColor(238, 99, 99), + 'indianred3': RGBColor(205, 85, 85), + 'indianred4': RGBColor(139, 58, 58), + 'indigo': RGBColor(75, 0, 130), + 'ivory': RGBColor(255, 255, 240), + 'ivory1': RGBColor(255, 255, 240), + 'ivory2': RGBColor(238, 238, 224), + 'ivory3': RGBColor(205, 205, 193), + 'ivory4': RGBColor(139, 139, 131), + 'khaki': RGBColor(240, 230, 140), + 'khaki1': RGBColor(255, 246, 143), + 'khaki2': RGBColor(238, 230, 133), + 'khaki3': RGBColor(205, 198, 115), + 'khaki4': RGBColor(139, 134, 78), + 'lavender': RGBColor(230, 230, 250), + 'lavenderblush': RGBColor(255, 240, 245), + 'lavenderblush1': RGBColor(255, 240, 245), + 'lavenderblush2': RGBColor(238, 224, 229), + 'lavenderblush3': RGBColor(205, 193, 197), + 'lavenderblush4': RGBColor(139, 131, 134), + 'lawngreen': RGBColor(124, 252, 0), + 'lemonchiffon': RGBColor(255, 250, 205), + 'lemonchiffon1': RGBColor(255, 250, 205), + 'lemonchiffon2': RGBColor(238, 233, 191), + 'lemonchiffon3': RGBColor(205, 201, 165), + 
'lemonchiffon4': RGBColor(139, 137, 112), + 'lightblue': RGBColor(173, 216, 230), + 'lightblue1': RGBColor(191, 239, 255), + 'lightblue2': RGBColor(178, 223, 238), + 'lightblue3': RGBColor(154, 192, 205), + 'lightblue4': RGBColor(104, 131, 139), + 'lightcoral': RGBColor(240, 128, 128), + 'lightcyan': RGBColor(224, 255, 255), + 'lightcyan1': RGBColor(224, 255, 255), + 'lightcyan2': RGBColor(209, 238, 238), + 'lightcyan3': RGBColor(180, 205, 205), + 'lightcyan4': RGBColor(122, 139, 139), + 'lightgoldenrod': RGBColor(238, 221, 130), + 'lightgoldenrod1': RGBColor(255, 236, 139), + 'lightgoldenrod2': RGBColor(238, 220, 130), + 'lightgoldenrod3': RGBColor(205, 190, 112), + 'lightgoldenrod4': RGBColor(139, 129, 76), + 'lightgoldenrodyellow': RGBColor(250, 250, 210), + 'lightgray': RGBColor(211, 211, 211), + 'lightgreen': RGBColor(144, 238, 144), + 'lightgrey': RGBColor(211, 211, 211), + 'lightpink': RGBColor(255, 182, 193), + 'lightpink1': RGBColor(255, 174, 185), + 'lightpink2': RGBColor(238, 162, 173), + 'lightpink3': RGBColor(205, 140, 149), + 'lightpink4': RGBColor(139, 95, 101), + 'lightsalmon': RGBColor(255, 160, 122), + 'lightsalmon1': RGBColor(255, 160, 122), + 'lightsalmon2': RGBColor(238, 149, 114), + 'lightsalmon3': RGBColor(205, 129, 98), + 'lightsalmon4': RGBColor(139, 87, 66), + 'lightseagreen': RGBColor(32, 178, 170), + 'lightskyblue': RGBColor(135, 206, 250), + 'lightskyblue1': RGBColor(176, 226, 255), + 'lightskyblue2': RGBColor(164, 211, 238), + 'lightskyblue3': RGBColor(141, 182, 205), + 'lightskyblue4': RGBColor(96, 123, 139), + 'lightslateblue': RGBColor(132, 112, 255), + 'lightslategray': RGBColor(119, 136, 153), + 'lightslategrey': RGBColor(119, 136, 153), + 'lightsteelblue': RGBColor(176, 196, 222), + 'lightsteelblue1': RGBColor(202, 225, 255), + 'lightsteelblue2': RGBColor(188, 210, 238), + 'lightsteelblue3': RGBColor(162, 181, 205), + 'lightsteelblue4': RGBColor(110, 123, 139), + 'lightyellow': RGBColor(255, 255, 224), + 'lightyellow1': 
RGBColor(255, 255, 224), + 'lightyellow2': RGBColor(238, 238, 209), + 'lightyellow3': RGBColor(205, 205, 180), + 'lightyellow4': RGBColor(139, 139, 122), + 'lime': RGBColor(0, 255, 0), + 'limegreen': RGBColor(50, 205, 50), + 'linen': RGBColor(250, 240, 230), + 'magenta': RGBColor(255, 0, 255), + 'magenta1': RGBColor(255, 0, 255), + 'magenta2': RGBColor(238, 0, 238), + 'magenta3': RGBColor(205, 0, 205), + 'magenta4': RGBColor(139, 0, 139), + 'maroon': RGBColor(176, 48, 96), + 'maroon1': RGBColor(255, 52, 179), + 'maroon2': RGBColor(238, 48, 167), + 'maroon3': RGBColor(205, 41, 144), + 'maroon4': RGBColor(139, 28, 98), + 'mediumaquamarine': RGBColor(102, 205, 170), + 'mediumblue': RGBColor(0, 0, 205), + 'mediumorchid': RGBColor(186, 85, 211), + 'mediumorchid1': RGBColor(224, 102, 255), + 'mediumorchid2': RGBColor(209, 95, 238), + 'mediumorchid3': RGBColor(180, 82, 205), + 'mediumorchid4': RGBColor(122, 55, 139), + 'mediumpurple': RGBColor(147, 112, 219), + 'mediumpurple1': RGBColor(171, 130, 255), + 'mediumpurple2': RGBColor(159, 121, 238), + 'mediumpurple3': RGBColor(137, 104, 205), + 'mediumpurple4': RGBColor(93, 71, 139), + 'mediumseagreen': RGBColor(60, 179, 113), + 'mediumslateblue': RGBColor(123, 104, 238), + 'mediumspringgreen': RGBColor(0, 250, 154), + 'mediumturquoise': RGBColor(72, 209, 204), + 'mediumvioletred': RGBColor(199, 21, 133), + 'midnightblue': RGBColor(25, 25, 112), + 'mintcream': RGBColor(245, 255, 250), + 'mistyrose': RGBColor(255, 228, 225), + 'mistyrose1': RGBColor(255, 228, 225), + 'mistyrose2': RGBColor(238, 213, 210), + 'mistyrose3': RGBColor(205, 183, 181), + 'mistyrose4': RGBColor(139, 125, 123), + 'moccasin': RGBColor(255, 228, 181), + 'navajowhite': RGBColor(255, 222, 173), + 'navajowhite1': RGBColor(255, 222, 173), + 'navajowhite2': RGBColor(238, 207, 161), + 'navajowhite3': RGBColor(205, 179, 139), + 'navajowhite4': RGBColor(139, 121, 94), + 'navy': RGBColor(0, 0, 128), + 'navyblue': RGBColor(0, 0, 128), + 'oldlace': RGBColor(253, 
245, 230), + 'olive': RGBColor(128, 128, 0), + 'olivedrab': RGBColor(107, 142, 35), + 'olivedrab1': RGBColor(192, 255, 62), + 'olivedrab2': RGBColor(179, 238, 58), + 'olivedrab3': RGBColor(154, 205, 50), + 'olivedrab4': RGBColor(105, 139, 34), + 'orange': RGBColor(255, 165, 0), + 'orange1': RGBColor(255, 165, 0), + 'orange2': RGBColor(238, 154, 0), + 'orange3': RGBColor(205, 133, 0), + 'orange4': RGBColor(139, 90, 0), + 'orangered': RGBColor(255, 69, 0), + 'orangered1': RGBColor(255, 69, 0), + 'orangered2': RGBColor(238, 64, 0), + 'orangered3': RGBColor(205, 55, 0), + 'orangered4': RGBColor(139, 37, 0), + 'orchid': RGBColor(218, 112, 214), + 'orchid1': RGBColor(255, 131, 250), + 'orchid2': RGBColor(238, 122, 233), + 'orchid3': RGBColor(205, 105, 201), + 'orchid4': RGBColor(139, 71, 137), + 'palegoldenrod': RGBColor(238, 232, 170), + 'palegreen': RGBColor(152, 251, 152), + 'palegreen1': RGBColor(154, 255, 154), + 'palegreen2': RGBColor(144, 238, 144), + 'palegreen3': RGBColor(124, 205, 124), + 'palegreen4': RGBColor(84, 139, 84), + 'paleturquoise': RGBColor(175, 238, 238), + 'paleturquoise1': RGBColor(187, 255, 255), + 'paleturquoise2': RGBColor(174, 238, 238), + 'paleturquoise3': RGBColor(150, 205, 205), + 'paleturquoise4': RGBColor(102, 139, 139), + 'palevioletred': RGBColor(219, 112, 147), + 'palevioletred1': RGBColor(255, 130, 171), + 'palevioletred2': RGBColor(238, 121, 159), + 'palevioletred3': RGBColor(205, 104, 137), + 'palevioletred4': RGBColor(139, 71, 93), + 'papayawhip': RGBColor(255, 239, 213), + 'peachpuff': RGBColor(255, 218, 185), + 'peachpuff1': RGBColor(255, 218, 185), + 'peachpuff2': RGBColor(238, 203, 173), + 'peachpuff3': RGBColor(205, 175, 149), + 'peachpuff4': RGBColor(139, 119, 101), + 'peru': RGBColor(205, 133, 63), + 'pink': RGBColor(255, 192, 203), + 'pink1': RGBColor(255, 181, 197), + 'pink2': RGBColor(238, 169, 184), + 'pink3': RGBColor(205, 145, 158), + 'pink4': RGBColor(139, 99, 108), + 'plum': RGBColor(221, 160, 221), + 'plum1': 
RGBColor(255, 187, 255), + 'plum2': RGBColor(238, 174, 238), + 'plum3': RGBColor(205, 150, 205), + 'plum4': RGBColor(139, 102, 139), + 'powderblue': RGBColor(176, 224, 230), + 'purple': RGBColor(160, 32, 240), + 'purple1': RGBColor(155, 48, 255), + 'purple2': RGBColor(145, 44, 238), + 'purple3': RGBColor(125, 38, 205), + 'purple4': RGBColor(85, 26, 139), + 'rebeccapurple': RGBColor(102, 51, 153), + 'red': RGBColor(255, 0, 0), + 'red1': RGBColor(255, 0, 0), + 'red2': RGBColor(238, 0, 0), + 'red3': RGBColor(205, 0, 0), + 'red4': RGBColor(139, 0, 0), + 'rosybrown': RGBColor(188, 143, 143), + 'rosybrown1': RGBColor(255, 193, 193), + 'rosybrown2': RGBColor(238, 180, 180), + 'rosybrown3': RGBColor(205, 155, 155), + 'rosybrown4': RGBColor(139, 105, 105), + 'royalblue': RGBColor(65, 105, 225), + 'royalblue1': RGBColor(72, 118, 255), + 'royalblue2': RGBColor(67, 110, 238), + 'royalblue3': RGBColor(58, 95, 205), + 'royalblue4': RGBColor(39, 64, 139), + 'saddlebrown': RGBColor(139, 69, 19), + 'salmon': RGBColor(250, 128, 114), + 'salmon1': RGBColor(255, 140, 105), + 'salmon2': RGBColor(238, 130, 98), + 'salmon3': RGBColor(205, 112, 84), + 'salmon4': RGBColor(139, 76, 57), + 'sandybrown': RGBColor(244, 164, 96), + 'seagreen': RGBColor(46, 139, 87), + 'seagreen1': RGBColor(84, 255, 159), + 'seagreen2': RGBColor(78, 238, 148), + 'seagreen3': RGBColor(67, 205, 128), + 'seagreen4': RGBColor(46, 139, 87), + 'seashell': RGBColor(255, 245, 238), + 'seashell1': RGBColor(255, 245, 238), + 'seashell2': RGBColor(238, 229, 222), + 'seashell3': RGBColor(205, 197, 191), + 'seashell4': RGBColor(139, 134, 130), + 'sienna': RGBColor(160, 82, 45), + 'sienna1': RGBColor(255, 130, 71), + 'sienna2': RGBColor(238, 121, 66), + 'sienna3': RGBColor(205, 104, 57), + 'sienna4': RGBColor(139, 71, 38), + 'silver': RGBColor(192, 192, 192), + 'skyblue': RGBColor(135, 206, 235), + 'skyblue1': RGBColor(135, 206, 255), + 'skyblue2': RGBColor(126, 192, 238), + 'skyblue3': RGBColor(108, 166, 205), + 'skyblue4': 
RGBColor(74, 112, 139), + 'slateblue': RGBColor(106, 90, 205), + 'slateblue1': RGBColor(131, 111, 255), + 'slateblue2': RGBColor(122, 103, 238), + 'slateblue3': RGBColor(105, 89, 205), + 'slateblue4': RGBColor(71, 60, 139), + 'slategray': RGBColor(112, 128, 144), + 'slategray1': RGBColor(198, 226, 255), + 'slategray2': RGBColor(185, 211, 238), + 'slategray3': RGBColor(159, 182, 205), + 'slategray4': RGBColor(108, 123, 139), + 'slategrey': RGBColor(112, 128, 144), + 'snow': RGBColor(255, 250, 250), + 'snow1': RGBColor(255, 250, 250), + 'snow2': RGBColor(238, 233, 233), + 'snow3': RGBColor(205, 201, 201), + 'snow4': RGBColor(139, 137, 137), + 'springgreen': RGBColor(0, 255, 127), + 'springgreen1': RGBColor(0, 255, 127), + 'springgreen2': RGBColor(0, 238, 118), + 'springgreen3': RGBColor(0, 205, 102), + 'springgreen4': RGBColor(0, 139, 69), + 'steelblue': RGBColor(70, 130, 180), + 'steelblue1': RGBColor(99, 184, 255), + 'steelblue2': RGBColor(92, 172, 238), + 'steelblue3': RGBColor(79, 148, 205), + 'steelblue4': RGBColor(54, 100, 139), + 'tan': RGBColor(210, 180, 140), + 'tan1': RGBColor(255, 165, 79), + 'tan2': RGBColor(238, 154, 73), + 'tan3': RGBColor(205, 133, 63), + 'tan4': RGBColor(139, 90, 43), + 'teal': RGBColor(0, 128, 128), + 'thistle': RGBColor(216, 191, 216), + 'thistle1': RGBColor(255, 225, 255), + 'thistle2': RGBColor(238, 210, 238), + 'thistle3': RGBColor(205, 181, 205), + 'thistle4': RGBColor(139, 123, 139), + 'tomato': RGBColor(255, 99, 71), + 'tomato1': RGBColor(255, 99, 71), + 'tomato2': RGBColor(238, 92, 66), + 'tomato3': RGBColor(205, 79, 57), + 'tomato4': RGBColor(139, 54, 38), + 'turquoise': RGBColor(64, 224, 208), + 'turquoise1': RGBColor(0, 245, 255), + 'turquoise2': RGBColor(0, 229, 238), + 'turquoise3': RGBColor(0, 197, 205), + 'turquoise4': RGBColor(0, 134, 139), + 'violet': RGBColor(238, 130, 238), + 'violetred': RGBColor(208, 32, 144), + 'violetred1': RGBColor(255, 62, 150), + 'violetred2': RGBColor(238, 58, 140), + 'violetred3': 
RGBColor(205, 50, 120), + 'violetred4': RGBColor(139, 34, 82), + 'webgray': RGBColor(128, 128, 128), + 'webgreen': RGBColor(0, 128, 0), + 'webgrey': RGBColor(128, 128, 128), + 'webmaroon': RGBColor(128, 0, 0), + 'webpurple': RGBColor(128, 0, 128), + 'wheat': RGBColor(245, 222, 179), + 'wheat1': RGBColor(255, 231, 186), + 'wheat2': RGBColor(238, 216, 174), + 'wheat3': RGBColor(205, 186, 150), + 'wheat4': RGBColor(139, 126, 102), + 'white': RGBColor(255, 255, 255), + 'whitesmoke': RGBColor(245, 245, 245), + 'x11gray': RGBColor(190, 190, 190), + 'x11green': RGBColor(0, 255, 0), + 'x11grey': RGBColor(190, 190, 190), + 'x11maroon': RGBColor(176, 48, 96), + 'x11purple': RGBColor(160, 32, 240), + 'yellow': RGBColor(255, 255, 0), + 'yellow1': RGBColor(255, 255, 0), + 'yellow2': RGBColor(238, 238, 0), + 'yellow3': RGBColor(205, 205, 0), + 'yellow4': RGBColor(139, 139, 0), + 'yellowgreen': RGBColor(154, 205, 50) +} + +#: Curses color indices of 8, 16, and 256-color terminals +RGB_256TABLE = ( + RGBColor(0, 0, 0), + RGBColor(205, 0, 0), + RGBColor(0, 205, 0), + RGBColor(205, 205, 0), + RGBColor(0, 0, 238), + RGBColor(205, 0, 205), + RGBColor(0, 205, 205), + RGBColor(229, 229, 229), + RGBColor(127, 127, 127), + RGBColor(255, 0, 0), + RGBColor(0, 255, 0), + RGBColor(255, 255, 0), + RGBColor(92, 92, 255), + RGBColor(255, 0, 255), + RGBColor(0, 255, 255), + RGBColor(255, 255, 255), + RGBColor(0, 0, 0), + RGBColor(0, 0, 95), + RGBColor(0, 0, 135), + RGBColor(0, 0, 175), + RGBColor(0, 0, 215), + RGBColor(0, 0, 255), + RGBColor(0, 95, 0), + RGBColor(0, 95, 95), + RGBColor(0, 95, 135), + RGBColor(0, 95, 175), + RGBColor(0, 95, 215), + RGBColor(0, 95, 255), + RGBColor(0, 135, 0), + RGBColor(0, 135, 95), + RGBColor(0, 135, 135), + RGBColor(0, 135, 175), + RGBColor(0, 135, 215), + RGBColor(0, 135, 255), + RGBColor(0, 175, 0), + RGBColor(0, 175, 95), + RGBColor(0, 175, 135), + RGBColor(0, 175, 175), + RGBColor(0, 175, 215), + RGBColor(0, 175, 255), + RGBColor(0, 215, 0), + RGBColor(0, 
215, 95), + RGBColor(0, 215, 135), + RGBColor(0, 215, 175), + RGBColor(0, 215, 215), + RGBColor(0, 215, 255), + RGBColor(0, 255, 0), + RGBColor(0, 255, 95), + RGBColor(0, 255, 135), + RGBColor(0, 255, 175), + RGBColor(0, 255, 215), + RGBColor(0, 255, 255), + RGBColor(95, 0, 0), + RGBColor(95, 0, 95), + RGBColor(95, 0, 135), + RGBColor(95, 0, 175), + RGBColor(95, 0, 215), + RGBColor(95, 0, 255), + RGBColor(95, 95, 0), + RGBColor(95, 95, 95), + RGBColor(95, 95, 135), + RGBColor(95, 95, 175), + RGBColor(95, 95, 215), + RGBColor(95, 95, 255), + RGBColor(95, 135, 0), + RGBColor(95, 135, 95), + RGBColor(95, 135, 135), + RGBColor(95, 135, 175), + RGBColor(95, 135, 215), + RGBColor(95, 135, 255), + RGBColor(95, 175, 0), + RGBColor(95, 175, 95), + RGBColor(95, 175, 135), + RGBColor(95, 175, 175), + RGBColor(95, 175, 215), + RGBColor(95, 175, 255), + RGBColor(95, 215, 0), + RGBColor(95, 215, 95), + RGBColor(95, 215, 135), + RGBColor(95, 215, 175), + RGBColor(95, 215, 215), + RGBColor(95, 215, 255), + RGBColor(95, 255, 0), + RGBColor(95, 255, 95), + RGBColor(95, 255, 135), + RGBColor(95, 255, 175), + RGBColor(95, 255, 215), + RGBColor(95, 255, 255), + RGBColor(135, 0, 0), + RGBColor(135, 0, 95), + RGBColor(135, 0, 135), + RGBColor(135, 0, 175), + RGBColor(135, 0, 215), + RGBColor(135, 0, 255), + RGBColor(135, 95, 0), + RGBColor(135, 95, 95), + RGBColor(135, 95, 135), + RGBColor(135, 95, 175), + RGBColor(135, 95, 215), + RGBColor(135, 95, 255), + RGBColor(135, 135, 0), + RGBColor(135, 135, 95), + RGBColor(135, 135, 135), + RGBColor(135, 135, 175), + RGBColor(135, 135, 215), + RGBColor(135, 135, 255), + RGBColor(135, 175, 0), + RGBColor(135, 175, 95), + RGBColor(135, 175, 135), + RGBColor(135, 175, 175), + RGBColor(135, 175, 215), + RGBColor(135, 175, 255), + RGBColor(135, 215, 0), + RGBColor(135, 215, 95), + RGBColor(135, 215, 135), + RGBColor(135, 215, 175), + RGBColor(135, 215, 215), + RGBColor(135, 215, 255), + RGBColor(135, 255, 0), + RGBColor(135, 255, 95), + 
RGBColor(135, 255, 135), + RGBColor(135, 255, 175), + RGBColor(135, 255, 215), + RGBColor(135, 255, 255), + RGBColor(175, 0, 0), + RGBColor(175, 0, 95), + RGBColor(175, 0, 135), + RGBColor(175, 0, 175), + RGBColor(175, 0, 215), + RGBColor(175, 0, 255), + RGBColor(175, 95, 0), + RGBColor(175, 95, 95), + RGBColor(175, 95, 135), + RGBColor(175, 95, 175), + RGBColor(175, 95, 215), + RGBColor(175, 95, 255), + RGBColor(175, 135, 0), + RGBColor(175, 135, 95), + RGBColor(175, 135, 135), + RGBColor(175, 135, 175), + RGBColor(175, 135, 215), + RGBColor(175, 135, 255), + RGBColor(175, 175, 0), + RGBColor(175, 175, 95), + RGBColor(175, 175, 135), + RGBColor(175, 175, 175), + RGBColor(175, 175, 215), + RGBColor(175, 175, 255), + RGBColor(175, 215, 0), + RGBColor(175, 215, 95), + RGBColor(175, 215, 135), + RGBColor(175, 215, 175), + RGBColor(175, 215, 215), + RGBColor(175, 215, 255), + RGBColor(175, 255, 0), + RGBColor(175, 255, 95), + RGBColor(175, 255, 135), + RGBColor(175, 255, 175), + RGBColor(175, 255, 215), + RGBColor(175, 255, 255), + RGBColor(215, 0, 0), + RGBColor(215, 0, 95), + RGBColor(215, 0, 135), + RGBColor(215, 0, 175), + RGBColor(215, 0, 215), + RGBColor(215, 0, 255), + RGBColor(215, 95, 0), + RGBColor(215, 95, 95), + RGBColor(215, 95, 135), + RGBColor(215, 95, 175), + RGBColor(215, 95, 215), + RGBColor(215, 95, 255), + RGBColor(215, 135, 0), + RGBColor(215, 135, 95), + RGBColor(215, 135, 135), + RGBColor(215, 135, 175), + RGBColor(215, 135, 215), + RGBColor(215, 135, 255), + RGBColor(215, 175, 0), + RGBColor(215, 175, 95), + RGBColor(215, 175, 135), + RGBColor(215, 175, 175), + RGBColor(215, 175, 215), + RGBColor(215, 175, 255), + RGBColor(215, 215, 0), + RGBColor(215, 215, 95), + RGBColor(215, 215, 135), + RGBColor(215, 215, 175), + RGBColor(215, 215, 215), + RGBColor(215, 215, 255), + RGBColor(215, 255, 0), + RGBColor(215, 255, 95), + RGBColor(215, 255, 135), + RGBColor(215, 255, 175), + RGBColor(215, 255, 215), + RGBColor(215, 255, 255), + RGBColor(255, 0, 
0), + RGBColor(255, 0, 135), + RGBColor(255, 0, 95), + RGBColor(255, 0, 175), + RGBColor(255, 0, 215), + RGBColor(255, 0, 255), + RGBColor(255, 95, 0), + RGBColor(255, 95, 95), + RGBColor(255, 95, 135), + RGBColor(255, 95, 175), + RGBColor(255, 95, 215), + RGBColor(255, 95, 255), + RGBColor(255, 135, 0), + RGBColor(255, 135, 95), + RGBColor(255, 135, 135), + RGBColor(255, 135, 175), + RGBColor(255, 135, 215), + RGBColor(255, 135, 255), + RGBColor(255, 175, 0), + RGBColor(255, 175, 95), + RGBColor(255, 175, 135), + RGBColor(255, 175, 175), + RGBColor(255, 175, 215), + RGBColor(255, 175, 255), + RGBColor(255, 215, 0), + RGBColor(255, 215, 95), + RGBColor(255, 215, 135), + RGBColor(255, 215, 175), + RGBColor(255, 215, 215), + RGBColor(255, 215, 255), + RGBColor(255, 255, 0), + RGBColor(255, 255, 95), + RGBColor(255, 255, 135), + RGBColor(255, 255, 175), + RGBColor(255, 255, 215), + RGBColor(255, 255, 255), + RGBColor(8, 8, 8), + RGBColor(18, 18, 18), + RGBColor(28, 28, 28), + RGBColor(38, 38, 38), + RGBColor(48, 48, 48), + RGBColor(58, 58, 58), + RGBColor(68, 68, 68), + RGBColor(78, 78, 78), + RGBColor(88, 88, 88), + RGBColor(98, 98, 98), + RGBColor(108, 108, 108), + RGBColor(118, 118, 118), + RGBColor(128, 128, 128), + RGBColor(138, 138, 138), + RGBColor(148, 148, 148), + RGBColor(158, 158, 158), + RGBColor(168, 168, 168), + RGBColor(178, 178, 178), + RGBColor(188, 188, 188), + RGBColor(198, 198, 198), + RGBColor(208, 208, 208), + RGBColor(218, 218, 218), + RGBColor(228, 228, 228), + RGBColor(238, 238, 238), +) diff --git a/vllm/lib/python3.10/site-packages/blessed/colorspace.pyi b/vllm/lib/python3.10/site-packages/blessed/colorspace.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a799cd01cf22c24c346069536fd11bf77df5fe32 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/colorspace.pyi @@ -0,0 +1,12 @@ +# std imports +from typing import Set, Dict, Tuple, NamedTuple + +CGA_COLORS: Set[str] + +class RGBColor(NamedTuple): + red: int + 
green: int + blue: int + +X11_COLORNAMES_TO_RGB: Dict[str, RGBColor] +RGB_256TABLE: Tuple[RGBColor, ...] diff --git a/vllm/lib/python3.10/site-packages/blessed/formatters.py b/vllm/lib/python3.10/site-packages/blessed/formatters.py new file mode 100644 index 0000000000000000000000000000000000000000..31c10979857987ba1e8f8f752ffc9a01814c1cd1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/formatters.py @@ -0,0 +1,496 @@ +"""Sub-module providing sequence-formatting functions.""" +# std imports +import platform + +# 3rd party +import six + +# local +from blessed.colorspace import CGA_COLORS, X11_COLORNAMES_TO_RGB + +# isort: off +# curses +if platform.system() == 'Windows': + import jinxed as curses # pylint: disable=import-error +else: + import curses + + +def _make_colors(): + """ + Return set of valid colors and their derivatives. + + :rtype: set + :returns: Color names with prefixes + """ + colors = set() + # basic CGA foreground color, background, high intensity, and bold + # background ('iCE colors' in my day). + for cga_color in CGA_COLORS: + colors.add(cga_color) + colors.add('on_' + cga_color) + colors.add('bright_' + cga_color) + colors.add('on_bright_' + cga_color) + + # foreground and background VGA color + for vga_color in X11_COLORNAMES_TO_RGB: + colors.add(vga_color) + colors.add('on_' + vga_color) + return colors + + +#: Valid colors and their background (on), bright, and bright-background +#: derivatives. +COLORS = _make_colors() + +#: Attributes that may be compounded with colors, by underscore, such as +#: 'reverse_indigo'. +COMPOUNDABLES = set('bold underline reverse blink italic standout'.split()) + + +class ParameterizingString(six.text_type): + r""" + A Unicode string which can be called as a parameterizing termcap. 
+ + For example:: + + >>> from blessed import Terminal + >>> term = Terminal() + >>> color = ParameterizingString(term.color, term.normal, 'color') + >>> color(9)('color #9') + u'\x1b[91mcolor #9\x1b(B\x1b[m' + """ + + def __new__(cls, cap, normal=u'', name=u''): + # pylint: disable = missing-return-doc, missing-return-type-doc + """ + Class constructor accepting 3 positional arguments. + + :arg str cap: parameterized string suitable for curses.tparm() + :arg str normal: terminating sequence for this capability (optional). + :arg str name: name of this terminal capability (optional). + """ + new = six.text_type.__new__(cls, cap) + new._normal = normal + new._name = name + return new + + def __call__(self, *args): + """ + Returning :class:`FormattingString` instance for given parameters. + + Return evaluated terminal capability (self), receiving arguments + ``*args``, followed by the terminating sequence (self.normal) into + a :class:`FormattingString` capable of being called. + + :raises TypeError: Mismatch between capability and arguments + :raises curses.error: :func:`curses.tparm` raised an exception + :rtype: :class:`FormattingString` or :class:`NullCallableString` + :returns: Callable string for given parameters + """ + try: + # Re-encode the cap, because tparm() takes a bytestring in Python + # 3. However, appear to be a plain Unicode string otherwise so + # concats work. + attr = curses.tparm(self.encode('latin1'), *args).decode('latin1') + return FormattingString(attr, self._normal) + except TypeError as err: + # If the first non-int (i.e. incorrect) arg was a string, suggest + # something intelligent: + if args and isinstance(args[0], six.string_types): + raise TypeError( + "Unknown terminal capability, %r, or, TypeError " + "for arguments %r: %s" % (self._name, args, err)) + # Somebody passed a non-string; I don't feel confident + # guessing what they were trying to do. 
+ raise + except curses.error as err: + # ignore 'tparm() returned NULL', you won't get any styling, + # even if does_styling is True. This happens on win32 platforms + # with http://www.lfd.uci.edu/~gohlke/pythonlibs/#curses installed + if "tparm() returned NULL" not in six.text_type(err): + raise + return NullCallableString() + + +class ParameterizingProxyString(six.text_type): + r""" + A Unicode string which can be called to proxy missing termcap entries. + + This class supports the function :func:`get_proxy_string`, and mirrors + the behavior of :class:`ParameterizingString`, except that instead of + a capability name, receives a format string, and callable to filter the + given positional ``*args`` of :meth:`ParameterizingProxyString.__call__` + into a terminal sequence. + + For example:: + + >>> from blessed import Terminal + >>> term = Terminal('screen') + >>> hpa = ParameterizingString(term.hpa, term.normal, 'hpa') + >>> hpa(9) + u'' + >>> fmt = u'\x1b[{0}G' + >>> fmt_arg = lambda *arg: (arg[0] + 1,) + >>> hpa = ParameterizingProxyString((fmt, fmt_arg), term.normal, 'hpa') + >>> hpa(9) + u'\x1b[10G' + """ + + def __new__(cls, fmt_pair, normal=u'', name=u''): + # pylint: disable = missing-return-doc, missing-return-type-doc + """ + Class constructor accepting 4 positional arguments. + + :arg tuple fmt_pair: Two element tuple containing: + - format string suitable for displaying terminal sequences + - callable suitable for receiving __call__ arguments for formatting string + :arg str normal: terminating sequence for this capability (optional). + :arg str name: name of this terminal capability (optional). + """ + assert isinstance(fmt_pair, tuple), fmt_pair + assert callable(fmt_pair[1]), fmt_pair[1] + new = six.text_type.__new__(cls, fmt_pair[0]) + new._fmt_args = fmt_pair[1] + new._normal = normal + new._name = name + return new + + def __call__(self, *args): + """ + Returning :class:`FormattingString` instance for given parameters. 
+ + Arguments are determined by the capability. For example, ``hpa`` + (move_x) receives only a single integer, whereas ``cup`` (move) + receives two integers. See documentation in terminfo(5) for the + given capability. + + :rtype: FormattingString + :returns: Callable string for given parameters + """ + return FormattingString(self.format(*self._fmt_args(*args)), + self._normal) + + +class FormattingString(six.text_type): + r""" + A Unicode string which doubles as a callable. + + This is used for terminal attributes, so that it may be used both + directly, or as a callable. When used directly, it simply emits + the given terminal sequence. When used as a callable, it wraps the + given (string) argument with the 2nd argument used by the class + constructor:: + + >>> from blessed import Terminal + >>> term = Terminal() + >>> style = FormattingString(term.bright_blue, term.normal) + >>> print(repr(style)) + u'\x1b[94m' + >>> style('Big Blue') + u'\x1b[94mBig Blue\x1b(B\x1b[m' + """ + + def __new__(cls, sequence, normal=u''): + # pylint: disable = missing-return-doc, missing-return-type-doc + """ + Class constructor accepting 2 positional arguments. + + :arg str sequence: terminal attribute sequence. + :arg str normal: terminating sequence for this attribute (optional). + """ + new = six.text_type.__new__(cls, sequence) + new._normal = normal + return new + + def __call__(self, *args): + """ + Return ``text`` joined by ``sequence`` and ``normal``. 
+ + :raises TypeError: Not a string type + :rtype: str + :returns: Arguments wrapped in sequence and normal + """ + # Jim Allman brings us this convenience of allowing existing + # unicode strings to be joined as a call parameter to a formatting + # string result, allowing nestation: + # + # >>> t.red('This is ', t.bold('extremely'), ' dangerous!') + for idx, ucs_part in enumerate(args): + if not isinstance(ucs_part, six.string_types): + expected_types = ', '.join(_type.__name__ for _type in six.string_types) + raise TypeError( + "TypeError for FormattingString argument, " + "%r, at position %s: expected type %s, " + "got %s" % (ucs_part, idx, expected_types, + type(ucs_part).__name__)) + postfix = u'' + if self and self._normal: + postfix = self._normal + _refresh = self._normal + self + args = [_refresh.join(ucs_part.split(self._normal)) + for ucs_part in args] + + return self + u''.join(args) + postfix + + +class FormattingOtherString(six.text_type): + r""" + A Unicode string which doubles as a callable for another sequence when called. + + This is used for the :meth:`~.Terminal.move_up`, ``down``, ``left``, and ``right()`` + family of functions:: + + >>> from blessed import Terminal + >>> term = Terminal() + >>> move_right = FormattingOtherString(term.cuf1, term.cuf) + >>> print(repr(move_right)) + u'\x1b[C' + >>> print(repr(move_right(666))) + u'\x1b[666C' + >>> print(repr(move_right())) + u'\x1b[C' + """ + + def __new__(cls, direct, target): + # pylint: disable = missing-return-doc, missing-return-type-doc + """ + Class constructor accepting 2 positional arguments. + + :arg str direct: capability name for direct formatting, eg ``('x' + term.right)``. + :arg str target: capability name for callable, eg ``('x' + term.right(99))``. + """ + new = six.text_type.__new__(cls, direct) + new._callable = target + return new + + def __getnewargs__(self): + # return arguments used for the __new__ method upon unpickling. 
+ return six.text_type.__new__(six.text_type, self), self._callable + + def __call__(self, *args): + """Return ``text`` by ``target``.""" + return self._callable(*args) if args else self + + +class NullCallableString(six.text_type): + """ + A dummy callable Unicode alternative to :class:`FormattingString`. + + This is used for colors on terminals that do not support colors, it is just a basic form of + unicode that may also act as a callable. + """ + + def __new__(cls): + """Class constructor.""" + return six.text_type.__new__(cls, u'') + + def __call__(self, *args): + """ + Allow empty string to be callable, returning given string, if any. + + When called with an int as the first arg, return an empty Unicode. An + int is a good hint that I am a :class:`ParameterizingString`, as there + are only about half a dozen string-returning capabilities listed in + terminfo(5) which accept non-int arguments, they are seldom used. + + When called with a non-int as the first arg (no no args at all), return + the first arg, acting in place of :class:`FormattingString` without + any attributes. + """ + if not args or isinstance(args[0], int): + # As a NullCallableString, even when provided with a parameter, + # such as t.color(5), we must also still be callable, fe: + # + # >>> t.color(5)('shmoo') + # + # is actually simplified result of NullCallable()() on terminals + # without color support, so turtles all the way down: we return + # another instance. + return NullCallableString() + return u''.join(args) + + +def get_proxy_string(term, attr): + """ + Proxy and return callable string for proxied attributes. + + :arg Terminal term: :class:`~.Terminal` instance. + :arg str attr: terminal capability name that may be proxied. + :rtype: None or :class:`ParameterizingProxyString`. 
+ :returns: :class:`ParameterizingProxyString` for some attributes + of some terminal types that support it, where the terminfo(5) + database would otherwise come up empty, such as ``move_x`` + attribute for ``term.kind`` of ``screen``. Otherwise, None. + """ + # normalize 'screen-256color', or 'ansi.sys' to its basic names + term_kind = next(iter(_kind for _kind in ('screen', 'ansi',) + if term.kind.startswith(_kind)), term) + _proxy_table = { # pragma: no cover + 'screen': { + # proxy move_x/move_y for 'screen' terminal type, used by tmux(1). + 'hpa': ParameterizingProxyString( + (u'\x1b[{0}G', lambda *arg: (arg[0] + 1,)), term.normal, attr), + 'vpa': ParameterizingProxyString( + (u'\x1b[{0}d', lambda *arg: (arg[0] + 1,)), term.normal, attr), + }, + 'ansi': { + # proxy show/hide cursor for 'ansi' terminal type. There is some + # demand for a richly working ANSI terminal type for some reason. + 'civis': ParameterizingProxyString( + (u'\x1b[?25l', lambda *arg: ()), term.normal, attr), + 'cnorm': ParameterizingProxyString( + (u'\x1b[?25h', lambda *arg: ()), term.normal, attr), + 'hpa': ParameterizingProxyString( + (u'\x1b[{0}G', lambda *arg: (arg[0] + 1,)), term.normal, attr), + 'vpa': ParameterizingProxyString( + (u'\x1b[{0}d', lambda *arg: (arg[0] + 1,)), term.normal, attr), + 'sc': '\x1b[s', + 'rc': '\x1b[u', + } + } + return _proxy_table.get(term_kind, {}).get(attr, None) + + +def split_compound(compound): + """ + Split compound formating string into segments. + + >>> split_compound('bold_underline_bright_blue_on_red') + ['bold', 'underline', 'bright_blue', 'on_red'] + + :arg str compound: a string that may contain compounds, separated by + underline (``_``). 
+ :rtype: list + :returns: List of formating string segments + """ + merged_segs = [] + # These occur only as prefixes, so they can always be merged: + mergeable_prefixes = ['on', 'bright', 'on_bright'] + for segment in compound.split('_'): + if merged_segs and merged_segs[-1] in mergeable_prefixes: + merged_segs[-1] += '_' + segment + else: + merged_segs.append(segment) + return merged_segs + + +def resolve_capability(term, attr): + """ + Resolve a raw terminal capability using :func:`tigetstr`. + + :arg Terminal term: :class:`~.Terminal` instance. + :arg str attr: terminal capability name. + :returns: string of the given terminal capability named by ``attr``, + which may be empty (u'') if not found or not supported by the + given :attr:`~.Terminal.kind`. + :rtype: str + """ + if not term.does_styling: + return u'' + val = curses.tigetstr(term._sugar.get(attr, attr)) # pylint: disable=protected-access + # Decode sequences as latin1, as they are always 8-bit bytes, so when + # b'\xff' is returned, this is decoded as u'\xff'. + return u'' if val is None else val.decode('latin1') + + +def resolve_color(term, color): + """ + Resolve a simple color name to a callable capability. + + This function supports :func:`resolve_attribute`. + + :arg Terminal term: :class:`~.Terminal` instance. + :arg str color: any string found in set :const:`COLORS`. + :returns: a string class instance which emits the terminal sequence + for the given color, and may be used as a callable to wrap the + given string with such sequence. + :returns: :class:`NullCallableString` when + :attr:`~.Terminal.number_of_colors` is 0, + otherwise :class:`FormattingString`. + :rtype: :class:`NullCallableString` or :class:`FormattingString` + """ + # pylint: disable=protected-access + if term.number_of_colors == 0: + return NullCallableString() + + # fg/bg capabilities terminals that support 0-256+ colors. 
+ vga_color_cap = (term._background_color if 'on_' in color else + term._foreground_color) + + base_color = color.rsplit('_', 1)[-1] + if base_color in CGA_COLORS: + # curses constants go up to only 7, so add an offset to get at the + # bright colors at 8-15: + offset = 8 if 'bright_' in color else 0 + base_color = color.rsplit('_', 1)[-1] + attr = 'COLOR_%s' % (base_color.upper(),) + fmt_attr = vga_color_cap(getattr(curses, attr) + offset) + return FormattingString(fmt_attr, term.normal) + + assert base_color in X11_COLORNAMES_TO_RGB, ( + 'color not known', base_color) + rgb = X11_COLORNAMES_TO_RGB[base_color] + + # downconvert X11 colors to CGA, EGA, or VGA color spaces + if term.number_of_colors <= 256: + fmt_attr = vga_color_cap(term.rgb_downconvert(*rgb)) + return FormattingString(fmt_attr, term.normal) + + # Modern 24-bit color terminals are written pretty basically. The + # foreground and background sequences are: + # - ^[38;2;;;m + # - ^[48;2;;;m + fgbg_seq = ('48' if 'on_' in color else '38') + assert term.number_of_colors == 1 << 24 + fmt_attr = u'\x1b[' + fgbg_seq + ';2;{0};{1};{2}m' + return FormattingString(fmt_attr.format(*rgb), term.normal) + + +def resolve_attribute(term, attr): + """ + Resolve a terminal attribute name into a capability class. + + :arg Terminal term: :class:`~.Terminal` instance. + :arg str attr: Sugary, ordinary, or compound formatted terminal + capability, such as "red_on_white", "normal", "red", or + "bold_on_black". + :returns: a string class instance which emits the terminal sequence + for the given terminal capability, or may be used as a callable to + wrap the given string with such sequence. + :returns: :class:`NullCallableString` when + :attr:`~.Terminal.number_of_colors` is 0, + otherwise :class:`FormattingString`. + :rtype: :class:`NullCallableString` or :class:`FormattingString` + """ + if attr in COLORS: + return resolve_color(term, attr) + + # A direct compoundable, such as `bold' or `on_red'. 
+ if attr in COMPOUNDABLES: + sequence = resolve_capability(term, attr) + return FormattingString(sequence, term.normal) + + # Given `bold_on_red', resolve to ('bold', 'on_red'), RECURSIVE + # call for each compounding section, joined and returned as + # a completed completed FormattingString. + formatters = split_compound(attr) + if all((fmt in COLORS or fmt in COMPOUNDABLES) for fmt in formatters): + resolution = (resolve_attribute(term, fmt) for fmt in formatters) + return FormattingString(u''.join(resolution), term.normal) + + # otherwise, this is our end-game: given a sequence such as 'csr' + # (change scrolling region), return a ParameterizingString instance, + # that when called, performs and returns the final string after curses + # capability lookup is performed. + tparm_capseq = resolve_capability(term, attr) + if not tparm_capseq: + # and, for special terminals, such as 'screen', provide a Proxy + # ParameterizingString for attributes they do not claim to support, + # but actually do! (such as 'hpa' and 'vpa'). + proxy = get_proxy_string(term, + term._sugar.get(attr, attr)) # pylint: disable=protected-access + if proxy is not None: + return proxy + + return ParameterizingString(tparm_capseq, term.normal, attr) diff --git a/vllm/lib/python3.10/site-packages/blessed/formatters.pyi b/vllm/lib/python3.10/site-packages/blessed/formatters.pyi new file mode 100644 index 0000000000000000000000000000000000000000..32a3dc2df3434f73a5392e67f99a5b7f9bab0789 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/formatters.pyi @@ -0,0 +1,70 @@ +# std imports +from typing import (Any, + Set, + List, + Type, + Tuple, + Union, + TypeVar, + Callable, + NoReturn, + Optional, + overload) + +# local +from .terminal import Terminal + +COLORS: Set[str] +COMPOUNDABLES: Set[str] + +_T = TypeVar("_T") + +class ParameterizingString(str): + def __new__(cls: Type[_T], cap: str, normal: str = ..., name: str = ...) -> _T: ... 
+ @overload + def __call__( + self, *args: int + ) -> Union["FormattingString", "NullCallableString"]: ... + @overload + def __call__(self, *args: str) -> NoReturn: ... + +class ParameterizingProxyString(str): + def __new__( + cls: Type[_T], + fmt_pair: Tuple[str, Callable[..., Tuple[object, ...]]], + normal: str = ..., + name: str = ..., + ) -> _T: ... + def __call__(self, *args: Any) -> "FormattingString": ... + +class FormattingString(str): + def __new__(cls: Type[_T], sequence: str, normal: str = ...) -> _T: ... + @overload + def __call__(self, *args: int) -> NoReturn: ... + @overload + def __call__(self, *args: str) -> str: ... + +class FormattingOtherString(str): + def __new__( + cls: Type[_T], direct: ParameterizingString, target: ParameterizingString = ... + ) -> _T: ... + def __call__(self, *args: Union[int, str]) -> str: ... + +class NullCallableString(str): + def __new__(cls: Type[_T]) -> _T: ... + @overload + def __call__(self, *args: int) -> "NullCallableString": ... + @overload + def __call__(self, *args: str) -> str: ... + +def get_proxy_string( + term: Terminal, attr: str +) -> Optional[ParameterizingProxyString]: ... +def split_compound(compound: str) -> List[str]: ... +def resolve_capability(term: Terminal, attr: str) -> str: ... +def resolve_color( + term: Terminal, color: str +) -> Union[NullCallableString, FormattingString]: ... +def resolve_attribute( + term: Terminal, attr: str +) -> Union[ParameterizingString, FormattingString]: ... 
diff --git a/vllm/lib/python3.10/site-packages/blessed/keyboard.py b/vllm/lib/python3.10/site-packages/blessed/keyboard.py new file mode 100644 index 0000000000000000000000000000000000000000..31cc98c6528de91ea63b0d4e679f1c63678ff77f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/keyboard.py @@ -0,0 +1,451 @@ +"""Sub-module providing 'keyboard awareness'.""" + +# std imports +import re +import time +import platform +from collections import OrderedDict + +# 3rd party +import six + +# isort: off +# curses +if platform.system() == 'Windows': + # pylint: disable=import-error + import jinxed as curses + from jinxed.has_key import _capability_names as capability_names +else: + import curses + from curses.has_key import _capability_names as capability_names + + +class Keystroke(six.text_type): + """ + A unicode-derived class for describing a single keystroke. + + A class instance describes a single keystroke received on input, + which may contain multiple characters as a multibyte sequence, + which is indicated by properties :attr:`is_sequence` returning + ``True``. + + When the string is a known sequence, :attr:`code` matches terminal + class attributes for comparison, such as ``term.KEY_LEFT``. + + The string-name of the sequence, such as ``u'KEY_LEFT'`` is accessed + by property :attr:`name`, and is used by the :meth:`__repr__` method + to display a human-readable form of the Keystroke this class + instance represents. It may otherwise by joined, split, or evaluated + just as as any other unicode string. 
+ """ + + def __new__(cls, ucs='', code=None, name=None): + """Class constructor.""" + new = six.text_type.__new__(cls, ucs) + new._name = name + new._code = code + return new + + @property + def is_sequence(self): + """Whether the value represents a multibyte sequence (bool).""" + return self._code is not None + + def __repr__(self): + """Docstring overwritten.""" + return (six.text_type.__repr__(self) if self._name is None else + self._name) + __repr__.__doc__ = six.text_type.__doc__ + + @property + def name(self): + """String-name of key sequence, such as ``u'KEY_LEFT'`` (str).""" + return self._name + + @property + def code(self): + """Integer keycode value of multibyte sequence (int).""" + return self._code + + +def get_curses_keycodes(): + """ + Return mapping of curses key-names paired by their keycode integer value. + + :rtype: dict + :returns: Dictionary of (name, code) pairs for curses keyboard constant + values and their mnemonic name. Such as code ``260``, with the value of + its key-name identity, ``u'KEY_LEFT'``. + """ + _keynames = [attr for attr in dir(curses) + if attr.startswith('KEY_')] + return {keyname: getattr(curses, keyname) for keyname in _keynames} + + +def get_keyboard_codes(): + """ + Return mapping of keycode integer values paired by their curses key-name. + + :rtype: dict + :returns: Dictionary of (code, name) pairs for curses keyboard constant + values and their mnemonic name. Such as key ``260``, with the value of + its identity, ``u'KEY_LEFT'``. + + These keys are derived from the attributes by the same of the curses module, + with the following exceptions: + + * ``KEY_DELETE`` in place of ``KEY_DC`` + * ``KEY_INSERT`` in place of ``KEY_IC`` + * ``KEY_PGUP`` in place of ``KEY_PPAGE`` + * ``KEY_PGDOWN`` in place of ``KEY_NPAGE`` + * ``KEY_ESCAPE`` in place of ``KEY_EXIT`` + * ``KEY_SUP`` in place of ``KEY_SR`` + * ``KEY_SDOWN`` in place of ``KEY_SF`` + + This function is the inverse of :func:`get_curses_keycodes`. 
With the + given override "mixins" listed above, the keycode for the delete key will + map to our imaginary ``KEY_DELETE`` mnemonic, effectively erasing the + phrase ``KEY_DC`` from our code vocabulary for anyone that wishes to use + the return value to determine the key-name by keycode. + """ + keycodes = OrderedDict(get_curses_keycodes()) + keycodes.update(CURSES_KEYCODE_OVERRIDE_MIXIN) + # merge _CURSES_KEYCODE_ADDINS added to our module space + keycodes.update( + (name, value) for name, value in globals().copy().items() if name.startswith('KEY_') + ) + + # invert dictionary (key, values) => (values, key), preferring the + # last-most inserted value ('KEY_DELETE' over 'KEY_DC'). + return dict(zip(keycodes.values(), keycodes.keys())) + + +def _alternative_left_right(term): + r""" + Determine and return mapping of left and right arrow keys sequences. + + :arg blessed.Terminal term: :class:`~.Terminal` instance. + :rtype: dict + :returns: Dictionary of sequences ``term._cuf1``, and ``term._cub1``, + valued as ``KEY_RIGHT``, ``KEY_LEFT`` (when appropriate). + + This function supports :func:`get_terminal_sequences` to discover + the preferred input sequence for the left and right application keys. + + It is necessary to check the value of these sequences to ensure we do not + use ``u' '`` and ``u'\b'`` for ``KEY_RIGHT`` and ``KEY_LEFT``, + preferring their true application key sequence, instead. + """ + # pylint: disable=protected-access + keymap = {} + if term._cuf1 and term._cuf1 != u' ': + keymap[term._cuf1] = curses.KEY_RIGHT + if term._cub1 and term._cub1 != u'\b': + keymap[term._cub1] = curses.KEY_LEFT + return keymap + + +def get_keyboard_sequences(term): + r""" + Return mapping of keyboard sequences paired by keycodes. + + :arg blessed.Terminal term: :class:`~.Terminal` instance. + :returns: mapping of keyboard unicode sequences paired by keycodes + as integer. This is used as the argument ``mapper`` to + the supporting function :func:`resolve_sequence`. 
+ :rtype: OrderedDict + + Initialize and return a keyboard map and sequence lookup table, + (sequence, keycode) from :class:`~.Terminal` instance ``term``, + where ``sequence`` is a multibyte input sequence of unicode + characters, such as ``u'\x1b[D'``, and ``keycode`` is an integer + value, matching curses constant such as term.KEY_LEFT. + + The return value is an OrderedDict instance, with their keys + sorted longest-first. + """ + # A small gem from curses.has_key that makes this all possible, + # _capability_names: a lookup table of terminal capability names for + # keyboard sequences (fe. kcub1, key_left), keyed by the values of + # constants found beginning with KEY_ in the main curses module + # (such as KEY_LEFT). + # + # latin1 encoding is used so that bytes in 8-bit range of 127-255 + # have equivalent chr() and unichr() values, so that the sequence + # of a kermit or avatar terminal, for example, remains unchanged + # in its byte sequence values even when represented by unicode. + # + sequence_map = dict(( + (seq.decode('latin1'), val) + for (seq, val) in ( + (curses.tigetstr(cap), val) + for (val, cap) in capability_names.items() + ) if seq + ) if term.does_styling else ()) + + sequence_map.update(_alternative_left_right(term)) + sequence_map.update(DEFAULT_SEQUENCE_MIXIN) + + # This is for fast lookup matching of sequences, preferring + # full-length sequence such as ('\x1b[D', KEY_LEFT) + # over simple sequences such as ('\x1b', KEY_EXIT). + return OrderedDict(( + (seq, sequence_map[seq]) for seq in sorted( + sequence_map.keys(), key=len, reverse=True))) + + +def get_leading_prefixes(sequences): + """ + Return a set of proper prefixes for given sequence of strings. + + :arg iterable sequences + :rtype: set + :return: Set of all string prefixes + + Given an iterable of strings, all textparts leading up to the final + string is returned as a unique set. 
This function supports the + :meth:`~.Terminal.inkey` method by determining whether the given + input is a sequence that **may** lead to a final matching pattern. + + >>> prefixes(['abc', 'abdf', 'e', 'jkl']) + set([u'a', u'ab', u'abd', u'j', u'jk']) + """ + return {seq[:i] for seq in sequences for i in range(1, len(seq))} + + +def resolve_sequence(text, mapper, codes): + r""" + Return a single :class:`Keystroke` instance for given sequence ``text``. + + :arg str text: string of characters received from terminal input stream. + :arg OrderedDict mapper: unicode multibyte sequences, such as ``u'\x1b[D'`` + paired by their integer value (260) + :arg dict codes: a :type:`dict` of integer values (such as 260) paired + by their mnemonic name, such as ``'KEY_LEFT'``. + :rtype: Keystroke + :returns: Keystroke instance for the given sequence + + The given ``text`` may extend beyond a matching sequence, such as + ``u\x1b[Dxxx`` returns a :class:`Keystroke` instance of attribute + :attr:`Keystroke.sequence` valued only ``u\x1b[D``. It is up to + calls to determine that ``xxx`` remains unresolved. + """ + for sequence, code in mapper.items(): + if text.startswith(sequence): + return Keystroke(ucs=sequence, code=code, name=codes[code]) + return Keystroke(ucs=text and text[0] or u'') + + +def _time_left(stime, timeout): + """ + Return time remaining since ``stime`` before given ``timeout``. + + This function assists determining the value of ``timeout`` for + class method :meth:`~.Terminal.kbhit` and similar functions. + + :arg float stime: starting time for measurement + :arg float timeout: timeout period, may be set to None to + indicate no timeout (where None is always returned). + :rtype: float or int + :returns: time remaining as float. If no time is remaining, + then the integer ``0`` is returned. 
+ """ + return max(0, timeout - (time.time() - stime)) if timeout else timeout + + +def _read_until(term, pattern, timeout): + """ + Convenience read-until-pattern function, supporting :meth:`~.get_location`. + + :arg blessed.Terminal term: :class:`~.Terminal` instance. + :arg float timeout: timeout period, may be set to None to indicate no + timeout (where 0 is always returned). + :arg str pattern: target regular expression pattern to seek. + :rtype: tuple + :returns: tuple in form of ``(match, str)``, *match* + may be :class:`re.MatchObject` if pattern is discovered + in input stream before timeout has elapsed, otherwise + None. ``str`` is any remaining text received exclusive + of the matching pattern). + + The reason a tuple containing non-matching data is returned, is that the + consumer should push such data back into the input buffer by + :meth:`~.Terminal.ungetch` if any was received. + + For example, when a user is performing rapid input keystrokes while its + terminal emulator surreptitiously responds to this in-band sequence, we + must ensure any such keyboard data is well-received by the next call to + term.inkey() without delay. + """ + stime = time.time() + match, buf = None, u'' + + # first, buffer all pending data. pexpect library provides a + # 'searchwindowsize' attribute that limits this memory region. We're not + # concerned about OOM conditions: only (human) keyboard input and terminal + # response sequences are expected. + + while True: # pragma: no branch + # block as long as necessary to ensure at least one character is + # received on input or remaining timeout has elapsed. + ucs = term.inkey(timeout=_time_left(stime, timeout)) + # while the keyboard buffer is "hot" (has input), we continue to + # aggregate all awaiting data. We do this to ensure slow I/O + # calls do not unnecessarily give up within the first 'while' loop + # for short timeout periods. 
+ while ucs: + buf += ucs + ucs = term.inkey(timeout=0) + + match = re.search(pattern=pattern, string=buf) + if match is not None: + # match + break + + if timeout is not None and not _time_left(stime, timeout): + # timeout + break + + return match, buf + + +#: Though we may determine *keynames* and codes for keyboard input that +#: generate multibyte sequences, it is also especially useful to aliases +#: a few basic ASCII characters such as ``KEY_TAB`` instead of ``u'\t'`` for +#: uniformity. +#: +#: Furthermore, many key-names for application keys enabled only by context +#: manager :meth:`~.Terminal.keypad` are surprisingly absent. We inject them +#: here directly into the curses module. +_CURSES_KEYCODE_ADDINS = ( + 'TAB', + 'KP_MULTIPLY', + 'KP_ADD', + 'KP_SEPARATOR', + 'KP_SUBTRACT', + 'KP_DECIMAL', + 'KP_DIVIDE', + 'KP_EQUAL', + 'KP_0', + 'KP_1', + 'KP_2', + 'KP_3', + 'KP_4', + 'KP_5', + 'KP_6', + 'KP_7', + 'KP_8', + 'KP_9') + +_LASTVAL = max(get_curses_keycodes().values()) +for keycode_name in _CURSES_KEYCODE_ADDINS: + _LASTVAL += 1 + globals()['KEY_' + keycode_name] = _LASTVAL + +#: In a perfect world, terminal emulators would always send exactly what +#: the terminfo(5) capability database plans for them, accordingly by the +#: value of the ``TERM`` name they declare. +#: +#: But this isn't a perfect world. Many vt220-derived terminals, such as +#: those declaring 'xterm', will continue to send vt220 codes instead of +#: their native-declared codes, for backwards-compatibility. +#: +#: This goes for many: rxvt, putty, iTerm. +#: +#: These "mixins" are used for *all* terminals, regardless of their type. +#: +#: Furthermore, curses does not provide sequences sent by the keypad, +#: at least, it does not provide a way to distinguish between keypad 0 +#: and numeric 0. +DEFAULT_SEQUENCE_MIXIN = ( + # these common control characters (and 127, ctrl+'?') mapped to + # an application key definition. 
+ (six.unichr(10), curses.KEY_ENTER), + (six.unichr(13), curses.KEY_ENTER), + (six.unichr(8), curses.KEY_BACKSPACE), + (six.unichr(9), KEY_TAB), # noqa # pylint: disable=undefined-variable + (six.unichr(27), curses.KEY_EXIT), + (six.unichr(127), curses.KEY_BACKSPACE), + + (u"\x1b[A", curses.KEY_UP), + (u"\x1b[B", curses.KEY_DOWN), + (u"\x1b[C", curses.KEY_RIGHT), + (u"\x1b[D", curses.KEY_LEFT), + (u"\x1b[1;2A", curses.KEY_SR), + (u"\x1b[1;2B", curses.KEY_SF), + (u"\x1b[1;2C", curses.KEY_SRIGHT), + (u"\x1b[1;2D", curses.KEY_SLEFT), + (u"\x1b[F", curses.KEY_END), + (u"\x1b[H", curses.KEY_HOME), + # not sure where these are from .. please report + (u"\x1b[K", curses.KEY_END), + (u"\x1b[U", curses.KEY_NPAGE), + (u"\x1b[V", curses.KEY_PPAGE), + + # keys sent after term.smkx (keypad_xmit) is emitted, source: + # http://www.xfree86.org/current/ctlseqs.html#PC-Style%20Function%20Keys + # http://fossies.org/linux/rxvt/doc/rxvtRef.html#KeyCodes + # + # keypad, numlock on + (u"\x1bOM", curses.KEY_ENTER), # noqa return + (u"\x1bOj", KEY_KP_MULTIPLY), # noqa * # pylint: disable=undefined-variable + (u"\x1bOk", KEY_KP_ADD), # noqa + # pylint: disable=undefined-variable + (u"\x1bOl", KEY_KP_SEPARATOR), # noqa , # pylint: disable=undefined-variable + (u"\x1bOm", KEY_KP_SUBTRACT), # noqa - # pylint: disable=undefined-variable + (u"\x1bOn", KEY_KP_DECIMAL), # noqa . 
# pylint: disable=undefined-variable + (u"\x1bOo", KEY_KP_DIVIDE), # noqa / # pylint: disable=undefined-variable + (u"\x1bOX", KEY_KP_EQUAL), # noqa = # pylint: disable=undefined-variable + (u"\x1bOp", KEY_KP_0), # noqa 0 # pylint: disable=undefined-variable + (u"\x1bOq", KEY_KP_1), # noqa 1 # pylint: disable=undefined-variable + (u"\x1bOr", KEY_KP_2), # noqa 2 # pylint: disable=undefined-variable + (u"\x1bOs", KEY_KP_3), # noqa 3 # pylint: disable=undefined-variable + (u"\x1bOt", KEY_KP_4), # noqa 4 # pylint: disable=undefined-variable + (u"\x1bOu", KEY_KP_5), # noqa 5 # pylint: disable=undefined-variable + (u"\x1bOv", KEY_KP_6), # noqa 6 # pylint: disable=undefined-variable + (u"\x1bOw", KEY_KP_7), # noqa 7 # pylint: disable=undefined-variable + (u"\x1bOx", KEY_KP_8), # noqa 8 # pylint: disable=undefined-variable + (u"\x1bOy", KEY_KP_9), # noqa 9 # pylint: disable=undefined-variable + + # keypad, numlock off + (u"\x1b[1~", curses.KEY_FIND), # find + (u"\x1b[2~", curses.KEY_IC), # insert (0) + (u"\x1b[3~", curses.KEY_DC), # delete (.), "Execute" + (u"\x1b[4~", curses.KEY_SELECT), # select + (u"\x1b[5~", curses.KEY_PPAGE), # pgup (9) + (u"\x1b[6~", curses.KEY_NPAGE), # pgdown (3) + (u"\x1b[7~", curses.KEY_HOME), # home + (u"\x1b[8~", curses.KEY_END), # end + (u"\x1b[OA", curses.KEY_UP), # up (8) + (u"\x1b[OB", curses.KEY_DOWN), # down (2) + (u"\x1b[OC", curses.KEY_RIGHT), # right (6) + (u"\x1b[OD", curses.KEY_LEFT), # left (4) + (u"\x1b[OF", curses.KEY_END), # end (1) + (u"\x1b[OH", curses.KEY_HOME), # home (7) + + # The vt220 placed F1-F4 above the keypad, in place of actual + # F1-F4 were local functions (hold screen, print screen, + # set up, data/talk, break). + (u"\x1bOP", curses.KEY_F1), + (u"\x1bOQ", curses.KEY_F2), + (u"\x1bOR", curses.KEY_F3), + (u"\x1bOS", curses.KEY_F4), +) + +#: Override mixins for a few curses constants with easier +#: mnemonics: there may only be a 1:1 mapping when only a +#: keycode (int) is given, where these phrases are preferred. 
+CURSES_KEYCODE_OVERRIDE_MIXIN = ( + ('KEY_DELETE', curses.KEY_DC), + ('KEY_INSERT', curses.KEY_IC), + ('KEY_PGUP', curses.KEY_PPAGE), + ('KEY_PGDOWN', curses.KEY_NPAGE), + ('KEY_ESCAPE', curses.KEY_EXIT), + ('KEY_SUP', curses.KEY_SR), + ('KEY_SDOWN', curses.KEY_SF), + ('KEY_UP_LEFT', curses.KEY_A1), + ('KEY_UP_RIGHT', curses.KEY_A3), + ('KEY_CENTER', curses.KEY_B2), + ('KEY_BEGIN', curses.KEY_BEG), +) + +__all__ = ('Keystroke', 'get_keyboard_codes', 'get_keyboard_sequences',) diff --git a/vllm/lib/python3.10/site-packages/blessed/keyboard.pyi b/vllm/lib/python3.10/site-packages/blessed/keyboard.pyi new file mode 100644 index 0000000000000000000000000000000000000000..ae76393f14d1e255f81f5e6743c86c602647cf51 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/keyboard.pyi @@ -0,0 +1,28 @@ +# std imports +from typing import Set, Dict, Type, Mapping, TypeVar, Iterable, Optional, OrderedDict + +# local +from .terminal import Terminal + +_T = TypeVar("_T") + +class Keystroke(str): + def __new__( + cls: Type[_T], + ucs: str = ..., + code: Optional[int] = ..., + name: Optional[str] = ..., + ) -> _T: ... + @property + def is_sequence(self) -> bool: ... + @property + def name(self) -> Optional[str]: ... + @property + def code(self) -> Optional[int]: ... + +def get_keyboard_codes() -> Dict[int, str]: ... +def get_keyboard_sequences(term: Terminal) -> OrderedDict[str, int]: ... +def get_leading_prefixes(sequences: Iterable[str]) -> Set[str]: ... +def resolve_sequence( + text: str, mapper: Mapping[str, int], codes: Mapping[int, str] +) -> Keystroke: ... 
diff --git a/vllm/lib/python3.10/site-packages/blessed/py.typed b/vllm/lib/python3.10/site-packages/blessed/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/blessed/sequences.py b/vllm/lib/python3.10/site-packages/blessed/sequences.py new file mode 100644 index 0000000000000000000000000000000000000000..a5fe0d61f2b38e519639e5e5481b8d4a40997453 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/sequences.py @@ -0,0 +1,461 @@ +# -*- coding: utf-8 -*- +"""Module providing 'sequence awareness'.""" +# std imports +import re +import math +import textwrap + +# 3rd party +import six +from wcwidth import wcwidth + +# local +from blessed._capabilities import CAPABILITIES_CAUSE_MOVEMENT + +__all__ = ('Sequence', 'SequenceTextWrapper', 'iter_parse', 'measure_length') + + +class Termcap(object): + """Terminal capability of given variable name and pattern.""" + + def __init__(self, name, pattern, attribute): + """ + Class initializer. + + :arg str name: name describing capability. + :arg str pattern: regular expression string. + :arg str attribute: :class:`~.Terminal` attribute used to build + this terminal capability. 
+ """ + self.name = name + self.pattern = pattern + self.attribute = attribute + self._re_compiled = None + + def __repr__(self): + # pylint: disable=redundant-keyword-arg + return ''.format(self=self) + + @property + def named_pattern(self): + """Regular expression pattern for capability with named group.""" + # pylint: disable=redundant-keyword-arg + return '(?P<{self.name}>{self.pattern})'.format(self=self) + + @property + def re_compiled(self): + """Compiled regular expression pattern for capability.""" + if self._re_compiled is None: + self._re_compiled = re.compile(self.pattern) + return self._re_compiled + + @property + def will_move(self): + """Whether capability causes cursor movement.""" + return self.name in CAPABILITIES_CAUSE_MOVEMENT + + def horizontal_distance(self, text): + """ + Horizontal carriage adjusted by capability, may be negative. + + :rtype: int + :arg str text: for capabilities *parm_left_cursor*, + *parm_right_cursor*, provide the matching sequence + text, its interpreted distance is returned. + + :returns: 0 except for matching ' + """ + value = { + 'cursor_left': -1, + 'backspace': -1, + 'cursor_right': 1, + 'tab': 8, + 'ascii_tab': 8, + }.get(self.name) + if value is not None: + return value + + unit = { + 'parm_left_cursor': -1, + 'parm_right_cursor': 1 + }.get(self.name) + if unit is not None: + value = int(self.re_compiled.match(text).group(1)) + return unit * value + + return 0 + + # pylint: disable=too-many-arguments + @classmethod + def build(cls, name, capability, attribute, nparams=0, + numeric=99, match_grouped=False, match_any=False, + match_optional=False): + r""" + Class factory builder for given capability definition. + + :arg str name: Variable name given for this pattern. + :arg str capability: A unicode string representing a terminal + capability to build for. When ``nparams`` is non-zero, it + must be a callable unicode string (such as the result from + ``getattr(term, 'bold')``. 
+ :arg str attribute: The terminfo(5) capability name by which this + pattern is known. + :arg int nparams: number of positional arguments for callable. + :arg int numeric: Value to substitute into capability to when generating pattern + :arg bool match_grouped: If the numeric pattern should be + grouped, ``(\d+)`` when ``True``, ``\d+`` default. + :arg bool match_any: When keyword argument ``nparams`` is given, + *any* numeric found in output is suitable for building as + pattern ``(\d+)``. Otherwise, only the first matching value of + range *(numeric - 1)* through *(numeric + 1)* will be replaced by + pattern ``(\d+)`` in builder. + :arg bool match_optional: When ``True``, building of numeric patterns + containing ``(\d+)`` will be built as optional, ``(\d+)?``. + :rtype: blessed.sequences.Termcap + :returns: Terminal capability instance for given capability definition + """ + _numeric_regex = r'\d+' + if match_grouped: + _numeric_regex = r'(\d+)' + if match_optional: + _numeric_regex = r'(\d+)?' + numeric = 99 if numeric is None else numeric + + # basic capability attribute, not used as a callable + if nparams == 0: + return cls(name, re.escape(capability), attribute) + + # a callable capability accepting numeric argument + _outp = re.escape(capability(*(numeric,) * nparams)) + if not match_any: + for num in range(numeric - 1, numeric + 2): + if str(num) in _outp: + pattern = _outp.replace(str(num), _numeric_regex) + return cls(name, pattern, attribute) + + if match_grouped: + pattern = re.sub(r'(\d+)', lambda x: _numeric_regex, _outp) + else: + pattern = re.sub(r'\d+', lambda x: _numeric_regex, _outp) + return cls(name, pattern, attribute) + + +class SequenceTextWrapper(textwrap.TextWrapper): + """Docstring overridden.""" + + def __init__(self, width, term, **kwargs): + """ + Class initializer. + + This class supports the :meth:`~.Terminal.wrap` method. 
+ """ + self.term = term + textwrap.TextWrapper.__init__(self, width, **kwargs) + + def _wrap_chunks(self, chunks): + """ + Sequence-aware variant of :meth:`textwrap.TextWrapper._wrap_chunks`. + + :raises ValueError: ``self.width`` is not a positive integer + :rtype: list + :returns: text chunks adjusted for width + + This simply ensures that word boundaries are not broken mid-sequence, as standard python + textwrap would incorrectly determine the length of a string containing sequences, and may + also break consider sequences part of a "word" that may be broken by hyphen (``-``), where + this implementation corrects both. + """ + lines = [] + if self.width <= 0 or not isinstance(self.width, int): + raise ValueError( + "invalid width {0!r}({1!r}) (must be integer > 0)" + .format(self.width, type(self.width))) + + term = self.term + drop_whitespace = not hasattr(self, 'drop_whitespace' + ) or self.drop_whitespace + chunks.reverse() + while chunks: + cur_line = [] + cur_len = 0 + indent = self.subsequent_indent if lines else self.initial_indent + width = self.width - len(indent) + if drop_whitespace and ( + Sequence(chunks[-1], term).strip() == '' and lines): + del chunks[-1] + while chunks: + chunk_len = Sequence(chunks[-1], term).length() + if cur_len + chunk_len > width: + break + cur_line.append(chunks.pop()) + cur_len += chunk_len + if chunks and Sequence(chunks[-1], term).length() > width: + self._handle_long_word(chunks, cur_line, cur_len, width) + if drop_whitespace and ( + cur_line and Sequence(cur_line[-1], term).strip() == ''): + del cur_line[-1] + if cur_line: + lines.append(indent + u''.join(cur_line)) + return lines + + def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width): + """ + Sequence-aware :meth:`textwrap.TextWrapper._handle_long_word`. 
+ + This simply ensures that word boundaries are not broken mid-sequence, as standard python + textwrap would incorrectly determine the length of a string containing sequences, and may + also break consider sequences part of a "word" that may be broken by hyphen (``-``), where + this implementation corrects both. + """ + # Figure out when indent is larger than the specified width, and make + # sure at least one character is stripped off on every pass + space_left = 1 if width < 1 else width - cur_len + # If we're allowed to break long words, then do so: put as much + # of the next chunk onto the current line as will fit. + + if self.break_long_words: + term = self.term + chunk = reversed_chunks[-1] + idx = nxt = 0 + for text, _ in iter_parse(term, chunk): + nxt += len(text) + if Sequence(chunk[:nxt], term).length() > space_left: + break + idx = nxt + cur_line.append(chunk[:idx]) + reversed_chunks[-1] = chunk[idx:] + + # Otherwise, we have to preserve the long word intact. Only add + # it to the current line if there's nothing already there -- + # that minimizes how much we violate the width constraint. + elif not cur_line: + cur_line.append(reversed_chunks.pop()) + + # If we're not allowed to break long words, and there's already + # text on the current line, do nothing. Next time through the + # main loop of _wrap_chunks(), we'll wind up here again, but + # cur_len will be zero, so the next line will be entirely + # devoted to the long word that we can't handle right now. + + +SequenceTextWrapper.__doc__ = textwrap.TextWrapper.__doc__ + + +class Sequence(six.text_type): + """ + A "sequence-aware" version of the base :class:`str` class. + + This unicode-derived class understands the effect of escape sequences + of printable length, allowing a properly implemented :meth:`rjust`, + :meth:`ljust`, :meth:`center`, and :meth:`length`. 
+ """ + + def __new__(cls, sequence_text, term): + # pylint: disable = missing-return-doc, missing-return-type-doc + """ + Class constructor. + + :arg str sequence_text: A string that may contain sequences. + :arg blessed.Terminal term: :class:`~.Terminal` instance. + """ + new = six.text_type.__new__(cls, sequence_text) + new._term = term + return new + + def ljust(self, width, fillchar=u' '): + """ + Return string containing sequences, left-adjusted. + + :arg int width: Total width given to left-adjust ``text``. If + unspecified, the width of the attached terminal is used (default). + :arg str fillchar: String for padding right-of ``text``. + :returns: String of ``text``, left-aligned by ``width``. + :rtype: str + """ + rightside = fillchar * int( + (max(0.0, float(width.__index__() - self.length()))) / float(len(fillchar))) + return u''.join((self, rightside)) + + def rjust(self, width, fillchar=u' '): + """ + Return string containing sequences, right-adjusted. + + :arg int width: Total width given to right-adjust ``text``. If + unspecified, the width of the attached terminal is used (default). + :arg str fillchar: String for padding left-of ``text``. + :returns: String of ``text``, right-aligned by ``width``. + :rtype: str + """ + leftside = fillchar * int( + (max(0.0, float(width.__index__() - self.length()))) / float(len(fillchar))) + return u''.join((leftside, self)) + + def center(self, width, fillchar=u' '): + """ + Return string containing sequences, centered. + + :arg int width: Total width given to center ``text``. If + unspecified, the width of the attached terminal is used (default). + :arg str fillchar: String for padding left and right-of ``text``. + :returns: String of ``text``, centered by ``width``. 
+ :rtype: str + """ + split = max(0.0, float(width.__index__()) - self.length()) / 2 + leftside = fillchar * int( + (max(0.0, math.floor(split))) / float(len(fillchar))) + rightside = fillchar * int( + (max(0.0, math.ceil(split))) / float(len(fillchar))) + return u''.join((leftside, self, rightside)) + + def truncate(self, width): + """ + Truncate a string in a sequence-aware manner. + + Any printable characters beyond ``width`` are removed, while all + sequences remain in place. Horizontal Sequences are first expanded + by :meth:`padd`. + + :arg int width: The printable width to truncate the string to. + :rtype: str + :returns: String truncated to at most ``width`` printable characters. + """ + output = "" + current_width = 0 + target_width = width.__index__() + parsed_seq = iter_parse(self._term, self.padd()) + + # Retain all text until non-cap width reaches desired width + for text, cap in parsed_seq: + if not cap: + # use wcwidth clipped to 0 because it can sometimes return -1 + current_width += max(wcwidth(text), 0) + if current_width > target_width: + break + output += text + + # Return with remaining caps appended + return output + ''.join(text for text, cap in parsed_seq if cap) + + def length(self): + r""" + Return the printable length of string containing sequences. + + Strings containing ``term.left`` or ``\b`` will cause "overstrike", + but a length less than 0 is not ever returned. So ``_\b+`` is a + length of 1 (displays as ``+``), but ``\b`` alone is simply a + length of 0. + + Some characters may consume more than one cell, mainly those CJK + Unified Ideographs (Chinese, Japanese, Korean) defined by Unicode + as half or full-width characters. + + For example: + + >>> from blessed import Terminal + >>> from blessed.sequences import Sequence + >>> term = Terminal() + >>> msg = term.clear + term.red(u'コンニチハ') + >>> Sequence(msg, term).length() + 10 + + .. 
note:: Although accounted for, strings containing sequences such + as ``term.clear`` will not give accurate returns, it is not + considered lengthy (a length of 0). + """ + # because control characters may return -1, "clip" their length to 0. + return sum(max(wcwidth(w_char), 0) for w_char in self.padd(strip=True)) + + def strip(self, chars=None): + """ + Return string of sequences, leading and trailing whitespace removed. + + :arg str chars: Remove characters in chars instead of whitespace. + :rtype: str + :returns: string of sequences with leading and trailing whitespace removed. + """ + return self.strip_seqs().strip(chars) + + def lstrip(self, chars=None): + """ + Return string of all sequences and leading whitespace removed. + + :arg str chars: Remove characters in chars instead of whitespace. + :rtype: str + :returns: string of sequences with leading removed. + """ + return self.strip_seqs().lstrip(chars) + + def rstrip(self, chars=None): + """ + Return string of all sequences and trailing whitespace removed. + + :arg str chars: Remove characters in chars instead of whitespace. + :rtype: str + :returns: string of sequences with trailing removed. + """ + return self.strip_seqs().rstrip(chars) + + def strip_seqs(self): + """ + Return ``text`` stripped of only its terminal sequences. + + :rtype: str + :returns: Text with terminal sequences removed + """ + return self.padd(strip=True) + + def padd(self, strip=False): + """ + Return non-destructive horizontal movement as destructive spacing. 
+ + :arg bool strip: Strip terminal sequences + :rtype: str + :returns: Text adjusted for horizontal movement + """ + outp = '' + for text, cap in iter_parse(self._term, self): + if not cap: + outp += text + continue + + value = cap.horizontal_distance(text) + if value > 0: + outp += ' ' * value + elif value < 0: + outp = outp[:value] + elif not strip: + outp += text + return outp + + +def iter_parse(term, text): + """ + Generator yields (text, capability) for characters of ``text``. + + value for ``capability`` may be ``None``, where ``text`` is + :class:`str` of length 1. Otherwise, ``text`` is a full + matching sequence of given capability. + """ + for match in term._caps_compiled_any.finditer(text): # pylint: disable=protected-access + name = match.lastgroup + value = match.group(name) + if name == 'MISMATCH': + yield (value, None) + else: + yield value, term.caps[name] + + +def measure_length(text, term): + """ + .. deprecated:: 1.12.0. + + :rtype: int + :returns: Length of the first sequence in the string + """ + try: + text, capability = next(iter_parse(term, text)) + if capability: + return len(text) + except StopIteration: + return 0 + return 0 diff --git a/vllm/lib/python3.10/site-packages/blessed/sequences.pyi b/vllm/lib/python3.10/site-packages/blessed/sequences.pyi new file mode 100644 index 0000000000000000000000000000000000000000..4460b7a466aad9d677f31dd84b855ce79791481d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/sequences.pyi @@ -0,0 +1,55 @@ +# std imports +import textwrap +from typing import Any, Type, Tuple, Pattern, TypeVar, Iterator, Optional, SupportsIndex + +# local +from .terminal import Terminal + +_T = TypeVar("_T") + +class Termcap: + name: str = ... + pattern: str = ... + attribute: str = ... + def __init__(self, name: str, pattern: str, attribute: str) -> None: ... + @property + def named_pattern(self) -> str: ... + @property + def re_compiled(self) -> Pattern[str]: ... + @property + def will_move(self) -> bool: ... 
+ def horizontal_distance(self, text: str) -> int: ... + @classmethod + def build( + cls, + name: str, + capability: str, + attribute: str, + nparams: int = ..., + numeric: int = ..., + match_grouped: bool = ..., + match_any: bool = ..., + match_optional: bool = ..., + ) -> "Termcap": ... + +class SequenceTextWrapper(textwrap.TextWrapper): + term: Terminal = ... + def __init__(self, width: int, term: Terminal, **kwargs: Any) -> None: ... + +class Sequence(str): + def __new__(cls: Type[_T], sequence_text: str, term: Terminal) -> _T: ... + def ljust(self, width: SupportsIndex, fillchar: str = ...) -> str: ... + def rjust(self, width: SupportsIndex, fillchar: str = ...) -> str: ... + def center(self, width: SupportsIndex, fillchar: str = ...) -> str: ... + def truncate(self, width: SupportsIndex) -> str: ... + def length(self) -> int: ... + def strip(self, chars: Optional[str] = ...) -> str: ... + def lstrip(self, chars: Optional[str] = ...) -> str: ... + def rstrip(self, chars: Optional[str] = ...) -> str: ... + def strip_seqs(self) -> str: ... + def padd(self, strip: bool = ...) -> str: ... + +def iter_parse( + term: Terminal, text: str +) -> Iterator[Tuple[str, Optional[Termcap]]]: ... +def measure_length(text: str, term: Terminal) -> int: ... 
diff --git a/vllm/lib/python3.10/site-packages/blessed/terminal.py b/vllm/lib/python3.10/site-packages/blessed/terminal.py new file mode 100644 index 0000000000000000000000000000000000000000..7b6165b5d00a4fe9b8645af80aef3fe62f9b1d04 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/terminal.py @@ -0,0 +1,1552 @@ +# -*- coding: utf-8 -*- +# pylint: disable=too-many-lines +"""Module containing :class:`Terminal`, the primary API entry point.""" +# std imports +import os +import re +import sys +import time +import codecs +import locale +import select +import struct +import platform +import warnings +import functools +import contextlib +import collections + +# local +from .color import COLOR_DISTANCE_ALGORITHMS +from .keyboard import (_time_left, + _read_until, + resolve_sequence, + get_keyboard_codes, + get_leading_prefixes, + get_keyboard_sequences) +from .sequences import Termcap, Sequence, SequenceTextWrapper +from .colorspace import RGB_256TABLE +from .formatters import (COLORS, + COMPOUNDABLES, + FormattingString, + NullCallableString, + ParameterizingString, + FormattingOtherString, + split_compound, + resolve_attribute, + resolve_capability) +from ._capabilities import CAPABILITY_DATABASE, CAPABILITIES_ADDITIVES, CAPABILITIES_RAW_MIXIN + +# isort: off + +# Alias py2 exception to py3 +if sys.version_info[:2] < (3, 3): + InterruptedError = select.error # pylint: disable=redefined-builtin + + +HAS_TTY = True +if platform.system() == 'Windows': + IS_WINDOWS = True + import jinxed as curses # pylint: disable=import-error + from jinxed.win32 import get_console_input_encoding # pylint: disable=import-error +else: + IS_WINDOWS = False + import curses + + try: + import fcntl + import termios + import tty + except ImportError: + _TTY_METHODS = ('setraw', 'cbreak', 'kbhit', 'height', 'width') + _MSG_NOSUPPORT = ( + "One or more of the modules: 'termios', 'fcntl', and 'tty' " + "are not found on your platform '{platform}'. 
" + "The following methods of Terminal are dummy/no-op " + "unless a deriving class overrides them: {tty_methods}." + .format(platform=platform.system(), + tty_methods=', '.join(_TTY_METHODS))) + warnings.warn(_MSG_NOSUPPORT) + HAS_TTY = False + +_CUR_TERM = None # See comments at end of file + + +class Terminal(object): + """ + An abstraction for color, style, positioning, and input in the terminal. + + This keeps the endless calls to ``tigetstr()`` and ``tparm()`` out of your code, acts + intelligently when somebody pipes your output to a non-terminal, and abstracts over the + complexity of unbuffered keyboard input. It uses the terminfo database to remain portable across + terminal types. + """ + # pylint: disable=too-many-instance-attributes,too-many-public-methods + # Too many public methods (28/20) + # Too many instance attributes (12/7) + + #: Sugary names for commonly-used capabilities + _sugar = { + 'save': 'sc', + 'restore': 'rc', + 'clear_eol': 'el', + 'clear_bol': 'el1', + 'clear_eos': 'ed', + 'enter_fullscreen': 'smcup', + 'exit_fullscreen': 'rmcup', + 'move': 'cup', + 'move_yx': 'cup', + 'move_x': 'hpa', + 'move_y': 'vpa', + 'hide_cursor': 'civis', + 'normal_cursor': 'cnorm', + 'reset_colors': 'op', + 'normal': 'sgr0', + 'reverse': 'rev', + 'italic': 'sitm', + 'no_italic': 'ritm', + 'shadow': 'sshm', + 'no_shadow': 'rshm', + 'standout': 'smso', + 'no_standout': 'rmso', + 'subscript': 'ssubm', + 'no_subscript': 'rsubm', + 'superscript': 'ssupm', + 'no_superscript': 'rsupm', + 'underline': 'smul', + 'no_underline': 'rmul', + 'cursor_report': 'u6', + 'cursor_request': 'u7', + 'terminal_answerback': 'u8', + 'terminal_enquire': 'u9', + } + + def __init__(self, kind=None, stream=None, force_styling=False): + """ + Initialize the terminal. + + :arg str kind: A terminal string as taken by :func:`curses.setupterm`. + Defaults to the value of the ``TERM`` environment variable. + + .. note:: Terminals withing a single process must share a common + ``kind``. 
See :obj:`_CUR_TERM`. + + :arg file stream: A file-like object representing the Terminal output. + Defaults to the original value of :obj:`sys.__stdout__`, like + :func:`curses.initscr` does. + + If ``stream`` is not a tty, empty Unicode strings are returned for + all capability values, so things like piping your program output to + a pipe or file does not emit terminal sequences. + + :arg bool force_styling: Whether to force the emission of capabilities + even if :obj:`sys.__stdout__` does not seem to be connected to a + terminal. If you want to force styling to not happen, use + ``force_styling=None``. + + This comes in handy if users are trying to pipe your output through + something like ``less -r`` or build systems which support decoding + of terminal sequences. + """ + # pylint: disable=global-statement,too-many-branches + global _CUR_TERM + self.errors = ['parameters: kind=%r, stream=%r, force_styling=%r' % + (kind, stream, force_styling)] + self._normal = None # cache normal attr, preventing recursive lookups + # we assume our input stream to be line-buffered until either the + # cbreak of raw context manager methods are entered with an attached tty. + self._line_buffered = True + + self._stream = stream + self._keyboard_fd = None + self._init_descriptor = None + self._is_a_tty = False + self.__init__streams() + + if IS_WINDOWS and self._init_descriptor is not None: + self._kind = kind or curses.get_term(self._init_descriptor) + else: + self._kind = kind or os.environ.get('TERM', 'dumb') or 'dumb' + + self._does_styling = False + if force_styling is None and self.is_a_tty: + self.errors.append('force_styling is None') + elif force_styling or self.is_a_tty: + self._does_styling = True + + if self.does_styling: + # Initialize curses (call setupterm), so things like tigetstr() work. 
+ try: + curses.setupterm(self._kind, self._init_descriptor) + except curses.error as err: + msg = 'Failed to setupterm(kind={0!r}): {1}'.format(self._kind, err) + warnings.warn(msg) + self.errors.append(msg) + self._kind = None + self._does_styling = False + else: + if _CUR_TERM is None or self._kind == _CUR_TERM: + _CUR_TERM = self._kind + else: + # termcap 'kind' is immutable in a python process! Once + # initialized by setupterm, it is unsupported by the + # 'curses' module to change the terminal type again. If you + # are a downstream developer and you need this + # functionality, consider sub-processing, instead. + warnings.warn( + 'A terminal of kind "%s" has been requested; due to an' + ' internal python curses bug, terminal capabilities' + ' for a terminal of kind "%s" will continue to be' + ' returned for the remainder of this process.' % ( + self._kind, _CUR_TERM,)) + + self.__init__color_capabilities() + self.__init__capabilities() + self.__init__keycodes() + + def __init__streams(self): + # pylint: disable=too-complex,too-many-branches + # Agree to disagree ! + stream_fd = None + + # Default stream is stdout + if self._stream is None: + self._stream = sys.__stdout__ + + if not hasattr(self._stream, 'fileno'): + self.errors.append('stream has no fileno method') + elif not callable(self._stream.fileno): + self.errors.append('stream.fileno is not callable') + else: + try: + stream_fd = self._stream.fileno() + except ValueError as err: + # The stream is not a file, such as the case of StringIO, or, when it has been + # "detached", such as might be the case of stdout in some test scenarios. + self.errors.append('Unable to determine output stream file descriptor: %s' % err) + else: + self._is_a_tty = os.isatty(stream_fd) + if not self._is_a_tty: + self.errors.append('stream not a TTY') + + # Keyboard valid as stdin only when output stream is stdout or stderr and is a tty. 
+ if self._stream in (sys.__stdout__, sys.__stderr__): + try: + self._keyboard_fd = sys.__stdin__.fileno() + except (AttributeError, ValueError) as err: + self.errors.append('Unable to determine input stream file descriptor: %s' % err) + else: + # _keyboard_fd only non-None if both stdin and stdout is a tty. + if not self.is_a_tty: + self.errors.append('Output stream is not a TTY') + self._keyboard_fd = None + elif not os.isatty(self._keyboard_fd): + self.errors.append('Input stream is not a TTY') + self._keyboard_fd = None + else: + self.errors.append('Output stream is not a default stream') + + # The descriptor to direct terminal initialization sequences to. + self._init_descriptor = stream_fd + if stream_fd is None: + try: + self._init_descriptor = sys.__stdout__.fileno() + except ValueError as err: + self.errors.append('Unable to determine __stdout__ file descriptor: %s' % err) + + def __init__color_capabilities(self): + self._color_distance_algorithm = 'cie2000' + if not self.does_styling: + self.number_of_colors = 0 + elif IS_WINDOWS or os.environ.get('COLORTERM') in ('truecolor', '24bit'): + self.number_of_colors = 1 << 24 + else: + self.number_of_colors = max(0, curses.tigetnum('colors') or -1) + + def __clear_color_capabilities(self): + for cached_color_cap in set(dir(self)) & COLORS: + delattr(self, cached_color_cap) + + def __init__capabilities(self): + # important that we lay these in their ordered direction, so that our + # preferred, 'color' over 'set_a_attributes1', for example. + self.caps = collections.OrderedDict() + + # some static injected patterns, esp. without named attribute access. 
+ for name, (attribute, pattern) in CAPABILITIES_ADDITIVES.items(): + self.caps[name] = Termcap(name, pattern, attribute) + + for name, (attribute, kwds) in CAPABILITY_DATABASE.items(): + if self.does_styling: + # attempt dynamic lookup + cap = getattr(self, attribute) + if cap: + self.caps[name] = Termcap.build( + name, cap, attribute, **kwds) + continue + + # fall-back + pattern = CAPABILITIES_RAW_MIXIN.get(name) + if pattern: + self.caps[name] = Termcap(name, pattern, attribute) + + # make a compiled named regular expression table + self.caps_compiled = re.compile( + '|'.join(cap.pattern for name, cap in self.caps.items())) + + # for tokenizer, the '.lastgroup' is the primary lookup key for + # 'self.caps', unless 'MISMATCH'; then it is an unmatched character. + self._caps_compiled_any = re.compile('|'.join( + cap.named_pattern for name, cap in self.caps.items() + ) + '|(?P.)') + self._caps_unnamed_any = re.compile('|'.join( + '({0})'.format(cap.pattern) for name, cap in self.caps.items() + ) + '|(.)') + + def __init__keycodes(self): + # Initialize keyboard data determined by capability. + # Build database of int code <=> KEY_NAME. + self._keycodes = get_keyboard_codes() + + # Store attributes as: self.KEY_NAME = code. + for key_code, key_name in self._keycodes.items(): + setattr(self, key_name, key_code) + + # Build database of sequence <=> KEY_NAME. 
+ self._keymap = get_keyboard_sequences(self) + + # build set of prefixes of sequences + self._keymap_prefixes = get_leading_prefixes(self._keymap) + + # keyboard stream buffer + self._keyboard_buf = collections.deque() + + if self._keyboard_fd is not None: + # set input encoding and initialize incremental decoder + + if IS_WINDOWS: + self._encoding = get_console_input_encoding() \ + or locale.getpreferredencoding() or 'UTF-8' + else: + self._encoding = locale.getpreferredencoding() or 'UTF-8' + + try: + self._keyboard_decoder = codecs.getincrementaldecoder(self._encoding)() + except LookupError as err: + # encoding is illegal or unsupported, use 'UTF-8' + warnings.warn('LookupError: {0}, defaulting to UTF-8 for keyboard.'.format(err)) + self._encoding = 'UTF-8' + self._keyboard_decoder = codecs.getincrementaldecoder(self._encoding)() + + def __getattr__(self, attr): + r""" + Return a terminal capability as Unicode string. + + For example, ``term.bold`` is a unicode string that may be prepended + to text to set the video attribute for bold, which should also be + terminated with the pairing :attr:`normal`. This capability + returns a callable, so you can use ``term.bold("hi")`` which + results in the joining of ``(term.bold, "hi", term.normal)``. + + Compound formatters may also be used. For example:: + + >>> term.bold_blink_red_on_green("merry x-mas!") + + For a parameterized capability such as ``move`` (or ``cup``), pass the + parameters as positional arguments:: + + >>> term.move(line, column) + + See the manual page `terminfo(5) + `_ for a + complete list of capabilities and their arguments. + """ + if not self._does_styling: + return NullCallableString() + # Fetch the missing 'attribute' into some kind of curses-resolved + # capability, and cache by attaching to this Terminal class instance. + # + # Note that this will prevent future calls to __getattr__(), but + # that's precisely the idea of the cache! 
+ val = resolve_attribute(self, attr) + setattr(self, attr, val) + return val + + @property + def kind(self): + """ + Read-only property: Terminal kind determined on class initialization. + + :rtype: str + """ + return self._kind + + @property + def does_styling(self): + """ + Read-only property: Whether this class instance may emit sequences. + + :rtype: bool + """ + return self._does_styling + + @property + def is_a_tty(self): + """ + Read-only property: Whether :attr:`~.stream` is a terminal. + + :rtype: bool + """ + return self._is_a_tty + + @property + def height(self): + """ + Read-only property: Height of the terminal (in number of lines). + + :rtype: int + """ + return self._height_and_width().ws_row + + @property + def width(self): + """ + Read-only property: Width of the terminal (in number of columns). + + :rtype: int + """ + return self._height_and_width().ws_col + + @property + def pixel_height(self): + """ + Read-only property: Height ofthe terminal (in pixels). + + :rtype: int + """ + return self._height_and_width().ws_ypixel + + @property + def pixel_width(self): + """ + Read-only property: Width of terminal (in pixels). + + :rtype: int + """ + return self._height_and_width().ws_xpixel + + @staticmethod + def _winsize(fd): + """ + Return named tuple describing size of the terminal by ``fd``. + + If the given platform does not have modules :mod:`termios`, + :mod:`fcntl`, or :mod:`tty`, window size of 80 columns by 25 + rows is always returned. + + :arg int fd: file descriptor queries for its window size. + :raises IOError: the file descriptor ``fd`` is not a terminal. + :rtype: WINSZ + :returns: named tuple describing size of the terminal + + WINSZ is a :class:`collections.namedtuple` instance, whose structure + directly maps to the return value of the :const:`termios.TIOCGWINSZ` + ioctl return value. The return parameters are: + + - ``ws_row``: width of terminal by its number of character cells. 
+ - ``ws_col``: height of terminal by its number of character cells. + - ``ws_xpixel``: width of terminal by pixels (not accurate). + - ``ws_ypixel``: height of terminal by pixels (not accurate). + """ + if HAS_TTY: + # pylint: disable=protected-access + data = fcntl.ioctl(fd, termios.TIOCGWINSZ, WINSZ._BUF) + return WINSZ(*struct.unpack(WINSZ._FMT, data)) + return WINSZ(ws_row=25, ws_col=80, ws_xpixel=0, ws_ypixel=0) + + def _height_and_width(self): + """ + Return a tuple of (terminal height, terminal width). + + If :attr:`stream` or :obj:`sys.__stdout__` is not a tty or does not + support :func:`fcntl.ioctl` of :const:`termios.TIOCGWINSZ`, a window + size of 80 columns by 25 rows is returned for any values not + represented by environment variables ``LINES`` and ``COLUMNS``, which + is the default text mode of IBM PC compatibles. + + :rtype: WINSZ + :returns: Named tuple specifying the terminal size + + WINSZ is a :class:`collections.namedtuple` instance, whose structure + directly maps to the return value of the :const:`termios.TIOCGWINSZ` + ioctl return value. The return parameters are: + + - ``ws_row``: height of terminal by its number of cell rows. + - ``ws_col``: width of terminal by its number of cell columns. + - ``ws_xpixel``: width of terminal by pixels (not accurate). + - ``ws_ypixel``: height of terminal by pixels (not accurate). + + .. note:: the peculiar (height, width, width, height) order, which + matches the return order of TIOCGWINSZ! + """ + for fd in (self._init_descriptor, sys.__stdout__): + try: + if fd is not None: + return self._winsize(fd) + except (IOError, OSError, ValueError, TypeError): # pylint: disable=overlapping-except + pass + + return WINSZ(ws_row=int(os.getenv('LINES', '25')), + ws_col=int(os.getenv('COLUMNS', '80')), + ws_xpixel=None, + ws_ypixel=None) + + def _query_response(self, query_str, response_re, timeout): + """ + Sends a query string to the terminal and waits for a response. 
+ + :arg str query_str: Query string written to output + :arg str response_re: Regular expression matching query response + :arg float timeout: Return after time elapsed in seconds + :return: re.match object for response_re or None if not found + :rtype: re.Match + """ + # Avoid changing user's desired raw or cbreak mode if already entered, + # by entering cbreak mode ourselves. This is necessary to receive user + # input without awaiting a human to press the return key. This mode + # also disables echo, which we should also hide, as our input is an + # sequence that is not meaningful for display as an output sequence. + + ctx = None + try: + if self._line_buffered: + ctx = self.cbreak() + ctx.__enter__() # pylint: disable=no-member + + # Emit the query sequence, + self.stream.write(query_str) + self.stream.flush() + + # Wait for response + match, data = _read_until(term=self, + pattern=response_re, + timeout=timeout) + + # Exclude response from subsequent input + if match: + data = data[:match.start()] + data[match.end():] + + # re-buffer keyboard data, if any + self.ungetch(data) + + finally: + if ctx is not None: + ctx.__exit__(None, None, None) # pylint: disable=no-member + + return match + + @contextlib.contextmanager + def location(self, x=None, y=None): + """ + Context manager for temporarily moving the cursor. + + :arg int x: horizontal position, from left, *0*, to right edge of screen, *self.width - 1*. + :arg int y: vertical position, from top, *0*, to bottom of screen, *self.height - 1*. + :return: a context manager. + :rtype: Iterator + + Move the cursor to a certain position on entry, do any kind of I/O, and upon exit + let you print stuff there, then return the cursor to its original position: + + + .. 
code-block:: python + + term = Terminal() + with term.location(y=0, x=0): + for row_num in range(term.height-1): + print('Row #{row_num}') + print(term.clear_eol + 'Back to original location.') + + Specify ``x`` to move to a certain column, ``y`` to move to a certain + row, both, or neither. If you specify neither, only the saving and + restoration of cursor position will happen. This can be useful if you + simply want to restore your place after doing some manual cursor + movement. + + Calls cannot be nested: only one should be entered at a time. + + .. note:: The argument order *(x, y)* differs from the return value order *(y, x)* + of :meth:`get_location`, or argument order *(y, x)* of :meth:`move`. This is + for API Compaibility with the blessings library, sorry for the trouble! + """ + # pylint: disable=invalid-name + # Invalid argument name "x" + + # Save position and move to the requested column, row, or both: + self.stream.write(self.save) + if x is not None and y is not None: + self.stream.write(self.move(y, x)) + elif x is not None: + self.stream.write(self.move_x(x)) + elif y is not None: + self.stream.write(self.move_y(y)) + try: + self.stream.flush() + yield + finally: + # Restore original cursor position: + self.stream.write(self.restore) + self.stream.flush() + + def get_location(self, timeout=None): + r""" + Return tuple (row, column) of cursor position. + + :arg float timeout: Return after time elapsed in seconds with value ``(-1, -1)`` indicating + that the remote end did not respond. + :rtype: tuple + :returns: cursor position as tuple in form of ``(y, x)``. When a timeout is specified, + always ensure the return value is checked for ``(-1, -1)``. 
+ + The location of the cursor is determined by emitting the ``u7`` terminal capability, or + VT100 `Query Cursor Position + `_ + when such capability is undefined, which elicits a response from a reply string described by + capability ``u6``, or again VT100's definition of ``\x1b[%i%d;%dR`` when undefined. + + The ``(y, x)`` return value matches the parameter order of the :meth:`move_xy` capability. + The following sequence should cause the cursor to not move at all:: + + >>> term = Terminal() + >>> term.move_yx(*term.get_location())) + + And the following should assert True with a terminal: + + >>> term = Terminal() + >>> given_y, given_x = 10, 20 + >>> with term.location(y=given_y, x=given_x): + ... result_y, result_x = term.get_location() + ... + >>> assert given_x == result_x, (given_x, result_x) + >>> assert given_y == result_y, (given_y, result_y) + """ + # Local lines attached by termios and remote login protocols such as + # ssh and telnet both provide a means to determine the window + # dimensions of a connected client, but **no means to determine the + # location of the cursor**. + # + # from https://invisible-island.net/ncurses/terminfo.src.html, + # + # > The System V Release 4 and XPG4 terminfo format defines ten string + # > capabilities for use by applications, .... In this file, + # > we use certain of these capabilities to describe functions which + # > are not covered by terminfo. The mapping is as follows: + # > + # > u9 terminal enquire string (equiv. to ANSI/ECMA-48 DA) + # > u8 terminal answerback description + # > u7 cursor position request (equiv. to VT100/ANSI/ECMA-48 DSR 6) + # > u6 cursor position report (equiv. to ANSI/ECMA-48 CPR) + + response_str = getattr(self, self.caps['cursor_report'].attribute) or u'\x1b[%i%d;%dR' + match = self._query_response( + self.u7 or u'\x1b[6n', self.caps['cursor_report'].re_compiled, timeout + ) + + if match: + # return matching sequence response, the cursor location. 
+ row, col = (int(val) for val in match.groups()) + + # Per https://invisible-island.net/ncurses/terminfo.src.html + # The cursor position report () string must contain two + # scanf(3)-style %d format elements. The first of these must + # correspond to the Y coordinate and the second to the %d. + # If the string contains the sequence %i, it is taken as an + # instruction to decrement each value after reading it (this is + # the inverse sense from the cup string). + if u'%i' in response_str: + row -= 1 + col -= 1 + return row, col + + # We chose to return an illegal value rather than an exception, + # favoring that users author function filters, such as max(0, y), + # rather than crowbarring such logic into an exception handler. + return -1, -1 + + def get_fgcolor(self, timeout=None): + """ + Return tuple (r, g, b) of foreground color. + + :arg float timeout: Return after time elapsed in seconds with value ``(-1, -1, -1)`` + indicating that the remote end did not respond. + :rtype: tuple + :returns: foreground color as tuple in form of ``(r, g, b)``. When a timeout is specified, + always ensure the return value is checked for ``(-1, -1, -1)``. + + The foreground color is determined by emitting an `OSC 10 color query + `_. + """ + match = self._query_response( + u'\x1b]10;?\x07', + re.compile(u'\x1b]10;rgb:([0-9a-fA-F]+)/([0-9a-fA-F]+)/([0-9a-fA-F]+)\x07'), + timeout + ) + + return tuple(int(val, 16) for val in match.groups()) if match else (-1, -1, -1) + + def get_bgcolor(self, timeout=None): + """ + Return tuple (r, g, b) of background color. + + :arg float timeout: Return after time elapsed in seconds with value ``(-1, -1, -1)`` + indicating that the remote end did not respond. + :rtype: tuple + :returns: background color as tuple in form of ``(r, g, b)``. When a timeout is specified, + always ensure the return value is checked for ``(-1, -1, -1)``. + + The background color is determined by emitting an `OSC 11 color query + `_. 
+ """ + match = self._query_response( + u'\x1b]11;?\x07', + re.compile(u'\x1b]11;rgb:([0-9a-fA-F]+)/([0-9a-fA-F]+)/([0-9a-fA-F]+)\x07'), + timeout + ) + + return tuple(int(val, 16) for val in match.groups()) if match else (-1, -1, -1) + + @contextlib.contextmanager + def fullscreen(self): + """ + Context manager that switches to secondary screen, restoring on exit. + + Under the hood, this switches between the primary screen buffer and + the secondary one. The primary one is saved on entry and restored on + exit. Likewise, the secondary contents are also stable and are + faithfully restored on the next entry:: + + with term.fullscreen(): + main() + + .. note:: There is only one primary and one secondary screen buffer. + :meth:`fullscreen` calls cannot be nested, only one should be + entered at a time. + """ + self.stream.write(self.enter_fullscreen) + self.stream.flush() + try: + yield + finally: + self.stream.write(self.exit_fullscreen) + self.stream.flush() + + @contextlib.contextmanager + def hidden_cursor(self): + """ + Context manager that hides the cursor, setting visibility on exit. + + with term.hidden_cursor(): + main() + + .. note:: :meth:`hidden_cursor` calls cannot be nested: only one + should be entered at a time. + """ + self.stream.write(self.hide_cursor) + self.stream.flush() + try: + yield + finally: + self.stream.write(self.normal_cursor) + self.stream.flush() + + def move_xy(self, x, y): + """ + A callable string that moves the cursor to the given ``(x, y)`` screen coordinates. + + :arg int x: horizontal position, from left, *0*, to right edge of screen, *self.width - 1*. + :arg int y: vertical position, from top, *0*, to bottom of screen, *self.height - 1*. 
+ :rtype: ParameterizingString + :returns: Callable string that moves the cursor to the given coordinates + """ + # this is just a convenience alias to the built-in, but hidden 'move' + # attribute -- we encourage folks to use only (x, y) positional + # arguments, or, if they must use (y, x), then use the 'move_yx' + # alias. + return self.move(y, x) + + def move_yx(self, y, x): + """ + A callable string that moves the cursor to the given ``(y, x)`` screen coordinates. + + :arg int y: vertical position, from top, *0*, to bottom of screen, *self.height - 1*. + :arg int x: horizontal position, from left, *0*, to right edge of screen, *self.width - 1*. + :rtype: ParameterizingString + :returns: Callable string that moves the cursor to the given coordinates + """ + return self.move(y, x) + + @property + def move_left(self): + """Move cursor 1 cells to the left, or callable string for n>1 cells.""" + return FormattingOtherString(self.cub1, ParameterizingString(self.cub)) + + @property + def move_right(self): + """Move cursor 1 or more cells to the right, or callable string for n>1 cells.""" + return FormattingOtherString(self.cuf1, ParameterizingString(self.cuf)) + + @property + def move_up(self): + """Move cursor 1 or more cells upwards, or callable string for n>1 cells.""" + return FormattingOtherString(self.cuu1, ParameterizingString(self.cuu)) + + @property + def move_down(self): + """Move cursor 1 or more cells downwards, or callable string for n>1 cells.""" + return FormattingOtherString(self.cud1, ParameterizingString(self.cud)) + + @property + def color(self): + """ + A callable string that sets the foreground color. + + :rtype: ParameterizingString + + The capability is unparameterized until called and passed a number, at which point it + returns another string which represents a specific color change. This second string can + further be called to color a piece of text and set everything back to normal afterward. 
+ + This should not be used directly, but rather a specific color by name or + :meth:`~.Terminal.color_rgb` value. + """ + if self.does_styling: + return ParameterizingString(self._foreground_color, self.normal, 'color') + + return NullCallableString() + + def color_rgb(self, red, green, blue): + """ + Provides callable formatting string to set foreground color to the specified RGB color. + + :arg int red: RGB value of Red. + :arg int green: RGB value of Green. + :arg int blue: RGB value of Blue. + :rtype: FormattingString + :returns: Callable string that sets the foreground color + + If the terminal does not support RGB color, the nearest supported + color will be determined using :py:attr:`color_distance_algorithm`. + """ + if self.number_of_colors == 1 << 24: + # "truecolor" 24-bit + fmt_attr = u'\x1b[38;2;{0};{1};{2}m'.format(red, green, blue) + return FormattingString(fmt_attr, self.normal) + + # color by approximation to 256 or 16-color terminals + color_idx = self.rgb_downconvert(red, green, blue) + return FormattingString(self._foreground_color(color_idx), self.normal) + + @property + def on_color(self): + """ + A callable capability that sets the background color. + + :rtype: ParameterizingString + """ + if self.does_styling: + return ParameterizingString(self._background_color, self.normal, 'on_color') + + return NullCallableString() + + def on_color_rgb(self, red, green, blue): + """ + Provides callable formatting string to set background color to the specified RGB color. + + :arg int red: RGB value of Red. + :arg int green: RGB value of Green. + :arg int blue: RGB value of Blue. + :rtype: FormattingString + :returns: Callable string that sets the foreground color + + If the terminal does not support RGB color, the nearest supported + color will be determined using :py:attr:`color_distance_algorithm`. 
+ """ + if self.number_of_colors == 1 << 24: + fmt_attr = u'\x1b[48;2;{0};{1};{2}m'.format(red, green, blue) + return FormattingString(fmt_attr, self.normal) + + color_idx = self.rgb_downconvert(red, green, blue) + return FormattingString(self._background_color(color_idx), self.normal) + + def formatter(self, value): + """ + Provides callable formatting string to set color and other text formatting options. + + :arg str value: Sugary, ordinary, or compound formatted terminal capability, + such as "red_on_white", "normal", "red", or "bold_on_black". + :rtype: :class:`FormattingString` or :class:`NullCallableString` + :returns: Callable string that sets color and other text formatting options + + Calling ``term.formatter('bold_on_red')`` is equivalent to ``term.bold_on_red``, but a + string that is not a valid text formatter will return a :class:`NullCallableString`. + This is intended to allow validation of text formatters without the possibility of + inadvertently returning another terminal capability. + """ + formatters = split_compound(value) + if all((fmt in COLORS or fmt in COMPOUNDABLES) for fmt in formatters): + return getattr(self, value) + + return NullCallableString() + + def rgb_downconvert(self, red, green, blue): + """ + Translate an RGB color to a color code of the terminal's color depth. + + :arg int red: RGB value of Red (0-255). + :arg int green: RGB value of Green (0-255). + :arg int blue: RGB value of Blue (0-255). + :rtype: int + :returns: Color code of downconverted RGB color + """ + # Though pre-computing all 1 << 24 options is memory-intensive, a pre-computed + # "k-d tree" of 256 (x,y,z) vectors of a colorspace in 3 dimensions, such as a + # cone of HSV, or simply 255x255x255 RGB square, any given rgb value is just a + # nearest-neighbor search of 256 points, which k-d should be much faster by + # sub-dividing / culling search points, rather than our "search all 256 points + # always" approach. 
+ fn_distance = COLOR_DISTANCE_ALGORITHMS[self.color_distance_algorithm] + color_idx = 7 + shortest_distance = None + for cmp_depth, cmp_rgb in enumerate(RGB_256TABLE): + cmp_distance = fn_distance(cmp_rgb, (red, green, blue)) + if shortest_distance is None or cmp_distance < shortest_distance: + shortest_distance = cmp_distance + color_idx = cmp_depth + if cmp_depth >= self.number_of_colors: + break + return color_idx + + @property + def normal(self): + """ + A capability that resets all video attributes. + + :rtype: str + + ``normal`` is an alias for ``sgr0`` or ``exit_attribute_mode``. Any + styling attributes previously applied, such as foreground or + background colors, reverse video, or bold are reset to defaults. + """ + if self._normal: + return self._normal + self._normal = resolve_capability(self, 'normal') + return self._normal + + def link(self, url, text, url_id=''): + """ + Display ``text`` that when touched or clicked, navigates to ``url``. + + Optional ``url_id`` may be specified, so that non-adjacent cells can reference a single + target, all cells painted with the same "id" will highlight on hover, rather than any + individual one, as described in "Hovering and underlining the id parameter" of gist + https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda. + + :param str url: Hyperlink URL. + :param str text: Clickable text. + :param str url_id: Optional 'id'. + :rtype: str + :returns: String of ``text`` as a hyperlink to ``url``. + """ + assert len(url) < 2000, (len(url), url) + if url_id: + assert len(str(url_id)) < 250, (len(str(url_id)), url_id) + params = 'id={0}'.format(url_id) + else: + params = '' + if not self.does_styling: + return text + return ('\x1b]8;{0};{1}\x1b\\{2}' + '\x1b]8;;\x1b\\'.format(params, url, text)) + + @property + def stream(self): + """ + Read-only property: stream the terminal outputs to. + + This is a convenience attribute. 
It is used internally for implied + writes performed by context managers :meth:`~.hidden_cursor`, + :meth:`~.fullscreen`, :meth:`~.location`, and :meth:`~.keypad`. + """ + return self._stream + + @property + def number_of_colors(self): + """ + Number of colors supported by terminal. + + Common return values are 0, 8, 16, 256, or 1 << 24. + + This may be used to test whether the terminal supports colors, + and at what depth, if that's a concern. + + If this property is assigned a value of 88, the value 16 will be saved. This is due to the + the rarity of 88 color support and the inconsistency of behavior between implementations. + + Assigning this property to a value other than 0, 4, 8, 16, 88, 256, or 1 << 24 will + raise an :py:exc:`AssertionError`. + """ + return self._number_of_colors + + @number_of_colors.setter + def number_of_colors(self, value): + assert value in (0, 4, 8, 16, 88, 256, 1 << 24) + # Because 88 colors is rare and we can't guarantee consistent behavior, + # when 88 colors is detected, it is treated as 16 colors + self._number_of_colors = 16 if value == 88 else value + self.__clear_color_capabilities() + + @property + def color_distance_algorithm(self): + """ + Color distance algorithm used by :meth:`rgb_downconvert`. + + The slowest, but most accurate, 'cie2000', is default. Other available options are 'rgb', + 'rgb-weighted', 'cie76', and 'cie94'. + """ + return self._color_distance_algorithm + + @color_distance_algorithm.setter + def color_distance_algorithm(self, value): + assert value in COLOR_DISTANCE_ALGORITHMS + self._color_distance_algorithm = value + self.__clear_color_capabilities() + + @property + def _foreground_color(self): + """ + Convenience capability to support :attr:`~.on_color`. + + Prefers returning sequence for capability ``setaf``, "Set foreground color to #1, using ANSI + escape". If the given terminal does not support such sequence, fallback to returning + attribute ``setf``, "Set foreground color #1". 
+ """ + return self.setaf or self.setf + + @property + def _background_color(self): + """ + Convenience capability to support :attr:`~.on_color`. + + Prefers returning sequence for capability ``setab``, "Set background color to #1, using ANSI + escape". If the given terminal does not support such sequence, fallback to returning + attribute ``setb``, "Set background color #1". + """ + return self.setab or self.setb + + def ljust(self, text, width=None, fillchar=u' '): + """ + Left-align ``text``, which may contain terminal sequences. + + :arg str text: String to be aligned + :arg int width: Total width to fill with aligned text. If + unspecified, the whole width of the terminal is filled. + :arg str fillchar: String for padding the right of ``text`` + :rtype: str + :returns: String of ``text``, left-aligned by ``width``. + """ + # Left justification is different from left alignment, but we continue + # the vocabulary error of the str method for polymorphism. + if width is None: + width = self.width + return Sequence(text, self).ljust(width, fillchar) + + def rjust(self, text, width=None, fillchar=u' '): + """ + Right-align ``text``, which may contain terminal sequences. + + :arg str text: String to be aligned + :arg int width: Total width to fill with aligned text. If + unspecified, the whole width of the terminal is used. + :arg str fillchar: String for padding the left of ``text`` + :rtype: str + :returns: String of ``text``, right-aligned by ``width``. + """ + if width is None: + width = self.width + return Sequence(text, self).rjust(width, fillchar) + + def center(self, text, width=None, fillchar=u' '): + """ + Center ``text``, which may contain terminal sequences. + + :arg str text: String to be centered + :arg int width: Total width in which to center text. If + unspecified, the whole width of the terminal is used. 
+ :arg str fillchar: String for padding the left and right of ``text`` + :rtype: str + :returns: String of ``text``, centered by ``width`` + """ + if width is None: + width = self.width + return Sequence(text, self).center(width, fillchar) + + def truncate(self, text, width=None): + r""" + Truncate ``text`` to maximum ``width`` printable characters, retaining terminal sequences. + + :arg str text: Text to truncate + :arg int width: The maximum width to truncate it to + :rtype: str + :returns: ``text`` truncated to at most ``width`` printable characters + + >>> term.truncate(u'xyz\x1b[0;3m', 2) + u'xy\x1b[0;3m' + """ + if width is None: + width = self.width + return Sequence(text, self).truncate(width) + + def length(self, text): + u""" + Return printable length of a string containing sequences. + + :arg str text: String to measure. May contain terminal sequences. + :rtype: int + :returns: The number of terminal character cells the string will occupy + when printed + + Wide characters that consume 2 character cells are supported: + + >>> term = Terminal() + >>> term.length(term.clear + term.red(u'コンニチハ')) + 10 + + .. note:: Sequences such as 'clear', which is considered as a + "movement sequence" because it would move the cursor to + (y, x)(0, 0), are evaluated as a printable length of + *0*. + """ + return Sequence(text, self).length() + + def strip(self, text, chars=None): + r""" + Return ``text`` without sequences and leading or trailing whitespace. + + :rtype: str + :returns: Text with leading and trailing whitespace removed + + >>> term.strip(u' \x1b[0;3m xyz ') + u'xyz' + """ + return Sequence(text, self).strip(chars) + + def rstrip(self, text, chars=None): + r""" + Return ``text`` without terminal sequences or trailing whitespace. 
+ + :rtype: str + :returns: Text with terminal sequences and trailing whitespace removed + + >>> term.rstrip(u' \x1b[0;3m xyz ') + u' xyz' + """ + return Sequence(text, self).rstrip(chars) + + def lstrip(self, text, chars=None): + r""" + Return ``text`` without terminal sequences or leading whitespace. + + :rtype: str + :returns: Text with terminal sequences and leading whitespace removed + + >>> term.lstrip(u' \x1b[0;3m xyz ') + u'xyz ' + """ + return Sequence(text, self).lstrip(chars) + + def strip_seqs(self, text): + r""" + Return ``text`` stripped of only its terminal sequences. + + :rtype: str + :returns: Text with terminal sequences removed + + >>> term.strip_seqs(u'\x1b[0;3mxyz') + u'xyz' + >>> term.strip_seqs(term.cuf(5) + term.red(u'test')) + u' test' + + .. note:: Non-destructive sequences that adjust horizontal distance + (such as ``\b`` or ``term.cuf(5)``) are replaced by destructive + space or erasing. + """ + return Sequence(text, self).strip_seqs() + + def split_seqs(self, text, maxsplit=0): + r""" + Return ``text`` split by individual character elements and sequences. + + :arg str text: String containing sequences + :arg int maxsplit: When maxsplit is nonzero, at most maxsplit splits + occur, and the remainder of the string is returned as the final element + of the list (same meaning is argument for :func:`re.split`). 
+ :rtype: list[str] + :returns: List of sequences and individual characters + + >>> term.split_seqs(term.underline(u'xyz')) + ['\x1b[4m', 'x', 'y', 'z', '\x1b(B', '\x1b[m'] + + >>> term.split_seqs(term.underline(u'xyz'), 1) + ['\x1b[4m', r'xyz\x1b(B\x1b[m'] + """ + pattern = self._caps_unnamed_any + result = [] + for idx, match in enumerate(re.finditer(pattern, text)): + result.append(match.group()) + if maxsplit and idx == maxsplit: + remaining = text[match.end():] + if remaining: + result[-1] += remaining + break + return result + + def wrap(self, text, width=None, **kwargs): + r""" + Text-wrap a string, returning a list of wrapped lines. + + :arg str text: Unlike :func:`textwrap.wrap`, ``text`` may contain + terminal sequences, such as colors, bold, or underline. By + default, tabs in ``text`` are expanded by + :func:`string.expandtabs`. + :arg int width: Unlike :func:`textwrap.wrap`, ``width`` will + default to the width of the attached terminal. + :arg \**kwargs: See :py:class:`textwrap.TextWrapper` + :rtype: list + :returns: List of wrapped lines + + See :class:`textwrap.TextWrapper` for keyword arguments that can + customize wrapping behaviour. + """ + width = self.width if width is None else width + wrapper = SequenceTextWrapper(width=width, term=self, **kwargs) + lines = [] + for line in text.splitlines(): + lines.extend(iter(wrapper.wrap(line)) if line.strip() else (u'',)) + + return lines + + def getch(self): + """ + Read, decode, and return the next byte from the keyboard stream. + + :rtype: unicode + :returns: a single unicode character, or ``u''`` if a multi-byte + sequence has not yet been fully received. + + This method name and behavior mimics curses ``getch(void)``, and + it supports :meth:`inkey`, reading only one byte from + the keyboard string at a time. This method should always return + without blocking if called after :meth:`kbhit` has returned True. + + Implementors of alternate input stream methods should override + this method. 
+ """ + assert self._keyboard_fd is not None + byte = os.read(self._keyboard_fd, 1) + return self._keyboard_decoder.decode(byte, final=False) + + def ungetch(self, text): + """ + Buffer input data to be discovered by next call to :meth:`~.inkey`. + + :arg str text: String to be buffered as keyboard input. + """ + self._keyboard_buf.extendleft(text) + + def kbhit(self, timeout=None): + """ + Return whether a keypress has been detected on the keyboard. + + This method is used by :meth:`inkey` to determine if a byte may + be read using :meth:`getch` without blocking. The standard + implementation simply uses the :func:`select.select` call on stdin. + + :arg float timeout: When ``timeout`` is 0, this call is + non-blocking, otherwise blocking indefinitely until keypress + is detected when None (default). When ``timeout`` is a + positive number, returns after ``timeout`` seconds have + elapsed (float). + :rtype: bool + :returns: True if a keypress is awaiting to be read on the keyboard + attached to this terminal. When input is not a terminal, False is + always returned. + """ + stime = time.time() + ready_r = [None, ] + check_r = [self._keyboard_fd] if self._keyboard_fd is not None else [] + + while HAS_TTY: + try: + ready_r, _, _ = select.select(check_r, [], [], timeout) + except InterruptedError: + # Beginning with python3.5, IntrruptError is no longer thrown + # https://www.python.org/dev/peps/pep-0475/ + # + # For previous versions of python, we take special care to + # retry select on InterruptedError exception, namely to handle + # a custom SIGWINCH handler. When installed, it would cause + # select() to be interrupted with errno 4 (EAGAIN). + # + # Just as in python3.5, it is ignored, and a new timeout value + # is derived from the previous unless timeout becomes negative. + # because the signal handler has blocked beyond timeout, then + # False is returned. Otherwise, when timeout is None, we + # continue to block indefinitely (default). 
+ if timeout is not None: + # subtract time already elapsed, + timeout -= time.time() - stime + if timeout > 0: + continue + # no time remains after handling exception (rare) + ready_r = [] # pragma: no cover + break # pragma: no cover + else: + break + + return False if self._keyboard_fd is None else check_r == ready_r + + @contextlib.contextmanager + def cbreak(self): + """ + Allow each keystroke to be read immediately after it is pressed. + + This is a context manager for :func:`tty.setcbreak`. + + This context manager activates 'rare' mode, the opposite of 'cooked' + mode: On entry, :func:`tty.setcbreak` mode is activated disabling + line-buffering of keyboard input and turning off automatic echo of + input as output. + + .. note:: You must explicitly print any user input you would like + displayed. If you provide any kind of editing, you must handle + backspace and other line-editing control functions in this mode + as well! + + **Normally**, characters received from the keyboard cannot be read + by Python until the *Return* key is pressed. Also known as *cooked* or + *canonical input* mode, it allows the tty driver to provide + line-editing before shuttling the input to your program and is the + (implicit) default terminal mode set by most unix shells before + executing programs. + + Technically, this context manager sets the :mod:`termios` attributes + of the terminal attached to :obj:`sys.__stdin__`. + + .. 
note:: :func:`tty.setcbreak` sets ``VMIN = 1`` and ``VTIME = 0``, + see http://www.unixwiz.net/techtips/termios-vmin-vtime.html + """ + if HAS_TTY and self._keyboard_fd is not None: + # Save current terminal mode: + save_mode = termios.tcgetattr(self._keyboard_fd) + save_line_buffered = self._line_buffered + tty.setcbreak(self._keyboard_fd, termios.TCSANOW) + try: + self._line_buffered = False + yield + finally: + # Restore prior mode: + termios.tcsetattr(self._keyboard_fd, + termios.TCSAFLUSH, + save_mode) + self._line_buffered = save_line_buffered + else: + yield + + @contextlib.contextmanager + def raw(self): + r""" + A context manager for :func:`tty.setraw`. + + Although both :meth:`break` and :meth:`raw` modes allow each keystroke + to be read immediately after it is pressed, Raw mode disables + processing of input and output. + + In cbreak mode, special input characters such as ``^C`` or ``^S`` are + interpreted by the terminal driver and excluded from the stdin stream. + In raw mode these values are receive by the :meth:`inkey` method. + + Because output processing is not done, the newline ``'\n'`` is not + enough, you must also print carriage return to ensure that the cursor + is returned to the first column:: + + with term.raw(): + print("printing in raw mode", end="\r\n") + """ + if HAS_TTY and self._keyboard_fd is not None: + # Save current terminal mode: + save_mode = termios.tcgetattr(self._keyboard_fd) + save_line_buffered = self._line_buffered + tty.setraw(self._keyboard_fd, termios.TCSANOW) + try: + self._line_buffered = False + yield + finally: + # Restore prior mode: + termios.tcsetattr(self._keyboard_fd, + termios.TCSAFLUSH, + save_mode) + self._line_buffered = save_line_buffered + else: + yield + + @contextlib.contextmanager + def keypad(self): + r""" + Context manager that enables directional keypad input. + + On entrying, this puts the terminal into "keyboard_transmit" mode by + emitting the keypad_xmit (smkx) capability. 
On exit, it emits + keypad_local (rmkx). + + On an IBM-PC keyboard with numeric keypad of terminal-type *xterm*, + with numlock off, the lower-left diagonal key transmits sequence + ``\\x1b[F``, translated to :class:`~.Terminal` attribute + ``KEY_END``. + + However, upon entering :meth:`keypad`, ``\\x1b[OF`` is transmitted, + translating to ``KEY_LL`` (lower-left key), allowing you to determine + diagonal direction keys. + """ + try: + self.stream.write(self.smkx) + self.stream.flush() + yield + finally: + self.stream.write(self.rmkx) + self.stream.flush() + + def inkey(self, timeout=None, esc_delay=0.35): + """ + Read and return the next keyboard event within given timeout. + + Generally, this should be used inside the :meth:`raw` context manager. + + :arg float timeout: Number of seconds to wait for a keystroke before + returning. When ``None`` (default), this method may block + indefinitely. + :arg float esc_delay: To distinguish between the keystroke of + ``KEY_ESCAPE``, and sequences beginning with escape, the parameter + ``esc_delay`` specifies the amount of time after receiving escape + (``chr(27)``) to seek for the completion of an application key + before returning a :class:`~.Keystroke` instance for + ``KEY_ESCAPE``. + :rtype: :class:`~.Keystroke`. + :returns: :class:`~.Keystroke`, which may be empty (``u''``) if + ``timeout`` is specified and keystroke is not received. + + .. note:: When used without the context manager :meth:`cbreak`, or + :meth:`raw`, :obj:`sys.__stdin__` remains line-buffered, and this + function will block until the return key is pressed! + + .. note:: On Windows, a 10 ms sleep is added to the key press detection loop to reduce CPU + load. Due to the behavior of :py:func:`time.sleep` on Windows, this will actually + result in a 15.6 ms delay when using the default `time resolution + `_. 
+ Decreasing the time resolution will reduce this to 10 ms, while increasing it, which + is rarely done, will have a perceptable impact on the behavior. + """ + resolve = functools.partial(resolve_sequence, + mapper=self._keymap, + codes=self._keycodes) + + stime = time.time() + + # re-buffer previously received keystrokes, + ucs = u'' + while self._keyboard_buf: + ucs += self._keyboard_buf.pop() + + # receive all immediately available bytes + while self.kbhit(timeout=0): + ucs += self.getch() + + # decode keystroke, if any + ks = resolve(text=ucs) + + # so long as the most immediately received or buffered keystroke is + # incomplete, (which may be a multibyte encoding), block until until + # one is received. + while not ks and self.kbhit(timeout=_time_left(stime, timeout)): + ucs += self.getch() + ks = resolve(text=ucs) + + # handle escape key (KEY_ESCAPE) vs. escape sequence (like those + # that begin with \x1b[ or \x1bO) up to esc_delay when + # received. This is not optimal, but causes least delay when + # "meta sends escape" is used, or when an unsupported sequence is + # sent. + # + # The statement, "ucs in self._keymap_prefixes" has an effect on + # keystrokes such as Alt + Z ("\x1b[z" with metaSendsEscape): because + # no known input sequences begin with such phrasing to allow it to be + # returned more quickly than esc_delay otherwise blocks for. + if ks.code == self.KEY_ESCAPE: + esctime = time.time() + while (ks.code == self.KEY_ESCAPE and + ucs in self._keymap_prefixes and + self.kbhit(timeout=_time_left(esctime, esc_delay))): + ucs += self.getch() + ks = resolve(text=ucs) + + # buffer any remaining text received + self.ungetch(ucs[len(ks):]) + return ks + + +class WINSZ(collections.namedtuple('WINSZ', ( + 'ws_row', 'ws_col', 'ws_xpixel', 'ws_ypixel'))): + """ + Structure represents return value of :const:`termios.TIOCGWINSZ`. + + .. py:attribute:: ws_row + + rows, in characters + + .. py:attribute:: ws_col + + columns, in characters + + .. 
py:attribute:: ws_xpixel + + horizontal size, pixels + + .. py:attribute:: ws_ypixel + + vertical size, pixels + """ + #: format of termios structure + _FMT = 'hhhh' + #: buffer of termios structure appropriate for ioctl argument + _BUF = '\x00' * struct.calcsize(_FMT) + + +#: _CUR_TERM = None +#: From libcurses/doc/ncurses-intro.html (ESR, Thomas Dickey, et. al):: +#: +#: "After the call to setupterm(), the global variable cur_term is set to +#: point to the current structure of terminal capabilities. By calling +#: setupterm() for each terminal, and saving and restoring cur_term, it +#: is possible for a program to use two or more terminals at once." +#: +#: However, if you study Python's ``./Modules/_cursesmodule.c``, you'll find:: +#: +#: if (!initialised_setupterm && setupterm(termstr,fd,&err) == ERR) { +#: +#: Python - perhaps wrongly - will not allow for re-initialisation of new +#: terminals through :func:`curses.setupterm`, so the value of cur_term cannot +#: be changed once set: subsequent calls to :func:`curses.setupterm` have no +#: effect. +#: +#: Therefore, the :attr:`Terminal.kind` of each :class:`Terminal` is +#: essentially a singleton. This global variable reflects that, and a warning +#: is emitted if somebody expects otherwise. diff --git a/vllm/lib/python3.10/site-packages/blessed/terminal.pyi b/vllm/lib/python3.10/site-packages/blessed/terminal.pyi new file mode 100644 index 0000000000000000000000000000000000000000..bff670b708ad4d40c30d3575457f4de89c43de7d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/terminal.pyi @@ -0,0 +1,108 @@ +# std imports +from typing import IO, Any, List, Tuple, Union, Optional, OrderedDict, SupportsIndex, ContextManager + +# local +from .keyboard import Keystroke +from .sequences import Termcap +from .formatters import (FormattingString, + NullCallableString, + ParameterizingString, + FormattingOtherString) + +HAS_TTY: bool + +class Terminal: + caps: OrderedDict[str, Termcap] + errors: List[str] = ... 
+ def __init__( + self, + kind: Optional[str] = ..., + stream: Optional[IO[str]] = ..., + force_styling: bool = ..., + ) -> None: ... + def __getattr__( + self, attr: str + ) -> Union[NullCallableString, ParameterizingString, FormattingString]: ... + @property + def kind(self) -> str: ... + @property + def does_styling(self) -> bool: ... + @property + def is_a_tty(self) -> bool: ... + @property + def height(self) -> int: ... + @property + def width(self) -> int: ... + @property + def pixel_height(self) -> int: ... + @property + def pixel_width(self) -> int: ... + def location( + self, x: Optional[int] = ..., y: Optional[int] = ... + ) -> ContextManager[None]: ... + def get_location(self, timeout: Optional[float] = ...) -> Tuple[int, int]: ... + def get_fgcolor(self, timeout: Optional[float] = ...) -> Tuple[int, int, int]: ... + def get_bgcolor(self, timeout: Optional[float] = ...) -> Tuple[int, int, int]: ... + def fullscreen(self) -> ContextManager[None]: ... + def hidden_cursor(self) -> ContextManager[None]: ... + def move_xy(self, x: int, y: int) -> ParameterizingString: ... + def move_yx(self, y: int, x: int) -> ParameterizingString: ... + @property + def move_left(self) -> FormattingOtherString: ... + @property + def move_right(self) -> FormattingOtherString: ... + @property + def move_up(self) -> FormattingOtherString: ... + @property + def move_down(self) -> FormattingOtherString: ... + @property + def color(self) -> Union[NullCallableString, ParameterizingString]: ... + def color_rgb(self, red: int, green: int, blue: int) -> FormattingString: ... + @property + def on_color(self) -> Union[NullCallableString, ParameterizingString]: ... + def on_color_rgb(self, red: int, green: int, blue: int) -> FormattingString: ... + def formatter(self, value: str) -> Union[NullCallableString, FormattingString]: ... + def rgb_downconvert(self, red: int, green: int, blue: int) -> int: ... + @property + def normal(self) -> str: ... 
+ def link(self, url: str, text: str, url_id: str = ...) -> str: ... + @property + def stream(self) -> IO[str]: ... + @property + def number_of_colors(self) -> int: ... + @number_of_colors.setter + def number_of_colors(self, value: int) -> None: ... + @property + def color_distance_algorithm(self) -> str: ... + @color_distance_algorithm.setter + def color_distance_algorithm(self, value: str) -> None: ... + def ljust( + self, text: str, width: Optional[SupportsIndex] = ..., fillchar: str = ... + ) -> str: ... + def rjust( + self, text: str, width: Optional[SupportsIndex] = ..., fillchar: str = ... + ) -> str: ... + def center( + self, text: str, width: Optional[SupportsIndex] = ..., fillchar: str = ... + ) -> str: ... + def truncate(self, text: str, width: Optional[SupportsIndex] = ...) -> str: ... + def length(self, text: str) -> int: ... + def strip(self, text: str, chars: Optional[str] = ...) -> str: ... + def rstrip(self, text: str, chars: Optional[str] = ...) -> str: ... + def lstrip(self, text: str, chars: Optional[str] = ...) -> str: ... + def strip_seqs(self, text: str) -> str: ... + def split_seqs(self, text: str, maxsplit: int) -> List[str]: ... + def wrap( + self, text: str, width: Optional[int] = ..., **kwargs: Any + ) -> List[str]: ... + def getch(self) -> str: ... + def ungetch(self, text: str) -> None: ... + def kbhit(self, timeout: Optional[float] = ...) -> bool: ... + def cbreak(self) -> ContextManager[None]: ... + def raw(self) -> ContextManager[None]: ... + def keypad(self) -> ContextManager[None]: ... + def inkey( + self, timeout: Optional[float] = ..., esc_delay: float = ... + ) -> Keystroke: ... + +class WINSZ: ... 
diff --git a/vllm/lib/python3.10/site-packages/blessed/win_terminal.py b/vllm/lib/python3.10/site-packages/blessed/win_terminal.py new file mode 100644 index 0000000000000000000000000000000000000000..267e028b96f75dd4473b9fe9cce9a191574ee522 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/win_terminal.py @@ -0,0 +1,163 @@ +# -*- coding: utf-8 -*- +"""Module containing Windows version of :class:`Terminal`.""" + +from __future__ import absolute_import + +# std imports +import time +import msvcrt # pylint: disable=import-error +import contextlib + +# 3rd party +from jinxed import win32 # pylint: disable=import-error + +# local +from .terminal import WINSZ +from .terminal import Terminal as _Terminal + + +class Terminal(_Terminal): + """Windows subclass of :class:`Terminal`.""" + + def getch(self): + r""" + Read, decode, and return the next byte from the keyboard stream. + + :rtype: unicode + :returns: a single unicode character, or ``u''`` if a multi-byte + sequence has not yet been fully received. + + For versions of Windows 10.0.10586 and later, the console is expected + to be in ENABLE_VIRTUAL_TERMINAL_INPUT mode and the default method is + called. + + For older versions of Windows, msvcrt.getwch() is used. If the received + character is ``\x00`` or ``\xe0``, the next character is + automatically retrieved. + """ + if win32.VTMODE_SUPPORTED: + return super(Terminal, self).getch() + + rtn = msvcrt.getwch() + if rtn in ('\x00', '\xe0'): + rtn += msvcrt.getwch() + return rtn + + def kbhit(self, timeout=None): + """ + Return whether a keypress has been detected on the keyboard. + + This method is used by :meth:`inkey` to determine if a byte may + be read using :meth:`getch` without blocking. This is implemented + by wrapping msvcrt.kbhit() in a timeout. + + :arg float timeout: When ``timeout`` is 0, this call is + non-blocking, otherwise blocking indefinitely until keypress + is detected when None (default). 
When ``timeout`` is a + positive number, returns after ``timeout`` seconds have + elapsed (float). + :rtype: bool + :returns: True if a keypress is awaiting to be read on the keyboard + attached to this terminal. + """ + end = time.time() + (timeout or 0) + while True: + + if msvcrt.kbhit(): + return True + + if timeout is not None and end < time.time(): + break + + time.sleep(0.01) # Sleep to reduce CPU load + return False + + @staticmethod + def _winsize(fd): + """ + Return named tuple describing size of the terminal by ``fd``. + + :arg int fd: file descriptor queries for its window size. + :rtype: WINSZ + :returns: named tuple describing size of the terminal + + WINSZ is a :class:`collections.namedtuple` instance, whose structure + directly maps to the return value of the :const:`termios.TIOCGWINSZ` + ioctl return value. The return parameters are: + + - ``ws_row``: width of terminal by its number of character cells. + - ``ws_col``: height of terminal by its number of character cells. + - ``ws_xpixel``: width of terminal by pixels (not accurate). + - ``ws_ypixel``: height of terminal by pixels (not accurate). + """ + window = win32.get_terminal_size(fd) + return WINSZ(ws_row=window.lines, ws_col=window.columns, + ws_xpixel=0, ws_ypixel=0) + + @contextlib.contextmanager + def cbreak(self): + """ + Allow each keystroke to be read immediately after it is pressed. + + This is a context manager for ``jinxed.w32.setcbreak()``. + + .. note:: You must explicitly print any user input you would like + displayed. If you provide any kind of editing, you must handle + backspace and other line-editing control functions in this mode + as well! + + **Normally**, characters received from the keyboard cannot be read + by Python until the *Return* key is pressed. 
Also known as *cooked* or + *canonical input* mode, it allows the tty driver to provide + line-editing before shuttling the input to your program and is the + (implicit) default terminal mode set by most unix shells before + executing programs. + """ + if self._keyboard_fd is not None: + + filehandle = msvcrt.get_osfhandle(self._keyboard_fd) + + # Save current terminal mode: + save_mode = win32.get_console_mode(filehandle) + save_line_buffered = self._line_buffered + win32.setcbreak(filehandle) + try: + self._line_buffered = False + yield + finally: + win32.set_console_mode(filehandle, save_mode) + self._line_buffered = save_line_buffered + + else: + yield + + @contextlib.contextmanager + def raw(self): + """ + A context manager for ``jinxed.w32.setcbreak()``. + + Although both :meth:`break` and :meth:`raw` modes allow each keystroke + to be read immediately after it is pressed, Raw mode disables + processing of input and output. + + In cbreak mode, special input characters such as ``^C`` are + interpreted by the terminal driver and excluded from the stdin stream. + In raw mode these values are receive by the :meth:`inkey` method. 
+ """ + if self._keyboard_fd is not None: + + filehandle = msvcrt.get_osfhandle(self._keyboard_fd) + + # Save current terminal mode: + save_mode = win32.get_console_mode(filehandle) + save_line_buffered = self._line_buffered + win32.setraw(filehandle) + try: + self._line_buffered = False + yield + finally: + win32.set_console_mode(filehandle, save_mode) + self._line_buffered = save_line_buffered + + else: + yield diff --git a/vllm/lib/python3.10/site-packages/blessed/win_terminal.pyi b/vllm/lib/python3.10/site-packages/blessed/win_terminal.pyi new file mode 100644 index 0000000000000000000000000000000000000000..275f16f9ee0ad9b2056e7279b4a9eab3e64d55dd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/blessed/win_terminal.pyi @@ -0,0 +1,11 @@ +# std imports +from typing import Optional, ContextManager + +# local +from .terminal import Terminal as _Terminal + +class Terminal(_Terminal): + def getch(self) -> str: ... + def kbhit(self, timeout: Optional[float] = ...) -> bool: ... + def cbreak(self) -> ContextManager[None]: ... + def raw(self) -> ContextManager[None]: ... 
diff --git a/vllm/lib/python3.10/site-packages/python_multipart-0.0.20.dist-info/RECORD b/vllm/lib/python3.10/site-packages/python_multipart-0.0.20.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..e59f013ee1f1f1e678b2d1a14711add75d0baf8d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/python_multipart-0.0.20.dist-info/RECORD @@ -0,0 +1,23 @@ +multipart/__init__.py,sha256=_ttxOAFnTN4jeac-_8NeXpaXYYo0PPEIp8Ogo4YFNHE,935 +multipart/__pycache__/__init__.cpython-310.pyc,, +multipart/__pycache__/decoders.cpython-310.pyc,, +multipart/__pycache__/exceptions.cpython-310.pyc,, +multipart/__pycache__/multipart.cpython-310.pyc,, +multipart/decoders.py,sha256=XvkAwTU9UFPiXkc0hkvovHf0W6H3vK-2ieWlhav02hQ,40 +multipart/exceptions.py,sha256=6D_X-seiOmMAlIeiGlPGUs8-vpcvIGJeQycFMDb1f7A,42 +multipart/multipart.py,sha256=8fDH14j_VMbrch_58wlzi63XNARGv80kOZAyN72aG7A,41 +python_multipart-0.0.20.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +python_multipart-0.0.20.dist-info/METADATA,sha256=h2GtPOVShbVkpBUrjp5KE3t6eiJJhd0_WCaCXrb5TgU,1817 +python_multipart-0.0.20.dist-info/RECORD,, +python_multipart-0.0.20.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +python_multipart-0.0.20.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87 +python_multipart-0.0.20.dist-info/licenses/LICENSE.txt,sha256=qOgzF2zWF9rwC51tOfoVyo7evG0WQwec0vSJPAwom-I,556 +python_multipart/__init__.py,sha256=Nlw6Yrc__qXnCZLo17OzbJR2w2mwiSFk69IG4Wl35EU,512 +python_multipart/__pycache__/__init__.cpython-310.pyc,, +python_multipart/__pycache__/decoders.cpython-310.pyc,, +python_multipart/__pycache__/exceptions.cpython-310.pyc,, +python_multipart/__pycache__/multipart.cpython-310.pyc,, +python_multipart/decoders.py,sha256=JM43FMNn_EKP0MI2ZkuZHhNa0MOASoIR0U5TvdG585k,6669 +python_multipart/exceptions.py,sha256=a9buSOv_eiHZoukEJhdWX9LJYSJ6t7XOK3ZEaWoQZlk,992 
+python_multipart/multipart.py,sha256=pk3o3eB3KXbNxzOBxbEjCdz-1ESEZIMXVIfl12grG-o,76427 +python_multipart/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 diff --git a/vllm/lib/python3.10/site-packages/python_multipart-0.0.20.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/python_multipart-0.0.20.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/python_multipart-0.0.20.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/python_multipart-0.0.20.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..12228d414b6cfed7c39d3781c85c63256a1d7fb5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/python_multipart-0.0.20.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.27.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/vllm/lib/python3.10/site-packages/python_multipart-0.0.20.dist-info/licenses/LICENSE.txt b/vllm/lib/python3.10/site-packages/python_multipart-0.0.20.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..303a1bf5015ca5c7374a9bde74dee5e2b205fbe3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/python_multipart-0.0.20.dist-info/licenses/LICENSE.txt @@ -0,0 +1,14 @@ +Copyright 2012, Andrew Dunham + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ diff --git a/vllm/lib/python3.10/site-packages/requests/__init__.py b/vllm/lib/python3.10/site-packages/requests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..051cda1340effaa0706b46dd68ac002ceda3d45c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/requests/__init__.py @@ -0,0 +1,184 @@ +# __ +# /__) _ _ _ _ _/ _ +# / ( (- (/ (/ (- _) / _) +# / + +""" +Requests HTTP Library +~~~~~~~~~~~~~~~~~~~~~ + +Requests is an HTTP library, written in Python, for human beings. +Basic GET usage: + + >>> import requests + >>> r = requests.get('https://www.python.org') + >>> r.status_code + 200 + >>> b'Python is a programming language' in r.content + True + +... or POST: + + >>> payload = dict(key1='value1', key2='value2') + >>> r = requests.post('https://httpbin.org/post', data=payload) + >>> print(r.text) + { + ... + "form": { + "key1": "value1", + "key2": "value2" + }, + ... + } + +The other HTTP methods are supported - see `requests.api`. Full documentation +is at . + +:copyright: (c) 2017 by Kenneth Reitz. +:license: Apache 2.0, see LICENSE for more details. +""" + +import warnings + +import urllib3 + +from .exceptions import RequestsDependencyWarning + +try: + from charset_normalizer import __version__ as charset_normalizer_version +except ImportError: + charset_normalizer_version = None + +try: + from chardet import __version__ as chardet_version +except ImportError: + chardet_version = None + + +def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version): + urllib3_version = urllib3_version.split(".") + assert urllib3_version != ["dev"] # Verify urllib3 isn't installed from git. + + # Sometimes, urllib3 only reports its version as 16.1. + if len(urllib3_version) == 2: + urllib3_version.append("0") + + # Check urllib3 for compatibility. 
+ major, minor, patch = urllib3_version # noqa: F811 + major, minor, patch = int(major), int(minor), int(patch) + # urllib3 >= 1.21.1 + assert major >= 1 + if major == 1: + assert minor >= 21 + + # Check charset_normalizer for compatibility. + if chardet_version: + major, minor, patch = chardet_version.split(".")[:3] + major, minor, patch = int(major), int(minor), int(patch) + # chardet_version >= 3.0.2, < 6.0.0 + assert (3, 0, 2) <= (major, minor, patch) < (6, 0, 0) + elif charset_normalizer_version: + major, minor, patch = charset_normalizer_version.split(".")[:3] + major, minor, patch = int(major), int(minor), int(patch) + # charset_normalizer >= 2.0.0 < 4.0.0 + assert (2, 0, 0) <= (major, minor, patch) < (4, 0, 0) + else: + warnings.warn( + "Unable to find acceptable character detection dependency " + "(chardet or charset_normalizer).", + RequestsDependencyWarning, + ) + + +def _check_cryptography(cryptography_version): + # cryptography < 1.3.4 + try: + cryptography_version = list(map(int, cryptography_version.split("."))) + except ValueError: + return + + if cryptography_version < [1, 3, 4]: + warning = "Old version of cryptography ({}) may cause slowdown.".format( + cryptography_version + ) + warnings.warn(warning, RequestsDependencyWarning) + + +# Check imported dependencies for compatibility. +try: + check_compatibility( + urllib3.__version__, chardet_version, charset_normalizer_version + ) +except (AssertionError, ValueError): + warnings.warn( + "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported " + "version!".format( + urllib3.__version__, chardet_version, charset_normalizer_version + ), + RequestsDependencyWarning, + ) + +# Attempt to enable urllib3's fallback for SNI support +# if the standard library doesn't support SNI or the +# 'ssl' library isn't available. 
+try: + try: + import ssl + except ImportError: + ssl = None + + if not getattr(ssl, "HAS_SNI", False): + from urllib3.contrib import pyopenssl + + pyopenssl.inject_into_urllib3() + + # Check cryptography version + from cryptography import __version__ as cryptography_version + + _check_cryptography(cryptography_version) +except ImportError: + pass + +# urllib3's DependencyWarnings should be silenced. +from urllib3.exceptions import DependencyWarning + +warnings.simplefilter("ignore", DependencyWarning) + +# Set default logging handler to avoid "No handler found" warnings. +import logging +from logging import NullHandler + +from . import packages, utils +from .__version__ import ( + __author__, + __author_email__, + __build__, + __cake__, + __copyright__, + __description__, + __license__, + __title__, + __url__, + __version__, +) +from .api import delete, get, head, options, patch, post, put, request +from .exceptions import ( + ConnectionError, + ConnectTimeout, + FileModeWarning, + HTTPError, + JSONDecodeError, + ReadTimeout, + RequestException, + Timeout, + TooManyRedirects, + URLRequired, +) +from .models import PreparedRequest, Request, Response +from .sessions import Session, session +from .status_codes import codes + +logging.getLogger(__name__).addHandler(NullHandler()) + +# FileModeWarnings go off per the default. +warnings.simplefilter("default", FileModeWarning, append=True) diff --git a/vllm/lib/python3.10/site-packages/requests/adapters.py b/vllm/lib/python3.10/site-packages/requests/adapters.py new file mode 100644 index 0000000000000000000000000000000000000000..9a58b1602532f9bee41afb0dfadaa7eb548e98c1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/requests/adapters.py @@ -0,0 +1,719 @@ +""" +requests.adapters +~~~~~~~~~~~~~~~~~ + +This module contains the transport adapters that Requests uses to define +and maintain connections. 
+""" + +import os.path +import socket # noqa: F401 +import typing +import warnings + +from urllib3.exceptions import ClosedPoolError, ConnectTimeoutError +from urllib3.exceptions import HTTPError as _HTTPError +from urllib3.exceptions import InvalidHeader as _InvalidHeader +from urllib3.exceptions import ( + LocationValueError, + MaxRetryError, + NewConnectionError, + ProtocolError, +) +from urllib3.exceptions import ProxyError as _ProxyError +from urllib3.exceptions import ReadTimeoutError, ResponseError +from urllib3.exceptions import SSLError as _SSLError +from urllib3.poolmanager import PoolManager, proxy_from_url +from urllib3.util import Timeout as TimeoutSauce +from urllib3.util import parse_url +from urllib3.util.retry import Retry +from urllib3.util.ssl_ import create_urllib3_context + +from .auth import _basic_auth_str +from .compat import basestring, urlparse +from .cookies import extract_cookies_to_jar +from .exceptions import ( + ConnectionError, + ConnectTimeout, + InvalidHeader, + InvalidProxyURL, + InvalidSchema, + InvalidURL, + ProxyError, + ReadTimeout, + RetryError, + SSLError, +) +from .models import Response +from .structures import CaseInsensitiveDict +from .utils import ( + DEFAULT_CA_BUNDLE_PATH, + extract_zipped_paths, + get_auth_from_url, + get_encoding_from_headers, + prepend_scheme_if_needed, + select_proxy, + urldefragauth, +) + +try: + from urllib3.contrib.socks import SOCKSProxyManager +except ImportError: + + def SOCKSProxyManager(*args, **kwargs): + raise InvalidSchema("Missing dependencies for SOCKS support.") + + +if typing.TYPE_CHECKING: + from .models import PreparedRequest + + +DEFAULT_POOLBLOCK = False +DEFAULT_POOLSIZE = 10 +DEFAULT_RETRIES = 0 +DEFAULT_POOL_TIMEOUT = None + + +try: + import ssl # noqa: F401 + + _preloaded_ssl_context = create_urllib3_context() + _preloaded_ssl_context.load_verify_locations( + extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH) + ) +except ImportError: + # Bypass default SSLContext creation when 
Python + # interpreter isn't built with the ssl module. + _preloaded_ssl_context = None + + +def _urllib3_request_context( + request: "PreparedRequest", + verify: "bool | str | None", + client_cert: "typing.Tuple[str, str] | str | None", + poolmanager: "PoolManager", +) -> "(typing.Dict[str, typing.Any], typing.Dict[str, typing.Any])": + host_params = {} + pool_kwargs = {} + parsed_request_url = urlparse(request.url) + scheme = parsed_request_url.scheme.lower() + port = parsed_request_url.port + + # Determine if we have and should use our default SSLContext + # to optimize performance on standard requests. + poolmanager_kwargs = getattr(poolmanager, "connection_pool_kw", {}) + has_poolmanager_ssl_context = poolmanager_kwargs.get("ssl_context") + should_use_default_ssl_context = ( + _preloaded_ssl_context is not None and not has_poolmanager_ssl_context + ) + + cert_reqs = "CERT_REQUIRED" + if verify is False: + cert_reqs = "CERT_NONE" + elif verify is True and should_use_default_ssl_context: + pool_kwargs["ssl_context"] = _preloaded_ssl_context + elif isinstance(verify, str): + if not os.path.isdir(verify): + pool_kwargs["ca_certs"] = verify + else: + pool_kwargs["ca_cert_dir"] = verify + pool_kwargs["cert_reqs"] = cert_reqs + if client_cert is not None: + if isinstance(client_cert, tuple) and len(client_cert) == 2: + pool_kwargs["cert_file"] = client_cert[0] + pool_kwargs["key_file"] = client_cert[1] + else: + # According to our docs, we allow users to specify just the client + # cert path + pool_kwargs["cert_file"] = client_cert + host_params = { + "scheme": scheme, + "host": parsed_request_url.hostname, + "port": port, + } + return host_params, pool_kwargs + + +class BaseAdapter: + """The Base Transport Adapter""" + + def __init__(self): + super().__init__() + + def send( + self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None + ): + """Sends PreparedRequest object. Returns Response object. 
+ + :param request: The :class:`PreparedRequest ` being sent. + :param stream: (optional) Whether to stream the request content. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. + :type timeout: float or tuple + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use + :param cert: (optional) Any user-provided SSL certificate to be trusted. + :param proxies: (optional) The proxies dictionary to apply to the request. + """ + raise NotImplementedError + + def close(self): + """Cleans up adapter specific items.""" + raise NotImplementedError + + +class HTTPAdapter(BaseAdapter): + """The built-in HTTP Adapter for urllib3. + + Provides a general-case interface for Requests sessions to contact HTTP and + HTTPS urls by implementing the Transport Adapter interface. This class will + usually be created by the :class:`Session ` class under the + covers. + + :param pool_connections: The number of urllib3 connection pools to cache. + :param pool_maxsize: The maximum number of connections to save in the pool. + :param max_retries: The maximum number of retries each connection + should attempt. Note, this applies only to failed DNS lookups, socket + connections and connection timeouts, never to requests where data has + made it to the server. By default, Requests does not retry failed + connections. If you need granular control over the conditions under + which we retry a request, import urllib3's ``Retry`` class and pass + that instead. + :param pool_block: Whether the connection pool should block for connections. 
+ + Usage:: + + >>> import requests + >>> s = requests.Session() + >>> a = requests.adapters.HTTPAdapter(max_retries=3) + >>> s.mount('http://', a) + """ + + __attrs__ = [ + "max_retries", + "config", + "_pool_connections", + "_pool_maxsize", + "_pool_block", + ] + + def __init__( + self, + pool_connections=DEFAULT_POOLSIZE, + pool_maxsize=DEFAULT_POOLSIZE, + max_retries=DEFAULT_RETRIES, + pool_block=DEFAULT_POOLBLOCK, + ): + if max_retries == DEFAULT_RETRIES: + self.max_retries = Retry(0, read=False) + else: + self.max_retries = Retry.from_int(max_retries) + self.config = {} + self.proxy_manager = {} + + super().__init__() + + self._pool_connections = pool_connections + self._pool_maxsize = pool_maxsize + self._pool_block = pool_block + + self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block) + + def __getstate__(self): + return {attr: getattr(self, attr, None) for attr in self.__attrs__} + + def __setstate__(self, state): + # Can't handle by adding 'proxy_manager' to self.__attrs__ because + # self.poolmanager uses a lambda function, which isn't pickleable. + self.proxy_manager = {} + self.config = {} + + for attr, value in state.items(): + setattr(self, attr, value) + + self.init_poolmanager( + self._pool_connections, self._pool_maxsize, block=self._pool_block + ) + + def init_poolmanager( + self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs + ): + """Initializes a urllib3 PoolManager. + + This method should not be called from user code, and is only + exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param connections: The number of urllib3 connection pools to cache. + :param maxsize: The maximum number of connections to save in the pool. + :param block: Block when no free connections are available. + :param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager. 
+ """ + # save these values for pickling + self._pool_connections = connections + self._pool_maxsize = maxsize + self._pool_block = block + + self.poolmanager = PoolManager( + num_pools=connections, + maxsize=maxsize, + block=block, + **pool_kwargs, + ) + + def proxy_manager_for(self, proxy, **proxy_kwargs): + """Return urllib3 ProxyManager for the given proxy. + + This method should not be called from user code, and is only + exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param proxy: The proxy to return a urllib3 ProxyManager for. + :param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager. + :returns: ProxyManager + :rtype: urllib3.ProxyManager + """ + if proxy in self.proxy_manager: + manager = self.proxy_manager[proxy] + elif proxy.lower().startswith("socks"): + username, password = get_auth_from_url(proxy) + manager = self.proxy_manager[proxy] = SOCKSProxyManager( + proxy, + username=username, + password=password, + num_pools=self._pool_connections, + maxsize=self._pool_maxsize, + block=self._pool_block, + **proxy_kwargs, + ) + else: + proxy_headers = self.proxy_headers(proxy) + manager = self.proxy_manager[proxy] = proxy_from_url( + proxy, + proxy_headers=proxy_headers, + num_pools=self._pool_connections, + maxsize=self._pool_maxsize, + block=self._pool_block, + **proxy_kwargs, + ) + + return manager + + def cert_verify(self, conn, url, verify, cert): + """Verify a SSL certificate. This method should not be called from user + code, and is only exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param conn: The urllib3 connection object associated with the cert. + :param url: The requested URL. + :param verify: Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use + :param cert: The SSL certificate to verify. 
+ """ + if url.lower().startswith("https") and verify: + conn.cert_reqs = "CERT_REQUIRED" + + # Only load the CA certificates if 'verify' is a string indicating the CA bundle to use. + # Otherwise, if verify is a boolean, we don't load anything since + # the connection will be using a context with the default certificates already loaded, + # and this avoids a call to the slow load_verify_locations() + if verify is not True: + # `verify` must be a str with a path then + cert_loc = verify + + if not os.path.exists(cert_loc): + raise OSError( + f"Could not find a suitable TLS CA certificate bundle, " + f"invalid path: {cert_loc}" + ) + + if not os.path.isdir(cert_loc): + conn.ca_certs = cert_loc + else: + conn.ca_cert_dir = cert_loc + else: + conn.cert_reqs = "CERT_NONE" + conn.ca_certs = None + conn.ca_cert_dir = None + + if cert: + if not isinstance(cert, basestring): + conn.cert_file = cert[0] + conn.key_file = cert[1] + else: + conn.cert_file = cert + conn.key_file = None + if conn.cert_file and not os.path.exists(conn.cert_file): + raise OSError( + f"Could not find the TLS certificate file, " + f"invalid path: {conn.cert_file}" + ) + if conn.key_file and not os.path.exists(conn.key_file): + raise OSError( + f"Could not find the TLS key file, invalid path: {conn.key_file}" + ) + + def build_response(self, req, resp): + """Builds a :class:`Response ` object from a urllib3 + response. This should not be called from user code, and is only exposed + for use when subclassing the + :class:`HTTPAdapter ` + + :param req: The :class:`PreparedRequest ` used to generate the response. + :param resp: The urllib3 response object. + :rtype: requests.Response + """ + response = Response() + + # Fallback to None if there's no status_code, for whatever reason. + response.status_code = getattr(resp, "status", None) + + # Make headers case-insensitive. + response.headers = CaseInsensitiveDict(getattr(resp, "headers", {})) + + # Set encoding. 
+ response.encoding = get_encoding_from_headers(response.headers) + response.raw = resp + response.reason = response.raw.reason + + if isinstance(req.url, bytes): + response.url = req.url.decode("utf-8") + else: + response.url = req.url + + # Add new cookies from the server. + extract_cookies_to_jar(response.cookies, req, resp) + + # Give the Response some context. + response.request = req + response.connection = self + + return response + + def build_connection_pool_key_attributes(self, request, verify, cert=None): + """Build the PoolKey attributes used by urllib3 to return a connection. + + This looks at the PreparedRequest, the user-specified verify value, + and the value of the cert parameter to determine what PoolKey values + to use to select a connection from a given urllib3 Connection Pool. + + The SSL related pool key arguments are not consistently set. As of + this writing, use the following to determine what keys may be in that + dictionary: + + * If ``verify`` is ``True``, ``"ssl_context"`` will be set and will be the + default Requests SSL Context + * If ``verify`` is ``False``, ``"ssl_context"`` will not be set but + ``"cert_reqs"`` will be set + * If ``verify`` is a string, (i.e., it is a user-specified trust bundle) + ``"ca_certs"`` will be set if the string is not a directory recognized + by :py:func:`os.path.isdir`, otherwise ``"ca_certs_dir"`` will be + set. + * If ``"cert"`` is specified, ``"cert_file"`` will always be set. If + ``"cert"`` is a tuple with a second item, ``"key_file"`` will also + be present + + To override these settings, one may subclass this class, call this + method and use the above logic to change parameters as desired. For + example, if one wishes to use a custom :py:class:`ssl.SSLContext` one + must both set ``"ssl_context"`` and based on what else they require, + alter the other keys to ensure the desired behaviour. + + :param request: + The PreparedReqest being sent over the connection. 
+ :type request: + :class:`~requests.models.PreparedRequest` + :param verify: + Either a boolean, in which case it controls whether + we verify the server's TLS certificate, or a string, in which case it + must be a path to a CA bundle to use. + :param cert: + (optional) Any user-provided SSL certificate for client + authentication (a.k.a., mTLS). This may be a string (i.e., just + the path to a file which holds both certificate and key) or a + tuple of length 2 with the certificate file path and key file + path. + :returns: + A tuple of two dictionaries. The first is the "host parameters" + portion of the Pool Key including scheme, hostname, and port. The + second is a dictionary of SSLContext related parameters. + """ + return _urllib3_request_context(request, verify, cert, self.poolmanager) + + def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None): + """Returns a urllib3 connection for the given request and TLS settings. + This should not be called from user code, and is only exposed for use + when subclassing the :class:`HTTPAdapter `. + + :param request: + The :class:`PreparedRequest ` object to be sent + over the connection. + :param verify: + Either a boolean, in which case it controls whether we verify the + server's TLS certificate, or a string, in which case it must be a + path to a CA bundle to use. + :param proxies: + (optional) The proxies dictionary to apply to the request. + :param cert: + (optional) Any user-provided SSL certificate to be used for client + authentication (a.k.a., mTLS). + :rtype: + urllib3.ConnectionPool + """ + proxy = select_proxy(request.url, proxies) + try: + host_params, pool_kwargs = self.build_connection_pool_key_attributes( + request, + verify, + cert, + ) + except ValueError as e: + raise InvalidURL(e, request=request) + if proxy: + proxy = prepend_scheme_if_needed(proxy, "http") + proxy_url = parse_url(proxy) + if not proxy_url.host: + raise InvalidProxyURL( + "Please check proxy URL. 
It is malformed " + "and could be missing the host." + ) + proxy_manager = self.proxy_manager_for(proxy) + conn = proxy_manager.connection_from_host( + **host_params, pool_kwargs=pool_kwargs + ) + else: + # Only scheme should be lower case + conn = self.poolmanager.connection_from_host( + **host_params, pool_kwargs=pool_kwargs + ) + + return conn + + def get_connection(self, url, proxies=None): + """DEPRECATED: Users should move to `get_connection_with_tls_context` + for all subclasses of HTTPAdapter using Requests>=2.32.2. + + Returns a urllib3 connection for the given URL. This should not be + called from user code, and is only exposed for use when subclassing the + :class:`HTTPAdapter `. + + :param url: The URL to connect to. + :param proxies: (optional) A Requests-style dictionary of proxies used on this request. + :rtype: urllib3.ConnectionPool + """ + warnings.warn( + ( + "`get_connection` has been deprecated in favor of " + "`get_connection_with_tls_context`. Custom HTTPAdapter subclasses " + "will need to migrate for Requests>=2.32.2. Please see " + "https://github.com/psf/requests/pull/6710 for more details." + ), + DeprecationWarning, + ) + proxy = select_proxy(url, proxies) + + if proxy: + proxy = prepend_scheme_if_needed(proxy, "http") + proxy_url = parse_url(proxy) + if not proxy_url.host: + raise InvalidProxyURL( + "Please check proxy URL. It is malformed " + "and could be missing the host." + ) + proxy_manager = self.proxy_manager_for(proxy) + conn = proxy_manager.connection_from_url(url) + else: + # Only scheme should be lower case + parsed = urlparse(url) + url = parsed.geturl() + conn = self.poolmanager.connection_from_url(url) + + return conn + + def close(self): + """Disposes of any internal state. + + Currently, this closes the PoolManager and any active ProxyManager, + which closes any pooled connections. 
+ """ + self.poolmanager.clear() + for proxy in self.proxy_manager.values(): + proxy.clear() + + def request_url(self, request, proxies): + """Obtain the url to use when making the final request. + + If the message is being sent through a HTTP proxy, the full URL has to + be used. Otherwise, we should only use the path portion of the URL. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param request: The :class:`PreparedRequest ` being sent. + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs. + :rtype: str + """ + proxy = select_proxy(request.url, proxies) + scheme = urlparse(request.url).scheme + + is_proxied_http_request = proxy and scheme != "https" + using_socks_proxy = False + if proxy: + proxy_scheme = urlparse(proxy).scheme.lower() + using_socks_proxy = proxy_scheme.startswith("socks") + + url = request.path_url + if url.startswith("//"): # Don't confuse urllib3 + url = f"/{url.lstrip('/')}" + + if is_proxied_http_request and not using_socks_proxy: + url = urldefragauth(request.url) + + return url + + def add_headers(self, request, **kwargs): + """Add any headers needed by the connection. As of v2.0 this does + nothing by default, but is left for overriding by users that subclass + the :class:`HTTPAdapter `. + + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param request: The :class:`PreparedRequest ` to add headers to. + :param kwargs: The keyword arguments from the call to send(). + """ + pass + + def proxy_headers(self, proxy): + """Returns a dictionary of the headers to add to any request sent + through a proxy. This works with urllib3 magic to ensure that they are + correctly sent to the proxy, rather than in a tunnelled request if + CONNECT is being used. 
+ + This should not be called from user code, and is only exposed for use + when subclassing the + :class:`HTTPAdapter `. + + :param proxy: The url of the proxy being used for this request. + :rtype: dict + """ + headers = {} + username, password = get_auth_from_url(proxy) + + if username: + headers["Proxy-Authorization"] = _basic_auth_str(username, password) + + return headers + + def send( + self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None + ): + """Sends PreparedRequest object. Returns Response object. + + :param request: The :class:`PreparedRequest ` being sent. + :param stream: (optional) Whether to stream the request content. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. + :type timeout: float or tuple or urllib3 Timeout object + :param verify: (optional) Either a boolean, in which case it controls whether + we verify the server's TLS certificate, or a string, in which case it + must be a path to a CA bundle to use + :param cert: (optional) Any user-provided SSL certificate to be trusted. + :param proxies: (optional) The proxies dictionary to apply to the request. + :rtype: requests.Response + """ + + try: + conn = self.get_connection_with_tls_context( + request, verify, proxies=proxies, cert=cert + ) + except LocationValueError as e: + raise InvalidURL(e, request=request) + + self.cert_verify(conn, request.url, verify, cert) + url = self.request_url(request, proxies) + self.add_headers( + request, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies, + ) + + chunked = not (request.body is None or "Content-Length" in request.headers) + + if isinstance(timeout, tuple): + try: + connect, read = timeout + timeout = TimeoutSauce(connect=connect, read=read) + except ValueError: + raise ValueError( + f"Invalid timeout {timeout}. 
Pass a (connect, read) timeout tuple, " + f"or a single float to set both timeouts to the same value." + ) + elif isinstance(timeout, TimeoutSauce): + pass + else: + timeout = TimeoutSauce(connect=timeout, read=timeout) + + try: + resp = conn.urlopen( + method=request.method, + url=url, + body=request.body, + headers=request.headers, + redirect=False, + assert_same_host=False, + preload_content=False, + decode_content=False, + retries=self.max_retries, + timeout=timeout, + chunked=chunked, + ) + + except (ProtocolError, OSError) as err: + raise ConnectionError(err, request=request) + + except MaxRetryError as e: + if isinstance(e.reason, ConnectTimeoutError): + # TODO: Remove this in 3.0.0: see #2811 + if not isinstance(e.reason, NewConnectionError): + raise ConnectTimeout(e, request=request) + + if isinstance(e.reason, ResponseError): + raise RetryError(e, request=request) + + if isinstance(e.reason, _ProxyError): + raise ProxyError(e, request=request) + + if isinstance(e.reason, _SSLError): + # This branch is for urllib3 v1.22 and later. 
+ raise SSLError(e, request=request) + + raise ConnectionError(e, request=request) + + except ClosedPoolError as e: + raise ConnectionError(e, request=request) + + except _ProxyError as e: + raise ProxyError(e) + + except (_SSLError, _HTTPError) as e: + if isinstance(e, _SSLError): + # This branch is for urllib3 versions earlier than v1.22 + raise SSLError(e, request=request) + elif isinstance(e, ReadTimeoutError): + raise ReadTimeout(e, request=request) + elif isinstance(e, _InvalidHeader): + raise InvalidHeader(e, request=request) + else: + raise + + return self.build_response(request, resp) diff --git a/vllm/lib/python3.10/site-packages/requests/api.py b/vllm/lib/python3.10/site-packages/requests/api.py new file mode 100644 index 0000000000000000000000000000000000000000..5960744552e7f8eea815429e7bdad38b0cc2741d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/requests/api.py @@ -0,0 +1,157 @@ +""" +requests.api +~~~~~~~~~~~~ + +This module implements the Requests API. + +:copyright: (c) 2012 by Kenneth Reitz. +:license: Apache2, see LICENSE for more details. +""" + +from . import sessions + + +def request(method, url, **kwargs): + """Constructs and sends a :class:`Request `. + + :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``. + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary, list of tuples or bytes to send + in the query string for the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`. + :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`. 
+ :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload. + ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')`` + or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content_type'`` is a string + defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers + to add for the file. + :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth. + :param timeout: (optional) How many seconds to wait for the server to send data + before giving up, as a float, or a :ref:`(connect timeout, read + timeout) ` tuple. + :type timeout: float or tuple + :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``. + :type allow_redirects: bool + :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy. + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use. Defaults to ``True``. + :param stream: (optional) if ``False``, the response content will be immediately downloaded. + :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair. + :return: :class:`Response ` object + :rtype: requests.Response + + Usage:: + + >>> import requests + >>> req = requests.request('GET', 'https://httpbin.org/get') + >>> req + + """ + + # By using the 'with' statement we are sure the session is closed, thus we + # avoid leaving sockets open which can trigger a ResourceWarning in some + # cases, and look like a memory leak in others. + with sessions.Session() as session: + return session.request(method=method, url=url, **kwargs) + + +def get(url, params=None, **kwargs): + r"""Sends a GET request. 
+ + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary, list of tuples or bytes to send + in the query string for the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("get", url, params=params, **kwargs) + + +def options(url, **kwargs): + r"""Sends an OPTIONS request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("options", url, **kwargs) + + +def head(url, **kwargs): + r"""Sends a HEAD request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. If + `allow_redirects` is not provided, it will be set to `False` (as + opposed to the default :meth:`request` behavior). + :return: :class:`Response ` object + :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", False) + return request("head", url, **kwargs) + + +def post(url, data=None, json=None, **kwargs): + r"""Sends a POST request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("post", url, data=data, json=json, **kwargs) + + +def put(url, data=None, **kwargs): + r"""Sends a PUT request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. 
+ :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("put", url, data=data, **kwargs) + + +def patch(url, data=None, **kwargs): + r"""Sends a PATCH request. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("patch", url, data=data, **kwargs) + + +def delete(url, **kwargs): + r"""Sends a DELETE request. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :return: :class:`Response ` object + :rtype: requests.Response + """ + + return request("delete", url, **kwargs) diff --git a/vllm/lib/python3.10/site-packages/requests/certs.py b/vllm/lib/python3.10/site-packages/requests/certs.py new file mode 100644 index 0000000000000000000000000000000000000000..be422c3e91e43bacf60ff3302688df0b28742333 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/requests/certs.py @@ -0,0 +1,17 @@ +#!/usr/bin/env python + +""" +requests.certs +~~~~~~~~~~~~~~ + +This module returns the preferred default CA certificate bundle. There is +only one — the one from the certifi package. + +If you are packaging Requests, e.g., for a Linux distribution or a managed +environment, you can change the definition of where() to return a separately +packaged CA bundle. 
+""" +from certifi import where + +if __name__ == "__main__": + print(where()) diff --git a/vllm/lib/python3.10/site-packages/requests/compat.py b/vllm/lib/python3.10/site-packages/requests/compat.py new file mode 100644 index 0000000000000000000000000000000000000000..095de1b6cae2f460174af54efa975411645f40c6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/requests/compat.py @@ -0,0 +1,94 @@ +""" +requests.compat +~~~~~~~~~~~~~~~ + +This module previously handled import compatibility issues +between Python 2 and Python 3. It remains for backwards +compatibility until the next major version. +""" + +import importlib +import sys + +# ------------------- +# Character Detection +# ------------------- + + +def _resolve_char_detection(): + """Find supported character detection libraries.""" + chardet = None + for lib in ("chardet", "charset_normalizer"): + if chardet is None: + try: + chardet = importlib.import_module(lib) + except ImportError: + pass + return chardet + + +chardet = _resolve_char_detection() + +# ------- +# Pythons +# ------- + +# Syntax sugar. +_ver = sys.version_info + +#: Python 2.x? +is_py2 = _ver[0] == 2 + +#: Python 3.x? +is_py3 = _ver[0] == 3 + +# json/simplejson module import resolution +has_simplejson = False +try: + import simplejson as json + + has_simplejson = True +except ImportError: + import json + +if has_simplejson: + from simplejson import JSONDecodeError +else: + from json import JSONDecodeError + +# Keep OrderedDict for backwards compatibility. 
+from collections import OrderedDict +from collections.abc import Callable, Mapping, MutableMapping +from http import cookiejar as cookielib +from http.cookies import Morsel +from io import StringIO + +# -------------- +# Legacy Imports +# -------------- +from urllib.parse import ( + quote, + quote_plus, + unquote, + unquote_plus, + urldefrag, + urlencode, + urljoin, + urlparse, + urlsplit, + urlunparse, +) +from urllib.request import ( + getproxies, + getproxies_environment, + parse_http_list, + proxy_bypass, + proxy_bypass_environment, +) + +builtin_str = str +str = str +bytes = bytes +basestring = (str, bytes) +numeric_types = (int, float) +integer_types = (int,) diff --git a/vllm/lib/python3.10/site-packages/requests/exceptions.py b/vllm/lib/python3.10/site-packages/requests/exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..83986b489849131efeb7f286b328961205256fd8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/requests/exceptions.py @@ -0,0 +1,151 @@ +""" +requests.exceptions +~~~~~~~~~~~~~~~~~~~ + +This module contains the set of Requests' exceptions. +""" +from urllib3.exceptions import HTTPError as BaseHTTPError + +from .compat import JSONDecodeError as CompatJSONDecodeError + + +class RequestException(IOError): + """There was an ambiguous exception that occurred while handling your + request. 
+ """ + + def __init__(self, *args, **kwargs): + """Initialize RequestException with `request` and `response` objects.""" + response = kwargs.pop("response", None) + self.response = response + self.request = kwargs.pop("request", None) + if response is not None and not self.request and hasattr(response, "request"): + self.request = self.response.request + super().__init__(*args, **kwargs) + + +class InvalidJSONError(RequestException): + """A JSON error occurred.""" + + +class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError): + """Couldn't decode the text into json""" + + def __init__(self, *args, **kwargs): + """ + Construct the JSONDecodeError instance first with all + args. Then use it's args to construct the IOError so that + the json specific args aren't used as IOError specific args + and the error message from JSONDecodeError is preserved. + """ + CompatJSONDecodeError.__init__(self, *args) + InvalidJSONError.__init__(self, *self.args, **kwargs) + + def __reduce__(self): + """ + The __reduce__ method called when pickling the object must + be the one from the JSONDecodeError (be it json/simplejson) + as it expects all the arguments for instantiation, not just + one like the IOError, and the MRO would by default call the + __reduce__ method from the IOError due to the inheritance order. + """ + return CompatJSONDecodeError.__reduce__(self) + + +class HTTPError(RequestException): + """An HTTP error occurred.""" + + +class ConnectionError(RequestException): + """A Connection error occurred.""" + + +class ProxyError(ConnectionError): + """A proxy error occurred.""" + + +class SSLError(ConnectionError): + """An SSL error occurred.""" + + +class Timeout(RequestException): + """The request timed out. + + Catching this error will catch both + :exc:`~requests.exceptions.ConnectTimeout` and + :exc:`~requests.exceptions.ReadTimeout` errors. 
+ """ + + +class ConnectTimeout(ConnectionError, Timeout): + """The request timed out while trying to connect to the remote server. + + Requests that produced this error are safe to retry. + """ + + +class ReadTimeout(Timeout): + """The server did not send any data in the allotted amount of time.""" + + +class URLRequired(RequestException): + """A valid URL is required to make a request.""" + + +class TooManyRedirects(RequestException): + """Too many redirects.""" + + +class MissingSchema(RequestException, ValueError): + """The URL scheme (e.g. http or https) is missing.""" + + +class InvalidSchema(RequestException, ValueError): + """The URL scheme provided is either invalid or unsupported.""" + + +class InvalidURL(RequestException, ValueError): + """The URL provided was somehow invalid.""" + + +class InvalidHeader(RequestException, ValueError): + """The header value provided was somehow invalid.""" + + +class InvalidProxyURL(InvalidURL): + """The proxy URL provided is invalid.""" + + +class ChunkedEncodingError(RequestException): + """The server declared chunked encoding but sent an invalid chunk.""" + + +class ContentDecodingError(RequestException, BaseHTTPError): + """Failed to decode response content.""" + + +class StreamConsumedError(RequestException, TypeError): + """The content for this response was already consumed.""" + + +class RetryError(RequestException): + """Custom retries logic failed""" + + +class UnrewindableBodyError(RequestException): + """Requests encountered an error when trying to rewind a body.""" + + +# Warnings + + +class RequestsWarning(Warning): + """Base warning for Requests.""" + + +class FileModeWarning(RequestsWarning, DeprecationWarning): + """A file was opened in text mode, but Requests determined its binary length.""" + + +class RequestsDependencyWarning(RequestsWarning): + """An imported dependency doesn't match the expected version range.""" diff --git a/vllm/lib/python3.10/site-packages/requests/hooks.py 
b/vllm/lib/python3.10/site-packages/requests/hooks.py new file mode 100644 index 0000000000000000000000000000000000000000..d181ba2ec2e55d274897315887b78fbdca757da8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/requests/hooks.py @@ -0,0 +1,33 @@ +""" +requests.hooks +~~~~~~~~~~~~~~ + +This module provides the capabilities for the Requests hooks system. + +Available hooks: + +``response``: + The response generated from a Request. +""" +HOOKS = ["response"] + + +def default_hooks(): + return {event: [] for event in HOOKS} + + +# TODO: response is the only one + + +def dispatch_hook(key, hooks, hook_data, **kwargs): + """Dispatches a hook dictionary on a given piece of data.""" + hooks = hooks or {} + hooks = hooks.get(key) + if hooks: + if hasattr(hooks, "__call__"): + hooks = [hooks] + for hook in hooks: + _hook_data = hook(hook_data, **kwargs) + if _hook_data is not None: + hook_data = _hook_data + return hook_data diff --git a/vllm/lib/python3.10/site-packages/requests/sessions.py b/vllm/lib/python3.10/site-packages/requests/sessions.py new file mode 100644 index 0000000000000000000000000000000000000000..b387bc36df7bc064b502adcb3c1a4527dd401fda --- /dev/null +++ b/vllm/lib/python3.10/site-packages/requests/sessions.py @@ -0,0 +1,831 @@ +""" +requests.sessions +~~~~~~~~~~~~~~~~~ + +This module provides a Session object to manage and persist settings across +requests (cookies, auth, proxies). 
+""" +import os +import sys +import time +from collections import OrderedDict +from datetime import timedelta + +from ._internal_utils import to_native_string +from .adapters import HTTPAdapter +from .auth import _basic_auth_str +from .compat import Mapping, cookielib, urljoin, urlparse +from .cookies import ( + RequestsCookieJar, + cookiejar_from_dict, + extract_cookies_to_jar, + merge_cookies, +) +from .exceptions import ( + ChunkedEncodingError, + ContentDecodingError, + InvalidSchema, + TooManyRedirects, +) +from .hooks import default_hooks, dispatch_hook + +# formerly defined here, reexposed here for backward compatibility +from .models import ( # noqa: F401 + DEFAULT_REDIRECT_LIMIT, + REDIRECT_STATI, + PreparedRequest, + Request, +) +from .status_codes import codes +from .structures import CaseInsensitiveDict +from .utils import ( # noqa: F401 + DEFAULT_PORTS, + default_headers, + get_auth_from_url, + get_environ_proxies, + get_netrc_auth, + requote_uri, + resolve_proxies, + rewind_body, + should_bypass_proxies, + to_key_val_list, +) + +# Preferred clock, based on which one is more accurate on a given system. +if sys.platform == "win32": + preferred_clock = time.perf_counter +else: + preferred_clock = time.time + + +def merge_setting(request_setting, session_setting, dict_class=OrderedDict): + """Determines appropriate setting for a given request, taking into account + the explicit setting on that request, and the setting in the session. If a + setting is a dictionary, they will be merged together using `dict_class` + """ + + if session_setting is None: + return request_setting + + if request_setting is None: + return session_setting + + # Bypass if not a dictionary (e.g. verify) + if not ( + isinstance(session_setting, Mapping) and isinstance(request_setting, Mapping) + ): + return request_setting + + merged_setting = dict_class(to_key_val_list(session_setting)) + merged_setting.update(to_key_val_list(request_setting)) + + # Remove keys that are set to None. 
Extract keys first to avoid altering + # the dictionary during iteration. + none_keys = [k for (k, v) in merged_setting.items() if v is None] + for key in none_keys: + del merged_setting[key] + + return merged_setting + + +def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict): + """Properly merges both requests and session hooks. + + This is necessary because when request_hooks == {'response': []}, the + merge breaks Session hooks entirely. + """ + if session_hooks is None or session_hooks.get("response") == []: + return request_hooks + + if request_hooks is None or request_hooks.get("response") == []: + return session_hooks + + return merge_setting(request_hooks, session_hooks, dict_class) + + +class SessionRedirectMixin: + def get_redirect_target(self, resp): + """Receives a Response. Returns a redirect URI or ``None``""" + # Due to the nature of how requests processes redirects this method will + # be called at least once upon the original response and at least twice + # on each subsequent redirect response (if any). + # If a custom mixin is used to handle this logic, it may be advantageous + # to cache the redirect location onto the response object as a private + # attribute. + if resp.is_redirect: + location = resp.headers["location"] + # Currently the underlying http module on py3 decode headers + # in latin1, but empirical evidence suggests that latin1 is very + # rarely used with non-ASCII characters in HTTP headers. + # It is more likely to get UTF8 header rather than latin1. + # This causes incorrect handling of UTF8 encoded location headers. + # To solve this, we re-encode the location in latin1. 
+ location = location.encode("latin1") + return to_native_string(location, "utf8") + return None + + def should_strip_auth(self, old_url, new_url): + """Decide whether Authorization header should be removed when redirecting""" + old_parsed = urlparse(old_url) + new_parsed = urlparse(new_url) + if old_parsed.hostname != new_parsed.hostname: + return True + # Special case: allow http -> https redirect when using the standard + # ports. This isn't specified by RFC 7235, but is kept to avoid + # breaking backwards compatibility with older versions of requests + # that allowed any redirects on the same host. + if ( + old_parsed.scheme == "http" + and old_parsed.port in (80, None) + and new_parsed.scheme == "https" + and new_parsed.port in (443, None) + ): + return False + + # Handle default port usage corresponding to scheme. + changed_port = old_parsed.port != new_parsed.port + changed_scheme = old_parsed.scheme != new_parsed.scheme + default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None) + if ( + not changed_scheme + and old_parsed.port in default_port + and new_parsed.port in default_port + ): + return False + + # Standard case: root URI must match + return changed_port or changed_scheme + + def resolve_redirects( + self, + resp, + req, + stream=False, + timeout=None, + verify=True, + cert=None, + proxies=None, + yield_requests=False, + **adapter_kwargs, + ): + """Receives a Response. Returns a generator of Responses or Requests.""" + + hist = [] # keep track of history + + url = self.get_redirect_target(resp) + previous_fragment = urlparse(req.url).fragment + while url: + prepared_request = req.copy() + + # Update history and keep track of redirects. 
+ # resp.history must ignore the original request in this loop + hist.append(resp) + resp.history = hist[1:] + + try: + resp.content # Consume socket so it can be released + except (ChunkedEncodingError, ContentDecodingError, RuntimeError): + resp.raw.read(decode_content=False) + + if len(resp.history) >= self.max_redirects: + raise TooManyRedirects( + f"Exceeded {self.max_redirects} redirects.", response=resp + ) + + # Release the connection back into the pool. + resp.close() + + # Handle redirection without scheme (see: RFC 1808 Section 4) + if url.startswith("//"): + parsed_rurl = urlparse(resp.url) + url = ":".join([to_native_string(parsed_rurl.scheme), url]) + + # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2) + parsed = urlparse(url) + if parsed.fragment == "" and previous_fragment: + parsed = parsed._replace(fragment=previous_fragment) + elif parsed.fragment: + previous_fragment = parsed.fragment + url = parsed.geturl() + + # Facilitate relative 'location' headers, as allowed by RFC 7231. + # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource') + # Compliant with RFC3986, we percent encode the url. + if not parsed.netloc: + url = urljoin(resp.url, requote_uri(url)) + else: + url = requote_uri(url) + + prepared_request.url = to_native_string(url) + + self.rebuild_method(prepared_request, resp) + + # https://github.com/psf/requests/issues/1084 + if resp.status_code not in ( + codes.temporary_redirect, + codes.permanent_redirect, + ): + # https://github.com/psf/requests/issues/3490 + purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding") + for header in purged_headers: + prepared_request.headers.pop(header, None) + prepared_request.body = None + + headers = prepared_request.headers + headers.pop("Cookie", None) + + # Extract any cookies sent on the response to the cookiejar + # in the new request. 
Because we've mutated our copied prepared + # request, use the old one that we haven't yet touched. + extract_cookies_to_jar(prepared_request._cookies, req, resp.raw) + merge_cookies(prepared_request._cookies, self.cookies) + prepared_request.prepare_cookies(prepared_request._cookies) + + # Rebuild auth and proxy information. + proxies = self.rebuild_proxies(prepared_request, proxies) + self.rebuild_auth(prepared_request, resp) + + # A failed tell() sets `_body_position` to `object()`. This non-None + # value ensures `rewindable` will be True, allowing us to raise an + # UnrewindableBodyError, instead of hanging the connection. + rewindable = prepared_request._body_position is not None and ( + "Content-Length" in headers or "Transfer-Encoding" in headers + ) + + # Attempt to rewind consumed file-like object. + if rewindable: + rewind_body(prepared_request) + + # Override the original request. + req = prepared_request + + if yield_requests: + yield req + else: + resp = self.send( + req, + stream=stream, + timeout=timeout, + verify=verify, + cert=cert, + proxies=proxies, + allow_redirects=False, + **adapter_kwargs, + ) + + extract_cookies_to_jar(self.cookies, prepared_request, resp.raw) + + # extract redirect url, if any, for the next loop + url = self.get_redirect_target(resp) + yield resp + + def rebuild_auth(self, prepared_request, response): + """When being redirected we may want to strip authentication from the + request to avoid leaking credentials. This method intelligently removes + and reapplies authentication where possible to avoid credential loss. + """ + headers = prepared_request.headers + url = prepared_request.url + + if "Authorization" in headers and self.should_strip_auth( + response.request.url, url + ): + # If we get redirected to a new host, we should strip out any + # authentication headers. + del headers["Authorization"] + + # .netrc might have more auth for us on our new host. 
+ new_auth = get_netrc_auth(url) if self.trust_env else None + if new_auth is not None: + prepared_request.prepare_auth(new_auth) + + def rebuild_proxies(self, prepared_request, proxies): + """This method re-evaluates the proxy configuration by considering the + environment variables. If we are redirected to a URL covered by + NO_PROXY, we strip the proxy configuration. Otherwise, we set missing + proxy keys for this URL (in case they were stripped by a previous + redirect). + + This method also replaces the Proxy-Authorization header where + necessary. + + :rtype: dict + """ + headers = prepared_request.headers + scheme = urlparse(prepared_request.url).scheme + new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env) + + if "Proxy-Authorization" in headers: + del headers["Proxy-Authorization"] + + try: + username, password = get_auth_from_url(new_proxies[scheme]) + except KeyError: + username, password = None, None + + # urllib3 handles proxy authorization for us in the standard adapter. + # Avoid appending this to TLS tunneled requests where it may be leaked. + if not scheme.startswith("https") and username and password: + headers["Proxy-Authorization"] = _basic_auth_str(username, password) + + return new_proxies + + def rebuild_method(self, prepared_request, response): + """When being redirected we may want to change the method of the request + based on certain specs or browser behavior. + """ + method = prepared_request.method + + # https://tools.ietf.org/html/rfc7231#section-6.4.4 + if response.status_code == codes.see_other and method != "HEAD": + method = "GET" + + # Do what the browsers do, despite standards... + # First, turn 302s into GETs. + if response.status_code == codes.found and method != "HEAD": + method = "GET" + + # Second, if a POST is responded to with a 301, turn it into a GET. + # This bizarre behaviour is explained in Issue 1704. 
class Session(SessionRedirectMixin):
    """A Requests session.

    Stores configuration defaults, persists cookies, and pools
    connections across the requests made through it.

    Basic usage::

        >>> import requests
        >>> s = requests.Session()
        >>> s.get('https://httpbin.org/get')

    Or as a context manager::

        >>> with requests.Session() as s:
        ...     s.get('https://httpbin.org/get')

    """

    # Attributes captured by __getstate__/__setstate__ for pickling.
    __attrs__ = [
        "headers",
        "cookies",
        "auth",
        "proxies",
        "hooks",
        "params",
        "verify",
        "cert",
        "adapters",
        "stream",
        "trust_env",
        "max_redirects",
    ]

    def __init__(self):
        #: Headers attached to every request made from this session.
        self.headers = default_headers()

        #: Default auth (tuple or callable) applied to every request.
        self.auth = None

        #: Scheme (or scheme+host) -> proxy URL mapping used on each
        #: request, e.g. {'http': 'foo.bar:3128'}.
        self.proxies = {}

        #: Event-handling hooks.
        self.hooks = default_hooks()

        #: Query-string parameters merged into every request; values
        #: may be lists for multivalued parameters.
        self.params = {}

        #: Whether responses are streamed by default.
        self.stream = False

        #: TLS verification default. True verifies the remote
        #: certificate; False accepts any certificate (and hostname
        #: mismatches / expiry), exposing you to MitM attacks — only
        #: set it to False for testing.
        self.verify = True

        #: Client certificate: path to a .pem file, or ('cert', 'key').
        self.cert = None

        #: Redirect ceiling before TooManyRedirects is raised
        #: (DEFAULT_REDIRECT_LIMIT, i.e. 30).
        self.max_redirects = DEFAULT_REDIRECT_LIMIT

        #: Whether to honour environment settings (proxies, netrc,
        #: CA-bundle overrides).
        self.trust_env = True

        #: Cookies outstanding on this session; any
        #: ``cookielib.CookieJar``-compatible object may be assigned.
        self.cookies = cookiejar_from_dict({})

        # Connection adapters, kept ordered most-specific-prefix first.
        self.adapters = OrderedDict()
        self.mount("https://", HTTPAdapter())
        self.mount("http://", HTTPAdapter())

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def prepare_request(self, request):
        """Merge *request* with this session's settings and return the
        resulting :class:`PreparedRequest <PreparedRequest>`.

        :param request: :class:`Request` instance to prepare with this
            session's settings.
        :rtype: requests.PreparedRequest
        """
        cookies = request.cookies or {}

        # Bootstrap a CookieJar from a plain dict if needed.
        if not isinstance(cookies, cookielib.CookieJar):
            cookies = cookiejar_from_dict(cookies)

        # Session cookies first, then the request's cookies on top.
        session_jar = merge_cookies(RequestsCookieJar(), self.cookies)
        merged_cookies = merge_cookies(session_jar, cookies)

        # Fall back to netrc credentials when nothing explicit is set.
        auth = request.auth
        if self.trust_env and not auth and not self.auth:
            auth = get_netrc_auth(request.url)

        prepared = PreparedRequest()
        prepared.prepare(
            method=request.method.upper(),
            url=request.url,
            files=request.files,
            data=request.data,
            json=request.json,
            headers=merge_setting(
                request.headers, self.headers, dict_class=CaseInsensitiveDict
            ),
            params=merge_setting(request.params, self.params),
            auth=merge_setting(auth, self.auth),
            cookies=merged_cookies,
            hooks=merge_hooks(request.hooks, self.hooks),
        )
        return prepared
+ auth = request.auth + if self.trust_env and not auth and not self.auth: + auth = get_netrc_auth(request.url) + + p = PreparedRequest() + p.prepare( + method=request.method.upper(), + url=request.url, + files=request.files, + data=request.data, + json=request.json, + headers=merge_setting( + request.headers, self.headers, dict_class=CaseInsensitiveDict + ), + params=merge_setting(request.params, self.params), + auth=merge_setting(auth, self.auth), + cookies=merged_cookies, + hooks=merge_hooks(request.hooks, self.hooks), + ) + return p + + def request( + self, + method, + url, + params=None, + data=None, + headers=None, + cookies=None, + files=None, + auth=None, + timeout=None, + allow_redirects=True, + proxies=None, + hooks=None, + stream=None, + verify=None, + cert=None, + json=None, + ): + """Constructs a :class:`Request `, prepares it and sends it. + Returns :class:`Response ` object. + + :param method: method for the new :class:`Request` object. + :param url: URL for the new :class:`Request` object. + :param params: (optional) Dictionary or bytes to be sent in the query + string for the :class:`Request`. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json to send in the body of the + :class:`Request`. + :param headers: (optional) Dictionary of HTTP Headers to send with the + :class:`Request`. + :param cookies: (optional) Dict or CookieJar object to send with the + :class:`Request`. + :param files: (optional) Dictionary of ``'filename': file-like-objects`` + for multipart encoding upload. + :param auth: (optional) Auth tuple or callable to enable + Basic/Digest/Custom HTTP Auth. + :param timeout: (optional) How long to wait for the server to send + data before giving up, as a float, or a :ref:`(connect timeout, + read timeout) ` tuple. + :type timeout: float or tuple + :param allow_redirects: (optional) Set to True by default. 
+ :type allow_redirects: bool + :param proxies: (optional) Dictionary mapping protocol or protocol and + hostname to the URL of the proxy. + :param hooks: (optional) Dictionary mapping hook name to one event or + list of events, event must be callable. + :param stream: (optional) whether to immediately download the response + content. Defaults to ``False``. + :param verify: (optional) Either a boolean, in which case it controls whether we verify + the server's TLS certificate, or a string, in which case it must be a path + to a CA bundle to use. Defaults to ``True``. When set to + ``False``, requests will accept any TLS certificate presented by + the server, and will ignore hostname mismatches and/or expired + certificates, which will make your application vulnerable to + man-in-the-middle (MitM) attacks. Setting verify to ``False`` + may be useful during local development or testing. + :param cert: (optional) if String, path to ssl client cert file (.pem). + If Tuple, ('cert', 'key') pair. + :rtype: requests.Response + """ + # Create the Request. + req = Request( + method=method.upper(), + url=url, + headers=headers, + files=files, + data=data or {}, + json=json, + params=params or {}, + auth=auth, + cookies=cookies, + hooks=hooks, + ) + prep = self.prepare_request(req) + + proxies = proxies or {} + + settings = self.merge_environment_settings( + prep.url, proxies, stream, verify, cert + ) + + # Send the request. + send_kwargs = { + "timeout": timeout, + "allow_redirects": allow_redirects, + } + send_kwargs.update(settings) + resp = self.send(prep, **send_kwargs) + + return resp + + def get(self, url, **kwargs): + r"""Sends a GET request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. 
+ :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", True) + return self.request("GET", url, **kwargs) + + def options(self, url, **kwargs): + r"""Sends a OPTIONS request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", True) + return self.request("OPTIONS", url, **kwargs) + + def head(self, url, **kwargs): + r"""Sends a HEAD request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + kwargs.setdefault("allow_redirects", False) + return self.request("HEAD", url, **kwargs) + + def post(self, url, data=None, json=None, **kwargs): + r"""Sends a POST request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param json: (optional) json to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("POST", url, data=data, json=json, **kwargs) + + def put(self, url, data=None, **kwargs): + r"""Sends a PUT request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("PUT", url, data=data, **kwargs) + + def patch(self, url, data=None, **kwargs): + r"""Sends a PATCH request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. 
+ :param data: (optional) Dictionary, list of tuples, bytes, or file-like + object to send in the body of the :class:`Request`. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("PATCH", url, data=data, **kwargs) + + def delete(self, url, **kwargs): + r"""Sends a DELETE request. Returns :class:`Response` object. + + :param url: URL for the new :class:`Request` object. + :param \*\*kwargs: Optional arguments that ``request`` takes. + :rtype: requests.Response + """ + + return self.request("DELETE", url, **kwargs) + + def send(self, request, **kwargs): + """Send a given PreparedRequest. + + :rtype: requests.Response + """ + # Set defaults that the hooks can utilize to ensure they always have + # the correct parameters to reproduce the previous request. + kwargs.setdefault("stream", self.stream) + kwargs.setdefault("verify", self.verify) + kwargs.setdefault("cert", self.cert) + if "proxies" not in kwargs: + kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env) + + # It's possible that users might accidentally send a Request object. + # Guard against that specific failure case. 
+ if isinstance(request, Request): + raise ValueError("You can only send PreparedRequests.") + + # Set up variables needed for resolve_redirects and dispatching of hooks + allow_redirects = kwargs.pop("allow_redirects", True) + stream = kwargs.get("stream") + hooks = request.hooks + + # Get the appropriate adapter to use + adapter = self.get_adapter(url=request.url) + + # Start time (approximately) of the request + start = preferred_clock() + + # Send the request + r = adapter.send(request, **kwargs) + + # Total elapsed time of the request (approximately) + elapsed = preferred_clock() - start + r.elapsed = timedelta(seconds=elapsed) + + # Response manipulation hooks + r = dispatch_hook("response", hooks, r, **kwargs) + + # Persist cookies + if r.history: + # If the hooks create history then we want those cookies too + for resp in r.history: + extract_cookies_to_jar(self.cookies, resp.request, resp.raw) + + extract_cookies_to_jar(self.cookies, request, r.raw) + + # Resolve redirects if allowed. + if allow_redirects: + # Redirect resolving generator. + gen = self.resolve_redirects(r, request, **kwargs) + history = [resp for resp in gen] + else: + history = [] + + # Shuffle things around if there's history. + if history: + # Insert the first (original) request at the start + history.insert(0, r) + # Get the last request made + r = history.pop() + r.history = history + + # If redirects aren't being followed, store the response on the Request for Response.next(). + if not allow_redirects: + try: + r._next = next( + self.resolve_redirects(r, request, yield_requests=True, **kwargs) + ) + except StopIteration: + pass + + if not stream: + r.content + + return r + + def merge_environment_settings(self, url, proxies, stream, verify, cert): + """ + Check the environment and merge it with some settings. + + :rtype: dict + """ + # Gather clues from the surrounding environment. + if self.trust_env: + # Set environment's proxies. 
+ no_proxy = proxies.get("no_proxy") if proxies is not None else None + env_proxies = get_environ_proxies(url, no_proxy=no_proxy) + for k, v in env_proxies.items(): + proxies.setdefault(k, v) + + # Look for requests environment configuration + # and be compatible with cURL. + if verify is True or verify is None: + verify = ( + os.environ.get("REQUESTS_CA_BUNDLE") + or os.environ.get("CURL_CA_BUNDLE") + or verify + ) + + # Merge all the kwargs. + proxies = merge_setting(proxies, self.proxies) + stream = merge_setting(stream, self.stream) + verify = merge_setting(verify, self.verify) + cert = merge_setting(cert, self.cert) + + return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert} + + def get_adapter(self, url): + """ + Returns the appropriate connection adapter for the given URL. + + :rtype: requests.adapters.BaseAdapter + """ + for prefix, adapter in self.adapters.items(): + if url.lower().startswith(prefix.lower()): + return adapter + + # Nothing matches :-/ + raise InvalidSchema(f"No connection adapters were found for {url!r}") + + def close(self): + """Closes all adapters and as such the session""" + for v in self.adapters.values(): + v.close() + + def mount(self, prefix, adapter): + """Registers a connection adapter to a prefix. + + Adapters are sorted in descending order by prefix length. + """ + self.adapters[prefix] = adapter + keys_to_move = [k for k in self.adapters if len(k) < len(prefix)] + + for key in keys_to_move: + self.adapters[key] = self.adapters.pop(key) + + def __getstate__(self): + state = {attr: getattr(self, attr, None) for attr in self.__attrs__} + return state + + def __setstate__(self, state): + for attr, value in state.items(): + setattr(self, attr, value) + + +def session(): + """ + Returns a :class:`Session` for context-management. + + .. deprecated:: 1.0.0 + + This method has been deprecated since version 1.0.0 and is only kept for + backwards compatibility. 
class CaseInsensitiveDict(MutableMapping):
    """A case-insensitive ``dict``-like object.

    Implements all methods and operations of ``MutableMapping`` as well
    as dict's ``copy``. Also provides ``lower_items``.

    All keys are expected to be strings. The structure remembers the
    case of the last key to be set, and ``iter(instance)``, ``keys()``,
    ``items()``, ``iterkeys()``, and ``iteritems()`` will contain
    case-sensitive keys. However, querying and contains testing is
    case insensitive::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` will return the value
    of a ``'Content-Encoding'`` response header, regardless of how the
    header name was originally stored.

    If the constructor, ``.update``, or equality comparison operations
    are given keys that have equal ``.lower()``s, the behavior is
    undefined.
    """

    def __init__(self, data=None, **kwargs):
        # Maps lowercased key -> (original-cased key, value).
        self._store = OrderedDict()
        if data is None:
            data = {}
        self.update(data, **kwargs)

    def __setitem__(self, key, value):
        # Use the lowercased key for lookups, but store the actual
        # key alongside the value so iteration preserves casing.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        # Yield the original-cased keys.
        return (casedkey for casedkey, mappedvalue in self._store.values())

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return ((lowerkey, keyval[1]) for (lowerkey, keyval) in self._store.items())

    def __eq__(self, other):
        if isinstance(other, Mapping):
            other = CaseInsensitiveDict(other)
        else:
            return NotImplemented
        # Compare insensitively via the lowercased views.
        return dict(self.lower_items()) == dict(other.lower_items())

    # Copy is required (MutableMapping does not provide it).
    def copy(self):
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))


class LookupDict(dict):
    """Dictionary lookup object.

    Lookups read instance attributes (``self.__dict__``) rather than
    the dict's own storage, and missing keys fall through to ``None``.
    """

    def __init__(self, name=None):
        self.name = name
        super().__init__()

    def __repr__(self):
        # FIX: the committed repr had collapsed to an empty f-string
        # (``f""``), losing the lookup's name entirely; restored to the
        # upstream format.
        return f"<lookup '{self.name}'>"

    def __getitem__(self, key):
        # We allow fall-through here, so values default to None.
        return self.__dict__.get(key, None)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
import certs +from .__version__ import __version__ + +# to_native_string is unused here, but imported here for backwards compatibility +from ._internal_utils import ( # noqa: F401 + _HEADER_VALIDATORS_BYTE, + _HEADER_VALIDATORS_STR, + HEADER_VALIDATORS, + to_native_string, +) +from .compat import ( + Mapping, + basestring, + bytes, + getproxies, + getproxies_environment, + integer_types, +) +from .compat import parse_http_list as _parse_list_header +from .compat import ( + proxy_bypass, + proxy_bypass_environment, + quote, + str, + unquote, + urlparse, + urlunparse, +) +from .cookies import cookiejar_from_dict +from .exceptions import ( + FileModeWarning, + InvalidHeader, + InvalidURL, + UnrewindableBodyError, +) +from .structures import CaseInsensitiveDict + +NETRC_FILES = (".netrc", "_netrc") + +DEFAULT_CA_BUNDLE_PATH = certs.where() + +DEFAULT_PORTS = {"http": 80, "https": 443} + +# Ensure that ', ' is used to preserve previous delimiter behavior. +DEFAULT_ACCEPT_ENCODING = ", ".join( + re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"]) +) + + +if sys.platform == "win32": + # provide a proxy_bypass version on Windows without DNS lookups + + def proxy_bypass_registry(host): + try: + import winreg + except ImportError: + return False + + try: + internetSettings = winreg.OpenKey( + winreg.HKEY_CURRENT_USER, + r"Software\Microsoft\Windows\CurrentVersion\Internet Settings", + ) + # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it + proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0]) + # ProxyOverride is almost always a string + proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0] + except (OSError, ValueError): + return False + if not proxyEnable or not proxyOverride: + return False + + # make a check value list from the registry entry: replace the + # '' string by the localhost entry and the corresponding + # canonical entry. 
+ proxyOverride = proxyOverride.split(";") + # filter out empty strings to avoid re.match return true in the following code. + proxyOverride = filter(None, proxyOverride) + # now check if we match one of the registry values. + for test in proxyOverride: + if test == "": + if "." not in host: + return True + test = test.replace(".", r"\.") # mask dots + test = test.replace("*", r".*") # change glob sequence + test = test.replace("?", r".") # change glob char + if re.match(test, host, re.I): + return True + return False + + def proxy_bypass(host): # noqa + """Return True, if the host should be bypassed. + + Checks proxy settings gathered from the environment, if specified, + or the registry. + """ + if getproxies_environment(): + return proxy_bypass_environment(host) + else: + return proxy_bypass_registry(host) + + +def dict_to_sequence(d): + """Returns an internal sequence dictionary update.""" + + if hasattr(d, "items"): + d = d.items() + + return d + + +def super_len(o): + total_length = None + current_position = 0 + + if isinstance(o, str): + o = o.encode("utf-8") + + if hasattr(o, "__len__"): + total_length = len(o) + + elif hasattr(o, "len"): + total_length = o.len + + elif hasattr(o, "fileno"): + try: + fileno = o.fileno() + except (io.UnsupportedOperation, AttributeError): + # AttributeError is a surprising exception, seeing as how we've just checked + # that `hasattr(o, 'fileno')`. It happens for objects obtained via + # `Tarfile.extractfile()`, per issue 5229. + pass + else: + total_length = os.fstat(fileno).st_size + + # Having used fstat to determine the file length, we need to + # confirm that this file was opened up in binary mode. + if "b" not in o.mode: + warnings.warn( + ( + "Requests has determined the content-length for this " + "request using the binary size of the file: however, the " + "file has been opened in text mode (i.e. without the 'b' " + "flag in the mode). This may lead to an incorrect " + "content-length. 
In Requests 3.0, support will be removed " + "for files in text mode." + ), + FileModeWarning, + ) + + if hasattr(o, "tell"): + try: + current_position = o.tell() + except OSError: + # This can happen in some weird situations, such as when the file + # is actually a special file descriptor like stdin. In this + # instance, we don't know what the length is, so set it to zero and + # let requests chunk it instead. + if total_length is not None: + current_position = total_length + else: + if hasattr(o, "seek") and total_length is None: + # StringIO and BytesIO have seek but no usable fileno + try: + # seek to end of file + o.seek(0, 2) + total_length = o.tell() + + # seek back to current position to support + # partially read file-like objects + o.seek(current_position or 0) + except OSError: + total_length = 0 + + if total_length is None: + total_length = 0 + + return max(0, total_length - current_position) + + +def get_netrc_auth(url, raise_errors=False): + """Returns the Requests tuple auth for a given url from netrc.""" + + netrc_file = os.environ.get("NETRC") + if netrc_file is not None: + netrc_locations = (netrc_file,) + else: + netrc_locations = (f"~/{f}" for f in NETRC_FILES) + + try: + from netrc import NetrcParseError, netrc + + netrc_path = None + + for f in netrc_locations: + try: + loc = os.path.expanduser(f) + except KeyError: + # os.path.expanduser can fail when $HOME is undefined and + # getpwuid fails. See https://bugs.python.org/issue20164 & + # https://github.com/psf/requests/issues/1846 + return + + if os.path.exists(loc): + netrc_path = loc + break + + # Abort early if there isn't one. + if netrc_path is None: + return + + ri = urlparse(url) + + # Strip port numbers from netloc. This weird `if...encode`` dance is + # used for Python 3.2, which doesn't support unicode literals. 
def guess_filename(obj):
    """Guess a filename from an object's ``name`` attribute, ignoring
    placeholder names like ``<stdin>``.
    """
    name = getattr(obj, "name", None)
    if name and isinstance(name, basestring) and name[0] != "<" and name[-1] != ">":
        return os.path.basename(name)


def extract_zipped_paths(path):
    """Replace nonexistent paths that look like they refer to a member
    of a zip archive with the location of an extracted copy of the
    target, or else just return the provided path unchanged.
    """
    if os.path.exists(path):
        # Already a valid path; nothing further to do.
        return path

    # Walk up the path until an existing prefix is found; whatever was
    # split off along the way is treated as the archive member name.
    archive, member = os.path.split(path)
    while archive and not os.path.exists(archive):
        archive, prefix = os.path.split(archive)
        if not prefix:
            # Guard against the rare corner case where split() leaves
            # `archive` unchanged, which would loop forever.
            break
        member = "/".join([prefix, member])

    if not zipfile.is_zipfile(archive):
        return path

    zip_file = zipfile.ZipFile(archive)
    if member not in zip_file.namelist():
        return path

    # Valid archive and member: extract next to the system temp dir.
    extracted_path = os.path.join(tempfile.gettempdir(), member.split("/")[-1])
    if not os.path.exists(extracted_path):
        # read + write (rather than extract) so no nested folders are
        # created; atomic_open also avoids a mkdir race condition.
        with atomic_open(extracted_path) as file_handler:
            file_handler.write(zip_file.read(member))
    return extracted_path


@contextlib.contextmanager
def atomic_open(filename):
    """Write a file to the disk in an atomic fashion"""
    tmp_descriptor, tmp_name = tempfile.mkstemp(dir=os.path.dirname(filename))
    try:
        with os.fdopen(tmp_descriptor, "wb") as tmp_handler:
            yield tmp_handler
        # Only swap the temp file into place once it was fully written.
        os.replace(tmp_name, filename)
    except BaseException:
        os.remove(tmp_name)
        raise


def from_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. Unless it can not be represented as such, return an
    OrderedDict, e.g.,

    ::

        >>> from_key_val_list([('key', 'val')])
        OrderedDict([('key', 'val')])
        >>> from_key_val_list('string')
        Traceback (most recent call last):
        ...
        ValueError: cannot encode objects that are not 2-tuples
        >>> from_key_val_list({'key': 'val'})
        OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    if value is None:
        return None
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError("cannot encode objects that are not 2-tuples")
    return OrderedDict(value)
def to_key_val_list(value):
    """Take an object and test to see if it can be represented as a
    dictionary. If it can be, return a list of tuples, e.g.,

    ::

        >>> to_key_val_list([('key', 'val')])
        [('key', 'val')]
        >>> to_key_val_list({'key': 'val'})
        [('key', 'val')]
        >>> to_key_val_list('string')
        Traceback (most recent call last):
        ...
        ValueError: cannot encode objects that are not 2-tuples

    :rtype: list
    """
    if value is None:
        return None
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError("cannot encode objects that are not 2-tuples")
    if isinstance(value, Mapping):
        value = value.items()
    return list(value)


# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    Comma-separated lists are parsed where the elements may include
    quoted-strings (which may themselves contain commas); quotes are
    removed automatically after parsing. Items may appear multiple
    times and case sensitivity is preserved.

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    :param value: a string with a list header.
    :return: :class:`list`
    :rtype: list
    """
    parsed = []
    for item in _parse_list_header(value):
        if item[:1] == item[-1:] == '"':
            item = unquote_header_value(item[1:-1])
        parsed.append(item)
    return parsed


# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068
    Section 2 and convert them into a python dict:

    >>> d = parse_dict_header('foo="is a fish", bar="as well"')
    >>> type(d) is dict
    True
    >>> sorted(d.items())
    [('bar', 'as well'), ('foo', 'is a fish')]

    If there is no value for a key it will be `None`:

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    :param value: a string with a dict header.
    :return: :class:`dict`
    :rtype: dict
    """
    parsed = {}
    for item in _parse_list_header(value):
        if "=" not in item:
            parsed[item] = None
            continue
        name, _, raw = item.partition("=")
        if raw[:1] == raw[-1:] == '"':
            raw = unquote_header_value(raw[1:-1])
        parsed[name] = raw
    return parsed


# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).
    This does not use the real unquoting but what browsers are actually
    using for quoting.

    :param value: the header value to unquote.
    :rtype: str
    """
    if not (value and value[0] == value[-1] == '"'):
        return value

    # Not the real unquoting: matching the RFC exactly would break with
    # Internet Explorer (and others), which uploads files with
    # "C:\foo\bar.txt" as the filename.
    value = value[1:-1]

    # For filenames that look like UNC paths, return the value without
    # further unescaping: the replace sequence below would collapse the
    # leading double backslash. See #458.
    if is_filename and value[:2] == "\\\\":
        return value
    return value.replace("\\\\", "\\").replace('\\"', '"')
Using the + # replace sequence below on a UNC path has the effect of turning + # the leading double slash into a single slash and then + # _fix_ie_filename() doesn't work correctly. See #458. + if not is_filename or value[:2] != "\\\\": + return value.replace("\\\\", "\\").replace('\\"', '"') + return value + + +def dict_from_cookiejar(cj): + """Returns a key/value dictionary from a CookieJar. + + :param cj: CookieJar object to extract cookies from. + :rtype: dict + """ + + cookie_dict = {cookie.name: cookie.value for cookie in cj} + return cookie_dict + + +def add_dict_to_cookiejar(cj, cookie_dict): + """Returns a CookieJar from a key/value dictionary. + + :param cj: CookieJar to insert cookies into. + :param cookie_dict: Dict of key/values to insert into CookieJar. + :rtype: CookieJar + """ + + return cookiejar_from_dict(cookie_dict, cj) + + +def get_encodings_from_content(content): + """Returns encodings from given content string. + + :param content: bytestring to extract encodings from. + """ + warnings.warn( + ( + "In requests 3.0, get_encodings_from_content will be removed. For " + "more information, please see the discussion on issue #2266. 
(This" + " warning should only appear once.)" + ), + DeprecationWarning, + ) + + charset_re = re.compile(r']', flags=re.I) + pragma_re = re.compile(r']', flags=re.I) + xml_re = re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]') + + return ( + charset_re.findall(content) + + pragma_re.findall(content) + + xml_re.findall(content) + ) + + +def _parse_content_type_header(header): + """Returns content type and parameters from given header + + :param header: string + :return: tuple containing content type and dictionary of + parameters + """ + + tokens = header.split(";") + content_type, params = tokens[0].strip(), tokens[1:] + params_dict = {} + items_to_strip = "\"' " + + for param in params: + param = param.strip() + if param: + key, value = param, True + index_of_equals = param.find("=") + if index_of_equals != -1: + key = param[:index_of_equals].strip(items_to_strip) + value = param[index_of_equals + 1 :].strip(items_to_strip) + params_dict[key.lower()] = value + return content_type, params_dict + + +def get_encoding_from_headers(headers): + """Returns encodings from given HTTP Header Dict. + + :param headers: dictionary to extract encoding from. 
+ :rtype: str + """ + + content_type = headers.get("content-type") + + if not content_type: + return None + + content_type, params = _parse_content_type_header(content_type) + + if "charset" in params: + return params["charset"].strip("'\"") + + if "text" in content_type: + return "ISO-8859-1" + + if "application/json" in content_type: + # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset + return "utf-8" + + +def stream_decode_response_unicode(iterator, r): + """Stream decodes an iterator.""" + + if r.encoding is None: + yield from iterator + return + + decoder = codecs.getincrementaldecoder(r.encoding)(errors="replace") + for chunk in iterator: + rv = decoder.decode(chunk) + if rv: + yield rv + rv = decoder.decode(b"", final=True) + if rv: + yield rv + + +def iter_slices(string, slice_length): + """Iterate over slices of a string.""" + pos = 0 + if slice_length is None or slice_length <= 0: + slice_length = len(string) + while pos < len(string): + yield string[pos : pos + slice_length] + pos += slice_length + + +def get_unicode_from_response(r): + """Returns the requested content back in unicode. + + :param r: Response object to get unicode content from. + + Tried: + + 1. charset from content-type + 2. fall back and replace all unicode characters + + :rtype: str + """ + warnings.warn( + ( + "In requests 3.0, get_unicode_from_response will be removed. For " + "more information, please see the discussion on issue #2266. 
(This" + " warning should only appear once.)" + ), + DeprecationWarning, + ) + + tried_encodings = [] + + # Try charset from content-type + encoding = get_encoding_from_headers(r.headers) + + if encoding: + try: + return str(r.content, encoding) + except UnicodeError: + tried_encodings.append(encoding) + + # Fall back: + try: + return str(r.content, encoding, errors="replace") + except TypeError: + return r.content + + +# The unreserved URI characters (RFC 3986) +UNRESERVED_SET = frozenset( + "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz" + "0123456789-._~" +) + + +def unquote_unreserved(uri): + """Un-escape any percent-escape sequences in a URI that are unreserved + characters. This leaves all reserved, illegal and non-ASCII bytes encoded. + + :rtype: str + """ + parts = uri.split("%") + for i in range(1, len(parts)): + h = parts[i][0:2] + if len(h) == 2 and h.isalnum(): + try: + c = chr(int(h, 16)) + except ValueError: + raise InvalidURL(f"Invalid percent-escape sequence: '{h}'") + + if c in UNRESERVED_SET: + parts[i] = c + parts[i][2:] + else: + parts[i] = f"%{parts[i]}" + else: + parts[i] = f"%{parts[i]}" + return "".join(parts) + + +def requote_uri(uri): + """Re-quote the given URI. + + This function passes the given URI through an unquote/quote cycle to + ensure that it is fully and consistently quoted. + + :rtype: str + """ + safe_with_percent = "!#$%&'()*+,/:;=?@[]~" + safe_without_percent = "!#$&'()*+,/:;=?@[]~" + try: + # Unquote only the unreserved characters + # Then quote only illegal characters (do not quote reserved, + # unreserved, or '%') + return quote(unquote_unreserved(uri), safe=safe_with_percent) + except InvalidURL: + # We couldn't unquote the given URI, so let's try quoting it, but + # there may be unquoted '%'s in the URI. We need to make sure they're + # properly quoted so they do not cause issues elsewhere. 
+ return quote(uri, safe=safe_without_percent) + + +def address_in_network(ip, net): + """This function allows you to check if an IP belongs to a network subnet + + Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24 + returns False if ip = 192.168.1.1 and net = 192.168.100.0/24 + + :rtype: bool + """ + ipaddr = struct.unpack("=L", socket.inet_aton(ip))[0] + netaddr, bits = net.split("/") + netmask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0] + network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & netmask + return (ipaddr & netmask) == (network & netmask) + + +def dotted_netmask(mask): + """Converts mask from /xx format to xxx.xxx.xxx.xxx + + Example: if mask is 24 function returns 255.255.255.0 + + :rtype: str + """ + bits = 0xFFFFFFFF ^ (1 << 32 - mask) - 1 + return socket.inet_ntoa(struct.pack(">I", bits)) + + +def is_ipv4_address(string_ip): + """ + :rtype: bool + """ + try: + socket.inet_aton(string_ip) + except OSError: + return False + return True + + +def is_valid_cidr(string_network): + """ + Very simple check of the cidr format in no_proxy variable. + + :rtype: bool + """ + if string_network.count("/") == 1: + try: + mask = int(string_network.split("/")[1]) + except ValueError: + return False + + if mask < 1 or mask > 32: + return False + + try: + socket.inet_aton(string_network.split("/")[0]) + except OSError: + return False + else: + return False + return True + + +@contextlib.contextmanager +def set_environ(env_name, value): + """Set the environment variable 'env_name' to 'value' + + Save previous value, yield, and then restore the previous value stored in + the environment variable 'env_name'. 
+ + If 'value' is None, do nothing""" + value_changed = value is not None + if value_changed: + old_value = os.environ.get(env_name) + os.environ[env_name] = value + try: + yield + finally: + if value_changed: + if old_value is None: + del os.environ[env_name] + else: + os.environ[env_name] = old_value + + +def should_bypass_proxies(url, no_proxy): + """ + Returns whether we should bypass proxies or not. + + :rtype: bool + """ + + # Prioritize lowercase environment variables over uppercase + # to keep a consistent behaviour with other http projects (curl, wget). + def get_proxy(key): + return os.environ.get(key) or os.environ.get(key.upper()) + + # First check whether no_proxy is defined. If it is, check that the URL + # we're getting isn't in the no_proxy list. + no_proxy_arg = no_proxy + if no_proxy is None: + no_proxy = get_proxy("no_proxy") + parsed = urlparse(url) + + if parsed.hostname is None: + # URLs don't always have hostnames, e.g. file:/// urls. + return True + + if no_proxy: + # We need to check whether we match here. We need to see if we match + # the end of the hostname, both with and without the port. + no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host) + + if is_ipv4_address(parsed.hostname): + for proxy_ip in no_proxy: + if is_valid_cidr(proxy_ip): + if address_in_network(parsed.hostname, proxy_ip): + return True + elif parsed.hostname == proxy_ip: + # If no_proxy ip was defined in plain IP notation instead of cidr notation & + # matches the IP of the index + return True + else: + host_with_port = parsed.hostname + if parsed.port: + host_with_port += f":{parsed.port}" + + for host in no_proxy: + if parsed.hostname.endswith(host) or host_with_port.endswith(host): + # The URL does match something in no_proxy, so we don't want + # to apply the proxies on this URL. + return True + + with set_environ("no_proxy", no_proxy_arg): + # parsed.hostname can be `None` in cases such as a file URI. 
+ try: + bypass = proxy_bypass(parsed.hostname) + except (TypeError, socket.gaierror): + bypass = False + + if bypass: + return True + + return False + + +def get_environ_proxies(url, no_proxy=None): + """ + Return a dict of environment proxies. + + :rtype: dict + """ + if should_bypass_proxies(url, no_proxy=no_proxy): + return {} + else: + return getproxies() + + +def select_proxy(url, proxies): + """Select a proxy for the url, if applicable. + + :param url: The url being for the request + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs + """ + proxies = proxies or {} + urlparts = urlparse(url) + if urlparts.hostname is None: + return proxies.get(urlparts.scheme, proxies.get("all")) + + proxy_keys = [ + urlparts.scheme + "://" + urlparts.hostname, + urlparts.scheme, + "all://" + urlparts.hostname, + "all", + ] + proxy = None + for proxy_key in proxy_keys: + if proxy_key in proxies: + proxy = proxies[proxy_key] + break + + return proxy + + +def resolve_proxies(request, proxies, trust_env=True): + """This method takes proxy information from a request and configuration + input to resolve a mapping of target proxies. This will consider settings + such as NO_PROXY to strip proxy configurations. 
+ + :param request: Request or PreparedRequest + :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs + :param trust_env: Boolean declaring whether to trust environment configs + + :rtype: dict + """ + proxies = proxies if proxies is not None else {} + url = request.url + scheme = urlparse(url).scheme + no_proxy = proxies.get("no_proxy") + new_proxies = proxies.copy() + + if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy): + environ_proxies = get_environ_proxies(url, no_proxy=no_proxy) + + proxy = environ_proxies.get(scheme, environ_proxies.get("all")) + + if proxy: + new_proxies.setdefault(scheme, proxy) + return new_proxies + + +def default_user_agent(name="python-requests"): + """ + Return a string representing the default user agent. + + :rtype: str + """ + return f"{name}/{__version__}" + + +def default_headers(): + """ + :rtype: requests.structures.CaseInsensitiveDict + """ + return CaseInsensitiveDict( + { + "User-Agent": default_user_agent(), + "Accept-Encoding": DEFAULT_ACCEPT_ENCODING, + "Accept": "*/*", + "Connection": "keep-alive", + } + ) + + +def parse_header_links(value): + """Return a list of parsed link headers proxies. + + i.e. 
Link: ; rel=front; type="image/jpeg",; rel=back;type="image/jpeg" + + :rtype: list + """ + + links = [] + + replace_chars = " '\"" + + value = value.strip(replace_chars) + if not value: + return links + + for val in re.split(", *<", value): + try: + url, params = val.split(";", 1) + except ValueError: + url, params = val, "" + + link = {"url": url.strip("<> '\"")} + + for param in params.split(";"): + try: + key, value = param.split("=") + except ValueError: + break + + link[key.strip(replace_chars)] = value.strip(replace_chars) + + links.append(link) + + return links + + +# Null bytes; no need to recreate these on each call to guess_json_utf +_null = "\x00".encode("ascii") # encoding to ASCII for Python 3 +_null2 = _null * 2 +_null3 = _null * 3 + + +def guess_json_utf(data): + """ + :rtype: str + """ + # JSON always starts with two ASCII characters, so detection is as + # easy as counting the nulls and from their location and count + # determine the encoding. Also detect a BOM, if present. + sample = data[:4] + if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE): + return "utf-32" # BOM included + if sample[:3] == codecs.BOM_UTF8: + return "utf-8-sig" # BOM included, MS style (discouraged) + if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE): + return "utf-16" # BOM included + nullcount = sample.count(_null) + if nullcount == 0: + return "utf-8" + if nullcount == 2: + if sample[::2] == _null2: # 1st and 3rd are null + return "utf-16-be" + if sample[1::2] == _null2: # 2nd and 4th are null + return "utf-16-le" + # Did not detect 2 valid UTF-16 ascii-range characters + if nullcount == 3: + if sample[:3] == _null3: + return "utf-32-be" + if sample[1:] == _null3: + return "utf-32-le" + # Did not detect a valid UTF-32 ascii-range character + return None + + +def prepend_scheme_if_needed(url, new_scheme): + """Given a URL that may or may not have a scheme, prepend the given scheme. + Does not replace a present scheme with the one provided as an argument. 
+ + :rtype: str + """ + parsed = parse_url(url) + scheme, auth, host, port, path, query, fragment = parsed + + # A defect in urlparse determines that there isn't a netloc present in some + # urls. We previously assumed parsing was overly cautious, and swapped the + # netloc and path. Due to a lack of tests on the original defect, this is + # maintained with parse_url for backwards compatibility. + netloc = parsed.netloc + if not netloc: + netloc, path = path, netloc + + if auth: + # parse_url doesn't provide the netloc with auth + # so we'll add it ourselves. + netloc = "@".join([auth, netloc]) + if scheme is None: + scheme = new_scheme + if path is None: + path = "" + + return urlunparse((scheme, netloc, path, "", query, fragment)) + + +def get_auth_from_url(url): + """Given a url with authentication components, extract them into a tuple of + username,password. + + :rtype: (str,str) + """ + parsed = urlparse(url) + + try: + auth = (unquote(parsed.username), unquote(parsed.password)) + except (AttributeError, TypeError): + auth = ("", "") + + return auth + + +def check_header_validity(header): + """Verifies that header parts don't contain leading whitespace + reserved characters, or return characters. + + :param header: tuple, in the format (name, value). 
+ """ + name, value = header + _validate_header_part(header, name, 0) + _validate_header_part(header, value, 1) + + +def _validate_header_part(header, header_part, header_validator_index): + if isinstance(header_part, str): + validator = _HEADER_VALIDATORS_STR[header_validator_index] + elif isinstance(header_part, bytes): + validator = _HEADER_VALIDATORS_BYTE[header_validator_index] + else: + raise InvalidHeader( + f"Header part ({header_part!r}) from {header} " + f"must be of type str or bytes, not {type(header_part)}" + ) + + if not validator.match(header_part): + header_kind = "name" if header_validator_index == 0 else "value" + raise InvalidHeader( + f"Invalid leading whitespace, reserved character(s), or return " + f"character(s) in header {header_kind}: {header_part!r}" + ) + + +def urldefragauth(url): + """ + Given a url remove the fragment and the authentication part. + + :rtype: str + """ + scheme, netloc, path, params, query, fragment = urlparse(url) + + # see func:`prepend_scheme_if_needed` + if not netloc: + netloc, path = path, netloc + + netloc = netloc.rsplit("@", 1)[-1] + + return urlunparse((scheme, netloc, path, params, query, "")) + + +def rewind_body(prepared_request): + """Move file pointer back to its recorded starting position + so it can be read again on redirect. + """ + body_seek = getattr(prepared_request.body, "seek", None) + if body_seek is not None and isinstance( + prepared_request._body_position, integer_types + ): + try: + body_seek(prepared_request._body_position) + except OSError: + raise UnrewindableBodyError( + "An error occurred when rewinding request body for redirect." 
+ ) + else: + raise UnrewindableBodyError("Unable to rewind request body for redirect.") diff --git a/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/LICENSE b/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..67589cbb8600ecbd6589f3374ccb724320c82617 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/LICENSE @@ -0,0 +1,13 @@ +Copyright 2011 Sybren A. Stüvel + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/METADATA b/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..926968149bf44093ead38e5b403598063eac407a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/METADATA @@ -0,0 +1,106 @@ +Metadata-Version: 2.1 +Name: rsa +Version: 4.9 +Summary: Pure-Python RSA implementation +Home-page: https://stuvel.eu/rsa +License: Apache-2.0 +Author: Sybren A. 
Stüvel +Author-email: sybren@stuvel.eu +Requires-Python: >=3.6,<4 +Classifier: Development Status :: 5 - Production/Stable +Classifier: Intended Audience :: Developers +Classifier: Intended Audience :: Education +Classifier: Intended Audience :: Information Technology +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Security :: Cryptography +Requires-Dist: pyasn1 (>=0.1.3) +Project-URL: Repository, https://github.com/sybrenstuvel/python-rsa +Description-Content-Type: text/markdown + +# Pure Python RSA implementation + +[![PyPI](https://img.shields.io/pypi/v/rsa.svg)](https://pypi.org/project/rsa/) +[![Build Status](https://travis-ci.org/sybrenstuvel/python-rsa.svg?branch=master)](https://travis-ci.org/sybrenstuvel/python-rsa) +[![Coverage Status](https://coveralls.io/repos/github/sybrenstuvel/python-rsa/badge.svg?branch=master)](https://coveralls.io/github/sybrenstuvel/python-rsa?branch=master) +[![Code Climate](https://api.codeclimate.com/v1/badges/a99a88d28ad37a79dbf6/maintainability)](https://codeclimate.com/github/codeclimate/codeclimate/maintainability) + +[Python-RSA](https://stuvel.eu/rsa) is a pure-Python RSA implementation. It supports +encryption and decryption, signing and verifying signatures, and key +generation according to PKCS#1 version 1.5. It can be used as a Python +library as well as on the commandline. The code was mostly written by +Sybren A. Stüvel. 
+ +Documentation can be found at the [Python-RSA homepage](https://stuvel.eu/rsa). For all changes, check [the changelog](https://github.com/sybrenstuvel/python-rsa/blob/master/CHANGELOG.md). + +Download and install using: + + pip install rsa + +or download it from the [Python Package Index](https://pypi.org/project/rsa/). + +The source code is maintained at [GitHub](https://github.com/sybrenstuvel/python-rsa/) and is +licensed under the [Apache License, version 2.0](https://www.apache.org/licenses/LICENSE-2.0) + +## Security + +Because of how Python internally stores numbers, it is very hard (if not impossible) to make a pure-Python program secure against timing attacks. This library is no exception, so use it with care. See https://securitypitfalls.wordpress.com/2018/08/03/constant-time-compare-in-python/ for more info. + +## Setup of Development Environment + +``` +python3 -m venv .venv +. ./.venv/bin/activate +pip install poetry +poetry install +``` + +## Publishing a New Release + +Since this project is considered critical on the Python Package Index, +two-factor authentication is required. For uploading packages to PyPi, an API +key is required; username+password will not work. + +First, generate an API token at https://pypi.org/manage/account/token/. Then, +use this token when publishing instead of your username and password. + +As username, use `__token__`. +As password, use the token itself, including the `pypi-` prefix. + +See https://pypi.org/help/#apitoken for help using API tokens to publish. This +is what I have in `~/.pypirc`: + +``` +[distutils] +index-servers = + rsa + +# Use `twine upload -r rsa` to upload with this token. +[rsa] + repository = https://upload.pypi.org/legacy/ + username = __token__ + password = pypi-token +``` + +``` +. 
./.venv/bin/activate +pip install twine + +poetry build +twine check dist/rsa-4.9.tar.gz dist/rsa-4.9-*.whl +twine upload -r rsa dist/rsa-4.9.tar.gz dist/rsa-4.9-*.whl +``` + +The `pip install twine` is necessary as Python-RSA requires Python >= 3.6, and +Twine requires at least version 3.7. This means Poetry refuses to add it as +dependency. + diff --git a/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/RECORD b/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..05c0ea8eed1f4db680f164bfe5c0b556fde43afe --- /dev/null +++ b/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/RECORD @@ -0,0 +1,42 @@ +../../../bin/pyrsa-decrypt,sha256=tAsI7GKCA4cITVWBlc00H-Pfp5XVADAxIdsktqOgfwc,222 +../../../bin/pyrsa-encrypt,sha256=zV92ZQ4x8pk9HRzSwStdRrGoFHJOXOluRa4ih6_UM9M,222 +../../../bin/pyrsa-keygen,sha256=Z7wBo6532p6lsX1Z6tiVf0ovrc-EFDhfDvgBJ6f908s,220 +../../../bin/pyrsa-priv2pub,sha256=0vc_03u4pi4xFIr-86qM-7eKWtkNQpV9xy3_01hjTMA,243 +../../../bin/pyrsa-sign,sha256=gLjeBckcKpRGE5yqk-xzahg0BK7IY0FKW8n0u1gBkCc,216 +../../../bin/pyrsa-verify,sha256=i9uGJ9Ls1PyM7oOauehuGO4DSigKIbRu8PSzR32jHpo,220 +rsa-4.9.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +rsa-4.9.dist-info/LICENSE,sha256=Bz8ot9OJyP509gfhfCf4HqpazmntxDqITyP0G0HFxyY,577 +rsa-4.9.dist-info/METADATA,sha256=-540qZBdoxQdUSuhxWlXTnY-oMNVz3EML49u9IfmmQ4,4173 +rsa-4.9.dist-info/RECORD,, +rsa-4.9.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +rsa-4.9.dist-info/WHEEL,sha256=y3eDiaFVSNTPbgzfNn0nYn5tEn1cX6WrdetDlQM4xWw,83 +rsa-4.9.dist-info/entry_points.txt,sha256=p0nVsezmPSjm5x4GDMD4a9Sshc9ukdfw1kkmOmpaAu0,201 +rsa/__init__.py,sha256=5bc5rkBB8vxWEtVYwoMQxM8df3O1Ak2_zEXqnkK9oes,1605 +rsa/__pycache__/__init__.cpython-310.pyc,, +rsa/__pycache__/asn1.cpython-310.pyc,, +rsa/__pycache__/cli.cpython-310.pyc,, +rsa/__pycache__/common.cpython-310.pyc,, +rsa/__pycache__/core.cpython-310.pyc,, 
+rsa/__pycache__/key.cpython-310.pyc,, +rsa/__pycache__/parallel.cpython-310.pyc,, +rsa/__pycache__/pem.cpython-310.pyc,, +rsa/__pycache__/pkcs1.cpython-310.pyc,, +rsa/__pycache__/pkcs1_v2.cpython-310.pyc,, +rsa/__pycache__/prime.cpython-310.pyc,, +rsa/__pycache__/randnum.cpython-310.pyc,, +rsa/__pycache__/transform.cpython-310.pyc,, +rsa/__pycache__/util.cpython-310.pyc,, +rsa/asn1.py,sha256=WL2bhDg-q7riT8P8cBMpydsh020i6Ejl6vcQIuA0VXA,1792 +rsa/cli.py,sha256=DOE66cB0-0SjUhs-PX2gbxiSma5-CT1lEAdcCYrTXwE,10183 +rsa/common.py,sha256=DAWwAuOSv1X67CBHzBvH-1wOsRe9np6eVsL_ZLrBWcg,4863 +rsa/core.py,sha256=Rf33atg4-pI7U-mTdoosmn8gTeTyX5xP7yv0iqWyogc,1714 +rsa/key.py,sha256=3_xv7B-AZZ5jIIz-vpnpfJtStS415e8fNr2iTYOu5CM,28285 +rsa/parallel.py,sha256=NcL1QjNWJxH9zL2OAOYKgr-HbAeEEmdckdxC6KMhkmM,2405 +rsa/pem.py,sha256=lzFulzgLHyqhimeo3T4GeBXuGRClfkTMYYZbgmYYmQk,4123 +rsa/pkcs1.py,sha256=wN9SWn1_zFJvHDNLGPeGZxoDA5T7ipVy9DntNcCYBpU,16690 +rsa/pkcs1_v2.py,sha256=d5A27EcOgbgJeikuLZkzANOzBQh4nVX-Bom5DUXgXHw,3549 +rsa/prime.py,sha256=Kij81g-VneGw20Cq6LRaCVT3b9tX4gWIzkWV-3h4qMg,5304 +rsa/py.typed,sha256=TfYjsEjlfDcVNGFibSYzbCf81u37bSXWmv4oTYf0zY8,64 +rsa/randnum.py,sha256=AwhXEZAT6spbUUPjhwQXGXKOTlG8FPHOI3gmTAcQ0pk,2752 +rsa/transform.py,sha256=i-nVC7JcPZkYz1W-d-qg0n0PQS17kKeXhfd9IkDehj4,2272 +rsa/util.py,sha256=9PuWg2jQfV8FHdE9hpGHDCi2iGM8Z-r4tIQXRVFmqYY,3090 diff --git a/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..376465212164654cc5cf792aeaf69d01141f0343 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: poetry 1.0.7 
+Root-Is-Purelib: true +Tag: py3-none-any diff --git a/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/entry_points.txt b/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/entry_points.txt new file mode 100644 index 0000000000000000000000000000000000000000..bf058e3ebda1b5bda76eefdf3e7c188ae20bb911 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/rsa-4.9.dist-info/entry_points.txt @@ -0,0 +1,8 @@ +[console_scripts] +pyrsa-decrypt=rsa.cli:decrypt +pyrsa-encrypt=rsa.cli:encrypt +pyrsa-keygen=rsa.cli:keygen +pyrsa-priv2pub=rsa.util:private_to_public +pyrsa-sign=rsa.cli:sign +pyrsa-verify=rsa.cli:verify + diff --git a/vllm/lib/python3.10/site-packages/safetensors/__init__.py b/vllm/lib/python3.10/site-packages/safetensors/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c9a5d2ca92b5248ce798a19f8e14c3492992cae1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/safetensors/__init__.py @@ -0,0 +1,9 @@ +# Re-export this +from ._safetensors_rust import ( # noqa: F401 + SafetensorError, + __version__, + deserialize, + safe_open, + serialize, + serialize_file, +) diff --git a/vllm/lib/python3.10/site-packages/safetensors/__init__.pyi b/vllm/lib/python3.10/site-packages/safetensors/__init__.pyi new file mode 100644 index 0000000000000000000000000000000000000000..a125e8de683c424db86d8edd9a301ced30d56296 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/safetensors/__init__.pyi @@ -0,0 +1,73 @@ +# Generated content DO NOT EDIT +@staticmethod +def deserialize(bytes): + """ + Opens a safetensors lazily and returns tensors as asked + + Args: + data (:obj:`bytes`): + The byte content of a file + + Returns: + (:obj:`List[str, Dict[str, Dict[str, any]]]`): + The deserialized content is like: + [("tensor_name", {"shape": [2, 3], "dtype": "F32", "data": b"\0\0.." }), (...)] + """ + pass + +@staticmethod +def serialize(tensor_dict, metadata=None): + """ + Serializes raw data. 
+ + Args: + tensor_dict (:obj:`Dict[str, Dict[Any]]`): + The tensor dict is like: + {"tensor_name": {"dtype": "F32", "shape": [2, 3], "data": b"\0\0"}} + metadata (:obj:`Dict[str, str]`, *optional*): + The optional purely text annotations + + Returns: + (:obj:`bytes`): + The serialized content. + """ + pass + +@staticmethod +def serialize_file(tensor_dict, filename, metadata=None): + """ + Serializes raw data. + + Args: + tensor_dict (:obj:`Dict[str, Dict[Any]]`): + The tensor dict is like: + {"tensor_name": {"dtype": "F32", "shape": [2, 3], "data": b"\0\0"}} + filename (:obj:`str`): + The name of the file to write into. + metadata (:obj:`Dict[str, str]`, *optional*): + The optional purely text annotations + + Returns: + (:obj:`bytes`): + The serialized content. + """ + pass + +class safe_open: + """ + Opens a safetensors lazily and returns tensors as asked + + Args: + filename (:obj:`str`): + The filename to open + + framework (:obj:`str`): + The framework you want you tensors in. Supported values: + `pt`, `tf`, `flax`, `numpy`. + + device (:obj:`str`, defaults to :obj:`"cpu"`): + The device on which you want the tensors. 
+ """ + + def __init__(self, filename, framework, device="cpu"): + pass diff --git a/vllm/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so b/vllm/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..e5d9d922b95afb6382f2d166fdb50cdf62add0c5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/safetensors/_safetensors_rust.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7a8aa6ef051354553c86afcb40a3bae5d68dac20017e4b5225a3054b7bed71e9 +size 1012224 diff --git a/vllm/lib/python3.10/site-packages/safetensors/flax.py b/vllm/lib/python3.10/site-packages/safetensors/flax.py new file mode 100644 index 0000000000000000000000000000000000000000..d0b8375e038eff487af33fcfaa4a597aacb5743f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/safetensors/flax.py @@ -0,0 +1,138 @@ +import os +from typing import Dict, Optional, Union + +import numpy as np + +import jax.numpy as jnp +from jax import Array +from safetensors import numpy, safe_open + + +def save(tensors: Dict[str, Array], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, Array]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. 
+ + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.flax import save + from jax import numpy as jnp + + tensors = {"embedding": jnp.zeros((512, 1024)), "attention": jnp.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + np_tensors = _jnp2np(tensors) + return numpy.save(np_tensors, metadata=metadata) + + +def save_file( + tensors: Dict[str, Array], + filename: Union[str, os.PathLike], + metadata: Optional[Dict[str, str]] = None, +) -> None: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, Array]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `None` + + Example: + + ```python + from safetensors.flax import save_file + from jax import numpy as jnp + + tensors = {"embedding": jnp.zeros((512, 1024)), "attention": jnp.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + np_tensors = _jnp2np(tensors) + return numpy.save_file(np_tensors, filename, metadata=metadata) + + +def load(data: bytes) -> Dict[str, Array]: + """ + Loads a safetensors file into flax format from pure bytes. 
+ + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, Array]`: dictionary that contains name as key, value as `Array` on cpu + + Example: + + ```python + from safetensors.flax import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = numpy.load(data) + return _np2jnp(flat) + + +def load_file(filename: Union[str, os.PathLike]) -> Dict[str, Array]: + """ + Loads a safetensors file into flax format. + + Args: + filename (`str`, or `os.PathLike`)): + The name of the file which contains the tensors + + Returns: + `Dict[str, Array]`: dictionary that contains name as key, value as `Array` + + Example: + + ```python + from safetensors.flax import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + result = {} + with safe_open(filename, framework="flax") as f: + for k in f.keys(): + result[k] = f.get_tensor(k) + return result + + +def _np2jnp(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, Array]: + for k, v in numpy_dict.items(): + numpy_dict[k] = jnp.array(v) + return numpy_dict + + +def _jnp2np(jnp_dict: Dict[str, Array]) -> Dict[str, np.array]: + for k, v in jnp_dict.items(): + jnp_dict[k] = np.asarray(v) + return jnp_dict diff --git a/vllm/lib/python3.10/site-packages/safetensors/numpy.py b/vllm/lib/python3.10/site-packages/safetensors/numpy.py new file mode 100644 index 0000000000000000000000000000000000000000..0b245f12c1c949456c9b2edb45a11343e6a8099a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/safetensors/numpy.py @@ -0,0 +1,176 @@ +import os +import sys +from typing import Dict, Optional, Union + +import numpy as np + +from safetensors import deserialize, safe_open, serialize, serialize_file + + +def _tobytes(tensor: np.ndarray) -> bytes: + if not _is_little_endian(tensor): + tensor = tensor.byteswap(inplace=False) + return tensor.tobytes() + + +def save(tensor_dict: 
Dict[str, np.ndarray], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensor_dict (`Dict[str, np.ndarray]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.numpy import save + import numpy as np + + tensors = {"embedding": np.zeros((512, 1024)), "attention": np.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + flattened = {k: {"dtype": v.dtype.name, "shape": v.shape, "data": _tobytes(v)} for k, v in tensor_dict.items()} + serialized = serialize(flattened, metadata=metadata) + result = bytes(serialized) + return result + + +def save_file( + tensor_dict: Dict[str, np.ndarray], filename: Union[str, os.PathLike], metadata: Optional[Dict[str, str]] = None +) -> None: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensor_dict (`Dict[str, np.ndarray]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. 
+ + Returns: + `None` + + Example: + + ```python + from safetensors.numpy import save_file + import numpy as np + + tensors = {"embedding": np.zeros((512, 1024)), "attention": np.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + flattened = {k: {"dtype": v.dtype.name, "shape": v.shape, "data": _tobytes(v)} for k, v in tensor_dict.items()} + serialize_file(flattened, filename, metadata=metadata) + + +def load(data: bytes) -> Dict[str, np.ndarray]: + """ + Loads a safetensors file into numpy format from pure bytes. + + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, np.ndarray]`: dictionary that contains name as key, value as `np.ndarray` on cpu + + Example: + + ```python + from safetensors.numpy import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = deserialize(data) + return _view2np(flat) + + +def load_file(filename: Union[str, os.PathLike]) -> Dict[str, np.ndarray]: + """ + Loads a safetensors file into numpy format. 
+ + Args: + filename (`str`, or `os.PathLike`)): + The name of the file which contains the tensors + + Returns: + `Dict[str, np.ndarray]`: dictionary that contains name as key, value as `np.ndarray` + + Example: + + ```python + from safetensors.numpy import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + result = {} + with safe_open(filename, framework="np") as f: + for k in f.keys(): + result[k] = f.get_tensor(k) + return result + + +_TYPES = { + "F64": np.float64, + "F32": np.float32, + "F16": np.float16, + "I64": np.int64, + "U64": np.uint64, + "I32": np.int32, + "U32": np.uint32, + "I16": np.int16, + "U16": np.uint16, + "I8": np.int8, + "U8": np.uint8, + "BOOL": bool, +} + + +def _getdtype(dtype_str: str) -> np.dtype: + return _TYPES[dtype_str] + + +def _view2np(safeview) -> Dict[str, np.ndarray]: + result = {} + for k, v in safeview: + dtype = _getdtype(v["dtype"]) + arr = np.frombuffer(v["data"], dtype=dtype).reshape(v["shape"]) + result[k] = arr + return result + + +def _is_little_endian(tensor: np.ndarray) -> bool: + byteorder = tensor.dtype.byteorder + if byteorder == "=": + if sys.byteorder == "little": + return True + else: + return False + elif byteorder == "|": + return True + elif byteorder == "<": + return True + elif byteorder == ">": + return False + raise ValueError(f"Unexpected byte order {byteorder}") diff --git a/vllm/lib/python3.10/site-packages/safetensors/paddle.py b/vllm/lib/python3.10/site-packages/safetensors/paddle.py new file mode 100644 index 0000000000000000000000000000000000000000..cec368665de31d17757c0c6621df5dc4926bfab1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/safetensors/paddle.py @@ -0,0 +1,138 @@ +import os +from typing import Dict, Optional, Union + +import numpy as np + +import paddle +from safetensors import numpy + + +def save(tensors: Dict[str, paddle.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into 
raw bytes in safetensors format. + + Args: + tensors (`Dict[str, paddle.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.paddle import save + import paddle + + tensors = {"embedding": paddle.zeros((512, 1024)), "attention": paddle.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + np_tensors = _paddle2np(tensors) + return numpy.save(np_tensors, metadata=metadata) + + +def save_file( + tensors: Dict[str, paddle.Tensor], + filename: Union[str, os.PathLike], + metadata: Optional[Dict[str, str]] = None, +) -> None: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, paddle.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `None` + + Example: + + ```python + from safetensors.paddle import save_file + import paddle + + tensors = {"embedding": paddle.zeros((512, 1024)), "attention": paddle.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + np_tensors = _paddle2np(tensors) + return numpy.save_file(np_tensors, filename, metadata=metadata) + + +def load(data: bytes, device: str = "cpu") -> Dict[str, paddle.Tensor]: + """ + Loads a safetensors file into paddle format from pure bytes. 
+ + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, paddle.Tensor]`: dictionary that contains name as key, value as `paddle.Tensor` on cpu + + Example: + + ```python + from safetensors.paddle import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = numpy.load(data) + return _np2paddle(flat, device) + + +def load_file(filename: Union[str, os.PathLike], device="cpu") -> Dict[str, paddle.Tensor]: + """ + Loads a safetensors file into paddle format. + + Args: + filename (`str`, or `os.PathLike`)): + The name of the file which contains the tensors + device (`Union[Dict[str, any], str]`, *optional*, defaults to `cpu`): + The device where the tensors need to be located after load. + available options are all regular paddle device locations + + Returns: + `Dict[str, paddle.Tensor]`: dictionary that contains name as key, value as `paddle.Tensor` + + Example: + + ```python + from safetensors.paddle import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + flat = numpy.load_file(filename) + output = _np2paddle(flat, device) + return output + + +def _np2paddle(numpy_dict: Dict[str, np.ndarray], device: str = "cpu") -> Dict[str, paddle.Tensor]: + for k, v in numpy_dict.items(): + numpy_dict[k] = paddle.to_tensor(v, place=device) + return numpy_dict + + +def _paddle2np(paddle_dict: Dict[str, paddle.Tensor]) -> Dict[str, np.array]: + for k, v in paddle_dict.items(): + paddle_dict[k] = v.detach().cpu().numpy() + return paddle_dict diff --git a/vllm/lib/python3.10/site-packages/safetensors/py.typed b/vllm/lib/python3.10/site-packages/safetensors/py.typed new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/safetensors/tensorflow.py b/vllm/lib/python3.10/site-packages/safetensors/tensorflow.py 
new file mode 100644 index 0000000000000000000000000000000000000000..e2d74b0522698b3748a7da93753e065f4053beea --- /dev/null +++ b/vllm/lib/python3.10/site-packages/safetensors/tensorflow.py @@ -0,0 +1,137 @@ +import os +from typing import Dict, Optional, Union + +import numpy as np +import tensorflow as tf + +from safetensors import numpy, safe_open + + +def save(tensors: Dict[str, tf.Tensor], metadata: Optional[Dict[str, str]] = None) -> bytes: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, tf.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. + + Returns: + `bytes`: The raw bytes representing the format + + Example: + + ```python + from safetensors.tensorflow import save + import tensorflow as tf + + tensors = {"embedding": tf.zeros((512, 1024)), "attention": tf.zeros((256, 256))} + byte_data = save(tensors) + ``` + """ + np_tensors = _tf2np(tensors) + return numpy.save(np_tensors, metadata=metadata) + + +def save_file( + tensors: Dict[str, tf.Tensor], + filename: Union[str, os.PathLike], + metadata: Optional[Dict[str, str]] = None, +) -> None: + """ + Saves a dictionary of tensors into raw bytes in safetensors format. + + Args: + tensors (`Dict[str, tf.Tensor]`): + The incoming tensors. Tensors need to be contiguous and dense. + filename (`str`, or `os.PathLike`)): + The filename we're saving into. + metadata (`Dict[str, str]`, *optional*, defaults to `None`): + Optional text only metadata you might want to save in your header. + For instance it can be useful to specify more about the underlying + tensors. This is purely informative and does not affect tensor loading. 
+ + Returns: + `None` + + Example: + + ```python + from safetensors.tensorflow import save_file + import tensorflow as tf + + tensors = {"embedding": tf.zeros((512, 1024)), "attention": tf.zeros((256, 256))} + save_file(tensors, "model.safetensors") + ``` + """ + np_tensors = _tf2np(tensors) + return numpy.save_file(np_tensors, filename, metadata=metadata) + + +def load(data: bytes) -> Dict[str, tf.Tensor]: + """ + Loads a safetensors file into tensorflow format from pure bytes. + + Args: + data (`bytes`): + The content of a safetensors file + + Returns: + `Dict[str, tf.Tensor]`: dictionary that contains name as key, value as `tf.Tensor` on cpu + + Example: + + ```python + from safetensors.tensorflow import load + + file_path = "./my_folder/bert.safetensors" + with open(file_path, "rb") as f: + data = f.read() + + loaded = load(data) + ``` + """ + flat = numpy.load(data) + return _np2tf(flat) + + +def load_file(filename: Union[str, os.PathLike]) -> Dict[str, tf.Tensor]: + """ + Loads a safetensors file into tensorflow format. 
+ + Args: + filename (`str`, or `os.PathLike`)): + The name of the file which contains the tensors + + Returns: + `Dict[str, tf.Tensor]`: dictionary that contains name as key, value as `tf.Tensor` + + Example: + + ```python + from safetensors.tensorflow import load_file + + file_path = "./my_folder/bert.safetensors" + loaded = load_file(file_path) + ``` + """ + result = {} + with safe_open(filename, framework="tf") as f: + for k in f.keys(): + result[k] = f.get_tensor(k) + return result + + +def _np2tf(numpy_dict: Dict[str, np.ndarray]) -> Dict[str, tf.Tensor]: + for k, v in numpy_dict.items(): + numpy_dict[k] = tf.convert_to_tensor(v) + return numpy_dict + + +def _tf2np(tf_dict: Dict[str, tf.Tensor]) -> Dict[str, np.array]: + for k, v in tf_dict.items(): + tf_dict[k] = v.numpy() + return tf_dict diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/transformers/kernels/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cfbbea893da0d10aa616c572bb5cf2ca0f7a4c1e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/transformers/kernels/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/mra/cuda_kernel.cu b/vllm/lib/python3.10/site-packages/transformers/kernels/mra/cuda_kernel.cu new file mode 100644 index 0000000000000000000000000000000000000000..87ed89052873813153786bd416a981d3e5279af9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/kernels/mra/cuda_kernel.cu @@ -0,0 +1,383 @@ +#include "cuda_kernel.h" + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +__global__ void index_max_cuda_kernel( + float *index_vals, // [batch_size, 32, num_block] + int *indices, // [batch_size, num_block] + 
float *max_vals, // [batch_size, A_num_block * 32] + float *max_vals_scatter, // [batch_size, 32, num_block] + long batch_size, + long A_num_block, + long B_num_block, + long num_block +) { + + long batch_idx = blockIdx.x; + + long thread_idx = threadIdx.x; + long num_thread = blockDim.x; + + extern __shared__ float buffer[]; + int *max_buffer = (int*)buffer; + + for (int i = 0; i < A_num_block * 32; i = i + num_thread) { + int idx = i + thread_idx; + if (idx < A_num_block * 32) { + max_buffer[idx] = -1e8; + } + } + __syncthreads(); + + int *indices_pt = &indices[batch_idx * num_block]; + float *index_vals_pt = &index_vals[batch_idx * num_block * 32]; + + for (int idx_start = 0; idx_start < 32 * num_block; idx_start = idx_start + num_thread) { + int idx = idx_start + thread_idx; + int A_block_idx = indices_pt[idx % num_block] / B_num_block; + atomicMax(&max_buffer[A_block_idx * 32 + idx / num_block], (int)(index_vals_pt[idx] * 1000)); + } + __syncthreads(); + + float *max_vals_pt = &max_vals[batch_idx * A_num_block * 32]; + for (int i = 0; i < A_num_block * 32; i = i + num_thread) { + int idx = i + thread_idx; + if (idx < A_num_block * 32) { + max_vals_pt[idx] = (float)max_buffer[idx] / 1000.; + } + } + + float *max_vals_scatter_pt = &max_vals_scatter[batch_idx * num_block * 32]; + for (int idx_start = 0; idx_start < 32 * num_block; idx_start = idx_start + num_thread) { + int idx = idx_start + thread_idx; + int A_block_idx = indices_pt[idx % num_block] / B_num_block; + max_vals_scatter_pt[idx] = (float)max_buffer[A_block_idx * 32 + idx / num_block] / 1000.; + } + +} + +__global__ void mm_to_sparse_cuda_kernel( + float *dense_A, // [batch_size, A_num_block, dim, 32] + float *dense_B, // [batch_size, B_num_block, dim, 32] + int *indices, // [batch_size, num_block] + float *sparse_C, // [batch_size, num_block, 32, 32] + long batch_size, + long A_num_block, + long B_num_block, + long dim, + long num_block +) { + + long batch_idx = blockIdx.y; + long block_idx = 
blockIdx.x * blockDim.y + threadIdx.y; + + long thread_idx = threadIdx.x; + + __shared__ float buffer[4096]; + float *A_buffer = &buffer[threadIdx.y * 1024]; // [2, 8, 32] + float *B_buffer = &buffer[threadIdx.y * 1024 + 512]; // [2, 8, 32] + + long batch_idx__block_idx = batch_idx * num_block + block_idx; + + long AB_block_idx = indices[batch_idx__block_idx]; + float *dense_A_pt = &dense_A[(batch_idx * A_num_block + AB_block_idx / B_num_block) * dim * 32]; + float *dense_B_pt = &dense_B[(batch_idx * B_num_block + AB_block_idx % B_num_block) * dim * 32]; + + int reg_1_idx = thread_idx / 8; // [0000000011111111222222223333333344444444555555556666666677777777] + int reg_2_idx = thread_idx % 8; // [0123456701234567012345670123456701234567012345670123456701234567] + + float reg_1[8]; + float reg_2[8]; + + float reg_array[16] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}; + + #pragma unroll + for (int i = 0; i < 4; i++) { + A_buffer[i * 64 + thread_idx] = dense_A_pt[i * 64 + thread_idx]; + B_buffer[i * 64 + thread_idx] = dense_B_pt[i * 64 + thread_idx]; + } + + __syncthreads(); + + #pragma unroll + for (int i = 0; i < 4; i++) { + reg_1[i] = A_buffer[reg_1_idx * 4 + i]; + reg_2[i] = B_buffer[reg_2_idx * 4 + i]; + } + + for (int dim_stride = 1; dim_stride < (dim / 8); dim_stride++) { + + #pragma unroll + for (int i = 0; i < 4; i++) { + A_buffer[(dim_stride % 2) * 256 + i * 64 + thread_idx] = dense_A_pt[dim_stride * 256 + i * 64 + thread_idx]; + B_buffer[(dim_stride % 2) * 256 + i * 64 + thread_idx] = dense_B_pt[dim_stride * 256 + i * 64 + thread_idx]; + } + + #pragma unroll + for (int mini_dim_idx = 1; mini_dim_idx < 8; mini_dim_idx++) { + #pragma unroll + for (int i = 0; i < 4; i++) { + reg_1[(mini_dim_idx % 2) * 4 + i] = A_buffer[((dim_stride - 1) % 2) * 256 + mini_dim_idx * 32 + reg_1_idx * 4 + i]; + reg_2[(mini_dim_idx % 2) * 4 + i] = B_buffer[((dim_stride - 1) % 2) * 256 + mini_dim_idx * 32 + reg_2_idx * 4 + i]; + } + #pragma unroll + for (int i = 0; i < 4; i++) 
{ + #pragma unroll + for (int j = 0; j < 4; j++) { + reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; + } + } + } + + __syncthreads(); + + #pragma unroll + for (int i = 0; i < 4; i++) { + reg_1[i] = A_buffer[(dim_stride % 2) * 256 + reg_1_idx * 4 + i]; + reg_2[i] = B_buffer[(dim_stride % 2) * 256 + reg_2_idx * 4 + i]; + } + + #pragma unroll + for (int i = 0; i < 4; i++) { + #pragma unroll + for (int j = 0; j < 4; j++) { + reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; + } + } + + } + + #pragma unroll + for (int mini_dim_idx = 1; mini_dim_idx < 8; mini_dim_idx++) { + #pragma unroll + for (int i = 0; i < 4; i++) { + reg_1[(mini_dim_idx % 2) * 4 + i] = A_buffer[256 + mini_dim_idx * 32 + reg_1_idx * 4 + i]; + reg_2[(mini_dim_idx % 2) * 4 + i] = B_buffer[256 + mini_dim_idx * 32 + reg_2_idx * 4 + i]; + } + #pragma unroll + for (int i = 0; i < 4; i++) { + #pragma unroll + for (int j = 0; j < 4; j++) { + reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; + } + } + } + #pragma unroll + for (int i = 0; i < 4; i++) { + #pragma unroll + for (int j = 0; j < 4; j++) { + reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; + } + } + __syncthreads(); + + float *C_buffer = &buffer[threadIdx.y * 1024]; // [32, 32] + + #pragma unroll + for (int i = 0; i < 4; i++) { + #pragma unroll + for (int j = 0; j < 4; j++) { + C_buffer[(reg_2_idx * 4 + j) * 32 + reg_1_idx * 4 + i] = reg_array[i * 4 + j]; + } + } + __syncthreads(); + + float *sparse_C_pt = &sparse_C[batch_idx__block_idx * 1024]; + + #pragma unroll + for (int i = 0; i < 16; i++) { + sparse_C_pt[i * 64 + thread_idx] = C_buffer[i * 64 + thread_idx]; + } + +} + +__global__ void sparse_dense_mm_cuda_kernel( + float *sparse_A, // [batch_size, num_block, 32, 32] + int *indices, // [batch_size, num_block] + float *dense_B, // [batch_size, B_num_block, dim, 32] + float *dense_C, // [batch_size, A_num_block, dim, 32] + 
long batch_size, + long A_num_block, + long B_num_block, + long dim, + long num_block +) { + + long batch_idx = blockIdx.y; + long block_idx = blockIdx.x * blockDim.y + threadIdx.y; + + long thread_idx = threadIdx.x; + + __shared__ float buffer[6144]; + float *A_buffer = &buffer[threadIdx.y * 3072]; // [32, 32] + float *B_buffer = &buffer[threadIdx.y * 3072 + 1024]; // [32, 64] + + long batch_idx__block_idx = batch_idx * num_block + block_idx; + + float *sparse_A_pt = &sparse_A[batch_idx__block_idx * 1024]; + #pragma unroll + for (int i = 0; i < 8; i++) { + A_buffer[i * 128 + thread_idx] = sparse_A_pt[i * 128 + thread_idx]; + } + + long AB_block_idx = indices[batch_idx__block_idx]; + float *dense_B_pt = &dense_B[(batch_idx * B_num_block + AB_block_idx % B_num_block) * 32 * dim]; + float *dense_C_pt = &dense_C[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32 * dim]; + + // [0000000011111111222222223333333344444444555555556666666677777777] + // [0123456701234567012345670123456701234567012345670123456701234567] + int reg_1_idx = thread_idx / 8; + int reg_2_idx = thread_idx % 8; + + float reg_1[8]; + float reg_2[8]; + + float reg_array[16]; + + for (int dim_stride = 0; dim_stride < dim; dim_stride = dim_stride + 64) { + + #pragma unroll + for (int i = 0; i < 16; i++) { + B_buffer[i * 128 + thread_idx] = dense_B_pt[dim_stride * 32 + i * 128 + thread_idx]; + } + + #pragma unroll + for (int i = 0; i < 16; i++) { + reg_array[i] = 0; + } + + __syncthreads(); + + #pragma unroll + for (int i = 0; i < 4; i++) { + reg_1[i] = B_buffer[(reg_1_idx * 4 + i) * 32]; + reg_2[i] = A_buffer[reg_2_idx * 4 + i]; + } + + #pragma unroll + for (int mini_dim_idx = 1; mini_dim_idx < 32; mini_dim_idx++) { + #pragma unroll + for (int i = 0; i < 4; i++) { + reg_1[(mini_dim_idx % 2) * 4 + i] = B_buffer[(reg_1_idx * 4 + i) * 32 + mini_dim_idx]; + reg_2[(mini_dim_idx % 2) * 4 + i] = A_buffer[mini_dim_idx * 32 + reg_2_idx * 4 + i]; + } + #pragma unroll + for (int i = 0; i < 4; i++) { + 
#pragma unroll + for (int j = 0; j < 4; j++) { + reg_array[i * 4 + j] += reg_1[((mini_dim_idx - 1) % 2) * 4 + i] * reg_2[((mini_dim_idx - 1) % 2) * 4 + j]; + } + } + } + + #pragma unroll + for (int i = 0; i < 4; i++) { + #pragma unroll + for (int j = 0; j < 4; j++) { + reg_array[i * 4 + j] += reg_1[4 + i] * reg_2[4 + j]; + } + } + + __syncthreads(); + + float *C_buffer = &buffer[threadIdx.y * 3072 + 1024]; // [64, 32] + + #pragma unroll + for (int i = 0; i < 4; i++) { + #pragma unroll + for (int j = 0; j < 4; j++) { + C_buffer[(reg_1_idx * 4 + i) * 32 + reg_2_idx * 4 + j] = reg_array[i * 4 + j]; + } + } + __syncthreads(); + + #pragma unroll + for (int i = 0; i < 16; i++) { + atomicAdd(&dense_C_pt[dim_stride * 32 + i * 128 + thread_idx], C_buffer[i * 128 + thread_idx]); + } + __syncthreads(); + + } + +} + + +__global__ void reduce_sum_cuda_kernel( + float *sparse_A, // [batch_size, num_block, 32, 32] + int *indices, // [batch_size, num_block] + float *dense_C, // [batch_size, A_num_block, 32] + long batch_size, + long A_num_block, + long B_num_block, + long num_block +) { + + long batch_idx = blockIdx.y; + long block_idx = blockIdx.x * blockDim.y + threadIdx.y; + + long thread_idx = threadIdx.x; + + long batch_idx__block_idx = batch_idx * num_block + block_idx; + + long AB_block_idx = indices[batch_idx__block_idx]; + float *sparse_A_pt = &sparse_A[batch_idx__block_idx * 1024]; + + float reg_array[16]; + float value = 0; + + #pragma unroll + for (int i = 0; i < 8; i++) { + reg_array[i] = sparse_A_pt[i * 32 + thread_idx]; + } + #pragma unroll + for (int stride = 8; stride < 32; stride = stride + 8) { + #pragma unroll + for (int i = 0; i < 8; i++) { + reg_array[(stride + i) % 16] = sparse_A_pt[(stride + i) * 32 + thread_idx]; + } + #pragma unroll + for (int i = 0; i < 8; i++) { + value = value + reg_array[(stride - 8 + i) % 16]; + } + } + #pragma unroll + for (int i = 0; i < 8; i++) { + value = value + reg_array[8 + i]; + } + + float *dense_C_pt = &dense_C[(batch_idx * 
A_num_block + AB_block_idx / B_num_block) * 32]; + + atomicAdd(&dense_C_pt[thread_idx], value); + +} + +__global__ void scatter_cuda_kernel( + float *dense_A, // [batch_size, A_num_block, 32] + int *indices, // [batch_size, num_block] + float *sparse_C, // [batch_size, num_block, 32, 32] + long batch_size, + long A_num_block, + long B_num_block, + long num_block +) { + + long batch_idx = blockIdx.y; + long block_idx = blockIdx.x * blockDim.y + threadIdx.y; + + long thread_idx = threadIdx.x; + + long batch_idx__block_idx = batch_idx * num_block + block_idx; + + long AB_block_idx = indices[batch_idx__block_idx]; + float *dense_A_pt = &dense_A[(batch_idx * A_num_block + AB_block_idx / B_num_block) * 32]; + float *sparse_C_pt = &sparse_C[(batch_idx * num_block + block_idx) * 1024]; + + float value = dense_A_pt[thread_idx]; + + #pragma unroll + for (int i = 0; i < 32; i++) { + sparse_C_pt[i * 32 + thread_idx] = value; + } + +} diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/mra/cuda_kernel.h b/vllm/lib/python3.10/site-packages/transformers/kernels/mra/cuda_kernel.h new file mode 100644 index 0000000000000000000000000000000000000000..a95b46f7d159b11851143710034cf80c20aa6bf8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/kernels/mra/cuda_kernel.h @@ -0,0 +1,59 @@ + +#define WARP_SIZE 32 +#define FULL_MASK 0xffffffff +#define OPTIMAL_THREADS 256 + +__global__ void index_max_cuda_kernel( + float *index_vals, // [batch_size, 32, num_block] + int *indices, // [batch_size, num_block] + float *max_vals, // [batch_size, A_num_block * 32] + float *max_vals_scatter, // [batch_size, 32, num_block] + long batch_size, + long A_num_block, + long B_num_block, + long num_block +); + +__global__ void mm_to_sparse_cuda_kernel( + float *dense_A, // [batch_size, A_num_block, dim, 32] + float *dense_B, // [batch_size, B_num_block, dim, 32] + int *indices, // [batch_size, num_block] + float *sparse_C, // [batch_size, num_block, 32, 32] + long 
batch_size, + long A_num_block, + long B_num_block, + long dim, + long num_block +); + +__global__ void sparse_dense_mm_cuda_kernel( + float *sparse_A, // [batch_size, num_block, 32, 32] + int *indices, // [batch_size, num_block] + float *dense_B, // [batch_size, B_num_block, dim, 32] + float *dense_C, // [batch_size, A_num_block, dim, 32] + long batch_size, + long A_num_block, + long B_num_block, + long dim, + long num_block +); + +__global__ void reduce_sum_cuda_kernel( + float *sparse_A, // [batch_size, num_block, 32, 32] + int *indices, // [batch_size, num_block] + float *dense_C, // [batch_size, A_num_block, 32] + long batch_size, + long A_num_block, + long B_num_block, + long num_block +); + +__global__ void scatter_cuda_kernel( + float *dense_A, // [batch_size, A_num_block, 32] + int *indices, // [batch_size, num_block] + float *sparse_C, // [batch_size, num_block, 32, 32] + long batch_size, + long A_num_block, + long B_num_block, + long num_block +); diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.cu b/vllm/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.cu new file mode 100644 index 0000000000000000000000000000000000000000..ba2a0cacfe614e75e06d2dde80dc77a6e8a4ec1a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.cu @@ -0,0 +1,154 @@ +#include +#include +#include "cuda_launch.h" +#include "cuda_kernel.h" +#include + +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +std::vector index_max_kernel( + at::Tensor index_vals, // [batch_size, 32, num_block] + at::Tensor indices, // [batch_size, num_block], + int A_num_block, + int B_num_block +) { + int batch_size = indices.size(0); + int num_block = indices.size(1); + + at::Tensor max_vals = at::zeros({batch_size, A_num_block * 32}, index_vals.options()); + at::Tensor 
max_vals_scatter = at::zeros({batch_size, 32, num_block}, index_vals.options()); + + dim3 threads(256); + dim3 blocks(batch_size); + int shared_mem = A_num_block * 32 * sizeof(float); + + index_max_cuda_kernel<<>>( + index_vals.data_ptr(), + indices.data_ptr(), + max_vals.data_ptr(), + max_vals_scatter.data_ptr(), + batch_size, + A_num_block, + B_num_block, + num_block + ); + + return {max_vals, max_vals_scatter}; +} + +at::Tensor mm_to_sparse_kernel( + at::Tensor dense_A, // [batch_size, A_num_block, dim, 32] + at::Tensor dense_B, // [batch_size, B_num_block, dim, 32] + at::Tensor indices // [batch_size, num_block] +) { + int batch_size = dense_A.size(0); + int A_num_block = dense_A.size(1); + int B_num_block = dense_B.size(1); + int dim = dense_A.size(2); + int num_block = indices.size(1); + + at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options()); + + dim3 threads(64, 4); + dim3 blocks(num_block / 4, batch_size); + + mm_to_sparse_cuda_kernel<<>>( + dense_A.data_ptr(), + dense_B.data_ptr(), + indices.data_ptr(), + sparse_C.data_ptr(), + batch_size, + A_num_block, + B_num_block, + dim, + num_block + ); + + return sparse_C; +} + +at::Tensor sparse_dense_mm_kernel( + at::Tensor sparse_A, // [batch_size, num_block, 32, 32] + at::Tensor indices, // [batch_size, num_block] + at::Tensor dense_B, // [batch_size, B_num_block, dim, 32] + int A_num_block +) { + int batch_size = sparse_A.size(0); + int num_block = sparse_A.size(1); + int B_num_block = dense_B.size(1); + int dim = dense_B.size(2); + + at::Tensor dense_C = at::zeros({batch_size, A_num_block, dim, 32}, dense_B.options()); + + dim3 threads(128, 2); + dim3 blocks(num_block / 2, batch_size); + + sparse_dense_mm_cuda_kernel<<>>( + sparse_A.data_ptr(), + indices.data_ptr(), + dense_B.data_ptr(), + dense_C.data_ptr(), + batch_size, + A_num_block, + B_num_block, + dim, + num_block + ); + + return dense_C; +} + +at::Tensor reduce_sum_kernel( + at::Tensor sparse_A, // [batch_size, num_block, 
32, 32] + at::Tensor indices, // [batch_size, num_block] + int A_num_block, + int B_num_block +) { + int batch_size = sparse_A.size(0); + int num_block = sparse_A.size(1); + + at::Tensor dense_C = at::zeros({batch_size, A_num_block, 32}, sparse_A.options()); + + dim3 threads(32, 4); + dim3 blocks(num_block / 4, batch_size); + + reduce_sum_cuda_kernel<<>>( + sparse_A.data_ptr(), + indices.data_ptr(), + dense_C.data_ptr(), + batch_size, + A_num_block, + B_num_block, + num_block + ); + + return dense_C; +} + +at::Tensor scatter_kernel( + at::Tensor dense_A, // [batch_size, A_num_block, 32] + at::Tensor indices, // [batch_size, num_block] + int B_num_block +) { + int batch_size = dense_A.size(0); + int A_num_block = dense_A.size(1); + int num_block = indices.size(1); + + at::Tensor sparse_C = at::zeros({batch_size, num_block, 32, 32}, dense_A.options()); + + dim3 threads(32, 4); + dim3 blocks(num_block / 4, batch_size); + + scatter_cuda_kernel<<>>( + dense_A.data_ptr(), + indices.data_ptr(), + sparse_C.data_ptr(), + batch_size, + A_num_block, + B_num_block, + num_block + ); + + return sparse_C; +} diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.h b/vllm/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.h new file mode 100644 index 0000000000000000000000000000000000000000..0200140ee337b8c5d9583767bbad1e842e9d4677 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/kernels/mra/cuda_launch.h @@ -0,0 +1,39 @@ +#include +#include +#include + +#define min(a, b) ((a)<(b)?(a):(b)) +#define max(a, b) ((a)>(b)?(a):(b)) + +std::vector index_max_kernel( + at::Tensor index_vals, + at::Tensor indices, + int A_num_block, + int B_num_block +); + +at::Tensor mm_to_sparse_kernel( + at::Tensor dense_A, + at::Tensor dense_B, + at::Tensor indices +); + +at::Tensor sparse_dense_mm_kernel( + at::Tensor sparse_A, + at::Tensor indices, + at::Tensor dense_B, + int A_num_block +); + +at::Tensor reduce_sum_kernel( + at::Tensor 
sparse_A, + at::Tensor indices, + int A_num_block, + int B_num_block +); + +at::Tensor scatter_kernel( + at::Tensor dense_A, + at::Tensor indices, + int B_num_block +); diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/mra/torch_extension.cpp b/vllm/lib/python3.10/site-packages/transformers/kernels/mra/torch_extension.cpp new file mode 100644 index 0000000000000000000000000000000000000000..60c9262b779270a6e95ae54f53a67daa6d740a9e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/kernels/mra/torch_extension.cpp @@ -0,0 +1,78 @@ +#include +#include +#include "cuda_launch.h" +#include + +std::vector index_max( + at::Tensor index_vals, + at::Tensor indices, + int A_num_block, + int B_num_block +) { + return index_max_kernel( + index_vals, + indices, + A_num_block, + B_num_block + ); +} + +at::Tensor mm_to_sparse( + at::Tensor dense_A, + at::Tensor dense_B, + at::Tensor indices +) { + return mm_to_sparse_kernel( + dense_A, + dense_B, + indices + ); +} + +at::Tensor sparse_dense_mm( + at::Tensor sparse_A, + at::Tensor indices, + at::Tensor dense_B, + int A_num_block +) { + return sparse_dense_mm_kernel( + sparse_A, + indices, + dense_B, + A_num_block + ); +} + +at::Tensor reduce_sum( + at::Tensor sparse_A, + at::Tensor indices, + int A_num_block, + int B_num_block +) { + return reduce_sum_kernel( + sparse_A, + indices, + A_num_block, + B_num_block + ); +} + +at::Tensor scatter( + at::Tensor dense_A, + at::Tensor indices, + int B_num_block +) { + return scatter_kernel( + dense_A, + indices, + B_num_block + ); +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("index_max", &index_max, "index_max (CUDA)"); + m.def("mm_to_sparse", &mm_to_sparse, "mm_to_sparse (CUDA)"); + m.def("sparse_dense_mm", &sparse_dense_mm, "sparse_dense_mm (CUDA)"); + m.def("reduce_sum", &reduce_sum, "reduce_sum (CUDA)"); + m.def("scatter", &scatter, "scatter (CUDA)"); +} diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/common.h 
b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/common.h new file mode 100644 index 0000000000000000000000000000000000000000..e5085c88dd3ea9a12eec264a8c48946bf2b80b23 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/common.h @@ -0,0 +1,10 @@ + +#define min(a, b) ((a)<(b)?(a):(b)) +#define max(a, b) ((a)>(b)?(a):(b)) +#define ceil_divide(a, b) ((a)/(b)+((a)%(b)!=0)) +#define select(cond, a, b) ((cond)?(a):(b)) +#define PI 3.141592 +#define EPSILON 1e-8 +#define MAX_VAL 1e12 +#define MIN_VAL -1e12 +#define EMPTY_VALUE -1 diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda.h b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..97030870649a2fdac58cb26cf966e8f5c8cc7909 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda.h @@ -0,0 +1,9 @@ + +#define MAX_THREADS_PER_BLOCK 1024 +#define OPTIMAL_THREADS_PER_BLOCK 256 +#define WARP_SIZE 32 +#define MAX_NUM_BLOCK_X 2147483647 +#define MAX_NUM_BLOCK_Y 65535 +#define MAX_NUM_BLOCK_Z 65535 +#define MAX_SHARED_MEM_PER_BLOCK 48000 +#define FULL_MASK 0xffffffff diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda_device.h b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda_device.h new file mode 100644 index 0000000000000000000000000000000000000000..6674f93afdc25ab35c5d83881d00028bcf2989fc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/common_cuda_device.h @@ -0,0 +1,79 @@ + +#include "common.h" + +template +__device__ int set_insert(T *set, int set_size, T value) { + int slot = value % set_size; + int start_slot = slot; + while (true) { + T prev = atomicCAS(&set[slot], EMPTY_VALUE, value); + if (prev == EMPTY_VALUE || prev == value) { + return slot; + } + slot = (slot + 1) % set_size; + if (slot == start_slot) { + return -1; + } + } + 
return -1; +} + +template +__device__ int set_lookup(T *set, int set_size, T value) { + int slot = value % set_size; + int start_slot = slot; + while (true) { + if (set[slot] == value) { + return slot; + } + slot = (slot + 1) % set_size; + if (slot == start_slot) { + return -1; + } + } + return -1; +} + +template +__device__ void init_buffer(T init_value, T *buffer, int buffer_size, int num_threads, int thread_id) { + __syncthreads(); + for (int i = 0; i < buffer_size; i = i + num_threads) { + int offset_idx = i + thread_id; + if (offset_idx < buffer_size) { + buffer[offset_idx] = init_value; + } + } + __syncthreads(); +} + +template +__device__ void copy_data(T *src_pt, T *dist_pt, int data_length, int num_threads, int thread_id) { + __syncthreads(); + for (int i = 0; i < data_length; i = i + num_threads) { + int offset_idx = i + thread_id; + if (offset_idx < data_length) { + dist_pt[offset_idx] = src_pt[offset_idx]; + } + } + __syncthreads(); +} + +template +__device__ void init_buffer_nonblocking(T init_value, T *buffer, int buffer_size, int num_threads, int thread_id) { + for (int i = 0; i < buffer_size; i = i + num_threads) { + int offset_idx = i + thread_id; + if (offset_idx < buffer_size) { + buffer[offset_idx] = init_value; + } + } +} + +template +__device__ void copy_data_nonblocking(T *src_pt, T *dist_pt, int data_length, int num_threads, int thread_id) { + for (int i = 0; i < data_length; i = i + num_threads) { + int offset_idx = i + thread_id; + if (offset_idx < data_length) { + dist_pt[offset_idx] = src_pt[offset_idx]; + } + } +} diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.cu b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.cu new file mode 100644 index 0000000000000000000000000000000000000000..c6b13e6cb5f53c9c62e51d2c399a14d14dab7037 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.cu @@ -0,0 +1,588 @@ +// File from 
https://github.com/mlpen/YOSO/blob/main/encoders/backbones/efficient_attentions/yoso/yoso_v1/cuda/fast_lsh_cumulation.cu + +#include +#include +#include "fast_lsh_cumulation.h" +#include "fast_lsh_cumulation_cuda.h" +#include "common_cuda.h" +#include "common.h" +#include +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +std::vector fast_hash_ver1_kernel( + at::Tensor query_mask, + at::Tensor query_vector, + at::Tensor key_mask, + at::Tensor key_vector, + int num_hash_f, + int hash_code_len, + bool use_cuda +) { + + int batch_size = query_vector.size(0); + int num_query = query_vector.size(1); + int num_key = key_vector.size(1); + int vector_dim = query_vector.size(2); + + int num_hash_per_part = vector_dim / hash_code_len; + int num_part = max(1, ceil_divide(num_hash_f, num_hash_per_part)); + + at::Tensor Dmat = 2 * at::randint(0, 2, {batch_size, 3, num_part, vector_dim}, query_mask.options()) - 1; + at::Tensor query_hash_code = at::zeros({batch_size, num_query, num_hash_f}, query_mask.options()); + at::Tensor key_hash_code = at::zeros({batch_size, num_key, num_hash_f}, key_mask.options()); + + int *query_mask_ptr = query_mask.data_ptr(); + float *query_vector_ptr = query_vector.data_ptr(); + int *key_mask_ptr = key_mask.data_ptr(); + float *key_vector_ptr = key_vector.data_ptr(); + + int *Dmat_ptr = Dmat.data_ptr(); + + int *query_hash_code_ptr = query_hash_code.data_ptr(); + int *key_hash_code_ptr = key_hash_code.data_ptr(); + + if (use_cuda) { + { + dim3 threads(vector_dim); + dim3 blocks(num_part, num_query, batch_size); + int shared_mem = vector_dim * sizeof(float); + fast_hash_ver1_cuda_kernel<<>>( + query_mask_ptr, + query_vector_ptr, + Dmat_ptr, + query_hash_code_ptr, + batch_size, + num_query, + vector_dim, + num_part, + num_hash_f, + hash_code_len + ); + } + { + dim3 threads(vector_dim); + dim3 
blocks(num_part, num_key, batch_size); + int shared_mem = vector_dim * sizeof(float); + fast_hash_ver1_cuda_kernel<<>>( + key_mask_ptr, + key_vector_ptr, + Dmat_ptr, + key_hash_code_ptr, + batch_size, + num_key, + vector_dim, + num_part, + num_hash_f, + hash_code_len + ); + } + } + + return {query_hash_code, key_hash_code}; + +} + +at::Tensor lsh_cumulation_ver1_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +) { + + int batch_size = query_hash_code.size(0); + int num_hash_f = query_hash_code.size(2); + + int num_query = query_hash_code.size(1); + int num_key = key_hash_code.size(1); + int value_dim = value.size(2); + + at::Tensor hashtable_value = at::empty({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options()); + at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); + + if (use_cuda) { + int threads_x = WARP_SIZE; + int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE; + int block_x_step1 = num_key / threads_y; + int block_x_step2 = num_query / threads_y; + int block_y = batch_size; + + dim3 threads(threads_x, threads_y); + dim3 blocks_step1(block_x_step1, block_y); + dim3 blocks_step2(block_x_step2, block_y); + + int *query_mask_ptr = query_mask.data_ptr(); + int *query_hash_code_ptr = query_hash_code.data_ptr(); + int *key_mask_ptr = key_mask.data_ptr(); + int *key_hash_code_ptr = key_hash_code.data_ptr(); + float *value_ptr = value.data_ptr(); + float *hashtable_value_ptr = hashtable_value.data_ptr(); + float *cumulation_value_ptr = cumulation_value.data_ptr(); + + for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) { + + cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float)); + + lsh_cumulation_ver1_step1_cuda_kernel<<>>( + key_mask_ptr, + key_hash_code_ptr, + value_ptr, + 
hashtable_value_ptr, + batch_size, + num_hash_f, + hashtable_capacity, + num_key, + value_dim, + value_offset + ); + + lsh_cumulation_ver1_step2_cuda_kernel<<>>( + query_mask_ptr, + query_hash_code_ptr, + hashtable_value_ptr, + cumulation_value_ptr, + batch_size, + num_hash_f, + hashtable_capacity, + num_query, + value_dim, + value_offset + ); + } + + } + + return cumulation_value; + +} + +at::Tensor lsh_weighted_cumulation_ver1_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +) { + + int batch_size = query_hash_code.size(0); + int num_hash_f = query_hash_code.size(2); + + int num_query = query_hash_code.size(1); + int num_key = key_hash_code.size(1); + int value_dim = value.size(2); + int weight_dim = query_weight.size(2); + + at::Tensor hashtable_value = at::zeros({batch_size, num_hash_f, hashtable_capacity, WARP_SIZE}, value.options()); + at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); + + if (use_cuda) { + int threads_x = WARP_SIZE; + int threads_y = OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE; + int block_x_step1 = num_key / threads_y; + int block_x_step2 = num_query / threads_y; + int block_y = batch_size; + + dim3 threads(threads_x, threads_y); + dim3 blocks_step1(block_x_step1, block_y); + dim3 blocks_step2(block_x_step2, block_y); + + int *query_mask_ptr = query_mask.data_ptr(); + int *query_hash_code_ptr = query_hash_code.data_ptr(); + float *query_weight_ptr = query_weight.data_ptr(); + int *key_mask_ptr = key_mask.data_ptr(); + int *key_hash_code_ptr = key_hash_code.data_ptr(); + float *key_weight_ptr = key_weight.data_ptr(); + float *value_ptr = value.data_ptr(); + float *hashtable_value_ptr = hashtable_value.data_ptr(); + float *cumulation_value_ptr = cumulation_value.data_ptr(); + + for (int value_offset = 0; value_offset < value_dim; 
value_offset = value_offset + WARP_SIZE) { + for (int weight_idx = 0; weight_idx < weight_dim; weight_idx++) { + + cudaMemset(hashtable_value_ptr, 0, (batch_size * num_hash_f * hashtable_capacity * WARP_SIZE) * sizeof(float)); + + lsh_weighted_cumulation_ver1_step1_cuda_kernel<<>>( + key_mask_ptr, + key_hash_code_ptr, + key_weight_ptr, + value_ptr, + hashtable_value_ptr, + batch_size, + num_hash_f, + hashtable_capacity, + num_key, + value_dim, + weight_dim, + value_offset, + weight_idx + ); + + lsh_weighted_cumulation_ver1_step2_cuda_kernel<<>>( + query_mask_ptr, + query_hash_code_ptr, + query_weight_ptr, + hashtable_value_ptr, + cumulation_value_ptr, + batch_size, + num_hash_f, + hashtable_capacity, + num_query, + value_dim, + weight_dim, + value_offset, + weight_idx + ); + } + } + + } + + return cumulation_value; + +} + +at::Tensor lsh_weighted_cumulation_ver2_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +) { + + int batch_size = query_hash_code.size(0); + int num_hash_f = query_hash_code.size(2); + + int num_query = query_hash_code.size(1); + int num_key = key_hash_code.size(1); + int value_dim = value.size(2); + int weight_dim = query_weight.size(2); + + at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options()); + at::Tensor key_sorted_idxes = at::zeros({batch_size, num_hash_f, num_key}, query_hash_code.options()); + at::Tensor query_info = at::zeros({batch_size, num_query, 2, num_hash_f}, query_hash_code.options()); + at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); + + if (use_cuda) { + + int *query_mask_ptr = query_mask.data_ptr(); + int *query_hash_code_ptr = query_hash_code.data_ptr(); + float *query_weight_ptr = query_weight.data_ptr(); + int *key_mask_ptr = 
key_mask.data_ptr(); + int *key_hash_code_ptr = key_hash_code.data_ptr(); + float *key_weight_ptr = key_weight.data_ptr(); + float *value_ptr = value.data_ptr(); + + int *count_sort_table_ptr = count_sort_table.data_ptr(); + int *key_sorted_idxes_ptr = key_sorted_idxes.data_ptr(); + int *query_info_ptr = query_info.data_ptr(); + + float *cumulation_value_ptr = cumulation_value.data_ptr(); + + { + dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); + dim3 blocks_step13(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); + dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK)); + dim3 blocks_step2(num_hash_f, batch_size); + int shared_mem = hashtable_capacity * sizeof(float); + count_sort_step1_cuda_kernel<<>>( + key_mask_ptr, + key_hash_code_ptr, + count_sort_table_ptr, + batch_size, + num_hash_f, + hashtable_capacity, + num_key + ); + count_sort_step2_cuda_kernel<<>>( + count_sort_table_ptr, + batch_size, + num_hash_f, + hashtable_capacity + ); + count_sort_step3_cuda_kernel<<>>( + key_mask_ptr, + key_hash_code_ptr, + count_sort_table_ptr, + key_sorted_idxes_ptr, + batch_size, + num_hash_f, + hashtable_capacity, + num_key + ); + } + { + dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); + dim3 blocks(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); + extract_query_info_cuda_kernel<<>>( + query_mask_ptr, + query_hash_code_ptr, + count_sort_table_ptr, + query_info_ptr, + batch_size, + num_hash_f, + hashtable_capacity, + num_query + ); + } + { + dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE); + dim3 blocks(num_query, num_hash_f, batch_size); + int shared_mem = (weight_dim + WARP_SIZE) * sizeof(float); + lsh_weighted_cumulation_ver2_step2_cuda_kernel<<>>( + query_mask_ptr, + query_info_ptr, + key_sorted_idxes_ptr, + query_weight_ptr, + key_weight_ptr, + value_ptr, + cumulation_value_ptr, + batch_size, + num_hash_f, + num_query, + num_key, + 
value_dim, + weight_dim + ); + } + } + + return cumulation_value; + +} + +at::Tensor lsh_weighted_cumulation_ver3_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +) { + + int batch_size = query_hash_code.size(0); + int num_hash_f = query_hash_code.size(2); + + int num_query = query_hash_code.size(1); + int num_key = key_hash_code.size(1); + int value_dim = value.size(2); + int weight_dim = query_weight.size(2); + + at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options()); + at::Tensor query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options()); + at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options()); + at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); + + if (use_cuda) { + + int *query_mask_ptr = query_mask.data_ptr(); + int *query_hash_code_ptr = query_hash_code.data_ptr(); + float *query_weight_ptr = query_weight.data_ptr(); + int *key_mask_ptr = key_mask.data_ptr(); + int *key_hash_code_ptr = key_hash_code.data_ptr(); + float *key_weight_ptr = key_weight.data_ptr(); + float *value_ptr = value.data_ptr(); + + int *count_sort_table_ptr = count_sort_table.data_ptr(); + int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr(); + int *key_info_ptr = key_info.data_ptr(); + + float *cumulation_value_ptr = cumulation_value.data_ptr(); + + { + dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); + dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); + dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK)); + dim3 blocks_step2(num_hash_f, batch_size); + int shared_mem = hashtable_capacity * sizeof(float); + 
count_sort_step1_cuda_kernel<<>>( + query_mask_ptr, + query_hash_code_ptr, + count_sort_table_ptr, + batch_size, + num_hash_f, + hashtable_capacity, + num_query + ); + count_sort_step2_cuda_kernel<<>>( + count_sort_table_ptr, + batch_size, + num_hash_f, + hashtable_capacity + ); + count_sort_step3_cuda_kernel<<>>( + query_mask_ptr, + query_hash_code_ptr, + count_sort_table_ptr, + query_sorted_idxes_ptr, + batch_size, + num_hash_f, + hashtable_capacity, + num_query + ); + } + { + dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); + dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); + extract_query_info_cuda_kernel<<>>( + key_mask_ptr, + key_hash_code_ptr, + count_sort_table_ptr, + key_info_ptr, + batch_size, + num_hash_f, + hashtable_capacity, + num_key + ); + } + { + dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE); + dim3 blocks(num_key, num_hash_f, batch_size); + int shared_mem = (weight_dim + value_dim + WARP_SIZE) * sizeof(float); + lsh_weighted_cumulation_ver3_step2_cuda_kernel<<>>( + query_sorted_idxes_ptr, + key_mask_ptr, + key_info_ptr, + query_weight_ptr, + key_weight_ptr, + value_ptr, + cumulation_value_ptr, + batch_size, + num_hash_f, + num_query, + num_key, + value_dim, + weight_dim + ); + } + } + + return cumulation_value; + +} + +at::Tensor lsh_weighted_cumulation_ver4_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +) { + + int batch_size = query_hash_code.size(0); + int num_hash_f = query_hash_code.size(2); + + int num_query = query_hash_code.size(1); + int num_key = key_hash_code.size(1); + int value_dim = value.size(2); + int weight_dim = query_weight.size(2); + + at::Tensor count_sort_table = at::zeros({batch_size, num_hash_f, hashtable_capacity}, query_hash_code.options()); + at::Tensor 
query_sorted_idxes = at::zeros({batch_size, num_hash_f, num_query}, query_hash_code.options()); + at::Tensor key_info = at::zeros({batch_size, num_key, 2, num_hash_f}, query_hash_code.options()); + at::Tensor cumulation_value = at::zeros({batch_size, num_query, value_dim}, value.options()); + + if (use_cuda) { + + int *query_mask_ptr = query_mask.data_ptr(); + int *query_hash_code_ptr = query_hash_code.data_ptr(); + float *query_weight_ptr = query_weight.data_ptr(); + int *key_mask_ptr = key_mask.data_ptr(); + int *key_hash_code_ptr = key_hash_code.data_ptr(); + float *key_weight_ptr = key_weight.data_ptr(); + float *value_ptr = value.data_ptr(); + + int *count_sort_table_ptr = count_sort_table.data_ptr(); + int *query_sorted_idxes_ptr = query_sorted_idxes.data_ptr(); + int *key_info_ptr = key_info.data_ptr(); + + float *cumulation_value_ptr = cumulation_value.data_ptr(); + + { + dim3 threads_step13(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); + dim3 blocks_step13(num_query / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); + dim3 threads_step2(min(hashtable_capacity, OPTIMAL_THREADS_PER_BLOCK)); + dim3 blocks_step2(num_hash_f, batch_size); + int shared_mem = hashtable_capacity * sizeof(float); + count_sort_step1_cuda_kernel<<>>( + query_mask_ptr, + query_hash_code_ptr, + count_sort_table_ptr, + batch_size, + num_hash_f, + hashtable_capacity, + num_query + ); + count_sort_step2_cuda_kernel<<>>( + count_sort_table_ptr, + batch_size, + num_hash_f, + hashtable_capacity + ); + count_sort_step3_cuda_kernel<<>>( + query_mask_ptr, + query_hash_code_ptr, + count_sort_table_ptr, + query_sorted_idxes_ptr, + batch_size, + num_hash_f, + hashtable_capacity, + num_query + ); + } + { + dim3 threads(num_hash_f, max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f)); + dim3 blocks(num_key / max(1, OPTIMAL_THREADS_PER_BLOCK / num_hash_f), batch_size); + extract_query_info_cuda_kernel<<>>( + key_mask_ptr, + key_hash_code_ptr, + count_sort_table_ptr, + 
key_info_ptr, + batch_size, + num_hash_f, + hashtable_capacity, + num_key + ); + } + { + dim3 threads(WARP_SIZE, OPTIMAL_THREADS_PER_BLOCK / WARP_SIZE); + dim3 blocks(num_key, batch_size); + int shared_mem = (weight_dim + value_dim + 2 * num_hash_f) * sizeof(float); + lsh_weighted_cumulation_ver4_step2_cuda_kernel<<>>( + query_sorted_idxes_ptr, + key_mask_ptr, + key_info_ptr, + query_weight_ptr, + key_weight_ptr, + value_ptr, + cumulation_value_ptr, + batch_size, + num_hash_f, + num_query, + num_key, + value_dim, + weight_dim + ); + } + } + + return cumulation_value; + +} diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h new file mode 100644 index 0000000000000000000000000000000000000000..dd48de0ed159f49ee3afe93b12aaae719fe87688 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation.h @@ -0,0 +1,71 @@ +#include +#include +#include + +std::vector fast_hash_ver1_kernel( + at::Tensor query_mask, + at::Tensor query_vector, + at::Tensor key_mask, + at::Tensor key_vector, + int num_hash_f, + int hash_code_len, + bool use_cuda +); + +at::Tensor lsh_cumulation_ver1_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +); + +at::Tensor lsh_weighted_cumulation_ver1_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +); + +at::Tensor lsh_weighted_cumulation_ver2_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +); + +at::Tensor 
lsh_weighted_cumulation_ver3_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +); + +at::Tensor lsh_weighted_cumulation_ver4_kernel( + at::Tensor query_mask, + at::Tensor query_hash_code, + at::Tensor query_weight, + at::Tensor key_mask, + at::Tensor key_hash_code, + at::Tensor key_weight, + at::Tensor value, + int hashtable_capacity, + bool use_cuda +); diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.cu b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.cu new file mode 100644 index 0000000000000000000000000000000000000000..ebc6260dd6db3ecaf8cb7b35c3c1a6e1ab3851dc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.cu @@ -0,0 +1,825 @@ +// File from https://github.com/mlpen/YOSO/blob/main/encoders/backbones/efficient_attentions/yoso/yoso_v1/cuda/fast_lsh_cumulation_cuda.cu + +#include "fast_lsh_cumulation_cuda.h" +#include "common_cuda_device.h" +#include "common_cuda.h" +#include "common.h" +#include +////////////////////////////////////////////////////////////////////////////////////////////////// +////////////////////////////////////////////////////////////////////////////////////////////////// + +inline __device__ void fast_hadamard_transform(float *vector_buffer, int vector_dim, int dim_idx) { + int stride = vector_dim / 2; + while (stride > (WARP_SIZE / 2)) { + __syncthreads(); + int sign = 1 - ((dim_idx / stride) % 2) * 2; + float val1 = vector_buffer[dim_idx]; + float val2 = vector_buffer[dim_idx + sign * stride]; + __syncthreads(); + vector_buffer[dim_idx] = float(sign) * val1 + val2; + stride = stride / 2; + } + + float val = vector_buffer[dim_idx]; + #pragma unroll + for (stride = (WARP_SIZE / 2); stride > 0; stride = stride / 2) { + int 
sign = 1 - ((dim_idx / stride) % 2) * 2; + val = float(sign) * val + __shfl_xor_sync(FULL_MASK, val, stride); + } + vector_buffer[dim_idx] = val; +} + +__global__ void fast_hash_ver1_cuda_kernel( + int *mask, // [batch_size, num_vector] + float *vector, // [batch_size, num_vector, vector_dim] + int *Dmat, // [batch_size, 3, num_part, vector_dim] + int *hash_code, // [batch_size, num_vector, num_hash_f] + int batch_size, + int num_vector, + int vector_dim, + int num_part, + int num_hash_f, + int hash_code_len +) { + + int batch_idx = blockIdx.z; + int vector_idx = blockIdx.y; + int part_idx = blockIdx.x; + + int dim_idx = threadIdx.x; + + int batch_idx__vector_idx = batch_idx * num_vector + vector_idx; + if (mask[batch_idx__vector_idx] == 0) { + return; + } + + extern __shared__ float buffer[]; + float *vector_buffer = buffer; + + vector_buffer[dim_idx] = vector[batch_idx__vector_idx * vector_dim + dim_idx]; + + vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 0) * num_part + part_idx) * vector_dim + dim_idx]; + fast_hadamard_transform(vector_buffer, vector_dim, dim_idx); + vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 1) * num_part + part_idx) * vector_dim + dim_idx]; + fast_hadamard_transform(vector_buffer, vector_dim, dim_idx); + vector_buffer[dim_idx] = vector_buffer[dim_idx] * (float)Dmat[((batch_idx * 3 + 2) * num_part + part_idx) * vector_dim + dim_idx]; + fast_hadamard_transform(vector_buffer, vector_dim, dim_idx); + + int num_hash_per_part = vector_dim / hash_code_len; + if (hash_code_len == 8 || hash_code_len == 16) { + int code = select(vector_buffer[dim_idx] > 0, 1 << (dim_idx % hash_code_len), 0); + for (int offset = 1; offset < hash_code_len; offset = offset * 2) { + code += __shfl_xor_sync(FULL_MASK, code, offset); + } + if (dim_idx % hash_code_len == 0) { + int hash_f_idx = part_idx * num_hash_per_part + dim_idx / hash_code_len; + if (hash_f_idx < num_hash_f) { + 
hash_code[batch_idx__vector_idx * num_hash_f + hash_f_idx] = code; + } + } + } else { + vector_buffer[dim_idx] = select(vector_buffer[dim_idx] > 0, 1 << (dim_idx % hash_code_len), 0); + __syncthreads(); + if (dim_idx < num_hash_per_part) { + int code = 0; + for (int i = 0; i < hash_code_len; i++) { + code += vector_buffer[dim_idx * hash_code_len + i]; + } + int hash_f_idx = part_idx * num_hash_per_part + dim_idx; + if (hash_f_idx < num_hash_f) { + hash_code[batch_idx__vector_idx * num_hash_f + hash_f_idx] = code; + } + } + } +} + +__global__ void lsh_cumulation_ver1_step1_cuda_kernel( + int *key_mask, // [batch_size, num_key] + int *key_hash_code, // [batch_size, num_key, num_hash_f] + float *value, // [batch_size, num_key, value_dim] + float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_key, + int value_dim, + int offset_warp +) { + + int warp_thread_idx = threadIdx.x; + + int batch_idx = blockIdx.y; + int key_idx = blockIdx.x * blockDim.y + threadIdx.y; + + int batch_idx__key_idx = batch_idx * num_key + key_idx; + if (key_mask[batch_idx__key_idx] == 0) { + return; + } + + if (num_hash_f > WARP_SIZE) { + float warp_value = value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx]; + for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) { + int warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_start + warp_thread_idx]; + #pragma unroll + for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) { + int current_hashcode = warp_hashcode; + current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset); + int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode; + atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value); + } + } + } else { + float warp_value = 
value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx]; + int warp_hashcode = 0; + if (warp_thread_idx < num_hash_f) { + warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + warp_thread_idx]; + } + for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) { + int current_hashcode = warp_hashcode; + current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx); + int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode; + atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value); + } + } + +} + +__global__ void lsh_cumulation_ver1_step2_cuda_kernel( + int *query_mask, // [batch_size, num_query] + int *query_hash_code, // [batch_size, num_query, num_hash_f] + float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_query, + int value_dim, + int offset_warp +) { + + int warp_thread_idx = threadIdx.x; + + int batch_idx = blockIdx.y; + int query_idx = blockIdx.x * blockDim.y + threadIdx.y; + + int batch_idx__query_idx = batch_idx * num_query + query_idx; + if (query_mask[batch_idx__query_idx] == 0) { + return; + } + + if (num_hash_f > WARP_SIZE) { + float warp_value = 0; + for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) { + int warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_start + warp_thread_idx]; + #pragma unroll + for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) { + int current_hashcode = warp_hashcode; + current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset); + int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode; + warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx]; + } + } + 
cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] = warp_value / float(num_hash_f); + } else { + float warp_value = 0; + int warp_hashcode = 0; + if (warp_thread_idx < num_hash_f) { + warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + warp_thread_idx]; + } + for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) { + int current_hashcode = warp_hashcode; + current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx); + int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode; + warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx]; + } + cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] = warp_value / float(num_hash_f); + } + +} + +__global__ void lsh_weighted_cumulation_ver1_step1_cuda_kernel( + int *key_mask, // [batch_size, num_key] + int *key_hash_code, // [batch_size, num_key, num_hash_f] + float *key_weight, // [batch_size, num_key, weight_dim] + float *value, // [batch_size, num_key, value_dim] + float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_key, + int value_dim, + int weight_dim, + int offset_warp, + int weight_idx +) { + + int warp_thread_idx = threadIdx.x; + + int batch_idx = blockIdx.y; + int key_idx = blockIdx.x * blockDim.y + threadIdx.y; + + int batch_idx__key_idx = batch_idx * num_key + key_idx; + if (key_mask[batch_idx__key_idx] == 0) { + return; + } + + if (num_hash_f > WARP_SIZE) { + float warp_value = key_weight[batch_idx__key_idx * weight_dim + weight_idx] * value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx]; + for (int hash_f_start = 0; hash_f_start < num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) { + int warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_start + warp_thread_idx]; + #pragma unroll + for (int 
hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) { + int current_hashcode = warp_hashcode; + current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset); + int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode; + atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value); + } + } + } else { + float warp_value = key_weight[batch_idx__key_idx * weight_dim + weight_idx] * value[batch_idx__key_idx * value_dim + offset_warp + warp_thread_idx]; + int warp_hashcode = 0; + if (warp_thread_idx < num_hash_f) { + warp_hashcode = key_hash_code[batch_idx__key_idx * num_hash_f + warp_thread_idx]; + } + for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) { + int current_hashcode = warp_hashcode; + current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx); + int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode; + atomicAdd(&hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx], warp_value); + } + } + +} + +__global__ void lsh_weighted_cumulation_ver1_step2_cuda_kernel( + int *query_mask, // [batch_size, num_query] + int *query_hash_code, // [batch_size, num_query, num_hash_f] + float *query_weight, // [batch_size, num_query, weight_dim] + float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_query, + int value_dim, + int weight_dim, + int offset_warp, + int weight_idx +) { + + int warp_thread_idx = threadIdx.x; + + int batch_idx = blockIdx.y; + int query_idx = blockIdx.x * blockDim.y + threadIdx.y; + + int batch_idx__query_idx = batch_idx * num_query + query_idx; + if (query_mask[batch_idx__query_idx] == 0) { + return; + } + + if (num_hash_f > WARP_SIZE) { + float warp_value = 0; + for (int hash_f_start = 0; hash_f_start < 
num_hash_f; hash_f_start = hash_f_start + WARP_SIZE) { + int warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_start + warp_thread_idx]; + #pragma unroll + for (int hash_f_offset = 0; hash_f_offset < WARP_SIZE; hash_f_offset++) { + int current_hashcode = warp_hashcode; + current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_offset); + int hashtable_idx = (batch_idx * num_hash_f + (hash_f_start + hash_f_offset)) * hashtable_capacity + current_hashcode; + warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx]; + } + } + float warp_weight = query_weight[batch_idx__query_idx * weight_dim + weight_idx]; + cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] += warp_weight * warp_value / float(num_hash_f); + } else { + float warp_value = 0; + int warp_hashcode = 0; + if (warp_thread_idx < num_hash_f) { + warp_hashcode = query_hash_code[batch_idx__query_idx * num_hash_f + warp_thread_idx]; + } + for (int hash_f_idx = 0; hash_f_idx < num_hash_f; hash_f_idx++) { + int current_hashcode = warp_hashcode; + current_hashcode = __shfl_sync(FULL_MASK, current_hashcode, hash_f_idx); + int hashtable_idx = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + current_hashcode; + warp_value = warp_value + hashtable_value[hashtable_idx * WARP_SIZE + warp_thread_idx]; + } + float warp_weight = query_weight[batch_idx__query_idx * weight_dim + weight_idx]; + cumulation_value[batch_idx__query_idx * value_dim + offset_warp + warp_thread_idx] += warp_weight * warp_value / float(num_hash_f); + } + +} + +__global__ void count_sort_step1_cuda_kernel( + int *key_mask, // [batch_size, num_key] + int *key_hash_code, // [batch_size, num_key, num_hash_f] + int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_key +) { + + int batch_idx = blockIdx.y; + int key_idx = blockIdx.x * blockDim.y + threadIdx.y; + 
int hash_f_idx = threadIdx.x; + + int batch_idx__key_idx = batch_idx * num_key + key_idx; + if (key_mask[batch_idx__key_idx] == 0) { + return; + } + + int hash_code = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_idx]; + atomicAdd(&count_sort_table[(batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + hash_code], 1); + +} + +__global__ void count_sort_step2_cuda_kernel( + int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity] + int batch_size, + int num_hash_f, + int hashtable_capacity +) { + + int batch_idx = blockIdx.y; + int hash_f_idx = blockIdx.x; + + int num_threads = blockDim.x; + int thread_id = threadIdx.x; + + int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx; + + extern __shared__ float buffer[]; + int *table_buffer = (int*)buffer; + + if (thread_id == 0) { + table_buffer[0] = 0; + } + copy_data(&count_sort_table[batch_idx__hash_f_idx * hashtable_capacity], &table_buffer[1], hashtable_capacity - 1, num_threads, thread_id); + + for (int table_idx_start = 0; table_idx_start < hashtable_capacity; table_idx_start = table_idx_start + num_threads) { + int thread_value = table_buffer[table_idx_start + thread_id]; + int next_thread_value = 0; + for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) { + next_thread_value = __shfl_up_sync(FULL_MASK, thread_value, offset); + if (thread_id % WARP_SIZE >= offset) { + thread_value = thread_value + next_thread_value; + } + } + table_buffer[table_idx_start + thread_id] = thread_value; + } + __syncthreads(); + + if (hashtable_capacity > WARP_SIZE) { + if (thread_id < WARP_SIZE) { + for (int table_idx_start = WARP_SIZE; table_idx_start < hashtable_capacity; table_idx_start = table_idx_start + WARP_SIZE) { + table_buffer[table_idx_start + thread_id] += table_buffer[table_idx_start - 1]; + } + } + } + + copy_data(table_buffer, &count_sort_table[batch_idx__hash_f_idx * hashtable_capacity], hashtable_capacity, num_threads, thread_id); + +} + + +__global__ void 
count_sort_step3_cuda_kernel( + int *key_mask, // [batch_size, num_key] + int *key_hash_code, // [batch_size, num_key, num_hash_f] + int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity] + int *key_sorted_idxes, // [batch_size, num_hash_f, num_key] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_key +) { + + int batch_idx = blockIdx.y; + int key_idx = blockIdx.x * blockDim.y + threadIdx.y; + int hash_f_idx = threadIdx.x; + + int batch_idx__key_idx = batch_idx * num_key + key_idx; + if (key_mask[batch_idx__key_idx] == 0) { + return; + } + + int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx; + + int hash_code = key_hash_code[batch_idx__key_idx * num_hash_f + hash_f_idx]; + int sort_idx = atomicAdd(&count_sort_table[batch_idx__hash_f_idx * hashtable_capacity + hash_code], 1); + key_sorted_idxes[batch_idx__hash_f_idx * num_key + sort_idx] = key_idx; + +} + +__global__ void extract_query_info_cuda_kernel( + int *query_mask, // [batch_size, num_query] + int *query_hash_code, // [batch_size, num_query, num_hash_f] + int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity] + int *query_info, // [batch_size, num_query, 2, num_hash_f] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_query +) { + + int batch_idx = blockIdx.y; + int query_idx = blockIdx.x * blockDim.y + threadIdx.y; + int hash_f_idx = threadIdx.x; + + int batch_idx__query_idx = batch_idx * num_query + query_idx; + if (query_mask[batch_idx__query_idx] == 0) { + return; + } + + int hash_code = query_hash_code[batch_idx__query_idx * num_hash_f + hash_f_idx]; + int batch_idx__hash_f_idx__hash_code = (batch_idx * num_hash_f + hash_f_idx) * hashtable_capacity + hash_code; + + int key_offset = select(hash_code == 0, 0, count_sort_table[batch_idx__hash_f_idx__hash_code - 1]); + int key_count = count_sort_table[batch_idx__hash_f_idx__hash_code] - key_offset; + + query_info[batch_idx__query_idx * 2 * num_hash_f + hash_f_idx] = 
key_offset; + query_info[(batch_idx__query_idx * 2 + 1) * num_hash_f + hash_f_idx] = key_count; + +} + +__global__ void lsh_weighted_cumulation_ver2_step2_cuda_kernel( + int *query_mask, // [batch_size, num_query] + int *query_info, // [batch_size, num_query, 2, num_hash_f] + int *key_sorted_idxes, // [batch_size, num_hash_f, num_key] + float *query_weight, // [batch_size, num_query, weight_dim] + float *key_weight, // [batch_size, num_key, weight_dim] + float *value, // [batch_size, num_key, value_dim] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int num_query, + int num_key, + int value_dim, + int weight_dim +) { + + int batch_idx = blockIdx.z; + int hash_f_idx = blockIdx.y; + int query_idx = blockIdx.x; + + int num_threads = blockDim.y * blockDim.x; + int thread_id = threadIdx.y * blockDim.x + threadIdx.x; + + int num_warps = blockDim.y; + int warp_idx = threadIdx.y; + int warp_thread_idx = threadIdx.x; + + int batch_idx__query_idx = batch_idx * num_query + query_idx; + if (query_mask[batch_idx__query_idx] == 0) { + return; + } + + int key_offset = query_info[batch_idx__query_idx * 2 * num_hash_f + hash_f_idx]; + int key_count = query_info[(batch_idx__query_idx * 2 + 1) * num_hash_f + hash_f_idx]; + + if (key_count == 0) { + return; + } + + extern __shared__ float buffer[]; + + if (key_count == 1) { + if (warp_idx == 0) { + int key_idx = key_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_key + key_offset]; + int batch_idx__key_idx = batch_idx * num_key + key_idx; + float weight = 0; + for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) { + int weight_dim_idx = weight_offset + warp_thread_idx; + float val = query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx] * key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx]; + #pragma unroll + for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) { + val += 
__shfl_xor_sync(FULL_MASK, val, offset); + } + weight = weight + val; + } + weight = weight / float(num_hash_f); + for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) { + int value_dim_idx = value_offset + warp_thread_idx; + float val = value[batch_idx__key_idx * value_dim + value_dim_idx]; + atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val); + } + } + } else { + float *weight_buffer = buffer; + int *key_idxes_buffer = (int*)&buffer[weight_dim]; + + copy_data_nonblocking(&query_weight[batch_idx__query_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id); + + while (key_count > 0) { + int work_size = min(WARP_SIZE, key_count); + copy_data_nonblocking(&key_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_key + key_offset], key_idxes_buffer, work_size, num_threads, thread_id); + __syncthreads(); + for (int work_offset = 0; work_offset < WARP_SIZE; work_offset = work_offset + num_warps) { + int work_idx = work_offset + warp_idx; + if (work_idx < key_count) { + int key_idx = key_idxes_buffer[work_idx]; + int batch_idx__key_idx = batch_idx * num_key + key_idx; + float weight = 0; + for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) { + int weight_dim_idx = weight_offset + warp_thread_idx; + float val = weight_buffer[weight_dim_idx] * key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx]; + #pragma unroll + for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) { + val += __shfl_xor_sync(FULL_MASK, val, offset); + } + weight = weight + val; + } + weight = weight / float(num_hash_f); + for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) { + int value_dim_idx = value_offset + warp_thread_idx; + float val = value[batch_idx__key_idx * value_dim + value_dim_idx]; + atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val); + } + } 
+ } + key_count = key_count - work_size; + key_offset = key_offset + work_size; + } + } + +} + +__global__ void lsh_weighted_cumulation_ver3_step2_cuda_kernel( + int *query_sorted_idxes, // [batch_size, num_hash_f, num_query] + int *key_mask, // [batch_size, num_key] + int *key_info, // [batch_size, num_key, 2, num_hash_f] + float *query_weight, // [batch_size, num_query, weight_dim] + float *key_weight, // [batch_size, num_key, weight_dim] + float *value, // [batch_size, num_key, value_dim] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int num_query, + int num_key, + int value_dim, + int weight_dim +) { + + int batch_idx = blockIdx.z; + int hash_f_idx = blockIdx.y; + int key_idx = blockIdx.x; + + int num_threads = blockDim.y * blockDim.x; + int thread_id = threadIdx.y * blockDim.x + threadIdx.x; + + int num_warps = blockDim.y; + int warp_idx = threadIdx.y; + int warp_thread_idx = threadIdx.x; + + int batch_idx__key_idx = batch_idx * num_key + key_idx; + if (key_mask[batch_idx__key_idx] == 0) { + return; + } + + int query_offset = key_info[batch_idx__key_idx * 2 * num_hash_f + hash_f_idx]; + int query_count = key_info[(batch_idx__key_idx * 2 + 1) * num_hash_f + hash_f_idx]; + + if (query_count == 0) { + return; + } + + extern __shared__ float buffer[]; + + if (query_count == 1) { + if (warp_idx == 0) { + int query_idx = query_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_query + query_offset]; + int batch_idx__query_idx = batch_idx * num_query + query_idx; + float weight = 0; + for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) { + int weight_dim_idx = weight_offset + warp_thread_idx; + float val = key_weight[batch_idx__key_idx * weight_dim + weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx]; + #pragma unroll + for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) { + val += __shfl_xor_sync(FULL_MASK, val, 
offset); + } + weight = weight + val; + } + weight = weight / float(num_hash_f); + for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) { + int value_dim_idx = value_offset + warp_thread_idx; + float val = value[batch_idx__key_idx * value_dim + value_dim_idx]; + atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val); + } + } + } else { + float *weight_buffer = buffer; + float *value_buffer = &buffer[weight_dim]; + int *query_idxes_buffer = (int*)&buffer[weight_dim + value_dim]; + + copy_data_nonblocking(&key_weight[batch_idx__key_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id); + copy_data_nonblocking(&value[batch_idx__key_idx * value_dim], value_buffer, value_dim, num_threads, thread_id); + + while (query_count > 0) { + int work_size = min(WARP_SIZE, query_count); + copy_data_nonblocking(&query_sorted_idxes[(batch_idx * num_hash_f + hash_f_idx) * num_query + query_offset], query_idxes_buffer, work_size, num_threads, thread_id); + __syncthreads(); + for (int work_offset = 0; work_offset < WARP_SIZE; work_offset = work_offset + num_warps) { + int work_idx = work_offset + warp_idx; + if (work_idx < query_count) { + int query_idx = query_idxes_buffer[work_idx]; + int batch_idx__query_idx = batch_idx * num_query + query_idx; + float weight = 0; + for (int weight_offset = 0; weight_offset < weight_dim; weight_offset = weight_offset + WARP_SIZE) { + int weight_dim_idx = weight_offset + warp_thread_idx; + float val = weight_buffer[weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx]; + #pragma unroll + for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) { + val += __shfl_xor_sync(FULL_MASK, val, offset); + } + weight = weight + val; + } + weight = weight / float(num_hash_f); + for (int value_offset = 0; value_offset < value_dim; value_offset = value_offset + WARP_SIZE) { + int value_dim_idx = value_offset + warp_thread_idx; + float 
val = value_buffer[value_dim_idx]; + atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val); + } + } + } + query_count = query_count - work_size; + query_offset = query_offset + work_size; + } + } + +} + +__global__ void lsh_weighted_cumulation_ver4_step2_cuda_kernel( + int *query_sorted_idxes, // [batch_size, num_hash_f, num_query] + int *key_mask, // [batch_size, num_key] + int *key_info, // [batch_size, num_key, 2, num_hash_f] + float *query_weight, // [batch_size, num_query, weight_dim] + float *key_weight, // [batch_size, num_key, weight_dim] + float *value, // [batch_size, num_key, value_dim] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int num_query, + int num_key, + int value_dim, + int weight_dim +) { + + int batch_idx = blockIdx.y; + int key_idx = blockIdx.x; + + int num_threads = blockDim.y * blockDim.x; + int thread_id = threadIdx.y * blockDim.x + threadIdx.x; + + int num_warps = blockDim.y; + int warp_idx = threadIdx.y; + int warp_thread_idx = threadIdx.x; + + int batch_idx__key_idx = batch_idx * num_key + key_idx; + if (key_mask[batch_idx__key_idx] == 0) { + return; + } + + extern __shared__ float buffer[]; + float *weight_buffer = buffer; + float *value_buffer = &buffer[weight_dim]; + int *key_info_buffer = (int*)&buffer[weight_dim + value_dim]; + + copy_data_nonblocking(&key_weight[batch_idx__key_idx * weight_dim], weight_buffer, weight_dim, num_threads, thread_id); + copy_data_nonblocking(&value[batch_idx__key_idx * value_dim], value_buffer, value_dim, num_threads, thread_id); + copy_data_nonblocking(&key_info[batch_idx__key_idx * 2 * num_hash_f], key_info_buffer, 2 * num_hash_f, num_threads, thread_id); + + int *query_offset_buffer = key_info_buffer; + int *query_count_buffer = &key_info_buffer[num_hash_f]; + + const int hashtable_size = 1024 + OPTIMAL_THREADS_PER_BLOCK; + __shared__ int hashtable_query[hashtable_size]; + __shared__ int 
hashtable_count[hashtable_size]; + __shared__ int inserted_query[hashtable_size]; + __shared__ int query_counter[1]; + + int hash_f_idx_base = 0; + + while (true) { + + init_buffer_nonblocking(EMPTY_VALUE, hashtable_query, hashtable_size, num_threads, thread_id); + init_buffer_nonblocking(0, hashtable_count, hashtable_size, num_threads, thread_id); + init_buffer_nonblocking(EMPTY_VALUE, inserted_query, hashtable_size, num_threads, thread_id); + init_buffer_nonblocking(0, query_counter, 1, num_threads, thread_id); + __syncthreads(); + + while (hash_f_idx_base < num_hash_f) { + + int hash_f_idx = hash_f_idx_base + warp_idx; + int batch_idx__hash_f_idx = batch_idx * num_hash_f + hash_f_idx; + + int stop_flag = 0; + + int query_offset = query_offset_buffer[hash_f_idx]; + int query_count = query_count_buffer[hash_f_idx]; + + while (query_count > 0) { + + int work_size = min(query_count, WARP_SIZE); + + // try inserting query to set and check whether the query is new + int found_new_query = 0; + int query_idx = -1; + if (warp_thread_idx < work_size) { + query_idx = query_sorted_idxes[batch_idx__hash_f_idx * num_query + query_offset + warp_thread_idx]; + int slot = set_insert(hashtable_query, hashtable_size, query_idx); + if (slot >= 0) { + found_new_query = atomicAdd(&hashtable_count[slot], 1) == 0; + } + } + + // compute cumulative offset + int position_offset = found_new_query; + int next_position_offset = 0; + #pragma unroll + for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) { + next_position_offset = __shfl_up_sync(FULL_MASK, position_offset, offset); + if (thread_id % WARP_SIZE >= offset) { + position_offset = position_offset + next_position_offset; + } + } + + // get the inserted query list end index + int inserted_query_base = 0; + if (thread_id % WARP_SIZE == WARP_SIZE - 1) { + inserted_query_base = atomicAdd(query_counter, position_offset); + } + inserted_query_base = __shfl_sync(FULL_MASK, inserted_query_base, WARP_SIZE - 1); + + // insert new 
queries to list + int insert_idx = inserted_query_base + position_offset - 1; + if (found_new_query) { + inserted_query[insert_idx] = query_idx; + } + + // remove inserted queries from list + query_offset_buffer[hash_f_idx] += work_size; + query_count_buffer[hash_f_idx] -= work_size; + query_offset += work_size; + query_count -= work_size; + + // if list is almost full, stop inserting + if (inserted_query_base + OPTIMAL_THREADS_PER_BLOCK > hashtable_size) { + stop_flag = 1; + break; + } + + } + + if (stop_flag) { + break; + } + + hash_f_idx_base = hash_f_idx_base + num_warps; + + } + + __syncthreads(); + + int num_distint_query = query_counter[0]; + + if (num_distint_query > 0) { + for (int idx_base = 0; idx_base < num_distint_query; idx_base = idx_base + num_warps) { + int idx = idx_base + warp_idx; + if (idx < num_distint_query) { + int query_idx = inserted_query[idx]; + int batch_idx__query_idx = batch_idx * num_query + query_idx; + + int slot = set_lookup(hashtable_query, hashtable_size, query_idx); + int duplicate_count = hashtable_count[slot]; + + float weight = 0; + for (int weight_idx_base = 0; weight_idx_base < weight_dim; weight_idx_base = weight_idx_base + WARP_SIZE) { + int weight_dim_idx = weight_idx_base + warp_thread_idx; + float val = weight_buffer[weight_dim_idx] * query_weight[batch_idx__query_idx * weight_dim + weight_dim_idx]; + #pragma unroll + for (int offset = 1; offset < WARP_SIZE; offset = offset << 1) { + val += __shfl_xor_sync(FULL_MASK, val, offset); + } + weight = weight + val; + } + + weight = (float)duplicate_count * weight / float(num_hash_f); + + for (int value_idx_base = 0; value_idx_base < value_dim; value_idx_base = value_idx_base + WARP_SIZE) { + int value_dim_idx = value_idx_base + warp_thread_idx; + float val = value_buffer[value_dim_idx]; + atomicAdd(&cumulation_value[batch_idx__query_idx * value_dim + value_dim_idx], weight * val); + } + } + } + } else { + + // all computation is completed if num_distint_query == 0 + break; 
+ + } + + __syncthreads(); + + } + +} diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.h b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.h new file mode 100644 index 0000000000000000000000000000000000000000..b2adc0f735358d0fcb6a056e7d19ba745977e129 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_cuda.h @@ -0,0 +1,157 @@ +__global__ void fast_hash_ver1_cuda_kernel( + int *mask, // [batch_size, num_vector] + float *vector, // [batch_size, num_vector, vector_dim] + int *Dmat, // [3, num_part, vector_dim] + int *hash_code, // [batch_size, num_vector, num_hash_f] + int batch_size, + int num_vector, + int vector_dim, + int num_part, + int num_hash_f, + int hash_code_len +); + +__global__ void lsh_cumulation_ver1_step1_cuda_kernel( + int *key_mask, // [batch_size, num_key] + int *key_hash_code, // [batch_size, num_key, num_hash_f] + float *value, // [batch_size, num_key, value_dim] + float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, value_dim] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_key, + int value_dim, + int offset_warp +); + +__global__ void lsh_cumulation_ver1_step2_cuda_kernel( + int *query_mask, // [batch_size, num_query] + int *query_hash_code, // [batch_size, num_query, num_hash_f] + float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, value_dim] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_query, + int value_dim, + int offset_warp +); + +__global__ void lsh_weighted_cumulation_ver1_step1_cuda_kernel( + int *key_mask, // [batch_size, num_key] + int *key_hash_code, // [batch_size, num_key, num_hash_f] + float *key_weight, // [batch_size, num_key, weight_dim] + float *value, // [batch_size, num_key, value_dim] + float *hashtable_value, // [batch_size, num_hash_f, 
hashtable_capacity, WARP_SIZE] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_key, + int value_dim, + int weight_dim, + int offset_warp, + int weight_idx +); + +__global__ void lsh_weighted_cumulation_ver1_step2_cuda_kernel( + int *query_mask, // [batch_size, num_query] + int *query_hash_code, // [batch_size, num_query, num_hash_f] + float *query_weight, // [batch_size, num_query, weight_dim] + float *hashtable_value, // [batch_size, num_hash_f, hashtable_capacity, WARP_SIZE] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_query, + int value_dim, + int weight_dim, + int offset_warp, + int weight_idx +); + +__global__ void count_sort_step1_cuda_kernel( + int *key_mask, // [batch_size, num_key] + int *key_hash_code, // [batch_size, num_key, num_hash_f] + int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_key +); + +__global__ void count_sort_step2_cuda_kernel( + int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity] + int batch_size, + int num_hash_f, + int hashtable_capacity +); + +__global__ void count_sort_step3_cuda_kernel( + int *key_mask, // [batch_size, num_key] + int *key_hash_code, // [batch_size, num_key, num_hash_f] + int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity] + int *key_sorted_idxes, // [batch_size, num_hash_f, num_key] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_key +); + +__global__ void extract_query_info_cuda_kernel( + int *query_mask, // [batch_size, num_query] + int *query_hash_code, // [batch_size, num_query, num_hash_f] + int *count_sort_table, // [batch_size, num_hash_f, hashtable_capacity] + int *query_info, // [batch_size, num_query, 2, num_hash_f] + int batch_size, + int num_hash_f, + int hashtable_capacity, + int num_query +); + +__global__ void 
lsh_weighted_cumulation_ver2_step2_cuda_kernel( + int *query_mask, // [batch_size, num_query] + int *query_info, // [batch_size, num_query, 2, num_hash_f] + int *key_sorted_idxes, // [batch_size, num_hash_f, num_key] + float *query_weight, // [batch_size, num_query, weight_dim] + float *key_weight, // [batch_size, num_key, weight_dim] + float *value, // [batch_size, num_key, value_dim] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int num_query, + int num_key, + int value_dim, + int weight_dim +); + +__global__ void lsh_weighted_cumulation_ver3_step2_cuda_kernel( + int *query_sorted_idxes, // [batch_size, num_hash_f, num_query] + int *key_mask, // [batch_size, num_key] + int *key_info, // [batch_size, num_key, 2, num_hash_f] + float *query_weight, // [batch_size, num_query, weight_dim] + float *key_weight, // [batch_size, num_key, weight_dim] + float *value, // [batch_size, num_key, value_dim] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int num_query, + int num_key, + int value_dim, + int weight_dim +); + +__global__ void lsh_weighted_cumulation_ver4_step2_cuda_kernel( + int *query_sorted_idxes, // [batch_size, num_hash_f, num_query] + int *key_mask, // [batch_size, num_key] + int *key_info, // [batch_size, num_key, 2, num_hash_f] + float *query_weight, // [batch_size, num_query, weight_dim] + float *key_weight, // [batch_size, num_key, weight_dim] + float *value, // [batch_size, num_key, value_dim] + float *cumulation_value, // [batch_size, num_query, value_dim] + int batch_size, + int num_hash_f, + int num_query, + int num_key, + int value_dim, + int weight_dim +); diff --git a/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_torch.cpp b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_torch.cpp new file mode 100644 index 
0000000000000000000000000000000000000000..e150a2be604b28f600ab345a8cc9e97819cca416 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/transformers/kernels/yoso/fast_lsh_cumulation_torch.cpp @@ -0,0 +1,128 @@ +#include +#include +#include "fast_lsh_cumulation.h" +#include "common_cuda.h" +#include + +std::vector fast_hash( + at::Tensor query_mask, + at::Tensor query_vector, + at::Tensor key_mask, + at::Tensor key_vector, + int num_hash_f, + int hash_code_len, + bool use_cuda, + int version +) { + return fast_hash_ver1_kernel( + query_mask, + query_vector, + key_mask, + key_vector, + num_hash_f, + hash_code_len, + use_cuda + ); +} + +at::Tensor lsh_cumulation( + at::Tensor query_mask, // [batch_size, num_query] + at::Tensor query_hash_code, // [batch_size, num_query, num_hash_f] + at::Tensor key_mask, // [batch_size, num_key] + at::Tensor key_hash_code, // [batch_size, num_key, num_hash_f] + at::Tensor value, // [batch_size, num_key, value_dim] + int hashtable_capacity, + bool use_cuda, + int version +) { + return lsh_cumulation_ver1_kernel( + query_mask, + query_hash_code, + key_mask, + key_hash_code, + value, + hashtable_capacity, + use_cuda + ); +} + +at::Tensor lsh_weighted_cumulation( + at::Tensor query_mask, // [batch_size, num_query] + at::Tensor query_hash_code, // [batch_size, num_query, num_hash_f] + at::Tensor query_weight, // [batch_size, num_query, weight_dim] + at::Tensor key_mask, // [batch_size, num_key] + at::Tensor key_hash_code, // [batch_size, num_key, num_hash_f] + at::Tensor key_weight, // [batch_size, num_key, weight_dim] + at::Tensor value, // [batch_size, num_key, value_dim] + int hashtable_capacity, + bool use_cuda, + int version +) { + if (version == 1) { + return lsh_weighted_cumulation_ver1_kernel( + query_mask, + query_hash_code, + query_weight, + key_mask, + key_hash_code, + key_weight, + value, + hashtable_capacity, + use_cuda + ); + } else if (version == 2) { + return lsh_weighted_cumulation_ver2_kernel( + query_mask, + 
query_hash_code, + query_weight, + key_mask, + key_hash_code, + key_weight, + value, + hashtable_capacity, + use_cuda + ); + } else if (version == 3) { + return lsh_weighted_cumulation_ver3_kernel( + query_mask, + query_hash_code, + query_weight, + key_mask, + key_hash_code, + key_weight, + value, + hashtable_capacity, + use_cuda + ); + } else if (version == 4) { + return lsh_weighted_cumulation_ver4_kernel( + query_mask, + query_hash_code, + query_weight, + key_mask, + key_hash_code, + key_weight, + value, + hashtable_capacity, + use_cuda + ); + } else { + return lsh_weighted_cumulation_ver3_kernel( + query_mask, + query_hash_code, + query_weight, + key_mask, + key_hash_code, + key_weight, + value, + hashtable_capacity, + use_cuda + ); + } +} + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("fast_hash", &fast_hash, "Fast Hash (CUDA)"); + m.def("lsh_cumulation", &lsh_cumulation, "LSH Cumulation (CUDA)"); + m.def("lsh_weighted_cumulation", &lsh_weighted_cumulation, "LSH Weighted Cumulation (CUDA)"); +} diff --git a/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/INSTALLER b/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/METADATA b/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..0b7645966bf6aab57748b20bea01180902e7eab7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/METADATA @@ -0,0 +1,155 @@ +Metadata-Version: 2.3 +Name: urllib3 +Version: 2.2.3 +Summary: HTTP library with thread-safe connection pooling, file post, and more. 
+Project-URL: Changelog, https://github.com/urllib3/urllib3/blob/main/CHANGES.rst +Project-URL: Documentation, https://urllib3.readthedocs.io +Project-URL: Code, https://github.com/urllib3/urllib3 +Project-URL: Issue tracker, https://github.com/urllib3/urllib3/issues +Author-email: Andrey Petrov +Maintainer-email: Seth Michael Larson , Quentin Pradet , Illia Volochii +License-File: LICENSE.txt +Keywords: filepost,http,httplib,https,pooling,ssl,threadsafe,urllib +Classifier: Environment :: Web Environment +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: MIT License +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3 :: Only +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: Internet :: WWW/HTTP +Classifier: Topic :: Software Development :: Libraries +Requires-Python: >=3.8 +Provides-Extra: brotli +Requires-Dist: brotli>=1.0.9; (platform_python_implementation == 'CPython') and extra == 'brotli' +Requires-Dist: brotlicffi>=0.8.0; (platform_python_implementation != 'CPython') and extra == 'brotli' +Provides-Extra: h2 +Requires-Dist: h2<5,>=4; extra == 'h2' +Provides-Extra: socks +Requires-Dist: pysocks!=1.5.7,<2.0,>=1.5.6; extra == 'socks' +Provides-Extra: zstd +Requires-Dist: zstandard>=0.18.0; extra == 'zstd' +Description-Content-Type: text/markdown + +

+ +![urllib3](https://github.com/urllib3/urllib3/raw/main/docs/_static/banner_github.svg) + +

+ +

+ PyPI Version + Python Versions + Join our Discord + Coverage Status + Build Status on GitHub + Documentation Status
+ OpenSSF Scorecard + SLSA 3 + CII Best Practices +

+ +urllib3 is a powerful, *user-friendly* HTTP client for Python. Much of the +Python ecosystem already uses urllib3 and you should too. +urllib3 brings many critical features that are missing from the Python +standard libraries: + +- Thread safety. +- Connection pooling. +- Client-side SSL/TLS verification. +- File uploads with multipart encoding. +- Helpers for retrying requests and dealing with HTTP redirects. +- Support for gzip, deflate, brotli, and zstd encoding. +- Proxy support for HTTP and SOCKS. +- 100% test coverage. + +urllib3 is powerful and easy to use: + +```python3 +>>> import urllib3 +>>> resp = urllib3.request("GET", "http://httpbin.org/robots.txt") +>>> resp.status +200 +>>> resp.data +b"User-agent: *\nDisallow: /deny\n" +``` + +## Installing + +urllib3 can be installed with [pip](https://pip.pypa.io): + +```bash +$ python -m pip install urllib3 +``` + +Alternatively, you can grab the latest source code from [GitHub](https://github.com/urllib3/urllib3): + +```bash +$ git clone https://github.com/urllib3/urllib3.git +$ cd urllib3 +$ pip install . +``` + + +## Documentation + +urllib3 has usage and reference documentation at [urllib3.readthedocs.io](https://urllib3.readthedocs.io). + + +## Community + +urllib3 has a [community Discord channel](https://discord.gg/urllib3) for asking questions and +collaborating with other contributors. Drop by and say hello 👋 + + +## Contributing + +urllib3 happily accepts contributions. Please see our +[contributing documentation](https://urllib3.readthedocs.io/en/latest/contributing.html) +for some tips on getting started. + + +## Security Disclosures + +To report a security vulnerability, please use the +[Tidelift security contact](https://tidelift.com/security). +Tidelift will coordinate the fix and disclosure with maintainers. + + +## Maintainers + +- [@sethmlarson](https://github.com/sethmlarson) (Seth M. 
Larson) +- [@pquentin](https://github.com/pquentin) (Quentin Pradet) +- [@illia-v](https://github.com/illia-v) (Illia Volochii) +- [@theacodes](https://github.com/theacodes) (Thea Flowers) +- [@haikuginger](https://github.com/haikuginger) (Jess Shapiro) +- [@lukasa](https://github.com/lukasa) (Cory Benfield) +- [@sigmavirus24](https://github.com/sigmavirus24) (Ian Stapleton Cordasco) +- [@shazow](https://github.com/shazow) (Andrey Petrov) + +👋 + + +## Sponsorship + +If your company benefits from this library, please consider [sponsoring its +development](https://urllib3.readthedocs.io/en/latest/sponsors.html). + + +## For Enterprise + +Professional support for urllib3 is available as part of the [Tidelift +Subscription][1]. Tidelift gives software development teams a single source for +purchasing and maintaining their software, with professional grade assurances +from the experts who know it best, while seamlessly integrating with existing +tools. + +[1]: https://tidelift.com/subscription/pkg/pypi-urllib3?utm_source=pypi-urllib3&utm_medium=referral&utm_campaign=readme diff --git a/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/RECORD b/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..24c353b3330008fc5d28a07307ffe1bc199acd3e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/RECORD @@ -0,0 +1,80 @@ +urllib3-2.2.3.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +urllib3-2.2.3.dist-info/METADATA,sha256=NpyLMYu-QoAmQK6pmmgoZRuq0HPt-lf_J9zIuCGMRNY,6485 +urllib3-2.2.3.dist-info/RECORD,, +urllib3-2.2.3.dist-info/REQUESTED,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +urllib3-2.2.3.dist-info/WHEEL,sha256=1yFddiXMmvYK7QYTqtRNtX66WJ0Mz8PYEiEUoOUUxRY,87 +urllib3-2.2.3.dist-info/licenses/LICENSE.txt,sha256=Ew46ZNX91dCWp1JpRjSn2d8oRGnehuVzIQAmgEHj1oY,1093 
+urllib3/__init__.py,sha256=JMo1tg1nIV1AeJ2vENC_Txfl0e5h6Gzl9DGVk1rWRbo,6979 +urllib3/__pycache__/__init__.cpython-310.pyc,, +urllib3/__pycache__/_base_connection.cpython-310.pyc,, +urllib3/__pycache__/_collections.cpython-310.pyc,, +urllib3/__pycache__/_request_methods.cpython-310.pyc,, +urllib3/__pycache__/_version.cpython-310.pyc,, +urllib3/__pycache__/connection.cpython-310.pyc,, +urllib3/__pycache__/connectionpool.cpython-310.pyc,, +urllib3/__pycache__/exceptions.cpython-310.pyc,, +urllib3/__pycache__/fields.cpython-310.pyc,, +urllib3/__pycache__/filepost.cpython-310.pyc,, +urllib3/__pycache__/poolmanager.cpython-310.pyc,, +urllib3/__pycache__/response.cpython-310.pyc,, +urllib3/_base_connection.py,sha256=tH0ZlOxWKika-S9NW-MuIlI_PLFQUUmSnoE_9MeywkM,5652 +urllib3/_collections.py,sha256=aGhh9zCYce3o-5FW9DPSUay6O9LjHx8z6T7wDtdhrkY,17370 +urllib3/_request_methods.py,sha256=LTDxHenEP5XX-tVWBNkEkAgizCERkZF0JK-F-wCxqwI,9938 +urllib3/_version.py,sha256=gF7zM8AsdLRqhgteXesNHb7_t8ukr2zzkok2g1nvvhA,411 +urllib3/connection.py,sha256=QAwhOV5GOVdsFnMvX5Vv6fQ8f47EzfrROAArfxRdQOY,39508 +urllib3/connectionpool.py,sha256=5fPIHypPwlbKBASMs6bESTEJVEGlsj9FOY9_GGU2GpM,43393 +urllib3/contrib/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +urllib3/contrib/__pycache__/__init__.cpython-310.pyc,, +urllib3/contrib/__pycache__/pyopenssl.cpython-310.pyc,, +urllib3/contrib/__pycache__/socks.cpython-310.pyc,, +urllib3/contrib/emscripten/__init__.py,sha256=u6KNgzjlFZbuAAXa_ybCR7gQ71VJESnF-IIdDA73brw,733 +urllib3/contrib/emscripten/__pycache__/__init__.cpython-310.pyc,, +urllib3/contrib/emscripten/__pycache__/connection.cpython-310.pyc,, +urllib3/contrib/emscripten/__pycache__/fetch.cpython-310.pyc,, +urllib3/contrib/emscripten/__pycache__/request.cpython-310.pyc,, +urllib3/contrib/emscripten/__pycache__/response.cpython-310.pyc,, +urllib3/contrib/emscripten/connection.py,sha256=kaBe2tWt7Yy9vNUFRBV7CSyDnfhCYILGxju9KTZj8Sw,8755 
+urllib3/contrib/emscripten/emscripten_fetch_worker.js,sha256=CDfYF_9CDobtx2lGidyJ1zjDEvwNT5F-dchmVWXDh0E,3655 +urllib3/contrib/emscripten/fetch.py,sha256=ymwJlHBBuw6WTpKgPHpdmmrNBxlsr75HqoD4Rn27YXk,14131 +urllib3/contrib/emscripten/request.py,sha256=mL28szy1KvE3NJhWor5jNmarp8gwplDU-7gwGZY5g0Q,566 +urllib3/contrib/emscripten/response.py,sha256=wEYWPHCL-JsgCtpCpfnWGYA1-DcjDGpFGqWCXZLwbHY,10017 +urllib3/contrib/pyopenssl.py,sha256=9iP4j8JafA4hqtX9AgJ9zxrqI-icohGrqFqAMryoNdA,19338 +urllib3/contrib/socks.py,sha256=-iardc61GypsJzD6W6yuRS7KVCyfowcQrl_719H7lIM,7549 +urllib3/exceptions.py,sha256=RDaiudtR7rqbVKTKpLSgZBBtwaIqV7eZtervZV_mZag,9393 +urllib3/fields.py,sha256=8vi0PeRo_pE5chPmJA07LZtMkVls4UrBS1k2xM506jM,10843 +urllib3/filepost.py,sha256=-9qJT11cNGjO9dqnI20-oErZuTvNaM18xZZPCjZSbOE,2395 +urllib3/http2/__init__.py,sha256=xzrASH7R5ANRkPJOot5lGnATOq3KKuyXzI42rcnwmqs,1741 +urllib3/http2/__pycache__/__init__.cpython-310.pyc,, +urllib3/http2/__pycache__/connection.cpython-310.pyc,, +urllib3/http2/__pycache__/probe.cpython-310.pyc,, +urllib3/http2/connection.py,sha256=GNlp9BjI3DmfSKe1W0b9IqRBeM8Q13xd2MA3ROcJ3dY,12668 +urllib3/http2/probe.py,sha256=nnAkqbhAakOiF75rz7W0udZ38Eeh_uD8fjV74N73FEI,3014 +urllib3/poolmanager.py,sha256=2_L2AjVDgoQ0qBmYbX9u9QqyU1u5J37zQbtv_-ueZQA,22913 +urllib3/py.typed,sha256=UaCuPFa3H8UAakbt-5G8SPacldTOGvJv18pPjUJ5gDY,93 +urllib3/response.py,sha256=NS0rqwRmtwWtC_6XDqgDJN_uo-jEmBVzx0V6KCsHlwg,44801 +urllib3/util/__init__.py,sha256=-qeS0QceivazvBEKDNFCAI-6ACcdDOE4TMvo7SLNlAQ,1001 +urllib3/util/__pycache__/__init__.cpython-310.pyc,, +urllib3/util/__pycache__/connection.cpython-310.pyc,, +urllib3/util/__pycache__/proxy.cpython-310.pyc,, +urllib3/util/__pycache__/request.cpython-310.pyc,, +urllib3/util/__pycache__/response.cpython-310.pyc,, +urllib3/util/__pycache__/retry.cpython-310.pyc,, +urllib3/util/__pycache__/ssl_.cpython-310.pyc,, +urllib3/util/__pycache__/ssl_match_hostname.cpython-310.pyc,, 
+urllib3/util/__pycache__/ssltransport.cpython-310.pyc,, +urllib3/util/__pycache__/timeout.cpython-310.pyc,, +urllib3/util/__pycache__/url.cpython-310.pyc,, +urllib3/util/__pycache__/util.cpython-310.pyc,, +urllib3/util/__pycache__/wait.cpython-310.pyc,, +urllib3/util/connection.py,sha256=0o79-5NbRfpBDyoehGPLmCA544sCSiXvx0mF9xy3GG0,4458 +urllib3/util/proxy.py,sha256=seP8-Q5B6bB0dMtwPj-YcZZQ30vHuLqRu-tI0JZ2fzs,1148 +urllib3/util/request.py,sha256=UrZ2g3y3stGpH8rm-Sx8-ollgeiiKI496DZXRCwxb9o,8064 +urllib3/util/response.py,sha256=vQE639uoEhj1vpjEdxu5lNIhJCSUZkd7pqllUI0BZOA,3374 +urllib3/util/retry.py,sha256=bj-2YUqblxLlv8THg5fxww-DM54XCbjgZXIQ71XioCY,18459 +urllib3/util/ssl_.py,sha256=WN8a6KPPcvukbZ0MUlBAOkGil8bYKY8NoIYOw7QeOLI,19238 +urllib3/util/ssl_match_hostname.py,sha256=gaWqixoYtQ_GKO8fcRGFj3VXeMoqyxQQuUTPgWeiL_M,5812 +urllib3/util/ssltransport.py,sha256=wprBvhkgjddhhMwwEbHyP4lygHpP6SS-45Euh7oe48k,8887 +urllib3/util/timeout.py,sha256=4eT1FVeZZU7h7mYD1Jq2OXNe4fxekdNvhoWUkZusRpA,10346 +urllib3/util/url.py,sha256=wHORhp80RAXyTlAIkTqLFzSrkU7J34ZDxX-tN65MBZk,15213 +urllib3/util/util.py,sha256=j3lbZK1jPyiwD34T8IgJzdWEZVT-4E-0vYIJi9UjeNA,1146 +urllib3/util/wait.py,sha256=_ph8IrUR3sqPqi0OopQgJUlH4wzkGeM5CiyA7XGGtmI,4423 diff --git a/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/REQUESTED b/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/REQUESTED new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/WHEEL b/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..cdd68a497cdfa8d3f2b837225beacef711b85047 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.25.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git 
a/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/licenses/LICENSE.txt b/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/licenses/LICENSE.txt new file mode 100644 index 0000000000000000000000000000000000000000..e6183d0276b26c5b87aecccf8d0d5bcd7b1148d4 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/urllib3-2.2.3.dist-info/licenses/LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2008-2020 Andrey Petrov and contributors. + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.