diff --git a/parrot/lib/python3.10/asyncio/__pycache__/runners.cpython-310.pyc b/parrot/lib/python3.10/asyncio/__pycache__/runners.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..47de1d836530cd474c83848bd0e17b9a8f9c4af5 Binary files /dev/null and b/parrot/lib/python3.10/asyncio/__pycache__/runners.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/html/__init__.py b/parrot/lib/python3.10/html/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1543460ca33b0ae92b22780a4041dc36251f49a2 --- /dev/null +++ b/parrot/lib/python3.10/html/__init__.py @@ -0,0 +1,132 @@ +""" +General functions for HTML manipulation. +""" + +import re as _re +from html.entities import html5 as _html5 + + +__all__ = ['escape', 'unescape'] + + +def escape(s, quote=True): + """ + Replace special characters "&", "<" and ">" to HTML-safe sequences. + If the optional flag quote is true (the default), the quotation mark + characters, both double quote (") and single quote (') characters are also + translated. + """ + s = s.replace("&", "&") # Must be done first! 
s = s.replace("<", "&lt;") + s = s.replace(">", "&gt;") + if quote: + s = s.replace('"', "&quot;") + s = s.replace('\'', "&#x27;")
0x96, + 0x97, 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, + # 0xFDD0 to 0xFDEF + 0xfdd0, 0xfdd1, 0xfdd2, 0xfdd3, 0xfdd4, 0xfdd5, 0xfdd6, 0xfdd7, 0xfdd8, + 0xfdd9, 0xfdda, 0xfddb, 0xfddc, 0xfddd, 0xfdde, 0xfddf, 0xfde0, 0xfde1, + 0xfde2, 0xfde3, 0xfde4, 0xfde5, 0xfde6, 0xfde7, 0xfde8, 0xfde9, 0xfdea, + 0xfdeb, 0xfdec, 0xfded, 0xfdee, 0xfdef, + # others + 0xb, 0xfffe, 0xffff, 0x1fffe, 0x1ffff, 0x2fffe, 0x2ffff, 0x3fffe, 0x3ffff, + 0x4fffe, 0x4ffff, 0x5fffe, 0x5ffff, 0x6fffe, 0x6ffff, 0x7fffe, 0x7ffff, + 0x8fffe, 0x8ffff, 0x9fffe, 0x9ffff, 0xafffe, 0xaffff, 0xbfffe, 0xbffff, + 0xcfffe, 0xcffff, 0xdfffe, 0xdffff, 0xefffe, 0xeffff, 0xffffe, 0xfffff, + 0x10fffe, 0x10ffff +} + + +def _replace_charref(s): + s = s.group(1) + if s[0] == '#': + # numeric charref + if s[1] in 'xX': + num = int(s[2:].rstrip(';'), 16) + else: + num = int(s[1:].rstrip(';')) + if num in _invalid_charrefs: + return _invalid_charrefs[num] + if 0xD800 <= num <= 0xDFFF or num > 0x10FFFF: + return '\uFFFD' + if num in _invalid_codepoints: + return '' + return chr(num) + else: + # named charref + if s in _html5: + return _html5[s] + # find the longest matching name (as defined by the standard) + for x in range(len(s)-1, 1, -1): + if s[:x] in _html5: + return _html5[s[:x]] + s[x:] + else: + return '&' + s + + +_charref = _re.compile(r'&(#[0-9]+;?' + r'|#[xX][0-9a-fA-F]+;?' + r'|[^\t\n\f <&#;]{1,32};?)') + +def unescape(s): + """ + Convert all named and numeric character references (e.g. >, >, + &x3e;) in the string s to the corresponding unicode characters. + This function uses the rules defined by the HTML 5 standard + for both valid and invalid character references, and the list of + HTML 5 named character references defined in html.entities.html5. 
+ """ + if '&' not in s: + return s + return _charref.sub(_replace_charref, s) diff --git a/parrot/lib/python3.10/html/__pycache__/parser.cpython-310.pyc b/parrot/lib/python3.10/html/__pycache__/parser.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b2e175eab9b8f613e2c8f0e89032c45c62f8aa22 Binary files /dev/null and b/parrot/lib/python3.10/html/__pycache__/parser.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/html/entities.py b/parrot/lib/python3.10/html/entities.py new file mode 100644 index 0000000000000000000000000000000000000000..dc508631ac478943ce42099ea54358bb9c6eb775 --- /dev/null +++ b/parrot/lib/python3.10/html/entities.py @@ -0,0 +1,2510 @@ +"""HTML character entity references.""" + +__all__ = ['html5', 'name2codepoint', 'codepoint2name', 'entitydefs'] + + +# maps the HTML entity name to the Unicode code point +# from https://html.spec.whatwg.org/multipage/named-characters.html +name2codepoint = { + 'AElig': 0x00c6, # latin capital letter AE = latin capital ligature AE, U+00C6 ISOlat1 + 'Aacute': 0x00c1, # latin capital letter A with acute, U+00C1 ISOlat1 + 'Acirc': 0x00c2, # latin capital letter A with circumflex, U+00C2 ISOlat1 + 'Agrave': 0x00c0, # latin capital letter A with grave = latin capital letter A grave, U+00C0 ISOlat1 + 'Alpha': 0x0391, # greek capital letter alpha, U+0391 + 'Aring': 0x00c5, # latin capital letter A with ring above = latin capital letter A ring, U+00C5 ISOlat1 + 'Atilde': 0x00c3, # latin capital letter A with tilde, U+00C3 ISOlat1 + 'Auml': 0x00c4, # latin capital letter A with diaeresis, U+00C4 ISOlat1 + 'Beta': 0x0392, # greek capital letter beta, U+0392 + 'Ccedil': 0x00c7, # latin capital letter C with cedilla, U+00C7 ISOlat1 + 'Chi': 0x03a7, # greek capital letter chi, U+03A7 + 'Dagger': 0x2021, # double dagger, U+2021 ISOpub + 'Delta': 0x0394, # greek capital letter delta, U+0394 ISOgrk3 + 'ETH': 0x00d0, # latin capital letter ETH, U+00D0 ISOlat1 + 'Eacute': 0x00c9, # latin 
capital letter E with acute, U+00C9 ISOlat1 + 'Ecirc': 0x00ca, # latin capital letter E with circumflex, U+00CA ISOlat1 + 'Egrave': 0x00c8, # latin capital letter E with grave, U+00C8 ISOlat1 + 'Epsilon': 0x0395, # greek capital letter epsilon, U+0395 + 'Eta': 0x0397, # greek capital letter eta, U+0397 + 'Euml': 0x00cb, # latin capital letter E with diaeresis, U+00CB ISOlat1 + 'Gamma': 0x0393, # greek capital letter gamma, U+0393 ISOgrk3 + 'Iacute': 0x00cd, # latin capital letter I with acute, U+00CD ISOlat1 + 'Icirc': 0x00ce, # latin capital letter I with circumflex, U+00CE ISOlat1 + 'Igrave': 0x00cc, # latin capital letter I with grave, U+00CC ISOlat1 + 'Iota': 0x0399, # greek capital letter iota, U+0399 + 'Iuml': 0x00cf, # latin capital letter I with diaeresis, U+00CF ISOlat1 + 'Kappa': 0x039a, # greek capital letter kappa, U+039A + 'Lambda': 0x039b, # greek capital letter lambda, U+039B ISOgrk3 + 'Mu': 0x039c, # greek capital letter mu, U+039C + 'Ntilde': 0x00d1, # latin capital letter N with tilde, U+00D1 ISOlat1 + 'Nu': 0x039d, # greek capital letter nu, U+039D + 'OElig': 0x0152, # latin capital ligature OE, U+0152 ISOlat2 + 'Oacute': 0x00d3, # latin capital letter O with acute, U+00D3 ISOlat1 + 'Ocirc': 0x00d4, # latin capital letter O with circumflex, U+00D4 ISOlat1 + 'Ograve': 0x00d2, # latin capital letter O with grave, U+00D2 ISOlat1 + 'Omega': 0x03a9, # greek capital letter omega, U+03A9 ISOgrk3 + 'Omicron': 0x039f, # greek capital letter omicron, U+039F + 'Oslash': 0x00d8, # latin capital letter O with stroke = latin capital letter O slash, U+00D8 ISOlat1 + 'Otilde': 0x00d5, # latin capital letter O with tilde, U+00D5 ISOlat1 + 'Ouml': 0x00d6, # latin capital letter O with diaeresis, U+00D6 ISOlat1 + 'Phi': 0x03a6, # greek capital letter phi, U+03A6 ISOgrk3 + 'Pi': 0x03a0, # greek capital letter pi, U+03A0 ISOgrk3 + 'Prime': 0x2033, # double prime = seconds = inches, U+2033 ISOtech + 'Psi': 0x03a8, # greek capital letter psi, U+03A8 ISOgrk3 + 'Rho': 
0x03a1, # greek capital letter rho, U+03A1 + 'Scaron': 0x0160, # latin capital letter S with caron, U+0160 ISOlat2 + 'Sigma': 0x03a3, # greek capital letter sigma, U+03A3 ISOgrk3 + 'THORN': 0x00de, # latin capital letter THORN, U+00DE ISOlat1 + 'Tau': 0x03a4, # greek capital letter tau, U+03A4 + 'Theta': 0x0398, # greek capital letter theta, U+0398 ISOgrk3 + 'Uacute': 0x00da, # latin capital letter U with acute, U+00DA ISOlat1 + 'Ucirc': 0x00db, # latin capital letter U with circumflex, U+00DB ISOlat1 + 'Ugrave': 0x00d9, # latin capital letter U with grave, U+00D9 ISOlat1 + 'Upsilon': 0x03a5, # greek capital letter upsilon, U+03A5 ISOgrk3 + 'Uuml': 0x00dc, # latin capital letter U with diaeresis, U+00DC ISOlat1 + 'Xi': 0x039e, # greek capital letter xi, U+039E ISOgrk3 + 'Yacute': 0x00dd, # latin capital letter Y with acute, U+00DD ISOlat1 + 'Yuml': 0x0178, # latin capital letter Y with diaeresis, U+0178 ISOlat2 + 'Zeta': 0x0396, # greek capital letter zeta, U+0396 + 'aacute': 0x00e1, # latin small letter a with acute, U+00E1 ISOlat1 + 'acirc': 0x00e2, # latin small letter a with circumflex, U+00E2 ISOlat1 + 'acute': 0x00b4, # acute accent = spacing acute, U+00B4 ISOdia + 'aelig': 0x00e6, # latin small letter ae = latin small ligature ae, U+00E6 ISOlat1 + 'agrave': 0x00e0, # latin small letter a with grave = latin small letter a grave, U+00E0 ISOlat1 + 'alefsym': 0x2135, # alef symbol = first transfinite cardinal, U+2135 NEW + 'alpha': 0x03b1, # greek small letter alpha, U+03B1 ISOgrk3 + 'amp': 0x0026, # ampersand, U+0026 ISOnum + 'and': 0x2227, # logical and = wedge, U+2227 ISOtech + 'ang': 0x2220, # angle, U+2220 ISOamso + 'aring': 0x00e5, # latin small letter a with ring above = latin small letter a ring, U+00E5 ISOlat1 + 'asymp': 0x2248, # almost equal to = asymptotic to, U+2248 ISOamsr + 'atilde': 0x00e3, # latin small letter a with tilde, U+00E3 ISOlat1 + 'auml': 0x00e4, # latin small letter a with diaeresis, U+00E4 ISOlat1 + 'bdquo': 0x201e, # double low-9 
quotation mark, U+201E NEW + 'beta': 0x03b2, # greek small letter beta, U+03B2 ISOgrk3 + 'brvbar': 0x00a6, # broken bar = broken vertical bar, U+00A6 ISOnum + 'bull': 0x2022, # bullet = black small circle, U+2022 ISOpub + 'cap': 0x2229, # intersection = cap, U+2229 ISOtech + 'ccedil': 0x00e7, # latin small letter c with cedilla, U+00E7 ISOlat1 + 'cedil': 0x00b8, # cedilla = spacing cedilla, U+00B8 ISOdia + 'cent': 0x00a2, # cent sign, U+00A2 ISOnum + 'chi': 0x03c7, # greek small letter chi, U+03C7 ISOgrk3 + 'circ': 0x02c6, # modifier letter circumflex accent, U+02C6 ISOpub + 'clubs': 0x2663, # black club suit = shamrock, U+2663 ISOpub + 'cong': 0x2245, # approximately equal to, U+2245 ISOtech + 'copy': 0x00a9, # copyright sign, U+00A9 ISOnum + 'crarr': 0x21b5, # downwards arrow with corner leftwards = carriage return, U+21B5 NEW + 'cup': 0x222a, # union = cup, U+222A ISOtech + 'curren': 0x00a4, # currency sign, U+00A4 ISOnum + 'dArr': 0x21d3, # downwards double arrow, U+21D3 ISOamsa + 'dagger': 0x2020, # dagger, U+2020 ISOpub + 'darr': 0x2193, # downwards arrow, U+2193 ISOnum + 'deg': 0x00b0, # degree sign, U+00B0 ISOnum + 'delta': 0x03b4, # greek small letter delta, U+03B4 ISOgrk3 + 'diams': 0x2666, # black diamond suit, U+2666 ISOpub + 'divide': 0x00f7, # division sign, U+00F7 ISOnum + 'eacute': 0x00e9, # latin small letter e with acute, U+00E9 ISOlat1 + 'ecirc': 0x00ea, # latin small letter e with circumflex, U+00EA ISOlat1 + 'egrave': 0x00e8, # latin small letter e with grave, U+00E8 ISOlat1 + 'empty': 0x2205, # empty set = null set = diameter, U+2205 ISOamso + 'emsp': 0x2003, # em space, U+2003 ISOpub + 'ensp': 0x2002, # en space, U+2002 ISOpub + 'epsilon': 0x03b5, # greek small letter epsilon, U+03B5 ISOgrk3 + 'equiv': 0x2261, # identical to, U+2261 ISOtech + 'eta': 0x03b7, # greek small letter eta, U+03B7 ISOgrk3 + 'eth': 0x00f0, # latin small letter eth, U+00F0 ISOlat1 + 'euml': 0x00eb, # latin small letter e with diaeresis, U+00EB ISOlat1 + 'euro': 0x20ac, 
# euro sign, U+20AC NEW + 'exist': 0x2203, # there exists, U+2203 ISOtech + 'fnof': 0x0192, # latin small f with hook = function = florin, U+0192 ISOtech + 'forall': 0x2200, # for all, U+2200 ISOtech + 'frac12': 0x00bd, # vulgar fraction one half = fraction one half, U+00BD ISOnum + 'frac14': 0x00bc, # vulgar fraction one quarter = fraction one quarter, U+00BC ISOnum + 'frac34': 0x00be, # vulgar fraction three quarters = fraction three quarters, U+00BE ISOnum + 'frasl': 0x2044, # fraction slash, U+2044 NEW + 'gamma': 0x03b3, # greek small letter gamma, U+03B3 ISOgrk3 + 'ge': 0x2265, # greater-than or equal to, U+2265 ISOtech + 'gt': 0x003e, # greater-than sign, U+003E ISOnum + 'hArr': 0x21d4, # left right double arrow, U+21D4 ISOamsa + 'harr': 0x2194, # left right arrow, U+2194 ISOamsa + 'hearts': 0x2665, # black heart suit = valentine, U+2665 ISOpub + 'hellip': 0x2026, # horizontal ellipsis = three dot leader, U+2026 ISOpub + 'iacute': 0x00ed, # latin small letter i with acute, U+00ED ISOlat1 + 'icirc': 0x00ee, # latin small letter i with circumflex, U+00EE ISOlat1 + 'iexcl': 0x00a1, # inverted exclamation mark, U+00A1 ISOnum + 'igrave': 0x00ec, # latin small letter i with grave, U+00EC ISOlat1 + 'image': 0x2111, # blackletter capital I = imaginary part, U+2111 ISOamso + 'infin': 0x221e, # infinity, U+221E ISOtech + 'int': 0x222b, # integral, U+222B ISOtech + 'iota': 0x03b9, # greek small letter iota, U+03B9 ISOgrk3 + 'iquest': 0x00bf, # inverted question mark = turned question mark, U+00BF ISOnum + 'isin': 0x2208, # element of, U+2208 ISOtech + 'iuml': 0x00ef, # latin small letter i with diaeresis, U+00EF ISOlat1 + 'kappa': 0x03ba, # greek small letter kappa, U+03BA ISOgrk3 + 'lArr': 0x21d0, # leftwards double arrow, U+21D0 ISOtech + 'lambda': 0x03bb, # greek small letter lambda, U+03BB ISOgrk3 + 'lang': 0x2329, # left-pointing angle bracket = bra, U+2329 ISOtech + 'laquo': 0x00ab, # left-pointing double angle quotation mark = left pointing guillemet, U+00AB 
ISOnum + 'larr': 0x2190, # leftwards arrow, U+2190 ISOnum + 'lceil': 0x2308, # left ceiling = apl upstile, U+2308 ISOamsc + 'ldquo': 0x201c, # left double quotation mark, U+201C ISOnum + 'le': 0x2264, # less-than or equal to, U+2264 ISOtech + 'lfloor': 0x230a, # left floor = apl downstile, U+230A ISOamsc + 'lowast': 0x2217, # asterisk operator, U+2217 ISOtech + 'loz': 0x25ca, # lozenge, U+25CA ISOpub + 'lrm': 0x200e, # left-to-right mark, U+200E NEW RFC 2070 + 'lsaquo': 0x2039, # single left-pointing angle quotation mark, U+2039 ISO proposed + 'lsquo': 0x2018, # left single quotation mark, U+2018 ISOnum + 'lt': 0x003c, # less-than sign, U+003C ISOnum + 'macr': 0x00af, # macron = spacing macron = overline = APL overbar, U+00AF ISOdia + 'mdash': 0x2014, # em dash, U+2014 ISOpub + 'micro': 0x00b5, # micro sign, U+00B5 ISOnum + 'middot': 0x00b7, # middle dot = Georgian comma = Greek middle dot, U+00B7 ISOnum + 'minus': 0x2212, # minus sign, U+2212 ISOtech + 'mu': 0x03bc, # greek small letter mu, U+03BC ISOgrk3 + 'nabla': 0x2207, # nabla = backward difference, U+2207 ISOtech + 'nbsp': 0x00a0, # no-break space = non-breaking space, U+00A0 ISOnum + 'ndash': 0x2013, # en dash, U+2013 ISOpub + 'ne': 0x2260, # not equal to, U+2260 ISOtech + 'ni': 0x220b, # contains as member, U+220B ISOtech + 'not': 0x00ac, # not sign, U+00AC ISOnum + 'notin': 0x2209, # not an element of, U+2209 ISOtech + 'nsub': 0x2284, # not a subset of, U+2284 ISOamsn + 'ntilde': 0x00f1, # latin small letter n with tilde, U+00F1 ISOlat1 + 'nu': 0x03bd, # greek small letter nu, U+03BD ISOgrk3 + 'oacute': 0x00f3, # latin small letter o with acute, U+00F3 ISOlat1 + 'ocirc': 0x00f4, # latin small letter o with circumflex, U+00F4 ISOlat1 + 'oelig': 0x0153, # latin small ligature oe, U+0153 ISOlat2 + 'ograve': 0x00f2, # latin small letter o with grave, U+00F2 ISOlat1 + 'oline': 0x203e, # overline = spacing overscore, U+203E NEW + 'omega': 0x03c9, # greek small letter omega, U+03C9 ISOgrk3 + 'omicron': 0x03bf, # 
greek small letter omicron, U+03BF NEW + 'oplus': 0x2295, # circled plus = direct sum, U+2295 ISOamsb + 'or': 0x2228, # logical or = vee, U+2228 ISOtech + 'ordf': 0x00aa, # feminine ordinal indicator, U+00AA ISOnum + 'ordm': 0x00ba, # masculine ordinal indicator, U+00BA ISOnum + 'oslash': 0x00f8, # latin small letter o with stroke, = latin small letter o slash, U+00F8 ISOlat1 + 'otilde': 0x00f5, # latin small letter o with tilde, U+00F5 ISOlat1 + 'otimes': 0x2297, # circled times = vector product, U+2297 ISOamsb + 'ouml': 0x00f6, # latin small letter o with diaeresis, U+00F6 ISOlat1 + 'para': 0x00b6, # pilcrow sign = paragraph sign, U+00B6 ISOnum + 'part': 0x2202, # partial differential, U+2202 ISOtech + 'permil': 0x2030, # per mille sign, U+2030 ISOtech + 'perp': 0x22a5, # up tack = orthogonal to = perpendicular, U+22A5 ISOtech + 'phi': 0x03c6, # greek small letter phi, U+03C6 ISOgrk3 + 'pi': 0x03c0, # greek small letter pi, U+03C0 ISOgrk3 + 'piv': 0x03d6, # greek pi symbol, U+03D6 ISOgrk3 + 'plusmn': 0x00b1, # plus-minus sign = plus-or-minus sign, U+00B1 ISOnum + 'pound': 0x00a3, # pound sign, U+00A3 ISOnum + 'prime': 0x2032, # prime = minutes = feet, U+2032 ISOtech + 'prod': 0x220f, # n-ary product = product sign, U+220F ISOamsb + 'prop': 0x221d, # proportional to, U+221D ISOtech + 'psi': 0x03c8, # greek small letter psi, U+03C8 ISOgrk3 + 'quot': 0x0022, # quotation mark = APL quote, U+0022 ISOnum + 'rArr': 0x21d2, # rightwards double arrow, U+21D2 ISOtech + 'radic': 0x221a, # square root = radical sign, U+221A ISOtech + 'rang': 0x232a, # right-pointing angle bracket = ket, U+232A ISOtech + 'raquo': 0x00bb, # right-pointing double angle quotation mark = right pointing guillemet, U+00BB ISOnum + 'rarr': 0x2192, # rightwards arrow, U+2192 ISOnum + 'rceil': 0x2309, # right ceiling, U+2309 ISOamsc + 'rdquo': 0x201d, # right double quotation mark, U+201D ISOnum + 'real': 0x211c, # blackletter capital R = real part symbol, U+211C ISOamso + 'reg': 0x00ae, # registered 
sign = registered trade mark sign, U+00AE ISOnum + 'rfloor': 0x230b, # right floor, U+230B ISOamsc + 'rho': 0x03c1, # greek small letter rho, U+03C1 ISOgrk3 + 'rlm': 0x200f, # right-to-left mark, U+200F NEW RFC 2070 + 'rsaquo': 0x203a, # single right-pointing angle quotation mark, U+203A ISO proposed + 'rsquo': 0x2019, # right single quotation mark, U+2019 ISOnum + 'sbquo': 0x201a, # single low-9 quotation mark, U+201A NEW + 'scaron': 0x0161, # latin small letter s with caron, U+0161 ISOlat2 + 'sdot': 0x22c5, # dot operator, U+22C5 ISOamsb + 'sect': 0x00a7, # section sign, U+00A7 ISOnum + 'shy': 0x00ad, # soft hyphen = discretionary hyphen, U+00AD ISOnum + 'sigma': 0x03c3, # greek small letter sigma, U+03C3 ISOgrk3 + 'sigmaf': 0x03c2, # greek small letter final sigma, U+03C2 ISOgrk3 + 'sim': 0x223c, # tilde operator = varies with = similar to, U+223C ISOtech + 'spades': 0x2660, # black spade suit, U+2660 ISOpub + 'sub': 0x2282, # subset of, U+2282 ISOtech + 'sube': 0x2286, # subset of or equal to, U+2286 ISOtech + 'sum': 0x2211, # n-ary summation, U+2211 ISOamsb + 'sup': 0x2283, # superset of, U+2283 ISOtech + 'sup1': 0x00b9, # superscript one = superscript digit one, U+00B9 ISOnum + 'sup2': 0x00b2, # superscript two = superscript digit two = squared, U+00B2 ISOnum + 'sup3': 0x00b3, # superscript three = superscript digit three = cubed, U+00B3 ISOnum + 'supe': 0x2287, # superset of or equal to, U+2287 ISOtech + 'szlig': 0x00df, # latin small letter sharp s = ess-zed, U+00DF ISOlat1 + 'tau': 0x03c4, # greek small letter tau, U+03C4 ISOgrk3 + 'there4': 0x2234, # therefore, U+2234 ISOtech + 'theta': 0x03b8, # greek small letter theta, U+03B8 ISOgrk3 + 'thetasym': 0x03d1, # greek small letter theta symbol, U+03D1 NEW + 'thinsp': 0x2009, # thin space, U+2009 ISOpub + 'thorn': 0x00fe, # latin small letter thorn with, U+00FE ISOlat1 + 'tilde': 0x02dc, # small tilde, U+02DC ISOdia + 'times': 0x00d7, # multiplication sign, U+00D7 ISOnum + 'trade': 0x2122, # trade mark sign, 
U+2122 ISOnum + 'uArr': 0x21d1, # upwards double arrow, U+21D1 ISOamsa + 'uacute': 0x00fa, # latin small letter u with acute, U+00FA ISOlat1 + 'uarr': 0x2191, # upwards arrow, U+2191 ISOnum + 'ucirc': 0x00fb, # latin small letter u with circumflex, U+00FB ISOlat1 + 'ugrave': 0x00f9, # latin small letter u with grave, U+00F9 ISOlat1 + 'uml': 0x00a8, # diaeresis = spacing diaeresis, U+00A8 ISOdia + 'upsih': 0x03d2, # greek upsilon with hook symbol, U+03D2 NEW + 'upsilon': 0x03c5, # greek small letter upsilon, U+03C5 ISOgrk3 + 'uuml': 0x00fc, # latin small letter u with diaeresis, U+00FC ISOlat1 + 'weierp': 0x2118, # script capital P = power set = Weierstrass p, U+2118 ISOamso + 'xi': 0x03be, # greek small letter xi, U+03BE ISOgrk3 + 'yacute': 0x00fd, # latin small letter y with acute, U+00FD ISOlat1 + 'yen': 0x00a5, # yen sign = yuan sign, U+00A5 ISOnum + 'yuml': 0x00ff, # latin small letter y with diaeresis, U+00FF ISOlat1 + 'zeta': 0x03b6, # greek small letter zeta, U+03B6 ISOgrk3 + 'zwj': 0x200d, # zero width joiner, U+200D NEW RFC 2070 + 'zwnj': 0x200c, # zero width non-joiner, U+200C NEW RFC 2070 +} + + +# maps the HTML5 named character references to the equivalent Unicode character(s) +html5 = { + 'Aacute': '\xc1', + 'aacute': '\xe1', + 'Aacute;': '\xc1', + 'aacute;': '\xe1', + 'Abreve;': '\u0102', + 'abreve;': '\u0103', + 'ac;': '\u223e', + 'acd;': '\u223f', + 'acE;': '\u223e\u0333', + 'Acirc': '\xc2', + 'acirc': '\xe2', + 'Acirc;': '\xc2', + 'acirc;': '\xe2', + 'acute': '\xb4', + 'acute;': '\xb4', + 'Acy;': '\u0410', + 'acy;': '\u0430', + 'AElig': '\xc6', + 'aelig': '\xe6', + 'AElig;': '\xc6', + 'aelig;': '\xe6', + 'af;': '\u2061', + 'Afr;': '\U0001d504', + 'afr;': '\U0001d51e', + 'Agrave': '\xc0', + 'agrave': '\xe0', + 'Agrave;': '\xc0', + 'agrave;': '\xe0', + 'alefsym;': '\u2135', + 'aleph;': '\u2135', + 'Alpha;': '\u0391', + 'alpha;': '\u03b1', + 'Amacr;': '\u0100', + 'amacr;': '\u0101', + 'amalg;': '\u2a3f', + 'AMP': '&', + 'amp': '&', + 'AMP;': '&', + 
'amp;': '&', + 'And;': '\u2a53', + 'and;': '\u2227', + 'andand;': '\u2a55', + 'andd;': '\u2a5c', + 'andslope;': '\u2a58', + 'andv;': '\u2a5a', + 'ang;': '\u2220', + 'ange;': '\u29a4', + 'angle;': '\u2220', + 'angmsd;': '\u2221', + 'angmsdaa;': '\u29a8', + 'angmsdab;': '\u29a9', + 'angmsdac;': '\u29aa', + 'angmsdad;': '\u29ab', + 'angmsdae;': '\u29ac', + 'angmsdaf;': '\u29ad', + 'angmsdag;': '\u29ae', + 'angmsdah;': '\u29af', + 'angrt;': '\u221f', + 'angrtvb;': '\u22be', + 'angrtvbd;': '\u299d', + 'angsph;': '\u2222', + 'angst;': '\xc5', + 'angzarr;': '\u237c', + 'Aogon;': '\u0104', + 'aogon;': '\u0105', + 'Aopf;': '\U0001d538', + 'aopf;': '\U0001d552', + 'ap;': '\u2248', + 'apacir;': '\u2a6f', + 'apE;': '\u2a70', + 'ape;': '\u224a', + 'apid;': '\u224b', + 'apos;': "'", + 'ApplyFunction;': '\u2061', + 'approx;': '\u2248', + 'approxeq;': '\u224a', + 'Aring': '\xc5', + 'aring': '\xe5', + 'Aring;': '\xc5', + 'aring;': '\xe5', + 'Ascr;': '\U0001d49c', + 'ascr;': '\U0001d4b6', + 'Assign;': '\u2254', + 'ast;': '*', + 'asymp;': '\u2248', + 'asympeq;': '\u224d', + 'Atilde': '\xc3', + 'atilde': '\xe3', + 'Atilde;': '\xc3', + 'atilde;': '\xe3', + 'Auml': '\xc4', + 'auml': '\xe4', + 'Auml;': '\xc4', + 'auml;': '\xe4', + 'awconint;': '\u2233', + 'awint;': '\u2a11', + 'backcong;': '\u224c', + 'backepsilon;': '\u03f6', + 'backprime;': '\u2035', + 'backsim;': '\u223d', + 'backsimeq;': '\u22cd', + 'Backslash;': '\u2216', + 'Barv;': '\u2ae7', + 'barvee;': '\u22bd', + 'Barwed;': '\u2306', + 'barwed;': '\u2305', + 'barwedge;': '\u2305', + 'bbrk;': '\u23b5', + 'bbrktbrk;': '\u23b6', + 'bcong;': '\u224c', + 'Bcy;': '\u0411', + 'bcy;': '\u0431', + 'bdquo;': '\u201e', + 'becaus;': '\u2235', + 'Because;': '\u2235', + 'because;': '\u2235', + 'bemptyv;': '\u29b0', + 'bepsi;': '\u03f6', + 'bernou;': '\u212c', + 'Bernoullis;': '\u212c', + 'Beta;': '\u0392', + 'beta;': '\u03b2', + 'beth;': '\u2136', + 'between;': '\u226c', + 'Bfr;': '\U0001d505', + 'bfr;': '\U0001d51f', + 'bigcap;': '\u22c2', + 
'bigcirc;': '\u25ef', + 'bigcup;': '\u22c3', + 'bigodot;': '\u2a00', + 'bigoplus;': '\u2a01', + 'bigotimes;': '\u2a02', + 'bigsqcup;': '\u2a06', + 'bigstar;': '\u2605', + 'bigtriangledown;': '\u25bd', + 'bigtriangleup;': '\u25b3', + 'biguplus;': '\u2a04', + 'bigvee;': '\u22c1', + 'bigwedge;': '\u22c0', + 'bkarow;': '\u290d', + 'blacklozenge;': '\u29eb', + 'blacksquare;': '\u25aa', + 'blacktriangle;': '\u25b4', + 'blacktriangledown;': '\u25be', + 'blacktriangleleft;': '\u25c2', + 'blacktriangleright;': '\u25b8', + 'blank;': '\u2423', + 'blk12;': '\u2592', + 'blk14;': '\u2591', + 'blk34;': '\u2593', + 'block;': '\u2588', + 'bne;': '=\u20e5', + 'bnequiv;': '\u2261\u20e5', + 'bNot;': '\u2aed', + 'bnot;': '\u2310', + 'Bopf;': '\U0001d539', + 'bopf;': '\U0001d553', + 'bot;': '\u22a5', + 'bottom;': '\u22a5', + 'bowtie;': '\u22c8', + 'boxbox;': '\u29c9', + 'boxDL;': '\u2557', + 'boxDl;': '\u2556', + 'boxdL;': '\u2555', + 'boxdl;': '\u2510', + 'boxDR;': '\u2554', + 'boxDr;': '\u2553', + 'boxdR;': '\u2552', + 'boxdr;': '\u250c', + 'boxH;': '\u2550', + 'boxh;': '\u2500', + 'boxHD;': '\u2566', + 'boxHd;': '\u2564', + 'boxhD;': '\u2565', + 'boxhd;': '\u252c', + 'boxHU;': '\u2569', + 'boxHu;': '\u2567', + 'boxhU;': '\u2568', + 'boxhu;': '\u2534', + 'boxminus;': '\u229f', + 'boxplus;': '\u229e', + 'boxtimes;': '\u22a0', + 'boxUL;': '\u255d', + 'boxUl;': '\u255c', + 'boxuL;': '\u255b', + 'boxul;': '\u2518', + 'boxUR;': '\u255a', + 'boxUr;': '\u2559', + 'boxuR;': '\u2558', + 'boxur;': '\u2514', + 'boxV;': '\u2551', + 'boxv;': '\u2502', + 'boxVH;': '\u256c', + 'boxVh;': '\u256b', + 'boxvH;': '\u256a', + 'boxvh;': '\u253c', + 'boxVL;': '\u2563', + 'boxVl;': '\u2562', + 'boxvL;': '\u2561', + 'boxvl;': '\u2524', + 'boxVR;': '\u2560', + 'boxVr;': '\u255f', + 'boxvR;': '\u255e', + 'boxvr;': '\u251c', + 'bprime;': '\u2035', + 'Breve;': '\u02d8', + 'breve;': '\u02d8', + 'brvbar': '\xa6', + 'brvbar;': '\xa6', + 'Bscr;': '\u212c', + 'bscr;': '\U0001d4b7', + 'bsemi;': '\u204f', + 'bsim;': 
'\u223d', + 'bsime;': '\u22cd', + 'bsol;': '\\', + 'bsolb;': '\u29c5', + 'bsolhsub;': '\u27c8', + 'bull;': '\u2022', + 'bullet;': '\u2022', + 'bump;': '\u224e', + 'bumpE;': '\u2aae', + 'bumpe;': '\u224f', + 'Bumpeq;': '\u224e', + 'bumpeq;': '\u224f', + 'Cacute;': '\u0106', + 'cacute;': '\u0107', + 'Cap;': '\u22d2', + 'cap;': '\u2229', + 'capand;': '\u2a44', + 'capbrcup;': '\u2a49', + 'capcap;': '\u2a4b', + 'capcup;': '\u2a47', + 'capdot;': '\u2a40', + 'CapitalDifferentialD;': '\u2145', + 'caps;': '\u2229\ufe00', + 'caret;': '\u2041', + 'caron;': '\u02c7', + 'Cayleys;': '\u212d', + 'ccaps;': '\u2a4d', + 'Ccaron;': '\u010c', + 'ccaron;': '\u010d', + 'Ccedil': '\xc7', + 'ccedil': '\xe7', + 'Ccedil;': '\xc7', + 'ccedil;': '\xe7', + 'Ccirc;': '\u0108', + 'ccirc;': '\u0109', + 'Cconint;': '\u2230', + 'ccups;': '\u2a4c', + 'ccupssm;': '\u2a50', + 'Cdot;': '\u010a', + 'cdot;': '\u010b', + 'cedil': '\xb8', + 'cedil;': '\xb8', + 'Cedilla;': '\xb8', + 'cemptyv;': '\u29b2', + 'cent': '\xa2', + 'cent;': '\xa2', + 'CenterDot;': '\xb7', + 'centerdot;': '\xb7', + 'Cfr;': '\u212d', + 'cfr;': '\U0001d520', + 'CHcy;': '\u0427', + 'chcy;': '\u0447', + 'check;': '\u2713', + 'checkmark;': '\u2713', + 'Chi;': '\u03a7', + 'chi;': '\u03c7', + 'cir;': '\u25cb', + 'circ;': '\u02c6', + 'circeq;': '\u2257', + 'circlearrowleft;': '\u21ba', + 'circlearrowright;': '\u21bb', + 'circledast;': '\u229b', + 'circledcirc;': '\u229a', + 'circleddash;': '\u229d', + 'CircleDot;': '\u2299', + 'circledR;': '\xae', + 'circledS;': '\u24c8', + 'CircleMinus;': '\u2296', + 'CirclePlus;': '\u2295', + 'CircleTimes;': '\u2297', + 'cirE;': '\u29c3', + 'cire;': '\u2257', + 'cirfnint;': '\u2a10', + 'cirmid;': '\u2aef', + 'cirscir;': '\u29c2', + 'ClockwiseContourIntegral;': '\u2232', + 'CloseCurlyDoubleQuote;': '\u201d', + 'CloseCurlyQuote;': '\u2019', + 'clubs;': '\u2663', + 'clubsuit;': '\u2663', + 'Colon;': '\u2237', + 'colon;': ':', + 'Colone;': '\u2a74', + 'colone;': '\u2254', + 'coloneq;': '\u2254', + 'comma;': 
',', + 'commat;': '@', + 'comp;': '\u2201', + 'compfn;': '\u2218', + 'complement;': '\u2201', + 'complexes;': '\u2102', + 'cong;': '\u2245', + 'congdot;': '\u2a6d', + 'Congruent;': '\u2261', + 'Conint;': '\u222f', + 'conint;': '\u222e', + 'ContourIntegral;': '\u222e', + 'Copf;': '\u2102', + 'copf;': '\U0001d554', + 'coprod;': '\u2210', + 'Coproduct;': '\u2210', + 'COPY': '\xa9', + 'copy': '\xa9', + 'COPY;': '\xa9', + 'copy;': '\xa9', + 'copysr;': '\u2117', + 'CounterClockwiseContourIntegral;': '\u2233', + 'crarr;': '\u21b5', + 'Cross;': '\u2a2f', + 'cross;': '\u2717', + 'Cscr;': '\U0001d49e', + 'cscr;': '\U0001d4b8', + 'csub;': '\u2acf', + 'csube;': '\u2ad1', + 'csup;': '\u2ad0', + 'csupe;': '\u2ad2', + 'ctdot;': '\u22ef', + 'cudarrl;': '\u2938', + 'cudarrr;': '\u2935', + 'cuepr;': '\u22de', + 'cuesc;': '\u22df', + 'cularr;': '\u21b6', + 'cularrp;': '\u293d', + 'Cup;': '\u22d3', + 'cup;': '\u222a', + 'cupbrcap;': '\u2a48', + 'CupCap;': '\u224d', + 'cupcap;': '\u2a46', + 'cupcup;': '\u2a4a', + 'cupdot;': '\u228d', + 'cupor;': '\u2a45', + 'cups;': '\u222a\ufe00', + 'curarr;': '\u21b7', + 'curarrm;': '\u293c', + 'curlyeqprec;': '\u22de', + 'curlyeqsucc;': '\u22df', + 'curlyvee;': '\u22ce', + 'curlywedge;': '\u22cf', + 'curren': '\xa4', + 'curren;': '\xa4', + 'curvearrowleft;': '\u21b6', + 'curvearrowright;': '\u21b7', + 'cuvee;': '\u22ce', + 'cuwed;': '\u22cf', + 'cwconint;': '\u2232', + 'cwint;': '\u2231', + 'cylcty;': '\u232d', + 'Dagger;': '\u2021', + 'dagger;': '\u2020', + 'daleth;': '\u2138', + 'Darr;': '\u21a1', + 'dArr;': '\u21d3', + 'darr;': '\u2193', + 'dash;': '\u2010', + 'Dashv;': '\u2ae4', + 'dashv;': '\u22a3', + 'dbkarow;': '\u290f', + 'dblac;': '\u02dd', + 'Dcaron;': '\u010e', + 'dcaron;': '\u010f', + 'Dcy;': '\u0414', + 'dcy;': '\u0434', + 'DD;': '\u2145', + 'dd;': '\u2146', + 'ddagger;': '\u2021', + 'ddarr;': '\u21ca', + 'DDotrahd;': '\u2911', + 'ddotseq;': '\u2a77', + 'deg': '\xb0', + 'deg;': '\xb0', + 'Del;': '\u2207', + 'Delta;': '\u0394', + 
'delta;': '\u03b4', + 'demptyv;': '\u29b1', + 'dfisht;': '\u297f', + 'Dfr;': '\U0001d507', + 'dfr;': '\U0001d521', + 'dHar;': '\u2965', + 'dharl;': '\u21c3', + 'dharr;': '\u21c2', + 'DiacriticalAcute;': '\xb4', + 'DiacriticalDot;': '\u02d9', + 'DiacriticalDoubleAcute;': '\u02dd', + 'DiacriticalGrave;': '`', + 'DiacriticalTilde;': '\u02dc', + 'diam;': '\u22c4', + 'Diamond;': '\u22c4', + 'diamond;': '\u22c4', + 'diamondsuit;': '\u2666', + 'diams;': '\u2666', + 'die;': '\xa8', + 'DifferentialD;': '\u2146', + 'digamma;': '\u03dd', + 'disin;': '\u22f2', + 'div;': '\xf7', + 'divide': '\xf7', + 'divide;': '\xf7', + 'divideontimes;': '\u22c7', + 'divonx;': '\u22c7', + 'DJcy;': '\u0402', + 'djcy;': '\u0452', + 'dlcorn;': '\u231e', + 'dlcrop;': '\u230d', + 'dollar;': '$', + 'Dopf;': '\U0001d53b', + 'dopf;': '\U0001d555', + 'Dot;': '\xa8', + 'dot;': '\u02d9', + 'DotDot;': '\u20dc', + 'doteq;': '\u2250', + 'doteqdot;': '\u2251', + 'DotEqual;': '\u2250', + 'dotminus;': '\u2238', + 'dotplus;': '\u2214', + 'dotsquare;': '\u22a1', + 'doublebarwedge;': '\u2306', + 'DoubleContourIntegral;': '\u222f', + 'DoubleDot;': '\xa8', + 'DoubleDownArrow;': '\u21d3', + 'DoubleLeftArrow;': '\u21d0', + 'DoubleLeftRightArrow;': '\u21d4', + 'DoubleLeftTee;': '\u2ae4', + 'DoubleLongLeftArrow;': '\u27f8', + 'DoubleLongLeftRightArrow;': '\u27fa', + 'DoubleLongRightArrow;': '\u27f9', + 'DoubleRightArrow;': '\u21d2', + 'DoubleRightTee;': '\u22a8', + 'DoubleUpArrow;': '\u21d1', + 'DoubleUpDownArrow;': '\u21d5', + 'DoubleVerticalBar;': '\u2225', + 'DownArrow;': '\u2193', + 'Downarrow;': '\u21d3', + 'downarrow;': '\u2193', + 'DownArrowBar;': '\u2913', + 'DownArrowUpArrow;': '\u21f5', + 'DownBreve;': '\u0311', + 'downdownarrows;': '\u21ca', + 'downharpoonleft;': '\u21c3', + 'downharpoonright;': '\u21c2', + 'DownLeftRightVector;': '\u2950', + 'DownLeftTeeVector;': '\u295e', + 'DownLeftVector;': '\u21bd', + 'DownLeftVectorBar;': '\u2956', + 'DownRightTeeVector;': '\u295f', + 'DownRightVector;': '\u21c1', + 
'DownRightVectorBar;': '\u2957', + 'DownTee;': '\u22a4', + 'DownTeeArrow;': '\u21a7', + 'drbkarow;': '\u2910', + 'drcorn;': '\u231f', + 'drcrop;': '\u230c', + 'Dscr;': '\U0001d49f', + 'dscr;': '\U0001d4b9', + 'DScy;': '\u0405', + 'dscy;': '\u0455', + 'dsol;': '\u29f6', + 'Dstrok;': '\u0110', + 'dstrok;': '\u0111', + 'dtdot;': '\u22f1', + 'dtri;': '\u25bf', + 'dtrif;': '\u25be', + 'duarr;': '\u21f5', + 'duhar;': '\u296f', + 'dwangle;': '\u29a6', + 'DZcy;': '\u040f', + 'dzcy;': '\u045f', + 'dzigrarr;': '\u27ff', + 'Eacute': '\xc9', + 'eacute': '\xe9', + 'Eacute;': '\xc9', + 'eacute;': '\xe9', + 'easter;': '\u2a6e', + 'Ecaron;': '\u011a', + 'ecaron;': '\u011b', + 'ecir;': '\u2256', + 'Ecirc': '\xca', + 'ecirc': '\xea', + 'Ecirc;': '\xca', + 'ecirc;': '\xea', + 'ecolon;': '\u2255', + 'Ecy;': '\u042d', + 'ecy;': '\u044d', + 'eDDot;': '\u2a77', + 'Edot;': '\u0116', + 'eDot;': '\u2251', + 'edot;': '\u0117', + 'ee;': '\u2147', + 'efDot;': '\u2252', + 'Efr;': '\U0001d508', + 'efr;': '\U0001d522', + 'eg;': '\u2a9a', + 'Egrave': '\xc8', + 'egrave': '\xe8', + 'Egrave;': '\xc8', + 'egrave;': '\xe8', + 'egs;': '\u2a96', + 'egsdot;': '\u2a98', + 'el;': '\u2a99', + 'Element;': '\u2208', + 'elinters;': '\u23e7', + 'ell;': '\u2113', + 'els;': '\u2a95', + 'elsdot;': '\u2a97', + 'Emacr;': '\u0112', + 'emacr;': '\u0113', + 'empty;': '\u2205', + 'emptyset;': '\u2205', + 'EmptySmallSquare;': '\u25fb', + 'emptyv;': '\u2205', + 'EmptyVerySmallSquare;': '\u25ab', + 'emsp13;': '\u2004', + 'emsp14;': '\u2005', + 'emsp;': '\u2003', + 'ENG;': '\u014a', + 'eng;': '\u014b', + 'ensp;': '\u2002', + 'Eogon;': '\u0118', + 'eogon;': '\u0119', + 'Eopf;': '\U0001d53c', + 'eopf;': '\U0001d556', + 'epar;': '\u22d5', + 'eparsl;': '\u29e3', + 'eplus;': '\u2a71', + 'epsi;': '\u03b5', + 'Epsilon;': '\u0395', + 'epsilon;': '\u03b5', + 'epsiv;': '\u03f5', + 'eqcirc;': '\u2256', + 'eqcolon;': '\u2255', + 'eqsim;': '\u2242', + 'eqslantgtr;': '\u2a96', + 'eqslantless;': '\u2a95', + 'Equal;': '\u2a75', + 'equals;': 
'=', + 'EqualTilde;': '\u2242', + 'equest;': '\u225f', + 'Equilibrium;': '\u21cc', + 'equiv;': '\u2261', + 'equivDD;': '\u2a78', + 'eqvparsl;': '\u29e5', + 'erarr;': '\u2971', + 'erDot;': '\u2253', + 'Escr;': '\u2130', + 'escr;': '\u212f', + 'esdot;': '\u2250', + 'Esim;': '\u2a73', + 'esim;': '\u2242', + 'Eta;': '\u0397', + 'eta;': '\u03b7', + 'ETH': '\xd0', + 'eth': '\xf0', + 'ETH;': '\xd0', + 'eth;': '\xf0', + 'Euml': '\xcb', + 'euml': '\xeb', + 'Euml;': '\xcb', + 'euml;': '\xeb', + 'euro;': '\u20ac', + 'excl;': '!', + 'exist;': '\u2203', + 'Exists;': '\u2203', + 'expectation;': '\u2130', + 'ExponentialE;': '\u2147', + 'exponentiale;': '\u2147', + 'fallingdotseq;': '\u2252', + 'Fcy;': '\u0424', + 'fcy;': '\u0444', + 'female;': '\u2640', + 'ffilig;': '\ufb03', + 'fflig;': '\ufb00', + 'ffllig;': '\ufb04', + 'Ffr;': '\U0001d509', + 'ffr;': '\U0001d523', + 'filig;': '\ufb01', + 'FilledSmallSquare;': '\u25fc', + 'FilledVerySmallSquare;': '\u25aa', + 'fjlig;': 'fj', + 'flat;': '\u266d', + 'fllig;': '\ufb02', + 'fltns;': '\u25b1', + 'fnof;': '\u0192', + 'Fopf;': '\U0001d53d', + 'fopf;': '\U0001d557', + 'ForAll;': '\u2200', + 'forall;': '\u2200', + 'fork;': '\u22d4', + 'forkv;': '\u2ad9', + 'Fouriertrf;': '\u2131', + 'fpartint;': '\u2a0d', + 'frac12': '\xbd', + 'frac12;': '\xbd', + 'frac13;': '\u2153', + 'frac14': '\xbc', + 'frac14;': '\xbc', + 'frac15;': '\u2155', + 'frac16;': '\u2159', + 'frac18;': '\u215b', + 'frac23;': '\u2154', + 'frac25;': '\u2156', + 'frac34': '\xbe', + 'frac34;': '\xbe', + 'frac35;': '\u2157', + 'frac38;': '\u215c', + 'frac45;': '\u2158', + 'frac56;': '\u215a', + 'frac58;': '\u215d', + 'frac78;': '\u215e', + 'frasl;': '\u2044', + 'frown;': '\u2322', + 'Fscr;': '\u2131', + 'fscr;': '\U0001d4bb', + 'gacute;': '\u01f5', + 'Gamma;': '\u0393', + 'gamma;': '\u03b3', + 'Gammad;': '\u03dc', + 'gammad;': '\u03dd', + 'gap;': '\u2a86', + 'Gbreve;': '\u011e', + 'gbreve;': '\u011f', + 'Gcedil;': '\u0122', + 'Gcirc;': '\u011c', + 'gcirc;': '\u011d', + 'Gcy;': 
'\u0413', + 'gcy;': '\u0433', + 'Gdot;': '\u0120', + 'gdot;': '\u0121', + 'gE;': '\u2267', + 'ge;': '\u2265', + 'gEl;': '\u2a8c', + 'gel;': '\u22db', + 'geq;': '\u2265', + 'geqq;': '\u2267', + 'geqslant;': '\u2a7e', + 'ges;': '\u2a7e', + 'gescc;': '\u2aa9', + 'gesdot;': '\u2a80', + 'gesdoto;': '\u2a82', + 'gesdotol;': '\u2a84', + 'gesl;': '\u22db\ufe00', + 'gesles;': '\u2a94', + 'Gfr;': '\U0001d50a', + 'gfr;': '\U0001d524', + 'Gg;': '\u22d9', + 'gg;': '\u226b', + 'ggg;': '\u22d9', + 'gimel;': '\u2137', + 'GJcy;': '\u0403', + 'gjcy;': '\u0453', + 'gl;': '\u2277', + 'gla;': '\u2aa5', + 'glE;': '\u2a92', + 'glj;': '\u2aa4', + 'gnap;': '\u2a8a', + 'gnapprox;': '\u2a8a', + 'gnE;': '\u2269', + 'gne;': '\u2a88', + 'gneq;': '\u2a88', + 'gneqq;': '\u2269', + 'gnsim;': '\u22e7', + 'Gopf;': '\U0001d53e', + 'gopf;': '\U0001d558', + 'grave;': '`', + 'GreaterEqual;': '\u2265', + 'GreaterEqualLess;': '\u22db', + 'GreaterFullEqual;': '\u2267', + 'GreaterGreater;': '\u2aa2', + 'GreaterLess;': '\u2277', + 'GreaterSlantEqual;': '\u2a7e', + 'GreaterTilde;': '\u2273', + 'Gscr;': '\U0001d4a2', + 'gscr;': '\u210a', + 'gsim;': '\u2273', + 'gsime;': '\u2a8e', + 'gsiml;': '\u2a90', + 'GT': '>', + 'gt': '>', + 'GT;': '>', + 'Gt;': '\u226b', + 'gt;': '>', + 'gtcc;': '\u2aa7', + 'gtcir;': '\u2a7a', + 'gtdot;': '\u22d7', + 'gtlPar;': '\u2995', + 'gtquest;': '\u2a7c', + 'gtrapprox;': '\u2a86', + 'gtrarr;': '\u2978', + 'gtrdot;': '\u22d7', + 'gtreqless;': '\u22db', + 'gtreqqless;': '\u2a8c', + 'gtrless;': '\u2277', + 'gtrsim;': '\u2273', + 'gvertneqq;': '\u2269\ufe00', + 'gvnE;': '\u2269\ufe00', + 'Hacek;': '\u02c7', + 'hairsp;': '\u200a', + 'half;': '\xbd', + 'hamilt;': '\u210b', + 'HARDcy;': '\u042a', + 'hardcy;': '\u044a', + 'hArr;': '\u21d4', + 'harr;': '\u2194', + 'harrcir;': '\u2948', + 'harrw;': '\u21ad', + 'Hat;': '^', + 'hbar;': '\u210f', + 'Hcirc;': '\u0124', + 'hcirc;': '\u0125', + 'hearts;': '\u2665', + 'heartsuit;': '\u2665', + 'hellip;': '\u2026', + 'hercon;': '\u22b9', + 'Hfr;': 
'\u210c', + 'hfr;': '\U0001d525', + 'HilbertSpace;': '\u210b', + 'hksearow;': '\u2925', + 'hkswarow;': '\u2926', + 'hoarr;': '\u21ff', + 'homtht;': '\u223b', + 'hookleftarrow;': '\u21a9', + 'hookrightarrow;': '\u21aa', + 'Hopf;': '\u210d', + 'hopf;': '\U0001d559', + 'horbar;': '\u2015', + 'HorizontalLine;': '\u2500', + 'Hscr;': '\u210b', + 'hscr;': '\U0001d4bd', + 'hslash;': '\u210f', + 'Hstrok;': '\u0126', + 'hstrok;': '\u0127', + 'HumpDownHump;': '\u224e', + 'HumpEqual;': '\u224f', + 'hybull;': '\u2043', + 'hyphen;': '\u2010', + 'Iacute': '\xcd', + 'iacute': '\xed', + 'Iacute;': '\xcd', + 'iacute;': '\xed', + 'ic;': '\u2063', + 'Icirc': '\xce', + 'icirc': '\xee', + 'Icirc;': '\xce', + 'icirc;': '\xee', + 'Icy;': '\u0418', + 'icy;': '\u0438', + 'Idot;': '\u0130', + 'IEcy;': '\u0415', + 'iecy;': '\u0435', + 'iexcl': '\xa1', + 'iexcl;': '\xa1', + 'iff;': '\u21d4', + 'Ifr;': '\u2111', + 'ifr;': '\U0001d526', + 'Igrave': '\xcc', + 'igrave': '\xec', + 'Igrave;': '\xcc', + 'igrave;': '\xec', + 'ii;': '\u2148', + 'iiiint;': '\u2a0c', + 'iiint;': '\u222d', + 'iinfin;': '\u29dc', + 'iiota;': '\u2129', + 'IJlig;': '\u0132', + 'ijlig;': '\u0133', + 'Im;': '\u2111', + 'Imacr;': '\u012a', + 'imacr;': '\u012b', + 'image;': '\u2111', + 'ImaginaryI;': '\u2148', + 'imagline;': '\u2110', + 'imagpart;': '\u2111', + 'imath;': '\u0131', + 'imof;': '\u22b7', + 'imped;': '\u01b5', + 'Implies;': '\u21d2', + 'in;': '\u2208', + 'incare;': '\u2105', + 'infin;': '\u221e', + 'infintie;': '\u29dd', + 'inodot;': '\u0131', + 'Int;': '\u222c', + 'int;': '\u222b', + 'intcal;': '\u22ba', + 'integers;': '\u2124', + 'Integral;': '\u222b', + 'intercal;': '\u22ba', + 'Intersection;': '\u22c2', + 'intlarhk;': '\u2a17', + 'intprod;': '\u2a3c', + 'InvisibleComma;': '\u2063', + 'InvisibleTimes;': '\u2062', + 'IOcy;': '\u0401', + 'iocy;': '\u0451', + 'Iogon;': '\u012e', + 'iogon;': '\u012f', + 'Iopf;': '\U0001d540', + 'iopf;': '\U0001d55a', + 'Iota;': '\u0399', + 'iota;': '\u03b9', + 'iprod;': '\u2a3c', + 
'iquest': '\xbf', + 'iquest;': '\xbf', + 'Iscr;': '\u2110', + 'iscr;': '\U0001d4be', + 'isin;': '\u2208', + 'isindot;': '\u22f5', + 'isinE;': '\u22f9', + 'isins;': '\u22f4', + 'isinsv;': '\u22f3', + 'isinv;': '\u2208', + 'it;': '\u2062', + 'Itilde;': '\u0128', + 'itilde;': '\u0129', + 'Iukcy;': '\u0406', + 'iukcy;': '\u0456', + 'Iuml': '\xcf', + 'iuml': '\xef', + 'Iuml;': '\xcf', + 'iuml;': '\xef', + 'Jcirc;': '\u0134', + 'jcirc;': '\u0135', + 'Jcy;': '\u0419', + 'jcy;': '\u0439', + 'Jfr;': '\U0001d50d', + 'jfr;': '\U0001d527', + 'jmath;': '\u0237', + 'Jopf;': '\U0001d541', + 'jopf;': '\U0001d55b', + 'Jscr;': '\U0001d4a5', + 'jscr;': '\U0001d4bf', + 'Jsercy;': '\u0408', + 'jsercy;': '\u0458', + 'Jukcy;': '\u0404', + 'jukcy;': '\u0454', + 'Kappa;': '\u039a', + 'kappa;': '\u03ba', + 'kappav;': '\u03f0', + 'Kcedil;': '\u0136', + 'kcedil;': '\u0137', + 'Kcy;': '\u041a', + 'kcy;': '\u043a', + 'Kfr;': '\U0001d50e', + 'kfr;': '\U0001d528', + 'kgreen;': '\u0138', + 'KHcy;': '\u0425', + 'khcy;': '\u0445', + 'KJcy;': '\u040c', + 'kjcy;': '\u045c', + 'Kopf;': '\U0001d542', + 'kopf;': '\U0001d55c', + 'Kscr;': '\U0001d4a6', + 'kscr;': '\U0001d4c0', + 'lAarr;': '\u21da', + 'Lacute;': '\u0139', + 'lacute;': '\u013a', + 'laemptyv;': '\u29b4', + 'lagran;': '\u2112', + 'Lambda;': '\u039b', + 'lambda;': '\u03bb', + 'Lang;': '\u27ea', + 'lang;': '\u27e8', + 'langd;': '\u2991', + 'langle;': '\u27e8', + 'lap;': '\u2a85', + 'Laplacetrf;': '\u2112', + 'laquo': '\xab', + 'laquo;': '\xab', + 'Larr;': '\u219e', + 'lArr;': '\u21d0', + 'larr;': '\u2190', + 'larrb;': '\u21e4', + 'larrbfs;': '\u291f', + 'larrfs;': '\u291d', + 'larrhk;': '\u21a9', + 'larrlp;': '\u21ab', + 'larrpl;': '\u2939', + 'larrsim;': '\u2973', + 'larrtl;': '\u21a2', + 'lat;': '\u2aab', + 'lAtail;': '\u291b', + 'latail;': '\u2919', + 'late;': '\u2aad', + 'lates;': '\u2aad\ufe00', + 'lBarr;': '\u290e', + 'lbarr;': '\u290c', + 'lbbrk;': '\u2772', + 'lbrace;': '{', + 'lbrack;': '[', + 'lbrke;': '\u298b', + 'lbrksld;': '\u298f', 
+ 'lbrkslu;': '\u298d', + 'Lcaron;': '\u013d', + 'lcaron;': '\u013e', + 'Lcedil;': '\u013b', + 'lcedil;': '\u013c', + 'lceil;': '\u2308', + 'lcub;': '{', + 'Lcy;': '\u041b', + 'lcy;': '\u043b', + 'ldca;': '\u2936', + 'ldquo;': '\u201c', + 'ldquor;': '\u201e', + 'ldrdhar;': '\u2967', + 'ldrushar;': '\u294b', + 'ldsh;': '\u21b2', + 'lE;': '\u2266', + 'le;': '\u2264', + 'LeftAngleBracket;': '\u27e8', + 'LeftArrow;': '\u2190', + 'Leftarrow;': '\u21d0', + 'leftarrow;': '\u2190', + 'LeftArrowBar;': '\u21e4', + 'LeftArrowRightArrow;': '\u21c6', + 'leftarrowtail;': '\u21a2', + 'LeftCeiling;': '\u2308', + 'LeftDoubleBracket;': '\u27e6', + 'LeftDownTeeVector;': '\u2961', + 'LeftDownVector;': '\u21c3', + 'LeftDownVectorBar;': '\u2959', + 'LeftFloor;': '\u230a', + 'leftharpoondown;': '\u21bd', + 'leftharpoonup;': '\u21bc', + 'leftleftarrows;': '\u21c7', + 'LeftRightArrow;': '\u2194', + 'Leftrightarrow;': '\u21d4', + 'leftrightarrow;': '\u2194', + 'leftrightarrows;': '\u21c6', + 'leftrightharpoons;': '\u21cb', + 'leftrightsquigarrow;': '\u21ad', + 'LeftRightVector;': '\u294e', + 'LeftTee;': '\u22a3', + 'LeftTeeArrow;': '\u21a4', + 'LeftTeeVector;': '\u295a', + 'leftthreetimes;': '\u22cb', + 'LeftTriangle;': '\u22b2', + 'LeftTriangleBar;': '\u29cf', + 'LeftTriangleEqual;': '\u22b4', + 'LeftUpDownVector;': '\u2951', + 'LeftUpTeeVector;': '\u2960', + 'LeftUpVector;': '\u21bf', + 'LeftUpVectorBar;': '\u2958', + 'LeftVector;': '\u21bc', + 'LeftVectorBar;': '\u2952', + 'lEg;': '\u2a8b', + 'leg;': '\u22da', + 'leq;': '\u2264', + 'leqq;': '\u2266', + 'leqslant;': '\u2a7d', + 'les;': '\u2a7d', + 'lescc;': '\u2aa8', + 'lesdot;': '\u2a7f', + 'lesdoto;': '\u2a81', + 'lesdotor;': '\u2a83', + 'lesg;': '\u22da\ufe00', + 'lesges;': '\u2a93', + 'lessapprox;': '\u2a85', + 'lessdot;': '\u22d6', + 'lesseqgtr;': '\u22da', + 'lesseqqgtr;': '\u2a8b', + 'LessEqualGreater;': '\u22da', + 'LessFullEqual;': '\u2266', + 'LessGreater;': '\u2276', + 'lessgtr;': '\u2276', + 'LessLess;': '\u2aa1', + 
'lesssim;': '\u2272', + 'LessSlantEqual;': '\u2a7d', + 'LessTilde;': '\u2272', + 'lfisht;': '\u297c', + 'lfloor;': '\u230a', + 'Lfr;': '\U0001d50f', + 'lfr;': '\U0001d529', + 'lg;': '\u2276', + 'lgE;': '\u2a91', + 'lHar;': '\u2962', + 'lhard;': '\u21bd', + 'lharu;': '\u21bc', + 'lharul;': '\u296a', + 'lhblk;': '\u2584', + 'LJcy;': '\u0409', + 'ljcy;': '\u0459', + 'Ll;': '\u22d8', + 'll;': '\u226a', + 'llarr;': '\u21c7', + 'llcorner;': '\u231e', + 'Lleftarrow;': '\u21da', + 'llhard;': '\u296b', + 'lltri;': '\u25fa', + 'Lmidot;': '\u013f', + 'lmidot;': '\u0140', + 'lmoust;': '\u23b0', + 'lmoustache;': '\u23b0', + 'lnap;': '\u2a89', + 'lnapprox;': '\u2a89', + 'lnE;': '\u2268', + 'lne;': '\u2a87', + 'lneq;': '\u2a87', + 'lneqq;': '\u2268', + 'lnsim;': '\u22e6', + 'loang;': '\u27ec', + 'loarr;': '\u21fd', + 'lobrk;': '\u27e6', + 'LongLeftArrow;': '\u27f5', + 'Longleftarrow;': '\u27f8', + 'longleftarrow;': '\u27f5', + 'LongLeftRightArrow;': '\u27f7', + 'Longleftrightarrow;': '\u27fa', + 'longleftrightarrow;': '\u27f7', + 'longmapsto;': '\u27fc', + 'LongRightArrow;': '\u27f6', + 'Longrightarrow;': '\u27f9', + 'longrightarrow;': '\u27f6', + 'looparrowleft;': '\u21ab', + 'looparrowright;': '\u21ac', + 'lopar;': '\u2985', + 'Lopf;': '\U0001d543', + 'lopf;': '\U0001d55d', + 'loplus;': '\u2a2d', + 'lotimes;': '\u2a34', + 'lowast;': '\u2217', + 'lowbar;': '_', + 'LowerLeftArrow;': '\u2199', + 'LowerRightArrow;': '\u2198', + 'loz;': '\u25ca', + 'lozenge;': '\u25ca', + 'lozf;': '\u29eb', + 'lpar;': '(', + 'lparlt;': '\u2993', + 'lrarr;': '\u21c6', + 'lrcorner;': '\u231f', + 'lrhar;': '\u21cb', + 'lrhard;': '\u296d', + 'lrm;': '\u200e', + 'lrtri;': '\u22bf', + 'lsaquo;': '\u2039', + 'Lscr;': '\u2112', + 'lscr;': '\U0001d4c1', + 'Lsh;': '\u21b0', + 'lsh;': '\u21b0', + 'lsim;': '\u2272', + 'lsime;': '\u2a8d', + 'lsimg;': '\u2a8f', + 'lsqb;': '[', + 'lsquo;': '\u2018', + 'lsquor;': '\u201a', + 'Lstrok;': '\u0141', + 'lstrok;': '\u0142', + 'LT': '<', + 'lt': '<', + 'LT;': '<', + 
'Lt;': '\u226a', + 'lt;': '<', + 'ltcc;': '\u2aa6', + 'ltcir;': '\u2a79', + 'ltdot;': '\u22d6', + 'lthree;': '\u22cb', + 'ltimes;': '\u22c9', + 'ltlarr;': '\u2976', + 'ltquest;': '\u2a7b', + 'ltri;': '\u25c3', + 'ltrie;': '\u22b4', + 'ltrif;': '\u25c2', + 'ltrPar;': '\u2996', + 'lurdshar;': '\u294a', + 'luruhar;': '\u2966', + 'lvertneqq;': '\u2268\ufe00', + 'lvnE;': '\u2268\ufe00', + 'macr': '\xaf', + 'macr;': '\xaf', + 'male;': '\u2642', + 'malt;': '\u2720', + 'maltese;': '\u2720', + 'Map;': '\u2905', + 'map;': '\u21a6', + 'mapsto;': '\u21a6', + 'mapstodown;': '\u21a7', + 'mapstoleft;': '\u21a4', + 'mapstoup;': '\u21a5', + 'marker;': '\u25ae', + 'mcomma;': '\u2a29', + 'Mcy;': '\u041c', + 'mcy;': '\u043c', + 'mdash;': '\u2014', + 'mDDot;': '\u223a', + 'measuredangle;': '\u2221', + 'MediumSpace;': '\u205f', + 'Mellintrf;': '\u2133', + 'Mfr;': '\U0001d510', + 'mfr;': '\U0001d52a', + 'mho;': '\u2127', + 'micro': '\xb5', + 'micro;': '\xb5', + 'mid;': '\u2223', + 'midast;': '*', + 'midcir;': '\u2af0', + 'middot': '\xb7', + 'middot;': '\xb7', + 'minus;': '\u2212', + 'minusb;': '\u229f', + 'minusd;': '\u2238', + 'minusdu;': '\u2a2a', + 'MinusPlus;': '\u2213', + 'mlcp;': '\u2adb', + 'mldr;': '\u2026', + 'mnplus;': '\u2213', + 'models;': '\u22a7', + 'Mopf;': '\U0001d544', + 'mopf;': '\U0001d55e', + 'mp;': '\u2213', + 'Mscr;': '\u2133', + 'mscr;': '\U0001d4c2', + 'mstpos;': '\u223e', + 'Mu;': '\u039c', + 'mu;': '\u03bc', + 'multimap;': '\u22b8', + 'mumap;': '\u22b8', + 'nabla;': '\u2207', + 'Nacute;': '\u0143', + 'nacute;': '\u0144', + 'nang;': '\u2220\u20d2', + 'nap;': '\u2249', + 'napE;': '\u2a70\u0338', + 'napid;': '\u224b\u0338', + 'napos;': '\u0149', + 'napprox;': '\u2249', + 'natur;': '\u266e', + 'natural;': '\u266e', + 'naturals;': '\u2115', + 'nbsp': '\xa0', + 'nbsp;': '\xa0', + 'nbump;': '\u224e\u0338', + 'nbumpe;': '\u224f\u0338', + 'ncap;': '\u2a43', + 'Ncaron;': '\u0147', + 'ncaron;': '\u0148', + 'Ncedil;': '\u0145', + 'ncedil;': '\u0146', + 'ncong;': '\u2247', + 
'ncongdot;': '\u2a6d\u0338', + 'ncup;': '\u2a42', + 'Ncy;': '\u041d', + 'ncy;': '\u043d', + 'ndash;': '\u2013', + 'ne;': '\u2260', + 'nearhk;': '\u2924', + 'neArr;': '\u21d7', + 'nearr;': '\u2197', + 'nearrow;': '\u2197', + 'nedot;': '\u2250\u0338', + 'NegativeMediumSpace;': '\u200b', + 'NegativeThickSpace;': '\u200b', + 'NegativeThinSpace;': '\u200b', + 'NegativeVeryThinSpace;': '\u200b', + 'nequiv;': '\u2262', + 'nesear;': '\u2928', + 'nesim;': '\u2242\u0338', + 'NestedGreaterGreater;': '\u226b', + 'NestedLessLess;': '\u226a', + 'NewLine;': '\n', + 'nexist;': '\u2204', + 'nexists;': '\u2204', + 'Nfr;': '\U0001d511', + 'nfr;': '\U0001d52b', + 'ngE;': '\u2267\u0338', + 'nge;': '\u2271', + 'ngeq;': '\u2271', + 'ngeqq;': '\u2267\u0338', + 'ngeqslant;': '\u2a7e\u0338', + 'nges;': '\u2a7e\u0338', + 'nGg;': '\u22d9\u0338', + 'ngsim;': '\u2275', + 'nGt;': '\u226b\u20d2', + 'ngt;': '\u226f', + 'ngtr;': '\u226f', + 'nGtv;': '\u226b\u0338', + 'nhArr;': '\u21ce', + 'nharr;': '\u21ae', + 'nhpar;': '\u2af2', + 'ni;': '\u220b', + 'nis;': '\u22fc', + 'nisd;': '\u22fa', + 'niv;': '\u220b', + 'NJcy;': '\u040a', + 'njcy;': '\u045a', + 'nlArr;': '\u21cd', + 'nlarr;': '\u219a', + 'nldr;': '\u2025', + 'nlE;': '\u2266\u0338', + 'nle;': '\u2270', + 'nLeftarrow;': '\u21cd', + 'nleftarrow;': '\u219a', + 'nLeftrightarrow;': '\u21ce', + 'nleftrightarrow;': '\u21ae', + 'nleq;': '\u2270', + 'nleqq;': '\u2266\u0338', + 'nleqslant;': '\u2a7d\u0338', + 'nles;': '\u2a7d\u0338', + 'nless;': '\u226e', + 'nLl;': '\u22d8\u0338', + 'nlsim;': '\u2274', + 'nLt;': '\u226a\u20d2', + 'nlt;': '\u226e', + 'nltri;': '\u22ea', + 'nltrie;': '\u22ec', + 'nLtv;': '\u226a\u0338', + 'nmid;': '\u2224', + 'NoBreak;': '\u2060', + 'NonBreakingSpace;': '\xa0', + 'Nopf;': '\u2115', + 'nopf;': '\U0001d55f', + 'not': '\xac', + 'Not;': '\u2aec', + 'not;': '\xac', + 'NotCongruent;': '\u2262', + 'NotCupCap;': '\u226d', + 'NotDoubleVerticalBar;': '\u2226', + 'NotElement;': '\u2209', + 'NotEqual;': '\u2260', + 'NotEqualTilde;': 
'\u2242\u0338', + 'NotExists;': '\u2204', + 'NotGreater;': '\u226f', + 'NotGreaterEqual;': '\u2271', + 'NotGreaterFullEqual;': '\u2267\u0338', + 'NotGreaterGreater;': '\u226b\u0338', + 'NotGreaterLess;': '\u2279', + 'NotGreaterSlantEqual;': '\u2a7e\u0338', + 'NotGreaterTilde;': '\u2275', + 'NotHumpDownHump;': '\u224e\u0338', + 'NotHumpEqual;': '\u224f\u0338', + 'notin;': '\u2209', + 'notindot;': '\u22f5\u0338', + 'notinE;': '\u22f9\u0338', + 'notinva;': '\u2209', + 'notinvb;': '\u22f7', + 'notinvc;': '\u22f6', + 'NotLeftTriangle;': '\u22ea', + 'NotLeftTriangleBar;': '\u29cf\u0338', + 'NotLeftTriangleEqual;': '\u22ec', + 'NotLess;': '\u226e', + 'NotLessEqual;': '\u2270', + 'NotLessGreater;': '\u2278', + 'NotLessLess;': '\u226a\u0338', + 'NotLessSlantEqual;': '\u2a7d\u0338', + 'NotLessTilde;': '\u2274', + 'NotNestedGreaterGreater;': '\u2aa2\u0338', + 'NotNestedLessLess;': '\u2aa1\u0338', + 'notni;': '\u220c', + 'notniva;': '\u220c', + 'notnivb;': '\u22fe', + 'notnivc;': '\u22fd', + 'NotPrecedes;': '\u2280', + 'NotPrecedesEqual;': '\u2aaf\u0338', + 'NotPrecedesSlantEqual;': '\u22e0', + 'NotReverseElement;': '\u220c', + 'NotRightTriangle;': '\u22eb', + 'NotRightTriangleBar;': '\u29d0\u0338', + 'NotRightTriangleEqual;': '\u22ed', + 'NotSquareSubset;': '\u228f\u0338', + 'NotSquareSubsetEqual;': '\u22e2', + 'NotSquareSuperset;': '\u2290\u0338', + 'NotSquareSupersetEqual;': '\u22e3', + 'NotSubset;': '\u2282\u20d2', + 'NotSubsetEqual;': '\u2288', + 'NotSucceeds;': '\u2281', + 'NotSucceedsEqual;': '\u2ab0\u0338', + 'NotSucceedsSlantEqual;': '\u22e1', + 'NotSucceedsTilde;': '\u227f\u0338', + 'NotSuperset;': '\u2283\u20d2', + 'NotSupersetEqual;': '\u2289', + 'NotTilde;': '\u2241', + 'NotTildeEqual;': '\u2244', + 'NotTildeFullEqual;': '\u2247', + 'NotTildeTilde;': '\u2249', + 'NotVerticalBar;': '\u2224', + 'npar;': '\u2226', + 'nparallel;': '\u2226', + 'nparsl;': '\u2afd\u20e5', + 'npart;': '\u2202\u0338', + 'npolint;': '\u2a14', + 'npr;': '\u2280', + 'nprcue;': '\u22e0', + 
'npre;': '\u2aaf\u0338', + 'nprec;': '\u2280', + 'npreceq;': '\u2aaf\u0338', + 'nrArr;': '\u21cf', + 'nrarr;': '\u219b', + 'nrarrc;': '\u2933\u0338', + 'nrarrw;': '\u219d\u0338', + 'nRightarrow;': '\u21cf', + 'nrightarrow;': '\u219b', + 'nrtri;': '\u22eb', + 'nrtrie;': '\u22ed', + 'nsc;': '\u2281', + 'nsccue;': '\u22e1', + 'nsce;': '\u2ab0\u0338', + 'Nscr;': '\U0001d4a9', + 'nscr;': '\U0001d4c3', + 'nshortmid;': '\u2224', + 'nshortparallel;': '\u2226', + 'nsim;': '\u2241', + 'nsime;': '\u2244', + 'nsimeq;': '\u2244', + 'nsmid;': '\u2224', + 'nspar;': '\u2226', + 'nsqsube;': '\u22e2', + 'nsqsupe;': '\u22e3', + 'nsub;': '\u2284', + 'nsubE;': '\u2ac5\u0338', + 'nsube;': '\u2288', + 'nsubset;': '\u2282\u20d2', + 'nsubseteq;': '\u2288', + 'nsubseteqq;': '\u2ac5\u0338', + 'nsucc;': '\u2281', + 'nsucceq;': '\u2ab0\u0338', + 'nsup;': '\u2285', + 'nsupE;': '\u2ac6\u0338', + 'nsupe;': '\u2289', + 'nsupset;': '\u2283\u20d2', + 'nsupseteq;': '\u2289', + 'nsupseteqq;': '\u2ac6\u0338', + 'ntgl;': '\u2279', + 'Ntilde': '\xd1', + 'ntilde': '\xf1', + 'Ntilde;': '\xd1', + 'ntilde;': '\xf1', + 'ntlg;': '\u2278', + 'ntriangleleft;': '\u22ea', + 'ntrianglelefteq;': '\u22ec', + 'ntriangleright;': '\u22eb', + 'ntrianglerighteq;': '\u22ed', + 'Nu;': '\u039d', + 'nu;': '\u03bd', + 'num;': '#', + 'numero;': '\u2116', + 'numsp;': '\u2007', + 'nvap;': '\u224d\u20d2', + 'nVDash;': '\u22af', + 'nVdash;': '\u22ae', + 'nvDash;': '\u22ad', + 'nvdash;': '\u22ac', + 'nvge;': '\u2265\u20d2', + 'nvgt;': '>\u20d2', + 'nvHarr;': '\u2904', + 'nvinfin;': '\u29de', + 'nvlArr;': '\u2902', + 'nvle;': '\u2264\u20d2', + 'nvlt;': '<\u20d2', + 'nvltrie;': '\u22b4\u20d2', + 'nvrArr;': '\u2903', + 'nvrtrie;': '\u22b5\u20d2', + 'nvsim;': '\u223c\u20d2', + 'nwarhk;': '\u2923', + 'nwArr;': '\u21d6', + 'nwarr;': '\u2196', + 'nwarrow;': '\u2196', + 'nwnear;': '\u2927', + 'Oacute': '\xd3', + 'oacute': '\xf3', + 'Oacute;': '\xd3', + 'oacute;': '\xf3', + 'oast;': '\u229b', + 'ocir;': '\u229a', + 'Ocirc': '\xd4', + 
'ocirc': '\xf4', + 'Ocirc;': '\xd4', + 'ocirc;': '\xf4', + 'Ocy;': '\u041e', + 'ocy;': '\u043e', + 'odash;': '\u229d', + 'Odblac;': '\u0150', + 'odblac;': '\u0151', + 'odiv;': '\u2a38', + 'odot;': '\u2299', + 'odsold;': '\u29bc', + 'OElig;': '\u0152', + 'oelig;': '\u0153', + 'ofcir;': '\u29bf', + 'Ofr;': '\U0001d512', + 'ofr;': '\U0001d52c', + 'ogon;': '\u02db', + 'Ograve': '\xd2', + 'ograve': '\xf2', + 'Ograve;': '\xd2', + 'ograve;': '\xf2', + 'ogt;': '\u29c1', + 'ohbar;': '\u29b5', + 'ohm;': '\u03a9', + 'oint;': '\u222e', + 'olarr;': '\u21ba', + 'olcir;': '\u29be', + 'olcross;': '\u29bb', + 'oline;': '\u203e', + 'olt;': '\u29c0', + 'Omacr;': '\u014c', + 'omacr;': '\u014d', + 'Omega;': '\u03a9', + 'omega;': '\u03c9', + 'Omicron;': '\u039f', + 'omicron;': '\u03bf', + 'omid;': '\u29b6', + 'ominus;': '\u2296', + 'Oopf;': '\U0001d546', + 'oopf;': '\U0001d560', + 'opar;': '\u29b7', + 'OpenCurlyDoubleQuote;': '\u201c', + 'OpenCurlyQuote;': '\u2018', + 'operp;': '\u29b9', + 'oplus;': '\u2295', + 'Or;': '\u2a54', + 'or;': '\u2228', + 'orarr;': '\u21bb', + 'ord;': '\u2a5d', + 'order;': '\u2134', + 'orderof;': '\u2134', + 'ordf': '\xaa', + 'ordf;': '\xaa', + 'ordm': '\xba', + 'ordm;': '\xba', + 'origof;': '\u22b6', + 'oror;': '\u2a56', + 'orslope;': '\u2a57', + 'orv;': '\u2a5b', + 'oS;': '\u24c8', + 'Oscr;': '\U0001d4aa', + 'oscr;': '\u2134', + 'Oslash': '\xd8', + 'oslash': '\xf8', + 'Oslash;': '\xd8', + 'oslash;': '\xf8', + 'osol;': '\u2298', + 'Otilde': '\xd5', + 'otilde': '\xf5', + 'Otilde;': '\xd5', + 'otilde;': '\xf5', + 'Otimes;': '\u2a37', + 'otimes;': '\u2297', + 'otimesas;': '\u2a36', + 'Ouml': '\xd6', + 'ouml': '\xf6', + 'Ouml;': '\xd6', + 'ouml;': '\xf6', + 'ovbar;': '\u233d', + 'OverBar;': '\u203e', + 'OverBrace;': '\u23de', + 'OverBracket;': '\u23b4', + 'OverParenthesis;': '\u23dc', + 'par;': '\u2225', + 'para': '\xb6', + 'para;': '\xb6', + 'parallel;': '\u2225', + 'parsim;': '\u2af3', + 'parsl;': '\u2afd', + 'part;': '\u2202', + 'PartialD;': '\u2202', + 
'Pcy;': '\u041f', + 'pcy;': '\u043f', + 'percnt;': '%', + 'period;': '.', + 'permil;': '\u2030', + 'perp;': '\u22a5', + 'pertenk;': '\u2031', + 'Pfr;': '\U0001d513', + 'pfr;': '\U0001d52d', + 'Phi;': '\u03a6', + 'phi;': '\u03c6', + 'phiv;': '\u03d5', + 'phmmat;': '\u2133', + 'phone;': '\u260e', + 'Pi;': '\u03a0', + 'pi;': '\u03c0', + 'pitchfork;': '\u22d4', + 'piv;': '\u03d6', + 'planck;': '\u210f', + 'planckh;': '\u210e', + 'plankv;': '\u210f', + 'plus;': '+', + 'plusacir;': '\u2a23', + 'plusb;': '\u229e', + 'pluscir;': '\u2a22', + 'plusdo;': '\u2214', + 'plusdu;': '\u2a25', + 'pluse;': '\u2a72', + 'PlusMinus;': '\xb1', + 'plusmn': '\xb1', + 'plusmn;': '\xb1', + 'plussim;': '\u2a26', + 'plustwo;': '\u2a27', + 'pm;': '\xb1', + 'Poincareplane;': '\u210c', + 'pointint;': '\u2a15', + 'Popf;': '\u2119', + 'popf;': '\U0001d561', + 'pound': '\xa3', + 'pound;': '\xa3', + 'Pr;': '\u2abb', + 'pr;': '\u227a', + 'prap;': '\u2ab7', + 'prcue;': '\u227c', + 'prE;': '\u2ab3', + 'pre;': '\u2aaf', + 'prec;': '\u227a', + 'precapprox;': '\u2ab7', + 'preccurlyeq;': '\u227c', + 'Precedes;': '\u227a', + 'PrecedesEqual;': '\u2aaf', + 'PrecedesSlantEqual;': '\u227c', + 'PrecedesTilde;': '\u227e', + 'preceq;': '\u2aaf', + 'precnapprox;': '\u2ab9', + 'precneqq;': '\u2ab5', + 'precnsim;': '\u22e8', + 'precsim;': '\u227e', + 'Prime;': '\u2033', + 'prime;': '\u2032', + 'primes;': '\u2119', + 'prnap;': '\u2ab9', + 'prnE;': '\u2ab5', + 'prnsim;': '\u22e8', + 'prod;': '\u220f', + 'Product;': '\u220f', + 'profalar;': '\u232e', + 'profline;': '\u2312', + 'profsurf;': '\u2313', + 'prop;': '\u221d', + 'Proportion;': '\u2237', + 'Proportional;': '\u221d', + 'propto;': '\u221d', + 'prsim;': '\u227e', + 'prurel;': '\u22b0', + 'Pscr;': '\U0001d4ab', + 'pscr;': '\U0001d4c5', + 'Psi;': '\u03a8', + 'psi;': '\u03c8', + 'puncsp;': '\u2008', + 'Qfr;': '\U0001d514', + 'qfr;': '\U0001d52e', + 'qint;': '\u2a0c', + 'Qopf;': '\u211a', + 'qopf;': '\U0001d562', + 'qprime;': '\u2057', + 'Qscr;': '\U0001d4ac', + 
'qscr;': '\U0001d4c6', + 'quaternions;': '\u210d', + 'quatint;': '\u2a16', + 'quest;': '?', + 'questeq;': '\u225f', + 'QUOT': '"', + 'quot': '"', + 'QUOT;': '"', + 'quot;': '"', + 'rAarr;': '\u21db', + 'race;': '\u223d\u0331', + 'Racute;': '\u0154', + 'racute;': '\u0155', + 'radic;': '\u221a', + 'raemptyv;': '\u29b3', + 'Rang;': '\u27eb', + 'rang;': '\u27e9', + 'rangd;': '\u2992', + 'range;': '\u29a5', + 'rangle;': '\u27e9', + 'raquo': '\xbb', + 'raquo;': '\xbb', + 'Rarr;': '\u21a0', + 'rArr;': '\u21d2', + 'rarr;': '\u2192', + 'rarrap;': '\u2975', + 'rarrb;': '\u21e5', + 'rarrbfs;': '\u2920', + 'rarrc;': '\u2933', + 'rarrfs;': '\u291e', + 'rarrhk;': '\u21aa', + 'rarrlp;': '\u21ac', + 'rarrpl;': '\u2945', + 'rarrsim;': '\u2974', + 'Rarrtl;': '\u2916', + 'rarrtl;': '\u21a3', + 'rarrw;': '\u219d', + 'rAtail;': '\u291c', + 'ratail;': '\u291a', + 'ratio;': '\u2236', + 'rationals;': '\u211a', + 'RBarr;': '\u2910', + 'rBarr;': '\u290f', + 'rbarr;': '\u290d', + 'rbbrk;': '\u2773', + 'rbrace;': '}', + 'rbrack;': ']', + 'rbrke;': '\u298c', + 'rbrksld;': '\u298e', + 'rbrkslu;': '\u2990', + 'Rcaron;': '\u0158', + 'rcaron;': '\u0159', + 'Rcedil;': '\u0156', + 'rcedil;': '\u0157', + 'rceil;': '\u2309', + 'rcub;': '}', + 'Rcy;': '\u0420', + 'rcy;': '\u0440', + 'rdca;': '\u2937', + 'rdldhar;': '\u2969', + 'rdquo;': '\u201d', + 'rdquor;': '\u201d', + 'rdsh;': '\u21b3', + 'Re;': '\u211c', + 'real;': '\u211c', + 'realine;': '\u211b', + 'realpart;': '\u211c', + 'reals;': '\u211d', + 'rect;': '\u25ad', + 'REG': '\xae', + 'reg': '\xae', + 'REG;': '\xae', + 'reg;': '\xae', + 'ReverseElement;': '\u220b', + 'ReverseEquilibrium;': '\u21cb', + 'ReverseUpEquilibrium;': '\u296f', + 'rfisht;': '\u297d', + 'rfloor;': '\u230b', + 'Rfr;': '\u211c', + 'rfr;': '\U0001d52f', + 'rHar;': '\u2964', + 'rhard;': '\u21c1', + 'rharu;': '\u21c0', + 'rharul;': '\u296c', + 'Rho;': '\u03a1', + 'rho;': '\u03c1', + 'rhov;': '\u03f1', + 'RightAngleBracket;': '\u27e9', + 'RightArrow;': '\u2192', + 'Rightarrow;': 
'\u21d2', + 'rightarrow;': '\u2192', + 'RightArrowBar;': '\u21e5', + 'RightArrowLeftArrow;': '\u21c4', + 'rightarrowtail;': '\u21a3', + 'RightCeiling;': '\u2309', + 'RightDoubleBracket;': '\u27e7', + 'RightDownTeeVector;': '\u295d', + 'RightDownVector;': '\u21c2', + 'RightDownVectorBar;': '\u2955', + 'RightFloor;': '\u230b', + 'rightharpoondown;': '\u21c1', + 'rightharpoonup;': '\u21c0', + 'rightleftarrows;': '\u21c4', + 'rightleftharpoons;': '\u21cc', + 'rightrightarrows;': '\u21c9', + 'rightsquigarrow;': '\u219d', + 'RightTee;': '\u22a2', + 'RightTeeArrow;': '\u21a6', + 'RightTeeVector;': '\u295b', + 'rightthreetimes;': '\u22cc', + 'RightTriangle;': '\u22b3', + 'RightTriangleBar;': '\u29d0', + 'RightTriangleEqual;': '\u22b5', + 'RightUpDownVector;': '\u294f', + 'RightUpTeeVector;': '\u295c', + 'RightUpVector;': '\u21be', + 'RightUpVectorBar;': '\u2954', + 'RightVector;': '\u21c0', + 'RightVectorBar;': '\u2953', + 'ring;': '\u02da', + 'risingdotseq;': '\u2253', + 'rlarr;': '\u21c4', + 'rlhar;': '\u21cc', + 'rlm;': '\u200f', + 'rmoust;': '\u23b1', + 'rmoustache;': '\u23b1', + 'rnmid;': '\u2aee', + 'roang;': '\u27ed', + 'roarr;': '\u21fe', + 'robrk;': '\u27e7', + 'ropar;': '\u2986', + 'Ropf;': '\u211d', + 'ropf;': '\U0001d563', + 'roplus;': '\u2a2e', + 'rotimes;': '\u2a35', + 'RoundImplies;': '\u2970', + 'rpar;': ')', + 'rpargt;': '\u2994', + 'rppolint;': '\u2a12', + 'rrarr;': '\u21c9', + 'Rrightarrow;': '\u21db', + 'rsaquo;': '\u203a', + 'Rscr;': '\u211b', + 'rscr;': '\U0001d4c7', + 'Rsh;': '\u21b1', + 'rsh;': '\u21b1', + 'rsqb;': ']', + 'rsquo;': '\u2019', + 'rsquor;': '\u2019', + 'rthree;': '\u22cc', + 'rtimes;': '\u22ca', + 'rtri;': '\u25b9', + 'rtrie;': '\u22b5', + 'rtrif;': '\u25b8', + 'rtriltri;': '\u29ce', + 'RuleDelayed;': '\u29f4', + 'ruluhar;': '\u2968', + 'rx;': '\u211e', + 'Sacute;': '\u015a', + 'sacute;': '\u015b', + 'sbquo;': '\u201a', + 'Sc;': '\u2abc', + 'sc;': '\u227b', + 'scap;': '\u2ab8', + 'Scaron;': '\u0160', + 'scaron;': '\u0161', + 'sccue;': 
'\u227d', + 'scE;': '\u2ab4', + 'sce;': '\u2ab0', + 'Scedil;': '\u015e', + 'scedil;': '\u015f', + 'Scirc;': '\u015c', + 'scirc;': '\u015d', + 'scnap;': '\u2aba', + 'scnE;': '\u2ab6', + 'scnsim;': '\u22e9', + 'scpolint;': '\u2a13', + 'scsim;': '\u227f', + 'Scy;': '\u0421', + 'scy;': '\u0441', + 'sdot;': '\u22c5', + 'sdotb;': '\u22a1', + 'sdote;': '\u2a66', + 'searhk;': '\u2925', + 'seArr;': '\u21d8', + 'searr;': '\u2198', + 'searrow;': '\u2198', + 'sect': '\xa7', + 'sect;': '\xa7', + 'semi;': ';', + 'seswar;': '\u2929', + 'setminus;': '\u2216', + 'setmn;': '\u2216', + 'sext;': '\u2736', + 'Sfr;': '\U0001d516', + 'sfr;': '\U0001d530', + 'sfrown;': '\u2322', + 'sharp;': '\u266f', + 'SHCHcy;': '\u0429', + 'shchcy;': '\u0449', + 'SHcy;': '\u0428', + 'shcy;': '\u0448', + 'ShortDownArrow;': '\u2193', + 'ShortLeftArrow;': '\u2190', + 'shortmid;': '\u2223', + 'shortparallel;': '\u2225', + 'ShortRightArrow;': '\u2192', + 'ShortUpArrow;': '\u2191', + 'shy': '\xad', + 'shy;': '\xad', + 'Sigma;': '\u03a3', + 'sigma;': '\u03c3', + 'sigmaf;': '\u03c2', + 'sigmav;': '\u03c2', + 'sim;': '\u223c', + 'simdot;': '\u2a6a', + 'sime;': '\u2243', + 'simeq;': '\u2243', + 'simg;': '\u2a9e', + 'simgE;': '\u2aa0', + 'siml;': '\u2a9d', + 'simlE;': '\u2a9f', + 'simne;': '\u2246', + 'simplus;': '\u2a24', + 'simrarr;': '\u2972', + 'slarr;': '\u2190', + 'SmallCircle;': '\u2218', + 'smallsetminus;': '\u2216', + 'smashp;': '\u2a33', + 'smeparsl;': '\u29e4', + 'smid;': '\u2223', + 'smile;': '\u2323', + 'smt;': '\u2aaa', + 'smte;': '\u2aac', + 'smtes;': '\u2aac\ufe00', + 'SOFTcy;': '\u042c', + 'softcy;': '\u044c', + 'sol;': '/', + 'solb;': '\u29c4', + 'solbar;': '\u233f', + 'Sopf;': '\U0001d54a', + 'sopf;': '\U0001d564', + 'spades;': '\u2660', + 'spadesuit;': '\u2660', + 'spar;': '\u2225', + 'sqcap;': '\u2293', + 'sqcaps;': '\u2293\ufe00', + 'sqcup;': '\u2294', + 'sqcups;': '\u2294\ufe00', + 'Sqrt;': '\u221a', + 'sqsub;': '\u228f', + 'sqsube;': '\u2291', + 'sqsubset;': '\u228f', + 'sqsubseteq;': 
'\u2291', + 'sqsup;': '\u2290', + 'sqsupe;': '\u2292', + 'sqsupset;': '\u2290', + 'sqsupseteq;': '\u2292', + 'squ;': '\u25a1', + 'Square;': '\u25a1', + 'square;': '\u25a1', + 'SquareIntersection;': '\u2293', + 'SquareSubset;': '\u228f', + 'SquareSubsetEqual;': '\u2291', + 'SquareSuperset;': '\u2290', + 'SquareSupersetEqual;': '\u2292', + 'SquareUnion;': '\u2294', + 'squarf;': '\u25aa', + 'squf;': '\u25aa', + 'srarr;': '\u2192', + 'Sscr;': '\U0001d4ae', + 'sscr;': '\U0001d4c8', + 'ssetmn;': '\u2216', + 'ssmile;': '\u2323', + 'sstarf;': '\u22c6', + 'Star;': '\u22c6', + 'star;': '\u2606', + 'starf;': '\u2605', + 'straightepsilon;': '\u03f5', + 'straightphi;': '\u03d5', + 'strns;': '\xaf', + 'Sub;': '\u22d0', + 'sub;': '\u2282', + 'subdot;': '\u2abd', + 'subE;': '\u2ac5', + 'sube;': '\u2286', + 'subedot;': '\u2ac3', + 'submult;': '\u2ac1', + 'subnE;': '\u2acb', + 'subne;': '\u228a', + 'subplus;': '\u2abf', + 'subrarr;': '\u2979', + 'Subset;': '\u22d0', + 'subset;': '\u2282', + 'subseteq;': '\u2286', + 'subseteqq;': '\u2ac5', + 'SubsetEqual;': '\u2286', + 'subsetneq;': '\u228a', + 'subsetneqq;': '\u2acb', + 'subsim;': '\u2ac7', + 'subsub;': '\u2ad5', + 'subsup;': '\u2ad3', + 'succ;': '\u227b', + 'succapprox;': '\u2ab8', + 'succcurlyeq;': '\u227d', + 'Succeeds;': '\u227b', + 'SucceedsEqual;': '\u2ab0', + 'SucceedsSlantEqual;': '\u227d', + 'SucceedsTilde;': '\u227f', + 'succeq;': '\u2ab0', + 'succnapprox;': '\u2aba', + 'succneqq;': '\u2ab6', + 'succnsim;': '\u22e9', + 'succsim;': '\u227f', + 'SuchThat;': '\u220b', + 'Sum;': '\u2211', + 'sum;': '\u2211', + 'sung;': '\u266a', + 'sup1': '\xb9', + 'sup1;': '\xb9', + 'sup2': '\xb2', + 'sup2;': '\xb2', + 'sup3': '\xb3', + 'sup3;': '\xb3', + 'Sup;': '\u22d1', + 'sup;': '\u2283', + 'supdot;': '\u2abe', + 'supdsub;': '\u2ad8', + 'supE;': '\u2ac6', + 'supe;': '\u2287', + 'supedot;': '\u2ac4', + 'Superset;': '\u2283', + 'SupersetEqual;': '\u2287', + 'suphsol;': '\u27c9', + 'suphsub;': '\u2ad7', + 'suplarr;': '\u297b', + 'supmult;': 
'\u2ac2', + 'supnE;': '\u2acc', + 'supne;': '\u228b', + 'supplus;': '\u2ac0', + 'Supset;': '\u22d1', + 'supset;': '\u2283', + 'supseteq;': '\u2287', + 'supseteqq;': '\u2ac6', + 'supsetneq;': '\u228b', + 'supsetneqq;': '\u2acc', + 'supsim;': '\u2ac8', + 'supsub;': '\u2ad4', + 'supsup;': '\u2ad6', + 'swarhk;': '\u2926', + 'swArr;': '\u21d9', + 'swarr;': '\u2199', + 'swarrow;': '\u2199', + 'swnwar;': '\u292a', + 'szlig': '\xdf', + 'szlig;': '\xdf', + 'Tab;': '\t', + 'target;': '\u2316', + 'Tau;': '\u03a4', + 'tau;': '\u03c4', + 'tbrk;': '\u23b4', + 'Tcaron;': '\u0164', + 'tcaron;': '\u0165', + 'Tcedil;': '\u0162', + 'tcedil;': '\u0163', + 'Tcy;': '\u0422', + 'tcy;': '\u0442', + 'tdot;': '\u20db', + 'telrec;': '\u2315', + 'Tfr;': '\U0001d517', + 'tfr;': '\U0001d531', + 'there4;': '\u2234', + 'Therefore;': '\u2234', + 'therefore;': '\u2234', + 'Theta;': '\u0398', + 'theta;': '\u03b8', + 'thetasym;': '\u03d1', + 'thetav;': '\u03d1', + 'thickapprox;': '\u2248', + 'thicksim;': '\u223c', + 'ThickSpace;': '\u205f\u200a', + 'thinsp;': '\u2009', + 'ThinSpace;': '\u2009', + 'thkap;': '\u2248', + 'thksim;': '\u223c', + 'THORN': '\xde', + 'thorn': '\xfe', + 'THORN;': '\xde', + 'thorn;': '\xfe', + 'Tilde;': '\u223c', + 'tilde;': '\u02dc', + 'TildeEqual;': '\u2243', + 'TildeFullEqual;': '\u2245', + 'TildeTilde;': '\u2248', + 'times': '\xd7', + 'times;': '\xd7', + 'timesb;': '\u22a0', + 'timesbar;': '\u2a31', + 'timesd;': '\u2a30', + 'tint;': '\u222d', + 'toea;': '\u2928', + 'top;': '\u22a4', + 'topbot;': '\u2336', + 'topcir;': '\u2af1', + 'Topf;': '\U0001d54b', + 'topf;': '\U0001d565', + 'topfork;': '\u2ada', + 'tosa;': '\u2929', + 'tprime;': '\u2034', + 'TRADE;': '\u2122', + 'trade;': '\u2122', + 'triangle;': '\u25b5', + 'triangledown;': '\u25bf', + 'triangleleft;': '\u25c3', + 'trianglelefteq;': '\u22b4', + 'triangleq;': '\u225c', + 'triangleright;': '\u25b9', + 'trianglerighteq;': '\u22b5', + 'tridot;': '\u25ec', + 'trie;': '\u225c', + 'triminus;': '\u2a3a', + 'TripleDot;': 
'\u20db', + 'triplus;': '\u2a39', + 'trisb;': '\u29cd', + 'tritime;': '\u2a3b', + 'trpezium;': '\u23e2', + 'Tscr;': '\U0001d4af', + 'tscr;': '\U0001d4c9', + 'TScy;': '\u0426', + 'tscy;': '\u0446', + 'TSHcy;': '\u040b', + 'tshcy;': '\u045b', + 'Tstrok;': '\u0166', + 'tstrok;': '\u0167', + 'twixt;': '\u226c', + 'twoheadleftarrow;': '\u219e', + 'twoheadrightarrow;': '\u21a0', + 'Uacute': '\xda', + 'uacute': '\xfa', + 'Uacute;': '\xda', + 'uacute;': '\xfa', + 'Uarr;': '\u219f', + 'uArr;': '\u21d1', + 'uarr;': '\u2191', + 'Uarrocir;': '\u2949', + 'Ubrcy;': '\u040e', + 'ubrcy;': '\u045e', + 'Ubreve;': '\u016c', + 'ubreve;': '\u016d', + 'Ucirc': '\xdb', + 'ucirc': '\xfb', + 'Ucirc;': '\xdb', + 'ucirc;': '\xfb', + 'Ucy;': '\u0423', + 'ucy;': '\u0443', + 'udarr;': '\u21c5', + 'Udblac;': '\u0170', + 'udblac;': '\u0171', + 'udhar;': '\u296e', + 'ufisht;': '\u297e', + 'Ufr;': '\U0001d518', + 'ufr;': '\U0001d532', + 'Ugrave': '\xd9', + 'ugrave': '\xf9', + 'Ugrave;': '\xd9', + 'ugrave;': '\xf9', + 'uHar;': '\u2963', + 'uharl;': '\u21bf', + 'uharr;': '\u21be', + 'uhblk;': '\u2580', + 'ulcorn;': '\u231c', + 'ulcorner;': '\u231c', + 'ulcrop;': '\u230f', + 'ultri;': '\u25f8', + 'Umacr;': '\u016a', + 'umacr;': '\u016b', + 'uml': '\xa8', + 'uml;': '\xa8', + 'UnderBar;': '_', + 'UnderBrace;': '\u23df', + 'UnderBracket;': '\u23b5', + 'UnderParenthesis;': '\u23dd', + 'Union;': '\u22c3', + 'UnionPlus;': '\u228e', + 'Uogon;': '\u0172', + 'uogon;': '\u0173', + 'Uopf;': '\U0001d54c', + 'uopf;': '\U0001d566', + 'UpArrow;': '\u2191', + 'Uparrow;': '\u21d1', + 'uparrow;': '\u2191', + 'UpArrowBar;': '\u2912', + 'UpArrowDownArrow;': '\u21c5', + 'UpDownArrow;': '\u2195', + 'Updownarrow;': '\u21d5', + 'updownarrow;': '\u2195', + 'UpEquilibrium;': '\u296e', + 'upharpoonleft;': '\u21bf', + 'upharpoonright;': '\u21be', + 'uplus;': '\u228e', + 'UpperLeftArrow;': '\u2196', + 'UpperRightArrow;': '\u2197', + 'Upsi;': '\u03d2', + 'upsi;': '\u03c5', + 'upsih;': '\u03d2', + 'Upsilon;': '\u03a5', + 
'upsilon;': '\u03c5', + 'UpTee;': '\u22a5', + 'UpTeeArrow;': '\u21a5', + 'upuparrows;': '\u21c8', + 'urcorn;': '\u231d', + 'urcorner;': '\u231d', + 'urcrop;': '\u230e', + 'Uring;': '\u016e', + 'uring;': '\u016f', + 'urtri;': '\u25f9', + 'Uscr;': '\U0001d4b0', + 'uscr;': '\U0001d4ca', + 'utdot;': '\u22f0', + 'Utilde;': '\u0168', + 'utilde;': '\u0169', + 'utri;': '\u25b5', + 'utrif;': '\u25b4', + 'uuarr;': '\u21c8', + 'Uuml': '\xdc', + 'uuml': '\xfc', + 'Uuml;': '\xdc', + 'uuml;': '\xfc', + 'uwangle;': '\u29a7', + 'vangrt;': '\u299c', + 'varepsilon;': '\u03f5', + 'varkappa;': '\u03f0', + 'varnothing;': '\u2205', + 'varphi;': '\u03d5', + 'varpi;': '\u03d6', + 'varpropto;': '\u221d', + 'vArr;': '\u21d5', + 'varr;': '\u2195', + 'varrho;': '\u03f1', + 'varsigma;': '\u03c2', + 'varsubsetneq;': '\u228a\ufe00', + 'varsubsetneqq;': '\u2acb\ufe00', + 'varsupsetneq;': '\u228b\ufe00', + 'varsupsetneqq;': '\u2acc\ufe00', + 'vartheta;': '\u03d1', + 'vartriangleleft;': '\u22b2', + 'vartriangleright;': '\u22b3', + 'Vbar;': '\u2aeb', + 'vBar;': '\u2ae8', + 'vBarv;': '\u2ae9', + 'Vcy;': '\u0412', + 'vcy;': '\u0432', + 'VDash;': '\u22ab', + 'Vdash;': '\u22a9', + 'vDash;': '\u22a8', + 'vdash;': '\u22a2', + 'Vdashl;': '\u2ae6', + 'Vee;': '\u22c1', + 'vee;': '\u2228', + 'veebar;': '\u22bb', + 'veeeq;': '\u225a', + 'vellip;': '\u22ee', + 'Verbar;': '\u2016', + 'verbar;': '|', + 'Vert;': '\u2016', + 'vert;': '|', + 'VerticalBar;': '\u2223', + 'VerticalLine;': '|', + 'VerticalSeparator;': '\u2758', + 'VerticalTilde;': '\u2240', + 'VeryThinSpace;': '\u200a', + 'Vfr;': '\U0001d519', + 'vfr;': '\U0001d533', + 'vltri;': '\u22b2', + 'vnsub;': '\u2282\u20d2', + 'vnsup;': '\u2283\u20d2', + 'Vopf;': '\U0001d54d', + 'vopf;': '\U0001d567', + 'vprop;': '\u221d', + 'vrtri;': '\u22b3', + 'Vscr;': '\U0001d4b1', + 'vscr;': '\U0001d4cb', + 'vsubnE;': '\u2acb\ufe00', + 'vsubne;': '\u228a\ufe00', + 'vsupnE;': '\u2acc\ufe00', + 'vsupne;': '\u228b\ufe00', + 'Vvdash;': '\u22aa', + 'vzigzag;': '\u299a', + 
'Wcirc;': '\u0174', + 'wcirc;': '\u0175', + 'wedbar;': '\u2a5f', + 'Wedge;': '\u22c0', + 'wedge;': '\u2227', + 'wedgeq;': '\u2259', + 'weierp;': '\u2118', + 'Wfr;': '\U0001d51a', + 'wfr;': '\U0001d534', + 'Wopf;': '\U0001d54e', + 'wopf;': '\U0001d568', + 'wp;': '\u2118', + 'wr;': '\u2240', + 'wreath;': '\u2240', + 'Wscr;': '\U0001d4b2', + 'wscr;': '\U0001d4cc', + 'xcap;': '\u22c2', + 'xcirc;': '\u25ef', + 'xcup;': '\u22c3', + 'xdtri;': '\u25bd', + 'Xfr;': '\U0001d51b', + 'xfr;': '\U0001d535', + 'xhArr;': '\u27fa', + 'xharr;': '\u27f7', + 'Xi;': '\u039e', + 'xi;': '\u03be', + 'xlArr;': '\u27f8', + 'xlarr;': '\u27f5', + 'xmap;': '\u27fc', + 'xnis;': '\u22fb', + 'xodot;': '\u2a00', + 'Xopf;': '\U0001d54f', + 'xopf;': '\U0001d569', + 'xoplus;': '\u2a01', + 'xotime;': '\u2a02', + 'xrArr;': '\u27f9', + 'xrarr;': '\u27f6', + 'Xscr;': '\U0001d4b3', + 'xscr;': '\U0001d4cd', + 'xsqcup;': '\u2a06', + 'xuplus;': '\u2a04', + 'xutri;': '\u25b3', + 'xvee;': '\u22c1', + 'xwedge;': '\u22c0', + 'Yacute': '\xdd', + 'yacute': '\xfd', + 'Yacute;': '\xdd', + 'yacute;': '\xfd', + 'YAcy;': '\u042f', + 'yacy;': '\u044f', + 'Ycirc;': '\u0176', + 'ycirc;': '\u0177', + 'Ycy;': '\u042b', + 'ycy;': '\u044b', + 'yen': '\xa5', + 'yen;': '\xa5', + 'Yfr;': '\U0001d51c', + 'yfr;': '\U0001d536', + 'YIcy;': '\u0407', + 'yicy;': '\u0457', + 'Yopf;': '\U0001d550', + 'yopf;': '\U0001d56a', + 'Yscr;': '\U0001d4b4', + 'yscr;': '\U0001d4ce', + 'YUcy;': '\u042e', + 'yucy;': '\u044e', + 'yuml': '\xff', + 'Yuml;': '\u0178', + 'yuml;': '\xff', + 'Zacute;': '\u0179', + 'zacute;': '\u017a', + 'Zcaron;': '\u017d', + 'zcaron;': '\u017e', + 'Zcy;': '\u0417', + 'zcy;': '\u0437', + 'Zdot;': '\u017b', + 'zdot;': '\u017c', + 'zeetrf;': '\u2128', + 'ZeroWidthSpace;': '\u200b', + 'Zeta;': '\u0396', + 'zeta;': '\u03b6', + 'Zfr;': '\u2128', + 'zfr;': '\U0001d537', + 'ZHcy;': '\u0416', + 'zhcy;': '\u0436', + 'zigrarr;': '\u21dd', + 'Zopf;': '\u2124', + 'zopf;': '\U0001d56b', + 'Zscr;': '\U0001d4b5', + 'zscr;': '\U0001d4cf', 
+ 'zwj;': '\u200d', + 'zwnj;': '\u200c', +} + +# maps the Unicode code point to the HTML entity name +codepoint2name = {} + +# maps the HTML entity name to the character +# (or a character reference if the character is outside the Latin-1 range) +entitydefs = {} + +for (name, codepoint) in name2codepoint.items(): + codepoint2name[codepoint] = name + entitydefs[name] = chr(codepoint) + +del name, codepoint diff --git a/parrot/lib/python3.10/html/parser.py b/parrot/lib/python3.10/html/parser.py new file mode 100644 index 0000000000000000000000000000000000000000..58f6bb3b1e932dbfa0081662eae1c54bb7ec918b --- /dev/null +++ b/parrot/lib/python3.10/html/parser.py @@ -0,0 +1,462 @@ +"""A parser for HTML and XHTML.""" + +# This file is based on sgmllib.py, but the API is slightly different. + +# XXX There should be a way to distinguish between PCDATA (parsed +# character data -- the normal case), RCDATA (replaceable character +# data -- only char and entity references and end tags are special) +# and CDATA (character data -- only end tags are special). + + +import re +import _markupbase + +from html import unescape + + +__all__ = ['HTMLParser'] + +# Regular expressions used for parsing + +interesting_normal = re.compile('[&<]') +incomplete = re.compile('&[a-zA-Z#]') + +entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]') +charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]') + +starttagopen = re.compile('<[a-zA-Z]') +piclose = re.compile('>') +commentclose = re.compile(r'--\s*>') +# Note: +# 1) if you change tagfind/attrfind remember to update locatestarttagend too; +# 2) if you change tagfind/attrfind and/or locatestarttagend the parser will +# explode, so don't do it. 
+# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state +# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state +tagfind_tolerant = re.compile(r'([a-zA-Z][^\t\n\r\f />\x00]*)(?:\s|/(?!>))*') +attrfind_tolerant = re.compile( + r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*' + r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*') +locatestarttagend_tolerant = re.compile(r""" + <[a-zA-Z][^\t\n\r\f />\x00]* # tag name + (?:[\s/]* # optional whitespace before attribute name + (?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name + (?:\s*=+\s* # value indicator + (?:'[^']*' # LITA-enclosed value + |"[^"]*" # LIT-enclosed value + |(?!['"])[^>\s]* # bare value + ) + \s* # possibly followed by a space + )?(?:\s|/(?!>))* + )* + )? + \s* # trailing whitespace +""", re.VERBOSE) +endendtag = re.compile('>') +# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between +# ') + + + +class HTMLParser(_markupbase.ParserBase): + """Find tags and other markup and call handler functions. + + Usage: + p = HTMLParser() + p.feed(data) + ... + p.close() + + Start tags are handled by calling self.handle_starttag() or + self.handle_startendtag(); end tags by self.handle_endtag(). The + data between tags is passed from the parser to the derived class + by calling self.handle_data() with the data as argument (the data + may be split up in arbitrary chunks). If convert_charrefs is + True the character references are converted automatically to the + corresponding Unicode character (and self.handle_data() is no + longer split in chunks), otherwise they are passed by calling + self.handle_entityref() or self.handle_charref() with the string + containing respectively the named or numeric reference as the + argument. + """ + + CDATA_CONTENT_ELEMENTS = ("script", "style") + + def __init__(self, *, convert_charrefs=True): + """Initialize and reset this instance. 
+ + If convert_charrefs is True (the default), all character references + are automatically converted to the corresponding Unicode characters. + """ + self.convert_charrefs = convert_charrefs + self.reset() + + def reset(self): + """Reset this instance. Loses all unprocessed data.""" + self.rawdata = '' + self.lasttag = '???' + self.interesting = interesting_normal + self.cdata_elem = None + _markupbase.ParserBase.reset(self) + + def feed(self, data): + r"""Feed data to the parser. + + Call this as often as you want, with as little or as much text + as you want (may include '\n'). + """ + self.rawdata = self.rawdata + data + self.goahead(0) + + def close(self): + """Handle any buffered data.""" + self.goahead(1) + + __starttag_text = None + + def get_starttag_text(self): + """Return full source of start tag: '<...>'.""" + return self.__starttag_text + + def set_cdata_mode(self, elem): + self.cdata_elem = elem.lower() + self.interesting = re.compile(r'' % self.cdata_elem, re.I) + + def clear_cdata_mode(self): + self.interesting = interesting_normal + self.cdata_elem = None + + # Internal -- handle data as far as reasonable. May leave state + # and data to be processed by a subsequent call. If 'end' is + # true, force handling all data as if followed by EOF marker. + def goahead(self, end): + rawdata = self.rawdata + i = 0 + n = len(rawdata) + while i < n: + if self.convert_charrefs and not self.cdata_elem: + j = rawdata.find('<', i) + if j < 0: + # if we can't find the next <, either we are at the end + # or there's more text incoming. If the latter is True, + # we can't pass the text to handle_data in case we have + # a charref cut in half at end. Try to determine if + # this is the case before proceeding by looking for an + # & near the end and see if it's followed by a space or ;. 
+ amppos = rawdata.rfind('&', max(i, n-34)) + if (amppos >= 0 and + not re.compile(r'[\s;]').search(rawdata, amppos)): + break # wait till we get all the text + j = n + else: + match = self.interesting.search(rawdata, i) # < or & + if match: + j = match.start() + else: + if self.cdata_elem: + break + j = n + if i < j: + if self.convert_charrefs and not self.cdata_elem: + self.handle_data(unescape(rawdata[i:j])) + else: + self.handle_data(rawdata[i:j]) + i = self.updatepos(i, j) + if i == n: break + startswith = rawdata.startswith + if startswith('<', i): + if starttagopen.match(rawdata, i): # < + letter + k = self.parse_starttag(i) + elif startswith("', i + 1) + if k < 0: + k = rawdata.find('<', i + 1) + if k < 0: + k = i + 1 + else: + k += 1 + if self.convert_charrefs and not self.cdata_elem: + self.handle_data(unescape(rawdata[i:k])) + else: + self.handle_data(rawdata[i:k]) + i = self.updatepos(i, k) + elif startswith("&#", i): + match = charref.match(rawdata, i) + if match: + name = match.group()[2:-1] + self.handle_charref(name) + k = match.end() + if not startswith(';', k-1): + k = k - 1 + i = self.updatepos(i, k) + continue + else: + if ";" in rawdata[i:]: # bail by consuming &# + self.handle_data(rawdata[i:i+2]) + i = self.updatepos(i, i+2) + break + elif startswith('&', i): + match = entityref.match(rawdata, i) + if match: + name = match.group(1) + self.handle_entityref(name) + k = match.end() + if not startswith(';', k-1): + k = k - 1 + i = self.updatepos(i, k) + continue + match = incomplete.match(rawdata, i) + if match: + # match.group() will contain at least 2 chars + if end and match.group() == rawdata[i:]: + k = match.end() + if k <= i: + k = n + i = self.updatepos(i, i + 1) + # incomplete + break + elif (i + 1) < n: + # not the end of the buffer, and can't be confused + # with some other construct + self.handle_data("&") + i = self.updatepos(i, i + 1) + else: + break + else: + assert 0, "interesting.search() lied" + # end while + if end and i < n and 
not self.cdata_elem: + if self.convert_charrefs and not self.cdata_elem: + self.handle_data(unescape(rawdata[i:n])) + else: + self.handle_data(rawdata[i:n]) + i = self.updatepos(i, n) + self.rawdata = rawdata[i:] + + # Internal -- parse html declarations, return length or -1 if not terminated + # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state + # See also parse_declaration in _markupbase + def parse_html_declaration(self, i): + rawdata = self.rawdata + assert rawdata[i:i+2] == ' + gtpos = rawdata.find('>', i+9) + if gtpos == -1: + return -1 + self.handle_decl(rawdata[i+2:gtpos]) + return gtpos+1 + else: + return self.parse_bogus_comment(i) + + # Internal -- parse bogus comment, return length or -1 if not terminated + # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state + def parse_bogus_comment(self, i, report=1): + rawdata = self.rawdata + assert rawdata[i:i+2] in ('', i+2) + if pos == -1: + return -1 + if report: + self.handle_comment(rawdata[i+2:pos]) + return pos + 1 + + # Internal -- parse processing instr, return end or -1 if not terminated + def parse_pi(self, i): + rawdata = self.rawdata + assert rawdata[i:i+2] == ' + if not match: + return -1 + j = match.start() + self.handle_pi(rawdata[i+2: j]) + j = match.end() + return j + + # Internal -- handle starttag, return end or -1 if not terminated + def parse_starttag(self, i): + self.__starttag_text = None + endpos = self.check_for_whole_start_tag(i) + if endpos < 0: + return endpos + rawdata = self.rawdata + self.__starttag_text = rawdata[i:endpos] + + # Now parse the data between i+1 and j into a tag and attrs + attrs = [] + match = tagfind_tolerant.match(rawdata, i+1) + assert match, 'unexpected call to parse_starttag()' + k = match.end() + self.lasttag = tag = match.group(1).lower() + while k < endpos: + m = attrfind_tolerant.match(rawdata, k) + if not m: + break + attrname, rest, attrvalue = m.group(1, 2, 3) + if not rest: + attrvalue = None + elif attrvalue[:1] == 
'\'' == attrvalue[-1:] or \ + attrvalue[:1] == '"' == attrvalue[-1:]: + attrvalue = attrvalue[1:-1] + if attrvalue: + attrvalue = unescape(attrvalue) + attrs.append((attrname.lower(), attrvalue)) + k = m.end() + + end = rawdata[k:endpos].strip() + if end not in (">", "/>"): + lineno, offset = self.getpos() + if "\n" in self.__starttag_text: + lineno = lineno + self.__starttag_text.count("\n") + offset = len(self.__starttag_text) \ + - self.__starttag_text.rfind("\n") + else: + offset = offset + len(self.__starttag_text) + self.handle_data(rawdata[i:endpos]) + return endpos + if end.endswith('/>'): + # XHTML-style empty tag: + self.handle_startendtag(tag, attrs) + else: + self.handle_starttag(tag, attrs) + if tag in self.CDATA_CONTENT_ELEMENTS: + self.set_cdata_mode(tag) + return endpos + + # Internal -- check to see if we have a complete starttag; return end + # or -1 if incomplete. + def check_for_whole_start_tag(self, i): + rawdata = self.rawdata + m = locatestarttagend_tolerant.match(rawdata, i) + if m: + j = m.end() + next = rawdata[j:j+1] + if next == ">": + return j + 1 + if next == "/": + if rawdata.startswith("/>", j): + return j + 2 + if rawdata.startswith("/", j): + # buffer boundary + return -1 + # else bogus input + if j > i: + return j + else: + return i + 1 + if next == "": + # end of input + return -1 + if next in ("abcdefghijklmnopqrstuvwxyz=/" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ"): + # end of input in or before attribute value, or we have the + # '/' from a '/>' ending + return -1 + if j > i: + return j + else: + return i + 1 + raise AssertionError("we should not get here!") + + # Internal -- parse endtag, return end or -1 if incomplete + def parse_endtag(self, i): + rawdata = self.rawdata + assert rawdata[i:i+2] == " + if not match: + return -1 + gtpos = match.end() + match = endtagfind.match(rawdata, i) # + if not match: + if self.cdata_elem is not None: + self.handle_data(rawdata[i:gtpos]) + return gtpos + # find the name: 
w3.org/TR/html5/tokenization.html#tag-name-state + namematch = tagfind_tolerant.match(rawdata, i+2) + if not namematch: + # w3.org/TR/html5/tokenization.html#end-tag-open-state + if rawdata[i:i+3] == '': + return i+3 + else: + return self.parse_bogus_comment(i) + tagname = namematch.group(1).lower() + # consume and ignore other stuff between the name and the > + # Note: this is not 100% correct, since we might have things like + # , but looking for > after the name should cover + # most of the cases and is much simpler + gtpos = rawdata.find('>', namematch.end()) + self.handle_endtag(tagname) + return gtpos+1 + + elem = match.group(1).lower() # script or style + if self.cdata_elem is not None: + if elem != self.cdata_elem: + self.handle_data(rawdata[i:gtpos]) + return gtpos + + self.handle_endtag(elem) + self.clear_cdata_mode() + return gtpos + + # Overridable -- finish processing of start+end tag: + def handle_startendtag(self, tag, attrs): + self.handle_starttag(tag, attrs) + self.handle_endtag(tag) + + # Overridable -- handle start tag + def handle_starttag(self, tag, attrs): + pass + + # Overridable -- handle end tag + def handle_endtag(self, tag): + pass + + # Overridable -- handle character reference + def handle_charref(self, name): + pass + + # Overridable -- handle entity reference + def handle_entityref(self, name): + pass + + # Overridable -- handle data + def handle_data(self, data): + pass + + # Overridable -- handle comment + def handle_comment(self, data): + pass + + # Overridable -- handle declaration + def handle_decl(self, decl): + pass + + # Overridable -- handle processing instruction + def handle_pi(self, data): + pass + + def unknown_decl(self, data): + pass diff --git a/parrot/lib/python3.10/pydoc_data/topics.py b/parrot/lib/python3.10/pydoc_data/topics.py new file mode 100644 index 0000000000000000000000000000000000000000..8db507a61f2a665cf911abd0e554a98599577344 --- /dev/null +++ b/parrot/lib/python3.10/pydoc_data/topics.py @@ -0,0 +1,15370 
@@ +# -*- coding: utf-8 -*- +# Autogenerated by Sphinx on Tue Dec 3 12:26:47 2024 +topics = {'assert': 'The "assert" statement\n' + '**********************\n' + '\n' + 'Assert statements are a convenient way to insert debugging ' + 'assertions\n' + 'into a program:\n' + '\n' + ' assert_stmt ::= "assert" expression ["," expression]\n' + '\n' + 'The simple form, "assert expression", is equivalent to\n' + '\n' + ' if __debug__:\n' + ' if not expression: raise AssertionError\n' + '\n' + 'The extended form, "assert expression1, expression2", is ' + 'equivalent to\n' + '\n' + ' if __debug__:\n' + ' if not expression1: raise AssertionError(expression2)\n' + '\n' + 'These equivalences assume that "__debug__" and "AssertionError" ' + 'refer\n' + 'to the built-in variables with those names. In the current\n' + 'implementation, the built-in variable "__debug__" is "True" under\n' + 'normal circumstances, "False" when optimization is requested ' + '(command\n' + 'line option "-O"). The current code generator emits no code for ' + 'an\n' + 'assert statement when optimization is requested at compile time. ' + 'Note\n' + 'that it is unnecessary to include the source code for the ' + 'expression\n' + 'that failed in the error message; it will be displayed as part of ' + 'the\n' + 'stack trace.\n' + '\n' + 'Assignments to "__debug__" are illegal. 
The value for the ' + 'built-in\n' + 'variable is determined when the interpreter starts.\n', + 'assignment': 'Assignment statements\n' + '*********************\n' + '\n' + 'Assignment statements are used to (re)bind names to values and ' + 'to\n' + 'modify attributes or items of mutable objects:\n' + '\n' + ' assignment_stmt ::= (target_list "=")+ (starred_expression ' + '| yield_expression)\n' + ' target_list ::= target ("," target)* [","]\n' + ' target ::= identifier\n' + ' | "(" [target_list] ")"\n' + ' | "[" [target_list] "]"\n' + ' | attributeref\n' + ' | subscription\n' + ' | slicing\n' + ' | "*" target\n' + '\n' + '(See section Primaries for the syntax definitions for ' + '*attributeref*,\n' + '*subscription*, and *slicing*.)\n' + '\n' + 'An assignment statement evaluates the expression list ' + '(remember that\n' + 'this can be a single expression or a comma-separated list, the ' + 'latter\n' + 'yielding a tuple) and assigns the single resulting object to ' + 'each of\n' + 'the target lists, from left to right.\n' + '\n' + 'Assignment is defined recursively depending on the form of the ' + 'target\n' + '(list). When a target is part of a mutable object (an ' + 'attribute\n' + 'reference, subscription or slicing), the mutable object must\n' + 'ultimately perform the assignment and decide about its ' + 'validity, and\n' + 'may raise an exception if the assignment is unacceptable. 
The ' + 'rules\n' + 'observed by various types and the exceptions raised are given ' + 'with the\n' + 'definition of the object types (see section The standard type\n' + 'hierarchy).\n' + '\n' + 'Assignment of an object to a target list, optionally enclosed ' + 'in\n' + 'parentheses or square brackets, is recursively defined as ' + 'follows.\n' + '\n' + '* If the target list is a single target with no trailing ' + 'comma,\n' + ' optionally in parentheses, the object is assigned to that ' + 'target.\n' + '\n' + '* Else:\n' + '\n' + ' * If the target list contains one target prefixed with an ' + 'asterisk,\n' + ' called a “starred” target: The object must be an iterable ' + 'with at\n' + ' least as many items as there are targets in the target ' + 'list, minus\n' + ' one. The first items of the iterable are assigned, from ' + 'left to\n' + ' right, to the targets before the starred target. The ' + 'final items\n' + ' of the iterable are assigned to the targets after the ' + 'starred\n' + ' target. A list of the remaining items in the iterable is ' + 'then\n' + ' assigned to the starred target (the list can be empty).\n' + '\n' + ' * Else: The object must be an iterable with the same number ' + 'of items\n' + ' as there are targets in the target list, and the items ' + 'are\n' + ' assigned, from left to right, to the corresponding ' + 'targets.\n' + '\n' + 'Assignment of an object to a single target is recursively ' + 'defined as\n' + 'follows.\n' + '\n' + '* If the target is an identifier (name):\n' + '\n' + ' * If the name does not occur in a "global" or "nonlocal" ' + 'statement\n' + ' in the current code block: the name is bound to the object ' + 'in the\n' + ' current local namespace.\n' + '\n' + ' * Otherwise: the name is bound to the object in the global ' + 'namespace\n' + ' or the outer namespace determined by "nonlocal", ' + 'respectively.\n' + '\n' + ' The name is rebound if it was already bound. 
This may cause ' + 'the\n' + ' reference count for the object previously bound to the name ' + 'to reach\n' + ' zero, causing the object to be deallocated and its ' + 'destructor (if it\n' + ' has one) to be called.\n' + '\n' + '* If the target is an attribute reference: The primary ' + 'expression in\n' + ' the reference is evaluated. It should yield an object with\n' + ' assignable attributes; if this is not the case, "TypeError" ' + 'is\n' + ' raised. That object is then asked to assign the assigned ' + 'object to\n' + ' the given attribute; if it cannot perform the assignment, it ' + 'raises\n' + ' an exception (usually but not necessarily ' + '"AttributeError").\n' + '\n' + ' Note: If the object is a class instance and the attribute ' + 'reference\n' + ' occurs on both sides of the assignment operator, the ' + 'right-hand side\n' + ' expression, "a.x" can access either an instance attribute or ' + '(if no\n' + ' instance attribute exists) a class attribute. The left-hand ' + 'side\n' + ' target "a.x" is always set as an instance attribute, ' + 'creating it if\n' + ' necessary. Thus, the two occurrences of "a.x" do not ' + 'necessarily\n' + ' refer to the same attribute: if the right-hand side ' + 'expression\n' + ' refers to a class attribute, the left-hand side creates a ' + 'new\n' + ' instance attribute as the target of the assignment:\n' + '\n' + ' class Cls:\n' + ' x = 3 # class variable\n' + ' inst = Cls()\n' + ' inst.x = inst.x + 1 # writes inst.x as 4 leaving Cls.x ' + 'as 3\n' + '\n' + ' This description does not necessarily apply to descriptor\n' + ' attributes, such as properties created with "property()".\n' + '\n' + '* If the target is a subscription: The primary expression in ' + 'the\n' + ' reference is evaluated. 
It should yield either a mutable ' + 'sequence\n' + ' object (such as a list) or a mapping object (such as a ' + 'dictionary).\n' + ' Next, the subscript expression is evaluated.\n' + '\n' + ' If the primary is a mutable sequence object (such as a ' + 'list), the\n' + ' subscript must yield an integer. If it is negative, the ' + 'sequence’s\n' + ' length is added to it. The resulting value must be a ' + 'nonnegative\n' + ' integer less than the sequence’s length, and the sequence is ' + 'asked\n' + ' to assign the assigned object to its item with that index. ' + 'If the\n' + ' index is out of range, "IndexError" is raised (assignment to ' + 'a\n' + ' subscripted sequence cannot add new items to a list).\n' + '\n' + ' If the primary is a mapping object (such as a dictionary), ' + 'the\n' + ' subscript must have a type compatible with the mapping’s key ' + 'type,\n' + ' and the mapping is then asked to create a key/datum pair ' + 'which maps\n' + ' the subscript to the assigned object. This can either ' + 'replace an\n' + ' existing key/value pair with the same key value, or insert a ' + 'new\n' + ' key/value pair (if no key with the same value existed).\n' + '\n' + ' For user-defined objects, the "__setitem__()" method is ' + 'called with\n' + ' appropriate arguments.\n' + '\n' + '* If the target is a slicing: The primary expression in the ' + 'reference\n' + ' is evaluated. It should yield a mutable sequence object ' + '(such as a\n' + ' list). The assigned object should be a sequence object of ' + 'the same\n' + ' type. Next, the lower and upper bound expressions are ' + 'evaluated,\n' + ' insofar they are present; defaults are zero and the ' + 'sequence’s\n' + ' length. The bounds should evaluate to integers. If either ' + 'bound is\n' + ' negative, the sequence’s length is added to it. The ' + 'resulting\n' + ' bounds are clipped to lie between zero and the sequence’s ' + 'length,\n' + ' inclusive. 
Finally, the sequence object is asked to replace ' + 'the\n' + ' slice with the items of the assigned sequence. The length ' + 'of the\n' + ' slice may be different from the length of the assigned ' + 'sequence,\n' + ' thus changing the length of the target sequence, if the ' + 'target\n' + ' sequence allows it.\n' + '\n' + '**CPython implementation detail:** In the current ' + 'implementation, the\n' + 'syntax for targets is taken to be the same as for expressions, ' + 'and\n' + 'invalid syntax is rejected during the code generation phase, ' + 'causing\n' + 'less detailed error messages.\n' + '\n' + 'Although the definition of assignment implies that overlaps ' + 'between\n' + 'the left-hand side and the right-hand side are ‘simultaneous’ ' + '(for\n' + 'example "a, b = b, a" swaps two variables), overlaps *within* ' + 'the\n' + 'collection of assigned-to variables occur left-to-right, ' + 'sometimes\n' + 'resulting in confusion. For instance, the following program ' + 'prints\n' + '"[0, 2]":\n' + '\n' + ' x = [0, 1]\n' + ' i = 0\n' + ' i, x[i] = 1, 2 # i is updated, then x[i] is ' + 'updated\n' + ' print(x)\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 3132** - Extended Iterable Unpacking\n' + ' The specification for the "*target" feature.\n' + '\n' + '\n' + 'Augmented assignment statements\n' + '===============================\n' + '\n' + 'Augmented assignment is the combination, in a single ' + 'statement, of a\n' + 'binary operation and an assignment statement:\n' + '\n' + ' augmented_assignment_stmt ::= augtarget augop ' + '(expression_list | yield_expression)\n' + ' augtarget ::= identifier | attributeref | ' + 'subscription | slicing\n' + ' augop ::= "+=" | "-=" | "*=" | "@=" | ' + '"/=" | "//=" | "%=" | "**="\n' + ' | ">>=" | "<<=" | "&=" | "^=" | "|="\n' + '\n' + '(See section Primaries for the syntax definitions of the last ' + 'three\n' + 'symbols.)\n' + '\n' + 'An augmented assignment evaluates the target (which, unlike ' + 'normal\n' + 'assignment 
statements, cannot be an unpacking) and the ' + 'expression\n' + 'list, performs the binary operation specific to the type of ' + 'assignment\n' + 'on the two operands, and assigns the result to the original ' + 'target.\n' + 'The target is only evaluated once.\n' + '\n' + 'An augmented assignment expression like "x += 1" can be ' + 'rewritten as\n' + '"x = x + 1" to achieve a similar, but not exactly equal ' + 'effect. In the\n' + 'augmented version, "x" is only evaluated once. Also, when ' + 'possible,\n' + 'the actual operation is performed *in-place*, meaning that ' + 'rather than\n' + 'creating a new object and assigning that to the target, the ' + 'old object\n' + 'is modified instead.\n' + '\n' + 'Unlike normal assignments, augmented assignments evaluate the ' + 'left-\n' + 'hand side *before* evaluating the right-hand side. For ' + 'example, "a[i]\n' + '+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and ' + 'performs\n' + 'the addition, and lastly, it writes the result back to ' + '"a[i]".\n' + '\n' + 'With the exception of assigning to tuples and multiple targets ' + 'in a\n' + 'single statement, the assignment done by augmented assignment\n' + 'statements is handled the same way as normal assignments. 
' + 'Similarly,\n' + 'with the exception of the possible *in-place* behavior, the ' + 'binary\n' + 'operation performed by augmented assignment is the same as the ' + 'normal\n' + 'binary operations.\n' + '\n' + 'For targets which are attribute references, the same caveat ' + 'about\n' + 'class and instance attributes applies as for regular ' + 'assignments.\n' + '\n' + '\n' + 'Annotated assignment statements\n' + '===============================\n' + '\n' + '*Annotation* assignment is the combination, in a single ' + 'statement, of\n' + 'a variable or attribute annotation and an optional assignment\n' + 'statement:\n' + '\n' + ' annotated_assignment_stmt ::= augtarget ":" expression\n' + ' ["=" (starred_expression | ' + 'yield_expression)]\n' + '\n' + 'The difference from normal Assignment statements is that only ' + 'a single\n' + 'target is allowed.\n' + '\n' + 'For simple names as assignment targets, if in class or module ' + 'scope,\n' + 'the annotations are evaluated and stored in a special class or ' + 'module\n' + 'attribute "__annotations__" that is a dictionary mapping from ' + 'variable\n' + 'names (mangled if private) to evaluated annotations. This ' + 'attribute is\n' + 'writable and is automatically created at the start of class or ' + 'module\n' + 'body execution, if annotations are found statically.\n' + '\n' + 'For expressions as assignment targets, the annotations are ' + 'evaluated\n' + 'if in class or module scope, but not stored.\n' + '\n' + 'If a name is annotated in a function scope, then this name is ' + 'local\n' + 'for that scope. Annotations are never evaluated and stored in ' + 'function\n' + 'scopes.\n' + '\n' + 'If the right hand side is present, an annotated assignment ' + 'performs\n' + 'the actual assignment before evaluating annotations (where\n' + 'applicable). 
If the right hand side is not present for an ' + 'expression\n' + 'target, then the interpreter evaluates the target except for ' + 'the last\n' + '"__setitem__()" or "__setattr__()" call.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 526** - Syntax for Variable Annotations\n' + ' The proposal that added syntax for annotating the types ' + 'of\n' + ' variables (including class variables and instance ' + 'variables),\n' + ' instead of expressing them through comments.\n' + '\n' + ' **PEP 484** - Type hints\n' + ' The proposal that added the "typing" module to provide a ' + 'standard\n' + ' syntax for type annotations that can be used in static ' + 'analysis\n' + ' tools and IDEs.\n' + '\n' + 'Changed in version 3.8: Now annotated assignments allow the ' + 'same\n' + 'expressions in the right hand side as regular assignments. ' + 'Previously,\n' + 'some expressions (like un-parenthesized tuple expressions) ' + 'caused a\n' + 'syntax error.\n', + 'async': 'Coroutines\n' + '**********\n' + '\n' + 'New in version 3.5.\n' + '\n' + '\n' + 'Coroutine function definition\n' + '=============================\n' + '\n' + ' async_funcdef ::= [decorators] "async" "def" funcname "(" ' + '[parameter_list] ")"\n' + ' ["->" expression] ":" suite\n' + '\n' + 'Execution of Python coroutines can be suspended and resumed at ' + 'many\n' + 'points (see *coroutine*). 
"await" expressions, "async for" and ' + '"async\n' + 'with" can only be used in the body of a coroutine function.\n' + '\n' + 'Functions defined with "async def" syntax are always coroutine\n' + 'functions, even if they do not contain "await" or "async" ' + 'keywords.\n' + '\n' + 'It is a "SyntaxError" to use a "yield from" expression inside the ' + 'body\n' + 'of a coroutine function.\n' + '\n' + 'An example of a coroutine function:\n' + '\n' + ' async def func(param1, param2):\n' + ' do_stuff()\n' + ' await some_coroutine()\n' + '\n' + 'Changed in version 3.7: "await" and "async" are now keywords;\n' + 'previously they were only treated as such inside the body of a\n' + 'coroutine function.\n' + '\n' + '\n' + 'The "async for" statement\n' + '=========================\n' + '\n' + ' async_for_stmt ::= "async" for_stmt\n' + '\n' + 'An *asynchronous iterable* provides an "__aiter__" method that\n' + 'directly returns an *asynchronous iterator*, which can call\n' + 'asynchronous code in its "__anext__" method.\n' + '\n' + 'The "async for" statement allows convenient iteration over\n' + 'asynchronous iterables.\n' + '\n' + 'The following code:\n' + '\n' + ' async for TARGET in ITER:\n' + ' SUITE\n' + ' else:\n' + ' SUITE2\n' + '\n' + 'Is semantically equivalent to:\n' + '\n' + ' iter = (ITER)\n' + ' iter = type(iter).__aiter__(iter)\n' + ' running = True\n' + '\n' + ' while running:\n' + ' try:\n' + ' TARGET = await type(iter).__anext__(iter)\n' + ' except StopAsyncIteration:\n' + ' running = False\n' + ' else:\n' + ' SUITE\n' + ' else:\n' + ' SUITE2\n' + '\n' + 'See also "__aiter__()" and "__anext__()" for details.\n' + '\n' + 'It is a "SyntaxError" to use an "async for" statement outside the ' + 'body\n' + 'of a coroutine function.\n' + '\n' + '\n' + 'The "async with" statement\n' + '==========================\n' + '\n' + ' async_with_stmt ::= "async" with_stmt\n' + '\n' + 'An *asynchronous context manager* is a *context manager* that is ' + 'able\n' + 'to suspend 
execution in its *enter* and *exit* methods.\n' + '\n' + 'The following code:\n' + '\n' + ' async with EXPRESSION as TARGET:\n' + ' SUITE\n' + '\n' + 'is semantically equivalent to:\n' + '\n' + ' manager = (EXPRESSION)\n' + ' aenter = type(manager).__aenter__\n' + ' aexit = type(manager).__aexit__\n' + ' value = await aenter(manager)\n' + ' hit_except = False\n' + '\n' + ' try:\n' + ' TARGET = value\n' + ' SUITE\n' + ' except:\n' + ' hit_except = True\n' + ' if not await aexit(manager, *sys.exc_info()):\n' + ' raise\n' + ' finally:\n' + ' if not hit_except:\n' + ' await aexit(manager, None, None, None)\n' + '\n' + 'See also "__aenter__()" and "__aexit__()" for details.\n' + '\n' + 'It is a "SyntaxError" to use an "async with" statement outside the\n' + 'body of a coroutine function.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 492** - Coroutines with async and await syntax\n' + ' The proposal that made coroutines a proper standalone concept ' + 'in\n' + ' Python, and added supporting syntax.\n' + '\n' + '-[ Footnotes ]-\n' + '\n' + '[1] The exception is propagated to the invocation stack unless ' + 'there\n' + ' is a "finally" clause which happens to raise another ' + 'exception.\n' + ' That new exception causes the old one to be lost.\n' + '\n' + '[2] In pattern matching, a sequence is defined as one of the\n' + ' following:\n' + '\n' + ' * a class that inherits from "collections.abc.Sequence"\n' + '\n' + ' * a Python class that has been registered as\n' + ' "collections.abc.Sequence"\n' + '\n' + ' * a builtin class that has its (CPython) ' + '"Py_TPFLAGS_SEQUENCE"\n' + ' bit set\n' + '\n' + ' * a class that inherits from any of the above\n' + '\n' + ' The following standard library classes are sequences:\n' + '\n' + ' * "array.array"\n' + '\n' + ' * "collections.deque"\n' + '\n' + ' * "list"\n' + '\n' + ' * "memoryview"\n' + '\n' + ' * "range"\n' + '\n' + ' * "tuple"\n' + '\n' + ' Note:\n' + '\n' + ' Subject values of type "str", "bytes", and "bytearray" do ' + 
'not\n' + ' match sequence patterns.\n' + '\n' + '[3] In pattern matching, a mapping is defined as one of the ' + 'following:\n' + '\n' + ' * a class that inherits from "collections.abc.Mapping"\n' + '\n' + ' * a Python class that has been registered as\n' + ' "collections.abc.Mapping"\n' + '\n' + ' * a builtin class that has its (CPython) ' + '"Py_TPFLAGS_MAPPING"\n' + ' bit set\n' + '\n' + ' * a class that inherits from any of the above\n' + '\n' + ' The standard library classes "dict" and ' + '"types.MappingProxyType"\n' + ' are mappings.\n' + '\n' + '[4] A string literal appearing as the first statement in the ' + 'function\n' + ' body is transformed into the function’s "__doc__" attribute ' + 'and\n' + ' therefore the function’s *docstring*.\n' + '\n' + '[5] A string literal appearing as the first statement in the class\n' + ' body is transformed into the namespace’s "__doc__" item and\n' + ' therefore the class’s *docstring*.\n', + 'atom-identifiers': 'Identifiers (Names)\n' + '*******************\n' + '\n' + 'An identifier occurring as an atom is a name. See ' + 'section Identifiers\n' + 'and keywords for lexical definition and section Naming ' + 'and binding for\n' + 'documentation of naming and binding.\n' + '\n' + 'When the name is bound to an object, evaluation of the ' + 'atom yields\n' + 'that object. When a name is not bound, an attempt to ' + 'evaluate it\n' + 'raises a "NameError" exception.\n' + '\n' + '**Private name mangling:** When an identifier that ' + 'textually occurs in\n' + 'a class definition begins with two or more underscore ' + 'characters and\n' + 'does not end in two or more underscores, it is ' + 'considered a *private\n' + 'name* of that class. Private names are transformed to a ' + 'longer form\n' + 'before code is generated for them. The transformation ' + 'inserts the\n' + 'class name, with leading underscores removed and a ' + 'single underscore\n' + 'inserted, in front of the name. 
For example, the ' + 'identifier "__spam"\n' + 'occurring in a class named "Ham" will be transformed to ' + '"_Ham__spam".\n' + 'This transformation is independent of the syntactical ' + 'context in which\n' + 'the identifier is used. If the transformed name is ' + 'extremely long\n' + '(longer than 255 characters), implementation defined ' + 'truncation may\n' + 'happen. If the class name consists only of underscores, ' + 'no\n' + 'transformation is done.\n', + 'atom-literals': 'Literals\n' + '********\n' + '\n' + 'Python supports string and bytes literals and various ' + 'numeric\n' + 'literals:\n' + '\n' + ' literal ::= stringliteral | bytesliteral\n' + ' | integer | floatnumber | imagnumber\n' + '\n' + 'Evaluation of a literal yields an object of the given type ' + '(string,\n' + 'bytes, integer, floating point number, complex number) with ' + 'the given\n' + 'value. The value may be approximated in the case of ' + 'floating point\n' + 'and imaginary (complex) literals. See section Literals for ' + 'details.\n' + '\n' + 'All literals correspond to immutable data types, and hence ' + 'the\n' + 'object’s identity is less important than its value. 
' + 'Multiple\n' + 'evaluations of literals with the same value (either the ' + 'same\n' + 'occurrence in the program text or a different occurrence) ' + 'may obtain\n' + 'the same object or a different object with the same ' + 'value.\n', + 'attribute-access': 'Customizing attribute access\n' + '****************************\n' + '\n' + 'The following methods can be defined to customize the ' + 'meaning of\n' + 'attribute access (use of, assignment to, or deletion of ' + '"x.name") for\n' + 'class instances.\n' + '\n' + 'object.__getattr__(self, name)\n' + '\n' + ' Called when the default attribute access fails with ' + 'an\n' + ' "AttributeError" (either "__getattribute__()" raises ' + 'an\n' + ' "AttributeError" because *name* is not an instance ' + 'attribute or an\n' + ' attribute in the class tree for "self"; or ' + '"__get__()" of a *name*\n' + ' property raises "AttributeError"). This method ' + 'should either\n' + ' return the (computed) attribute value or raise an ' + '"AttributeError"\n' + ' exception.\n' + '\n' + ' Note that if the attribute is found through the ' + 'normal mechanism,\n' + ' "__getattr__()" is not called. (This is an ' + 'intentional asymmetry\n' + ' between "__getattr__()" and "__setattr__()".) This is ' + 'done both for\n' + ' efficiency reasons and because otherwise ' + '"__getattr__()" would have\n' + ' no way to access other attributes of the instance. ' + 'Note that at\n' + ' least for instance variables, you can fake total ' + 'control by not\n' + ' inserting any values in the instance attribute ' + 'dictionary (but\n' + ' instead inserting them in another object). See the\n' + ' "__getattribute__()" method below for a way to ' + 'actually get total\n' + ' control over attribute access.\n' + '\n' + 'object.__getattribute__(self, name)\n' + '\n' + ' Called unconditionally to implement attribute ' + 'accesses for\n' + ' instances of the class. 
If the class also defines ' + '"__getattr__()",\n' + ' the latter will not be called unless ' + '"__getattribute__()" either\n' + ' calls it explicitly or raises an "AttributeError". ' + 'This method\n' + ' should return the (computed) attribute value or raise ' + 'an\n' + ' "AttributeError" exception. In order to avoid ' + 'infinite recursion in\n' + ' this method, its implementation should always call ' + 'the base class\n' + ' method with the same name to access any attributes it ' + 'needs, for\n' + ' example, "object.__getattribute__(self, name)".\n' + '\n' + ' Note:\n' + '\n' + ' This method may still be bypassed when looking up ' + 'special methods\n' + ' as the result of implicit invocation via language ' + 'syntax or\n' + ' built-in functions. See Special method lookup.\n' + '\n' + ' For certain sensitive attribute accesses, raises an ' + 'auditing event\n' + ' "object.__getattr__" with arguments "obj" and ' + '"name".\n' + '\n' + 'object.__setattr__(self, name, value)\n' + '\n' + ' Called when an attribute assignment is attempted. ' + 'This is called\n' + ' instead of the normal mechanism (i.e. store the value ' + 'in the\n' + ' instance dictionary). *name* is the attribute name, ' + '*value* is the\n' + ' value to be assigned to it.\n' + '\n' + ' If "__setattr__()" wants to assign to an instance ' + 'attribute, it\n' + ' should call the base class method with the same name, ' + 'for example,\n' + ' "object.__setattr__(self, name, value)".\n' + '\n' + ' For certain sensitive attribute assignments, raises ' + 'an auditing\n' + ' event "object.__setattr__" with arguments "obj", ' + '"name", "value".\n' + '\n' + 'object.__delattr__(self, name)\n' + '\n' + ' Like "__setattr__()" but for attribute deletion ' + 'instead of\n' + ' assignment. 
This should only be implemented if "del ' + 'obj.name" is\n' + ' meaningful for the object.\n' + '\n' + ' For certain sensitive attribute deletions, raises an ' + 'auditing event\n' + ' "object.__delattr__" with arguments "obj" and ' + '"name".\n' + '\n' + 'object.__dir__(self)\n' + '\n' + ' Called when "dir()" is called on the object. A ' + 'sequence must be\n' + ' returned. "dir()" converts the returned sequence to a ' + 'list and\n' + ' sorts it.\n' + '\n' + '\n' + 'Customizing module attribute access\n' + '===================================\n' + '\n' + 'Special names "__getattr__" and "__dir__" can be also ' + 'used to\n' + 'customize access to module attributes. The "__getattr__" ' + 'function at\n' + 'the module level should accept one argument which is the ' + 'name of an\n' + 'attribute and return the computed value or raise an ' + '"AttributeError".\n' + 'If an attribute is not found on a module object through ' + 'the normal\n' + 'lookup, i.e. "object.__getattribute__()", then ' + '"__getattr__" is\n' + 'searched in the module "__dict__" before raising an ' + '"AttributeError".\n' + 'If found, it is called with the attribute name and the ' + 'result is\n' + 'returned.\n' + '\n' + 'The "__dir__" function should accept no arguments, and ' + 'return a\n' + 'sequence of strings that represents the names accessible ' + 'on module. If\n' + 'present, this function overrides the standard "dir()" ' + 'search on a\n' + 'module.\n' + '\n' + 'For a more fine grained customization of the module ' + 'behavior (setting\n' + 'attributes, properties, etc.), one can set the ' + '"__class__" attribute\n' + 'of a module object to a subclass of "types.ModuleType". 
' + 'For example:\n' + '\n' + ' import sys\n' + ' from types import ModuleType\n' + '\n' + ' class VerboseModule(ModuleType):\n' + ' def __repr__(self):\n' + " return f'Verbose {self.__name__}'\n" + '\n' + ' def __setattr__(self, attr, value):\n' + " print(f'Setting {attr}...')\n" + ' super().__setattr__(attr, value)\n' + '\n' + ' sys.modules[__name__].__class__ = VerboseModule\n' + '\n' + 'Note:\n' + '\n' + ' Defining module "__getattr__" and setting module ' + '"__class__" only\n' + ' affect lookups made using the attribute access syntax ' + '– directly\n' + ' accessing the module globals (whether by code within ' + 'the module, or\n' + ' via a reference to the module’s globals dictionary) is ' + 'unaffected.\n' + '\n' + 'Changed in version 3.5: "__class__" module attribute is ' + 'now writable.\n' + '\n' + 'New in version 3.7: "__getattr__" and "__dir__" module ' + 'attributes.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 562** - Module __getattr__ and __dir__\n' + ' Describes the "__getattr__" and "__dir__" functions ' + 'on modules.\n' + '\n' + '\n' + 'Implementing Descriptors\n' + '========================\n' + '\n' + 'The following methods only apply when an instance of the ' + 'class\n' + 'containing the method (a so-called *descriptor* class) ' + 'appears in an\n' + '*owner* class (the descriptor must be in either the ' + 'owner’s class\n' + 'dictionary or in the class dictionary for one of its ' + 'parents). In the\n' + 'examples below, “the attribute” refers to the attribute ' + 'whose name is\n' + 'the key of the property in the owner class’ "__dict__".\n' + '\n' + 'object.__get__(self, instance, owner=None)\n' + '\n' + ' Called to get the attribute of the owner class (class ' + 'attribute\n' + ' access) or of an instance of that class (instance ' + 'attribute\n' + ' access). 
The optional *owner* argument is the owner ' + 'class, while\n' + ' *instance* is the instance that the attribute was ' + 'accessed through,\n' + ' or "None" when the attribute is accessed through the ' + '*owner*.\n' + '\n' + ' This method should return the computed attribute ' + 'value or raise an\n' + ' "AttributeError" exception.\n' + '\n' + ' **PEP 252** specifies that "__get__()" is callable ' + 'with one or two\n' + ' arguments. Python’s own built-in descriptors support ' + 'this\n' + ' specification; however, it is likely that some ' + 'third-party tools\n' + ' have descriptors that require both arguments. ' + 'Python’s own\n' + ' "__getattribute__()" implementation always passes in ' + 'both arguments\n' + ' whether they are required or not.\n' + '\n' + 'object.__set__(self, instance, value)\n' + '\n' + ' Called to set the attribute on an instance *instance* ' + 'of the owner\n' + ' class to a new value, *value*.\n' + '\n' + ' Note, adding "__set__()" or "__delete__()" changes ' + 'the kind of\n' + ' descriptor to a “data descriptor”. See Invoking ' + 'Descriptors for\n' + ' more details.\n' + '\n' + 'object.__delete__(self, instance)\n' + '\n' + ' Called to delete the attribute on an instance ' + '*instance* of the\n' + ' owner class.\n' + '\n' + 'The attribute "__objclass__" is interpreted by the ' + '"inspect" module as\n' + 'specifying the class where this object was defined ' + '(setting this\n' + 'appropriately can assist in runtime introspection of ' + 'dynamic class\n' + 'attributes). 
For callables, it may indicate that an ' + 'instance of the\n' + 'given type (or a subclass) is expected or required as ' + 'the first\n' + 'positional argument (for example, CPython sets this ' + 'attribute for\n' + 'unbound methods that are implemented in C).\n' + '\n' + '\n' + 'Invoking Descriptors\n' + '====================\n' + '\n' + 'In general, a descriptor is an object attribute with ' + '“binding\n' + 'behavior”, one whose attribute access has been ' + 'overridden by methods\n' + 'in the descriptor protocol: "__get__()", "__set__()", ' + 'and\n' + '"__delete__()". If any of those methods are defined for ' + 'an object, it\n' + 'is said to be a descriptor.\n' + '\n' + 'The default behavior for attribute access is to get, ' + 'set, or delete\n' + 'the attribute from an object’s dictionary. For instance, ' + '"a.x" has a\n' + 'lookup chain starting with "a.__dict__[\'x\']", then\n' + '"type(a).__dict__[\'x\']", and continuing through the ' + 'base classes of\n' + '"type(a)" excluding metaclasses.\n' + '\n' + 'However, if the looked-up value is an object defining ' + 'one of the\n' + 'descriptor methods, then Python may override the default ' + 'behavior and\n' + 'invoke the descriptor method instead. Where this occurs ' + 'in the\n' + 'precedence chain depends on which descriptor methods ' + 'were defined and\n' + 'how they were called.\n' + '\n' + 'The starting point for descriptor invocation is a ' + 'binding, "a.x". 
How\n' + 'the arguments are assembled depends on "a":\n' + '\n' + 'Direct Call\n' + ' The simplest and least common call is when user code ' + 'directly\n' + ' invokes a descriptor method: "x.__get__(a)".\n' + '\n' + 'Instance Binding\n' + ' If binding to an object instance, "a.x" is ' + 'transformed into the\n' + ' call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n' + '\n' + 'Class Binding\n' + ' If binding to a class, "A.x" is transformed into the ' + 'call:\n' + ' "A.__dict__[\'x\'].__get__(None, A)".\n' + '\n' + 'Super Binding\n' + ' If "a" is an instance of "super", then the binding ' + '"super(B,\n' + ' obj).m()" searches "obj.__class__.__mro__" for the ' + 'base class "A"\n' + ' immediately following "B" and then invokes the ' + 'descriptor with the\n' + ' call: "A.__dict__[\'m\'].__get__(obj, ' + 'obj.__class__)".\n' + '\n' + 'For instance bindings, the precedence of descriptor ' + 'invocation depends\n' + 'on which descriptor methods are defined. A descriptor ' + 'can define any\n' + 'combination of "__get__()", "__set__()" and ' + '"__delete__()". If it\n' + 'does not define "__get__()", then accessing the ' + 'attribute will return\n' + 'the descriptor object itself unless there is a value in ' + 'the object’s\n' + 'instance dictionary. If the descriptor defines ' + '"__set__()" and/or\n' + '"__delete__()", it is a data descriptor; if it defines ' + 'neither, it is\n' + 'a non-data descriptor. Normally, data descriptors ' + 'define both\n' + '"__get__()" and "__set__()", while non-data descriptors ' + 'have just the\n' + '"__get__()" method. Data descriptors with "__get__()" ' + 'and "__set__()"\n' + '(and/or "__delete__()") defined always override a ' + 'redefinition in an\n' + 'instance dictionary. In contrast, non-data descriptors ' + 'can be\n' + 'overridden by instances.\n' + '\n' + 'Python methods (including those decorated with ' + '"@staticmethod" and\n' + '"@classmethod") are implemented as non-data ' + 'descriptors. 
Accordingly,\n' + 'instances can redefine and override methods. This ' + 'allows individual\n' + 'instances to acquire behaviors that differ from other ' + 'instances of the\n' + 'same class.\n' + '\n' + 'The "property()" function is implemented as a data ' + 'descriptor.\n' + 'Accordingly, instances cannot override the behavior of a ' + 'property.\n' + '\n' + '\n' + '__slots__\n' + '=========\n' + '\n' + '*__slots__* allow us to explicitly declare data members ' + '(like\n' + 'properties) and deny the creation of "__dict__" and ' + '*__weakref__*\n' + '(unless explicitly declared in *__slots__* or available ' + 'in a parent.)\n' + '\n' + 'The space saved over using "__dict__" can be ' + 'significant. Attribute\n' + 'lookup speed can be significantly improved as well.\n' + '\n' + 'object.__slots__\n' + '\n' + ' This class variable can be assigned a string, ' + 'iterable, or sequence\n' + ' of strings with variable names used by instances. ' + '*__slots__*\n' + ' reserves space for the declared variables and ' + 'prevents the\n' + ' automatic creation of "__dict__" and *__weakref__* ' + 'for each\n' + ' instance.\n' + '\n' + '\n' + 'Notes on using *__slots__*\n' + '--------------------------\n' + '\n' + '* When inheriting from a class without *__slots__*, the ' + '"__dict__" and\n' + ' *__weakref__* attribute of the instances will always ' + 'be accessible.\n' + '\n' + '* Without a "__dict__" variable, instances cannot be ' + 'assigned new\n' + ' variables not listed in the *__slots__* definition. ' + 'Attempts to\n' + ' assign to an unlisted variable name raises ' + '"AttributeError". If\n' + ' dynamic assignment of new variables is desired, then ' + 'add\n' + ' "\'__dict__\'" to the sequence of strings in the ' + '*__slots__*\n' + ' declaration.\n' + '\n' + '* Without a *__weakref__* variable for each instance, ' + 'classes defining\n' + ' *__slots__* do not support "weak references" to its ' + 'instances. 
If\n' + ' weak reference support is needed, then add ' + '"\'__weakref__\'" to the\n' + ' sequence of strings in the *__slots__* declaration.\n' + '\n' + '* *__slots__* are implemented at the class level by ' + 'creating\n' + ' descriptors for each variable name. As a result, ' + 'class attributes\n' + ' cannot be used to set default values for instance ' + 'variables defined\n' + ' by *__slots__*; otherwise, the class attribute would ' + 'overwrite the\n' + ' descriptor assignment.\n' + '\n' + '* The action of a *__slots__* declaration is not limited ' + 'to the class\n' + ' where it is defined. *__slots__* declared in parents ' + 'are available\n' + ' in child classes. However, child subclasses will get a ' + '"__dict__"\n' + ' and *__weakref__* unless they also define *__slots__* ' + '(which should\n' + ' only contain names of any *additional* slots).\n' + '\n' + '* If a class defines a slot also defined in a base ' + 'class, the instance\n' + ' variable defined by the base class slot is ' + 'inaccessible (except by\n' + ' retrieving its descriptor directly from the base ' + 'class). This\n' + ' renders the meaning of the program undefined. In the ' + 'future, a\n' + ' check may be added to prevent this.\n' + '\n' + '* "TypeError" will be raised if nonempty *__slots__* are ' + 'defined for a\n' + ' class derived from a ""variable-length" built-in type" ' + 'such as\n' + ' "int", "bytes", and "tuple".\n' + '\n' + '* Any non-string *iterable* may be assigned to ' + '*__slots__*.\n' + '\n' + '* If a "dictionary" is used to assign *__slots__*, the ' + 'dictionary keys\n' + ' will be used as the slot names. 
The values of the ' + 'dictionary can be\n' + ' used to provide per-attribute docstrings that will be ' + 'recognised by\n' + ' "inspect.getdoc()" and displayed in the output of ' + '"help()".\n' + '\n' + '* "__class__" assignment works only if both classes have ' + 'the same\n' + ' *__slots__*.\n' + '\n' + '* Multiple inheritance with multiple slotted parent ' + 'classes can be\n' + ' used, but only one parent is allowed to have ' + 'attributes created by\n' + ' slots (the other bases must have empty slot layouts) - ' + 'violations\n' + ' raise "TypeError".\n' + '\n' + '* If an *iterator* is used for *__slots__* then a ' + '*descriptor* is\n' + ' created for each of the iterator’s values. However, ' + 'the *__slots__*\n' + ' attribute will be an empty iterator.\n', + 'attribute-references': 'Attribute references\n' + '********************\n' + '\n' + 'An attribute reference is a primary followed by a ' + 'period and a name:\n' + '\n' + ' attributeref ::= primary "." identifier\n' + '\n' + 'The primary must evaluate to an object of a type ' + 'that supports\n' + 'attribute references, which most objects do. This ' + 'object is then\n' + 'asked to produce the attribute whose name is the ' + 'identifier. This\n' + 'production can be customized by overriding the ' + '"__getattr__()" method.\n' + 'If this attribute is not available, the exception ' + '"AttributeError" is\n' + 'raised. Otherwise, the type and value of the object ' + 'produced is\n' + 'determined by the object. 
Multiple evaluations of ' + 'the same attribute\n' + 'reference may yield different objects.\n', + 'augassign': 'Augmented assignment statements\n' + '*******************************\n' + '\n' + 'Augmented assignment is the combination, in a single statement, ' + 'of a\n' + 'binary operation and an assignment statement:\n' + '\n' + ' augmented_assignment_stmt ::= augtarget augop ' + '(expression_list | yield_expression)\n' + ' augtarget ::= identifier | attributeref | ' + 'subscription | slicing\n' + ' augop ::= "+=" | "-=" | "*=" | "@=" | ' + '"/=" | "//=" | "%=" | "**="\n' + ' | ">>=" | "<<=" | "&=" | "^=" | "|="\n' + '\n' + '(See section Primaries for the syntax definitions of the last ' + 'three\n' + 'symbols.)\n' + '\n' + 'An augmented assignment evaluates the target (which, unlike ' + 'normal\n' + 'assignment statements, cannot be an unpacking) and the ' + 'expression\n' + 'list, performs the binary operation specific to the type of ' + 'assignment\n' + 'on the two operands, and assigns the result to the original ' + 'target.\n' + 'The target is only evaluated once.\n' + '\n' + 'An augmented assignment expression like "x += 1" can be ' + 'rewritten as\n' + '"x = x + 1" to achieve a similar, but not exactly equal effect. ' + 'In the\n' + 'augmented version, "x" is only evaluated once. Also, when ' + 'possible,\n' + 'the actual operation is performed *in-place*, meaning that ' + 'rather than\n' + 'creating a new object and assigning that to the target, the old ' + 'object\n' + 'is modified instead.\n' + '\n' + 'Unlike normal assignments, augmented assignments evaluate the ' + 'left-\n' + 'hand side *before* evaluating the right-hand side. 
For ' + 'example, "a[i]\n' + '+= f(x)" first looks-up "a[i]", then it evaluates "f(x)" and ' + 'performs\n' + 'the addition, and lastly, it writes the result back to "a[i]".\n' + '\n' + 'With the exception of assigning to tuples and multiple targets ' + 'in a\n' + 'single statement, the assignment done by augmented assignment\n' + 'statements is handled the same way as normal assignments. ' + 'Similarly,\n' + 'with the exception of the possible *in-place* behavior, the ' + 'binary\n' + 'operation performed by augmented assignment is the same as the ' + 'normal\n' + 'binary operations.\n' + '\n' + 'For targets which are attribute references, the same caveat ' + 'about\n' + 'class and instance attributes applies as for regular ' + 'assignments.\n', + 'await': 'Await expression\n' + '****************\n' + '\n' + 'Suspend the execution of *coroutine* on an *awaitable* object. Can\n' + 'only be used inside a *coroutine function*.\n' + '\n' + ' await_expr ::= "await" primary\n' + '\n' + 'New in version 3.5.\n', + 'binary': 'Binary arithmetic operations\n' + '****************************\n' + '\n' + 'The binary arithmetic operations have the conventional priority\n' + 'levels. Note that some of these operations also apply to certain ' + 'non-\n' + 'numeric types. Apart from the power operator, there are only two\n' + 'levels, one for multiplicative operators and one for additive\n' + 'operators:\n' + '\n' + ' m_expr ::= u_expr | m_expr "*" u_expr | m_expr "@" m_expr |\n' + ' m_expr "//" u_expr | m_expr "/" u_expr |\n' + ' m_expr "%" u_expr\n' + ' a_expr ::= m_expr | a_expr "+" m_expr | a_expr "-" m_expr\n' + '\n' + 'The "*" (multiplication) operator yields the product of its ' + 'arguments.\n' + 'The arguments must either both be numbers, or one argument must be ' + 'an\n' + 'integer and the other must be a sequence. 
In the former case, the\n' + 'numbers are converted to a common type and then multiplied ' + 'together.\n' + 'In the latter case, sequence repetition is performed; a negative\n' + 'repetition factor yields an empty sequence.\n' + '\n' + 'This operation can be customized using the special "__mul__()" ' + 'and\n' + '"__rmul__()" methods.\n' + '\n' + 'The "@" (at) operator is intended to be used for matrix\n' + 'multiplication. No builtin Python types implement this operator.\n' + '\n' + 'New in version 3.5.\n' + '\n' + 'The "/" (division) and "//" (floor division) operators yield the\n' + 'quotient of their arguments. The numeric arguments are first\n' + 'converted to a common type. Division of integers yields a float, ' + 'while\n' + 'floor division of integers results in an integer; the result is ' + 'that\n' + 'of mathematical division with the ‘floor’ function applied to the\n' + 'result. Division by zero raises the "ZeroDivisionError" ' + 'exception.\n' + '\n' + 'This operation can be customized using the special "__truediv__()" ' + 'and\n' + '"__floordiv__()" methods.\n' + '\n' + 'The "%" (modulo) operator yields the remainder from the division ' + 'of\n' + 'the first argument by the second. The numeric arguments are ' + 'first\n' + 'converted to a common type. A zero right argument raises the\n' + '"ZeroDivisionError" exception. The arguments may be floating ' + 'point\n' + 'numbers, e.g., "3.14%0.7" equals "0.34" (since "3.14" equals ' + '"4*0.7 +\n' + '0.34".) The modulo operator always yields a result with the same ' + 'sign\n' + 'as its second operand (or zero); the absolute value of the result ' + 'is\n' + 'strictly smaller than the absolute value of the second operand ' + '[1].\n' + '\n' + 'The floor division and modulo operators are connected by the ' + 'following\n' + 'identity: "x == (x//y)*y + (x%y)". Floor division and modulo are ' + 'also\n' + 'connected with the built-in function "divmod()": "divmod(x, y) ==\n' + '(x//y, x%y)". 
[2].\n' + '\n' + 'In addition to performing the modulo operation on numbers, the ' + '"%"\n' + 'operator is also overloaded by string objects to perform ' + 'old-style\n' + 'string formatting (also known as interpolation). The syntax for\n' + 'string formatting is described in the Python Library Reference,\n' + 'section printf-style String Formatting.\n' + '\n' + 'The *modulo* operation can be customized using the special ' + '"__mod__()"\n' + 'method.\n' + '\n' + 'The floor division operator, the modulo operator, and the ' + '"divmod()"\n' + 'function are not defined for complex numbers. Instead, convert to ' + 'a\n' + 'floating point number using the "abs()" function if appropriate.\n' + '\n' + 'The "+" (addition) operator yields the sum of its arguments. The\n' + 'arguments must either both be numbers or both be sequences of the ' + 'same\n' + 'type. In the former case, the numbers are converted to a common ' + 'type\n' + 'and then added together. In the latter case, the sequences are\n' + 'concatenated.\n' + '\n' + 'This operation can be customized using the special "__add__()" ' + 'and\n' + '"__radd__()" methods.\n' + '\n' + 'The "-" (subtraction) operator yields the difference of its ' + 'arguments.\n' + 'The numeric arguments are first converted to a common type.\n' + '\n' + 'This operation can be customized using the special "__sub__()" ' + 'method.\n', + 'bitwise': 'Binary bitwise operations\n' + '*************************\n' + '\n' + 'Each of the three bitwise operations has a different priority ' + 'level:\n' + '\n' + ' and_expr ::= shift_expr | and_expr "&" shift_expr\n' + ' xor_expr ::= and_expr | xor_expr "^" and_expr\n' + ' or_expr ::= xor_expr | or_expr "|" xor_expr\n' + '\n' + 'The "&" operator yields the bitwise AND of its arguments, which ' + 'must\n' + 'be integers or one of them must be a custom object overriding\n' + '"__and__()" or "__rand__()" special methods.\n' + '\n' + 'The "^" operator yields the bitwise XOR (exclusive OR) of its\n' + 
'arguments, which must be integers or one of them must be a ' + 'custom\n' + 'object overriding "__xor__()" or "__rxor__()" special methods.\n' + '\n' + 'The "|" operator yields the bitwise (inclusive) OR of its ' + 'arguments,\n' + 'which must be integers or one of them must be a custom object\n' + 'overriding "__or__()" or "__ror__()" special methods.\n', + 'bltin-code-objects': 'Code Objects\n' + '************\n' + '\n' + 'Code objects are used by the implementation to ' + 'represent “pseudo-\n' + 'compiled” executable Python code such as a function ' + 'body. They differ\n' + 'from function objects because they don’t contain a ' + 'reference to their\n' + 'global execution environment. Code objects are ' + 'returned by the built-\n' + 'in "compile()" function and can be extracted from ' + 'function objects\n' + 'through their "__code__" attribute. See also the ' + '"code" module.\n' + '\n' + 'Accessing "__code__" raises an auditing event ' + '"object.__getattr__"\n' + 'with arguments "obj" and ""__code__"".\n' + '\n' + 'A code object can be executed or evaluated by passing ' + 'it (instead of a\n' + 'source string) to the "exec()" or "eval()" built-in ' + 'functions.\n' + '\n' + 'See The standard type hierarchy for more ' + 'information.\n', + 'bltin-ellipsis-object': 'The Ellipsis Object\n' + '*******************\n' + '\n' + 'This object is commonly used by slicing (see ' + 'Slicings). It supports\n' + 'no special operations. There is exactly one ' + 'ellipsis object, named\n' + '"Ellipsis" (a built-in name). "type(Ellipsis)()" ' + 'produces the\n' + '"Ellipsis" singleton.\n' + '\n' + 'It is written as "Ellipsis" or "...".\n', + 'bltin-null-object': 'The Null Object\n' + '***************\n' + '\n' + 'This object is returned by functions that don’t ' + 'explicitly return a\n' + 'value. It supports no special operations. There is ' + 'exactly one null\n' + 'object, named "None" (a built-in name). 
"type(None)()" ' + 'produces the\n' + 'same singleton.\n' + '\n' + 'It is written as "None".\n', + 'bltin-type-objects': 'Type Objects\n' + '************\n' + '\n' + 'Type objects represent the various object types. An ' + 'object’s type is\n' + 'accessed by the built-in function "type()". There are ' + 'no special\n' + 'operations on types. The standard module "types" ' + 'defines names for\n' + 'all standard built-in types.\n' + '\n' + 'Types are written like this: "".\n', + 'booleans': 'Boolean operations\n' + '******************\n' + '\n' + ' or_test ::= and_test | or_test "or" and_test\n' + ' and_test ::= not_test | and_test "and" not_test\n' + ' not_test ::= comparison | "not" not_test\n' + '\n' + 'In the context of Boolean operations, and also when expressions ' + 'are\n' + 'used by control flow statements, the following values are ' + 'interpreted\n' + 'as false: "False", "None", numeric zero of all types, and empty\n' + 'strings and containers (including strings, tuples, lists,\n' + 'dictionaries, sets and frozensets). All other values are ' + 'interpreted\n' + 'as true. User-defined objects can customize their truth value ' + 'by\n' + 'providing a "__bool__()" method.\n' + '\n' + 'The operator "not" yields "True" if its argument is false, ' + '"False"\n' + 'otherwise.\n' + '\n' + 'The expression "x and y" first evaluates *x*; if *x* is false, ' + 'its\n' + 'value is returned; otherwise, *y* is evaluated and the resulting ' + 'value\n' + 'is returned.\n' + '\n' + 'The expression "x or y" first evaluates *x*; if *x* is true, its ' + 'value\n' + 'is returned; otherwise, *y* is evaluated and the resulting value ' + 'is\n' + 'returned.\n' + '\n' + 'Note that neither "and" nor "or" restrict the value and type ' + 'they\n' + 'return to "False" and "True", but rather return the last ' + 'evaluated\n' + 'argument. 
This is sometimes useful, e.g., if "s" is a string ' + 'that\n' + 'should be replaced by a default value if it is empty, the ' + 'expression\n' + '"s or \'foo\'" yields the desired value. Because "not" has to ' + 'create a\n' + 'new value, it returns a boolean value regardless of the type of ' + 'its\n' + 'argument (for example, "not \'foo\'" produces "False" rather ' + 'than "\'\'".)\n', + 'break': 'The "break" statement\n' + '*********************\n' + '\n' + ' break_stmt ::= "break"\n' + '\n' + '"break" may only occur syntactically nested in a "for" or "while"\n' + 'loop, but not nested in a function or class definition within that\n' + 'loop.\n' + '\n' + 'It terminates the nearest enclosing loop, skipping the optional ' + '"else"\n' + 'clause if the loop has one.\n' + '\n' + 'If a "for" loop is terminated by "break", the loop control target\n' + 'keeps its current value.\n' + '\n' + 'When "break" passes control out of a "try" statement with a ' + '"finally"\n' + 'clause, that "finally" clause is executed before really leaving ' + 'the\n' + 'loop.\n', + 'callable-types': 'Emulating callable objects\n' + '**************************\n' + '\n' + 'object.__call__(self[, args...])\n' + '\n' + ' Called when the instance is “called” as a function; if ' + 'this method\n' + ' is defined, "x(arg1, arg2, ...)" roughly translates to\n' + ' "type(x).__call__(x, arg1, ...)".\n', + 'calls': 'Calls\n' + '*****\n' + '\n' + 'A call calls a callable object (e.g., a *function*) with a ' + 'possibly\n' + 'empty series of *arguments*:\n' + '\n' + ' call ::= primary "(" [argument_list [","] | ' + 'comprehension] ")"\n' + ' argument_list ::= positional_arguments ["," ' + 'starred_and_keywords]\n' + ' ["," keywords_arguments]\n' + ' | starred_and_keywords ["," ' + 'keywords_arguments]\n' + ' | keywords_arguments\n' + ' positional_arguments ::= positional_item ("," positional_item)*\n' + ' positional_item ::= assignment_expression | "*" expression\n' + ' starred_and_keywords ::= ("*" 
expression | keyword_item)\n' + ' ("," "*" expression | "," ' + 'keyword_item)*\n' + ' keywords_arguments ::= (keyword_item | "**" expression)\n' + ' ("," keyword_item | "," "**" ' + 'expression)*\n' + ' keyword_item ::= identifier "=" expression\n' + '\n' + 'An optional trailing comma may be present after the positional and\n' + 'keyword arguments but does not affect the semantics.\n' + '\n' + 'The primary must evaluate to a callable object (user-defined\n' + 'functions, built-in functions, methods of built-in objects, class\n' + 'objects, methods of class instances, and all objects having a\n' + '"__call__()" method are callable). All argument expressions are\n' + 'evaluated before the call is attempted. Please refer to section\n' + 'Function definitions for the syntax of formal *parameter* lists.\n' + '\n' + 'If keyword arguments are present, they are first converted to\n' + 'positional arguments, as follows. First, a list of unfilled slots ' + 'is\n' + 'created for the formal parameters. If there are N positional\n' + 'arguments, they are placed in the first N slots. Next, for each\n' + 'keyword argument, the identifier is used to determine the\n' + 'corresponding slot (if the identifier is the same as the first ' + 'formal\n' + 'parameter name, the first slot is used, and so on). If the slot ' + 'is\n' + 'already filled, a "TypeError" exception is raised. Otherwise, the\n' + 'value of the argument is placed in the slot, filling it (even if ' + 'the\n' + 'expression is "None", it fills the slot). When all arguments have\n' + 'been processed, the slots that are still unfilled are filled with ' + 'the\n' + 'corresponding default value from the function definition. 
' + '(Default\n' + 'values are calculated, once, when the function is defined; thus, a\n' + 'mutable object such as a list or dictionary used as default value ' + 'will\n' + 'be shared by all calls that don’t specify an argument value for ' + 'the\n' + 'corresponding slot; this should usually be avoided.) If there are ' + 'any\n' + 'unfilled slots for which no default value is specified, a ' + '"TypeError"\n' + 'exception is raised. Otherwise, the list of filled slots is used ' + 'as\n' + 'the argument list for the call.\n' + '\n' + '**CPython implementation detail:** An implementation may provide\n' + 'built-in functions whose positional parameters do not have names, ' + 'even\n' + 'if they are ‘named’ for the purpose of documentation, and which\n' + 'therefore cannot be supplied by keyword. In CPython, this is the ' + 'case\n' + 'for functions implemented in C that use "PyArg_ParseTuple()" to ' + 'parse\n' + 'their arguments.\n' + '\n' + 'If there are more positional arguments than there are formal ' + 'parameter\n' + 'slots, a "TypeError" exception is raised, unless a formal ' + 'parameter\n' + 'using the syntax "*identifier" is present; in this case, that ' + 'formal\n' + 'parameter receives a tuple containing the excess positional ' + 'arguments\n' + '(or an empty tuple if there were no excess positional arguments).\n' + '\n' + 'If any keyword argument does not correspond to a formal parameter\n' + 'name, a "TypeError" exception is raised, unless a formal parameter\n' + 'using the syntax "**identifier" is present; in this case, that ' + 'formal\n' + 'parameter receives a dictionary containing the excess keyword\n' + 'arguments (using the keywords as keys and the argument values as\n' + 'corresponding values), or a (new) empty dictionary if there were ' + 'no\n' + 'excess keyword arguments.\n' + '\n' + 'If the syntax "*expression" appears in the function call, ' + '"expression"\n' + 'must evaluate to an *iterable*. 
Elements from these iterables are\n' + 'treated as if they were additional positional arguments. For the ' + 'call\n' + '"f(x1, x2, *y, x3, x4)", if *y* evaluates to a sequence *y1*, …, ' + '*yM*,\n' + 'this is equivalent to a call with M+4 positional arguments *x1*, ' + '*x2*,\n' + '*y1*, …, *yM*, *x3*, *x4*.\n' + '\n' + 'A consequence of this is that although the "*expression" syntax ' + 'may\n' + 'appear *after* explicit keyword arguments, it is processed ' + '*before*\n' + 'the keyword arguments (and any "**expression" arguments – see ' + 'below).\n' + 'So:\n' + '\n' + ' >>> def f(a, b):\n' + ' ... print(a, b)\n' + ' ...\n' + ' >>> f(b=1, *(2,))\n' + ' 2 1\n' + ' >>> f(a=1, *(2,))\n' + ' Traceback (most recent call last):\n' + ' File "", line 1, in \n' + " TypeError: f() got multiple values for keyword argument 'a'\n" + ' >>> f(1, *(2,))\n' + ' 1 2\n' + '\n' + 'It is unusual for both keyword arguments and the "*expression" ' + 'syntax\n' + 'to be used in the same call, so in practice this confusion does ' + 'not\n' + 'arise.\n' + '\n' + 'If the syntax "**expression" appears in the function call,\n' + '"expression" must evaluate to a *mapping*, the contents of which ' + 'are\n' + 'treated as additional keyword arguments. If a parameter matching a ' + 'key\n' + 'has already been given a value (by an explicit keyword argument, ' + 'or\n' + 'from another unpacking), a "TypeError" exception is raised.\n' + '\n' + 'When "**expression" is used, each key in this mapping must be a\n' + 'string. Each value from the mapping is assigned to the first ' + 'formal\n' + 'parameter eligible for keyword assignment whose name is equal to ' + 'the\n' + 'key. A key need not be a Python identifier (e.g. ""max-temp °F"" ' + 'is\n' + 'acceptable, although it will not match any formal parameter that ' + 'could\n' + 'be declared). 
If there is no match to a formal parameter the ' + 'key-value\n' + 'pair is collected by the "**" parameter, if there is one, or if ' + 'there\n' + 'is not, a "TypeError" exception is raised.\n' + '\n' + 'Formal parameters using the syntax "*identifier" or "**identifier"\n' + 'cannot be used as positional argument slots or as keyword argument\n' + 'names.\n' + '\n' + 'Changed in version 3.5: Function calls accept any number of "*" ' + 'and\n' + '"**" unpackings, positional arguments may follow iterable ' + 'unpackings\n' + '("*"), and keyword arguments may follow dictionary unpackings ' + '("**").\n' + 'Originally proposed by **PEP 448**.\n' + '\n' + 'A call always returns some value, possibly "None", unless it raises ' + 'an\n' + 'exception. How this value is computed depends on the type of the\n' + 'callable object.\n' + '\n' + 'If it is—\n' + '\n' + 'a user-defined function:\n' + ' The code block for the function is executed, passing it the\n' + ' argument list. The first thing the code block will do is bind ' + 'the\n' + ' formal parameters to the arguments; this is described in ' + 'section\n' + ' Function definitions. 
When the code block executes a "return"\n' + ' statement, this specifies the return value of the function ' + 'call.\n' + '\n' + 'a built-in function or method:\n' + ' The result is up to the interpreter; see Built-in Functions for ' + 'the\n' + ' descriptions of built-in functions and methods.\n' + '\n' + 'a class object:\n' + ' A new instance of that class is returned.\n' + '\n' + 'a class instance method:\n' + ' The corresponding user-defined function is called, with an ' + 'argument\n' + ' list that is one longer than the argument list of the call: the\n' + ' instance becomes the first argument.\n' + '\n' + 'a class instance:\n' + ' The class must define a "__call__()" method; the effect is then ' + 'the\n' + ' same as if that method was called.\n', + 'class': 'Class definitions\n' + '*****************\n' + '\n' + 'A class definition defines a class object (see section The ' + 'standard\n' + 'type hierarchy):\n' + '\n' + ' classdef ::= [decorators] "class" classname [inheritance] ":" ' + 'suite\n' + ' inheritance ::= "(" [argument_list] ")"\n' + ' classname ::= identifier\n' + '\n' + 'A class definition is an executable statement. The inheritance ' + 'list\n' + 'usually gives a list of base classes (see Metaclasses for more\n' + 'advanced uses), so each item in the list should evaluate to a ' + 'class\n' + 'object which allows subclassing. Classes without an inheritance ' + 'list\n' + 'inherit, by default, from the base class "object"; hence,\n' + '\n' + ' class Foo:\n' + ' pass\n' + '\n' + 'is equivalent to\n' + '\n' + ' class Foo(object):\n' + ' pass\n' + '\n' + 'The class’s suite is then executed in a new execution frame (see\n' + 'Naming and binding), using a newly created local namespace and the\n' + 'original global namespace. (Usually, the suite contains mostly\n' + 'function definitions.) When the class’s suite finishes execution, ' + 'its\n' + 'execution frame is discarded but its local namespace is saved. 
[5] ' + 'A\n' + 'class object is then created using the inheritance list for the ' + 'base\n' + 'classes and the saved local namespace for the attribute ' + 'dictionary.\n' + 'The class name is bound to this class object in the original local\n' + 'namespace.\n' + '\n' + 'The order in which attributes are defined in the class body is\n' + 'preserved in the new class’s "__dict__". Note that this is ' + 'reliable\n' + 'only right after the class is created and only for classes that ' + 'were\n' + 'defined using the definition syntax.\n' + '\n' + 'Class creation can be customized heavily using metaclasses.\n' + '\n' + 'Classes can also be decorated: just like when decorating ' + 'functions,\n' + '\n' + ' @f1(arg)\n' + ' @f2\n' + ' class Foo: pass\n' + '\n' + 'is roughly equivalent to\n' + '\n' + ' class Foo: pass\n' + ' Foo = f1(arg)(f2(Foo))\n' + '\n' + 'The evaluation rules for the decorator expressions are the same as ' + 'for\n' + 'function decorators. The result is then bound to the class name.\n' + '\n' + 'Changed in version 3.9: Classes may be decorated with any valid\n' + '"assignment_expression". Previously, the grammar was much more\n' + 'restrictive; see **PEP 614** for details.\n' + '\n' + '**Programmer’s note:** Variables defined in the class definition ' + 'are\n' + 'class attributes; they are shared by instances. Instance ' + 'attributes\n' + 'can be set in a method with "self.name = value". Both class and\n' + 'instance attributes are accessible through the notation ' + '“"self.name"”,\n' + 'and an instance attribute hides a class attribute with the same ' + 'name\n' + 'when accessed in this way. Class attributes can be used as ' + 'defaults\n' + 'for instance attributes, but using mutable values there can lead ' + 'to\n' + 'unexpected results. 
Descriptors can be used to create instance\n' + 'variables with different implementation details.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 3115** - Metaclasses in Python 3000\n' + ' The proposal that changed the declaration of metaclasses to ' + 'the\n' + ' current syntax, and the semantics for how classes with\n' + ' metaclasses are constructed.\n' + '\n' + ' **PEP 3129** - Class Decorators\n' + ' The proposal that added class decorators. Function and ' + 'method\n' + ' decorators were introduced in **PEP 318**.\n', + 'comparisons': 'Comparisons\n' + '***********\n' + '\n' + 'Unlike C, all comparison operations in Python have the same ' + 'priority,\n' + 'which is lower than that of any arithmetic, shifting or ' + 'bitwise\n' + 'operation. Also unlike C, expressions like "a < b < c" have ' + 'the\n' + 'interpretation that is conventional in mathematics:\n' + '\n' + ' comparison ::= or_expr (comp_operator or_expr)*\n' + ' comp_operator ::= "<" | ">" | "==" | ">=" | "<=" | "!="\n' + ' | "is" ["not"] | ["not"] "in"\n' + '\n' + 'Comparisons yield boolean values: "True" or "False". Custom ' + '*rich\n' + 'comparison methods* may return non-boolean values. In this ' + 'case Python\n' + 'will call "bool()" on such value in boolean contexts.\n' + '\n' + 'Comparisons can be chained arbitrarily, e.g., "x < y <= z" ' + 'is\n' + 'equivalent to "x < y and y <= z", except that "y" is ' + 'evaluated only\n' + 'once (but in both cases "z" is not evaluated at all when "x < ' + 'y" is\n' + 'found to be false).\n' + '\n' + 'Formally, if *a*, *b*, *c*, …, *y*, *z* are expressions and ' + '*op1*,\n' + '*op2*, …, *opN* are comparison operators, then "a op1 b op2 c ' + '... y\n' + 'opN z" is equivalent to "a op1 b and b op2 c and ... 
y opN ' + 'z", except\n' + 'that each expression is evaluated at most once.\n' + '\n' + 'Note that "a op1 b op2 c" doesn’t imply any kind of ' + 'comparison between\n' + '*a* and *c*, so that, e.g., "x < y > z" is perfectly legal ' + '(though\n' + 'perhaps not pretty).\n' + '\n' + '\n' + 'Value comparisons\n' + '=================\n' + '\n' + 'The operators "<", ">", "==", ">=", "<=", and "!=" compare ' + 'the values\n' + 'of two objects. The objects do not need to have the same ' + 'type.\n' + '\n' + 'Chapter Objects, values and types states that objects have a ' + 'value (in\n' + 'addition to type and identity). The value of an object is a ' + 'rather\n' + 'abstract notion in Python: For example, there is no canonical ' + 'access\n' + 'method for an object’s value. Also, there is no requirement ' + 'that the\n' + 'value of an object should be constructed in a particular way, ' + 'e.g.\n' + 'comprised of all its data attributes. Comparison operators ' + 'implement a\n' + 'particular notion of what the value of an object is. One can ' + 'think of\n' + 'them as defining the value of an object indirectly, by means ' + 'of their\n' + 'comparison implementation.\n' + '\n' + 'Because all types are (direct or indirect) subtypes of ' + '"object", they\n' + 'inherit the default comparison behavior from "object". Types ' + 'can\n' + 'customize their comparison behavior by implementing *rich ' + 'comparison\n' + 'methods* like "__lt__()", described in Basic customization.\n' + '\n' + 'The default behavior for equality comparison ("==" and "!=") ' + 'is based\n' + 'on the identity of the objects. Hence, equality comparison ' + 'of\n' + 'instances with the same identity results in equality, and ' + 'equality\n' + 'comparison of instances with different identities results in\n' + 'inequality. A motivation for this default behavior is the ' + 'desire that\n' + 'all objects should be reflexive (i.e. 
"x is y" implies "x == ' + 'y").\n' + '\n' + 'A default order comparison ("<", ">", "<=", and ">=") is not ' + 'provided;\n' + 'an attempt raises "TypeError". A motivation for this default ' + 'behavior\n' + 'is the lack of a similar invariant as for equality.\n' + '\n' + 'The behavior of the default equality comparison, that ' + 'instances with\n' + 'different identities are always unequal, may be in contrast ' + 'to what\n' + 'types will need that have a sensible definition of object ' + 'value and\n' + 'value-based equality. Such types will need to customize ' + 'their\n' + 'comparison behavior, and in fact, a number of built-in types ' + 'have done\n' + 'that.\n' + '\n' + 'The following list describes the comparison behavior of the ' + 'most\n' + 'important built-in types.\n' + '\n' + '* Numbers of built-in numeric types (Numeric Types — int, ' + 'float,\n' + ' complex) and of the standard library types ' + '"fractions.Fraction" and\n' + ' "decimal.Decimal" can be compared within and across their ' + 'types,\n' + ' with the restriction that complex numbers do not support ' + 'order\n' + ' comparison. Within the limits of the types involved, they ' + 'compare\n' + ' mathematically (algorithmically) correct without loss of ' + 'precision.\n' + '\n' + ' The not-a-number values "float(\'NaN\')" and ' + '"decimal.Decimal(\'NaN\')"\n' + ' are special. Any ordered comparison of a number to a ' + 'not-a-number\n' + ' value is false. A counter-intuitive implication is that ' + 'not-a-number\n' + ' values are not equal to themselves. For example, if "x =\n' + ' float(\'NaN\')", "3 < x", "x < 3" and "x == x" are all ' + 'false, while "x\n' + ' != x" is true. This behavior is compliant with IEEE 754.\n' + '\n' + '* "None" and "NotImplemented" are singletons. 
**PEP 8** ' + 'advises that\n' + ' comparisons for singletons should always be done with "is" ' + 'or "is\n' + ' not", never the equality operators.\n' + '\n' + '* Binary sequences (instances of "bytes" or "bytearray") can ' + 'be\n' + ' compared within and across their types. They compare\n' + ' lexicographically using the numeric values of their ' + 'elements.\n' + '\n' + '* Strings (instances of "str") compare lexicographically ' + 'using the\n' + ' numerical Unicode code points (the result of the built-in ' + 'function\n' + ' "ord()") of their characters. [3]\n' + '\n' + ' Strings and binary sequences cannot be directly compared.\n' + '\n' + '* Sequences (instances of "tuple", "list", or "range") can be ' + 'compared\n' + ' only within each of their types, with the restriction that ' + 'ranges do\n' + ' not support order comparison. Equality comparison across ' + 'these\n' + ' types results in inequality, and ordering comparison across ' + 'these\n' + ' types raises "TypeError".\n' + '\n' + ' Sequences compare lexicographically using comparison of\n' + ' corresponding elements. The built-in containers typically ' + 'assume\n' + ' identical objects are equal to themselves. That lets them ' + 'bypass\n' + ' equality tests for identical objects to improve performance ' + 'and to\n' + ' maintain their internal invariants.\n' + '\n' + ' Lexicographical comparison between built-in collections ' + 'works as\n' + ' follows:\n' + '\n' + ' * For two collections to compare equal, they must be of the ' + 'same\n' + ' type, have the same length, and each pair of ' + 'corresponding\n' + ' elements must compare equal (for example, "[1,2] == ' + '(1,2)" is\n' + ' false because the type is not the same).\n' + '\n' + ' * Collections that support order comparison are ordered the ' + 'same as\n' + ' their first unequal elements (for example, "[1,2,x] <= ' + '[1,2,y]"\n' + ' has the same value as "x <= y"). 
If a corresponding ' + 'element does\n' + ' not exist, the shorter collection is ordered first (for ' + 'example,\n' + ' "[1,2] < [1,2,3]" is true).\n' + '\n' + '* Mappings (instances of "dict") compare equal if and only if ' + 'they\n' + ' have equal "(key, value)" pairs. Equality comparison of the ' + 'keys and\n' + ' values enforces reflexivity.\n' + '\n' + ' Order comparisons ("<", ">", "<=", and ">=") raise ' + '"TypeError".\n' + '\n' + '* Sets (instances of "set" or "frozenset") can be compared ' + 'within and\n' + ' across their types.\n' + '\n' + ' They define order comparison operators to mean subset and ' + 'superset\n' + ' tests. Those relations do not define total orderings (for ' + 'example,\n' + ' the two sets "{1,2}" and "{2,3}" are not equal, nor subsets ' + 'of one\n' + ' another, nor supersets of one another). Accordingly, sets ' + 'are not\n' + ' appropriate arguments for functions which depend on total ' + 'ordering\n' + ' (for example, "min()", "max()", and "sorted()" produce ' + 'undefined\n' + ' results given a list of sets as inputs).\n' + '\n' + ' Comparison of sets enforces reflexivity of its elements.\n' + '\n' + '* Most other built-in types have no comparison methods ' + 'implemented, so\n' + ' they inherit the default comparison behavior.\n' + '\n' + 'User-defined classes that customize their comparison behavior ' + 'should\n' + 'follow some consistency rules, if possible:\n' + '\n' + '* Equality comparison should be reflexive. In other words, ' + 'identical\n' + ' objects should compare equal:\n' + '\n' + ' "x is y" implies "x == y"\n' + '\n' + '* Comparison should be symmetric. In other words, the ' + 'following\n' + ' expressions should have the same result:\n' + '\n' + ' "x == y" and "y == x"\n' + '\n' + ' "x != y" and "y != x"\n' + '\n' + ' "x < y" and "y > x"\n' + '\n' + ' "x <= y" and "y >= x"\n' + '\n' + '* Comparison should be transitive. 
The following ' + '(non-exhaustive)\n' + ' examples illustrate that:\n' + '\n' + ' "x > y and y > z" implies "x > z"\n' + '\n' + ' "x < y and y <= z" implies "x < z"\n' + '\n' + '* Inverse comparison should result in the boolean negation. ' + 'In other\n' + ' words, the following expressions should have the same ' + 'result:\n' + '\n' + ' "x == y" and "not x != y"\n' + '\n' + ' "x < y" and "not x >= y" (for total ordering)\n' + '\n' + ' "x > y" and "not x <= y" (for total ordering)\n' + '\n' + ' The last two expressions apply to totally ordered ' + 'collections (e.g.\n' + ' to sequences, but not to sets or mappings). See also the\n' + ' "total_ordering()" decorator.\n' + '\n' + '* The "hash()" result should be consistent with equality. ' + 'Objects that\n' + ' are equal should either have the same hash value, or be ' + 'marked as\n' + ' unhashable.\n' + '\n' + 'Python does not enforce these consistency rules. In fact, ' + 'the\n' + 'not-a-number values are an example for not following these ' + 'rules.\n' + '\n' + '\n' + 'Membership test operations\n' + '==========================\n' + '\n' + 'The operators "in" and "not in" test for membership. "x in ' + 's"\n' + 'evaluates to "True" if *x* is a member of *s*, and "False" ' + 'otherwise.\n' + '"x not in s" returns the negation of "x in s". All built-in ' + 'sequences\n' + 'and set types support this as well as dictionary, for which ' + '"in" tests\n' + 'whether the dictionary has a given key. For container types ' + 'such as\n' + 'list, tuple, set, frozenset, dict, or collections.deque, the\n' + 'expression "x in y" is equivalent to "any(x is e or x == e ' + 'for e in\n' + 'y)".\n' + '\n' + 'For the string and bytes types, "x in y" is "True" if and ' + 'only if *x*\n' + 'is a substring of *y*. 
An equivalent test is "y.find(x) != ' + '-1".\n' + 'Empty strings are always considered to be a substring of any ' + 'other\n' + 'string, so """ in "abc"" will return "True".\n' + '\n' + 'For user-defined classes which define the "__contains__()" ' + 'method, "x\n' + 'in y" returns "True" if "y.__contains__(x)" returns a true ' + 'value, and\n' + '"False" otherwise.\n' + '\n' + 'For user-defined classes which do not define "__contains__()" ' + 'but do\n' + 'define "__iter__()", "x in y" is "True" if some value "z", ' + 'for which\n' + 'the expression "x is z or x == z" is true, is produced while ' + 'iterating\n' + 'over "y". If an exception is raised during the iteration, it ' + 'is as if\n' + '"in" raised that exception.\n' + '\n' + 'Lastly, the old-style iteration protocol is tried: if a class ' + 'defines\n' + '"__getitem__()", "x in y" is "True" if and only if there is a ' + 'non-\n' + 'negative integer index *i* such that "x is y[i] or x == ' + 'y[i]", and no\n' + 'lower integer index raises the "IndexError" exception. (If ' + 'any other\n' + 'exception is raised, it is as if "in" raised that ' + 'exception).\n' + '\n' + 'The operator "not in" is defined to have the inverse truth ' + 'value of\n' + '"in".\n' + '\n' + '\n' + 'Identity comparisons\n' + '====================\n' + '\n' + 'The operators "is" and "is not" test for an object’s ' + 'identity: "x is\n' + 'y" is true if and only if *x* and *y* are the same object. ' + 'An\n' + 'Object’s identity is determined using the "id()" function. ' + '"x is not\n' + 'y" yields the inverse truth value. [4]\n', + 'compound': 'Compound statements\n' + '*******************\n' + '\n' + 'Compound statements contain (groups of) other statements; they ' + 'affect\n' + 'or control the execution of those other statements in some way. 
' + 'In\n' + 'general, compound statements span multiple lines, although in ' + 'simple\n' + 'incarnations a whole compound statement may be contained in one ' + 'line.\n' + '\n' + 'The "if", "while" and "for" statements implement traditional ' + 'control\n' + 'flow constructs. "try" specifies exception handlers and/or ' + 'cleanup\n' + 'code for a group of statements, while the "with" statement ' + 'allows the\n' + 'execution of initialization and finalization code around a block ' + 'of\n' + 'code. Function and class definitions are also syntactically ' + 'compound\n' + 'statements.\n' + '\n' + 'A compound statement consists of one or more ‘clauses.’ A ' + 'clause\n' + 'consists of a header and a ‘suite.’ The clause headers of a\n' + 'particular compound statement are all at the same indentation ' + 'level.\n' + 'Each clause header begins with a uniquely identifying keyword ' + 'and ends\n' + 'with a colon. A suite is a group of statements controlled by a\n' + 'clause. A suite can be one or more semicolon-separated simple\n' + 'statements on the same line as the header, following the ' + 'header’s\n' + 'colon, or it can be one or more indented statements on ' + 'subsequent\n' + 'lines. 
Only the latter form of a suite can contain nested ' + 'compound\n' + 'statements; the following is illegal, mostly because it wouldn’t ' + 'be\n' + 'clear to which "if" clause a following "else" clause would ' + 'belong:\n' + '\n' + ' if test1: if test2: print(x)\n' + '\n' + 'Also note that the semicolon binds tighter than the colon in ' + 'this\n' + 'context, so that in the following example, either all or none of ' + 'the\n' + '"print()" calls are executed:\n' + '\n' + ' if x < y < z: print(x); print(y); print(z)\n' + '\n' + 'Summarizing:\n' + '\n' + ' compound_stmt ::= if_stmt\n' + ' | while_stmt\n' + ' | for_stmt\n' + ' | try_stmt\n' + ' | with_stmt\n' + ' | match_stmt\n' + ' | funcdef\n' + ' | classdef\n' + ' | async_with_stmt\n' + ' | async_for_stmt\n' + ' | async_funcdef\n' + ' suite ::= stmt_list NEWLINE | NEWLINE INDENT ' + 'statement+ DEDENT\n' + ' statement ::= stmt_list NEWLINE | compound_stmt\n' + ' stmt_list ::= simple_stmt (";" simple_stmt)* [";"]\n' + '\n' + 'Note that statements always end in a "NEWLINE" possibly followed ' + 'by a\n' + '"DEDENT". 
Also note that optional continuation clauses always ' + 'begin\n' + 'with a keyword that cannot start a statement, thus there are no\n' + 'ambiguities (the ‘dangling "else"’ problem is solved in Python ' + 'by\n' + 'requiring nested "if" statements to be indented).\n' + '\n' + 'The formatting of the grammar rules in the following sections ' + 'places\n' + 'each clause on a separate line for clarity.\n' + '\n' + '\n' + 'The "if" statement\n' + '==================\n' + '\n' + 'The "if" statement is used for conditional execution:\n' + '\n' + ' if_stmt ::= "if" assignment_expression ":" suite\n' + ' ("elif" assignment_expression ":" suite)*\n' + ' ["else" ":" suite]\n' + '\n' + 'It selects exactly one of the suites by evaluating the ' + 'expressions one\n' + 'by one until one is found to be true (see section Boolean ' + 'operations\n' + 'for the definition of true and false); then that suite is ' + 'executed\n' + '(and no other part of the "if" statement is executed or ' + 'evaluated).\n' + 'If all expressions are false, the suite of the "else" clause, ' + 'if\n' + 'present, is executed.\n' + '\n' + '\n' + 'The "while" statement\n' + '=====================\n' + '\n' + 'The "while" statement is used for repeated execution as long as ' + 'an\n' + 'expression is true:\n' + '\n' + ' while_stmt ::= "while" assignment_expression ":" suite\n' + ' ["else" ":" suite]\n' + '\n' + 'This repeatedly tests the expression and, if it is true, ' + 'executes the\n' + 'first suite; if the expression is false (which may be the first ' + 'time\n' + 'it is tested) the suite of the "else" clause, if present, is ' + 'executed\n' + 'and the loop terminates.\n' + '\n' + 'A "break" statement executed in the first suite terminates the ' + 'loop\n' + 'without executing the "else" clause’s suite. 
A "continue" ' + 'statement\n' + 'executed in the first suite skips the rest of the suite and goes ' + 'back\n' + 'to testing the expression.\n' + '\n' + '\n' + 'The "for" statement\n' + '===================\n' + '\n' + 'The "for" statement is used to iterate over the elements of a ' + 'sequence\n' + '(such as a string, tuple or list) or other iterable object:\n' + '\n' + ' for_stmt ::= "for" target_list "in" expression_list ":" ' + 'suite\n' + ' ["else" ":" suite]\n' + '\n' + 'The expression list is evaluated once; it should yield an ' + 'iterable\n' + 'object. An iterator is created for the result of the\n' + '"expression_list". The suite is then executed once for each ' + 'item\n' + 'provided by the iterator, in the order returned by the ' + 'iterator. Each\n' + 'item in turn is assigned to the target list using the standard ' + 'rules\n' + 'for assignments (see Assignment statements), and then the suite ' + 'is\n' + 'executed. When the items are exhausted (which is immediately ' + 'when the\n' + 'sequence is empty or an iterator raises a "StopIteration" ' + 'exception),\n' + 'the suite in the "else" clause, if present, is executed, and the ' + 'loop\n' + 'terminates.\n' + '\n' + 'A "break" statement executed in the first suite terminates the ' + 'loop\n' + 'without executing the "else" clause’s suite. 
A "continue" ' + 'statement\n' + 'executed in the first suite skips the rest of the suite and ' + 'continues\n' + 'with the next item, or with the "else" clause if there is no ' + 'next\n' + 'item.\n' + '\n' + 'The for-loop makes assignments to the variables in the target ' + 'list.\n' + 'This overwrites all previous assignments to those variables ' + 'including\n' + 'those made in the suite of the for-loop:\n' + '\n' + ' for i in range(10):\n' + ' print(i)\n' + ' i = 5 # this will not affect the for-loop\n' + ' # because i will be overwritten with ' + 'the next\n' + ' # index in the range\n' + '\n' + 'Names in the target list are not deleted when the loop is ' + 'finished,\n' + 'but if the sequence is empty, they will not have been assigned ' + 'to at\n' + 'all by the loop. Hint: the built-in type "range()" represents\n' + 'immutable arithmetic sequences of integers. For instance, ' + 'iterating\n' + '"range(3)" successively yields 0, 1, and then 2.\n' + '\n' + '\n' + 'The "try" statement\n' + '===================\n' + '\n' + 'The "try" statement specifies exception handlers and/or cleanup ' + 'code\n' + 'for a group of statements:\n' + '\n' + ' try_stmt ::= try1_stmt | try2_stmt\n' + ' try1_stmt ::= "try" ":" suite\n' + ' ("except" [expression ["as" identifier]] ":" ' + 'suite)+\n' + ' ["else" ":" suite]\n' + ' ["finally" ":" suite]\n' + ' try2_stmt ::= "try" ":" suite\n' + ' "finally" ":" suite\n' + '\n' + 'The "except" clause(s) specify one or more exception handlers. ' + 'When no\n' + 'exception occurs in the "try" clause, no exception handler is\n' + 'executed. When an exception occurs in the "try" suite, a search ' + 'for an\n' + 'exception handler is started. This search inspects the except ' + 'clauses\n' + 'in turn until one is found that matches the exception. An ' + 'expression-\n' + 'less except clause, if present, must be last; it matches any\n' + 'exception. 
For an except clause with an expression, that ' + 'expression\n' + 'is evaluated, and the clause matches the exception if the ' + 'resulting\n' + 'object is “compatible” with the exception. An object is ' + 'compatible\n' + 'with an exception if the object is the class or a *non-virtual ' + 'base\n' + 'class* of the exception object, or a tuple containing an item ' + 'that is\n' + 'the class or a non-virtual base class of the exception object.\n' + '\n' + 'If no except clause matches the exception, the search for an ' + 'exception\n' + 'handler continues in the surrounding code and on the invocation ' + 'stack.\n' + '[1]\n' + '\n' + 'If the evaluation of an expression in the header of an except ' + 'clause\n' + 'raises an exception, the original search for a handler is ' + 'canceled and\n' + 'a search starts for the new exception in the surrounding code ' + 'and on\n' + 'the call stack (it is treated as if the entire "try" statement ' + 'raised\n' + 'the exception).\n' + '\n' + 'When a matching except clause is found, the exception is ' + 'assigned to\n' + 'the target specified after the "as" keyword in that except ' + 'clause, if\n' + 'present, and the except clause’s suite is executed. All except\n' + 'clauses must have an executable block. When the end of this ' + 'block is\n' + 'reached, execution continues normally after the entire try ' + 'statement.\n' + '(This means that if two nested handlers exist for the same ' + 'exception,\n' + 'and the exception occurs in the try clause of the inner handler, ' + 'the\n' + 'outer handler will not handle the exception.)\n' + '\n' + 'When an exception has been assigned using "as target", it is ' + 'cleared\n' + 'at the end of the except clause. 
This is as if\n' + '\n' + ' except E as N:\n' + ' foo\n' + '\n' + 'was translated to\n' + '\n' + ' except E as N:\n' + ' try:\n' + ' foo\n' + ' finally:\n' + ' del N\n' + '\n' + 'This means the exception must be assigned to a different name to ' + 'be\n' + 'able to refer to it after the except clause. Exceptions are ' + 'cleared\n' + 'because with the traceback attached to them, they form a ' + 'reference\n' + 'cycle with the stack frame, keeping all locals in that frame ' + 'alive\n' + 'until the next garbage collection occurs.\n' + '\n' + 'Before an except clause’s suite is executed, details about the\n' + 'exception are stored in the "sys" module and can be accessed ' + 'via\n' + '"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting ' + 'of the\n' + 'exception class, the exception instance and a traceback object ' + '(see\n' + 'section The standard type hierarchy) identifying the point in ' + 'the\n' + 'program where the exception occurred. The details about the ' + 'exception\n' + 'accessed via "sys.exc_info()" are restored to their previous ' + 'values\n' + 'when leaving an exception handler:\n' + '\n' + ' >>> print(sys.exc_info())\n' + ' (None, None, None)\n' + ' >>> try:\n' + ' ... raise TypeError\n' + ' ... except:\n' + ' ... print(sys.exc_info())\n' + ' ... try:\n' + ' ... raise ValueError\n' + ' ... except:\n' + ' ... print(sys.exc_info())\n' + ' ... print(sys.exc_info())\n' + ' ...\n' + " (, TypeError(), )\n' + " (, ValueError(), )\n' + " (, TypeError(), )\n' + ' >>> print(sys.exc_info())\n' + ' (None, None, None)\n' + '\n' + 'The optional "else" clause is executed if the control flow ' + 'leaves the\n' + '"try" suite, no exception was raised, and no "return", ' + '"continue", or\n' + '"break" statement was executed. Exceptions in the "else" clause ' + 'are\n' + 'not handled by the preceding "except" clauses.\n' + '\n' + 'If "finally" is present, it specifies a ‘cleanup’ handler. 
The ' + '"try"\n' + 'clause is executed, including any "except" and "else" clauses. ' + 'If an\n' + 'exception occurs in any of the clauses and is not handled, the\n' + 'exception is temporarily saved. The "finally" clause is ' + 'executed. If\n' + 'there is a saved exception it is re-raised at the end of the ' + '"finally"\n' + 'clause. If the "finally" clause raises another exception, the ' + 'saved\n' + 'exception is set as the context of the new exception. If the ' + '"finally"\n' + 'clause executes a "return", "break" or "continue" statement, the ' + 'saved\n' + 'exception is discarded:\n' + '\n' + ' >>> def f():\n' + ' ... try:\n' + ' ... 1/0\n' + ' ... finally:\n' + ' ... return 42\n' + ' ...\n' + ' >>> f()\n' + ' 42\n' + '\n' + 'The exception information is not available to the program ' + 'during\n' + 'execution of the "finally" clause.\n' + '\n' + 'When a "return", "break" or "continue" statement is executed in ' + 'the\n' + '"try" suite of a "try"…"finally" statement, the "finally" clause ' + 'is\n' + 'also executed ‘on the way out.’\n' + '\n' + 'The return value of a function is determined by the last ' + '"return"\n' + 'statement executed. Since the "finally" clause always executes, ' + 'a\n' + '"return" statement executed in the "finally" clause will always ' + 'be the\n' + 'last one executed:\n' + '\n' + ' >>> def foo():\n' + ' ... try:\n' + " ... return 'try'\n" + ' ... finally:\n' + " ... 
return 'finally'\n" + ' ...\n' + ' >>> foo()\n' + " 'finally'\n" + '\n' + 'Additional information on exceptions can be found in section\n' + 'Exceptions, and information on using the "raise" statement to ' + 'generate\n' + 'exceptions may be found in section The raise statement.\n' + '\n' + 'Changed in version 3.8: Prior to Python 3.8, a "continue" ' + 'statement\n' + 'was illegal in the "finally" clause due to a problem with the\n' + 'implementation.\n' + '\n' + '\n' + 'The "with" statement\n' + '====================\n' + '\n' + 'The "with" statement is used to wrap the execution of a block ' + 'with\n' + 'methods defined by a context manager (see section With ' + 'Statement\n' + 'Context Managers). This allows common "try"…"except"…"finally" ' + 'usage\n' + 'patterns to be encapsulated for convenient reuse.\n' + '\n' + ' with_stmt ::= "with" ( "(" with_stmt_contents ","? ' + '")" | with_stmt_contents ) ":" suite\n' + ' with_stmt_contents ::= with_item ("," with_item)*\n' + ' with_item ::= expression ["as" target]\n' + '\n' + 'The execution of the "with" statement with one “item” proceeds ' + 'as\n' + 'follows:\n' + '\n' + '1. The context expression (the expression given in the ' + '"with_item") is\n' + ' evaluated to obtain a context manager.\n' + '\n' + '2. The context manager’s "__enter__()" is loaded for later use.\n' + '\n' + '3. The context manager’s "__exit__()" is loaded for later use.\n' + '\n' + '4. The context manager’s "__enter__()" method is invoked.\n' + '\n' + '5. If a target was included in the "with" statement, the return ' + 'value\n' + ' from "__enter__()" is assigned to it.\n' + '\n' + ' Note:\n' + '\n' + ' The "with" statement guarantees that if the "__enter__()" ' + 'method\n' + ' returns without an error, then "__exit__()" will always be\n' + ' called. Thus, if an error occurs during the assignment to ' + 'the\n' + ' target list, it will be treated the same as an error ' + 'occurring\n' + ' within the suite would be. 
See step 7 below.\n' + '\n' + '6. The suite is executed.\n' + '\n' + '7. The context manager’s "__exit__()" method is invoked. If an\n' + ' exception caused the suite to be exited, its type, value, ' + 'and\n' + ' traceback are passed as arguments to "__exit__()". Otherwise, ' + 'three\n' + ' "None" arguments are supplied.\n' + '\n' + ' If the suite was exited due to an exception, and the return ' + 'value\n' + ' from the "__exit__()" method was false, the exception is ' + 'reraised.\n' + ' If the return value was true, the exception is suppressed, ' + 'and\n' + ' execution continues with the statement following the "with"\n' + ' statement.\n' + '\n' + ' If the suite was exited for any reason other than an ' + 'exception, the\n' + ' return value from "__exit__()" is ignored, and execution ' + 'proceeds\n' + ' at the normal location for the kind of exit that was taken.\n' + '\n' + 'The following code:\n' + '\n' + ' with EXPRESSION as TARGET:\n' + ' SUITE\n' + '\n' + 'is semantically equivalent to:\n' + '\n' + ' manager = (EXPRESSION)\n' + ' enter = type(manager).__enter__\n' + ' exit = type(manager).__exit__\n' + ' value = enter(manager)\n' + ' hit_except = False\n' + '\n' + ' try:\n' + ' TARGET = value\n' + ' SUITE\n' + ' except:\n' + ' hit_except = True\n' + ' if not exit(manager, *sys.exc_info()):\n' + ' raise\n' + ' finally:\n' + ' if not hit_except:\n' + ' exit(manager, None, None, None)\n' + '\n' + 'With more than one item, the context managers are processed as ' + 'if\n' + 'multiple "with" statements were nested:\n' + '\n' + ' with A() as a, B() as b:\n' + ' SUITE\n' + '\n' + 'is semantically equivalent to:\n' + '\n' + ' with A() as a:\n' + ' with B() as b:\n' + ' SUITE\n' + '\n' + 'You can also write multi-item context managers in multiple lines ' + 'if\n' + 'the items are surrounded by parentheses. 
For example:\n' + '\n' + ' with (\n' + ' A() as a,\n' + ' B() as b,\n' + ' ):\n' + ' SUITE\n' + '\n' + 'Changed in version 3.1: Support for multiple context ' + 'expressions.\n' + '\n' + 'Changed in version 3.10: Support for using grouping parentheses ' + 'to\n' + 'break the statement in multiple lines.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 343** - The “with” statement\n' + ' The specification, background, and examples for the Python ' + '"with"\n' + ' statement.\n' + '\n' + '\n' + 'The "match" statement\n' + '=====================\n' + '\n' + 'New in version 3.10.\n' + '\n' + 'The match statement is used for pattern matching. Syntax:\n' + '\n' + ' match_stmt ::= \'match\' subject_expr ":" NEWLINE INDENT ' + 'case_block+ DEDENT\n' + ' subject_expr ::= star_named_expression "," ' + 'star_named_expressions?\n' + ' | named_expression\n' + ' case_block ::= \'case\' patterns [guard] ":" block\n' + '\n' + 'Note:\n' + '\n' + ' This section uses single quotes to denote soft keywords.\n' + '\n' + 'Pattern matching takes a pattern as input (following "case") and ' + 'a\n' + 'subject value (following "match"). The pattern (which may ' + 'contain\n' + 'subpatterns) is matched against the subject value. The outcomes ' + 'are:\n' + '\n' + '* A match success or failure (also termed a pattern success or\n' + ' failure).\n' + '\n' + '* Possible binding of matched values to a name. The ' + 'prerequisites for\n' + ' this are further discussed below.\n' + '\n' + 'The "match" and "case" keywords are soft keywords.\n' + '\n' + 'See also:\n' + '\n' + ' * **PEP 634** – Structural Pattern Matching: Specification\n' + '\n' + ' * **PEP 636** – Structural Pattern Matching: Tutorial\n' + '\n' + '\n' + 'Overview\n' + '--------\n' + '\n' + 'Here’s an overview of the logical flow of a match statement:\n' + '\n' + '1. The subject expression "subject_expr" is evaluated and a ' + 'resulting\n' + ' subject value obtained. 
If the subject expression contains a ' + 'comma,\n' + ' a tuple is constructed using the standard rules.\n' + '\n' + '2. Each pattern in a "case_block" is attempted to match with ' + 'the\n' + ' subject value. The specific rules for success or failure are\n' + ' described below. The match attempt can also bind some or all ' + 'of the\n' + ' standalone names within the pattern. The precise pattern ' + 'binding\n' + ' rules vary per pattern type and are specified below. **Name\n' + ' bindings made during a successful pattern match outlive the\n' + ' executed block and can be used after the match statement**.\n' + '\n' + ' Note:\n' + '\n' + ' During failed pattern matches, some subpatterns may ' + 'succeed.\n' + ' Do not rely on bindings being made for a failed match.\n' + ' Conversely, do not rely on variables remaining unchanged ' + 'after\n' + ' a failed match. The exact behavior is dependent on\n' + ' implementation and may vary. This is an intentional ' + 'decision\n' + ' made to allow different implementations to add ' + 'optimizations.\n' + '\n' + '3. If the pattern succeeds, the corresponding guard (if present) ' + 'is\n' + ' evaluated. In this case all name bindings are guaranteed to ' + 'have\n' + ' happened.\n' + '\n' + ' * If the guard evaluates as true or is missing, the "block" ' + 'inside\n' + ' "case_block" is executed.\n' + '\n' + ' * Otherwise, the next "case_block" is attempted as described ' + 'above.\n' + '\n' + ' * If there are no further case blocks, the match statement ' + 'is\n' + ' completed.\n' + '\n' + 'Note:\n' + '\n' + ' Users should generally never rely on a pattern being ' + 'evaluated.\n' + ' Depending on implementation, the interpreter may cache values ' + 'or use\n' + ' other optimizations which skip repeated evaluations.\n' + '\n' + 'A sample match statement:\n' + '\n' + ' >>> flag = False\n' + ' >>> match (100, 200):\n' + ' ... case (100, 300): # Mismatch: 200 != 300\n' + " ... print('Case 1')\n" + ' ... 
case (100, 200) if flag: # Successful match, but ' + 'guard fails\n' + " ... print('Case 2')\n" + ' ... case (100, y): # Matches and binds y to 200\n' + " ... print(f'Case 3, y: {y}')\n" + ' ... case _: # Pattern not attempted\n' + " ... print('Case 4, I match anything!')\n" + ' ...\n' + ' Case 3, y: 200\n' + '\n' + 'In this case, "if flag" is a guard. Read more about that in the ' + 'next\n' + 'section.\n' + '\n' + '\n' + 'Guards\n' + '------\n' + '\n' + ' guard ::= "if" named_expression\n' + '\n' + 'A "guard" (which is part of the "case") must succeed for code ' + 'inside\n' + 'the "case" block to execute. It takes the form: "if" followed ' + 'by an\n' + 'expression.\n' + '\n' + 'The logical flow of a "case" block with a "guard" follows:\n' + '\n' + '1. Check that the pattern in the "case" block succeeded. If ' + 'the\n' + ' pattern failed, the "guard" is not evaluated and the next ' + '"case"\n' + ' block is checked.\n' + '\n' + '2. If the pattern succeeded, evaluate the "guard".\n' + '\n' + ' * If the "guard" condition evaluates as true, the case block ' + 'is\n' + ' selected.\n' + '\n' + ' * If the "guard" condition evaluates as false, the case block ' + 'is\n' + ' not selected.\n' + '\n' + ' * If the "guard" raises an exception during evaluation, the\n' + ' exception bubbles up.\n' + '\n' + 'Guards are allowed to have side effects as they are ' + 'expressions.\n' + 'Guard evaluation must proceed from the first to the last case ' + 'block,\n' + 'one at a time, skipping case blocks whose pattern(s) don’t all\n' + 'succeed. (I.e., guard evaluation must happen in order.) Guard\n' + 'evaluation must stop once a case block is selected.\n' + '\n' + '\n' + 'Irrefutable Case Blocks\n' + '-----------------------\n' + '\n' + 'An irrefutable case block is a match-all case block. 
A match\n' + 'statement may have at most one irrefutable case block, and it ' + 'must be\n' + 'last.\n' + '\n' + 'A case block is considered irrefutable if it has no guard and ' + 'its\n' + 'pattern is irrefutable. A pattern is considered irrefutable if ' + 'we can\n' + 'prove from its syntax alone that it will always succeed. Only ' + 'the\n' + 'following patterns are irrefutable:\n' + '\n' + '* AS Patterns whose left-hand side is irrefutable\n' + '\n' + '* OR Patterns containing at least one irrefutable pattern\n' + '\n' + '* Capture Patterns\n' + '\n' + '* Wildcard Patterns\n' + '\n' + '* parenthesized irrefutable patterns\n' + '\n' + '\n' + 'Patterns\n' + '--------\n' + '\n' + 'Note:\n' + '\n' + ' This section uses grammar notations beyond standard EBNF:\n' + '\n' + ' * the notation "SEP.RULE+" is shorthand for "RULE (SEP ' + 'RULE)*"\n' + '\n' + ' * the notation "!RULE" is shorthand for a negative lookahead\n' + ' assertion\n' + '\n' + 'The top-level syntax for "patterns" is:\n' + '\n' + ' patterns ::= open_sequence_pattern | pattern\n' + ' pattern ::= as_pattern | or_pattern\n' + ' closed_pattern ::= | literal_pattern\n' + ' | capture_pattern\n' + ' | wildcard_pattern\n' + ' | value_pattern\n' + ' | group_pattern\n' + ' | sequence_pattern\n' + ' | mapping_pattern\n' + ' | class_pattern\n' + '\n' + 'The descriptions below will include a description “in simple ' + 'terms” of\n' + 'what a pattern does for illustration purposes (credits to ' + 'Raymond\n' + 'Hettinger for a document that inspired most of the ' + 'descriptions). Note\n' + 'that these descriptions are purely for illustration purposes and ' + '**may\n' + 'not** reflect the underlying implementation. 
Furthermore, they ' + 'do not\n' + 'cover all valid forms.\n' + '\n' + '\n' + 'OR Patterns\n' + '~~~~~~~~~~~\n' + '\n' + 'An OR pattern is two or more patterns separated by vertical bars ' + '"|".\n' + 'Syntax:\n' + '\n' + ' or_pattern ::= "|".closed_pattern+\n' + '\n' + 'Only the final subpattern may be irrefutable, and each ' + 'subpattern must\n' + 'bind the same set of names to avoid ambiguity.\n' + '\n' + 'An OR pattern matches each of its subpatterns in turn to the ' + 'subject\n' + 'value, until one succeeds. The OR pattern is then considered\n' + 'successful. Otherwise, if none of the subpatterns succeed, the ' + 'OR\n' + 'pattern fails.\n' + '\n' + 'In simple terms, "P1 | P2 | ..." will try to match "P1", if it ' + 'fails\n' + 'it will try to match "P2", succeeding immediately if any ' + 'succeeds,\n' + 'failing otherwise.\n' + '\n' + '\n' + 'AS Patterns\n' + '~~~~~~~~~~~\n' + '\n' + 'An AS pattern matches an OR pattern on the left of the "as" ' + 'keyword\n' + 'against a subject. Syntax:\n' + '\n' + ' as_pattern ::= or_pattern "as" capture_pattern\n' + '\n' + 'If the OR pattern fails, the AS pattern fails. Otherwise, the ' + 'AS\n' + 'pattern binds the subject to the name on the right of the as ' + 'keyword\n' + 'and succeeds. "capture_pattern" cannot be a "_".\n' + '\n' + 'In simple terms "P as NAME" will match with "P", and on success ' + 'it\n' + 'will set "NAME = ".\n' + '\n' + '\n' + 'Literal Patterns\n' + '~~~~~~~~~~~~~~~~\n' + '\n' + 'A literal pattern corresponds to most literals in Python. ' + 'Syntax:\n' + '\n' + ' literal_pattern ::= signed_number\n' + ' | signed_number "+" NUMBER\n' + ' | signed_number "-" NUMBER\n' + ' | strings\n' + ' | "None"\n' + ' | "True"\n' + ' | "False"\n' + ' | signed_number: NUMBER | "-" NUMBER\n' + '\n' + 'The rule "strings" and the token "NUMBER" are defined in the ' + 'standard\n' + 'Python grammar. Triple-quoted strings are supported. Raw ' + 'strings and\n' + 'byte strings are supported. 
Formatted string literals are not\n' + 'supported.\n' + '\n' + 'The forms "signed_number \'+\' NUMBER" and "signed_number \'-\' ' + 'NUMBER"\n' + 'are for expressing complex numbers; they require a real number ' + 'on the\n' + 'left and an imaginary number on the right. E.g. "3 + 4j".\n' + '\n' + 'In simple terms, "LITERAL" will succeed only if " ==\n' + 'LITERAL". For the singletons "None", "True" and "False", the ' + '"is"\n' + 'operator is used.\n' + '\n' + '\n' + 'Capture Patterns\n' + '~~~~~~~~~~~~~~~~\n' + '\n' + 'A capture pattern binds the subject value to a name. Syntax:\n' + '\n' + " capture_pattern ::= !'_' NAME\n" + '\n' + 'A single underscore "_" is not a capture pattern (this is what ' + '"!\'_\'"\n' + 'expresses). It is instead treated as a "wildcard_pattern".\n' + '\n' + 'In a given pattern, a given name can only be bound once. E.g. ' + '"case\n' + 'x, x: ..." is invalid while "case [x] | x: ..." is allowed.\n' + '\n' + 'Capture patterns always succeed. The binding follows scoping ' + 'rules\n' + 'established by the assignment expression operator in **PEP ' + '572**; the\n' + 'name becomes a local variable in the closest containing function ' + 'scope\n' + 'unless there’s an applicable "global" or "nonlocal" statement.\n' + '\n' + 'In simple terms "NAME" will always succeed and it will set "NAME ' + '=\n' + '".\n' + '\n' + '\n' + 'Wildcard Patterns\n' + '~~~~~~~~~~~~~~~~~\n' + '\n' + 'A wildcard pattern always succeeds (matches anything) and binds ' + 'no\n' + 'name. Syntax:\n' + '\n' + " wildcard_pattern ::= '_'\n" + '\n' + '"_" is a soft keyword within any pattern, but only within ' + 'patterns.\n' + 'It is an identifier, as usual, even within "match" subject\n' + 'expressions, "guard"s, and "case" blocks.\n' + '\n' + 'In simple terms, "_" will always succeed.\n' + '\n' + '\n' + 'Value Patterns\n' + '~~~~~~~~~~~~~~\n' + '\n' + 'A value pattern represents a named value in Python. 
Syntax:\n' + '\n' + ' value_pattern ::= attr\n' + ' attr ::= name_or_attr "." NAME\n' + ' name_or_attr ::= attr | NAME\n' + '\n' + 'The dotted name in the pattern is looked up using standard ' + 'Python name\n' + 'resolution rules. The pattern succeeds if the value found ' + 'compares\n' + 'equal to the subject value (using the "==" equality operator).\n' + '\n' + 'In simple terms "NAME1.NAME2" will succeed only if " ' + '==\n' + 'NAME1.NAME2"\n' + '\n' + 'Note:\n' + '\n' + ' If the same value occurs multiple times in the same match ' + 'statement,\n' + ' the interpreter may cache the first value found and reuse it ' + 'rather\n' + ' than repeat the same lookup. This cache is strictly tied to a ' + 'given\n' + ' execution of a given match statement.\n' + '\n' + '\n' + 'Group Patterns\n' + '~~~~~~~~~~~~~~\n' + '\n' + 'A group pattern allows users to add parentheses around patterns ' + 'to\n' + 'emphasize the intended grouping. Otherwise, it has no ' + 'additional\n' + 'syntax. Syntax:\n' + '\n' + ' group_pattern ::= "(" pattern ")"\n' + '\n' + 'In simple terms "(P)" has the same effect as "P".\n' + '\n' + '\n' + 'Sequence Patterns\n' + '~~~~~~~~~~~~~~~~~\n' + '\n' + 'A sequence pattern contains several subpatterns to be matched ' + 'against\n' + 'sequence elements. The syntax is similar to the unpacking of a ' + 'list or\n' + 'tuple.\n' + '\n' + ' sequence_pattern ::= "[" [maybe_sequence_pattern] "]"\n' + ' | "(" [open_sequence_pattern] ")"\n' + ' open_sequence_pattern ::= maybe_star_pattern "," ' + '[maybe_sequence_pattern]\n' + ' maybe_sequence_pattern ::= ",".maybe_star_pattern+ ","?\n' + ' maybe_star_pattern ::= star_pattern | pattern\n' + ' star_pattern ::= "*" (capture_pattern | ' + 'wildcard_pattern)\n' + '\n' + 'There is no difference if parentheses or square brackets are ' + 'used for\n' + 'sequence patterns (i.e. "(...)" vs "[...]" ).\n' + '\n' + 'Note:\n' + '\n' + ' A single pattern enclosed in parentheses without a trailing ' + 'comma\n' + ' (e.g. 
"(3 | 4)") is a group pattern. While a single pattern ' + 'enclosed\n' + ' in square brackets (e.g. "[3 | 4]") is still a sequence ' + 'pattern.\n' + '\n' + 'At most one star subpattern may be in a sequence pattern. The ' + 'star\n' + 'subpattern may occur in any position. If no star subpattern is\n' + 'present, the sequence pattern is a fixed-length sequence ' + 'pattern;\n' + 'otherwise it is a variable-length sequence pattern.\n' + '\n' + 'The following is the logical flow for matching a sequence ' + 'pattern\n' + 'against a subject value:\n' + '\n' + '1. If the subject value is not a sequence [2], the sequence ' + 'pattern\n' + ' fails.\n' + '\n' + '2. If the subject value is an instance of "str", "bytes" or\n' + ' "bytearray" the sequence pattern fails.\n' + '\n' + '3. The subsequent steps depend on whether the sequence pattern ' + 'is\n' + ' fixed or variable-length.\n' + '\n' + ' If the sequence pattern is fixed-length:\n' + '\n' + ' 1. If the length of the subject sequence is not equal to the ' + 'number\n' + ' of subpatterns, the sequence pattern fails\n' + '\n' + ' 2. Subpatterns in the sequence pattern are matched to their\n' + ' corresponding items in the subject sequence from left to ' + 'right.\n' + ' Matching stops as soon as a subpattern fails. If all\n' + ' subpatterns succeed in matching their corresponding item, ' + 'the\n' + ' sequence pattern succeeds.\n' + '\n' + ' Otherwise, if the sequence pattern is variable-length:\n' + '\n' + ' 1. If the length of the subject sequence is less than the ' + 'number of\n' + ' non-star subpatterns, the sequence pattern fails.\n' + '\n' + ' 2. The leading non-star subpatterns are matched to their\n' + ' corresponding items as for fixed-length sequences.\n' + '\n' + ' 3. 
If the previous step succeeds, the star subpattern matches ' + 'a\n' + ' list formed of the remaining subject items, excluding the\n' + ' remaining items corresponding to non-star subpatterns ' + 'following\n' + ' the star subpattern.\n' + '\n' + ' 4. Remaining non-star subpatterns are matched to their\n' + ' corresponding subject items, as for a fixed-length ' + 'sequence.\n' + '\n' + ' Note:\n' + '\n' + ' The length of the subject sequence is obtained via "len()" ' + '(i.e.\n' + ' via the "__len__()" protocol). This length may be cached ' + 'by the\n' + ' interpreter in a similar manner as value patterns.\n' + '\n' + 'In simple terms "[P1, P2, P3," … ", P]" matches only if all ' + 'the\n' + 'following happens:\n' + '\n' + '* check "" is a sequence\n' + '\n' + '* "len(subject) == "\n' + '\n' + '* "P1" matches "[0]" (note that this match can also ' + 'bind\n' + ' names)\n' + '\n' + '* "P2" matches "[1]" (note that this match can also ' + 'bind\n' + ' names)\n' + '\n' + '* … and so on for the corresponding pattern/element.\n' + '\n' + '\n' + 'Mapping Patterns\n' + '~~~~~~~~~~~~~~~~\n' + '\n' + 'A mapping pattern contains one or more key-value patterns. The ' + 'syntax\n' + 'is similar to the construction of a dictionary. Syntax:\n' + '\n' + ' mapping_pattern ::= "{" [items_pattern] "}"\n' + ' items_pattern ::= ",".key_value_pattern+ ","?\n' + ' key_value_pattern ::= (literal_pattern | value_pattern) ":" ' + 'pattern\n' + ' | double_star_pattern\n' + ' double_star_pattern ::= "**" capture_pattern\n' + '\n' + 'At most one double star pattern may be in a mapping pattern. ' + 'The\n' + 'double star pattern must be the last subpattern in the mapping\n' + 'pattern.\n' + '\n' + 'Duplicate keys in mapping patterns are disallowed. Duplicate ' + 'literal\n' + 'keys will raise a "SyntaxError". 
Two keys that otherwise have ' + 'the same\n' + 'value will raise a "ValueError" at runtime.\n' + '\n' + 'The following is the logical flow for matching a mapping ' + 'pattern\n' + 'against a subject value:\n' + '\n' + '1. If the subject value is not a mapping [3],the mapping ' + 'pattern\n' + ' fails.\n' + '\n' + '2. If every key given in the mapping pattern is present in the ' + 'subject\n' + ' mapping, and the pattern for each key matches the ' + 'corresponding\n' + ' item of the subject mapping, the mapping pattern succeeds.\n' + '\n' + '3. If duplicate keys are detected in the mapping pattern, the ' + 'pattern\n' + ' is considered invalid. A "SyntaxError" is raised for ' + 'duplicate\n' + ' literal values; or a "ValueError" for named keys of the same ' + 'value.\n' + '\n' + 'Note:\n' + '\n' + ' Key-value pairs are matched using the two-argument form of ' + 'the\n' + ' mapping subject’s "get()" method. Matched key-value pairs ' + 'must\n' + ' already be present in the mapping, and not created on-the-fly ' + 'via\n' + ' "__missing__()" or "__getitem__()".\n' + '\n' + 'In simple terms "{KEY1: P1, KEY2: P2, ... }" matches only if all ' + 'the\n' + 'following happens:\n' + '\n' + '* check "" is a mapping\n' + '\n' + '* "KEY1 in "\n' + '\n' + '* "P1" matches "[KEY1]"\n' + '\n' + '* … and so on for the corresponding KEY/pattern pair.\n' + '\n' + '\n' + 'Class Patterns\n' + '~~~~~~~~~~~~~~\n' + '\n' + 'A class pattern represents a class and its positional and ' + 'keyword\n' + 'arguments (if any). Syntax:\n' + '\n' + ' class_pattern ::= name_or_attr "(" [pattern_arguments ' + '","?] 
")"\n' + ' pattern_arguments ::= positional_patterns ["," ' + 'keyword_patterns]\n' + ' | keyword_patterns\n' + ' positional_patterns ::= ",".pattern+\n' + ' keyword_patterns ::= ",".keyword_pattern+\n' + ' keyword_pattern ::= NAME "=" pattern\n' + '\n' + 'The same keyword should not be repeated in class patterns.\n' + '\n' + 'The following is the logical flow for matching a class pattern ' + 'against\n' + 'a subject value:\n' + '\n' + '1. If "name_or_attr" is not an instance of the builtin "type" , ' + 'raise\n' + ' "TypeError".\n' + '\n' + '2. If the subject value is not an instance of "name_or_attr" ' + '(tested\n' + ' via "isinstance()"), the class pattern fails.\n' + '\n' + '3. If no pattern arguments are present, the pattern succeeds.\n' + ' Otherwise, the subsequent steps depend on whether keyword or\n' + ' positional argument patterns are present.\n' + '\n' + ' For a number of built-in types (specified below), a single\n' + ' positional subpattern is accepted which will match the ' + 'entire\n' + ' subject; for these types keyword patterns also work as for ' + 'other\n' + ' types.\n' + '\n' + ' If only keyword patterns are present, they are processed as\n' + ' follows, one by one:\n' + '\n' + ' I. The keyword is looked up as an attribute on the subject.\n' + '\n' + ' * If this raises an exception other than "AttributeError", ' + 'the\n' + ' exception bubbles up.\n' + '\n' + ' * If this raises "AttributeError", the class pattern has ' + 'failed.\n' + '\n' + ' * Else, the subpattern associated with the keyword pattern ' + 'is\n' + ' matched against the subject’s attribute value. If this ' + 'fails,\n' + ' the class pattern fails; if this succeeds, the match ' + 'proceeds\n' + ' to the next keyword.\n' + '\n' + ' II. 
If all keyword patterns succeed, the class pattern ' + 'succeeds.\n' + '\n' + ' If any positional patterns are present, they are converted ' + 'to\n' + ' keyword patterns using the "__match_args__" attribute on the ' + 'class\n' + ' "name_or_attr" before matching:\n' + '\n' + ' I. The equivalent of "getattr(cls, "__match_args__", ())" is\n' + ' called.\n' + '\n' + ' * If this raises an exception, the exception bubbles up.\n' + '\n' + ' * If the returned value is not a tuple, the conversion ' + 'fails and\n' + ' "TypeError" is raised.\n' + '\n' + ' * If there are more positional patterns than\n' + ' "len(cls.__match_args__)", "TypeError" is raised.\n' + '\n' + ' * Otherwise, positional pattern "i" is converted to a ' + 'keyword\n' + ' pattern using "__match_args__[i]" as the keyword.\n' + ' "__match_args__[i]" must be a string; if not "TypeError" ' + 'is\n' + ' raised.\n' + '\n' + ' * If there are duplicate keywords, "TypeError" is raised.\n' + '\n' + ' See also:\n' + '\n' + ' Customizing positional arguments in class pattern ' + 'matching\n' + '\n' + ' II. 
Once all positional patterns have been converted to ' + 'keyword\n' + ' patterns,\n' + ' the match proceeds as if there were only keyword ' + 'patterns.\n' + '\n' + ' For the following built-in types the handling of positional\n' + ' subpatterns is different:\n' + '\n' + ' * "bool"\n' + '\n' + ' * "bytearray"\n' + '\n' + ' * "bytes"\n' + '\n' + ' * "dict"\n' + '\n' + ' * "float"\n' + '\n' + ' * "frozenset"\n' + '\n' + ' * "int"\n' + '\n' + ' * "list"\n' + '\n' + ' * "set"\n' + '\n' + ' * "str"\n' + '\n' + ' * "tuple"\n' + '\n' + ' These classes accept a single positional argument, and the ' + 'pattern\n' + ' there is matched against the whole object rather than an ' + 'attribute.\n' + ' For example "int(0|1)" matches the value "0", but not the ' + 'value\n' + ' "0.0".\n' + '\n' + 'In simple terms "CLS(P1, attr=P2)" matches only if the ' + 'following\n' + 'happens:\n' + '\n' + '* "isinstance(, CLS)"\n' + '\n' + '* convert "P1" to a keyword pattern using "CLS.__match_args__"\n' + '\n' + '* For each keyword argument "attr=P2":\n' + ' * "hasattr(, "attr")"\n' + '\n' + ' * "P2" matches ".attr"\n' + '\n' + '* … and so on for the corresponding keyword argument/pattern ' + 'pair.\n' + '\n' + 'See also:\n' + '\n' + ' * **PEP 634** – Structural Pattern Matching: Specification\n' + '\n' + ' * **PEP 636** – Structural Pattern Matching: Tutorial\n' + '\n' + '\n' + 'Function definitions\n' + '====================\n' + '\n' + 'A function definition defines a user-defined function object ' + '(see\n' + 'section The standard type hierarchy):\n' + '\n' + ' funcdef ::= [decorators] "def" funcname "(" ' + '[parameter_list] ")"\n' + ' ["->" expression] ":" suite\n' + ' decorators ::= decorator+\n' + ' decorator ::= "@" assignment_expression ' + 'NEWLINE\n' + ' parameter_list ::= defparameter ("," ' + 'defparameter)* "," "/" ["," [parameter_list_no_posonly]]\n' + ' | parameter_list_no_posonly\n' + ' parameter_list_no_posonly ::= defparameter ("," ' + 'defparameter)* ["," 
[parameter_list_starargs]]\n' + ' | parameter_list_starargs\n' + ' parameter_list_starargs ::= "*" [parameter] ("," ' + 'defparameter)* ["," ["**" parameter [","]]]\n' + ' | "**" parameter [","]\n' + ' parameter ::= identifier [":" expression]\n' + ' defparameter ::= parameter ["=" expression]\n' + ' funcname ::= identifier\n' + '\n' + 'A function definition is an executable statement. Its execution ' + 'binds\n' + 'the function name in the current local namespace to a function ' + 'object\n' + '(a wrapper around the executable code for the function). This\n' + 'function object contains a reference to the current global ' + 'namespace\n' + 'as the global namespace to be used when the function is called.\n' + '\n' + 'The function definition does not execute the function body; this ' + 'gets\n' + 'executed only when the function is called. [4]\n' + '\n' + 'A function definition may be wrapped by one or more *decorator*\n' + 'expressions. Decorator expressions are evaluated when the ' + 'function is\n' + 'defined, in the scope that contains the function definition. ' + 'The\n' + 'result must be a callable, which is invoked with the function ' + 'object\n' + 'as the only argument. The returned value is bound to the ' + 'function name\n' + 'instead of the function object. Multiple decorators are applied ' + 'in\n' + 'nested fashion. For example, the following code\n' + '\n' + ' @f1(arg)\n' + ' @f2\n' + ' def func(): pass\n' + '\n' + 'is roughly equivalent to\n' + '\n' + ' def func(): pass\n' + ' func = f1(arg)(f2(func))\n' + '\n' + 'except that the original function is not temporarily bound to ' + 'the name\n' + '"func".\n' + '\n' + 'Changed in version 3.9: Functions may be decorated with any ' + 'valid\n' + '"assignment_expression". 
Previously, the grammar was much more\n' + 'restrictive; see **PEP 614** for details.\n' + '\n' + 'When one or more *parameters* have the form *parameter* "="\n' + '*expression*, the function is said to have “default parameter ' + 'values.”\n' + 'For a parameter with a default value, the corresponding ' + '*argument* may\n' + 'be omitted from a call, in which case the parameter’s default ' + 'value is\n' + 'substituted. If a parameter has a default value, all following\n' + 'parameters up until the “"*"” must also have a default value — ' + 'this is\n' + 'a syntactic restriction that is not expressed by the grammar.\n' + '\n' + '**Default parameter values are evaluated from left to right when ' + 'the\n' + 'function definition is executed.** This means that the ' + 'expression is\n' + 'evaluated once, when the function is defined, and that the same ' + '“pre-\n' + 'computed” value is used for each call. This is especially ' + 'important\n' + 'to understand when a default parameter value is a mutable ' + 'object, such\n' + 'as a list or a dictionary: if the function modifies the object ' + '(e.g.\n' + 'by appending an item to a list), the default parameter value is ' + 'in\n' + 'effect modified. This is generally not what was intended. A ' + 'way\n' + 'around this is to use "None" as the default, and explicitly test ' + 'for\n' + 'it in the body of the function, e.g.:\n' + '\n' + ' def whats_on_the_telly(penguin=None):\n' + ' if penguin is None:\n' + ' penguin = []\n' + ' penguin.append("property of the zoo")\n' + ' return penguin\n' + '\n' + 'Function call semantics are described in more detail in section ' + 'Calls.\n' + 'A function call always assigns values to all parameters ' + 'mentioned in\n' + 'the parameter list, either from positional arguments, from ' + 'keyword\n' + 'arguments, or from default values. 
If the form “"*identifier"” ' + 'is\n' + 'present, it is initialized to a tuple receiving any excess ' + 'positional\n' + 'parameters, defaulting to the empty tuple. If the form\n' + '“"**identifier"” is present, it is initialized to a new ordered\n' + 'mapping receiving any excess keyword arguments, defaulting to a ' + 'new\n' + 'empty mapping of the same type. Parameters after “"*"” or\n' + '“"*identifier"” are keyword-only parameters and may only be ' + 'passed by\n' + 'keyword arguments. Parameters before “"/"” are positional-only\n' + 'parameters and may only be passed by positional arguments.\n' + '\n' + 'Changed in version 3.8: The "/" function parameter syntax may be ' + 'used\n' + 'to indicate positional-only parameters. See **PEP 570** for ' + 'details.\n' + '\n' + 'Parameters may have an *annotation* of the form “": ' + 'expression"”\n' + 'following the parameter name. Any parameter may have an ' + 'annotation,\n' + 'even those of the form "*identifier" or "**identifier". ' + 'Functions may\n' + 'have “return” annotation of the form “"-> expression"” after ' + 'the\n' + 'parameter list. These annotations can be any valid Python ' + 'expression.\n' + 'The presence of annotations does not change the semantics of a\n' + 'function. The annotation values are available as values of a\n' + 'dictionary keyed by the parameters’ names in the ' + '"__annotations__"\n' + 'attribute of the function object. If the "annotations" import ' + 'from\n' + '"__future__" is used, annotations are preserved as strings at ' + 'runtime\n' + 'which enables postponed evaluation. Otherwise, they are ' + 'evaluated\n' + 'when the function definition is executed. In this case ' + 'annotations\n' + 'may be evaluated in a different order than they appear in the ' + 'source\n' + 'code.\n' + '\n' + 'It is also possible to create anonymous functions (functions not ' + 'bound\n' + 'to a name), for immediate use in expressions. 
This uses lambda\n' + 'expressions, described in section Lambdas. Note that the ' + 'lambda\n' + 'expression is merely a shorthand for a simplified function ' + 'definition;\n' + 'a function defined in a “"def"” statement can be passed around ' + 'or\n' + 'assigned to another name just like a function defined by a ' + 'lambda\n' + 'expression. The “"def"” form is actually more powerful since ' + 'it\n' + 'allows the execution of multiple statements and annotations.\n' + '\n' + '**Programmer’s note:** Functions are first-class objects. A ' + '“"def"”\n' + 'statement executed inside a function definition defines a local\n' + 'function that can be returned or passed around. Free variables ' + 'used\n' + 'in the nested function can access the local variables of the ' + 'function\n' + 'containing the def. See section Naming and binding for ' + 'details.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 3107** - Function Annotations\n' + ' The original specification for function annotations.\n' + '\n' + ' **PEP 484** - Type Hints\n' + ' Definition of a standard meaning for annotations: type ' + 'hints.\n' + '\n' + ' **PEP 526** - Syntax for Variable Annotations\n' + ' Ability to type hint variable declarations, including ' + 'class\n' + ' variables and instance variables\n' + '\n' + ' **PEP 563** - Postponed Evaluation of Annotations\n' + ' Support for forward references within annotations by ' + 'preserving\n' + ' annotations in a string form at runtime instead of eager\n' + ' evaluation.\n' + '\n' + '\n' + 'Class definitions\n' + '=================\n' + '\n' + 'A class definition defines a class object (see section The ' + 'standard\n' + 'type hierarchy):\n' + '\n' + ' classdef ::= [decorators] "class" classname [inheritance] ' + '":" suite\n' + ' inheritance ::= "(" [argument_list] ")"\n' + ' classname ::= identifier\n' + '\n' + 'A class definition is an executable statement. 
The inheritance ' + 'list\n' + 'usually gives a list of base classes (see Metaclasses for more\n' + 'advanced uses), so each item in the list should evaluate to a ' + 'class\n' + 'object which allows subclassing. Classes without an inheritance ' + 'list\n' + 'inherit, by default, from the base class "object"; hence,\n' + '\n' + ' class Foo:\n' + ' pass\n' + '\n' + 'is equivalent to\n' + '\n' + ' class Foo(object):\n' + ' pass\n' + '\n' + 'The class’s suite is then executed in a new execution frame ' + '(see\n' + 'Naming and binding), using a newly created local namespace and ' + 'the\n' + 'original global namespace. (Usually, the suite contains mostly\n' + 'function definitions.) When the class’s suite finishes ' + 'execution, its\n' + 'execution frame is discarded but its local namespace is saved. ' + '[5] A\n' + 'class object is then created using the inheritance list for the ' + 'base\n' + 'classes and the saved local namespace for the attribute ' + 'dictionary.\n' + 'The class name is bound to this class object in the original ' + 'local\n' + 'namespace.\n' + '\n' + 'The order in which attributes are defined in the class body is\n' + 'preserved in the new class’s "__dict__". Note that this is ' + 'reliable\n' + 'only right after the class is created and only for classes that ' + 'were\n' + 'defined using the definition syntax.\n' + '\n' + 'Class creation can be customized heavily using metaclasses.\n' + '\n' + 'Classes can also be decorated: just like when decorating ' + 'functions,\n' + '\n' + ' @f1(arg)\n' + ' @f2\n' + ' class Foo: pass\n' + '\n' + 'is roughly equivalent to\n' + '\n' + ' class Foo: pass\n' + ' Foo = f1(arg)(f2(Foo))\n' + '\n' + 'The evaluation rules for the decorator expressions are the same ' + 'as for\n' + 'function decorators. The result is then bound to the class ' + 'name.\n' + '\n' + 'Changed in version 3.9: Classes may be decorated with any valid\n' + '"assignment_expression". 
Previously, the grammar was much more\n' + 'restrictive; see **PEP 614** for details.\n' + '\n' + '**Programmer’s note:** Variables defined in the class definition ' + 'are\n' + 'class attributes; they are shared by instances. Instance ' + 'attributes\n' + 'can be set in a method with "self.name = value". Both class ' + 'and\n' + 'instance attributes are accessible through the notation ' + '“"self.name"”,\n' + 'and an instance attribute hides a class attribute with the same ' + 'name\n' + 'when accessed in this way. Class attributes can be used as ' + 'defaults\n' + 'for instance attributes, but using mutable values there can lead ' + 'to\n' + 'unexpected results. Descriptors can be used to create instance\n' + 'variables with different implementation details.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 3115** - Metaclasses in Python 3000\n' + ' The proposal that changed the declaration of metaclasses to ' + 'the\n' + ' current syntax, and the semantics for how classes with\n' + ' metaclasses are constructed.\n' + '\n' + ' **PEP 3129** - Class Decorators\n' + ' The proposal that added class decorators. Function and ' + 'method\n' + ' decorators were introduced in **PEP 318**.\n' + '\n' + '\n' + 'Coroutines\n' + '==========\n' + '\n' + 'New in version 3.5.\n' + '\n' + '\n' + 'Coroutine function definition\n' + '-----------------------------\n' + '\n' + ' async_funcdef ::= [decorators] "async" "def" funcname "(" ' + '[parameter_list] ")"\n' + ' ["->" expression] ":" suite\n' + '\n' + 'Execution of Python coroutines can be suspended and resumed at ' + 'many\n' + 'points (see *coroutine*). 
"await" expressions, "async for" and ' + '"async\n' + 'with" can only be used in the body of a coroutine function.\n' + '\n' + 'Functions defined with "async def" syntax are always coroutine\n' + 'functions, even if they do not contain "await" or "async" ' + 'keywords.\n' + '\n' + 'It is a "SyntaxError" to use a "yield from" expression inside ' + 'the body\n' + 'of a coroutine function.\n' + '\n' + 'An example of a coroutine function:\n' + '\n' + ' async def func(param1, param2):\n' + ' do_stuff()\n' + ' await some_coroutine()\n' + '\n' + 'Changed in version 3.7: "await" and "async" are now keywords;\n' + 'previously they were only treated as such inside the body of a\n' + 'coroutine function.\n' + '\n' + '\n' + 'The "async for" statement\n' + '-------------------------\n' + '\n' + ' async_for_stmt ::= "async" for_stmt\n' + '\n' + 'An *asynchronous iterable* provides an "__aiter__" method that\n' + 'directly returns an *asynchronous iterator*, which can call\n' + 'asynchronous code in its "__anext__" method.\n' + '\n' + 'The "async for" statement allows convenient iteration over\n' + 'asynchronous iterables.\n' + '\n' + 'The following code:\n' + '\n' + ' async for TARGET in ITER:\n' + ' SUITE\n' + ' else:\n' + ' SUITE2\n' + '\n' + 'Is semantically equivalent to:\n' + '\n' + ' iter = (ITER)\n' + ' iter = type(iter).__aiter__(iter)\n' + ' running = True\n' + '\n' + ' while running:\n' + ' try:\n' + ' TARGET = await type(iter).__anext__(iter)\n' + ' except StopAsyncIteration:\n' + ' running = False\n' + ' else:\n' + ' SUITE\n' + ' else:\n' + ' SUITE2\n' + '\n' + 'See also "__aiter__()" and "__anext__()" for details.\n' + '\n' + 'It is a "SyntaxError" to use an "async for" statement outside ' + 'the body\n' + 'of a coroutine function.\n' + '\n' + '\n' + 'The "async with" statement\n' + '--------------------------\n' + '\n' + ' async_with_stmt ::= "async" with_stmt\n' + '\n' + 'An *asynchronous context manager* is a *context manager* that is ' + 'able\n' + 'to suspend 
execution in its *enter* and *exit* methods.\n' + '\n' + 'The following code:\n' + '\n' + ' async with EXPRESSION as TARGET:\n' + ' SUITE\n' + '\n' + 'is semantically equivalent to:\n' + '\n' + ' manager = (EXPRESSION)\n' + ' aenter = type(manager).__aenter__\n' + ' aexit = type(manager).__aexit__\n' + ' value = await aenter(manager)\n' + ' hit_except = False\n' + '\n' + ' try:\n' + ' TARGET = value\n' + ' SUITE\n' + ' except:\n' + ' hit_except = True\n' + ' if not await aexit(manager, *sys.exc_info()):\n' + ' raise\n' + ' finally:\n' + ' if not hit_except:\n' + ' await aexit(manager, None, None, None)\n' + '\n' + 'See also "__aenter__()" and "__aexit__()" for details.\n' + '\n' + 'It is a "SyntaxError" to use an "async with" statement outside ' + 'the\n' + 'body of a coroutine function.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 492** - Coroutines with async and await syntax\n' + ' The proposal that made coroutines a proper standalone ' + 'concept in\n' + ' Python, and added supporting syntax.\n' + '\n' + '-[ Footnotes ]-\n' + '\n' + '[1] The exception is propagated to the invocation stack unless ' + 'there\n' + ' is a "finally" clause which happens to raise another ' + 'exception.\n' + ' That new exception causes the old one to be lost.\n' + '\n' + '[2] In pattern matching, a sequence is defined as one of the\n' + ' following:\n' + '\n' + ' * a class that inherits from "collections.abc.Sequence"\n' + '\n' + ' * a Python class that has been registered as\n' + ' "collections.abc.Sequence"\n' + '\n' + ' * a builtin class that has its (CPython) ' + '"Py_TPFLAGS_SEQUENCE"\n' + ' bit set\n' + '\n' + ' * a class that inherits from any of the above\n' + '\n' + ' The following standard library classes are sequences:\n' + '\n' + ' * "array.array"\n' + '\n' + ' * "collections.deque"\n' + '\n' + ' * "list"\n' + '\n' + ' * "memoryview"\n' + '\n' + ' * "range"\n' + '\n' + ' * "tuple"\n' + '\n' + ' Note:\n' + '\n' + ' Subject values of type "str", "bytes", and "bytearray" do ' 
+ 'not\n' + ' match sequence patterns.\n' + '\n' + '[3] In pattern matching, a mapping is defined as one of the ' + 'following:\n' + '\n' + ' * a class that inherits from "collections.abc.Mapping"\n' + '\n' + ' * a Python class that has been registered as\n' + ' "collections.abc.Mapping"\n' + '\n' + ' * a builtin class that has its (CPython) ' + '"Py_TPFLAGS_MAPPING"\n' + ' bit set\n' + '\n' + ' * a class that inherits from any of the above\n' + '\n' + ' The standard library classes "dict" and ' + '"types.MappingProxyType"\n' + ' are mappings.\n' + '\n' + '[4] A string literal appearing as the first statement in the ' + 'function\n' + ' body is transformed into the function’s "__doc__" attribute ' + 'and\n' + ' therefore the function’s *docstring*.\n' + '\n' + '[5] A string literal appearing as the first statement in the ' + 'class\n' + ' body is transformed into the namespace’s "__doc__" item and\n' + ' therefore the class’s *docstring*.\n', + 'context-managers': 'With Statement Context Managers\n' + '*******************************\n' + '\n' + 'A *context manager* is an object that defines the ' + 'runtime context to\n' + 'be established when executing a "with" statement. The ' + 'context manager\n' + 'handles the entry into, and the exit from, the desired ' + 'runtime context\n' + 'for the execution of the block of code. Context ' + 'managers are normally\n' + 'invoked using the "with" statement (described in section ' + 'The with\n' + 'statement), but can also be used by directly invoking ' + 'their methods.\n' + '\n' + 'Typical uses of context managers include saving and ' + 'restoring various\n' + 'kinds of global state, locking and unlocking resources, ' + 'closing opened\n' + 'files, etc.\n' + '\n' + 'For more information on context managers, see Context ' + 'Manager Types.\n' + '\n' + 'object.__enter__(self)\n' + '\n' + ' Enter the runtime context related to this object. 
The ' + '"with"\n' + ' statement will bind this method’s return value to the ' + 'target(s)\n' + ' specified in the "as" clause of the statement, if ' + 'any.\n' + '\n' + 'object.__exit__(self, exc_type, exc_value, traceback)\n' + '\n' + ' Exit the runtime context related to this object. The ' + 'parameters\n' + ' describe the exception that caused the context to be ' + 'exited. If the\n' + ' context was exited without an exception, all three ' + 'arguments will\n' + ' be "None".\n' + '\n' + ' If an exception is supplied, and the method wishes to ' + 'suppress the\n' + ' exception (i.e., prevent it from being propagated), ' + 'it should\n' + ' return a true value. Otherwise, the exception will be ' + 'processed\n' + ' normally upon exit from this method.\n' + '\n' + ' Note that "__exit__()" methods should not reraise the ' + 'passed-in\n' + ' exception; this is the caller’s responsibility.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 343** - The “with” statement\n' + ' The specification, background, and examples for the ' + 'Python "with"\n' + ' statement.\n', + 'continue': 'The "continue" statement\n' + '************************\n' + '\n' + ' continue_stmt ::= "continue"\n' + '\n' + '"continue" may only occur syntactically nested in a "for" or ' + '"while"\n' + 'loop, but not nested in a function or class definition within ' + 'that\n' + 'loop. 
It continues with the next cycle of the nearest enclosing ' + 'loop.\n' + '\n' + 'When "continue" passes control out of a "try" statement with a\n' + '"finally" clause, that "finally" clause is executed before ' + 'really\n' + 'starting the next loop cycle.\n', + 'conversions': 'Arithmetic conversions\n' + '**********************\n' + '\n' + 'When a description of an arithmetic operator below uses the ' + 'phrase\n' + '“the numeric arguments are converted to a common type”, this ' + 'means\n' + 'that the operator implementation for built-in types works as ' + 'follows:\n' + '\n' + '* If either argument is a complex number, the other is ' + 'converted to\n' + ' complex;\n' + '\n' + '* otherwise, if either argument is a floating point number, ' + 'the other\n' + ' is converted to floating point;\n' + '\n' + '* otherwise, both must be integers and no conversion is ' + 'necessary.\n' + '\n' + 'Some additional rules apply for certain operators (e.g., a ' + 'string as a\n' + 'left argument to the ‘%’ operator). Extensions must define ' + 'their own\n' + 'conversion behavior.\n', + 'customization': 'Basic customization\n' + '*******************\n' + '\n' + 'object.__new__(cls[, ...])\n' + '\n' + ' Called to create a new instance of class *cls*. ' + '"__new__()" is a\n' + ' static method (special-cased so you need not declare it ' + 'as such)\n' + ' that takes the class of which an instance was requested ' + 'as its\n' + ' first argument. The remaining arguments are those ' + 'passed to the\n' + ' object constructor expression (the call to the class). 
' + 'The return\n' + ' value of "__new__()" should be the new object instance ' + '(usually an\n' + ' instance of *cls*).\n' + '\n' + ' Typical implementations create a new instance of the ' + 'class by\n' + ' invoking the superclass’s "__new__()" method using\n' + ' "super().__new__(cls[, ...])" with appropriate arguments ' + 'and then\n' + ' modifying the newly created instance as necessary before ' + 'returning\n' + ' it.\n' + '\n' + ' If "__new__()" is invoked during object construction and ' + 'it returns\n' + ' an instance of *cls*, then the new instance’s ' + '"__init__()" method\n' + ' will be invoked like "__init__(self[, ...])", where ' + '*self* is the\n' + ' new instance and the remaining arguments are the same as ' + 'were\n' + ' passed to the object constructor.\n' + '\n' + ' If "__new__()" does not return an instance of *cls*, ' + 'then the new\n' + ' instance’s "__init__()" method will not be invoked.\n' + '\n' + ' "__new__()" is intended mainly to allow subclasses of ' + 'immutable\n' + ' types (like int, str, or tuple) to customize instance ' + 'creation. It\n' + ' is also commonly overridden in custom metaclasses in ' + 'order to\n' + ' customize class creation.\n' + '\n' + 'object.__init__(self[, ...])\n' + '\n' + ' Called after the instance has been created (by ' + '"__new__()"), but\n' + ' before it is returned to the caller. The arguments are ' + 'those\n' + ' passed to the class constructor expression. 
If a base ' + 'class has an\n' + ' "__init__()" method, the derived class’s "__init__()" ' + 'method, if\n' + ' any, must explicitly call it to ensure proper ' + 'initialization of the\n' + ' base class part of the instance; for example:\n' + ' "super().__init__([args...])".\n' + '\n' + ' Because "__new__()" and "__init__()" work together in ' + 'constructing\n' + ' objects ("__new__()" to create it, and "__init__()" to ' + 'customize\n' + ' it), no non-"None" value may be returned by ' + '"__init__()"; doing so\n' + ' will cause a "TypeError" to be raised at runtime.\n' + '\n' + 'object.__del__(self)\n' + '\n' + ' Called when the instance is about to be destroyed. This ' + 'is also\n' + ' called a finalizer or (improperly) a destructor. If a ' + 'base class\n' + ' has a "__del__()" method, the derived class’s ' + '"__del__()" method,\n' + ' if any, must explicitly call it to ensure proper ' + 'deletion of the\n' + ' base class part of the instance.\n' + '\n' + ' It is possible (though not recommended!) for the ' + '"__del__()" method\n' + ' to postpone destruction of the instance by creating a ' + 'new reference\n' + ' to it. This is called object *resurrection*. It is\n' + ' implementation-dependent whether "__del__()" is called a ' + 'second\n' + ' time when a resurrected object is about to be destroyed; ' + 'the\n' + ' current *CPython* implementation only calls it once.\n' + '\n' + ' It is not guaranteed that "__del__()" methods are called ' + 'for\n' + ' objects that still exist when the interpreter exits.\n' + '\n' + ' Note:\n' + '\n' + ' "del x" doesn’t directly call "x.__del__()" — the ' + 'former\n' + ' decrements the reference count for "x" by one, and the ' + 'latter is\n' + ' only called when "x"’s reference count reaches zero.\n' + '\n' + ' **CPython implementation detail:** It is possible for a ' + 'reference\n' + ' cycle to prevent the reference count of an object from ' + 'going to\n' + ' zero. 
In this case, the cycle will be later detected ' + 'and deleted\n' + ' by the *cyclic garbage collector*. A common cause of ' + 'reference\n' + ' cycles is when an exception has been caught in a local ' + 'variable.\n' + ' The frame’s locals then reference the exception, which ' + 'references\n' + ' its own traceback, which references the locals of all ' + 'frames caught\n' + ' in the traceback.\n' + '\n' + ' See also: Documentation for the "gc" module.\n' + '\n' + ' Warning:\n' + '\n' + ' Due to the precarious circumstances under which ' + '"__del__()"\n' + ' methods are invoked, exceptions that occur during ' + 'their execution\n' + ' are ignored, and a warning is printed to "sys.stderr" ' + 'instead.\n' + ' In particular:\n' + '\n' + ' * "__del__()" can be invoked when arbitrary code is ' + 'being\n' + ' executed, including from any arbitrary thread. If ' + '"__del__()"\n' + ' needs to take a lock or invoke any other blocking ' + 'resource, it\n' + ' may deadlock as the resource may already be taken by ' + 'the code\n' + ' that gets interrupted to execute "__del__()".\n' + '\n' + ' * "__del__()" can be executed during interpreter ' + 'shutdown. As a\n' + ' consequence, the global variables it needs to access ' + '(including\n' + ' other modules) may already have been deleted or set ' + 'to "None".\n' + ' Python guarantees that globals whose name begins ' + 'with a single\n' + ' underscore are deleted from their module before ' + 'other globals\n' + ' are deleted; if no other references to such globals ' + 'exist, this\n' + ' may help in assuring that imported modules are still ' + 'available\n' + ' at the time when the "__del__()" method is called.\n' + '\n' + 'object.__repr__(self)\n' + '\n' + ' Called by the "repr()" built-in function to compute the ' + '“official”\n' + ' string representation of an object. 
If at all possible, ' + 'this\n' + ' should look like a valid Python expression that could be ' + 'used to\n' + ' recreate an object with the same value (given an ' + 'appropriate\n' + ' environment). If this is not possible, a string of the ' + 'form\n' + ' "<...some useful description...>" should be returned. ' + 'The return\n' + ' value must be a string object. If a class defines ' + '"__repr__()" but\n' + ' not "__str__()", then "__repr__()" is also used when an ' + '“informal”\n' + ' string representation of instances of that class is ' + 'required.\n' + '\n' + ' This is typically used for debugging, so it is important ' + 'that the\n' + ' representation is information-rich and unambiguous.\n' + '\n' + 'object.__str__(self)\n' + '\n' + ' Called by "str(object)" and the built-in functions ' + '"format()" and\n' + ' "print()" to compute the “informal” or nicely printable ' + 'string\n' + ' representation of an object. The return value must be a ' + 'string\n' + ' object.\n' + '\n' + ' This method differs from "object.__repr__()" in that ' + 'there is no\n' + ' expectation that "__str__()" return a valid Python ' + 'expression: a\n' + ' more convenient or concise representation can be used.\n' + '\n' + ' The default implementation defined by the built-in type ' + '"object"\n' + ' calls "object.__repr__()".\n' + '\n' + 'object.__bytes__(self)\n' + '\n' + ' Called by bytes to compute a byte-string representation ' + 'of an\n' + ' object. This should return a "bytes" object.\n' + '\n' + 'object.__format__(self, format_spec)\n' + '\n' + ' Called by the "format()" built-in function, and by ' + 'extension,\n' + ' evaluation of formatted string literals and the ' + '"str.format()"\n' + ' method, to produce a “formatted” string representation ' + 'of an\n' + ' object. The *format_spec* argument is a string that ' + 'contains a\n' + ' description of the formatting options desired. 
The ' + 'interpretation\n' + ' of the *format_spec* argument is up to the type ' + 'implementing\n' + ' "__format__()", however most classes will either ' + 'delegate\n' + ' formatting to one of the built-in types, or use a ' + 'similar\n' + ' formatting option syntax.\n' + '\n' + ' See Format Specification Mini-Language for a description ' + 'of the\n' + ' standard formatting syntax.\n' + '\n' + ' The return value must be a string object.\n' + '\n' + ' Changed in version 3.4: The __format__ method of ' + '"object" itself\n' + ' raises a "TypeError" if passed any non-empty string.\n' + '\n' + ' Changed in version 3.7: "object.__format__(x, \'\')" is ' + 'now\n' + ' equivalent to "str(x)" rather than "format(str(x), ' + '\'\')".\n' + '\n' + 'object.__lt__(self, other)\n' + 'object.__le__(self, other)\n' + 'object.__eq__(self, other)\n' + 'object.__ne__(self, other)\n' + 'object.__gt__(self, other)\n' + 'object.__ge__(self, other)\n' + '\n' + ' These are the so-called “rich comparison” methods. The\n' + ' correspondence between operator symbols and method names ' + 'is as\n' + ' follows: "xy" calls\n' + ' "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n' + '\n' + ' A rich comparison method may return the singleton ' + '"NotImplemented"\n' + ' if it does not implement the operation for a given pair ' + 'of\n' + ' arguments. By convention, "False" and "True" are ' + 'returned for a\n' + ' successful comparison. However, these methods can return ' + 'any value,\n' + ' so if the comparison operator is used in a Boolean ' + 'context (e.g.,\n' + ' in the condition of an "if" statement), Python will call ' + '"bool()"\n' + ' on the value to determine if the result is true or ' + 'false.\n' + '\n' + ' By default, "object" implements "__eq__()" by using ' + '"is", returning\n' + ' "NotImplemented" in the case of a false comparison: ' + '"True if x is y\n' + ' else NotImplemented". 
For "__ne__()", by default it ' + 'delegates to\n' + ' "__eq__()" and inverts the result unless it is ' + '"NotImplemented".\n' + ' There are no other implied relationships among the ' + 'comparison\n' + ' operators or default implementations; for example, the ' + 'truth of\n' + ' "(x.__hash__".\n' + '\n' + ' If a class that does not override "__eq__()" wishes to ' + 'suppress\n' + ' hash support, it should include "__hash__ = None" in the ' + 'class\n' + ' definition. A class which defines its own "__hash__()" ' + 'that\n' + ' explicitly raises a "TypeError" would be incorrectly ' + 'identified as\n' + ' hashable by an "isinstance(obj, ' + 'collections.abc.Hashable)" call.\n' + '\n' + ' Note:\n' + '\n' + ' By default, the "__hash__()" values of str and bytes ' + 'objects are\n' + ' “salted” with an unpredictable random value. Although ' + 'they\n' + ' remain constant within an individual Python process, ' + 'they are not\n' + ' predictable between repeated invocations of ' + 'Python.This is\n' + ' intended to provide protection against a ' + 'denial-of-service caused\n' + ' by carefully chosen inputs that exploit the worst ' + 'case\n' + ' performance of a dict insertion, O(n^2) complexity. ' + 'See\n' + ' http://www.ocert.org/advisories/ocert-2011-003.html ' + 'for\n' + ' details.Changing hash values affects the iteration ' + 'order of sets.\n' + ' Python has never made guarantees about this ordering ' + '(and it\n' + ' typically varies between 32-bit and 64-bit builds).See ' + 'also\n' + ' "PYTHONHASHSEED".\n' + '\n' + ' Changed in version 3.3: Hash randomization is enabled by ' + 'default.\n' + '\n' + 'object.__bool__(self)\n' + '\n' + ' Called to implement truth value testing and the built-in ' + 'operation\n' + ' "bool()"; should return "False" or "True". When this ' + 'method is not\n' + ' defined, "__len__()" is called, if it is defined, and ' + 'the object is\n' + ' considered true if its result is nonzero. 
If a class ' + 'defines\n' + ' neither "__len__()" nor "__bool__()", all its instances ' + 'are\n' + ' considered true.\n', + 'debugger': '"pdb" — The Python Debugger\n' + '***************************\n' + '\n' + '**Source code:** Lib/pdb.py\n' + '\n' + '======================================================================\n' + '\n' + 'The module "pdb" defines an interactive source code debugger ' + 'for\n' + 'Python programs. It supports setting (conditional) breakpoints ' + 'and\n' + 'single stepping at the source line level, inspection of stack ' + 'frames,\n' + 'source code listing, and evaluation of arbitrary Python code in ' + 'the\n' + 'context of any stack frame. It also supports post-mortem ' + 'debugging\n' + 'and can be called under program control.\n' + '\n' + 'The debugger is extensible – it is actually defined as the ' + 'class\n' + '"Pdb". This is currently undocumented but easily understood by ' + 'reading\n' + 'the source. The extension interface uses the modules "bdb" and ' + '"cmd".\n' + '\n' + 'See also:\n' + '\n' + ' Module "faulthandler"\n' + ' Used to dump Python tracebacks explicitly, on a fault, ' + 'after a\n' + ' timeout, or on a user signal.\n' + '\n' + ' Module "traceback"\n' + ' Standard interface to extract, format and print stack ' + 'traces of\n' + ' Python programs.\n' + '\n' + 'The debugger’s prompt is "(Pdb)". Typical usage to run a program ' + 'under\n' + 'control of the debugger is:\n' + '\n' + ' >>> import pdb\n' + ' >>> import mymodule\n' + " >>> pdb.run('mymodule.test()')\n" + ' > (0)?()\n' + ' (Pdb) continue\n' + ' > (1)?()\n' + ' (Pdb) continue\n' + " NameError: 'spam'\n" + ' > (1)?()\n' + ' (Pdb)\n' + '\n' + 'Changed in version 3.3: Tab-completion via the "readline" module ' + 'is\n' + 'available for commands and command arguments, e.g. the current ' + 'global\n' + 'and local names are offered as arguments of the "p" command.\n' + '\n' + '"pdb.py" can also be invoked as a script to debug other ' + 'scripts. 
For\n' + 'example:\n' + '\n' + ' python -m pdb myscript.py\n' + '\n' + 'When invoked as a script, pdb will automatically enter ' + 'post-mortem\n' + 'debugging if the program being debugged exits abnormally. After ' + 'post-\n' + 'mortem debugging (or after normal exit of the program), pdb ' + 'will\n' + 'restart the program. Automatic restarting preserves pdb’s state ' + '(such\n' + 'as breakpoints) and in most cases is more useful than quitting ' + 'the\n' + 'debugger upon program’s exit.\n' + '\n' + 'New in version 3.2: "pdb.py" now accepts a "-c" option that ' + 'executes\n' + 'commands as if given in a ".pdbrc" file, see Debugger Commands.\n' + '\n' + 'New in version 3.7: "pdb.py" now accepts a "-m" option that ' + 'execute\n' + 'modules similar to the way "python -m" does. As with a script, ' + 'the\n' + 'debugger will pause execution just before the first line of the\n' + 'module.\n' + '\n' + 'The typical usage to break into the debugger is to insert:\n' + '\n' + ' import pdb; pdb.set_trace()\n' + '\n' + 'at the location you want to break into the debugger, and then ' + 'run the\n' + 'program. 
You can then step through the code following this ' + 'statement,\n' + 'and continue running without the debugger using the "continue"\n' + 'command.\n' + '\n' + 'New in version 3.7: The built-in "breakpoint()", when called ' + 'with\n' + 'defaults, can be used instead of "import pdb; pdb.set_trace()".\n' + '\n' + 'The typical usage to inspect a crashed program is:\n' + '\n' + ' >>> import pdb\n' + ' >>> import mymodule\n' + ' >>> mymodule.test()\n' + ' Traceback (most recent call last):\n' + ' File "", line 1, in \n' + ' File "./mymodule.py", line 4, in test\n' + ' test2()\n' + ' File "./mymodule.py", line 3, in test2\n' + ' print(spam)\n' + ' NameError: spam\n' + ' >>> pdb.pm()\n' + ' > ./mymodule.py(3)test2()\n' + ' -> print(spam)\n' + ' (Pdb)\n' + '\n' + 'The module defines the following functions; each enters the ' + 'debugger\n' + 'in a slightly different way:\n' + '\n' + 'pdb.run(statement, globals=None, locals=None)\n' + '\n' + ' Execute the *statement* (given as a string or a code object) ' + 'under\n' + ' debugger control. The debugger prompt appears before any ' + 'code is\n' + ' executed; you can set breakpoints and type "continue", or you ' + 'can\n' + ' step through the statement using "step" or "next" (all these\n' + ' commands are explained below). The optional *globals* and ' + '*locals*\n' + ' arguments specify the environment in which the code is ' + 'executed; by\n' + ' default the dictionary of the module "__main__" is used. ' + '(See the\n' + ' explanation of the built-in "exec()" or "eval()" functions.)\n' + '\n' + 'pdb.runeval(expression, globals=None, locals=None)\n' + '\n' + ' Evaluate the *expression* (given as a string or a code ' + 'object)\n' + ' under debugger control. When "runeval()" returns, it returns ' + 'the\n' + ' value of the *expression*. 
Otherwise this function is ' + 'similar to\n' + ' "run()".\n' + '\n' + 'pdb.runcall(function, *args, **kwds)\n' + '\n' + ' Call the *function* (a function or method object, not a ' + 'string)\n' + ' with the given arguments. When "runcall()" returns, it ' + 'returns\n' + ' whatever the function call returned. The debugger prompt ' + 'appears\n' + ' as soon as the function is entered.\n' + '\n' + 'pdb.set_trace(*, header=None)\n' + '\n' + ' Enter the debugger at the calling stack frame. This is ' + 'useful to\n' + ' hard-code a breakpoint at a given point in a program, even if ' + 'the\n' + ' code is not otherwise being debugged (e.g. when an assertion\n' + ' fails). If given, *header* is printed to the console just ' + 'before\n' + ' debugging begins.\n' + '\n' + ' Changed in version 3.7: The keyword-only argument *header*.\n' + '\n' + 'pdb.post_mortem(traceback=None)\n' + '\n' + ' Enter post-mortem debugging of the given *traceback* object. ' + 'If no\n' + ' *traceback* is given, it uses the one of the exception that ' + 'is\n' + ' currently being handled (an exception must be being handled ' + 'if the\n' + ' default is to be used).\n' + '\n' + 'pdb.pm()\n' + '\n' + ' Enter post-mortem debugging of the traceback found in\n' + ' "sys.last_traceback".\n' + '\n' + 'The "run*" functions and "set_trace()" are aliases for ' + 'instantiating\n' + 'the "Pdb" class and calling the method of the same name. If you ' + 'want\n' + 'to access further features, you have to do this yourself:\n' + '\n' + "class pdb.Pdb(completekey='tab', stdin=None, stdout=None, " + 'skip=None, nosigint=False, readrc=True)\n' + '\n' + ' "Pdb" is the debugger class.\n' + '\n' + ' The *completekey*, *stdin* and *stdout* arguments are passed ' + 'to the\n' + ' underlying "cmd.Cmd" class; see the description there.\n' + '\n' + ' The *skip* argument, if given, must be an iterable of ' + 'glob-style\n' + ' module name patterns. 
The debugger will not step into frames ' + 'that\n' + ' originate in a module that matches one of these patterns. ' + '[1]\n' + '\n' + ' By default, Pdb sets a handler for the SIGINT signal (which ' + 'is sent\n' + ' when the user presses "Ctrl-C" on the console) when you give ' + 'a\n' + ' "continue" command. This allows you to break into the ' + 'debugger\n' + ' again by pressing "Ctrl-C". If you want Pdb not to touch ' + 'the\n' + ' SIGINT handler, set *nosigint* to true.\n' + '\n' + ' The *readrc* argument defaults to true and controls whether ' + 'Pdb\n' + ' will load .pdbrc files from the filesystem.\n' + '\n' + ' Example call to enable tracing with *skip*:\n' + '\n' + " import pdb; pdb.Pdb(skip=['django.*']).set_trace()\n" + '\n' + ' Raises an auditing event "pdb.Pdb" with no arguments.\n' + '\n' + ' New in version 3.1: The *skip* argument.\n' + '\n' + ' New in version 3.2: The *nosigint* argument. Previously, a ' + 'SIGINT\n' + ' handler was never set by Pdb.\n' + '\n' + ' Changed in version 3.6: The *readrc* argument.\n' + '\n' + ' run(statement, globals=None, locals=None)\n' + ' runeval(expression, globals=None, locals=None)\n' + ' runcall(function, *args, **kwds)\n' + ' set_trace()\n' + '\n' + ' See the documentation for the functions explained above.\n' + '\n' + '\n' + 'Debugger Commands\n' + '=================\n' + '\n' + 'The commands recognized by the debugger are listed below. Most\n' + 'commands can be abbreviated to one or two letters as indicated; ' + 'e.g.\n' + '"h(elp)" means that either "h" or "help" can be used to enter ' + 'the help\n' + 'command (but not "he" or "hel", nor "H" or "Help" or "HELP").\n' + 'Arguments to commands must be separated by whitespace (spaces ' + 'or\n' + 'tabs). 
Optional arguments are enclosed in square brackets ' + '("[]") in\n' + 'the command syntax; the square brackets must not be typed.\n' + 'Alternatives in the command syntax are separated by a vertical ' + 'bar\n' + '("|").\n' + '\n' + 'Entering a blank line repeats the last command entered. ' + 'Exception: if\n' + 'the last command was a "list" command, the next 11 lines are ' + 'listed.\n' + '\n' + 'Commands that the debugger doesn’t recognize are assumed to be ' + 'Python\n' + 'statements and are executed in the context of the program being\n' + 'debugged. Python statements can also be prefixed with an ' + 'exclamation\n' + 'point ("!"). This is a powerful way to inspect the program ' + 'being\n' + 'debugged; it is even possible to change a variable or call a ' + 'function.\n' + 'When an exception occurs in such a statement, the exception name ' + 'is\n' + 'printed but the debugger’s state is not changed.\n' + '\n' + 'The debugger supports aliases. Aliases can have parameters ' + 'which\n' + 'allows one a certain level of adaptability to the context under\n' + 'examination.\n' + '\n' + 'Multiple commands may be entered on a single line, separated by ' + '";;".\n' + '(A single ";" is not used as it is the separator for multiple ' + 'commands\n' + 'in a line that is passed to the Python parser.) No intelligence ' + 'is\n' + 'applied to separating the commands; the input is split at the ' + 'first\n' + '";;" pair, even if it is in the middle of a quoted string. A\n' + 'workaround for strings with double semicolons is to use ' + 'implicit\n' + 'string concatenation "\';\'\';\'" or "";"";"".\n' + '\n' + 'If a file ".pdbrc" exists in the user’s home directory or in ' + 'the\n' + 'current directory, it is read in and executed as if it had been ' + 'typed\n' + 'at the debugger prompt. This is particularly useful for ' + 'aliases. 
If\n' + 'both files exist, the one in the home directory is read first ' + 'and\n' + 'aliases defined there can be overridden by the local file.\n' + '\n' + 'Changed in version 3.2: ".pdbrc" can now contain commands that\n' + 'continue debugging, such as "continue" or "next". Previously, ' + 'these\n' + 'commands had no effect.\n' + '\n' + 'h(elp) [command]\n' + '\n' + ' Without argument, print the list of available commands. With ' + 'a\n' + ' *command* as argument, print help about that command. "help ' + 'pdb"\n' + ' displays the full documentation (the docstring of the "pdb"\n' + ' module). Since the *command* argument must be an identifier, ' + '"help\n' + ' exec" must be entered to get help on the "!" command.\n' + '\n' + 'w(here)\n' + '\n' + ' Print a stack trace, with the most recent frame at the ' + 'bottom. An\n' + ' arrow indicates the current frame, which determines the ' + 'context of\n' + ' most commands.\n' + '\n' + 'd(own) [count]\n' + '\n' + ' Move the current frame *count* (default one) levels down in ' + 'the\n' + ' stack trace (to a newer frame).\n' + '\n' + 'u(p) [count]\n' + '\n' + ' Move the current frame *count* (default one) levels up in the ' + 'stack\n' + ' trace (to an older frame).\n' + '\n' + 'b(reak) [([filename:]lineno | function) [, condition]]\n' + '\n' + ' With a *lineno* argument, set a break there in the current ' + 'file.\n' + ' With a *function* argument, set a break at the first ' + 'executable\n' + ' statement within that function. The line number may be ' + 'prefixed\n' + ' with a filename and a colon, to specify a breakpoint in ' + 'another\n' + ' file (probably one that hasn’t been loaded yet). The file ' + 'is\n' + ' searched on "sys.path". 
Note that each breakpoint is ' + 'assigned a\n' + ' number to which all the other breakpoint commands refer.\n' + '\n' + ' If a second argument is present, it is an expression which ' + 'must\n' + ' evaluate to true before the breakpoint is honored.\n' + '\n' + ' Without argument, list all breaks, including for each ' + 'breakpoint,\n' + ' the number of times that breakpoint has been hit, the ' + 'current\n' + ' ignore count, and the associated condition if any.\n' + '\n' + 'tbreak [([filename:]lineno | function) [, condition]]\n' + '\n' + ' Temporary breakpoint, which is removed automatically when it ' + 'is\n' + ' first hit. The arguments are the same as for "break".\n' + '\n' + 'cl(ear) [filename:lineno | bpnumber ...]\n' + '\n' + ' With a *filename:lineno* argument, clear all the breakpoints ' + 'at\n' + ' this line. With a space separated list of breakpoint numbers, ' + 'clear\n' + ' those breakpoints. Without argument, clear all breaks (but ' + 'first\n' + ' ask confirmation).\n' + '\n' + 'disable [bpnumber ...]\n' + '\n' + ' Disable the breakpoints given as a space separated list of\n' + ' breakpoint numbers. Disabling a breakpoint means it cannot ' + 'cause\n' + ' the program to stop execution, but unlike clearing a ' + 'breakpoint, it\n' + ' remains in the list of breakpoints and can be (re-)enabled.\n' + '\n' + 'enable [bpnumber ...]\n' + '\n' + ' Enable the breakpoints specified.\n' + '\n' + 'ignore bpnumber [count]\n' + '\n' + ' Set the ignore count for the given breakpoint number. If ' + '*count*\n' + ' is omitted, the ignore count is set to 0. A breakpoint ' + 'becomes\n' + ' active when the ignore count is zero. 
When non-zero, the ' + '*count*\n' + ' is decremented each time the breakpoint is reached and the\n' + ' breakpoint is not disabled and any associated condition ' + 'evaluates\n' + ' to true.\n' + '\n' + 'condition bpnumber [condition]\n' + '\n' + ' Set a new *condition* for the breakpoint, an expression which ' + 'must\n' + ' evaluate to true before the breakpoint is honored. If ' + '*condition*\n' + ' is absent, any existing condition is removed; i.e., the ' + 'breakpoint\n' + ' is made unconditional.\n' + '\n' + 'commands [bpnumber]\n' + '\n' + ' Specify a list of commands for breakpoint number *bpnumber*. ' + 'The\n' + ' commands themselves appear on the following lines. Type a ' + 'line\n' + ' containing just "end" to terminate the commands. An example:\n' + '\n' + ' (Pdb) commands 1\n' + ' (com) p some_variable\n' + ' (com) end\n' + ' (Pdb)\n' + '\n' + ' To remove all commands from a breakpoint, type "commands" ' + 'and\n' + ' follow it immediately with "end"; that is, give no commands.\n' + '\n' + ' With no *bpnumber* argument, "commands" refers to the last\n' + ' breakpoint set.\n' + '\n' + ' You can use breakpoint commands to start your program up ' + 'again.\n' + ' Simply use the "continue" command, or "step", or any other ' + 'command\n' + ' that resumes execution.\n' + '\n' + ' Specifying any command resuming execution (currently ' + '"continue",\n' + ' "step", "next", "return", "jump", "quit" and their ' + 'abbreviations)\n' + ' terminates the command list (as if that command was ' + 'immediately\n' + ' followed by end). This is because any time you resume ' + 'execution\n' + ' (even with a simple next or step), you may encounter another\n' + ' breakpoint—which could have its own command list, leading to\n' + ' ambiguities about which list to execute.\n' + '\n' + ' If you use the "silent" command in the command list, the ' + 'usual\n' + ' message about stopping at a breakpoint is not printed. 
This ' + 'may be\n' + ' desirable for breakpoints that are to print a specific ' + 'message and\n' + ' then continue. If none of the other commands print anything, ' + 'you\n' + ' see no sign that the breakpoint was reached.\n' + '\n' + 's(tep)\n' + '\n' + ' Execute the current line, stop at the first possible ' + 'occasion\n' + ' (either in a function that is called or on the next line in ' + 'the\n' + ' current function).\n' + '\n' + 'n(ext)\n' + '\n' + ' Continue execution until the next line in the current ' + 'function is\n' + ' reached or it returns. (The difference between "next" and ' + '"step"\n' + ' is that "step" stops inside a called function, while "next"\n' + ' executes called functions at (nearly) full speed, only ' + 'stopping at\n' + ' the next line in the current function.)\n' + '\n' + 'unt(il) [lineno]\n' + '\n' + ' Without argument, continue execution until the line with a ' + 'number\n' + ' greater than the current one is reached.\n' + '\n' + ' With *lineno*, continue execution until a line with a number\n' + ' greater or equal to *lineno* is reached. In both cases, also ' + 'stop\n' + ' when the current frame returns.\n' + '\n' + ' Changed in version 3.2: Allow giving an explicit line ' + 'number.\n' + '\n' + 'r(eturn)\n' + '\n' + ' Continue execution until the current function returns.\n' + '\n' + 'c(ont(inue))\n' + '\n' + ' Continue execution, only stop when a breakpoint is ' + 'encountered.\n' + '\n' + 'j(ump) lineno\n' + '\n' + ' Set the next line that will be executed. Only available in ' + 'the\n' + ' bottom-most frame. This lets you jump back and execute code ' + 'again,\n' + ' or jump forward to skip code that you don’t want to run.\n' + '\n' + ' It should be noted that not all jumps are allowed – for ' + 'instance it\n' + ' is not possible to jump into the middle of a "for" loop or ' + 'out of a\n' + ' "finally" clause.\n' + '\n' + 'l(ist) [first[, last]]\n' + '\n' + ' List source code for the current file. 
Without arguments, ' + 'list 11\n' + ' lines around the current line or continue the previous ' + 'listing.\n' + ' With "." as argument, list 11 lines around the current line. ' + 'With\n' + ' one argument, list 11 lines around at that line. With two\n' + ' arguments, list the given range; if the second argument is ' + 'less\n' + ' than the first, it is interpreted as a count.\n' + '\n' + ' The current line in the current frame is indicated by "->". ' + 'If an\n' + ' exception is being debugged, the line where the exception ' + 'was\n' + ' originally raised or propagated is indicated by ">>", if it ' + 'differs\n' + ' from the current line.\n' + '\n' + ' New in version 3.2: The ">>" marker.\n' + '\n' + 'll | longlist\n' + '\n' + ' List all source code for the current function or frame.\n' + ' Interesting lines are marked as for "list".\n' + '\n' + ' New in version 3.2.\n' + '\n' + 'a(rgs)\n' + '\n' + ' Print the argument list of the current function.\n' + '\n' + 'p expression\n' + '\n' + ' Evaluate *expression* in the current context and print its ' + 'value.\n' + '\n' + ' Note:\n' + '\n' + ' "print()" can also be used, but is not a debugger command — ' + 'this\n' + ' executes the Python "print()" function.\n' + '\n' + 'pp expression\n' + '\n' + ' Like the "p" command, except the value of *expression* is ' + 'pretty-\n' + ' printed using the "pprint" module.\n' + '\n' + 'whatis expression\n' + '\n' + ' Print the type of *expression*.\n' + '\n' + 'source expression\n' + '\n' + ' Try to get source code of *expression* and display it.\n' + '\n' + ' New in version 3.2.\n' + '\n' + 'display [expression]\n' + '\n' + ' Display the value of *expression* if it changed, each time\n' + ' execution stops in the current frame.\n' + '\n' + ' Without *expression*, list all display expressions for the ' + 'current\n' + ' frame.\n' + '\n' + ' New in version 3.2.\n' + '\n' + 'undisplay [expression]\n' + '\n' + ' Do not display *expression* anymore in the current frame. 
' + 'Without\n' + ' *expression*, clear all display expressions for the current ' + 'frame.\n' + '\n' + ' New in version 3.2.\n' + '\n' + 'interact\n' + '\n' + ' Start an interactive interpreter (using the "code" module) ' + 'whose\n' + ' global namespace contains all the (global and local) names ' + 'found in\n' + ' the current scope.\n' + '\n' + ' New in version 3.2.\n' + '\n' + 'alias [name [command]]\n' + '\n' + ' Create an alias called *name* that executes *command*. The\n' + ' *command* must *not* be enclosed in quotes. Replaceable ' + 'parameters\n' + ' can be indicated by "%1", "%2", and so on, while "%*" is ' + 'replaced\n' + ' by all the parameters. If *command* is omitted, the current ' + 'alias\n' + ' for *name* is shown. If no arguments are given, all aliases ' + 'are\n' + ' listed.\n' + '\n' + ' Aliases may be nested and can contain anything that can be ' + 'legally\n' + ' typed at the pdb prompt. Note that internal pdb commands ' + '*can* be\n' + ' overridden by aliases. Such a command is then hidden until ' + 'the\n' + ' alias is removed. Aliasing is recursively applied to the ' + 'first\n' + ' word of the command line; all other words in the line are ' + 'left\n' + ' alone.\n' + '\n' + ' As an example, here are two useful aliases (especially when ' + 'placed\n' + ' in the ".pdbrc" file):\n' + '\n' + ' # Print instance variables (usage "pi classInst")\n' + ' alias pi for k in %1.__dict__.keys(): print(f"%1.{k} = ' + '{%1.__dict__[k]}")\n' + ' # Print instance variables in self\n' + ' alias ps pi self\n' + '\n' + 'unalias name\n' + '\n' + ' Delete the specified alias *name*.\n' + '\n' + '! statement\n' + '\n' + ' Execute the (one-line) *statement* in the context of the ' + 'current\n' + ' stack frame. The exclamation point can be omitted unless the ' + 'first\n' + ' word of the statement resembles a debugger command. 
To set ' + 'a\n' + ' global variable, you can prefix the assignment command with ' + 'a\n' + ' "global" statement on the same line, e.g.:\n' + '\n' + " (Pdb) global list_options; list_options = ['-l']\n" + ' (Pdb)\n' + '\n' + 'run [args ...]\n' + 'restart [args ...]\n' + '\n' + ' Restart the debugged Python program. If *args* is supplied, ' + 'it is\n' + ' split with "shlex" and the result is used as the new ' + '"sys.argv".\n' + ' History, breakpoints, actions and debugger options are ' + 'preserved.\n' + ' "restart" is an alias for "run".\n' + '\n' + 'q(uit)\n' + '\n' + ' Quit from the debugger. The program being executed is ' + 'aborted.\n' + '\n' + 'debug code\n' + '\n' + ' Enter a recursive debugger that steps through *code* (which ' + 'is an\n' + ' arbitrary expression or statement to be executed in the ' + 'current\n' + ' environment).\n' + '\n' + 'retval\n' + '\n' + ' Print the return value for the last return of a function.\n' + '\n' + '-[ Footnotes ]-\n' + '\n' + '[1] Whether a frame is considered to originate in a certain ' + 'module is\n' + ' determined by the "__name__" in the frame globals.\n', + 'del': 'The "del" statement\n' + '*******************\n' + '\n' + ' del_stmt ::= "del" target_list\n' + '\n' + 'Deletion is recursively defined very similar to the way assignment ' + 'is\n' + 'defined. Rather than spelling it out in full details, here are some\n' + 'hints.\n' + '\n' + 'Deletion of a target list recursively deletes each target, from left\n' + 'to right.\n' + '\n' + 'Deletion of a name removes the binding of that name from the local ' + 'or\n' + 'global namespace, depending on whether the name occurs in a "global"\n' + 'statement in the same code block. 
If the name is unbound, a\n' + '"NameError" exception will be raised.\n' + '\n' + 'Deletion of attribute references, subscriptions and slicings is ' + 'passed\n' + 'to the primary object involved; deletion of a slicing is in general\n' + 'equivalent to assignment of an empty slice of the right type (but ' + 'even\n' + 'this is determined by the sliced object).\n' + '\n' + 'Changed in version 3.2: Previously it was illegal to delete a name\n' + 'from the local namespace if it occurs as a free variable in a nested\n' + 'block.\n', + 'dict': 'Dictionary displays\n' + '*******************\n' + '\n' + 'A dictionary display is a possibly empty series of key/datum pairs\n' + 'enclosed in curly braces:\n' + '\n' + ' dict_display ::= "{" [key_datum_list | dict_comprehension] ' + '"}"\n' + ' key_datum_list ::= key_datum ("," key_datum)* [","]\n' + ' key_datum ::= expression ":" expression | "**" or_expr\n' + ' dict_comprehension ::= expression ":" expression comp_for\n' + '\n' + 'A dictionary display yields a new dictionary object.\n' + '\n' + 'If a comma-separated sequence of key/datum pairs is given, they are\n' + 'evaluated from left to right to define the entries of the ' + 'dictionary:\n' + 'each key object is used as a key into the dictionary to store the\n' + 'corresponding datum. This means that you can specify the same key\n' + 'multiple times in the key/datum list, and the final dictionary’s ' + 'value\n' + 'for that key will be the last one given.\n' + '\n' + 'A double asterisk "**" denotes *dictionary unpacking*. Its operand\n' + 'must be a *mapping*. Each mapping item is added to the new\n' + 'dictionary. 
Later values replace values already set by earlier\n' + 'key/datum pairs and earlier dictionary unpackings.\n' + '\n' + 'New in version 3.5: Unpacking into dictionary displays, originally\n' + 'proposed by **PEP 448**.\n' + '\n' + 'A dict comprehension, in contrast to list and set comprehensions,\n' + 'needs two expressions separated with a colon followed by the usual\n' + '“for” and “if” clauses. When the comprehension is run, the ' + 'resulting\n' + 'key and value elements are inserted in the new dictionary in the ' + 'order\n' + 'they are produced.\n' + '\n' + 'Restrictions on the types of the key values are listed earlier in\n' + 'section The standard type hierarchy. (To summarize, the key type\n' + 'should be *hashable*, which excludes all mutable objects.) Clashes\n' + 'between duplicate keys are not detected; the last datum (textually\n' + 'rightmost in the display) stored for a given key value prevails.\n' + '\n' + 'Changed in version 3.8: Prior to Python 3.8, in dict ' + 'comprehensions,\n' + 'the evaluation order of key and value was not well-defined. In\n' + 'CPython, the value was evaluated before the key. Starting with ' + '3.8,\n' + 'the key is evaluated before the value, as proposed by **PEP 572**.\n', + 'dynamic-features': 'Interaction with dynamic features\n' + '*********************************\n' + '\n' + 'Name resolution of free variables occurs at runtime, not ' + 'at compile\n' + 'time. This means that the following code will print 42:\n' + '\n' + ' i = 10\n' + ' def f():\n' + ' print(i)\n' + ' i = 42\n' + ' f()\n' + '\n' + 'The "eval()" and "exec()" functions do not have access ' + 'to the full\n' + 'environment for resolving names. Names may be resolved ' + 'in the local\n' + 'and global namespaces of the caller. Free variables are ' + 'not resolved\n' + 'in the nearest enclosing namespace, but in the global ' + 'namespace. 
[1]\n' + 'The "exec()" and "eval()" functions have optional ' + 'arguments to\n' + 'override the global and local namespace. If only one ' + 'namespace is\n' + 'specified, it is used for both.\n', + 'else': 'The "if" statement\n' + '******************\n' + '\n' + 'The "if" statement is used for conditional execution:\n' + '\n' + ' if_stmt ::= "if" assignment_expression ":" suite\n' + ' ("elif" assignment_expression ":" suite)*\n' + ' ["else" ":" suite]\n' + '\n' + 'It selects exactly one of the suites by evaluating the expressions ' + 'one\n' + 'by one until one is found to be true (see section Boolean ' + 'operations\n' + 'for the definition of true and false); then that suite is executed\n' + '(and no other part of the "if" statement is executed or evaluated).\n' + 'If all expressions are false, the suite of the "else" clause, if\n' + 'present, is executed.\n', + 'exceptions': 'Exceptions\n' + '**********\n' + '\n' + 'Exceptions are a means of breaking out of the normal flow of ' + 'control\n' + 'of a code block in order to handle errors or other ' + 'exceptional\n' + 'conditions. An exception is *raised* at the point where the ' + 'error is\n' + 'detected; it may be *handled* by the surrounding code block or ' + 'by any\n' + 'code block that directly or indirectly invoked the code block ' + 'where\n' + 'the error occurred.\n' + '\n' + 'The Python interpreter raises an exception when it detects a ' + 'run-time\n' + 'error (such as division by zero). A Python program can also\n' + 'explicitly raise an exception with the "raise" statement. ' + 'Exception\n' + 'handlers are specified with the "try" … "except" statement. 
' + 'The\n' + '"finally" clause of such a statement can be used to specify ' + 'cleanup\n' + 'code which does not handle the exception, but is executed ' + 'whether an\n' + 'exception occurred or not in the preceding code.\n' + '\n' + 'Python uses the “termination” model of error handling: an ' + 'exception\n' + 'handler can find out what happened and continue execution at ' + 'an outer\n' + 'level, but it cannot repair the cause of the error and retry ' + 'the\n' + 'failing operation (except by re-entering the offending piece ' + 'of code\n' + 'from the top).\n' + '\n' + 'When an exception is not handled at all, the interpreter ' + 'terminates\n' + 'execution of the program, or returns to its interactive main ' + 'loop. In\n' + 'either case, it prints a stack traceback, except when the ' + 'exception is\n' + '"SystemExit".\n' + '\n' + 'Exceptions are identified by class instances. The "except" ' + 'clause is\n' + 'selected depending on the class of the instance: it must ' + 'reference the\n' + 'class of the instance or a *non-virtual base class* thereof. ' + 'The\n' + 'instance can be received by the handler and can carry ' + 'additional\n' + 'information about the exceptional condition.\n' + '\n' + 'Note:\n' + '\n' + ' Exception messages are not part of the Python API. 
Their ' + 'contents\n' + ' may change from one version of Python to the next without ' + 'warning\n' + ' and should not be relied on by code which will run under ' + 'multiple\n' + ' versions of the interpreter.\n' + '\n' + 'See also the description of the "try" statement in section The ' + 'try\n' + 'statement and "raise" statement in section The raise ' + 'statement.\n' + '\n' + '-[ Footnotes ]-\n' + '\n' + '[1] This limitation occurs because the code that is executed ' + 'by these\n' + ' operations is not available at the time the module is ' + 'compiled.\n', + 'execmodel': 'Execution model\n' + '***************\n' + '\n' + '\n' + 'Structure of a program\n' + '======================\n' + '\n' + 'A Python program is constructed from code blocks. A *block* is ' + 'a piece\n' + 'of Python program text that is executed as a unit. The ' + 'following are\n' + 'blocks: a module, a function body, and a class definition. ' + 'Each\n' + 'command typed interactively is a block. A script file (a file ' + 'given\n' + 'as standard input to the interpreter or specified as a command ' + 'line\n' + 'argument to the interpreter) is a code block. A script command ' + '(a\n' + 'command specified on the interpreter command line with the ' + '"-c"\n' + 'option) is a code block. A module run as a top level script (as ' + 'module\n' + '"__main__") from the command line using a "-m" argument is also ' + 'a code\n' + 'block. The string argument passed to the built-in functions ' + '"eval()"\n' + 'and "exec()" is a code block.\n' + '\n' + 'A code block is executed in an *execution frame*. A frame ' + 'contains\n' + 'some administrative information (used for debugging) and ' + 'determines\n' + 'where and how execution continues after the code block’s ' + 'execution has\n' + 'completed.\n' + '\n' + '\n' + 'Naming and binding\n' + '==================\n' + '\n' + '\n' + 'Binding of names\n' + '----------------\n' + '\n' + '*Names* refer to objects. 
Names are introduced by name ' + 'binding\n' + 'operations.\n' + '\n' + 'The following constructs bind names:\n' + '\n' + '* formal parameters to functions,\n' + '\n' + '* class definitions,\n' + '\n' + '* function definitions,\n' + '\n' + '* assignment expressions,\n' + '\n' + '* targets that are identifiers if occurring in an assignment:\n' + '\n' + ' * "for" loop header,\n' + '\n' + ' * after "as" in a "with" statement, "except" clause or in the ' + 'as-\n' + ' pattern in structural pattern matching,\n' + '\n' + ' * in a capture pattern in structural pattern matching\n' + '\n' + '* "import" statements.\n' + '\n' + 'The "import" statement of the form "from ... import *" binds ' + 'all names\n' + 'defined in the imported module, except those beginning with an\n' + 'underscore. This form may only be used at the module level.\n' + '\n' + 'A target occurring in a "del" statement is also considered ' + 'bound for\n' + 'this purpose (though the actual semantics are to unbind the ' + 'name).\n' + '\n' + 'Each assignment or import statement occurs within a block ' + 'defined by a\n' + 'class or function definition or at the module level (the ' + 'top-level\n' + 'code block).\n' + '\n' + 'If a name is bound in a block, it is a local variable of that ' + 'block,\n' + 'unless declared as "nonlocal" or "global". If a name is bound ' + 'at the\n' + 'module level, it is a global variable. (The variables of the ' + 'module\n' + 'code block are local and global.) If a variable is used in a ' + 'code\n' + 'block but not defined there, it is a *free variable*.\n' + '\n' + 'Each occurrence of a name in the program text refers to the ' + '*binding*\n' + 'of that name established by the following name resolution ' + 'rules.\n' + '\n' + '\n' + 'Resolution of names\n' + '-------------------\n' + '\n' + 'A *scope* defines the visibility of a name within a block. If ' + 'a local\n' + 'variable is defined in a block, its scope includes that block. 
' + 'If the\n' + 'definition occurs in a function block, the scope extends to any ' + 'blocks\n' + 'contained within the defining one, unless a contained block ' + 'introduces\n' + 'a different binding for the name.\n' + '\n' + 'When a name is used in a code block, it is resolved using the ' + 'nearest\n' + 'enclosing scope. The set of all such scopes visible to a code ' + 'block\n' + 'is called the block’s *environment*.\n' + '\n' + 'When a name is not found at all, a "NameError" exception is ' + 'raised. If\n' + 'the current scope is a function scope, and the name refers to a ' + 'local\n' + 'variable that has not yet been bound to a value at the point ' + 'where the\n' + 'name is used, an "UnboundLocalError" exception is raised.\n' + '"UnboundLocalError" is a subclass of "NameError".\n' + '\n' + 'If a name binding operation occurs anywhere within a code ' + 'block, all\n' + 'uses of the name within the block are treated as references to ' + 'the\n' + 'current block. This can lead to errors when a name is used ' + 'within a\n' + 'block before it is bound. This rule is subtle. Python lacks\n' + 'declarations and allows name binding operations to occur ' + 'anywhere\n' + 'within a code block. The local variables of a code block can ' + 'be\n' + 'determined by scanning the entire text of the block for name ' + 'binding\n' + 'operations. See the FAQ entry on UnboundLocalError for ' + 'examples.\n' + '\n' + 'If the "global" statement occurs within a block, all uses of ' + 'the names\n' + 'specified in the statement refer to the bindings of those names ' + 'in the\n' + 'top-level namespace. Names are resolved in the top-level ' + 'namespace by\n' + 'searching the global namespace, i.e. the namespace of the ' + 'module\n' + 'containing the code block, and the builtins namespace, the ' + 'namespace\n' + 'of the module "builtins". The global namespace is searched ' + 'first. 
If\n' + 'the names are not found there, the builtins namespace is ' + 'searched.\n' + 'The "global" statement must precede all uses of the listed ' + 'names.\n' + '\n' + 'The "global" statement has the same scope as a name binding ' + 'operation\n' + 'in the same block. If the nearest enclosing scope for a free ' + 'variable\n' + 'contains a global statement, the free variable is treated as a ' + 'global.\n' + '\n' + 'The "nonlocal" statement causes corresponding names to refer ' + 'to\n' + 'previously bound variables in the nearest enclosing function ' + 'scope.\n' + '"SyntaxError" is raised at compile time if the given name does ' + 'not\n' + 'exist in any enclosing function scope.\n' + '\n' + 'The namespace for a module is automatically created the first ' + 'time a\n' + 'module is imported. The main module for a script is always ' + 'called\n' + '"__main__".\n' + '\n' + 'Class definition blocks and arguments to "exec()" and "eval()" ' + 'are\n' + 'special in the context of name resolution. A class definition ' + 'is an\n' + 'executable statement that may use and define names. These ' + 'references\n' + 'follow the normal rules for name resolution with an exception ' + 'that\n' + 'unbound local variables are looked up in the global namespace. ' + 'The\n' + 'namespace of the class definition becomes the attribute ' + 'dictionary of\n' + 'the class. The scope of names defined in a class block is ' + 'limited to\n' + 'the class block; it does not extend to the code blocks of ' + 'methods –\n' + 'this includes comprehensions and generator expressions since ' + 'they are\n' + 'implemented using a function scope. This means that the ' + 'following\n' + 'will fail:\n' + '\n' + ' class A:\n' + ' a = 42\n' + ' b = list(a + i for i in range(10))\n' + '\n' + '\n' + 'Builtins and restricted execution\n' + '---------------------------------\n' + '\n' + '**CPython implementation detail:** Users should not touch\n' + '"__builtins__"; it is strictly an implementation detail. 
' + 'Users\n' + 'wanting to override values in the builtins namespace should ' + '"import"\n' + 'the "builtins" module and modify its attributes appropriately.\n' + '\n' + 'The builtins namespace associated with the execution of a code ' + 'block\n' + 'is actually found by looking up the name "__builtins__" in its ' + 'global\n' + 'namespace; this should be a dictionary or a module (in the ' + 'latter case\n' + 'the module’s dictionary is used). By default, when in the ' + '"__main__"\n' + 'module, "__builtins__" is the built-in module "builtins"; when ' + 'in any\n' + 'other module, "__builtins__" is an alias for the dictionary of ' + 'the\n' + '"builtins" module itself.\n' + '\n' + '\n' + 'Interaction with dynamic features\n' + '---------------------------------\n' + '\n' + 'Name resolution of free variables occurs at runtime, not at ' + 'compile\n' + 'time. This means that the following code will print 42:\n' + '\n' + ' i = 10\n' + ' def f():\n' + ' print(i)\n' + ' i = 42\n' + ' f()\n' + '\n' + 'The "eval()" and "exec()" functions do not have access to the ' + 'full\n' + 'environment for resolving names. Names may be resolved in the ' + 'local\n' + 'and global namespaces of the caller. Free variables are not ' + 'resolved\n' + 'in the nearest enclosing namespace, but in the global ' + 'namespace. [1]\n' + 'The "exec()" and "eval()" functions have optional arguments to\n' + 'override the global and local namespace. If only one namespace ' + 'is\n' + 'specified, it is used for both.\n' + '\n' + '\n' + 'Exceptions\n' + '==========\n' + '\n' + 'Exceptions are a means of breaking out of the normal flow of ' + 'control\n' + 'of a code block in order to handle errors or other exceptional\n' + 'conditions. 
An exception is *raised* at the point where the ' + 'error is\n' + 'detected; it may be *handled* by the surrounding code block or ' + 'by any\n' + 'code block that directly or indirectly invoked the code block ' + 'where\n' + 'the error occurred.\n' + '\n' + 'The Python interpreter raises an exception when it detects a ' + 'run-time\n' + 'error (such as division by zero). A Python program can also\n' + 'explicitly raise an exception with the "raise" statement. ' + 'Exception\n' + 'handlers are specified with the "try" … "except" statement. ' + 'The\n' + '"finally" clause of such a statement can be used to specify ' + 'cleanup\n' + 'code which does not handle the exception, but is executed ' + 'whether an\n' + 'exception occurred or not in the preceding code.\n' + '\n' + 'Python uses the “termination” model of error handling: an ' + 'exception\n' + 'handler can find out what happened and continue execution at an ' + 'outer\n' + 'level, but it cannot repair the cause of the error and retry ' + 'the\n' + 'failing operation (except by re-entering the offending piece of ' + 'code\n' + 'from the top).\n' + '\n' + 'When an exception is not handled at all, the interpreter ' + 'terminates\n' + 'execution of the program, or returns to its interactive main ' + 'loop. In\n' + 'either case, it prints a stack traceback, except when the ' + 'exception is\n' + '"SystemExit".\n' + '\n' + 'Exceptions are identified by class instances. The "except" ' + 'clause is\n' + 'selected depending on the class of the instance: it must ' + 'reference the\n' + 'class of the instance or a *non-virtual base class* thereof. ' + 'The\n' + 'instance can be received by the handler and can carry ' + 'additional\n' + 'information about the exceptional condition.\n' + '\n' + 'Note:\n' + '\n' + ' Exception messages are not part of the Python API. 
Their ' + 'contents\n' + ' may change from one version of Python to the next without ' + 'warning\n' + ' and should not be relied on by code which will run under ' + 'multiple\n' + ' versions of the interpreter.\n' + '\n' + 'See also the description of the "try" statement in section The ' + 'try\n' + 'statement and "raise" statement in section The raise ' + 'statement.\n' + '\n' + '-[ Footnotes ]-\n' + '\n' + '[1] This limitation occurs because the code that is executed by ' + 'these\n' + ' operations is not available at the time the module is ' + 'compiled.\n', + 'exprlists': 'Expression lists\n' + '****************\n' + '\n' + ' expression_list ::= expression ("," expression)* [","]\n' + ' starred_list ::= starred_item ("," starred_item)* ' + '[","]\n' + ' starred_expression ::= expression | (starred_item ",")* ' + '[starred_item]\n' + ' starred_item ::= assignment_expression | "*" or_expr\n' + '\n' + 'Except when part of a list or set display, an expression list\n' + 'containing at least one comma yields a tuple. The length of ' + 'the tuple\n' + 'is the number of expressions in the list. The expressions are\n' + 'evaluated from left to right.\n' + '\n' + 'An asterisk "*" denotes *iterable unpacking*. Its operand must ' + 'be an\n' + '*iterable*. The iterable is expanded into a sequence of items, ' + 'which\n' + 'are included in the new tuple, list, or set, at the site of ' + 'the\n' + 'unpacking.\n' + '\n' + 'New in version 3.5: Iterable unpacking in expression lists, ' + 'originally\n' + 'proposed by **PEP 448**.\n' + '\n' + 'The trailing comma is required only to create a single tuple ' + '(a.k.a. a\n' + '*singleton*); it is optional in all other cases. A single ' + 'expression\n' + 'without a trailing comma doesn’t create a tuple, but rather ' + 'yields the\n' + 'value of that expression. 
(To create an empty tuple, use an ' + 'empty pair\n' + 'of parentheses: "()".)\n', + 'floating': 'Floating point literals\n' + '***********************\n' + '\n' + 'Floating point literals are described by the following lexical\n' + 'definitions:\n' + '\n' + ' floatnumber ::= pointfloat | exponentfloat\n' + ' pointfloat ::= [digitpart] fraction | digitpart "."\n' + ' exponentfloat ::= (digitpart | pointfloat) exponent\n' + ' digitpart ::= digit (["_"] digit)*\n' + ' fraction ::= "." digitpart\n' + ' exponent ::= ("e" | "E") ["+" | "-"] digitpart\n' + '\n' + 'Note that the integer and exponent parts are always interpreted ' + 'using\n' + 'radix 10. For example, "077e010" is legal, and denotes the same ' + 'number\n' + 'as "77e10". The allowed range of floating point literals is\n' + 'implementation-dependent. As in integer literals, underscores ' + 'are\n' + 'supported for digit grouping.\n' + '\n' + 'Some examples of floating point literals:\n' + '\n' + ' 3.14 10. .001 1e100 3.14e-10 0e0 ' + '3.14_15_93\n' + '\n' + 'Changed in version 3.6: Underscores are now allowed for ' + 'grouping\n' + 'purposes in literals.\n', + 'for': 'The "for" statement\n' + '*******************\n' + '\n' + 'The "for" statement is used to iterate over the elements of a ' + 'sequence\n' + '(such as a string, tuple or list) or other iterable object:\n' + '\n' + ' for_stmt ::= "for" target_list "in" expression_list ":" suite\n' + ' ["else" ":" suite]\n' + '\n' + 'The expression list is evaluated once; it should yield an iterable\n' + 'object. An iterator is created for the result of the\n' + '"expression_list". The suite is then executed once for each item\n' + 'provided by the iterator, in the order returned by the iterator. ' + 'Each\n' + 'item in turn is assigned to the target list using the standard rules\n' + 'for assignments (see Assignment statements), and then the suite is\n' + 'executed. 
When the items are exhausted (which is immediately when ' + 'the\n' + 'sequence is empty or an iterator raises a "StopIteration" ' + 'exception),\n' + 'the suite in the "else" clause, if present, is executed, and the ' + 'loop\n' + 'terminates.\n' + '\n' + 'A "break" statement executed in the first suite terminates the loop\n' + 'without executing the "else" clause’s suite. A "continue" statement\n' + 'executed in the first suite skips the rest of the suite and ' + 'continues\n' + 'with the next item, or with the "else" clause if there is no next\n' + 'item.\n' + '\n' + 'The for-loop makes assignments to the variables in the target list.\n' + 'This overwrites all previous assignments to those variables ' + 'including\n' + 'those made in the suite of the for-loop:\n' + '\n' + ' for i in range(10):\n' + ' print(i)\n' + ' i = 5 # this will not affect the for-loop\n' + ' # because i will be overwritten with the ' + 'next\n' + ' # index in the range\n' + '\n' + 'Names in the target list are not deleted when the loop is finished,\n' + 'but if the sequence is empty, they will not have been assigned to at\n' + 'all by the loop. Hint: the built-in type "range()" represents\n' + 'immutable arithmetic sequences of integers. For instance, iterating\n' + '"range(3)" successively yields 0, 1, and then 2.\n', + 'formatstrings': 'Format String Syntax\n' + '********************\n' + '\n' + 'The "str.format()" method and the "Formatter" class share ' + 'the same\n' + 'syntax for format strings (although in the case of ' + '"Formatter",\n' + 'subclasses can define their own format string syntax). The ' + 'syntax is\n' + 'related to that of formatted string literals, but it is ' + 'less\n' + 'sophisticated and, in particular, does not support ' + 'arbitrary\n' + 'expressions.\n' + '\n' + 'Format strings contain “replacement fields” surrounded by ' + 'curly braces\n' + '"{}". 
Anything that is not contained in braces is ' + 'considered literal\n' + 'text, which is copied unchanged to the output. If you need ' + 'to include\n' + 'a brace character in the literal text, it can be escaped by ' + 'doubling:\n' + '"{{" and "}}".\n' + '\n' + 'The grammar for a replacement field is as follows:\n' + '\n' + ' replacement_field ::= "{" [field_name] ["!" ' + 'conversion] [":" format_spec] "}"\n' + ' field_name ::= arg_name ("." attribute_name | ' + '"[" element_index "]")*\n' + ' arg_name ::= [identifier | digit+]\n' + ' attribute_name ::= identifier\n' + ' element_index ::= digit+ | index_string\n' + ' index_string ::= +\n' + ' conversion ::= "r" | "s" | "a"\n' + ' format_spec ::= \n' + '\n' + 'In less formal terms, the replacement field can start with ' + 'a\n' + '*field_name* that specifies the object whose value is to be ' + 'formatted\n' + 'and inserted into the output instead of the replacement ' + 'field. The\n' + '*field_name* is optionally followed by a *conversion* ' + 'field, which is\n' + 'preceded by an exclamation point "\'!\'", and a ' + '*format_spec*, which is\n' + 'preceded by a colon "\':\'". These specify a non-default ' + 'format for the\n' + 'replacement value.\n' + '\n' + 'See also the Format Specification Mini-Language section.\n' + '\n' + 'The *field_name* itself begins with an *arg_name* that is ' + 'either a\n' + 'number or a keyword. If it’s a number, it refers to a ' + 'positional\n' + 'argument, and if it’s a keyword, it refers to a named ' + 'keyword\n' + 'argument. If the numerical arg_names in a format string ' + 'are 0, 1, 2,\n' + '… in sequence, they can all be omitted (not just some) and ' + 'the numbers\n' + '0, 1, 2, … will be automatically inserted in that order. ' + 'Because\n' + '*arg_name* is not quote-delimited, it is not possible to ' + 'specify\n' + 'arbitrary dictionary keys (e.g., the strings "\'10\'" or ' + '"\':-]\'") within\n' + 'a format string. 
The *arg_name* can be followed by any ' + 'number of index\n' + 'or attribute expressions. An expression of the form ' + '"\'.name\'" selects\n' + 'the named attribute using "getattr()", while an expression ' + 'of the form\n' + '"\'[index]\'" does an index lookup using "__getitem__()".\n' + '\n' + 'Changed in version 3.1: The positional argument specifiers ' + 'can be\n' + 'omitted for "str.format()", so "\'{} {}\'.format(a, b)" is ' + 'equivalent to\n' + '"\'{0} {1}\'.format(a, b)".\n' + '\n' + 'Changed in version 3.4: The positional argument specifiers ' + 'can be\n' + 'omitted for "Formatter".\n' + '\n' + 'Some simple format string examples:\n' + '\n' + ' "First, thou shalt count to {0}" # References first ' + 'positional argument\n' + ' "Bring me a {}" # Implicitly ' + 'references the first positional argument\n' + ' "From {} to {}" # Same as "From {0} to ' + '{1}"\n' + ' "My quest is {name}" # References keyword ' + "argument 'name'\n" + ' "Weight in tons {0.weight}" # \'weight\' attribute ' + 'of first positional arg\n' + ' "Units destroyed: {players[0]}" # First element of ' + "keyword argument 'players'.\n" + '\n' + 'The *conversion* field causes a type coercion before ' + 'formatting.\n' + 'Normally, the job of formatting a value is done by the ' + '"__format__()"\n' + 'method of the value itself. However, in some cases it is ' + 'desirable to\n' + 'force a type to be formatted as a string, overriding its ' + 'own\n' + 'definition of formatting. 
By converting the value to a ' + 'string before\n' + 'calling "__format__()", the normal formatting logic is ' + 'bypassed.\n' + '\n' + 'Three conversion flags are currently supported: "\'!s\'" ' + 'which calls\n' + '"str()" on the value, "\'!r\'" which calls "repr()" and ' + '"\'!a\'" which\n' + 'calls "ascii()".\n' + '\n' + 'Some examples:\n' + '\n' + ' "Harold\'s a clever {0!s}" # Calls str() on the ' + 'argument first\n' + ' "Bring out the holy {name!r}" # Calls repr() on the ' + 'argument first\n' + ' "More {!a}" # Calls ascii() on the ' + 'argument first\n' + '\n' + 'The *format_spec* field contains a specification of how the ' + 'value\n' + 'should be presented, including such details as field width, ' + 'alignment,\n' + 'padding, decimal precision and so on. Each value type can ' + 'define its\n' + 'own “formatting mini-language” or interpretation of the ' + '*format_spec*.\n' + '\n' + 'Most built-in types support a common formatting ' + 'mini-language, which\n' + 'is described in the next section.\n' + '\n' + 'A *format_spec* field can also include nested replacement ' + 'fields\n' + 'within it. These nested replacement fields may contain a ' + 'field name,\n' + 'conversion flag and format specification, but deeper ' + 'nesting is not\n' + 'allowed. The replacement fields within the format_spec ' + 'are\n' + 'substituted before the *format_spec* string is interpreted. ' + 'This\n' + 'allows the formatting of a value to be dynamically ' + 'specified.\n' + '\n' + 'See the Format examples section for some examples.\n' + '\n' + '\n' + 'Format Specification Mini-Language\n' + '==================================\n' + '\n' + '“Format specifications” are used within replacement fields ' + 'contained\n' + 'within a format string to define how individual values are ' + 'presented\n' + '(see Format String Syntax and Formatted string literals). ' + 'They can\n' + 'also be passed directly to the built-in "format()" ' + 'function. 
Each\n' + 'formattable type may define how the format specification is ' + 'to be\n' + 'interpreted.\n' + '\n' + 'Most built-in types implement the following options for ' + 'format\n' + 'specifications, although some of the formatting options are ' + 'only\n' + 'supported by the numeric types.\n' + '\n' + 'A general convention is that an empty format specification ' + 'produces\n' + 'the same result as if you had called "str()" on the value. ' + 'A non-empty\n' + 'format specification typically modifies the result.\n' + '\n' + 'The general form of a *standard format specifier* is:\n' + '\n' + ' format_spec ::= ' + '[[fill]align][sign][#][0][width][grouping_option][.precision][type]\n' + ' fill ::= \n' + ' align ::= "<" | ">" | "=" | "^"\n' + ' sign ::= "+" | "-" | " "\n' + ' width ::= digit+\n' + ' grouping_option ::= "_" | ","\n' + ' precision ::= digit+\n' + ' type ::= "b" | "c" | "d" | "e" | "E" | "f" | ' + '"F" | "g" | "G" | "n" | "o" | "s" | "x" | "X" | "%"\n' + '\n' + 'If a valid *align* value is specified, it can be preceded ' + 'by a *fill*\n' + 'character that can be any character and defaults to a space ' + 'if\n' + 'omitted. It is not possible to use a literal curly brace ' + '(”"{"” or\n' + '“"}"”) as the *fill* character in a formatted string ' + 'literal or when\n' + 'using the "str.format()" method. However, it is possible ' + 'to insert a\n' + 'curly brace with a nested replacement field. This ' + 'limitation doesn’t\n' + 'affect the "format()" function.\n' + '\n' + 'The meaning of the various alignment options is as ' + 'follows:\n' + '\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | Option | ' + 'Meaning ' + '|\n' + ' ' + '|===========|============================================================|\n' + ' | "\'<\'" | Forces the field to be left-aligned ' + 'within the available |\n' + ' | | space (this is the default for most ' + 'objects). 
|\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'>\'" | Forces the field to be right-aligned ' + 'within the available |\n' + ' | | space (this is the default for ' + 'numbers). |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'=\'" | Forces the padding to be placed after ' + 'the sign (if any) |\n' + ' | | but before the digits. This is used for ' + 'printing fields |\n' + ' | | in the form ‘+000000120’. This alignment ' + 'option is only |\n' + ' | | valid for numeric types. It becomes the ' + 'default for |\n' + ' | | numbers when ‘0’ immediately precedes the ' + 'field width. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'^\'" | Forces the field to be centered within ' + 'the available |\n' + ' | | ' + 'space. ' + '|\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + '\n' + 'Note that unless a minimum field width is defined, the ' + 'field width\n' + 'will always be the same size as the data to fill it, so ' + 'that the\n' + 'alignment option has no meaning in this case.\n' + '\n' + 'The *sign* option is only valid for number types, and can ' + 'be one of\n' + 'the following:\n' + '\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | Option | ' + 'Meaning ' + '|\n' + ' ' + '|===========|============================================================|\n' + ' | "\'+\'" | indicates that a sign should be used for ' + 'both positive as |\n' + ' | | well as negative ' + 'numbers. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'-\'" | indicates that a sign should be used ' + 'only for negative |\n' + ' | | numbers (this is the default ' + 'behavior). 
|\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | space | indicates that a leading space should be ' + 'used on positive |\n' + ' | | numbers, and a minus sign on negative ' + 'numbers. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + '\n' + 'The "\'#\'" option causes the “alternate form” to be used ' + 'for the\n' + 'conversion. The alternate form is defined differently for ' + 'different\n' + 'types. This option is only valid for integer, float and ' + 'complex\n' + 'types. For integers, when binary, octal, or hexadecimal ' + 'output is\n' + 'used, this option adds the respective prefix "\'0b\'", ' + '"\'0o\'", "\'0x\'",\n' + 'or "\'0X\'" to the output value. For float and complex the ' + 'alternate\n' + 'form causes the result of the conversion to always contain ' + 'a decimal-\n' + 'point character, even if no digits follow it. Normally, a ' + 'decimal-\n' + 'point character appears in the result of these conversions ' + 'only if a\n' + 'digit follows it. In addition, for "\'g\'" and "\'G\'" ' + 'conversions,\n' + 'trailing zeros are not removed from the result.\n' + '\n' + 'The "\',\'" option signals the use of a comma for a ' + 'thousands separator.\n' + 'For a locale aware separator, use the "\'n\'" integer ' + 'presentation type\n' + 'instead.\n' + '\n' + 'Changed in version 3.1: Added the "\',\'" option (see also ' + '**PEP 378**).\n' + '\n' + 'The "\'_\'" option signals the use of an underscore for a ' + 'thousands\n' + 'separator for floating point presentation types and for ' + 'integer\n' + 'presentation type "\'d\'". For integer presentation types ' + '"\'b\'", "\'o\'",\n' + '"\'x\'", and "\'X\'", underscores will be inserted every 4 ' + 'digits. 
For\n' + 'other presentation types, specifying this option is an ' + 'error.\n' + '\n' + 'Changed in version 3.6: Added the "\'_\'" option (see also ' + '**PEP 515**).\n' + '\n' + '*width* is a decimal integer defining the minimum total ' + 'field width,\n' + 'including any prefixes, separators, and other formatting ' + 'characters.\n' + 'If not specified, then the field width will be determined ' + 'by the\n' + 'content.\n' + '\n' + 'When no explicit alignment is given, preceding the *width* ' + 'field by a\n' + 'zero ("\'0\'") character enables sign-aware zero-padding ' + 'for numeric\n' + 'types. This is equivalent to a *fill* character of "\'0\'" ' + 'with an\n' + '*alignment* type of "\'=\'".\n' + '\n' + 'Changed in version 3.10: Preceding the *width* field by ' + '"\'0\'" no\n' + 'longer affects the default alignment for strings.\n' + '\n' + 'The *precision* is a decimal integer indicating how many ' + 'digits should\n' + 'be displayed after the decimal point for presentation types ' + '"\'f\'" and\n' + '"\'F\'", or before and after the decimal point for ' + 'presentation types\n' + '"\'g\'" or "\'G\'". For string presentation types the ' + 'field indicates the\n' + 'maximum field size - in other words, how many characters ' + 'will be used\n' + 'from the field content. The *precision* is not allowed for ' + 'integer\n' + 'presentation types.\n' + '\n' + 'Finally, the *type* determines how the data should be ' + 'presented.\n' + '\n' + 'The available string presentation types are:\n' + '\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | Type | ' + 'Meaning ' + '|\n' + ' ' + '|===========|============================================================|\n' + ' | "\'s\'" | String format. This is the default type ' + 'for strings and |\n' + ' | | may be ' + 'omitted. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | None | The same as ' + '"\'s\'". 
|\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + '\n' + 'The available integer presentation types are:\n' + '\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | Type | ' + 'Meaning ' + '|\n' + ' ' + '|===========|============================================================|\n' + ' | "\'b\'" | Binary format. Outputs the number in ' + 'base 2. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'c\'" | Character. Converts the integer to the ' + 'corresponding |\n' + ' | | unicode character before ' + 'printing. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'d\'" | Decimal Integer. Outputs the number in ' + 'base 10. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'o\'" | Octal format. Outputs the number in base ' + '8. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'x\'" | Hex format. Outputs the number in base ' + '16, using lower- |\n' + ' | | case letters for the digits above ' + '9. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'X\'" | Hex format. Outputs the number in base ' + '16, using upper- |\n' + ' | | case letters for the digits above 9. In ' + 'case "\'#\'" is |\n' + ' | | specified, the prefix "\'0x\'" will be ' + 'upper-cased to "\'0X\'" |\n' + ' | | as ' + 'well. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'n\'" | Number. This is the same as "\'d\'", ' + 'except that it uses the |\n' + ' | | current locale setting to insert the ' + 'appropriate number |\n' + ' | | separator ' + 'characters. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | None | The same as ' + '"\'d\'". 
|\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + '\n' + 'In addition to the above presentation types, integers can ' + 'be formatted\n' + 'with the floating point presentation types listed below ' + '(except "\'n\'"\n' + 'and "None"). When doing so, "float()" is used to convert ' + 'the integer\n' + 'to a floating point number before formatting.\n' + '\n' + 'The available presentation types for "float" and "Decimal" ' + 'values are:\n' + '\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | Type | ' + 'Meaning ' + '|\n' + ' ' + '|===========|============================================================|\n' + ' | "\'e\'" | Scientific notation. For a given ' + 'precision "p", formats |\n' + ' | | the number in scientific notation with the ' + 'letter ‘e’ |\n' + ' | | separating the coefficient from the ' + 'exponent. The |\n' + ' | | coefficient has one digit before and "p" ' + 'digits after the |\n' + ' | | decimal point, for a total of "p + 1" ' + 'significant digits. |\n' + ' | | With no precision given, uses a precision ' + 'of "6" digits |\n' + ' | | after the decimal point for "float", and ' + 'shows all |\n' + ' | | coefficient digits for "Decimal". If no ' + 'digits follow the |\n' + ' | | decimal point, the decimal point is also ' + 'removed unless |\n' + ' | | the "#" option is ' + 'used. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'E\'" | Scientific notation. Same as "\'e\'" ' + 'except it uses an upper |\n' + ' | | case ‘E’ as the separator ' + 'character. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'f\'" | Fixed-point notation. For a given ' + 'precision "p", formats |\n' + ' | | the number as a decimal number with ' + 'exactly "p" digits |\n' + ' | | following the decimal point. 
With no ' + 'precision given, uses |\n' + ' | | a precision of "6" digits after the ' + 'decimal point for |\n' + ' | | "float", and uses a precision large enough ' + 'to show all |\n' + ' | | coefficient digits for "Decimal". If no ' + 'digits follow the |\n' + ' | | decimal point, the decimal point is also ' + 'removed unless |\n' + ' | | the "#" option is ' + 'used. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'F\'" | Fixed-point notation. Same as "\'f\'", ' + 'but converts "nan" to |\n' + ' | | "NAN" and "inf" to ' + '"INF". |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'g\'" | General format. For a given precision ' + '"p >= 1", this |\n' + ' | | rounds the number to "p" significant ' + 'digits and then |\n' + ' | | formats the result in either fixed-point ' + 'format or in |\n' + ' | | scientific notation, depending on its ' + 'magnitude. A |\n' + ' | | precision of "0" is treated as equivalent ' + 'to a precision |\n' + ' | | of "1". The precise rules are as follows: ' + 'suppose that |\n' + ' | | the result formatted with presentation ' + 'type "\'e\'" and |\n' + ' | | precision "p-1" would have exponent ' + '"exp". Then, if "m <= |\n' + ' | | exp < p", where "m" is -4 for floats and ' + '-6 for |\n' + ' | | "Decimals", the number is formatted with ' + 'presentation type |\n' + ' | | "\'f\'" and precision "p-1-exp". ' + 'Otherwise, the number is |\n' + ' | | formatted with presentation type "\'e\'" ' + 'and precision |\n' + ' | | "p-1". In both cases insignificant ' + 'trailing zeros are |\n' + ' | | removed from the significand, and the ' + 'decimal point is |\n' + ' | | also removed if there are no remaining ' + 'digits following |\n' + ' | | it, unless the "\'#\'" option is used. ' + 'With no precision |\n' + ' | | given, uses a precision of "6" significant ' + 'digits for |\n' + ' | | "float". 
For "Decimal", the coefficient of ' + 'the result is |\n' + ' | | formed from the coefficient digits of the ' + 'value; |\n' + ' | | scientific notation is used for values ' + 'smaller than "1e-6" |\n' + ' | | in absolute value and values where the ' + 'place value of the |\n' + ' | | least significant digit is larger than 1, ' + 'and fixed-point |\n' + ' | | notation is used otherwise. Positive and ' + 'negative |\n' + ' | | infinity, positive and negative zero, and ' + 'nans, are |\n' + ' | | formatted as "inf", "-inf", "0", "-0" and ' + '"nan" |\n' + ' | | respectively, regardless of the ' + 'precision. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'G\'" | General format. Same as "\'g\'" except ' + 'switches to "\'E\'" if |\n' + ' | | the number gets too large. The ' + 'representations of infinity |\n' + ' | | and NaN are uppercased, ' + 'too. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'n\'" | Number. This is the same as "\'g\'", ' + 'except that it uses the |\n' + ' | | current locale setting to insert the ' + 'appropriate number |\n' + ' | | separator ' + 'characters. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | "\'%\'" | Percentage. Multiplies the number by 100 ' + 'and displays in |\n' + ' | | fixed ("\'f\'") format, followed by a ' + 'percent sign. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + ' | None | For "float" this is the same as "\'g\'", ' + 'except that when |\n' + ' | | fixed-point notation is used to format the ' + 'result, it |\n' + ' | | always includes at least one digit past ' + 'the decimal point. |\n' + ' | | The precision used is as large as needed ' + 'to represent the |\n' + ' | | given value faithfully. 
For "Decimal", ' + 'this is the same |\n' + ' | | as either "\'g\'" or "\'G\'" depending on ' + 'the value of |\n' + ' | | "context.capitals" for the current decimal ' + 'context. The |\n' + ' | | overall effect is to match the output of ' + '"str()" as |\n' + ' | | altered by the other format ' + 'modifiers. |\n' + ' ' + '+-----------+------------------------------------------------------------+\n' + '\n' + '\n' + 'Format examples\n' + '===============\n' + '\n' + 'This section contains examples of the "str.format()" syntax ' + 'and\n' + 'comparison with the old "%"-formatting.\n' + '\n' + 'In most of the cases the syntax is similar to the old ' + '"%"-formatting,\n' + 'with the addition of the "{}" and with ":" used instead of ' + '"%". For\n' + 'example, "\'%03.2f\'" can be translated to "\'{:03.2f}\'".\n' + '\n' + 'The new format syntax also supports new and different ' + 'options, shown\n' + 'in the following examples.\n' + '\n' + 'Accessing arguments by position:\n' + '\n' + " >>> '{0}, {1}, {2}'.format('a', 'b', 'c')\n" + " 'a, b, c'\n" + " >>> '{}, {}, {}'.format('a', 'b', 'c') # 3.1+ only\n" + " 'a, b, c'\n" + " >>> '{2}, {1}, {0}'.format('a', 'b', 'c')\n" + " 'c, b, a'\n" + " >>> '{2}, {1}, {0}'.format(*'abc') # unpacking " + 'argument sequence\n' + " 'c, b, a'\n" + " >>> '{0}{1}{0}'.format('abra', 'cad') # arguments' " + 'indices can be repeated\n' + " 'abracadabra'\n" + '\n' + 'Accessing arguments by name:\n' + '\n' + " >>> 'Coordinates: {latitude}, " + "{longitude}'.format(latitude='37.24N', " + "longitude='-115.81W')\n" + " 'Coordinates: 37.24N, -115.81W'\n" + " >>> coord = {'latitude': '37.24N', 'longitude': " + "'-115.81W'}\n" + " >>> 'Coordinates: {latitude}, " + "{longitude}'.format(**coord)\n" + " 'Coordinates: 37.24N, -115.81W'\n" + '\n' + 'Accessing arguments’ attributes:\n' + '\n' + ' >>> c = 3-5j\n' + " >>> ('The complex number {0} is formed from the real " + "part {0.real} '\n" + " ... 
'and the imaginary part {0.imag}.').format(c)\n" + " 'The complex number (3-5j) is formed from the real part " + "3.0 and the imaginary part -5.0.'\n" + ' >>> class Point:\n' + ' ... def __init__(self, x, y):\n' + ' ... self.x, self.y = x, y\n' + ' ... def __str__(self):\n' + " ... return 'Point({self.x}, " + "{self.y})'.format(self=self)\n" + ' ...\n' + ' >>> str(Point(4, 2))\n' + " 'Point(4, 2)'\n" + '\n' + 'Accessing arguments’ items:\n' + '\n' + ' >>> coord = (3, 5)\n' + " >>> 'X: {0[0]}; Y: {0[1]}'.format(coord)\n" + " 'X: 3; Y: 5'\n" + '\n' + 'Replacing "%s" and "%r":\n' + '\n' + ' >>> "repr() shows quotes: {!r}; str() doesn\'t: ' + '{!s}".format(\'test1\', \'test2\')\n' + ' "repr() shows quotes: \'test1\'; str() doesn\'t: test2"\n' + '\n' + 'Aligning the text and specifying a width:\n' + '\n' + " >>> '{:<30}'.format('left aligned')\n" + " 'left aligned '\n" + " >>> '{:>30}'.format('right aligned')\n" + " ' right aligned'\n" + " >>> '{:^30}'.format('centered')\n" + " ' centered '\n" + " >>> '{:*^30}'.format('centered') # use '*' as a fill " + 'char\n' + " '***********centered***********'\n" + '\n' + 'Replacing "%+f", "%-f", and "% f" and specifying a sign:\n' + '\n' + " >>> '{:+f}; {:+f}'.format(3.14, -3.14) # show it " + 'always\n' + " '+3.140000; -3.140000'\n" + " >>> '{: f}; {: f}'.format(3.14, -3.14) # show a space " + 'for positive numbers\n' + " ' 3.140000; -3.140000'\n" + " >>> '{:-f}; {:-f}'.format(3.14, -3.14) # show only the " + "minus -- same as '{:f}; {:f}'\n" + " '3.140000; -3.140000'\n" + '\n' + 'Replacing "%x" and "%o" and converting the value to ' + 'different bases:\n' + '\n' + ' >>> # format also supports binary numbers\n' + ' >>> "int: {0:d}; hex: {0:x}; oct: {0:o}; bin: ' + '{0:b}".format(42)\n' + " 'int: 42; hex: 2a; oct: 52; bin: 101010'\n" + ' >>> # with 0x, 0o, or 0b as prefix:\n' + ' >>> "int: {0:d}; hex: {0:#x}; oct: {0:#o}; bin: ' + '{0:#b}".format(42)\n' + " 'int: 42; hex: 0x2a; oct: 0o52; bin: 0b101010'\n" + '\n' + 'Using the 
comma as a thousands separator:\n' + '\n' + " >>> '{:,}'.format(1234567890)\n" + " '1,234,567,890'\n" + '\n' + 'Expressing a percentage:\n' + '\n' + ' >>> points = 19\n' + ' >>> total = 22\n' + " >>> 'Correct answers: {:.2%}'.format(points/total)\n" + " 'Correct answers: 86.36%'\n" + '\n' + 'Using type-specific formatting:\n' + '\n' + ' >>> import datetime\n' + ' >>> d = datetime.datetime(2010, 7, 4, 12, 15, 58)\n' + " >>> '{:%Y-%m-%d %H:%M:%S}'.format(d)\n" + " '2010-07-04 12:15:58'\n" + '\n' + 'Nesting arguments and more complex examples:\n' + '\n' + " >>> for align, text in zip('<^>', ['left', 'center', " + "'right']):\n" + " ... '{0:{fill}{align}16}'.format(text, fill=align, " + 'align=align)\n' + ' ...\n' + " 'left<<<<<<<<<<<<'\n" + " '^^^^^center^^^^^'\n" + " '>>>>>>>>>>>right'\n" + ' >>>\n' + ' >>> octets = [192, 168, 0, 1]\n' + " >>> '{:02X}{:02X}{:02X}{:02X}'.format(*octets)\n" + " 'C0A80001'\n" + ' >>> int(_, 16)\n' + ' 3232235521\n' + ' >>>\n' + ' >>> width = 5\n' + ' >>> for num in range(5,12): \n' + " ... for base in 'dXob':\n" + " ... print('{0:{width}{base}}'.format(num, " + "base=base, width=width), end=' ')\n" + ' ... 
print()\n' + ' ...\n' + ' 5 5 5 101\n' + ' 6 6 6 110\n' + ' 7 7 7 111\n' + ' 8 8 10 1000\n' + ' 9 9 11 1001\n' + ' 10 A 12 1010\n' + ' 11 B 13 1011\n', + 'function': 'Function definitions\n' + '********************\n' + '\n' + 'A function definition defines a user-defined function object ' + '(see\n' + 'section The standard type hierarchy):\n' + '\n' + ' funcdef ::= [decorators] "def" funcname "(" ' + '[parameter_list] ")"\n' + ' ["->" expression] ":" suite\n' + ' decorators ::= decorator+\n' + ' decorator ::= "@" assignment_expression ' + 'NEWLINE\n' + ' parameter_list ::= defparameter ("," ' + 'defparameter)* "," "/" ["," [parameter_list_no_posonly]]\n' + ' | parameter_list_no_posonly\n' + ' parameter_list_no_posonly ::= defparameter ("," ' + 'defparameter)* ["," [parameter_list_starargs]]\n' + ' | parameter_list_starargs\n' + ' parameter_list_starargs ::= "*" [parameter] ("," ' + 'defparameter)* ["," ["**" parameter [","]]]\n' + ' | "**" parameter [","]\n' + ' parameter ::= identifier [":" expression]\n' + ' defparameter ::= parameter ["=" expression]\n' + ' funcname ::= identifier\n' + '\n' + 'A function definition is an executable statement. Its execution ' + 'binds\n' + 'the function name in the current local namespace to a function ' + 'object\n' + '(a wrapper around the executable code for the function). This\n' + 'function object contains a reference to the current global ' + 'namespace\n' + 'as the global namespace to be used when the function is called.\n' + '\n' + 'The function definition does not execute the function body; this ' + 'gets\n' + 'executed only when the function is called. [4]\n' + '\n' + 'A function definition may be wrapped by one or more *decorator*\n' + 'expressions. Decorator expressions are evaluated when the ' + 'function is\n' + 'defined, in the scope that contains the function definition. ' + 'The\n' + 'result must be a callable, which is invoked with the function ' + 'object\n' + 'as the only argument. 
The returned value is bound to the ' + 'function name\n' + 'instead of the function object. Multiple decorators are applied ' + 'in\n' + 'nested fashion. For example, the following code\n' + '\n' + ' @f1(arg)\n' + ' @f2\n' + ' def func(): pass\n' + '\n' + 'is roughly equivalent to\n' + '\n' + ' def func(): pass\n' + ' func = f1(arg)(f2(func))\n' + '\n' + 'except that the original function is not temporarily bound to ' + 'the name\n' + '"func".\n' + '\n' + 'Changed in version 3.9: Functions may be decorated with any ' + 'valid\n' + '"assignment_expression". Previously, the grammar was much more\n' + 'restrictive; see **PEP 614** for details.\n' + '\n' + 'When one or more *parameters* have the form *parameter* "="\n' + '*expression*, the function is said to have “default parameter ' + 'values.”\n' + 'For a parameter with a default value, the corresponding ' + '*argument* may\n' + 'be omitted from a call, in which case the parameter’s default ' + 'value is\n' + 'substituted. If a parameter has a default value, all following\n' + 'parameters up until the “"*"” must also have a default value — ' + 'this is\n' + 'a syntactic restriction that is not expressed by the grammar.\n' + '\n' + '**Default parameter values are evaluated from left to right when ' + 'the\n' + 'function definition is executed.** This means that the ' + 'expression is\n' + 'evaluated once, when the function is defined, and that the same ' + '“pre-\n' + 'computed” value is used for each call. This is especially ' + 'important\n' + 'to understand when a default parameter value is a mutable ' + 'object, such\n' + 'as a list or a dictionary: if the function modifies the object ' + '(e.g.\n' + 'by appending an item to a list), the default parameter value is ' + 'in\n' + 'effect modified. This is generally not what was intended. 
A ' + 'way\n' + 'around this is to use "None" as the default, and explicitly test ' + 'for\n' + 'it in the body of the function, e.g.:\n' + '\n' + ' def whats_on_the_telly(penguin=None):\n' + ' if penguin is None:\n' + ' penguin = []\n' + ' penguin.append("property of the zoo")\n' + ' return penguin\n' + '\n' + 'Function call semantics are described in more detail in section ' + 'Calls.\n' + 'A function call always assigns values to all parameters ' + 'mentioned in\n' + 'the parameter list, either from positional arguments, from ' + 'keyword\n' + 'arguments, or from default values. If the form “"*identifier"” ' + 'is\n' + 'present, it is initialized to a tuple receiving any excess ' + 'positional\n' + 'parameters, defaulting to the empty tuple. If the form\n' + '“"**identifier"” is present, it is initialized to a new ordered\n' + 'mapping receiving any excess keyword arguments, defaulting to a ' + 'new\n' + 'empty mapping of the same type. Parameters after “"*"” or\n' + '“"*identifier"” are keyword-only parameters and may only be ' + 'passed by\n' + 'keyword arguments. Parameters before “"/"” are positional-only\n' + 'parameters and may only be passed by positional arguments.\n' + '\n' + 'Changed in version 3.8: The "/" function parameter syntax may be ' + 'used\n' + 'to indicate positional-only parameters. See **PEP 570** for ' + 'details.\n' + '\n' + 'Parameters may have an *annotation* of the form “": ' + 'expression"”\n' + 'following the parameter name. Any parameter may have an ' + 'annotation,\n' + 'even those of the form "*identifier" or "**identifier". ' + 'Functions may\n' + 'have “return” annotation of the form “"-> expression"” after ' + 'the\n' + 'parameter list. These annotations can be any valid Python ' + 'expression.\n' + 'The presence of annotations does not change the semantics of a\n' + 'function. 
The annotation values are available as values of a\n' + 'dictionary keyed by the parameters’ names in the ' + '"__annotations__"\n' + 'attribute of the function object. If the "annotations" import ' + 'from\n' + '"__future__" is used, annotations are preserved as strings at ' + 'runtime\n' + 'which enables postponed evaluation. Otherwise, they are ' + 'evaluated\n' + 'when the function definition is executed. In this case ' + 'annotations\n' + 'may be evaluated in a different order than they appear in the ' + 'source\n' + 'code.\n' + '\n' + 'It is also possible to create anonymous functions (functions not ' + 'bound\n' + 'to a name), for immediate use in expressions. This uses lambda\n' + 'expressions, described in section Lambdas. Note that the ' + 'lambda\n' + 'expression is merely a shorthand for a simplified function ' + 'definition;\n' + 'a function defined in a “"def"” statement can be passed around ' + 'or\n' + 'assigned to another name just like a function defined by a ' + 'lambda\n' + 'expression. The “"def"” form is actually more powerful since ' + 'it\n' + 'allows the execution of multiple statements and annotations.\n' + '\n' + '**Programmer’s note:** Functions are first-class objects. A ' + '“"def"”\n' + 'statement executed inside a function definition defines a local\n' + 'function that can be returned or passed around. Free variables ' + 'used\n' + 'in the nested function can access the local variables of the ' + 'function\n' + 'containing the def. 
See section Naming and binding for ' + 'details.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 3107** - Function Annotations\n' + ' The original specification for function annotations.\n' + '\n' + ' **PEP 484** - Type Hints\n' + ' Definition of a standard meaning for annotations: type ' + 'hints.\n' + '\n' + ' **PEP 526** - Syntax for Variable Annotations\n' + ' Ability to type hint variable declarations, including ' + 'class\n' + ' variables and instance variables\n' + '\n' + ' **PEP 563** - Postponed Evaluation of Annotations\n' + ' Support for forward references within annotations by ' + 'preserving\n' + ' annotations in a string form at runtime instead of eager\n' + ' evaluation.\n', + 'global': 'The "global" statement\n' + '**********************\n' + '\n' + ' global_stmt ::= "global" identifier ("," identifier)*\n' + '\n' + 'The "global" statement is a declaration which holds for the ' + 'entire\n' + 'current code block. It means that the listed identifiers are to ' + 'be\n' + 'interpreted as globals. It would be impossible to assign to a ' + 'global\n' + 'variable without "global", although free variables may refer to\n' + 'globals without being declared global.\n' + '\n' + 'Names listed in a "global" statement must not be used in the same ' + 'code\n' + 'block textually preceding that "global" statement.\n' + '\n' + 'Names listed in a "global" statement must not be defined as ' + 'formal\n' + 'parameters, or as targets in "with" statements or "except" ' + 'clauses, or\n' + 'in a "for" target list, "class" definition, function definition,\n' + '"import" statement, or variable annotation.\n' + '\n' + '**CPython implementation detail:** The current implementation does ' + 'not\n' + 'enforce some of these restrictions, but programs should not abuse ' + 'this\n' + 'freedom, as future implementations may enforce them or silently ' + 'change\n' + 'the meaning of the program.\n' + '\n' + '**Programmer’s note:** "global" is a directive to the parser. 
It\n' + 'applies only to code parsed at the same time as the "global"\n' + 'statement. In particular, a "global" statement contained in a ' + 'string\n' + 'or code object supplied to the built-in "exec()" function does ' + 'not\n' + 'affect the code block *containing* the function call, and code\n' + 'contained in such a string is unaffected by "global" statements in ' + 'the\n' + 'code containing the function call. The same applies to the ' + '"eval()"\n' + 'and "compile()" functions.\n', + 'id-classes': 'Reserved classes of identifiers\n' + '*******************************\n' + '\n' + 'Certain classes of identifiers (besides keywords) have ' + 'special\n' + 'meanings. These classes are identified by the patterns of ' + 'leading and\n' + 'trailing underscore characters:\n' + '\n' + '"_*"\n' + ' Not imported by "from module import *".\n' + '\n' + '"_"\n' + ' In a "case" pattern within a "match" statement, "_" is a ' + 'soft\n' + ' keyword that denotes a wildcard.\n' + '\n' + ' Separately, the interactive interpreter makes the result of ' + 'the\n' + ' last evaluation available in the variable "_". (It is ' + 'stored in the\n' + ' "builtins" module, alongside built-in functions like ' + '"print".)\n' + '\n' + ' Elsewhere, "_" is a regular identifier. It is often used to ' + 'name\n' + ' “special” items, but it is not special to Python itself.\n' + '\n' + ' Note:\n' + '\n' + ' The name "_" is often used in conjunction with\n' + ' internationalization; refer to the documentation for the\n' + ' "gettext" module for more information on this ' + 'convention.It is\n' + ' also commonly used for unused variables.\n' + '\n' + '"__*__"\n' + ' System-defined names, informally known as “dunder” names. ' + 'These\n' + ' names are defined by the interpreter and its ' + 'implementation\n' + ' (including the standard library). Current system names are\n' + ' discussed in the Special method names section and ' + 'elsewhere. 
More\n' + ' will likely be defined in future versions of Python. *Any* ' + 'use of\n' + ' "__*__" names, in any context, that does not follow ' + 'explicitly\n' + ' documented use, is subject to breakage without warning.\n' + '\n' + '"__*"\n' + ' Class-private names. Names in this category, when used ' + 'within the\n' + ' context of a class definition, are re-written to use a ' + 'mangled form\n' + ' to help avoid name clashes between “private” attributes of ' + 'base and\n' + ' derived classes. See section Identifiers (Names).\n', + 'identifiers': 'Identifiers and keywords\n' + '************************\n' + '\n' + 'Identifiers (also referred to as *names*) are described by ' + 'the\n' + 'following lexical definitions.\n' + '\n' + 'The syntax of identifiers in Python is based on the Unicode ' + 'standard\n' + 'annex UAX-31, with elaboration and changes as defined below; ' + 'see also\n' + '**PEP 3131** for further details.\n' + '\n' + 'Within the ASCII range (U+0001..U+007F), the valid characters ' + 'for\n' + 'identifiers are the same as in Python 2.x: the uppercase and ' + 'lowercase\n' + 'letters "A" through "Z", the underscore "_" and, except for ' + 'the first\n' + 'character, the digits "0" through "9".\n' + '\n' + 'Python 3.0 introduces additional characters from outside the ' + 'ASCII\n' + 'range (see **PEP 3131**). For these characters, the ' + 'classification\n' + 'uses the version of the Unicode Character Database as ' + 'included in the\n' + '"unicodedata" module.\n' + '\n' + 'Identifiers are unlimited in length. 
Case is significant.\n' + '\n' + ' identifier ::= xid_start xid_continue*\n' + ' id_start ::= \n' + ' id_continue ::= \n' + ' xid_start ::= \n' + ' xid_continue ::= \n' + '\n' + 'The Unicode category codes mentioned above stand for:\n' + '\n' + '* *Lu* - uppercase letters\n' + '\n' + '* *Ll* - lowercase letters\n' + '\n' + '* *Lt* - titlecase letters\n' + '\n' + '* *Lm* - modifier letters\n' + '\n' + '* *Lo* - other letters\n' + '\n' + '* *Nl* - letter numbers\n' + '\n' + '* *Mn* - nonspacing marks\n' + '\n' + '* *Mc* - spacing combining marks\n' + '\n' + '* *Nd* - decimal numbers\n' + '\n' + '* *Pc* - connector punctuations\n' + '\n' + '* *Other_ID_Start* - explicit list of characters in ' + 'PropList.txt to\n' + ' support backwards compatibility\n' + '\n' + '* *Other_ID_Continue* - likewise\n' + '\n' + 'All identifiers are converted into the normal form NFKC while ' + 'parsing;\n' + 'comparison of identifiers is based on NFKC.\n' + '\n' + 'A non-normative HTML file listing all valid identifier ' + 'characters for\n' + 'Unicode 4.1 can be found at\n' + 'https://www.unicode.org/Public/13.0.0/ucd/DerivedCoreProperties.txt\n' + '\n' + '\n' + 'Keywords\n' + '========\n' + '\n' + 'The following identifiers are used as reserved words, or ' + '*keywords* of\n' + 'the language, and cannot be used as ordinary identifiers. ' + 'They must\n' + 'be spelled exactly as written here:\n' + '\n' + ' False await else import pass\n' + ' None break except in raise\n' + ' True class finally is return\n' + ' and continue for lambda try\n' + ' as def from nonlocal while\n' + ' assert del global not with\n' + ' async elif if or yield\n' + '\n' + '\n' + 'Soft Keywords\n' + '=============\n' + '\n' + 'New in version 3.10.\n' + '\n' + 'Some identifiers are only reserved under specific contexts. ' + 'These are\n' + 'known as *soft keywords*. 
The identifiers "match", "case" ' + 'and "_" can\n' + 'syntactically act as keywords in contexts related to the ' + 'pattern\n' + 'matching statement, but this distinction is done at the ' + 'parser level,\n' + 'not when tokenizing.\n' + '\n' + 'As soft keywords, their use with pattern matching is possible ' + 'while\n' + 'still preserving compatibility with existing code that uses ' + '"match",\n' + '"case" and "_" as identifier names.\n' + '\n' + '\n' + 'Reserved classes of identifiers\n' + '===============================\n' + '\n' + 'Certain classes of identifiers (besides keywords) have ' + 'special\n' + 'meanings. These classes are identified by the patterns of ' + 'leading and\n' + 'trailing underscore characters:\n' + '\n' + '"_*"\n' + ' Not imported by "from module import *".\n' + '\n' + '"_"\n' + ' In a "case" pattern within a "match" statement, "_" is a ' + 'soft\n' + ' keyword that denotes a wildcard.\n' + '\n' + ' Separately, the interactive interpreter makes the result ' + 'of the\n' + ' last evaluation available in the variable "_". (It is ' + 'stored in the\n' + ' "builtins" module, alongside built-in functions like ' + '"print".)\n' + '\n' + ' Elsewhere, "_" is a regular identifier. It is often used ' + 'to name\n' + ' “special” items, but it is not special to Python itself.\n' + '\n' + ' Note:\n' + '\n' + ' The name "_" is often used in conjunction with\n' + ' internationalization; refer to the documentation for ' + 'the\n' + ' "gettext" module for more information on this ' + 'convention.It is\n' + ' also commonly used for unused variables.\n' + '\n' + '"__*__"\n' + ' System-defined names, informally known as “dunder” names. ' + 'These\n' + ' names are defined by the interpreter and its ' + 'implementation\n' + ' (including the standard library). Current system names ' + 'are\n' + ' discussed in the Special method names section and ' + 'elsewhere. More\n' + ' will likely be defined in future versions of Python. 
' + '*Any* use of\n' + ' "__*__" names, in any context, that does not follow ' + 'explicitly\n' + ' documented use, is subject to breakage without warning.\n' + '\n' + '"__*"\n' + ' Class-private names. Names in this category, when used ' + 'within the\n' + ' context of a class definition, are re-written to use a ' + 'mangled form\n' + ' to help avoid name clashes between “private” attributes of ' + 'base and\n' + ' derived classes. See section Identifiers (Names).\n', + 'if': 'The "if" statement\n' + '******************\n' + '\n' + 'The "if" statement is used for conditional execution:\n' + '\n' + ' if_stmt ::= "if" assignment_expression ":" suite\n' + ' ("elif" assignment_expression ":" suite)*\n' + ' ["else" ":" suite]\n' + '\n' + 'It selects exactly one of the suites by evaluating the expressions ' + 'one\n' + 'by one until one is found to be true (see section Boolean operations\n' + 'for the definition of true and false); then that suite is executed\n' + '(and no other part of the "if" statement is executed or evaluated).\n' + 'If all expressions are false, the suite of the "else" clause, if\n' + 'present, is executed.\n', + 'imaginary': 'Imaginary literals\n' + '******************\n' + '\n' + 'Imaginary literals are described by the following lexical ' + 'definitions:\n' + '\n' + ' imagnumber ::= (floatnumber | digitpart) ("j" | "J")\n' + '\n' + 'An imaginary literal yields a complex number with a real part ' + 'of 0.0.\n' + 'Complex numbers are represented as a pair of floating point ' + 'numbers\n' + 'and have the same restrictions on their range. To create a ' + 'complex\n' + 'number with a nonzero real part, add a floating point number to ' + 'it,\n' + 'e.g., "(3+4j)". 
Some examples of imaginary literals:\n' + '\n' + ' 3.14j 10.j 10j .001j 1e100j 3.14e-10j ' + '3.14_15_93j\n', + 'import': 'The "import" statement\n' + '**********************\n' + '\n' + ' import_stmt ::= "import" module ["as" identifier] ("," ' + 'module ["as" identifier])*\n' + ' | "from" relative_module "import" identifier ' + '["as" identifier]\n' + ' ("," identifier ["as" identifier])*\n' + ' | "from" relative_module "import" "(" ' + 'identifier ["as" identifier]\n' + ' ("," identifier ["as" identifier])* [","] ")"\n' + ' | "from" relative_module "import" "*"\n' + ' module ::= (identifier ".")* identifier\n' + ' relative_module ::= "."* module | "."+\n' + '\n' + 'The basic import statement (no "from" clause) is executed in two\n' + 'steps:\n' + '\n' + '1. find a module, loading and initializing it if necessary\n' + '\n' + '2. define a name or names in the local namespace for the scope ' + 'where\n' + ' the "import" statement occurs.\n' + '\n' + 'When the statement contains multiple clauses (separated by commas) ' + 'the\n' + 'two steps are carried out separately for each clause, just as ' + 'though\n' + 'the clauses had been separated out into individual import ' + 'statements.\n' + '\n' + 'The details of the first step, finding and loading modules, are\n' + 'described in greater detail in the section on the import system, ' + 'which\n' + 'also describes the various types of packages and modules that can ' + 'be\n' + 'imported, as well as all the hooks that can be used to customize ' + 'the\n' + 'import system. 
Note that failures in this step may indicate ' + 'either\n' + 'that the module could not be located, *or* that an error occurred\n' + 'while initializing the module, which includes execution of the\n' + 'module’s code.\n' + '\n' + 'If the requested module is retrieved successfully, it will be ' + 'made\n' + 'available in the local namespace in one of three ways:\n' + '\n' + '* If the module name is followed by "as", then the name following ' + '"as"\n' + ' is bound directly to the imported module.\n' + '\n' + '* If no other name is specified, and the module being imported is ' + 'a\n' + ' top level module, the module’s name is bound in the local ' + 'namespace\n' + ' as a reference to the imported module\n' + '\n' + '* If the module being imported is *not* a top level module, then ' + 'the\n' + ' name of the top level package that contains the module is bound ' + 'in\n' + ' the local namespace as a reference to the top level package. ' + 'The\n' + ' imported module must be accessed using its full qualified name\n' + ' rather than directly\n' + '\n' + 'The "from" form uses a slightly more complex process:\n' + '\n' + '1. find the module specified in the "from" clause, loading and\n' + ' initializing it if necessary;\n' + '\n' + '2. for each of the identifiers specified in the "import" clauses:\n' + '\n' + ' 1. check if the imported module has an attribute by that name\n' + '\n' + ' 2. if not, attempt to import a submodule with that name and ' + 'then\n' + ' check the imported module again for that attribute\n' + '\n' + ' 3. if the attribute is not found, "ImportError" is raised.\n' + '\n' + ' 4. 
otherwise, a reference to that value is stored in the local\n' + ' namespace, using the name in the "as" clause if it is ' + 'present,\n' + ' otherwise using the attribute name\n' + '\n' + 'Examples:\n' + '\n' + ' import foo # foo imported and bound locally\n' + ' import foo.bar.baz # foo, foo.bar, and foo.bar.baz ' + 'imported, foo bound locally\n' + ' import foo.bar.baz as fbb # foo, foo.bar, and foo.bar.baz ' + 'imported, foo.bar.baz bound as fbb\n' + ' from foo.bar import baz # foo, foo.bar, and foo.bar.baz ' + 'imported, foo.bar.baz bound as baz\n' + ' from foo import attr # foo imported and foo.attr bound as ' + 'attr\n' + '\n' + 'If the list of identifiers is replaced by a star ("\'*\'"), all ' + 'public\n' + 'names defined in the module are bound in the local namespace for ' + 'the\n' + 'scope where the "import" statement occurs.\n' + '\n' + 'The *public names* defined by a module are determined by checking ' + 'the\n' + 'module’s namespace for a variable named "__all__"; if defined, it ' + 'must\n' + 'be a sequence of strings which are names defined or imported by ' + 'that\n' + 'module. The names given in "__all__" are all considered public ' + 'and\n' + 'are required to exist. If "__all__" is not defined, the set of ' + 'public\n' + 'names includes all names found in the module’s namespace which do ' + 'not\n' + 'begin with an underscore character ("\'_\'"). "__all__" should ' + 'contain\n' + 'the entire public API. It is intended to avoid accidentally ' + 'exporting\n' + 'items that are not part of the API (such as library modules which ' + 'were\n' + 'imported and used within the module).\n' + '\n' + 'The wild card form of import — "from module import *" — is only\n' + 'allowed at the module level. Attempting to use it in class or\n' + 'function definitions will raise a "SyntaxError".\n' + '\n' + 'When specifying what module to import you do not have to specify ' + 'the\n' + 'absolute name of the module. 
When a module or package is ' + 'contained\n' + 'within another package it is possible to make a relative import ' + 'within\n' + 'the same top package without having to mention the package name. ' + 'By\n' + 'using leading dots in the specified module or package after "from" ' + 'you\n' + 'can specify how high to traverse up the current package hierarchy\n' + 'without specifying exact names. One leading dot means the current\n' + 'package where the module making the import exists. Two dots means ' + 'up\n' + 'one package level. Three dots is up two levels, etc. So if you ' + 'execute\n' + '"from . import mod" from a module in the "pkg" package then you ' + 'will\n' + 'end up importing "pkg.mod". If you execute "from ..subpkg2 import ' + 'mod"\n' + 'from within "pkg.subpkg1" you will import "pkg.subpkg2.mod". The\n' + 'specification for relative imports is contained in the Package\n' + 'Relative Imports section.\n' + '\n' + '"importlib.import_module()" is provided to support applications ' + 'that\n' + 'determine dynamically the modules to be loaded.\n' + '\n' + 'Raises an auditing event "import" with arguments "module", ' + '"filename",\n' + '"sys.path", "sys.meta_path", "sys.path_hooks".\n' + '\n' + '\n' + 'Future statements\n' + '=================\n' + '\n' + 'A *future statement* is a directive to the compiler that a ' + 'particular\n' + 'module should be compiled using syntax or semantics that will be\n' + 'available in a specified future release of Python where the ' + 'feature\n' + 'becomes standard.\n' + '\n' + 'The future statement is intended to ease migration to future ' + 'versions\n' + 'of Python that introduce incompatible changes to the language. 
' + 'It\n' + 'allows use of the new features on a per-module basis before the\n' + 'release in which the feature becomes standard.\n' + '\n' + ' future_stmt ::= "from" "__future__" "import" feature ["as" ' + 'identifier]\n' + ' ("," feature ["as" identifier])*\n' + ' | "from" "__future__" "import" "(" feature ' + '["as" identifier]\n' + ' ("," feature ["as" identifier])* [","] ")"\n' + ' feature ::= identifier\n' + '\n' + 'A future statement must appear near the top of the module. The ' + 'only\n' + 'lines that can appear before a future statement are:\n' + '\n' + '* the module docstring (if any),\n' + '\n' + '* comments,\n' + '\n' + '* blank lines, and\n' + '\n' + '* other future statements.\n' + '\n' + 'The only feature that requires using the future statement is\n' + '"annotations" (see **PEP 563**).\n' + '\n' + 'All historical features enabled by the future statement are still\n' + 'recognized by Python 3. The list includes "absolute_import",\n' + '"division", "generators", "generator_stop", "unicode_literals",\n' + '"print_function", "nested_scopes" and "with_statement". They are ' + 'all\n' + 'redundant because they are always enabled, and only kept for ' + 'backwards\n' + 'compatibility.\n' + '\n' + 'A future statement is recognized and treated specially at compile\n' + 'time: Changes to the semantics of core constructs are often\n' + 'implemented by generating different code. It may even be the ' + 'case\n' + 'that a new feature introduces new incompatible syntax (such as a ' + 'new\n' + 'reserved word), in which case the compiler may need to parse the\n' + 'module differently. 
Such decisions cannot be pushed off until\n' + 'runtime.\n' + '\n' + 'For any given release, the compiler knows which feature names ' + 'have\n' + 'been defined, and raises a compile-time error if a future ' + 'statement\n' + 'contains a feature not known to it.\n' + '\n' + 'The direct runtime semantics are the same as for any import ' + 'statement:\n' + 'there is a standard module "__future__", described later, and it ' + 'will\n' + 'be imported in the usual way at the time the future statement is\n' + 'executed.\n' + '\n' + 'The interesting runtime semantics depend on the specific feature\n' + 'enabled by the future statement.\n' + '\n' + 'Note that there is nothing special about the statement:\n' + '\n' + ' import __future__ [as name]\n' + '\n' + 'That is not a future statement; it’s an ordinary import statement ' + 'with\n' + 'no special semantics or syntax restrictions.\n' + '\n' + 'Code compiled by calls to the built-in functions "exec()" and\n' + '"compile()" that occur in a module "M" containing a future ' + 'statement\n' + 'will, by default, use the new syntax or semantics associated with ' + 'the\n' + 'future statement. This can be controlled by optional arguments ' + 'to\n' + '"compile()" — see the documentation of that function for details.\n' + '\n' + 'A future statement typed at an interactive interpreter prompt ' + 'will\n' + 'take effect for the rest of the interpreter session. If an\n' + 'interpreter is started with the "-i" option, is passed a script ' + 'name\n' + 'to execute, and the script includes a future statement, it will be ' + 'in\n' + 'effect in the interactive session started after the script is\n' + 'executed.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 236** - Back to the __future__\n' + ' The original proposal for the __future__ mechanism.\n', + 'in': 'Membership test operations\n' + '**************************\n' + '\n' + 'The operators "in" and "not in" test for membership. 
"x in s"\n' + 'evaluates to "True" if *x* is a member of *s*, and "False" otherwise.\n' + '"x not in s" returns the negation of "x in s". All built-in ' + 'sequences\n' + 'and set types support this as well as dictionary, for which "in" ' + 'tests\n' + 'whether the dictionary has a given key. For container types such as\n' + 'list, tuple, set, frozenset, dict, or collections.deque, the\n' + 'expression "x in y" is equivalent to "any(x is e or x == e for e in\n' + 'y)".\n' + '\n' + 'For the string and bytes types, "x in y" is "True" if and only if *x*\n' + 'is a substring of *y*. An equivalent test is "y.find(x) != -1".\n' + 'Empty strings are always considered to be a substring of any other\n' + 'string, so """ in "abc"" will return "True".\n' + '\n' + 'For user-defined classes which define the "__contains__()" method, "x\n' + 'in y" returns "True" if "y.__contains__(x)" returns a true value, and\n' + '"False" otherwise.\n' + '\n' + 'For user-defined classes which do not define "__contains__()" but do\n' + 'define "__iter__()", "x in y" is "True" if some value "z", for which\n' + 'the expression "x is z or x == z" is true, is produced while ' + 'iterating\n' + 'over "y". If an exception is raised during the iteration, it is as if\n' + '"in" raised that exception.\n' + '\n' + 'Lastly, the old-style iteration protocol is tried: if a class defines\n' + '"__getitem__()", "x in y" is "True" if and only if there is a non-\n' + 'negative integer index *i* such that "x is y[i] or x == y[i]", and no\n' + 'lower integer index raises the "IndexError" exception. 
(If any other\n' + 'exception is raised, it is as if "in" raised that exception).\n' + '\n' + 'The operator "not in" is defined to have the inverse truth value of\n' + '"in".\n', + 'integers': 'Integer literals\n' + '****************\n' + '\n' + 'Integer literals are described by the following lexical ' + 'definitions:\n' + '\n' + ' integer ::= decinteger | bininteger | octinteger | ' + 'hexinteger\n' + ' decinteger ::= nonzerodigit (["_"] digit)* | "0"+ (["_"] ' + '"0")*\n' + ' bininteger ::= "0" ("b" | "B") (["_"] bindigit)+\n' + ' octinteger ::= "0" ("o" | "O") (["_"] octdigit)+\n' + ' hexinteger ::= "0" ("x" | "X") (["_"] hexdigit)+\n' + ' nonzerodigit ::= "1"..."9"\n' + ' digit ::= "0"..."9"\n' + ' bindigit ::= "0" | "1"\n' + ' octdigit ::= "0"..."7"\n' + ' hexdigit ::= digit | "a"..."f" | "A"..."F"\n' + '\n' + 'There is no limit for the length of integer literals apart from ' + 'what\n' + 'can be stored in available memory.\n' + '\n' + 'Underscores are ignored for determining the numeric value of ' + 'the\n' + 'literal. They can be used to group digits for enhanced ' + 'readability.\n' + 'One underscore can occur between digits, and after base ' + 'specifiers\n' + 'like "0x".\n' + '\n' + 'Note that leading zeros in a non-zero decimal number are not ' + 'allowed.\n' + 'This is for disambiguation with C-style octal literals, which ' + 'Python\n' + 'used before version 3.0.\n' + '\n' + 'Some examples of integer literals:\n' + '\n' + ' 7 2147483647 0o177 0b100110111\n' + ' 3 79228162514264337593543950336 0o377 0xdeadbeef\n' + ' 100_000_000_000 0b_1110_0101\n' + '\n' + 'Changed in version 3.6: Underscores are now allowed for ' + 'grouping\n' + 'purposes in literals.\n', + 'lambda': 'Lambdas\n' + '*******\n' + '\n' + ' lambda_expr ::= "lambda" [parameter_list] ":" expression\n' + '\n' + 'Lambda expressions (sometimes called lambda forms) are used to ' + 'create\n' + 'anonymous functions. 
The expression "lambda parameters: ' + 'expression"\n' + 'yields a function object. The unnamed object behaves like a ' + 'function\n' + 'object defined with:\n' + '\n' + ' def (parameters):\n' + ' return expression\n' + '\n' + 'See section Function definitions for the syntax of parameter ' + 'lists.\n' + 'Note that functions created with lambda expressions cannot ' + 'contain\n' + 'statements or annotations.\n', + 'lists': 'List displays\n' + '*************\n' + '\n' + 'A list display is a possibly empty series of expressions enclosed ' + 'in\n' + 'square brackets:\n' + '\n' + ' list_display ::= "[" [starred_list | comprehension] "]"\n' + '\n' + 'A list display yields a new list object, the contents being ' + 'specified\n' + 'by either a list of expressions or a comprehension. When a comma-\n' + 'separated list of expressions is supplied, its elements are ' + 'evaluated\n' + 'from left to right and placed into the list object in that order.\n' + 'When a comprehension is supplied, the list is constructed from the\n' + 'elements resulting from the comprehension.\n', + 'naming': 'Naming and binding\n' + '******************\n' + '\n' + '\n' + 'Binding of names\n' + '================\n' + '\n' + '*Names* refer to objects. Names are introduced by name binding\n' + 'operations.\n' + '\n' + 'The following constructs bind names:\n' + '\n' + '* formal parameters to functions,\n' + '\n' + '* class definitions,\n' + '\n' + '* function definitions,\n' + '\n' + '* assignment expressions,\n' + '\n' + '* targets that are identifiers if occurring in an assignment:\n' + '\n' + ' * "for" loop header,\n' + '\n' + ' * after "as" in a "with" statement, "except" clause or in the ' + 'as-\n' + ' pattern in structural pattern matching,\n' + '\n' + ' * in a capture pattern in structural pattern matching\n' + '\n' + '* "import" statements.\n' + '\n' + 'The "import" statement of the form "from ... 
import *" binds all ' + 'names\n' + 'defined in the imported module, except those beginning with an\n' + 'underscore. This form may only be used at the module level.\n' + '\n' + 'A target occurring in a "del" statement is also considered bound ' + 'for\n' + 'this purpose (though the actual semantics are to unbind the ' + 'name).\n' + '\n' + 'Each assignment or import statement occurs within a block defined ' + 'by a\n' + 'class or function definition or at the module level (the ' + 'top-level\n' + 'code block).\n' + '\n' + 'If a name is bound in a block, it is a local variable of that ' + 'block,\n' + 'unless declared as "nonlocal" or "global". If a name is bound at ' + 'the\n' + 'module level, it is a global variable. (The variables of the ' + 'module\n' + 'code block are local and global.) If a variable is used in a ' + 'code\n' + 'block but not defined there, it is a *free variable*.\n' + '\n' + 'Each occurrence of a name in the program text refers to the ' + '*binding*\n' + 'of that name established by the following name resolution rules.\n' + '\n' + '\n' + 'Resolution of names\n' + '===================\n' + '\n' + 'A *scope* defines the visibility of a name within a block. If a ' + 'local\n' + 'variable is defined in a block, its scope includes that block. If ' + 'the\n' + 'definition occurs in a function block, the scope extends to any ' + 'blocks\n' + 'contained within the defining one, unless a contained block ' + 'introduces\n' + 'a different binding for the name.\n' + '\n' + 'When a name is used in a code block, it is resolved using the ' + 'nearest\n' + 'enclosing scope. The set of all such scopes visible to a code ' + 'block\n' + 'is called the block’s *environment*.\n' + '\n' + 'When a name is not found at all, a "NameError" exception is ' + 'raised. 
If\n' + 'the current scope is a function scope, and the name refers to a ' + 'local\n' + 'variable that has not yet been bound to a value at the point where ' + 'the\n' + 'name is used, an "UnboundLocalError" exception is raised.\n' + '"UnboundLocalError" is a subclass of "NameError".\n' + '\n' + 'If a name binding operation occurs anywhere within a code block, ' + 'all\n' + 'uses of the name within the block are treated as references to ' + 'the\n' + 'current block. This can lead to errors when a name is used within ' + 'a\n' + 'block before it is bound. This rule is subtle. Python lacks\n' + 'declarations and allows name binding operations to occur anywhere\n' + 'within a code block. The local variables of a code block can be\n' + 'determined by scanning the entire text of the block for name ' + 'binding\n' + 'operations. See the FAQ entry on UnboundLocalError for examples.\n' + '\n' + 'If the "global" statement occurs within a block, all uses of the ' + 'names\n' + 'specified in the statement refer to the bindings of those names in ' + 'the\n' + 'top-level namespace. Names are resolved in the top-level ' + 'namespace by\n' + 'searching the global namespace, i.e. the namespace of the module\n' + 'containing the code block, and the builtins namespace, the ' + 'namespace\n' + 'of the module "builtins". The global namespace is searched ' + 'first. If\n' + 'the names are not found there, the builtins namespace is ' + 'searched.\n' + 'The "global" statement must precede all uses of the listed names.\n' + '\n' + 'The "global" statement has the same scope as a name binding ' + 'operation\n' + 'in the same block. 
If the nearest enclosing scope for a free ' + 'variable\n' + 'contains a global statement, the free variable is treated as a ' + 'global.\n' + '\n' + 'The "nonlocal" statement causes corresponding names to refer to\n' + 'previously bound variables in the nearest enclosing function ' + 'scope.\n' + '"SyntaxError" is raised at compile time if the given name does ' + 'not\n' + 'exist in any enclosing function scope.\n' + '\n' + 'The namespace for a module is automatically created the first time ' + 'a\n' + 'module is imported. The main module for a script is always ' + 'called\n' + '"__main__".\n' + '\n' + 'Class definition blocks and arguments to "exec()" and "eval()" ' + 'are\n' + 'special in the context of name resolution. A class definition is ' + 'an\n' + 'executable statement that may use and define names. These ' + 'references\n' + 'follow the normal rules for name resolution with an exception ' + 'that\n' + 'unbound local variables are looked up in the global namespace. ' + 'The\n' + 'namespace of the class definition becomes the attribute dictionary ' + 'of\n' + 'the class. The scope of names defined in a class block is limited ' + 'to\n' + 'the class block; it does not extend to the code blocks of methods ' + '–\n' + 'this includes comprehensions and generator expressions since they ' + 'are\n' + 'implemented using a function scope. This means that the ' + 'following\n' + 'will fail:\n' + '\n' + ' class A:\n' + ' a = 42\n' + ' b = list(a + i for i in range(10))\n' + '\n' + '\n' + 'Builtins and restricted execution\n' + '=================================\n' + '\n' + '**CPython implementation detail:** Users should not touch\n' + '"__builtins__"; it is strictly an implementation detail. 
Users\n' + 'wanting to override values in the builtins namespace should ' + '"import"\n' + 'the "builtins" module and modify its attributes appropriately.\n' + '\n' + 'The builtins namespace associated with the execution of a code ' + 'block\n' + 'is actually found by looking up the name "__builtins__" in its ' + 'global\n' + 'namespace; this should be a dictionary or a module (in the latter ' + 'case\n' + 'the module’s dictionary is used). By default, when in the ' + '"__main__"\n' + 'module, "__builtins__" is the built-in module "builtins"; when in ' + 'any\n' + 'other module, "__builtins__" is an alias for the dictionary of ' + 'the\n' + '"builtins" module itself.\n' + '\n' + '\n' + 'Interaction with dynamic features\n' + '=================================\n' + '\n' + 'Name resolution of free variables occurs at runtime, not at ' + 'compile\n' + 'time. This means that the following code will print 42:\n' + '\n' + ' i = 10\n' + ' def f():\n' + ' print(i)\n' + ' i = 42\n' + ' f()\n' + '\n' + 'The "eval()" and "exec()" functions do not have access to the ' + 'full\n' + 'environment for resolving names. Names may be resolved in the ' + 'local\n' + 'and global namespaces of the caller. Free variables are not ' + 'resolved\n' + 'in the nearest enclosing namespace, but in the global namespace. ' + '[1]\n' + 'The "exec()" and "eval()" functions have optional arguments to\n' + 'override the global and local namespace. If only one namespace ' + 'is\n' + 'specified, it is used for both.\n', + 'nonlocal': 'The "nonlocal" statement\n' + '************************\n' + '\n' + ' nonlocal_stmt ::= "nonlocal" identifier ("," identifier)*\n' + '\n' + 'The "nonlocal" statement causes the listed identifiers to refer ' + 'to\n' + 'previously bound variables in the nearest enclosing scope ' + 'excluding\n' + 'globals. This is important because the default behavior for ' + 'binding is\n' + 'to search the local namespace first. 
The statement allows\n' + 'encapsulated code to rebind variables outside of the local ' + 'scope\n' + 'besides the global (module) scope.\n' + '\n' + 'Names listed in a "nonlocal" statement, unlike those listed in ' + 'a\n' + '"global" statement, must refer to pre-existing bindings in an\n' + 'enclosing scope (the scope in which a new binding should be ' + 'created\n' + 'cannot be determined unambiguously).\n' + '\n' + 'Names listed in a "nonlocal" statement must not collide with ' + 'pre-\n' + 'existing bindings in the local scope.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 3104** - Access to Names in Outer Scopes\n' + ' The specification for the "nonlocal" statement.\n', + 'numbers': 'Numeric literals\n' + '****************\n' + '\n' + 'There are three types of numeric literals: integers, floating ' + 'point\n' + 'numbers, and imaginary numbers. There are no complex literals\n' + '(complex numbers can be formed by adding a real number and an\n' + 'imaginary number).\n' + '\n' + 'Note that numeric literals do not include a sign; a phrase like ' + '"-1"\n' + 'is actually an expression composed of the unary operator ‘"-"’ ' + 'and the\n' + 'literal "1".\n', + 'numeric-types': 'Emulating numeric types\n' + '***********************\n' + '\n' + 'The following methods can be defined to emulate numeric ' + 'objects.\n' + 'Methods corresponding to operations that are not supported ' + 'by the\n' + 'particular kind of number implemented (e.g., bitwise ' + 'operations for\n' + 'non-integral numbers) should be left undefined.\n' + '\n' + 'object.__add__(self, other)\n' + 'object.__sub__(self, other)\n' + 'object.__mul__(self, other)\n' + 'object.__matmul__(self, other)\n' + 'object.__truediv__(self, other)\n' + 'object.__floordiv__(self, other)\n' + 'object.__mod__(self, other)\n' + 'object.__divmod__(self, other)\n' + 'object.__pow__(self, other[, modulo])\n' + 'object.__lshift__(self, other)\n' + 'object.__rshift__(self, other)\n' + 'object.__and__(self, other)\n' + 
'object.__xor__(self, other)\n' + 'object.__or__(self, other)\n' + '\n' + ' These methods are called to implement the binary ' + 'arithmetic\n' + ' operations ("+", "-", "*", "@", "/", "//", "%", ' + '"divmod()",\n' + ' "pow()", "**", "<<", ">>", "&", "^", "|"). For ' + 'instance, to\n' + ' evaluate the expression "x + y", where *x* is an ' + 'instance of a\n' + ' class that has an "__add__()" method, "x.__add__(y)" is ' + 'called.\n' + ' The "__divmod__()" method should be the equivalent to ' + 'using\n' + ' "__floordiv__()" and "__mod__()"; it should not be ' + 'related to\n' + ' "__truediv__()". Note that "__pow__()" should be ' + 'defined to accept\n' + ' an optional third argument if the ternary version of the ' + 'built-in\n' + ' "pow()" function is to be supported.\n' + '\n' + ' If one of those methods does not support the operation ' + 'with the\n' + ' supplied arguments, it should return "NotImplemented".\n' + '\n' + 'object.__radd__(self, other)\n' + 'object.__rsub__(self, other)\n' + 'object.__rmul__(self, other)\n' + 'object.__rmatmul__(self, other)\n' + 'object.__rtruediv__(self, other)\n' + 'object.__rfloordiv__(self, other)\n' + 'object.__rmod__(self, other)\n' + 'object.__rdivmod__(self, other)\n' + 'object.__rpow__(self, other[, modulo])\n' + 'object.__rlshift__(self, other)\n' + 'object.__rrshift__(self, other)\n' + 'object.__rand__(self, other)\n' + 'object.__rxor__(self, other)\n' + 'object.__ror__(self, other)\n' + '\n' + ' These methods are called to implement the binary ' + 'arithmetic\n' + ' operations ("+", "-", "*", "@", "/", "//", "%", ' + '"divmod()",\n' + ' "pow()", "**", "<<", ">>", "&", "^", "|") with reflected ' + '(swapped)\n' + ' operands. These functions are only called if the left ' + 'operand does\n' + ' not support the corresponding operation [3] and the ' + 'operands are of\n' + ' different types. 
[4] For instance, to evaluate the ' + 'expression "x -\n' + ' y", where *y* is an instance of a class that has an ' + '"__rsub__()"\n' + ' method, "y.__rsub__(x)" is called if "x.__sub__(y)" ' + 'returns\n' + ' *NotImplemented*.\n' + '\n' + ' Note that ternary "pow()" will not try calling ' + '"__rpow__()" (the\n' + ' coercion rules would become too complicated).\n' + '\n' + ' Note:\n' + '\n' + ' If the right operand’s type is a subclass of the left ' + 'operand’s\n' + ' type and that subclass provides a different ' + 'implementation of the\n' + ' reflected method for the operation, this method will ' + 'be called\n' + ' before the left operand’s non-reflected method. This ' + 'behavior\n' + ' allows subclasses to override their ancestors’ ' + 'operations.\n' + '\n' + 'object.__iadd__(self, other)\n' + 'object.__isub__(self, other)\n' + 'object.__imul__(self, other)\n' + 'object.__imatmul__(self, other)\n' + 'object.__itruediv__(self, other)\n' + 'object.__ifloordiv__(self, other)\n' + 'object.__imod__(self, other)\n' + 'object.__ipow__(self, other[, modulo])\n' + 'object.__ilshift__(self, other)\n' + 'object.__irshift__(self, other)\n' + 'object.__iand__(self, other)\n' + 'object.__ixor__(self, other)\n' + 'object.__ior__(self, other)\n' + '\n' + ' These methods are called to implement the augmented ' + 'arithmetic\n' + ' assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", ' + '"**=",\n' + ' "<<=", ">>=", "&=", "^=", "|="). These methods should ' + 'attempt to\n' + ' do the operation in-place (modifying *self*) and return ' + 'the result\n' + ' (which could be, but does not have to be, *self*). If a ' + 'specific\n' + ' method is not defined, the augmented assignment falls ' + 'back to the\n' + ' normal methods. For instance, if *x* is an instance of ' + 'a class\n' + ' with an "__iadd__()" method, "x += y" is equivalent to ' + '"x =\n' + ' x.__iadd__(y)" . 
Otherwise, "x.__add__(y)" and ' + '"y.__radd__(x)" are\n' + ' considered, as with the evaluation of "x + y". In ' + 'certain\n' + ' situations, augmented assignment can result in ' + 'unexpected errors\n' + ' (see Why does a_tuple[i] += [‘item’] raise an exception ' + 'when the\n' + ' addition works?), but this behavior is in fact part of ' + 'the data\n' + ' model.\n' + '\n' + 'object.__neg__(self)\n' + 'object.__pos__(self)\n' + 'object.__abs__(self)\n' + 'object.__invert__(self)\n' + '\n' + ' Called to implement the unary arithmetic operations ' + '("-", "+",\n' + ' "abs()" and "~").\n' + '\n' + 'object.__complex__(self)\n' + 'object.__int__(self)\n' + 'object.__float__(self)\n' + '\n' + ' Called to implement the built-in functions "complex()", ' + '"int()" and\n' + ' "float()". Should return a value of the appropriate ' + 'type.\n' + '\n' + 'object.__index__(self)\n' + '\n' + ' Called to implement "operator.index()", and whenever ' + 'Python needs\n' + ' to losslessly convert the numeric object to an integer ' + 'object (such\n' + ' as in slicing, or in the built-in "bin()", "hex()" and ' + '"oct()"\n' + ' functions). Presence of this method indicates that the ' + 'numeric\n' + ' object is an integer type. Must return an integer.\n' + '\n' + ' If "__int__()", "__float__()" and "__complex__()" are ' + 'not defined\n' + ' then corresponding built-in functions "int()", "float()" ' + 'and\n' + ' "complex()" fall back to "__index__()".\n' + '\n' + 'object.__round__(self[, ndigits])\n' + 'object.__trunc__(self)\n' + 'object.__floor__(self)\n' + 'object.__ceil__(self)\n' + '\n' + ' Called to implement the built-in function "round()" and ' + '"math"\n' + ' functions "trunc()", "floor()" and "ceil()". 
Unless ' + '*ndigits* is\n' + ' passed to "__round__()" all these methods should return ' + 'the value\n' + ' of the object truncated to an "Integral" (typically an ' + '"int").\n' + '\n' + ' The built-in function "int()" falls back to ' + '"__trunc__()" if\n' + ' neither "__int__()" nor "__index__()" is defined.\n', + 'objects': 'Objects, values and types\n' + '*************************\n' + '\n' + '*Objects* are Python’s abstraction for data. All data in a ' + 'Python\n' + 'program is represented by objects or by relations between ' + 'objects. (In\n' + 'a sense, and in conformance to Von Neumann’s model of a “stored\n' + 'program computer”, code is also represented by objects.)\n' + '\n' + 'Every object has an identity, a type and a value. An object’s\n' + '*identity* never changes once it has been created; you may think ' + 'of it\n' + 'as the object’s address in memory. The ‘"is"’ operator compares ' + 'the\n' + 'identity of two objects; the "id()" function returns an integer\n' + 'representing its identity.\n' + '\n' + '**CPython implementation detail:** For CPython, "id(x)" is the ' + 'memory\n' + 'address where "x" is stored.\n' + '\n' + 'An object’s type determines the operations that the object ' + 'supports\n' + '(e.g., “does it have a length?”) and also defines the possible ' + 'values\n' + 'for objects of that type. The "type()" function returns an ' + 'object’s\n' + 'type (which is an object itself). Like its identity, an ' + 'object’s\n' + '*type* is also unchangeable. [1]\n' + '\n' + 'The *value* of some objects can change. Objects whose value can\n' + 'change are said to be *mutable*; objects whose value is ' + 'unchangeable\n' + 'once they are created are called *immutable*. 
(The value of an\n' + 'immutable container object that contains a reference to a ' + 'mutable\n' + 'object can change when the latter’s value is changed; however ' + 'the\n' + 'container is still considered immutable, because the collection ' + 'of\n' + 'objects it contains cannot be changed. So, immutability is not\n' + 'strictly the same as having an unchangeable value, it is more ' + 'subtle.)\n' + 'An object’s mutability is determined by its type; for instance,\n' + 'numbers, strings and tuples are immutable, while dictionaries ' + 'and\n' + 'lists are mutable.\n' + '\n' + 'Objects are never explicitly destroyed; however, when they ' + 'become\n' + 'unreachable they may be garbage-collected. An implementation is\n' + 'allowed to postpone garbage collection or omit it altogether — it ' + 'is a\n' + 'matter of implementation quality how garbage collection is\n' + 'implemented, as long as no objects are collected that are still\n' + 'reachable.\n' + '\n' + '**CPython implementation detail:** CPython currently uses a ' + 'reference-\n' + 'counting scheme with (optional) delayed detection of cyclically ' + 'linked\n' + 'garbage, which collects most objects as soon as they become\n' + 'unreachable, but is not guaranteed to collect garbage containing\n' + 'circular references. See the documentation of the "gc" module ' + 'for\n' + 'information on controlling the collection of cyclic garbage. ' + 'Other\n' + 'implementations act differently and CPython may change. 
Do not ' + 'depend\n' + 'on immediate finalization of objects when they become unreachable ' + '(so\n' + 'you should always close files explicitly).\n' + '\n' + 'Note that the use of the implementation’s tracing or debugging\n' + 'facilities may keep objects alive that would normally be ' + 'collectable.\n' + 'Also note that catching an exception with a ‘"try"…"except"’ ' + 'statement\n' + 'may keep objects alive.\n' + '\n' + 'Some objects contain references to “external” resources such as ' + 'open\n' + 'files or windows. It is understood that these resources are ' + 'freed\n' + 'when the object is garbage-collected, but since garbage ' + 'collection is\n' + 'not guaranteed to happen, such objects also provide an explicit ' + 'way to\n' + 'release the external resource, usually a "close()" method. ' + 'Programs\n' + 'are strongly recommended to explicitly close such objects. The\n' + '‘"try"…"finally"’ statement and the ‘"with"’ statement provide\n' + 'convenient ways to do this.\n' + '\n' + 'Some objects contain references to other objects; these are ' + 'called\n' + '*containers*. Examples of containers are tuples, lists and\n' + 'dictionaries. The references are part of a container’s value. ' + 'In\n' + 'most cases, when we talk about the value of a container, we imply ' + 'the\n' + 'values, not the identities of the contained objects; however, ' + 'when we\n' + 'talk about the mutability of a container, only the identities of ' + 'the\n' + 'immediately contained objects are implied. So, if an immutable\n' + 'container (like a tuple) contains a reference to a mutable ' + 'object, its\n' + 'value changes if that mutable object is changed.\n' + '\n' + 'Types affect almost all aspects of object behavior. 
Even the\n' + 'importance of object identity is affected in some sense: for ' + 'immutable\n' + 'types, operations that compute new values may actually return a\n' + 'reference to any existing object with the same type and value, ' + 'while\n' + 'for mutable objects this is not allowed. E.g., after "a = 1; b = ' + '1",\n' + '"a" and "b" may or may not refer to the same object with the ' + 'value\n' + 'one, depending on the implementation, but after "c = []; d = []", ' + '"c"\n' + 'and "d" are guaranteed to refer to two different, unique, newly\n' + 'created empty lists. (Note that "c = d = []" assigns the same ' + 'object\n' + 'to both "c" and "d".)\n', + 'operator-summary': 'Operator precedence\n' + '*******************\n' + '\n' + 'The following table summarizes the operator precedence ' + 'in Python, from\n' + 'highest precedence (most binding) to lowest precedence ' + '(least\n' + 'binding). Operators in the same box have the same ' + 'precedence. Unless\n' + 'the syntax is explicitly given, operators are binary. 
' + 'Operators in\n' + 'the same box group left to right (except for ' + 'exponentiation, which\n' + 'groups from right to left).\n' + '\n' + 'Note that comparisons, membership tests, and identity ' + 'tests, all have\n' + 'the same precedence and have a left-to-right chaining ' + 'feature as\n' + 'described in the Comparisons section.\n' + '\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| Operator | ' + 'Description |\n' + '|=================================================|=======================================|\n' + '| "(expressions...)", "[expressions...]", "{key: | ' + 'Binding or parenthesized expression, |\n' + '| value...}", "{expressions...}" | list ' + 'display, dictionary display, set |\n' + '| | ' + 'display |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "x[index]", "x[index:index]", | ' + 'Subscription, slicing, call, |\n' + '| "x(arguments...)", "x.attribute" | ' + 'attribute reference |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "await x" | ' + 'Await expression |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "**" | ' + 'Exponentiation [5] |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "+x", "-x", "~x" | ' + 'Positive, negative, bitwise NOT |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "*", "@", "/", "//", "%" | ' + 'Multiplication, matrix |\n' + '| | ' + 'multiplication, division, floor |\n' + '| | ' + 'division, remainder [6] |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "+", "-" | ' + 'Addition and subtraction |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "<<", ">>" 
| ' + 'Shifts |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "&" | ' + 'Bitwise AND |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "^" | ' + 'Bitwise XOR |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "|" | ' + 'Bitwise OR |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "in", "not in", "is", "is not", "<", "<=", ">", | ' + 'Comparisons, including membership |\n' + '| ">=", "!=", "==" | ' + 'tests and identity tests |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "not x" | ' + 'Boolean NOT |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "and" | ' + 'Boolean AND |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "or" | ' + 'Boolean OR |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "if" – "else" | ' + 'Conditional expression |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| "lambda" | ' + 'Lambda expression |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '| ":=" | ' + 'Assignment expression |\n' + '+-------------------------------------------------+---------------------------------------+\n' + '\n' + '-[ Footnotes ]-\n' + '\n' + '[1] While "abs(x%y) < abs(y)" is true mathematically, ' + 'for floats it\n' + ' may not be true numerically due to roundoff. 
For ' + 'example, and\n' + ' assuming a platform on which a Python float is an ' + 'IEEE 754 double-\n' + ' precision number, in order that "-1e-100 % 1e100" ' + 'have the same\n' + ' sign as "1e100", the computed result is "-1e-100 + ' + '1e100", which\n' + ' is numerically exactly equal to "1e100". The ' + 'function\n' + ' "math.fmod()" returns a result whose sign matches ' + 'the sign of the\n' + ' first argument instead, and so returns "-1e-100" in ' + 'this case.\n' + ' Which approach is more appropriate depends on the ' + 'application.\n' + '\n' + '[2] If x is very close to an exact integer multiple of ' + 'y, it’s\n' + ' possible for "x//y" to be one larger than ' + '"(x-x%y)//y" due to\n' + ' rounding. In such cases, Python returns the latter ' + 'result, in\n' + ' order to preserve that "divmod(x,y)[0] * y + x % y" ' + 'be very close\n' + ' to "x".\n' + '\n' + '[3] The Unicode standard distinguishes between *code ' + 'points* (e.g.\n' + ' U+0041) and *abstract characters* (e.g. “LATIN ' + 'CAPITAL LETTER A”).\n' + ' While most abstract characters in Unicode are only ' + 'represented\n' + ' using one code point, there is a number of abstract ' + 'characters\n' + ' that can in addition be represented using a sequence ' + 'of more than\n' + ' one code point. For example, the abstract character ' + '“LATIN\n' + ' CAPITAL LETTER C WITH CEDILLA” can be represented as ' + 'a single\n' + ' *precomposed character* at code position U+00C7, or ' + 'as a sequence\n' + ' of a *base character* at code position U+0043 (LATIN ' + 'CAPITAL\n' + ' LETTER C), followed by a *combining character* at ' + 'code position\n' + ' U+0327 (COMBINING CEDILLA).\n' + '\n' + ' The comparison operators on strings compare at the ' + 'level of\n' + ' Unicode code points. This may be counter-intuitive ' + 'to humans. 
For\n' + ' example, ""\\u00C7" == "\\u0043\\u0327"" is "False", ' + 'even though both\n' + ' strings represent the same abstract character “LATIN ' + 'CAPITAL\n' + ' LETTER C WITH CEDILLA”.\n' + '\n' + ' To compare strings at the level of abstract ' + 'characters (that is,\n' + ' in a way intuitive to humans), use ' + '"unicodedata.normalize()".\n' + '\n' + '[4] Due to automatic garbage-collection, free lists, and ' + 'the dynamic\n' + ' nature of descriptors, you may notice seemingly ' + 'unusual behaviour\n' + ' in certain uses of the "is" operator, like those ' + 'involving\n' + ' comparisons between instance methods, or constants. ' + 'Check their\n' + ' documentation for more info.\n' + '\n' + '[5] The power operator "**" binds less tightly than an ' + 'arithmetic or\n' + ' bitwise unary operator on its right, that is, ' + '"2**-1" is "0.5".\n' + '\n' + '[6] The "%" operator is also used for string formatting; ' + 'the same\n' + ' precedence applies.\n', + 'pass': 'The "pass" statement\n' + '********************\n' + '\n' + ' pass_stmt ::= "pass"\n' + '\n' + '"pass" is a null operation — when it is executed, nothing happens. ' + 'It\n' + 'is useful as a placeholder when a statement is required ' + 'syntactically,\n' + 'but no code needs to be executed, for example:\n' + '\n' + ' def f(arg): pass # a function that does nothing (yet)\n' + '\n' + ' class C: pass # a class with no methods (yet)\n', + 'power': 'The power operator\n' + '******************\n' + '\n' + 'The power operator binds more tightly than unary operators on its\n' + 'left; it binds less tightly than unary operators on its right. 
' + 'The\n' + 'syntax is:\n' + '\n' + ' power ::= (await_expr | primary) ["**" u_expr]\n' + '\n' + 'Thus, in an unparenthesized sequence of power and unary operators, ' + 'the\n' + 'operators are evaluated from right to left (this does not ' + 'constrain\n' + 'the evaluation order for the operands): "-1**2" results in "-1".\n' + '\n' + 'The power operator has the same semantics as the built-in "pow()"\n' + 'function, when called with two arguments: it yields its left ' + 'argument\n' + 'raised to the power of its right argument. The numeric arguments ' + 'are\n' + 'first converted to a common type, and the result is of that type.\n' + '\n' + 'For int operands, the result has the same type as the operands ' + 'unless\n' + 'the second argument is negative; in that case, all arguments are\n' + 'converted to float and a float result is delivered. For example,\n' + '"10**2" returns "100", but "10**-2" returns "0.01".\n' + '\n' + 'Raising "0.0" to a negative power results in a ' + '"ZeroDivisionError".\n' + 'Raising a negative number to a fractional power results in a ' + '"complex"\n' + 'number. (In earlier versions it raised a "ValueError".)\n' + '\n' + 'This operation can be customized using the special "__pow__()" ' + 'method.\n', + 'raise': 'The "raise" statement\n' + '*********************\n' + '\n' + ' raise_stmt ::= "raise" [expression ["from" expression]]\n' + '\n' + 'If no expressions are present, "raise" re-raises the exception that ' + 'is\n' + 'currently being handled, which is also known as the *active\n' + 'exception*. If there isn’t currently an active exception, a\n' + '"RuntimeError" exception is raised indicating that this is an ' + 'error.\n' + '\n' + 'Otherwise, "raise" evaluates the first expression as the exception\n' + 'object. It must be either a subclass or an instance of\n' + '"BaseException". 
If it is a class, the exception instance will be\n' + 'obtained when needed by instantiating the class with no arguments.\n' + '\n' + 'The *type* of the exception is the exception instance’s class, the\n' + '*value* is the instance itself.\n' + '\n' + 'A traceback object is normally created automatically when an ' + 'exception\n' + 'is raised and attached to it as the "__traceback__" attribute, ' + 'which\n' + 'is writable. You can create an exception and set your own traceback ' + 'in\n' + 'one step using the "with_traceback()" exception method (which ' + 'returns\n' + 'the same exception instance, with its traceback set to its ' + 'argument),\n' + 'like so:\n' + '\n' + ' raise Exception("foo occurred").with_traceback(tracebackobj)\n' + '\n' + 'The "from" clause is used for exception chaining: if given, the ' + 'second\n' + '*expression* must be another exception class or instance. If the\n' + 'second expression is an exception instance, it will be attached to ' + 'the\n' + 'raised exception as the "__cause__" attribute (which is writable). ' + 'If\n' + 'the expression is an exception class, the class will be ' + 'instantiated\n' + 'and the resulting exception instance will be attached to the ' + 'raised\n' + 'exception as the "__cause__" attribute. If the raised exception is ' + 'not\n' + 'handled, both exceptions will be printed:\n' + '\n' + ' >>> try:\n' + ' ... print(1 / 0)\n' + ' ... except Exception as exc:\n' + ' ... raise RuntimeError("Something bad happened") from exc\n' + ' ...\n' + ' Traceback (most recent call last):\n' + ' File "", line 2, in \n' + ' ZeroDivisionError: division by zero\n' + '\n' + ' The above exception was the direct cause of the following ' + 'exception:\n' + '\n' + ' Traceback (most recent call last):\n' + ' File "", line 4, in \n' + ' RuntimeError: Something bad happened\n' + '\n' + 'A similar mechanism works implicitly if a new exception is raised ' + 'when\n' + 'an exception is already being handled. 
An exception may be ' + 'handled\n' + 'when an "except" or "finally" clause, or a "with" statement, is ' + 'used.\n' + 'The previous exception is then attached as the new exception’s\n' + '"__context__" attribute:\n' + '\n' + ' >>> try:\n' + ' ... print(1 / 0)\n' + ' ... except:\n' + ' ... raise RuntimeError("Something bad happened")\n' + ' ...\n' + ' Traceback (most recent call last):\n' + ' File "", line 2, in \n' + ' ZeroDivisionError: division by zero\n' + '\n' + ' During handling of the above exception, another exception ' + 'occurred:\n' + '\n' + ' Traceback (most recent call last):\n' + ' File "", line 4, in \n' + ' RuntimeError: Something bad happened\n' + '\n' + 'Exception chaining can be explicitly suppressed by specifying ' + '"None"\n' + 'in the "from" clause:\n' + '\n' + ' >>> try:\n' + ' ... print(1 / 0)\n' + ' ... except:\n' + ' ... raise RuntimeError("Something bad happened") from None\n' + ' ...\n' + ' Traceback (most recent call last):\n' + ' File "", line 4, in \n' + ' RuntimeError: Something bad happened\n' + '\n' + 'Additional information on exceptions can be found in section\n' + 'Exceptions, and information about handling exceptions is in ' + 'section\n' + 'The try statement.\n' + '\n' + 'Changed in version 3.3: "None" is now permitted as "Y" in "raise X\n' + 'from Y".\n' + '\n' + 'New in version 3.3: The "__suppress_context__" attribute to ' + 'suppress\n' + 'automatic display of the exception context.\n', + 'return': 'The "return" statement\n' + '**********************\n' + '\n' + ' return_stmt ::= "return" [expression_list]\n' + '\n' + '"return" may only occur syntactically nested in a function ' + 'definition,\n' + 'not within a nested class definition.\n' + '\n' + 'If an expression list is present, it is evaluated, else "None" is\n' + 'substituted.\n' + '\n' + '"return" leaves the current function call with the expression list ' + '(or\n' + '"None") as return value.\n' + '\n' + 'When "return" passes control out of a "try" statement with 
a ' + '"finally"\n' + 'clause, that "finally" clause is executed before really leaving ' + 'the\n' + 'function.\n' + '\n' + 'In a generator function, the "return" statement indicates that ' + 'the\n' + 'generator is done and will cause "StopIteration" to be raised. ' + 'The\n' + 'returned value (if any) is used as an argument to construct\n' + '"StopIteration" and becomes the "StopIteration.value" attribute.\n' + '\n' + 'In an asynchronous generator function, an empty "return" ' + 'statement\n' + 'indicates that the asynchronous generator is done and will cause\n' + '"StopAsyncIteration" to be raised. A non-empty "return" statement ' + 'is\n' + 'a syntax error in an asynchronous generator function.\n', + 'sequence-types': 'Emulating container types\n' + '*************************\n' + '\n' + 'The following methods can be defined to implement ' + 'container objects.\n' + 'Containers usually are *sequences* (such as "lists" or ' + '"tuples") or\n' + '*mappings* (like "dictionaries"), but can represent other ' + 'containers\n' + 'as well. The first set of methods is used either to ' + 'emulate a\n' + 'sequence or to emulate a mapping; the difference is that ' + 'for a\n' + 'sequence, the allowable keys should be the integers *k* ' + 'for which "0\n' + '<= k < N" where *N* is the length of the sequence, or ' + '"slice" objects,\n' + 'which define a range of items. It is also recommended ' + 'that mappings\n' + 'provide the methods "keys()", "values()", "items()", ' + '"get()",\n' + '"clear()", "setdefault()", "pop()", "popitem()", "copy()", ' + 'and\n' + '"update()" behaving similar to those for Python’s ' + 'standard\n' + '"dictionary" objects. The "collections.abc" module ' + 'provides a\n' + '"MutableMapping" *abstract base class* to help create ' + 'those methods\n' + 'from a base set of "__getitem__()", "__setitem__()", ' + '"__delitem__()",\n' + 'and "keys()". 
Mutable sequences should provide methods ' + '"append()",\n' + '"count()", "index()", "extend()", "insert()", "pop()", ' + '"remove()",\n' + '"reverse()" and "sort()", like Python standard "list" ' + 'objects.\n' + 'Finally, sequence types should implement addition ' + '(meaning\n' + 'concatenation) and multiplication (meaning repetition) by ' + 'defining the\n' + 'methods "__add__()", "__radd__()", "__iadd__()", ' + '"__mul__()",\n' + '"__rmul__()" and "__imul__()" described below; they should ' + 'not define\n' + 'other numerical operators. It is recommended that both ' + 'mappings and\n' + 'sequences implement the "__contains__()" method to allow ' + 'efficient use\n' + 'of the "in" operator; for mappings, "in" should search the ' + 'mapping’s\n' + 'keys; for sequences, it should search through the values. ' + 'It is\n' + 'further recommended that both mappings and sequences ' + 'implement the\n' + '"__iter__()" method to allow efficient iteration through ' + 'the\n' + 'container; for mappings, "__iter__()" should iterate ' + 'through the\n' + 'object’s keys; for sequences, it should iterate through ' + 'the values.\n' + '\n' + 'object.__len__(self)\n' + '\n' + ' Called to implement the built-in function "len()". ' + 'Should return\n' + ' the length of the object, an integer ">=" 0. Also, an ' + 'object that\n' + ' doesn’t define a "__bool__()" method and whose ' + '"__len__()" method\n' + ' returns zero is considered to be false in a Boolean ' + 'context.\n' + '\n' + ' **CPython implementation detail:** In CPython, the ' + 'length is\n' + ' required to be at most "sys.maxsize". If the length is ' + 'larger than\n' + ' "sys.maxsize" some features (such as "len()") may ' + 'raise\n' + ' "OverflowError". To prevent raising "OverflowError" by ' + 'truth value\n' + ' testing, an object must define a "__bool__()" method.\n' + '\n' + 'object.__length_hint__(self)\n' + '\n' + ' Called to implement "operator.length_hint()". 
Should ' + 'return an\n' + ' estimated length for the object (which may be greater ' + 'or less than\n' + ' the actual length). The length must be an integer ">=" ' + '0. The\n' + ' return value may also be "NotImplemented", which is ' + 'treated the\n' + ' same as if the "__length_hint__" method didn’t exist at ' + 'all. This\n' + ' method is purely an optimization and is never required ' + 'for\n' + ' correctness.\n' + '\n' + ' New in version 3.4.\n' + '\n' + 'Note:\n' + '\n' + ' Slicing is done exclusively with the following three ' + 'methods. A\n' + ' call like\n' + '\n' + ' a[1:2] = b\n' + '\n' + ' is translated to\n' + '\n' + ' a[slice(1, 2, None)] = b\n' + '\n' + ' and so forth. Missing slice items are always filled in ' + 'with "None".\n' + '\n' + 'object.__getitem__(self, key)\n' + '\n' + ' Called to implement evaluation of "self[key]". For ' + '*sequence*\n' + ' types, the accepted keys should be integers and slice ' + 'objects.\n' + ' Note that the special interpretation of negative ' + 'indexes (if the\n' + ' class wishes to emulate a *sequence* type) is up to ' + 'the\n' + ' "__getitem__()" method. If *key* is of an inappropriate ' + 'type,\n' + ' "TypeError" may be raised; if of a value outside the ' + 'set of indexes\n' + ' for the sequence (after any special interpretation of ' + 'negative\n' + ' values), "IndexError" should be raised. 
For *mapping* ' + 'types, if\n' + ' *key* is missing (not in the container), "KeyError" ' + 'should be\n' + ' raised.\n' + '\n' + ' Note:\n' + '\n' + ' "for" loops expect that an "IndexError" will be ' + 'raised for\n' + ' illegal indexes to allow proper detection of the end ' + 'of the\n' + ' sequence.\n' + '\n' + ' Note:\n' + '\n' + ' When subscripting a *class*, the special class ' + 'method\n' + ' "__class_getitem__()" may be called instead of ' + '"__getitem__()".\n' + ' See __class_getitem__ versus __getitem__ for more ' + 'details.\n' + '\n' + 'object.__setitem__(self, key, value)\n' + '\n' + ' Called to implement assignment to "self[key]". Same ' + 'note as for\n' + ' "__getitem__()". This should only be implemented for ' + 'mappings if\n' + ' the objects support changes to the values for keys, or ' + 'if new keys\n' + ' can be added, or for sequences if elements can be ' + 'replaced. The\n' + ' same exceptions should be raised for improper *key* ' + 'values as for\n' + ' the "__getitem__()" method.\n' + '\n' + 'object.__delitem__(self, key)\n' + '\n' + ' Called to implement deletion of "self[key]". Same note ' + 'as for\n' + ' "__getitem__()". This should only be implemented for ' + 'mappings if\n' + ' the objects support removal of keys, or for sequences ' + 'if elements\n' + ' can be removed from the sequence. The same exceptions ' + 'should be\n' + ' raised for improper *key* values as for the ' + '"__getitem__()" method.\n' + '\n' + 'object.__missing__(self, key)\n' + '\n' + ' Called by "dict"."__getitem__()" to implement ' + '"self[key]" for dict\n' + ' subclasses when key is not in the dictionary.\n' + '\n' + 'object.__iter__(self)\n' + '\n' + ' This method is called when an *iterator* is required ' + 'for a\n' + ' container. This method should return a new iterator ' + 'object that can\n' + ' iterate over all the objects in the container. 
For ' + 'mappings, it\n' + ' should iterate over the keys of the container.\n' + '\n' + 'object.__reversed__(self)\n' + '\n' + ' Called (if present) by the "reversed()" built-in to ' + 'implement\n' + ' reverse iteration. It should return a new iterator ' + 'object that\n' + ' iterates over all the objects in the container in ' + 'reverse order.\n' + '\n' + ' If the "__reversed__()" method is not provided, the ' + '"reversed()"\n' + ' built-in will fall back to using the sequence protocol ' + '("__len__()"\n' + ' and "__getitem__()"). Objects that support the ' + 'sequence protocol\n' + ' should only provide "__reversed__()" if they can ' + 'provide an\n' + ' implementation that is more efficient than the one ' + 'provided by\n' + ' "reversed()".\n' + '\n' + 'The membership test operators ("in" and "not in") are ' + 'normally\n' + 'implemented as an iteration through a container. However, ' + 'container\n' + 'objects can supply the following special method with a ' + 'more efficient\n' + 'implementation, which also does not require the object be ' + 'iterable.\n' + '\n' + 'object.__contains__(self, item)\n' + '\n' + ' Called to implement membership test operators. Should ' + 'return true\n' + ' if *item* is in *self*, false otherwise. For mapping ' + 'objects, this\n' + ' should consider the keys of the mapping rather than the ' + 'values or\n' + ' the key-item pairs.\n' + '\n' + ' For objects that don’t define "__contains__()", the ' + 'membership test\n' + ' first tries iteration via "__iter__()", then the old ' + 'sequence\n' + ' iteration protocol via "__getitem__()", see this ' + 'section in the\n' + ' language reference.\n', + 'shifting': 'Shifting operations\n' + '*******************\n' + '\n' + 'The shifting operations have lower priority than the arithmetic\n' + 'operations:\n' + '\n' + ' shift_expr ::= a_expr | shift_expr ("<<" | ">>") a_expr\n' + '\n' + 'These operators accept integers as arguments. 
They shift the ' + 'first\n' + 'argument to the left or right by the number of bits given by ' + 'the\n' + 'second argument.\n' + '\n' + 'This operation can be customized using the special ' + '"__lshift__()" and\n' + '"__rshift__()" methods.\n' + '\n' + 'A right shift by *n* bits is defined as floor division by ' + '"pow(2,n)".\n' + 'A left shift by *n* bits is defined as multiplication with ' + '"pow(2,n)".\n', + 'slicings': 'Slicings\n' + '********\n' + '\n' + 'A slicing selects a range of items in a sequence object (e.g., ' + 'a\n' + 'string, tuple or list). Slicings may be used as expressions or ' + 'as\n' + 'targets in assignment or "del" statements. The syntax for a ' + 'slicing:\n' + '\n' + ' slicing ::= primary "[" slice_list "]"\n' + ' slice_list ::= slice_item ("," slice_item)* [","]\n' + ' slice_item ::= expression | proper_slice\n' + ' proper_slice ::= [lower_bound] ":" [upper_bound] [ ":" ' + '[stride] ]\n' + ' lower_bound ::= expression\n' + ' upper_bound ::= expression\n' + ' stride ::= expression\n' + '\n' + 'There is ambiguity in the formal syntax here: anything that ' + 'looks like\n' + 'an expression list also looks like a slice list, so any ' + 'subscription\n' + 'can be interpreted as a slicing. Rather than further ' + 'complicating the\n' + 'syntax, this is disambiguated by defining that in this case the\n' + 'interpretation as a subscription takes priority over the\n' + 'interpretation as a slicing (this is the case if the slice list\n' + 'contains no proper slice).\n' + '\n' + 'The semantics for a slicing are as follows. The primary is ' + 'indexed\n' + '(using the same "__getitem__()" method as normal subscription) ' + 'with a\n' + 'key that is constructed from the slice list, as follows. If the ' + 'slice\n' + 'list contains at least one comma, the key is a tuple containing ' + 'the\n' + 'conversion of the slice items; otherwise, the conversion of the ' + 'lone\n' + 'slice item is the key. 
The conversion of a slice item that is ' + 'an\n' + 'expression is that expression. The conversion of a proper slice ' + 'is a\n' + 'slice object (see section The standard type hierarchy) whose ' + '"start",\n' + '"stop" and "step" attributes are the values of the expressions ' + 'given\n' + 'as lower bound, upper bound and stride, respectively, ' + 'substituting\n' + '"None" for missing expressions.\n', + 'specialattrs': 'Special Attributes\n' + '******************\n' + '\n' + 'The implementation adds a few special read-only attributes ' + 'to several\n' + 'object types, where they are relevant. Some of these are ' + 'not reported\n' + 'by the "dir()" built-in function.\n' + '\n' + 'object.__dict__\n' + '\n' + ' A dictionary or other mapping object used to store an ' + 'object’s\n' + ' (writable) attributes.\n' + '\n' + 'instance.__class__\n' + '\n' + ' The class to which a class instance belongs.\n' + '\n' + 'class.__bases__\n' + '\n' + ' The tuple of base classes of a class object.\n' + '\n' + 'definition.__name__\n' + '\n' + ' The name of the class, function, method, descriptor, or ' + 'generator\n' + ' instance.\n' + '\n' + 'definition.__qualname__\n' + '\n' + ' The *qualified name* of the class, function, method, ' + 'descriptor, or\n' + ' generator instance.\n' + '\n' + ' New in version 3.3.\n' + '\n' + 'class.__mro__\n' + '\n' + ' This attribute is a tuple of classes that are considered ' + 'when\n' + ' looking for base classes during method resolution.\n' + '\n' + 'class.mro()\n' + '\n' + ' This method can be overridden by a metaclass to customize ' + 'the\n' + ' method resolution order for its instances. It is called ' + 'at class\n' + ' instantiation, and its result is stored in "__mro__".\n' + '\n' + 'class.__subclasses__()\n' + '\n' + ' Each class keeps a list of weak references to its ' + 'immediate\n' + ' subclasses. This method returns a list of all those ' + 'references\n' + ' still alive. The list is in definition order. 
Example:\n' + '\n' + ' >>> int.__subclasses__()\n' + " []\n", + 'specialnames': 'Special method names\n' + '********************\n' + '\n' + 'A class can implement certain operations that are invoked by ' + 'special\n' + 'syntax (such as arithmetic operations or subscripting and ' + 'slicing) by\n' + 'defining methods with special names. This is Python’s ' + 'approach to\n' + '*operator overloading*, allowing classes to define their own ' + 'behavior\n' + 'with respect to language operators. For instance, if a ' + 'class defines\n' + 'a method named "__getitem__()", and "x" is an instance of ' + 'this class,\n' + 'then "x[i]" is roughly equivalent to "type(x).__getitem__(x, ' + 'i)".\n' + 'Except where mentioned, attempts to execute an operation ' + 'raise an\n' + 'exception when no appropriate method is defined (typically\n' + '"AttributeError" or "TypeError").\n' + '\n' + 'Setting a special method to "None" indicates that the ' + 'corresponding\n' + 'operation is not available. For example, if a class sets ' + '"__iter__()"\n' + 'to "None", the class is not iterable, so calling "iter()" on ' + 'its\n' + 'instances will raise a "TypeError" (without falling back to\n' + '"__getitem__()"). [2]\n' + '\n' + 'When implementing a class that emulates any built-in type, ' + 'it is\n' + 'important that the emulation only be implemented to the ' + 'degree that it\n' + 'makes sense for the object being modelled. For example, ' + 'some\n' + 'sequences may work well with retrieval of individual ' + 'elements, but\n' + 'extracting a slice may not make sense. (One example of this ' + 'is the\n' + '"NodeList" interface in the W3C’s Document Object Model.)\n' + '\n' + '\n' + 'Basic customization\n' + '===================\n' + '\n' + 'object.__new__(cls[, ...])\n' + '\n' + ' Called to create a new instance of class *cls*. 
' + '"__new__()" is a\n' + ' static method (special-cased so you need not declare it ' + 'as such)\n' + ' that takes the class of which an instance was requested ' + 'as its\n' + ' first argument. The remaining arguments are those passed ' + 'to the\n' + ' object constructor expression (the call to the class). ' + 'The return\n' + ' value of "__new__()" should be the new object instance ' + '(usually an\n' + ' instance of *cls*).\n' + '\n' + ' Typical implementations create a new instance of the ' + 'class by\n' + ' invoking the superclass’s "__new__()" method using\n' + ' "super().__new__(cls[, ...])" with appropriate arguments ' + 'and then\n' + ' modifying the newly created instance as necessary before ' + 'returning\n' + ' it.\n' + '\n' + ' If "__new__()" is invoked during object construction and ' + 'it returns\n' + ' an instance of *cls*, then the new instance’s ' + '"__init__()" method\n' + ' will be invoked like "__init__(self[, ...])", where ' + '*self* is the\n' + ' new instance and the remaining arguments are the same as ' + 'were\n' + ' passed to the object constructor.\n' + '\n' + ' If "__new__()" does not return an instance of *cls*, then ' + 'the new\n' + ' instance’s "__init__()" method will not be invoked.\n' + '\n' + ' "__new__()" is intended mainly to allow subclasses of ' + 'immutable\n' + ' types (like int, str, or tuple) to customize instance ' + 'creation. It\n' + ' is also commonly overridden in custom metaclasses in ' + 'order to\n' + ' customize class creation.\n' + '\n' + 'object.__init__(self[, ...])\n' + '\n' + ' Called after the instance has been created (by ' + '"__new__()"), but\n' + ' before it is returned to the caller. The arguments are ' + 'those\n' + ' passed to the class constructor expression. 
If a base ' + 'class has an\n' + ' "__init__()" method, the derived class’s "__init__()" ' + 'method, if\n' + ' any, must explicitly call it to ensure proper ' + 'initialization of the\n' + ' base class part of the instance; for example:\n' + ' "super().__init__([args...])".\n' + '\n' + ' Because "__new__()" and "__init__()" work together in ' + 'constructing\n' + ' objects ("__new__()" to create it, and "__init__()" to ' + 'customize\n' + ' it), no non-"None" value may be returned by "__init__()"; ' + 'doing so\n' + ' will cause a "TypeError" to be raised at runtime.\n' + '\n' + 'object.__del__(self)\n' + '\n' + ' Called when the instance is about to be destroyed. This ' + 'is also\n' + ' called a finalizer or (improperly) a destructor. If a ' + 'base class\n' + ' has a "__del__()" method, the derived class’s "__del__()" ' + 'method,\n' + ' if any, must explicitly call it to ensure proper deletion ' + 'of the\n' + ' base class part of the instance.\n' + '\n' + ' It is possible (though not recommended!) for the ' + '"__del__()" method\n' + ' to postpone destruction of the instance by creating a new ' + 'reference\n' + ' to it. This is called object *resurrection*. It is\n' + ' implementation-dependent whether "__del__()" is called a ' + 'second\n' + ' time when a resurrected object is about to be destroyed; ' + 'the\n' + ' current *CPython* implementation only calls it once.\n' + '\n' + ' It is not guaranteed that "__del__()" methods are called ' + 'for\n' + ' objects that still exist when the interpreter exits.\n' + '\n' + ' Note:\n' + '\n' + ' "del x" doesn’t directly call "x.__del__()" — the ' + 'former\n' + ' decrements the reference count for "x" by one, and the ' + 'latter is\n' + ' only called when "x"’s reference count reaches zero.\n' + '\n' + ' **CPython implementation detail:** It is possible for a ' + 'reference\n' + ' cycle to prevent the reference count of an object from ' + 'going to\n' + ' zero. 
In this case, the cycle will be later detected and ' + 'deleted\n' + ' by the *cyclic garbage collector*. A common cause of ' + 'reference\n' + ' cycles is when an exception has been caught in a local ' + 'variable.\n' + ' The frame’s locals then reference the exception, which ' + 'references\n' + ' its own traceback, which references the locals of all ' + 'frames caught\n' + ' in the traceback.\n' + '\n' + ' See also: Documentation for the "gc" module.\n' + '\n' + ' Warning:\n' + '\n' + ' Due to the precarious circumstances under which ' + '"__del__()"\n' + ' methods are invoked, exceptions that occur during their ' + 'execution\n' + ' are ignored, and a warning is printed to "sys.stderr" ' + 'instead.\n' + ' In particular:\n' + '\n' + ' * "__del__()" can be invoked when arbitrary code is ' + 'being\n' + ' executed, including from any arbitrary thread. If ' + '"__del__()"\n' + ' needs to take a lock or invoke any other blocking ' + 'resource, it\n' + ' may deadlock as the resource may already be taken by ' + 'the code\n' + ' that gets interrupted to execute "__del__()".\n' + '\n' + ' * "__del__()" can be executed during interpreter ' + 'shutdown. As a\n' + ' consequence, the global variables it needs to access ' + '(including\n' + ' other modules) may already have been deleted or set ' + 'to "None".\n' + ' Python guarantees that globals whose name begins with ' + 'a single\n' + ' underscore are deleted from their module before other ' + 'globals\n' + ' are deleted; if no other references to such globals ' + 'exist, this\n' + ' may help in assuring that imported modules are still ' + 'available\n' + ' at the time when the "__del__()" method is called.\n' + '\n' + 'object.__repr__(self)\n' + '\n' + ' Called by the "repr()" built-in function to compute the ' + '“official”\n' + ' string representation of an object. 
If at all possible, ' + 'this\n' + ' should look like a valid Python expression that could be ' + 'used to\n' + ' recreate an object with the same value (given an ' + 'appropriate\n' + ' environment). If this is not possible, a string of the ' + 'form\n' + ' "<...some useful description...>" should be returned. The ' + 'return\n' + ' value must be a string object. If a class defines ' + '"__repr__()" but\n' + ' not "__str__()", then "__repr__()" is also used when an ' + '“informal”\n' + ' string representation of instances of that class is ' + 'required.\n' + '\n' + ' This is typically used for debugging, so it is important ' + 'that the\n' + ' representation is information-rich and unambiguous.\n' + '\n' + 'object.__str__(self)\n' + '\n' + ' Called by "str(object)" and the built-in functions ' + '"format()" and\n' + ' "print()" to compute the “informal” or nicely printable ' + 'string\n' + ' representation of an object. The return value must be a ' + 'string\n' + ' object.\n' + '\n' + ' This method differs from "object.__repr__()" in that ' + 'there is no\n' + ' expectation that "__str__()" return a valid Python ' + 'expression: a\n' + ' more convenient or concise representation can be used.\n' + '\n' + ' The default implementation defined by the built-in type ' + '"object"\n' + ' calls "object.__repr__()".\n' + '\n' + 'object.__bytes__(self)\n' + '\n' + ' Called by bytes to compute a byte-string representation ' + 'of an\n' + ' object. This should return a "bytes" object.\n' + '\n' + 'object.__format__(self, format_spec)\n' + '\n' + ' Called by the "format()" built-in function, and by ' + 'extension,\n' + ' evaluation of formatted string literals and the ' + '"str.format()"\n' + ' method, to produce a “formatted” string representation of ' + 'an\n' + ' object. The *format_spec* argument is a string that ' + 'contains a\n' + ' description of the formatting options desired. 
The ' + 'interpretation\n' + ' of the *format_spec* argument is up to the type ' + 'implementing\n' + ' "__format__()", however most classes will either ' + 'delegate\n' + ' formatting to one of the built-in types, or use a ' + 'similar\n' + ' formatting option syntax.\n' + '\n' + ' See Format Specification Mini-Language for a description ' + 'of the\n' + ' standard formatting syntax.\n' + '\n' + ' The return value must be a string object.\n' + '\n' + ' Changed in version 3.4: The __format__ method of "object" ' + 'itself\n' + ' raises a "TypeError" if passed any non-empty string.\n' + '\n' + ' Changed in version 3.7: "object.__format__(x, \'\')" is ' + 'now\n' + ' equivalent to "str(x)" rather than "format(str(x), ' + '\'\')".\n' + '\n' + 'object.__lt__(self, other)\n' + 'object.__le__(self, other)\n' + 'object.__eq__(self, other)\n' + 'object.__ne__(self, other)\n' + 'object.__gt__(self, other)\n' + 'object.__ge__(self, other)\n' + '\n' + ' These are the so-called “rich comparison” methods. The\n' + ' correspondence between operator symbols and method names ' + 'is as\n' + ' follows: "xy" calls\n' + ' "x.__gt__(y)", and "x>=y" calls "x.__ge__(y)".\n' + '\n' + ' A rich comparison method may return the singleton ' + '"NotImplemented"\n' + ' if it does not implement the operation for a given pair ' + 'of\n' + ' arguments. By convention, "False" and "True" are returned ' + 'for a\n' + ' successful comparison. However, these methods can return ' + 'any value,\n' + ' so if the comparison operator is used in a Boolean ' + 'context (e.g.,\n' + ' in the condition of an "if" statement), Python will call ' + '"bool()"\n' + ' on the value to determine if the result is true or ' + 'false.\n' + '\n' + ' By default, "object" implements "__eq__()" by using "is", ' + 'returning\n' + ' "NotImplemented" in the case of a false comparison: "True ' + 'if x is y\n' + ' else NotImplemented". 
For "__ne__()", by default it ' + 'delegates to\n' + ' "__eq__()" and inverts the result unless it is ' + '"NotImplemented".\n' + ' There are no other implied relationships among the ' + 'comparison\n' + ' operators or default implementations; for example, the ' + 'truth of\n' + ' "(x.__hash__".\n' + '\n' + ' If a class that does not override "__eq__()" wishes to ' + 'suppress\n' + ' hash support, it should include "__hash__ = None" in the ' + 'class\n' + ' definition. A class which defines its own "__hash__()" ' + 'that\n' + ' explicitly raises a "TypeError" would be incorrectly ' + 'identified as\n' + ' hashable by an "isinstance(obj, ' + 'collections.abc.Hashable)" call.\n' + '\n' + ' Note:\n' + '\n' + ' By default, the "__hash__()" values of str and bytes ' + 'objects are\n' + ' “salted” with an unpredictable random value. Although ' + 'they\n' + ' remain constant within an individual Python process, ' + 'they are not\n' + ' predictable between repeated invocations of Python.This ' + 'is\n' + ' intended to provide protection against a ' + 'denial-of-service caused\n' + ' by carefully chosen inputs that exploit the worst case\n' + ' performance of a dict insertion, O(n^2) complexity. ' + 'See\n' + ' http://www.ocert.org/advisories/ocert-2011-003.html ' + 'for\n' + ' details.Changing hash values affects the iteration ' + 'order of sets.\n' + ' Python has never made guarantees about this ordering ' + '(and it\n' + ' typically varies between 32-bit and 64-bit builds).See ' + 'also\n' + ' "PYTHONHASHSEED".\n' + '\n' + ' Changed in version 3.3: Hash randomization is enabled by ' + 'default.\n' + '\n' + 'object.__bool__(self)\n' + '\n' + ' Called to implement truth value testing and the built-in ' + 'operation\n' + ' "bool()"; should return "False" or "True". When this ' + 'method is not\n' + ' defined, "__len__()" is called, if it is defined, and the ' + 'object is\n' + ' considered true if its result is nonzero. 
If a class ' + 'defines\n' + ' neither "__len__()" nor "__bool__()", all its instances ' + 'are\n' + ' considered true.\n' + '\n' + '\n' + 'Customizing attribute access\n' + '============================\n' + '\n' + 'The following methods can be defined to customize the ' + 'meaning of\n' + 'attribute access (use of, assignment to, or deletion of ' + '"x.name") for\n' + 'class instances.\n' + '\n' + 'object.__getattr__(self, name)\n' + '\n' + ' Called when the default attribute access fails with an\n' + ' "AttributeError" (either "__getattribute__()" raises an\n' + ' "AttributeError" because *name* is not an instance ' + 'attribute or an\n' + ' attribute in the class tree for "self"; or "__get__()" of ' + 'a *name*\n' + ' property raises "AttributeError"). This method should ' + 'either\n' + ' return the (computed) attribute value or raise an ' + '"AttributeError"\n' + ' exception.\n' + '\n' + ' Note that if the attribute is found through the normal ' + 'mechanism,\n' + ' "__getattr__()" is not called. (This is an intentional ' + 'asymmetry\n' + ' between "__getattr__()" and "__setattr__()".) This is ' + 'done both for\n' + ' efficiency reasons and because otherwise "__getattr__()" ' + 'would have\n' + ' no way to access other attributes of the instance. Note ' + 'that at\n' + ' least for instance variables, you can fake total control ' + 'by not\n' + ' inserting any values in the instance attribute dictionary ' + '(but\n' + ' instead inserting them in another object). See the\n' + ' "__getattribute__()" method below for a way to actually ' + 'get total\n' + ' control over attribute access.\n' + '\n' + 'object.__getattribute__(self, name)\n' + '\n' + ' Called unconditionally to implement attribute accesses ' + 'for\n' + ' instances of the class. If the class also defines ' + '"__getattr__()",\n' + ' the latter will not be called unless "__getattribute__()" ' + 'either\n' + ' calls it explicitly or raises an "AttributeError". 
This ' + 'method\n' + ' should return the (computed) attribute value or raise an\n' + ' "AttributeError" exception. In order to avoid infinite ' + 'recursion in\n' + ' this method, its implementation should always call the ' + 'base class\n' + ' method with the same name to access any attributes it ' + 'needs, for\n' + ' example, "object.__getattribute__(self, name)".\n' + '\n' + ' Note:\n' + '\n' + ' This method may still be bypassed when looking up ' + 'special methods\n' + ' as the result of implicit invocation via language ' + 'syntax or\n' + ' built-in functions. See Special method lookup.\n' + '\n' + ' For certain sensitive attribute accesses, raises an ' + 'auditing event\n' + ' "object.__getattr__" with arguments "obj" and "name".\n' + '\n' + 'object.__setattr__(self, name, value)\n' + '\n' + ' Called when an attribute assignment is attempted. This ' + 'is called\n' + ' instead of the normal mechanism (i.e. store the value in ' + 'the\n' + ' instance dictionary). *name* is the attribute name, ' + '*value* is the\n' + ' value to be assigned to it.\n' + '\n' + ' If "__setattr__()" wants to assign to an instance ' + 'attribute, it\n' + ' should call the base class method with the same name, for ' + 'example,\n' + ' "object.__setattr__(self, name, value)".\n' + '\n' + ' For certain sensitive attribute assignments, raises an ' + 'auditing\n' + ' event "object.__setattr__" with arguments "obj", "name", ' + '"value".\n' + '\n' + 'object.__delattr__(self, name)\n' + '\n' + ' Like "__setattr__()" but for attribute deletion instead ' + 'of\n' + ' assignment. This should only be implemented if "del ' + 'obj.name" is\n' + ' meaningful for the object.\n' + '\n' + ' For certain sensitive attribute deletions, raises an ' + 'auditing event\n' + ' "object.__delattr__" with arguments "obj" and "name".\n' + '\n' + 'object.__dir__(self)\n' + '\n' + ' Called when "dir()" is called on the object. A sequence ' + 'must be\n' + ' returned. 
"dir()" converts the returned sequence to a ' + 'list and\n' + ' sorts it.\n' + '\n' + '\n' + 'Customizing module attribute access\n' + '-----------------------------------\n' + '\n' + 'Special names "__getattr__" and "__dir__" can be also used ' + 'to\n' + 'customize access to module attributes. The "__getattr__" ' + 'function at\n' + 'the module level should accept one argument which is the ' + 'name of an\n' + 'attribute and return the computed value or raise an ' + '"AttributeError".\n' + 'If an attribute is not found on a module object through the ' + 'normal\n' + 'lookup, i.e. "object.__getattribute__()", then "__getattr__" ' + 'is\n' + 'searched in the module "__dict__" before raising an ' + '"AttributeError".\n' + 'If found, it is called with the attribute name and the ' + 'result is\n' + 'returned.\n' + '\n' + 'The "__dir__" function should accept no arguments, and ' + 'return a\n' + 'sequence of strings that represents the names accessible on ' + 'module. If\n' + 'present, this function overrides the standard "dir()" search ' + 'on a\n' + 'module.\n' + '\n' + 'For a more fine grained customization of the module behavior ' + '(setting\n' + 'attributes, properties, etc.), one can set the "__class__" ' + 'attribute\n' + 'of a module object to a subclass of "types.ModuleType". 
For ' + 'example:\n' + '\n' + ' import sys\n' + ' from types import ModuleType\n' + '\n' + ' class VerboseModule(ModuleType):\n' + ' def __repr__(self):\n' + " return f'Verbose {self.__name__}'\n" + '\n' + ' def __setattr__(self, attr, value):\n' + " print(f'Setting {attr}...')\n" + ' super().__setattr__(attr, value)\n' + '\n' + ' sys.modules[__name__].__class__ = VerboseModule\n' + '\n' + 'Note:\n' + '\n' + ' Defining module "__getattr__" and setting module ' + '"__class__" only\n' + ' affect lookups made using the attribute access syntax – ' + 'directly\n' + ' accessing the module globals (whether by code within the ' + 'module, or\n' + ' via a reference to the module’s globals dictionary) is ' + 'unaffected.\n' + '\n' + 'Changed in version 3.5: "__class__" module attribute is now ' + 'writable.\n' + '\n' + 'New in version 3.7: "__getattr__" and "__dir__" module ' + 'attributes.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 562** - Module __getattr__ and __dir__\n' + ' Describes the "__getattr__" and "__dir__" functions on ' + 'modules.\n' + '\n' + '\n' + 'Implementing Descriptors\n' + '------------------------\n' + '\n' + 'The following methods only apply when an instance of the ' + 'class\n' + 'containing the method (a so-called *descriptor* class) ' + 'appears in an\n' + '*owner* class (the descriptor must be in either the owner’s ' + 'class\n' + 'dictionary or in the class dictionary for one of its ' + 'parents). In the\n' + 'examples below, “the attribute” refers to the attribute ' + 'whose name is\n' + 'the key of the property in the owner class’ "__dict__".\n' + '\n' + 'object.__get__(self, instance, owner=None)\n' + '\n' + ' Called to get the attribute of the owner class (class ' + 'attribute\n' + ' access) or of an instance of that class (instance ' + 'attribute\n' + ' access). 
The optional *owner* argument is the owner ' + 'class, while\n' + ' *instance* is the instance that the attribute was ' + 'accessed through,\n' + ' or "None" when the attribute is accessed through the ' + '*owner*.\n' + '\n' + ' This method should return the computed attribute value or ' + 'raise an\n' + ' "AttributeError" exception.\n' + '\n' + ' **PEP 252** specifies that "__get__()" is callable with ' + 'one or two\n' + ' arguments. Python’s own built-in descriptors support ' + 'this\n' + ' specification; however, it is likely that some ' + 'third-party tools\n' + ' have descriptors that require both arguments. Python’s ' + 'own\n' + ' "__getattribute__()" implementation always passes in both ' + 'arguments\n' + ' whether they are required or not.\n' + '\n' + 'object.__set__(self, instance, value)\n' + '\n' + ' Called to set the attribute on an instance *instance* of ' + 'the owner\n' + ' class to a new value, *value*.\n' + '\n' + ' Note, adding "__set__()" or "__delete__()" changes the ' + 'kind of\n' + ' descriptor to a “data descriptor”. See Invoking ' + 'Descriptors for\n' + ' more details.\n' + '\n' + 'object.__delete__(self, instance)\n' + '\n' + ' Called to delete the attribute on an instance *instance* ' + 'of the\n' + ' owner class.\n' + '\n' + 'The attribute "__objclass__" is interpreted by the "inspect" ' + 'module as\n' + 'specifying the class where this object was defined (setting ' + 'this\n' + 'appropriately can assist in runtime introspection of dynamic ' + 'class\n' + 'attributes). 
For callables, it may indicate that an instance ' + 'of the\n' + 'given type (or a subclass) is expected or required as the ' + 'first\n' + 'positional argument (for example, CPython sets this ' + 'attribute for\n' + 'unbound methods that are implemented in C).\n' + '\n' + '\n' + 'Invoking Descriptors\n' + '--------------------\n' + '\n' + 'In general, a descriptor is an object attribute with ' + '“binding\n' + 'behavior”, one whose attribute access has been overridden by ' + 'methods\n' + 'in the descriptor protocol: "__get__()", "__set__()", and\n' + '"__delete__()". If any of those methods are defined for an ' + 'object, it\n' + 'is said to be a descriptor.\n' + '\n' + 'The default behavior for attribute access is to get, set, or ' + 'delete\n' + 'the attribute from an object’s dictionary. For instance, ' + '"a.x" has a\n' + 'lookup chain starting with "a.__dict__[\'x\']", then\n' + '"type(a).__dict__[\'x\']", and continuing through the base ' + 'classes of\n' + '"type(a)" excluding metaclasses.\n' + '\n' + 'However, if the looked-up value is an object defining one of ' + 'the\n' + 'descriptor methods, then Python may override the default ' + 'behavior and\n' + 'invoke the descriptor method instead. Where this occurs in ' + 'the\n' + 'precedence chain depends on which descriptor methods were ' + 'defined and\n' + 'how they were called.\n' + '\n' + 'The starting point for descriptor invocation is a binding, ' + '"a.x". 
How\n' + 'the arguments are assembled depends on "a":\n' + '\n' + 'Direct Call\n' + ' The simplest and least common call is when user code ' + 'directly\n' + ' invokes a descriptor method: "x.__get__(a)".\n' + '\n' + 'Instance Binding\n' + ' If binding to an object instance, "a.x" is transformed ' + 'into the\n' + ' call: "type(a).__dict__[\'x\'].__get__(a, type(a))".\n' + '\n' + 'Class Binding\n' + ' If binding to a class, "A.x" is transformed into the ' + 'call:\n' + ' "A.__dict__[\'x\'].__get__(None, A)".\n' + '\n' + 'Super Binding\n' + ' If "a" is an instance of "super", then the binding ' + '"super(B,\n' + ' obj).m()" searches "obj.__class__.__mro__" for the base ' + 'class "A"\n' + ' immediately following "B" and then invokes the descriptor ' + 'with the\n' + ' call: "A.__dict__[\'m\'].__get__(obj, obj.__class__)".\n' + '\n' + 'For instance bindings, the precedence of descriptor ' + 'invocation depends\n' + 'on which descriptor methods are defined. A descriptor can ' + 'define any\n' + 'combination of "__get__()", "__set__()" and "__delete__()". ' + 'If it\n' + 'does not define "__get__()", then accessing the attribute ' + 'will return\n' + 'the descriptor object itself unless there is a value in the ' + 'object’s\n' + 'instance dictionary. If the descriptor defines "__set__()" ' + 'and/or\n' + '"__delete__()", it is a data descriptor; if it defines ' + 'neither, it is\n' + 'a non-data descriptor. Normally, data descriptors define ' + 'both\n' + '"__get__()" and "__set__()", while non-data descriptors have ' + 'just the\n' + '"__get__()" method. Data descriptors with "__get__()" and ' + '"__set__()"\n' + '(and/or "__delete__()") defined always override a ' + 'redefinition in an\n' + 'instance dictionary. In contrast, non-data descriptors can ' + 'be\n' + 'overridden by instances.\n' + '\n' + 'Python methods (including those decorated with ' + '"@staticmethod" and\n' + '"@classmethod") are implemented as non-data descriptors. 
' + 'Accordingly,\n' + 'instances can redefine and override methods. This allows ' + 'individual\n' + 'instances to acquire behaviors that differ from other ' + 'instances of the\n' + 'same class.\n' + '\n' + 'The "property()" function is implemented as a data ' + 'descriptor.\n' + 'Accordingly, instances cannot override the behavior of a ' + 'property.\n' + '\n' + '\n' + '__slots__\n' + '---------\n' + '\n' + '*__slots__* allow us to explicitly declare data members ' + '(like\n' + 'properties) and deny the creation of "__dict__" and ' + '*__weakref__*\n' + '(unless explicitly declared in *__slots__* or available in a ' + 'parent.)\n' + '\n' + 'The space saved over using "__dict__" can be significant. ' + 'Attribute\n' + 'lookup speed can be significantly improved as well.\n' + '\n' + 'object.__slots__\n' + '\n' + ' This class variable can be assigned a string, iterable, ' + 'or sequence\n' + ' of strings with variable names used by instances. ' + '*__slots__*\n' + ' reserves space for the declared variables and prevents ' + 'the\n' + ' automatic creation of "__dict__" and *__weakref__* for ' + 'each\n' + ' instance.\n' + '\n' + '\n' + 'Notes on using *__slots__*\n' + '~~~~~~~~~~~~~~~~~~~~~~~~~~\n' + '\n' + '* When inheriting from a class without *__slots__*, the ' + '"__dict__" and\n' + ' *__weakref__* attribute of the instances will always be ' + 'accessible.\n' + '\n' + '* Without a "__dict__" variable, instances cannot be ' + 'assigned new\n' + ' variables not listed in the *__slots__* definition. ' + 'Attempts to\n' + ' assign to an unlisted variable name raises ' + '"AttributeError". If\n' + ' dynamic assignment of new variables is desired, then add\n' + ' "\'__dict__\'" to the sequence of strings in the ' + '*__slots__*\n' + ' declaration.\n' + '\n' + '* Without a *__weakref__* variable for each instance, ' + 'classes defining\n' + ' *__slots__* do not support "weak references" to its ' + 'instances. 
If\n' + ' weak reference support is needed, then add ' + '"\'__weakref__\'" to the\n' + ' sequence of strings in the *__slots__* declaration.\n' + '\n' + '* *__slots__* are implemented at the class level by ' + 'creating\n' + ' descriptors for each variable name. As a result, class ' + 'attributes\n' + ' cannot be used to set default values for instance ' + 'variables defined\n' + ' by *__slots__*; otherwise, the class attribute would ' + 'overwrite the\n' + ' descriptor assignment.\n' + '\n' + '* The action of a *__slots__* declaration is not limited to ' + 'the class\n' + ' where it is defined. *__slots__* declared in parents are ' + 'available\n' + ' in child classes. However, child subclasses will get a ' + '"__dict__"\n' + ' and *__weakref__* unless they also define *__slots__* ' + '(which should\n' + ' only contain names of any *additional* slots).\n' + '\n' + '* If a class defines a slot also defined in a base class, ' + 'the instance\n' + ' variable defined by the base class slot is inaccessible ' + '(except by\n' + ' retrieving its descriptor directly from the base class). ' + 'This\n' + ' renders the meaning of the program undefined. In the ' + 'future, a\n' + ' check may be added to prevent this.\n' + '\n' + '* "TypeError" will be raised if nonempty *__slots__* are ' + 'defined for a\n' + ' class derived from a ""variable-length" built-in type" ' + 'such as\n' + ' "int", "bytes", and "tuple".\n' + '\n' + '* Any non-string *iterable* may be assigned to *__slots__*.\n' + '\n' + '* If a "dictionary" is used to assign *__slots__*, the ' + 'dictionary keys\n' + ' will be used as the slot names. 
The values of the ' + 'dictionary can be\n' + ' used to provide per-attribute docstrings that will be ' + 'recognised by\n' + ' "inspect.getdoc()" and displayed in the output of ' + '"help()".\n' + '\n' + '* "__class__" assignment works only if both classes have the ' + 'same\n' + ' *__slots__*.\n' + '\n' + '* Multiple inheritance with multiple slotted parent classes ' + 'can be\n' + ' used, but only one parent is allowed to have attributes ' + 'created by\n' + ' slots (the other bases must have empty slot layouts) - ' + 'violations\n' + ' raise "TypeError".\n' + '\n' + '* If an *iterator* is used for *__slots__* then a ' + '*descriptor* is\n' + ' created for each of the iterator’s values. However, the ' + '*__slots__*\n' + ' attribute will be an empty iterator.\n' + '\n' + '\n' + 'Customizing class creation\n' + '==========================\n' + '\n' + 'Whenever a class inherits from another class, ' + '"__init_subclass__()" is\n' + 'called on the parent class. This way, it is possible to ' + 'write classes\n' + 'which change the behavior of subclasses. This is closely ' + 'related to\n' + 'class decorators, but where class decorators only affect the ' + 'specific\n' + 'class they’re applied to, "__init_subclass__" solely applies ' + 'to future\n' + 'subclasses of the class defining the method.\n' + '\n' + 'classmethod object.__init_subclass__(cls)\n' + '\n' + ' This method is called whenever the containing class is ' + 'subclassed.\n' + ' *cls* is then the new subclass. If defined as a normal ' + 'instance\n' + ' method, this method is implicitly converted to a class ' + 'method.\n' + '\n' + ' Keyword arguments which are given to a new class are ' + 'passed to the\n' + ' parent’s class "__init_subclass__". 
For compatibility ' + 'with other\n' + ' classes using "__init_subclass__", one should take out ' + 'the needed\n' + ' keyword arguments and pass the others over to the base ' + 'class, as\n' + ' in:\n' + '\n' + ' class Philosopher:\n' + ' def __init_subclass__(cls, /, default_name, ' + '**kwargs):\n' + ' super().__init_subclass__(**kwargs)\n' + ' cls.default_name = default_name\n' + '\n' + ' class AustralianPhilosopher(Philosopher, ' + 'default_name="Bruce"):\n' + ' pass\n' + '\n' + ' The default implementation "object.__init_subclass__" ' + 'does nothing,\n' + ' but raises an error if it is called with any arguments.\n' + '\n' + ' Note:\n' + '\n' + ' The metaclass hint "metaclass" is consumed by the rest ' + 'of the\n' + ' type machinery, and is never passed to ' + '"__init_subclass__"\n' + ' implementations. The actual metaclass (rather than the ' + 'explicit\n' + ' hint) can be accessed as "type(cls)".\n' + '\n' + ' New in version 3.6.\n' + '\n' + 'When a class is created, "type.__new__()" scans the class ' + 'variables\n' + 'and makes callbacks to those with a "__set_name__()" hook.\n' + '\n' + 'object.__set_name__(self, owner, name)\n' + '\n' + ' Automatically called at the time the owning class *owner* ' + 'is\n' + ' created. The object has been assigned to *name* in that ' + 'class:\n' + '\n' + ' class A:\n' + ' x = C() # Automatically calls: x.__set_name__(A, ' + "'x')\n" + '\n' + ' If the class variable is assigned after the class is ' + 'created,\n' + ' "__set_name__()" will not be called automatically. If ' + 'needed,\n' + ' "__set_name__()" can be called directly:\n' + '\n' + ' class A:\n' + ' pass\n' + '\n' + ' c = C()\n' + ' A.x = c # The hook is not called\n' + " c.__set_name__(A, 'x') # Manually invoke the hook\n" + '\n' + ' See Creating the class object for more details.\n' + '\n' + ' New in version 3.6.\n' + '\n' + '\n' + 'Metaclasses\n' + '-----------\n' + '\n' + 'By default, classes are constructed using "type()". 
The ' + 'class body is\n' + 'executed in a new namespace and the class name is bound ' + 'locally to the\n' + 'result of "type(name, bases, namespace)".\n' + '\n' + 'The class creation process can be customized by passing the\n' + '"metaclass" keyword argument in the class definition line, ' + 'or by\n' + 'inheriting from an existing class that included such an ' + 'argument. In\n' + 'the following example, both "MyClass" and "MySubclass" are ' + 'instances\n' + 'of "Meta":\n' + '\n' + ' class Meta(type):\n' + ' pass\n' + '\n' + ' class MyClass(metaclass=Meta):\n' + ' pass\n' + '\n' + ' class MySubclass(MyClass):\n' + ' pass\n' + '\n' + 'Any other keyword arguments that are specified in the class ' + 'definition\n' + 'are passed through to all metaclass operations described ' + 'below.\n' + '\n' + 'When a class definition is executed, the following steps ' + 'occur:\n' + '\n' + '* MRO entries are resolved;\n' + '\n' + '* the appropriate metaclass is determined;\n' + '\n' + '* the class namespace is prepared;\n' + '\n' + '* the class body is executed;\n' + '\n' + '* the class object is created.\n' + '\n' + '\n' + 'Resolving MRO entries\n' + '---------------------\n' + '\n' + 'If a base that appears in class definition is not an ' + 'instance of\n' + '"type", then an "__mro_entries__" method is searched on it. ' + 'If found,\n' + 'it is called with the original bases tuple. This method must ' + 'return a\n' + 'tuple of classes that will be used instead of this base. 
The ' + 'tuple may\n' + 'be empty, in such case the original base is ignored.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 560** - Core support for typing module and generic ' + 'types\n' + '\n' + '\n' + 'Determining the appropriate metaclass\n' + '-------------------------------------\n' + '\n' + 'The appropriate metaclass for a class definition is ' + 'determined as\n' + 'follows:\n' + '\n' + '* if no bases and no explicit metaclass are given, then ' + '"type()" is\n' + ' used;\n' + '\n' + '* if an explicit metaclass is given and it is *not* an ' + 'instance of\n' + ' "type()", then it is used directly as the metaclass;\n' + '\n' + '* if an instance of "type()" is given as the explicit ' + 'metaclass, or\n' + ' bases are defined, then the most derived metaclass is ' + 'used.\n' + '\n' + 'The most derived metaclass is selected from the explicitly ' + 'specified\n' + 'metaclass (if any) and the metaclasses (i.e. "type(cls)") of ' + 'all\n' + 'specified base classes. The most derived metaclass is one ' + 'which is a\n' + 'subtype of *all* of these candidate metaclasses. If none of ' + 'the\n' + 'candidate metaclasses meets that criterion, then the class ' + 'definition\n' + 'will fail with "TypeError".\n' + '\n' + '\n' + 'Preparing the class namespace\n' + '-----------------------------\n' + '\n' + 'Once the appropriate metaclass has been identified, then the ' + 'class\n' + 'namespace is prepared. If the metaclass has a "__prepare__" ' + 'attribute,\n' + 'it is called as "namespace = metaclass.__prepare__(name, ' + 'bases,\n' + '**kwds)" (where the additional keyword arguments, if any, ' + 'come from\n' + 'the class definition). The "__prepare__" method should be ' + 'implemented\n' + 'as a "classmethod". 
The namespace returned by "__prepare__" ' + 'is passed\n' + 'in to "__new__", but when the final class object is created ' + 'the\n' + 'namespace is copied into a new "dict".\n' + '\n' + 'If the metaclass has no "__prepare__" attribute, then the ' + 'class\n' + 'namespace is initialised as an empty ordered mapping.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 3115** - Metaclasses in Python 3000\n' + ' Introduced the "__prepare__" namespace hook\n' + '\n' + '\n' + 'Executing the class body\n' + '------------------------\n' + '\n' + 'The class body is executed (approximately) as "exec(body, ' + 'globals(),\n' + 'namespace)". The key difference from a normal call to ' + '"exec()" is that\n' + 'lexical scoping allows the class body (including any ' + 'methods) to\n' + 'reference names from the current and outer scopes when the ' + 'class\n' + 'definition occurs inside a function.\n' + '\n' + 'However, even when the class definition occurs inside the ' + 'function,\n' + 'methods defined inside the class still cannot see names ' + 'defined at the\n' + 'class scope. Class variables must be accessed through the ' + 'first\n' + 'parameter of instance or class methods, or through the ' + 'implicit\n' + 'lexically scoped "__class__" reference described in the next ' + 'section.\n' + '\n' + '\n' + 'Creating the class object\n' + '-------------------------\n' + '\n' + 'Once the class namespace has been populated by executing the ' + 'class\n' + 'body, the class object is created by calling ' + '"metaclass(name, bases,\n' + 'namespace, **kwds)" (the additional keywords passed here are ' + 'the same\n' + 'as those passed to "__prepare__").\n' + '\n' + 'This class object is the one that will be referenced by the ' + 'zero-\n' + 'argument form of "super()". "__class__" is an implicit ' + 'closure\n' + 'reference created by the compiler if any methods in a class ' + 'body refer\n' + 'to either "__class__" or "super". 
This allows the zero ' + 'argument form\n' + 'of "super()" to correctly identify the class being defined ' + 'based on\n' + 'lexical scoping, while the class or instance that was used ' + 'to make the\n' + 'current call is identified based on the first argument ' + 'passed to the\n' + 'method.\n' + '\n' + '**CPython implementation detail:** In CPython 3.6 and later, ' + 'the\n' + '"__class__" cell is passed to the metaclass as a ' + '"__classcell__" entry\n' + 'in the class namespace. If present, this must be propagated ' + 'up to the\n' + '"type.__new__" call in order for the class to be ' + 'initialised\n' + 'correctly. Failing to do so will result in a "RuntimeError" ' + 'in Python\n' + '3.8.\n' + '\n' + 'When using the default metaclass "type", or any metaclass ' + 'that\n' + 'ultimately calls "type.__new__", the following additional\n' + 'customization steps are invoked after creating the class ' + 'object:\n' + '\n' + '1. The "type.__new__" method collects all of the attributes ' + 'in the\n' + ' class namespace that define a "__set_name__()" method;\n' + '\n' + '2. Those "__set_name__" methods are called with the class ' + 'being\n' + ' defined and the assigned name of that particular ' + 'attribute;\n' + '\n' + '3. The "__init_subclass__()" hook is called on the immediate ' + 'parent of\n' + ' the new class in its method resolution order.\n' + '\n' + 'After the class object is created, it is passed to the ' + 'class\n' + 'decorators included in the class definition (if any) and the ' + 'resulting\n' + 'object is bound in the local namespace as the defined ' + 'class.\n' + '\n' + 'When a new class is created by "type.__new__", the object ' + 'provided as\n' + 'the namespace parameter is copied to a new ordered mapping ' + 'and the\n' + 'original object is discarded. 
The new copy is wrapped in a ' + 'read-only\n' + 'proxy, which becomes the "__dict__" attribute of the class ' + 'object.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 3135** - New super\n' + ' Describes the implicit "__class__" closure reference\n' + '\n' + '\n' + 'Uses for metaclasses\n' + '--------------------\n' + '\n' + 'The potential uses for metaclasses are boundless. Some ideas ' + 'that have\n' + 'been explored include enum, logging, interface checking, ' + 'automatic\n' + 'delegation, automatic property creation, proxies, ' + 'frameworks, and\n' + 'automatic resource locking/synchronization.\n' + '\n' + '\n' + 'Customizing instance and subclass checks\n' + '========================================\n' + '\n' + 'The following methods are used to override the default ' + 'behavior of the\n' + '"isinstance()" and "issubclass()" built-in functions.\n' + '\n' + 'In particular, the metaclass "abc.ABCMeta" implements these ' + 'methods in\n' + 'order to allow the addition of Abstract Base Classes (ABCs) ' + 'as\n' + '“virtual base classes” to any class or type (including ' + 'built-in\n' + 'types), including other ABCs.\n' + '\n' + 'class.__instancecheck__(self, instance)\n' + '\n' + ' Return true if *instance* should be considered a (direct ' + 'or\n' + ' indirect) instance of *class*. If defined, called to ' + 'implement\n' + ' "isinstance(instance, class)".\n' + '\n' + 'class.__subclasscheck__(self, subclass)\n' + '\n' + ' Return true if *subclass* should be considered a (direct ' + 'or\n' + ' indirect) subclass of *class*. If defined, called to ' + 'implement\n' + ' "issubclass(subclass, class)".\n' + '\n' + 'Note that these methods are looked up on the type ' + '(metaclass) of a\n' + 'class. 
They cannot be defined as class methods in the ' + 'actual class.\n' + 'This is consistent with the lookup of special methods that ' + 'are called\n' + 'on instances, only in this case the instance is itself a ' + 'class.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 3119** - Introducing Abstract Base Classes\n' + ' Includes the specification for customizing ' + '"isinstance()" and\n' + ' "issubclass()" behavior through "__instancecheck__()" ' + 'and\n' + ' "__subclasscheck__()", with motivation for this ' + 'functionality in\n' + ' the context of adding Abstract Base Classes (see the ' + '"abc"\n' + ' module) to the language.\n' + '\n' + '\n' + 'Emulating generic types\n' + '=======================\n' + '\n' + 'When using *type annotations*, it is often useful to ' + '*parameterize* a\n' + '*generic type* using Python’s square-brackets notation. For ' + 'example,\n' + 'the annotation "list[int]" might be used to signify a "list" ' + 'in which\n' + 'all the elements are of type "int".\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 484** - Type Hints\n' + ' Introducing Python’s framework for type annotations\n' + '\n' + ' Generic Alias Types\n' + ' Documentation for objects representing parameterized ' + 'generic\n' + ' classes\n' + '\n' + ' Generics, user-defined generics and "typing.Generic"\n' + ' Documentation on how to implement generic classes that ' + 'can be\n' + ' parameterized at runtime and understood by static ' + 'type-checkers.\n' + '\n' + 'A class can *generally* only be parameterized if it defines ' + 'the\n' + 'special class method "__class_getitem__()".\n' + '\n' + 'classmethod object.__class_getitem__(cls, key)\n' + '\n' + ' Return an object representing the specialization of a ' + 'generic class\n' + ' by type arguments found in *key*.\n' + '\n' + ' When defined on a class, "__class_getitem__()" is ' + 'automatically a\n' + ' class method. 
As such, there is no need for it to be ' + 'decorated with\n' + ' "@classmethod" when it is defined.\n' + '\n' + '\n' + 'The purpose of *__class_getitem__*\n' + '----------------------------------\n' + '\n' + 'The purpose of "__class_getitem__()" is to allow runtime\n' + 'parameterization of standard-library generic classes in ' + 'order to more\n' + 'easily apply *type hints* to these classes.\n' + '\n' + 'To implement custom generic classes that can be ' + 'parameterized at\n' + 'runtime and understood by static type-checkers, users should ' + 'either\n' + 'inherit from a standard library class that already ' + 'implements\n' + '"__class_getitem__()", or inherit from "typing.Generic", ' + 'which has its\n' + 'own implementation of "__class_getitem__()".\n' + '\n' + 'Custom implementations of "__class_getitem__()" on classes ' + 'defined\n' + 'outside of the standard library may not be understood by ' + 'third-party\n' + 'type-checkers such as mypy. Using "__class_getitem__()" on ' + 'any class\n' + 'for purposes other than type hinting is discouraged.\n' + '\n' + '\n' + '*__class_getitem__* versus *__getitem__*\n' + '----------------------------------------\n' + '\n' + 'Usually, the subscription of an object using square brackets ' + 'will call\n' + 'the "__getitem__()" instance method defined on the object’s ' + 'class.\n' + 'However, if the object being subscribed is itself a class, ' + 'the class\n' + 'method "__class_getitem__()" may be called instead.\n' + '"__class_getitem__()" should return a GenericAlias object if ' + 'it is\n' + 'properly defined.\n' + '\n' + 'Presented with the *expression* "obj[x]", the Python ' + 'interpreter\n' + 'follows something like the following process to decide ' + 'whether\n' + '"__getitem__()" or "__class_getitem__()" should be called:\n' + '\n' + ' from inspect import isclass\n' + '\n' + ' def subscribe(obj, x):\n' + ' """Return the result of the expression `obj[x]`"""\n' + '\n' + ' class_of_obj = type(obj)\n' + '\n' + ' # 
If the class of obj defines __getitem__,\n' + ' # call class_of_obj.__getitem__(obj, x)\n' + " if hasattr(class_of_obj, '__getitem__'):\n" + ' return class_of_obj.__getitem__(obj, x)\n' + '\n' + ' # Else, if obj is a class and defines ' + '__class_getitem__,\n' + ' # call obj.__class_getitem__(x)\n' + ' elif isclass(obj) and hasattr(obj, ' + "'__class_getitem__'):\n" + ' return obj.__class_getitem__(x)\n' + '\n' + ' # Else, raise an exception\n' + ' else:\n' + ' raise TypeError(\n' + ' f"\'{class_of_obj.__name__}\' object is not ' + 'subscriptable"\n' + ' )\n' + '\n' + 'In Python, all classes are themselves instances of other ' + 'classes. The\n' + 'class of a class is known as that class’s *metaclass*, and ' + 'most\n' + 'classes have the "type" class as their metaclass. "type" ' + 'does not\n' + 'define "__getitem__()", meaning that expressions such as ' + '"list[int]",\n' + '"dict[str, float]" and "tuple[str, bytes]" all result in\n' + '"__class_getitem__()" being called:\n' + '\n' + ' >>> # list has class "type" as its metaclass, like most ' + 'classes:\n' + ' >>> type(list)\n' + " \n" + ' >>> type(dict) == type(list) == type(tuple) == type(str) ' + '== type(bytes)\n' + ' True\n' + ' >>> # "list[int]" calls "list.__class_getitem__(int)"\n' + ' >>> list[int]\n' + ' list[int]\n' + ' >>> # list.__class_getitem__ returns a GenericAlias ' + 'object:\n' + ' >>> type(list[int])\n' + " \n" + '\n' + 'However, if a class has a custom metaclass that defines\n' + '"__getitem__()", subscribing the class may result in ' + 'different\n' + 'behaviour. An example of this can be found in the "enum" ' + 'module:\n' + '\n' + ' >>> from enum import Enum\n' + ' >>> class Menu(Enum):\n' + ' ... """A breakfast menu"""\n' + " ... SPAM = 'spam'\n" + " ... 
BACON = 'bacon'\n" + ' ...\n' + ' >>> # Enum classes have a custom metaclass:\n' + ' >>> type(Menu)\n' + " \n" + ' >>> # EnumMeta defines __getitem__,\n' + ' >>> # so __class_getitem__ is not called,\n' + ' >>> # and the result is not a GenericAlias object:\n' + " >>> Menu['SPAM']\n" + " \n" + " >>> type(Menu['SPAM'])\n" + " \n" + '\n' + 'See also:\n' + '\n' + ' **PEP 560** - Core Support for typing module and generic ' + 'types\n' + ' Introducing "__class_getitem__()", and outlining when ' + 'a\n' + ' subscription results in "__class_getitem__()" being ' + 'called\n' + ' instead of "__getitem__()"\n' + '\n' + '\n' + 'Emulating callable objects\n' + '==========================\n' + '\n' + 'object.__call__(self[, args...])\n' + '\n' + ' Called when the instance is “called” as a function; if ' + 'this method\n' + ' is defined, "x(arg1, arg2, ...)" roughly translates to\n' + ' "type(x).__call__(x, arg1, ...)".\n' + '\n' + '\n' + 'Emulating container types\n' + '=========================\n' + '\n' + 'The following methods can be defined to implement container ' + 'objects.\n' + 'Containers usually are *sequences* (such as "lists" or ' + '"tuples") or\n' + '*mappings* (like "dictionaries"), but can represent other ' + 'containers\n' + 'as well. The first set of methods is used either to emulate ' + 'a\n' + 'sequence or to emulate a mapping; the difference is that for ' + 'a\n' + 'sequence, the allowable keys should be the integers *k* for ' + 'which "0\n' + '<= k < N" where *N* is the length of the sequence, or ' + '"slice" objects,\n' + 'which define a range of items. It is also recommended that ' + 'mappings\n' + 'provide the methods "keys()", "values()", "items()", ' + '"get()",\n' + '"clear()", "setdefault()", "pop()", "popitem()", "copy()", ' + 'and\n' + '"update()" behaving similar to those for Python’s standard\n' + '"dictionary" objects. 
The "collections.abc" module provides ' + 'a\n' + '"MutableMapping" *abstract base class* to help create those ' + 'methods\n' + 'from a base set of "__getitem__()", "__setitem__()", ' + '"__delitem__()",\n' + 'and "keys()". Mutable sequences should provide methods ' + '"append()",\n' + '"count()", "index()", "extend()", "insert()", "pop()", ' + '"remove()",\n' + '"reverse()" and "sort()", like Python standard "list" ' + 'objects.\n' + 'Finally, sequence types should implement addition (meaning\n' + 'concatenation) and multiplication (meaning repetition) by ' + 'defining the\n' + 'methods "__add__()", "__radd__()", "__iadd__()", ' + '"__mul__()",\n' + '"__rmul__()" and "__imul__()" described below; they should ' + 'not define\n' + 'other numerical operators. It is recommended that both ' + 'mappings and\n' + 'sequences implement the "__contains__()" method to allow ' + 'efficient use\n' + 'of the "in" operator; for mappings, "in" should search the ' + 'mapping’s\n' + 'keys; for sequences, it should search through the values. ' + 'It is\n' + 'further recommended that both mappings and sequences ' + 'implement the\n' + '"__iter__()" method to allow efficient iteration through ' + 'the\n' + 'container; for mappings, "__iter__()" should iterate through ' + 'the\n' + 'object’s keys; for sequences, it should iterate through the ' + 'values.\n' + '\n' + 'object.__len__(self)\n' + '\n' + ' Called to implement the built-in function "len()". ' + 'Should return\n' + ' the length of the object, an integer ">=" 0. Also, an ' + 'object that\n' + ' doesn’t define a "__bool__()" method and whose ' + '"__len__()" method\n' + ' returns zero is considered to be false in a Boolean ' + 'context.\n' + '\n' + ' **CPython implementation detail:** In CPython, the length ' + 'is\n' + ' required to be at most "sys.maxsize". If the length is ' + 'larger than\n' + ' "sys.maxsize" some features (such as "len()") may raise\n' + ' "OverflowError". 
To prevent raising "OverflowError" by ' + 'truth value\n' + ' testing, an object must define a "__bool__()" method.\n' + '\n' + 'object.__length_hint__(self)\n' + '\n' + ' Called to implement "operator.length_hint()". Should ' + 'return an\n' + ' estimated length for the object (which may be greater or ' + 'less than\n' + ' the actual length). The length must be an integer ">=" 0. ' + 'The\n' + ' return value may also be "NotImplemented", which is ' + 'treated the\n' + ' same as if the "__length_hint__" method didn’t exist at ' + 'all. This\n' + ' method is purely an optimization and is never required ' + 'for\n' + ' correctness.\n' + '\n' + ' New in version 3.4.\n' + '\n' + 'Note:\n' + '\n' + ' Slicing is done exclusively with the following three ' + 'methods. A\n' + ' call like\n' + '\n' + ' a[1:2] = b\n' + '\n' + ' is translated to\n' + '\n' + ' a[slice(1, 2, None)] = b\n' + '\n' + ' and so forth. Missing slice items are always filled in ' + 'with "None".\n' + '\n' + 'object.__getitem__(self, key)\n' + '\n' + ' Called to implement evaluation of "self[key]". For ' + '*sequence*\n' + ' types, the accepted keys should be integers and slice ' + 'objects.\n' + ' Note that the special interpretation of negative indexes ' + '(if the\n' + ' class wishes to emulate a *sequence* type) is up to the\n' + ' "__getitem__()" method. If *key* is of an inappropriate ' + 'type,\n' + ' "TypeError" may be raised; if of a value outside the set ' + 'of indexes\n' + ' for the sequence (after any special interpretation of ' + 'negative\n' + ' values), "IndexError" should be raised. 
For *mapping* ' + 'types, if\n' + ' *key* is missing (not in the container), "KeyError" ' + 'should be\n' + ' raised.\n' + '\n' + ' Note:\n' + '\n' + ' "for" loops expect that an "IndexError" will be raised ' + 'for\n' + ' illegal indexes to allow proper detection of the end of ' + 'the\n' + ' sequence.\n' + '\n' + ' Note:\n' + '\n' + ' When subscripting a *class*, the special class method\n' + ' "__class_getitem__()" may be called instead of ' + '"__getitem__()".\n' + ' See __class_getitem__ versus __getitem__ for more ' + 'details.\n' + '\n' + 'object.__setitem__(self, key, value)\n' + '\n' + ' Called to implement assignment to "self[key]". Same note ' + 'as for\n' + ' "__getitem__()". This should only be implemented for ' + 'mappings if\n' + ' the objects support changes to the values for keys, or if ' + 'new keys\n' + ' can be added, or for sequences if elements can be ' + 'replaced. The\n' + ' same exceptions should be raised for improper *key* ' + 'values as for\n' + ' the "__getitem__()" method.\n' + '\n' + 'object.__delitem__(self, key)\n' + '\n' + ' Called to implement deletion of "self[key]". Same note ' + 'as for\n' + ' "__getitem__()". This should only be implemented for ' + 'mappings if\n' + ' the objects support removal of keys, or for sequences if ' + 'elements\n' + ' can be removed from the sequence. The same exceptions ' + 'should be\n' + ' raised for improper *key* values as for the ' + '"__getitem__()" method.\n' + '\n' + 'object.__missing__(self, key)\n' + '\n' + ' Called by "dict"."__getitem__()" to implement "self[key]" ' + 'for dict\n' + ' subclasses when key is not in the dictionary.\n' + '\n' + 'object.__iter__(self)\n' + '\n' + ' This method is called when an *iterator* is required for ' + 'a\n' + ' container. This method should return a new iterator ' + 'object that can\n' + ' iterate over all the objects in the container. 
For ' + 'mappings, it\n' + ' should iterate over the keys of the container.\n' + '\n' + 'object.__reversed__(self)\n' + '\n' + ' Called (if present) by the "reversed()" built-in to ' + 'implement\n' + ' reverse iteration. It should return a new iterator ' + 'object that\n' + ' iterates over all the objects in the container in reverse ' + 'order.\n' + '\n' + ' If the "__reversed__()" method is not provided, the ' + '"reversed()"\n' + ' built-in will fall back to using the sequence protocol ' + '("__len__()"\n' + ' and "__getitem__()"). Objects that support the sequence ' + 'protocol\n' + ' should only provide "__reversed__()" if they can provide ' + 'an\n' + ' implementation that is more efficient than the one ' + 'provided by\n' + ' "reversed()".\n' + '\n' + 'The membership test operators ("in" and "not in") are ' + 'normally\n' + 'implemented as an iteration through a container. However, ' + 'container\n' + 'objects can supply the following special method with a more ' + 'efficient\n' + 'implementation, which also does not require the object be ' + 'iterable.\n' + '\n' + 'object.__contains__(self, item)\n' + '\n' + ' Called to implement membership test operators. Should ' + 'return true\n' + ' if *item* is in *self*, false otherwise. 
For mapping ' + 'objects, this\n' + ' should consider the keys of the mapping rather than the ' + 'values or\n' + ' the key-item pairs.\n' + '\n' + ' For objects that don’t define "__contains__()", the ' + 'membership test\n' + ' first tries iteration via "__iter__()", then the old ' + 'sequence\n' + ' iteration protocol via "__getitem__()", see this section ' + 'in the\n' + ' language reference.\n' + '\n' + '\n' + 'Emulating numeric types\n' + '=======================\n' + '\n' + 'The following methods can be defined to emulate numeric ' + 'objects.\n' + 'Methods corresponding to operations that are not supported ' + 'by the\n' + 'particular kind of number implemented (e.g., bitwise ' + 'operations for\n' + 'non-integral numbers) should be left undefined.\n' + '\n' + 'object.__add__(self, other)\n' + 'object.__sub__(self, other)\n' + 'object.__mul__(self, other)\n' + 'object.__matmul__(self, other)\n' + 'object.__truediv__(self, other)\n' + 'object.__floordiv__(self, other)\n' + 'object.__mod__(self, other)\n' + 'object.__divmod__(self, other)\n' + 'object.__pow__(self, other[, modulo])\n' + 'object.__lshift__(self, other)\n' + 'object.__rshift__(self, other)\n' + 'object.__and__(self, other)\n' + 'object.__xor__(self, other)\n' + 'object.__or__(self, other)\n' + '\n' + ' These methods are called to implement the binary ' + 'arithmetic\n' + ' operations ("+", "-", "*", "@", "/", "//", "%", ' + '"divmod()",\n' + ' "pow()", "**", "<<", ">>", "&", "^", "|"). For instance, ' + 'to\n' + ' evaluate the expression "x + y", where *x* is an instance ' + 'of a\n' + ' class that has an "__add__()" method, "x.__add__(y)" is ' + 'called.\n' + ' The "__divmod__()" method should be the equivalent to ' + 'using\n' + ' "__floordiv__()" and "__mod__()"; it should not be ' + 'related to\n' + ' "__truediv__()". 
Note that "__pow__()" should be defined ' + 'to accept\n' + ' an optional third argument if the ternary version of the ' + 'built-in\n' + ' "pow()" function is to be supported.\n' + '\n' + ' If one of those methods does not support the operation ' + 'with the\n' + ' supplied arguments, it should return "NotImplemented".\n' + '\n' + 'object.__radd__(self, other)\n' + 'object.__rsub__(self, other)\n' + 'object.__rmul__(self, other)\n' + 'object.__rmatmul__(self, other)\n' + 'object.__rtruediv__(self, other)\n' + 'object.__rfloordiv__(self, other)\n' + 'object.__rmod__(self, other)\n' + 'object.__rdivmod__(self, other)\n' + 'object.__rpow__(self, other[, modulo])\n' + 'object.__rlshift__(self, other)\n' + 'object.__rrshift__(self, other)\n' + 'object.__rand__(self, other)\n' + 'object.__rxor__(self, other)\n' + 'object.__ror__(self, other)\n' + '\n' + ' These methods are called to implement the binary ' + 'arithmetic\n' + ' operations ("+", "-", "*", "@", "/", "//", "%", ' + '"divmod()",\n' + ' "pow()", "**", "<<", ">>", "&", "^", "|") with reflected ' + '(swapped)\n' + ' operands. These functions are only called if the left ' + 'operand does\n' + ' not support the corresponding operation [3] and the ' + 'operands are of\n' + ' different types. [4] For instance, to evaluate the ' + 'expression "x -\n' + ' y", where *y* is an instance of a class that has an ' + '"__rsub__()"\n' + ' method, "y.__rsub__(x)" is called if "x.__sub__(y)" ' + 'returns\n' + ' *NotImplemented*.\n' + '\n' + ' Note that ternary "pow()" will not try calling ' + '"__rpow__()" (the\n' + ' coercion rules would become too complicated).\n' + '\n' + ' Note:\n' + '\n' + ' If the right operand’s type is a subclass of the left ' + 'operand’s\n' + ' type and that subclass provides a different ' + 'implementation of the\n' + ' reflected method for the operation, this method will be ' + 'called\n' + ' before the left operand’s non-reflected method. 
This ' + 'behavior\n' + ' allows subclasses to override their ancestors’ ' + 'operations.\n' + '\n' + 'object.__iadd__(self, other)\n' + 'object.__isub__(self, other)\n' + 'object.__imul__(self, other)\n' + 'object.__imatmul__(self, other)\n' + 'object.__itruediv__(self, other)\n' + 'object.__ifloordiv__(self, other)\n' + 'object.__imod__(self, other)\n' + 'object.__ipow__(self, other[, modulo])\n' + 'object.__ilshift__(self, other)\n' + 'object.__irshift__(self, other)\n' + 'object.__iand__(self, other)\n' + 'object.__ixor__(self, other)\n' + 'object.__ior__(self, other)\n' + '\n' + ' These methods are called to implement the augmented ' + 'arithmetic\n' + ' assignments ("+=", "-=", "*=", "@=", "/=", "//=", "%=", ' + '"**=",\n' + ' "<<=", ">>=", "&=", "^=", "|="). These methods should ' + 'attempt to\n' + ' do the operation in-place (modifying *self*) and return ' + 'the result\n' + ' (which could be, but does not have to be, *self*). If a ' + 'specific\n' + ' method is not defined, the augmented assignment falls ' + 'back to the\n' + ' normal methods. For instance, if *x* is an instance of a ' + 'class\n' + ' with an "__iadd__()" method, "x += y" is equivalent to "x ' + '=\n' + ' x.__iadd__(y)" . Otherwise, "x.__add__(y)" and ' + '"y.__radd__(x)" are\n' + ' considered, as with the evaluation of "x + y". 
In ' + 'certain\n' + ' situations, augmented assignment can result in unexpected ' + 'errors\n' + ' (see Why does a_tuple[i] += [‘item’] raise an exception ' + 'when the\n' + ' addition works?), but this behavior is in fact part of ' + 'the data\n' + ' model.\n' + '\n' + 'object.__neg__(self)\n' + 'object.__pos__(self)\n' + 'object.__abs__(self)\n' + 'object.__invert__(self)\n' + '\n' + ' Called to implement the unary arithmetic operations ("-", ' + '"+",\n' + ' "abs()" and "~").\n' + '\n' + 'object.__complex__(self)\n' + 'object.__int__(self)\n' + 'object.__float__(self)\n' + '\n' + ' Called to implement the built-in functions "complex()", ' + '"int()" and\n' + ' "float()". Should return a value of the appropriate ' + 'type.\n' + '\n' + 'object.__index__(self)\n' + '\n' + ' Called to implement "operator.index()", and whenever ' + 'Python needs\n' + ' to losslessly convert the numeric object to an integer ' + 'object (such\n' + ' as in slicing, or in the built-in "bin()", "hex()" and ' + '"oct()"\n' + ' functions). Presence of this method indicates that the ' + 'numeric\n' + ' object is an integer type. Must return an integer.\n' + '\n' + ' If "__int__()", "__float__()" and "__complex__()" are not ' + 'defined\n' + ' then corresponding built-in functions "int()", "float()" ' + 'and\n' + ' "complex()" fall back to "__index__()".\n' + '\n' + 'object.__round__(self[, ndigits])\n' + 'object.__trunc__(self)\n' + 'object.__floor__(self)\n' + 'object.__ceil__(self)\n' + '\n' + ' Called to implement the built-in function "round()" and ' + '"math"\n' + ' functions "trunc()", "floor()" and "ceil()". 
Unless ' + '*ndigits* is\n' + ' passed to "__round__()" all these methods should return ' + 'the value\n' + ' of the object truncated to an "Integral" (typically an ' + '"int").\n' + '\n' + ' The built-in function "int()" falls back to "__trunc__()" ' + 'if\n' + ' neither "__int__()" nor "__index__()" is defined.\n' + '\n' + '\n' + 'With Statement Context Managers\n' + '===============================\n' + '\n' + 'A *context manager* is an object that defines the runtime ' + 'context to\n' + 'be established when executing a "with" statement. The ' + 'context manager\n' + 'handles the entry into, and the exit from, the desired ' + 'runtime context\n' + 'for the execution of the block of code. Context managers ' + 'are normally\n' + 'invoked using the "with" statement (described in section The ' + 'with\n' + 'statement), but can also be used by directly invoking their ' + 'methods.\n' + '\n' + 'Typical uses of context managers include saving and ' + 'restoring various\n' + 'kinds of global state, locking and unlocking resources, ' + 'closing opened\n' + 'files, etc.\n' + '\n' + 'For more information on context managers, see Context ' + 'Manager Types.\n' + '\n' + 'object.__enter__(self)\n' + '\n' + ' Enter the runtime context related to this object. The ' + '"with"\n' + ' statement will bind this method’s return value to the ' + 'target(s)\n' + ' specified in the "as" clause of the statement, if any.\n' + '\n' + 'object.__exit__(self, exc_type, exc_value, traceback)\n' + '\n' + ' Exit the runtime context related to this object. The ' + 'parameters\n' + ' describe the exception that caused the context to be ' + 'exited. If the\n' + ' context was exited without an exception, all three ' + 'arguments will\n' + ' be "None".\n' + '\n' + ' If an exception is supplied, and the method wishes to ' + 'suppress the\n' + ' exception (i.e., prevent it from being propagated), it ' + 'should\n' + ' return a true value. 
Otherwise, the exception will be ' + 'processed\n' + ' normally upon exit from this method.\n' + '\n' + ' Note that "__exit__()" methods should not reraise the ' + 'passed-in\n' + ' exception; this is the caller’s responsibility.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 343** - The “with” statement\n' + ' The specification, background, and examples for the ' + 'Python "with"\n' + ' statement.\n' + '\n' + '\n' + 'Customizing positional arguments in class pattern matching\n' + '==========================================================\n' + '\n' + 'When using a class name in a pattern, positional arguments ' + 'in the\n' + 'pattern are not allowed by default, i.e. "case MyClass(x, ' + 'y)" is\n' + 'typically invalid without special support in "MyClass". To ' + 'be able to\n' + 'use that kind of pattern, the class needs to define a ' + '*__match_args__*\n' + 'attribute.\n' + '\n' + 'object.__match_args__\n' + '\n' + ' This class variable can be assigned a tuple of strings. ' + 'When this\n' + ' class is used in a class pattern with positional ' + 'arguments, each\n' + ' positional argument will be converted into a keyword ' + 'argument,\n' + ' using the corresponding value in *__match_args__* as the ' + 'keyword.\n' + ' The absence of this attribute is equivalent to setting it ' + 'to "()".\n' + '\n' + 'For example, if "MyClass.__match_args__" is "("left", ' + '"center",\n' + '"right")" that means that "case MyClass(x, y)" is equivalent ' + 'to "case\n' + 'MyClass(left=x, center=y)". 
Note that the number of ' + 'arguments in the\n' + 'pattern must be smaller than or equal to the number of ' + 'elements in\n' + '*__match_args__*; if it is larger, the pattern match attempt ' + 'will\n' + 'raise a "TypeError".\n' + '\n' + 'New in version 3.10.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 634** - Structural Pattern Matching\n' + ' The specification for the Python "match" statement.\n' + '\n' + '\n' + 'Special method lookup\n' + '=====================\n' + '\n' + 'For custom classes, implicit invocations of special methods ' + 'are only\n' + 'guaranteed to work correctly if defined on an object’s type, ' + 'not in\n' + 'the object’s instance dictionary. That behaviour is the ' + 'reason why\n' + 'the following code raises an exception:\n' + '\n' + ' >>> class C:\n' + ' ... pass\n' + ' ...\n' + ' >>> c = C()\n' + ' >>> c.__len__ = lambda: 5\n' + ' >>> len(c)\n' + ' Traceback (most recent call last):\n' + ' File "", line 1, in \n' + " TypeError: object of type 'C' has no len()\n" + '\n' + 'The rationale behind this behaviour lies with a number of ' + 'special\n' + 'methods such as "__hash__()" and "__repr__()" that are ' + 'implemented by\n' + 'all objects, including type objects. 
If the implicit lookup ' + 'of these\n' + 'methods used the conventional lookup process, they would ' + 'fail when\n' + 'invoked on the type object itself:\n' + '\n' + ' >>> 1 .__hash__() == hash(1)\n' + ' True\n' + ' >>> int.__hash__() == hash(int)\n' + ' Traceback (most recent call last):\n' + ' File "", line 1, in \n' + " TypeError: descriptor '__hash__' of 'int' object needs an " + 'argument\n' + '\n' + 'Incorrectly attempting to invoke an unbound method of a ' + 'class in this\n' + 'way is sometimes referred to as ‘metaclass confusion’, and ' + 'is avoided\n' + 'by bypassing the instance when looking up special methods:\n' + '\n' + ' >>> type(1).__hash__(1) == hash(1)\n' + ' True\n' + ' >>> type(int).__hash__(int) == hash(int)\n' + ' True\n' + '\n' + 'In addition to bypassing any instance attributes in the ' + 'interest of\n' + 'correctness, implicit special method lookup generally also ' + 'bypasses\n' + 'the "__getattribute__()" method even of the object’s ' + 'metaclass:\n' + '\n' + ' >>> class Meta(type):\n' + ' ... def __getattribute__(*args):\n' + ' ... print("Metaclass getattribute invoked")\n' + ' ... return type.__getattribute__(*args)\n' + ' ...\n' + ' >>> class C(object, metaclass=Meta):\n' + ' ... def __len__(self):\n' + ' ... return 10\n' + ' ... def __getattribute__(*args):\n' + ' ... print("Class getattribute invoked")\n' + ' ... 
return object.__getattribute__(*args)\n' + ' ...\n' + ' >>> c = C()\n' + ' >>> c.__len__() # Explicit lookup via ' + 'instance\n' + ' Class getattribute invoked\n' + ' 10\n' + ' >>> type(c).__len__(c) # Explicit lookup via ' + 'type\n' + ' Metaclass getattribute invoked\n' + ' 10\n' + ' >>> len(c) # Implicit lookup\n' + ' 10\n' + '\n' + 'Bypassing the "__getattribute__()" machinery in this fashion ' + 'provides\n' + 'significant scope for speed optimisations within the ' + 'interpreter, at\n' + 'the cost of some flexibility in the handling of special ' + 'methods (the\n' + 'special method *must* be set on the class object itself in ' + 'order to be\n' + 'consistently invoked by the interpreter).\n', + 'string-methods': 'String Methods\n' + '**************\n' + '\n' + 'Strings implement all of the common sequence operations, ' + 'along with\n' + 'the additional methods described below.\n' + '\n' + 'Strings also support two styles of string formatting, one ' + 'providing a\n' + 'large degree of flexibility and customization (see ' + '"str.format()",\n' + 'Format String Syntax and Custom String Formatting) and the ' + 'other based\n' + 'on C "printf" style formatting that handles a narrower ' + 'range of types\n' + 'and is slightly harder to use correctly, but is often ' + 'faster for the\n' + 'cases it can handle (printf-style String Formatting).\n' + '\n' + 'The Text Processing Services section of the standard ' + 'library covers a\n' + 'number of other modules that provide various text related ' + 'utilities\n' + '(including regular expression support in the "re" ' + 'module).\n' + '\n' + 'str.capitalize()\n' + '\n' + ' Return a copy of the string with its first character ' + 'capitalized\n' + ' and the rest lowercased.\n' + '\n' + ' Changed in version 3.8: The first character is now put ' + 'into\n' + ' titlecase rather than uppercase. 
This means that ' + 'characters like\n' + ' digraphs will only have their first letter capitalized, ' + 'instead of\n' + ' the full character.\n' + '\n' + 'str.casefold()\n' + '\n' + ' Return a casefolded copy of the string. Casefolded ' + 'strings may be\n' + ' used for caseless matching.\n' + '\n' + ' Casefolding is similar to lowercasing but more ' + 'aggressive because\n' + ' it is intended to remove all case distinctions in a ' + 'string. For\n' + ' example, the German lowercase letter "\'ß\'" is ' + 'equivalent to ""ss"".\n' + ' Since it is already lowercase, "lower()" would do ' + 'nothing to "\'ß\'";\n' + ' "casefold()" converts it to ""ss"".\n' + '\n' + ' The casefolding algorithm is described in section 3.13 ' + 'of the\n' + ' Unicode Standard.\n' + '\n' + ' New in version 3.3.\n' + '\n' + 'str.center(width[, fillchar])\n' + '\n' + ' Return centered in a string of length *width*. Padding ' + 'is done\n' + ' using the specified *fillchar* (default is an ASCII ' + 'space). The\n' + ' original string is returned if *width* is less than or ' + 'equal to\n' + ' "len(s)".\n' + '\n' + 'str.count(sub[, start[, end]])\n' + '\n' + ' Return the number of non-overlapping occurrences of ' + 'substring *sub*\n' + ' in the range [*start*, *end*]. Optional arguments ' + '*start* and\n' + ' *end* are interpreted as in slice notation.\n' + '\n' + ' If *sub* is empty, returns the number of empty strings ' + 'between\n' + ' characters which is the length of the string plus one.\n' + '\n' + "str.encode(encoding='utf-8', errors='strict')\n" + '\n' + ' Return the string encoded to "bytes".\n' + '\n' + ' *encoding* defaults to "\'utf-8\'"; see Standard ' + 'Encodings for\n' + ' possible values.\n' + '\n' + ' *errors* controls how encoding errors are handled. If ' + '"\'strict\'"\n' + ' (the default), a "UnicodeError" exception is raised. 
' + 'Other possible\n' + ' values are "\'ignore\'", "\'replace\'", ' + '"\'xmlcharrefreplace\'",\n' + ' "\'backslashreplace\'" and any other name registered ' + 'via\n' + ' "codecs.register_error()". See Error Handlers for ' + 'details.\n' + '\n' + ' For performance reasons, the value of *errors* is not ' + 'checked for\n' + ' validity unless an encoding error actually occurs, ' + 'Python\n' + ' Development Mode is enabled or a debug build is used.\n' + '\n' + ' Changed in version 3.1: Added support for keyword ' + 'arguments.\n' + '\n' + ' Changed in version 3.9: The value of the *errors* ' + 'argument is now\n' + ' checked in Python Development Mode and in debug mode.\n' + '\n' + 'str.endswith(suffix[, start[, end]])\n' + '\n' + ' Return "True" if the string ends with the specified ' + '*suffix*,\n' + ' otherwise return "False". *suffix* can also be a tuple ' + 'of suffixes\n' + ' to look for. With optional *start*, test beginning at ' + 'that\n' + ' position. With optional *end*, stop comparing at that ' + 'position.\n' + '\n' + 'str.expandtabs(tabsize=8)\n' + '\n' + ' Return a copy of the string where all tab characters ' + 'are replaced\n' + ' by one or more spaces, depending on the current column ' + 'and the\n' + ' given tab size. Tab positions occur every *tabsize* ' + 'characters\n' + ' (default is 8, giving tab positions at columns 0, 8, 16 ' + 'and so on).\n' + ' To expand the string, the current column is set to zero ' + 'and the\n' + ' string is examined character by character. If the ' + 'character is a\n' + ' tab ("\\t"), one or more space characters are inserted ' + 'in the result\n' + ' until the current column is equal to the next tab ' + 'position. (The\n' + ' tab character itself is not copied.) If the character ' + 'is a newline\n' + ' ("\\n") or return ("\\r"), it is copied and the current ' + 'column is\n' + ' reset to zero. 
Any other character is copied unchanged ' + 'and the\n' + ' current column is incremented by one regardless of how ' + 'the\n' + ' character is represented when printed.\n' + '\n' + " >>> '01\\t012\\t0123\\t01234'.expandtabs()\n" + " '01 012 0123 01234'\n" + " >>> '01\\t012\\t0123\\t01234'.expandtabs(4)\n" + " '01 012 0123 01234'\n" + '\n' + 'str.find(sub[, start[, end]])\n' + '\n' + ' Return the lowest index in the string where substring ' + '*sub* is\n' + ' found within the slice "s[start:end]". Optional ' + 'arguments *start*\n' + ' and *end* are interpreted as in slice notation. Return ' + '"-1" if\n' + ' *sub* is not found.\n' + '\n' + ' Note:\n' + '\n' + ' The "find()" method should be used only if you need ' + 'to know the\n' + ' position of *sub*. To check if *sub* is a substring ' + 'or not, use\n' + ' the "in" operator:\n' + '\n' + " >>> 'Py' in 'Python'\n" + ' True\n' + '\n' + 'str.format(*args, **kwargs)\n' + '\n' + ' Perform a string formatting operation. The string on ' + 'which this\n' + ' method is called can contain literal text or ' + 'replacement fields\n' + ' delimited by braces "{}". Each replacement field ' + 'contains either\n' + ' the numeric index of a positional argument, or the name ' + 'of a\n' + ' keyword argument. 
Returns a copy of the string where ' + 'each\n' + ' replacement field is replaced with the string value of ' + 'the\n' + ' corresponding argument.\n' + '\n' + ' >>> "The sum of 1 + 2 is {0}".format(1+2)\n' + " 'The sum of 1 + 2 is 3'\n" + '\n' + ' See Format String Syntax for a description of the ' + 'various\n' + ' formatting options that can be specified in format ' + 'strings.\n' + '\n' + ' Note:\n' + '\n' + ' When formatting a number ("int", "float", "complex",\n' + ' "decimal.Decimal" and subclasses) with the "n" type ' + '(ex:\n' + ' "\'{:n}\'.format(1234)"), the function temporarily ' + 'sets the\n' + ' "LC_CTYPE" locale to the "LC_NUMERIC" locale to ' + 'decode\n' + ' "decimal_point" and "thousands_sep" fields of ' + '"localeconv()" if\n' + ' they are non-ASCII or longer than 1 byte, and the ' + '"LC_NUMERIC"\n' + ' locale is different than the "LC_CTYPE" locale. This ' + 'temporary\n' + ' change affects other threads.\n' + '\n' + ' Changed in version 3.7: When formatting a number with ' + 'the "n" type,\n' + ' the function sets temporarily the "LC_CTYPE" locale to ' + 'the\n' + ' "LC_NUMERIC" locale in some cases.\n' + '\n' + 'str.format_map(mapping)\n' + '\n' + ' Similar to "str.format(**mapping)", except that ' + '"mapping" is used\n' + ' directly and not copied to a "dict". This is useful if ' + 'for example\n' + ' "mapping" is a dict subclass:\n' + '\n' + ' >>> class Default(dict):\n' + ' ... def __missing__(self, key):\n' + ' ... return key\n' + ' ...\n' + " >>> '{name} was born in " + "{country}'.format_map(Default(name='Guido'))\n" + " 'Guido was born in country'\n" + '\n' + ' New in version 3.2.\n' + '\n' + 'str.index(sub[, start[, end]])\n' + '\n' + ' Like "find()", but raise "ValueError" when the ' + 'substring is not\n' + ' found.\n' + '\n' + 'str.isalnum()\n' + '\n' + ' Return "True" if all characters in the string are ' + 'alphanumeric and\n' + ' there is at least one character, "False" otherwise. 
A ' + 'character\n' + ' "c" is alphanumeric if one of the following returns ' + '"True":\n' + ' "c.isalpha()", "c.isdecimal()", "c.isdigit()", or ' + '"c.isnumeric()".\n' + '\n' + 'str.isalpha()\n' + '\n' + ' Return "True" if all characters in the string are ' + 'alphabetic and\n' + ' there is at least one character, "False" otherwise. ' + 'Alphabetic\n' + ' characters are those characters defined in the Unicode ' + 'character\n' + ' database as “Letter”, i.e., those with general category ' + 'property\n' + ' being one of “Lm”, “Lt”, “Lu”, “Ll”, or “Lo”. Note ' + 'that this is\n' + ' different from the “Alphabetic” property defined in the ' + 'Unicode\n' + ' Standard.\n' + '\n' + 'str.isascii()\n' + '\n' + ' Return "True" if the string is empty or all characters ' + 'in the\n' + ' string are ASCII, "False" otherwise. ASCII characters ' + 'have code\n' + ' points in the range U+0000-U+007F.\n' + '\n' + ' New in version 3.7.\n' + '\n' + 'str.isdecimal()\n' + '\n' + ' Return "True" if all characters in the string are ' + 'decimal\n' + ' characters and there is at least one character, "False" ' + 'otherwise.\n' + ' Decimal characters are those that can be used to form ' + 'numbers in\n' + ' base 10, e.g. U+0660, ARABIC-INDIC DIGIT ZERO. ' + 'Formally a decimal\n' + ' character is a character in the Unicode General ' + 'Category “Nd”.\n' + '\n' + 'str.isdigit()\n' + '\n' + ' Return "True" if all characters in the string are ' + 'digits and there\n' + ' is at least one character, "False" otherwise. Digits ' + 'include\n' + ' decimal characters and digits that need special ' + 'handling, such as\n' + ' the compatibility superscript digits. This covers ' + 'digits which\n' + ' cannot be used to form numbers in base 10, like the ' + 'Kharosthi\n' + ' numbers. 
Formally, a digit is a character that has the ' + 'property\n' + ' value Numeric_Type=Digit or Numeric_Type=Decimal.\n' + '\n' + 'str.isidentifier()\n' + '\n' + ' Return "True" if the string is a valid identifier ' + 'according to the\n' + ' language definition, section Identifiers and keywords.\n' + '\n' + ' Call "keyword.iskeyword()" to test whether string "s" ' + 'is a reserved\n' + ' identifier, such as "def" and "class".\n' + '\n' + ' Example:\n' + '\n' + ' >>> from keyword import iskeyword\n' + '\n' + " >>> 'hello'.isidentifier(), iskeyword('hello')\n" + ' (True, False)\n' + " >>> 'def'.isidentifier(), iskeyword('def')\n" + ' (True, True)\n' + '\n' + 'str.islower()\n' + '\n' + ' Return "True" if all cased characters [4] in the string ' + 'are\n' + ' lowercase and there is at least one cased character, ' + '"False"\n' + ' otherwise.\n' + '\n' + 'str.isnumeric()\n' + '\n' + ' Return "True" if all characters in the string are ' + 'numeric\n' + ' characters, and there is at least one character, ' + '"False" otherwise.\n' + ' Numeric characters include digit characters, and all ' + 'characters\n' + ' that have the Unicode numeric value property, e.g. ' + 'U+2155, VULGAR\n' + ' FRACTION ONE FIFTH. Formally, numeric characters are ' + 'those with\n' + ' the property value Numeric_Type=Digit, ' + 'Numeric_Type=Decimal or\n' + ' Numeric_Type=Numeric.\n' + '\n' + 'str.isprintable()\n' + '\n' + ' Return "True" if all characters in the string are ' + 'printable or the\n' + ' string is empty, "False" otherwise. Nonprintable ' + 'characters are\n' + ' those characters defined in the Unicode character ' + 'database as\n' + ' “Other” or “Separator”, excepting the ASCII space ' + '(0x20) which is\n' + ' considered printable. (Note that printable characters ' + 'in this\n' + ' context are those which should not be escaped when ' + '"repr()" is\n' + ' invoked on a string. 
It has no bearing on the handling ' + 'of strings\n' + ' written to "sys.stdout" or "sys.stderr".)\n' + '\n' + 'str.isspace()\n' + '\n' + ' Return "True" if there are only whitespace characters ' + 'in the string\n' + ' and there is at least one character, "False" ' + 'otherwise.\n' + '\n' + ' A character is *whitespace* if in the Unicode character ' + 'database\n' + ' (see "unicodedata"), either its general category is ' + '"Zs"\n' + ' (“Separator, space”), or its bidirectional class is one ' + 'of "WS",\n' + ' "B", or "S".\n' + '\n' + 'str.istitle()\n' + '\n' + ' Return "True" if the string is a titlecased string and ' + 'there is at\n' + ' least one character, for example uppercase characters ' + 'may only\n' + ' follow uncased characters and lowercase characters only ' + 'cased ones.\n' + ' Return "False" otherwise.\n' + '\n' + 'str.isupper()\n' + '\n' + ' Return "True" if all cased characters [4] in the string ' + 'are\n' + ' uppercase and there is at least one cased character, ' + '"False"\n' + ' otherwise.\n' + '\n' + " >>> 'BANANA'.isupper()\n" + ' True\n' + " >>> 'banana'.isupper()\n" + ' False\n' + " >>> 'baNana'.isupper()\n" + ' False\n' + " >>> ' '.isupper()\n" + ' False\n' + '\n' + 'str.join(iterable)\n' + '\n' + ' Return a string which is the concatenation of the ' + 'strings in\n' + ' *iterable*. A "TypeError" will be raised if there are ' + 'any non-\n' + ' string values in *iterable*, including "bytes" ' + 'objects. The\n' + ' separator between elements is the string providing this ' + 'method.\n' + '\n' + 'str.ljust(width[, fillchar])\n' + '\n' + ' Return the string left justified in a string of length ' + '*width*.\n' + ' Padding is done using the specified *fillchar* (default ' + 'is an ASCII\n' + ' space). 
The original string is returned if *width* is ' + 'less than or\n' + ' equal to "len(s)".\n' + '\n' + 'str.lower()\n' + '\n' + ' Return a copy of the string with all the cased ' + 'characters [4]\n' + ' converted to lowercase.\n' + '\n' + ' The lowercasing algorithm used is described in section ' + '3.13 of the\n' + ' Unicode Standard.\n' + '\n' + 'str.lstrip([chars])\n' + '\n' + ' Return a copy of the string with leading characters ' + 'removed. The\n' + ' *chars* argument is a string specifying the set of ' + 'characters to be\n' + ' removed. If omitted or "None", the *chars* argument ' + 'defaults to\n' + ' removing whitespace. The *chars* argument is not a ' + 'prefix; rather,\n' + ' all combinations of its values are stripped:\n' + '\n' + " >>> ' spacious '.lstrip()\n" + " 'spacious '\n" + " >>> 'www.example.com'.lstrip('cmowz.')\n" + " 'example.com'\n" + '\n' + ' See "str.removeprefix()" for a method that will remove ' + 'a single\n' + ' prefix string rather than all of a set of characters. ' + 'For example:\n' + '\n' + " >>> 'Arthur: three!'.lstrip('Arthur: ')\n" + " 'ee!'\n" + " >>> 'Arthur: three!'.removeprefix('Arthur: ')\n" + " 'three!'\n" + '\n' + 'static str.maketrans(x[, y[, z]])\n' + '\n' + ' This static method returns a translation table usable ' + 'for\n' + ' "str.translate()".\n' + '\n' + ' If there is only one argument, it must be a dictionary ' + 'mapping\n' + ' Unicode ordinals (integers) or characters (strings of ' + 'length 1) to\n' + ' Unicode ordinals, strings (of arbitrary lengths) or ' + '"None".\n' + ' Character keys will then be converted to ordinals.\n' + '\n' + ' If there are two arguments, they must be strings of ' + 'equal length,\n' + ' and in the resulting dictionary, each character in x ' + 'will be mapped\n' + ' to the character at the same position in y. 
If there ' + 'is a third\n' + ' argument, it must be a string, whose characters will be ' + 'mapped to\n' + ' "None" in the result.\n' + '\n' + 'str.partition(sep)\n' + '\n' + ' Split the string at the first occurrence of *sep*, and ' + 'return a\n' + ' 3-tuple containing the part before the separator, the ' + 'separator\n' + ' itself, and the part after the separator. If the ' + 'separator is not\n' + ' found, return a 3-tuple containing the string itself, ' + 'followed by\n' + ' two empty strings.\n' + '\n' + 'str.removeprefix(prefix, /)\n' + '\n' + ' If the string starts with the *prefix* string, return\n' + ' "string[len(prefix):]". Otherwise, return a copy of the ' + 'original\n' + ' string:\n' + '\n' + " >>> 'TestHook'.removeprefix('Test')\n" + " 'Hook'\n" + " >>> 'BaseTestCase'.removeprefix('Test')\n" + " 'BaseTestCase'\n" + '\n' + ' New in version 3.9.\n' + '\n' + 'str.removesuffix(suffix, /)\n' + '\n' + ' If the string ends with the *suffix* string and that ' + '*suffix* is\n' + ' not empty, return "string[:-len(suffix)]". Otherwise, ' + 'return a copy\n' + ' of the original string:\n' + '\n' + " >>> 'MiscTests'.removesuffix('Tests')\n" + " 'Misc'\n" + " >>> 'TmpDirMixin'.removesuffix('Tests')\n" + " 'TmpDirMixin'\n" + '\n' + ' New in version 3.9.\n' + '\n' + 'str.replace(old, new[, count])\n' + '\n' + ' Return a copy of the string with all occurrences of ' + 'substring *old*\n' + ' replaced by *new*. If the optional argument *count* is ' + 'given, only\n' + ' the first *count* occurrences are replaced.\n' + '\n' + 'str.rfind(sub[, start[, end]])\n' + '\n' + ' Return the highest index in the string where substring ' + '*sub* is\n' + ' found, such that *sub* is contained within ' + '"s[start:end]".\n' + ' Optional arguments *start* and *end* are interpreted as ' + 'in slice\n' + ' notation. 
Return "-1" on failure.\n' + '\n' + 'str.rindex(sub[, start[, end]])\n' + '\n' + ' Like "rfind()" but raises "ValueError" when the ' + 'substring *sub* is\n' + ' not found.\n' + '\n' + 'str.rjust(width[, fillchar])\n' + '\n' + ' Return the string right justified in a string of length ' + '*width*.\n' + ' Padding is done using the specified *fillchar* (default ' + 'is an ASCII\n' + ' space). The original string is returned if *width* is ' + 'less than or\n' + ' equal to "len(s)".\n' + '\n' + 'str.rpartition(sep)\n' + '\n' + ' Split the string at the last occurrence of *sep*, and ' + 'return a\n' + ' 3-tuple containing the part before the separator, the ' + 'separator\n' + ' itself, and the part after the separator. If the ' + 'separator is not\n' + ' found, return a 3-tuple containing two empty strings, ' + 'followed by\n' + ' the string itself.\n' + '\n' + 'str.rsplit(sep=None, maxsplit=- 1)\n' + '\n' + ' Return a list of the words in the string, using *sep* ' + 'as the\n' + ' delimiter string. If *maxsplit* is given, at most ' + '*maxsplit* splits\n' + ' are done, the *rightmost* ones. If *sep* is not ' + 'specified or\n' + ' "None", any whitespace string is a separator. Except ' + 'for splitting\n' + ' from the right, "rsplit()" behaves like "split()" which ' + 'is\n' + ' described in detail below.\n' + '\n' + 'str.rstrip([chars])\n' + '\n' + ' Return a copy of the string with trailing characters ' + 'removed. The\n' + ' *chars* argument is a string specifying the set of ' + 'characters to be\n' + ' removed. If omitted or "None", the *chars* argument ' + 'defaults to\n' + ' removing whitespace. The *chars* argument is not a ' + 'suffix; rather,\n' + ' all combinations of its values are stripped:\n' + '\n' + " >>> ' spacious '.rstrip()\n" + " ' spacious'\n" + " >>> 'mississippi'.rstrip('ipz')\n" + " 'mississ'\n" + '\n' + ' See "str.removesuffix()" for a method that will remove ' + 'a single\n' + ' suffix string rather than all of a set of characters. 
' + 'For example:\n' + '\n' + " >>> 'Monty Python'.rstrip(' Python')\n" + " 'M'\n" + " >>> 'Monty Python'.removesuffix(' Python')\n" + " 'Monty'\n" + '\n' + 'str.split(sep=None, maxsplit=- 1)\n' + '\n' + ' Return a list of the words in the string, using *sep* ' + 'as the\n' + ' delimiter string. If *maxsplit* is given, at most ' + '*maxsplit*\n' + ' splits are done (thus, the list will have at most ' + '"maxsplit+1"\n' + ' elements). If *maxsplit* is not specified or "-1", ' + 'then there is\n' + ' no limit on the number of splits (all possible splits ' + 'are made).\n' + '\n' + ' If *sep* is given, consecutive delimiters are not ' + 'grouped together\n' + ' and are deemed to delimit empty strings (for example,\n' + ' "\'1,,2\'.split(\',\')" returns "[\'1\', \'\', ' + '\'2\']"). The *sep* argument\n' + ' may consist of multiple characters (for example,\n' + ' "\'1<>2<>3\'.split(\'<>\')" returns "[\'1\', \'2\', ' + '\'3\']"). Splitting an\n' + ' empty string with a specified separator returns ' + '"[\'\']".\n' + '\n' + ' For example:\n' + '\n' + " >>> '1,2,3'.split(',')\n" + " ['1', '2', '3']\n" + " >>> '1,2,3'.split(',', maxsplit=1)\n" + " ['1', '2,3']\n" + " >>> '1,2,,3,'.split(',')\n" + " ['1', '2', '', '3', '']\n" + '\n' + ' If *sep* is not specified or is "None", a different ' + 'splitting\n' + ' algorithm is applied: runs of consecutive whitespace ' + 'are regarded\n' + ' as a single separator, and the result will contain no ' + 'empty strings\n' + ' at the start or end if the string has leading or ' + 'trailing\n' + ' whitespace. 
Consequently, splitting an empty string or ' + 'a string\n' + ' consisting of just whitespace with a "None" separator ' + 'returns "[]".\n' + '\n' + ' For example:\n' + '\n' + " >>> '1 2 3'.split()\n" + " ['1', '2', '3']\n" + " >>> '1 2 3'.split(maxsplit=1)\n" + " ['1', '2 3']\n" + " >>> ' 1 2 3 '.split()\n" + " ['1', '2', '3']\n" + '\n' + 'str.splitlines(keepends=False)\n' + '\n' + ' Return a list of the lines in the string, breaking at ' + 'line\n' + ' boundaries. Line breaks are not included in the ' + 'resulting list\n' + ' unless *keepends* is given and true.\n' + '\n' + ' This method splits on the following line boundaries. ' + 'In\n' + ' particular, the boundaries are a superset of *universal ' + 'newlines*.\n' + '\n' + ' ' + '+-------------------------+-------------------------------+\n' + ' | Representation | ' + 'Description |\n' + ' ' + '|=========================|===============================|\n' + ' | "\\n" | Line ' + 'Feed |\n' + ' ' + '+-------------------------+-------------------------------+\n' + ' | "\\r" | Carriage ' + 'Return |\n' + ' ' + '+-------------------------+-------------------------------+\n' + ' | "\\r\\n" | Carriage Return + Line ' + 'Feed |\n' + ' ' + '+-------------------------+-------------------------------+\n' + ' | "\\v" or "\\x0b" | Line ' + 'Tabulation |\n' + ' ' + '+-------------------------+-------------------------------+\n' + ' | "\\f" or "\\x0c" | Form ' + 'Feed |\n' + ' ' + '+-------------------------+-------------------------------+\n' + ' | "\\x1c" | File ' + 'Separator |\n' + ' ' + '+-------------------------+-------------------------------+\n' + ' | "\\x1d" | Group ' + 'Separator |\n' + ' ' + '+-------------------------+-------------------------------+\n' + ' | "\\x1e" | Record ' + 'Separator |\n' + ' ' + '+-------------------------+-------------------------------+\n' + ' | "\\x85" | Next Line (C1 Control ' + 'Code) |\n' + ' ' + '+-------------------------+-------------------------------+\n' + ' | "\\u2028" | Line 
' + 'Separator |\n' + ' ' + '+-------------------------+-------------------------------+\n' + ' | "\\u2029" | Paragraph ' + 'Separator |\n' + ' ' + '+-------------------------+-------------------------------+\n' + '\n' + ' Changed in version 3.2: "\\v" and "\\f" added to list ' + 'of line\n' + ' boundaries.\n' + '\n' + ' For example:\n' + '\n' + " >>> 'ab c\\n\\nde fg\\rkl\\r\\n'.splitlines()\n" + " ['ab c', '', 'de fg', 'kl']\n" + " >>> 'ab c\\n\\nde " + "fg\\rkl\\r\\n'.splitlines(keepends=True)\n" + " ['ab c\\n', '\\n', 'de fg\\r', 'kl\\r\\n']\n" + '\n' + ' Unlike "split()" when a delimiter string *sep* is ' + 'given, this\n' + ' method returns an empty list for the empty string, and ' + 'a terminal\n' + ' line break does not result in an extra line:\n' + '\n' + ' >>> "".splitlines()\n' + ' []\n' + ' >>> "One line\\n".splitlines()\n' + " ['One line']\n" + '\n' + ' For comparison, "split(\'\\n\')" gives:\n' + '\n' + " >>> ''.split('\\n')\n" + " ['']\n" + " >>> 'Two lines\\n'.split('\\n')\n" + " ['Two lines', '']\n" + '\n' + 'str.startswith(prefix[, start[, end]])\n' + '\n' + ' Return "True" if string starts with the *prefix*, ' + 'otherwise return\n' + ' "False". *prefix* can also be a tuple of prefixes to ' + 'look for.\n' + ' With optional *start*, test string beginning at that ' + 'position.\n' + ' With optional *end*, stop comparing string at that ' + 'position.\n' + '\n' + 'str.strip([chars])\n' + '\n' + ' Return a copy of the string with the leading and ' + 'trailing\n' + ' characters removed. The *chars* argument is a string ' + 'specifying the\n' + ' set of characters to be removed. If omitted or "None", ' + 'the *chars*\n' + ' argument defaults to removing whitespace. 
The *chars* ' + 'argument is\n' + ' not a prefix or suffix; rather, all combinations of its ' + 'values are\n' + ' stripped:\n' + '\n' + " >>> ' spacious '.strip()\n" + " 'spacious'\n" + " >>> 'www.example.com'.strip('cmowz.')\n" + " 'example'\n" + '\n' + ' The outermost leading and trailing *chars* argument ' + 'values are\n' + ' stripped from the string. Characters are removed from ' + 'the leading\n' + ' end until reaching a string character that is not ' + 'contained in the\n' + ' set of characters in *chars*. A similar action takes ' + 'place on the\n' + ' trailing end. For example:\n' + '\n' + " >>> comment_string = '#....... Section 3.2.1 Issue " + "#32 .......'\n" + " >>> comment_string.strip('.#! ')\n" + " 'Section 3.2.1 Issue #32'\n" + '\n' + 'str.swapcase()\n' + '\n' + ' Return a copy of the string with uppercase characters ' + 'converted to\n' + ' lowercase and vice versa. Note that it is not ' + 'necessarily true that\n' + ' "s.swapcase().swapcase() == s".\n' + '\n' + 'str.title()\n' + '\n' + ' Return a titlecased version of the string where words ' + 'start with an\n' + ' uppercase character and the remaining characters are ' + 'lowercase.\n' + '\n' + ' For example:\n' + '\n' + " >>> 'Hello world'.title()\n" + " 'Hello World'\n" + '\n' + ' The algorithm uses a simple language-independent ' + 'definition of a\n' + ' word as groups of consecutive letters. 
The definition ' + 'works in\n' + ' many contexts but it means that apostrophes in ' + 'contractions and\n' + ' possessives form word boundaries, which may not be the ' + 'desired\n' + ' result:\n' + '\n' + ' >>> "they\'re bill\'s friends from the UK".title()\n' + ' "They\'Re Bill\'S Friends From The Uk"\n' + '\n' + ' The "string.capwords()" function does not have this ' + 'problem, as it\n' + ' splits words on spaces only.\n' + '\n' + ' Alternatively, a workaround for apostrophes can be ' + 'constructed\n' + ' using regular expressions:\n' + '\n' + ' >>> import re\n' + ' >>> def titlecase(s):\n' + ' ... return re.sub(r"[A-Za-z]+(\'[A-Za-z]+)?",\n' + ' ... lambda mo: ' + 'mo.group(0).capitalize(),\n' + ' ... s)\n' + ' ...\n' + ' >>> titlecase("they\'re bill\'s friends.")\n' + ' "They\'re Bill\'s Friends."\n' + '\n' + 'str.translate(table)\n' + '\n' + ' Return a copy of the string in which each character has ' + 'been mapped\n' + ' through the given translation table. The table must be ' + 'an object\n' + ' that implements indexing via "__getitem__()", typically ' + 'a *mapping*\n' + ' or *sequence*. When indexed by a Unicode ordinal (an ' + 'integer), the\n' + ' table object can do any of the following: return a ' + 'Unicode ordinal\n' + ' or a string, to map the character to one or more other ' + 'characters;\n' + ' return "None", to delete the character from the return ' + 'string; or\n' + ' raise a "LookupError" exception, to map the character ' + 'to itself.\n' + '\n' + ' You can use "str.maketrans()" to create a translation ' + 'map from\n' + ' character-to-character mappings in different formats.\n' + '\n' + ' See also the "codecs" module for a more flexible ' + 'approach to custom\n' + ' character mappings.\n' + '\n' + 'str.upper()\n' + '\n' + ' Return a copy of the string with all the cased ' + 'characters [4]\n' + ' converted to uppercase. 
Note that ' + '"s.upper().isupper()" might be\n' + ' "False" if "s" contains uncased characters or if the ' + 'Unicode\n' + ' category of the resulting character(s) is not “Lu” ' + '(Letter,\n' + ' uppercase), but e.g. “Lt” (Letter, titlecase).\n' + '\n' + ' The uppercasing algorithm used is described in section ' + '3.13 of the\n' + ' Unicode Standard.\n' + '\n' + 'str.zfill(width)\n' + '\n' + ' Return a copy of the string left filled with ASCII ' + '"\'0\'" digits to\n' + ' make a string of length *width*. A leading sign prefix\n' + ' ("\'+\'"/"\'-\'") is handled by inserting the padding ' + '*after* the sign\n' + ' character rather than before. The original string is ' + 'returned if\n' + ' *width* is less than or equal to "len(s)".\n' + '\n' + ' For example:\n' + '\n' + ' >>> "42".zfill(5)\n' + " '00042'\n" + ' >>> "-42".zfill(5)\n' + " '-0042'\n", + 'strings': 'String and Bytes literals\n' + '*************************\n' + '\n' + 'String literals are described by the following lexical ' + 'definitions:\n' + '\n' + ' stringliteral ::= [stringprefix](shortstring | longstring)\n' + ' stringprefix ::= "r" | "u" | "R" | "U" | "f" | "F"\n' + ' | "fr" | "Fr" | "fR" | "FR" | "rf" | "rF" | ' + '"Rf" | "RF"\n' + ' shortstring ::= "\'" shortstringitem* "\'" | \'"\' ' + 'shortstringitem* \'"\'\n' + ' longstring ::= "\'\'\'" longstringitem* "\'\'\'" | ' + '\'"""\' longstringitem* \'"""\'\n' + ' shortstringitem ::= shortstringchar | stringescapeseq\n' + ' longstringitem ::= longstringchar | stringescapeseq\n' + ' shortstringchar ::= \n' + ' longstringchar ::= \n' + ' stringescapeseq ::= "\\" \n' + '\n' + ' bytesliteral ::= bytesprefix(shortbytes | longbytes)\n' + ' bytesprefix ::= "b" | "B" | "br" | "Br" | "bR" | "BR" | ' + '"rb" | "rB" | "Rb" | "RB"\n' + ' shortbytes ::= "\'" shortbytesitem* "\'" | \'"\' ' + 'shortbytesitem* \'"\'\n' + ' longbytes ::= "\'\'\'" longbytesitem* "\'\'\'" | \'"""\' ' + 'longbytesitem* \'"""\'\n' + ' shortbytesitem ::= shortbyteschar | 
bytesescapeseq\n' + ' longbytesitem ::= longbyteschar | bytesescapeseq\n' + ' shortbyteschar ::= \n' + ' longbyteschar ::= \n' + ' bytesescapeseq ::= "\\" \n' + '\n' + 'One syntactic restriction not indicated by these productions is ' + 'that\n' + 'whitespace is not allowed between the "stringprefix" or ' + '"bytesprefix"\n' + 'and the rest of the literal. The source character set is defined ' + 'by\n' + 'the encoding declaration; it is UTF-8 if no encoding declaration ' + 'is\n' + 'given in the source file; see section Encoding declarations.\n' + '\n' + 'In plain English: Both types of literals can be enclosed in ' + 'matching\n' + 'single quotes ("\'") or double quotes ("""). They can also be ' + 'enclosed\n' + 'in matching groups of three single or double quotes (these are\n' + 'generally referred to as *triple-quoted strings*). The backslash ' + '("\\")\n' + 'character is used to give special meaning to otherwise ordinary\n' + 'characters like "n", which means ‘newline’ when escaped ("\\n"). ' + 'It can\n' + 'also be used to escape characters that otherwise have a special\n' + 'meaning, such as newline, backslash itself, or the quote ' + 'character.\n' + 'See escape sequences below for examples.\n' + '\n' + 'Bytes literals are always prefixed with "\'b\'" or "\'B\'"; they ' + 'produce\n' + 'an instance of the "bytes" type instead of the "str" type. They ' + 'may\n' + 'only contain ASCII characters; bytes with a numeric value of 128 ' + 'or\n' + 'greater must be expressed with escapes.\n' + '\n' + 'Both string and bytes literals may optionally be prefixed with a\n' + 'letter "\'r\'" or "\'R\'"; such strings are called *raw strings* ' + 'and treat\n' + 'backslashes as literal characters. 
As a result, in string ' + 'literals,\n' + '"\'\\U\'" and "\'\\u\'" escapes in raw strings are not treated ' + 'specially.\n' + 'Given that Python 2.x’s raw unicode literals behave differently ' + 'than\n' + 'Python 3.x’s the "\'ur\'" syntax is not supported.\n' + '\n' + 'New in version 3.3: The "\'rb\'" prefix of raw bytes literals has ' + 'been\n' + 'added as a synonym of "\'br\'".\n' + '\n' + 'New in version 3.3: Support for the unicode legacy literal\n' + '("u\'value\'") was reintroduced to simplify the maintenance of ' + 'dual\n' + 'Python 2.x and 3.x codebases. See **PEP 414** for more ' + 'information.\n' + '\n' + 'A string literal with "\'f\'" or "\'F\'" in its prefix is a ' + '*formatted\n' + 'string literal*; see Formatted string literals. The "\'f\'" may ' + 'be\n' + 'combined with "\'r\'", but not with "\'b\'" or "\'u\'", therefore ' + 'raw\n' + 'formatted strings are possible, but formatted bytes literals are ' + 'not.\n' + '\n' + 'In triple-quoted literals, unescaped newlines and quotes are ' + 'allowed\n' + '(and are retained), except that three unescaped quotes in a row\n' + 'terminate the literal. (A “quote” is the character used to open ' + 'the\n' + 'literal, i.e. either "\'" or """.)\n' + '\n' + 'Unless an "\'r\'" or "\'R\'" prefix is present, escape sequences ' + 'in string\n' + 'and bytes literals are interpreted according to rules similar to ' + 'those\n' + 'used by Standard C. 
The recognized escape sequences are:\n' + '\n' + '+-------------------+-----------------------------------+---------+\n' + '| Escape Sequence | Meaning | Notes ' + '|\n' + '|===================|===================================|=========|\n' + '| "\\" | Backslash and newline ignored | ' + '(1) |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\\\" | Backslash ("\\") ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\\'" | Single quote ("\'") ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\"" | Double quote (""") ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\a" | ASCII Bell (BEL) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\b" | ASCII Backspace (BS) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\f" | ASCII Formfeed (FF) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\n" | ASCII Linefeed (LF) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\r" | ASCII Carriage Return (CR) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\t" | ASCII Horizontal Tab (TAB) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\v" | ASCII Vertical Tab (VT) ' + '| |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\ooo" | Character with octal value *ooo* | ' + '(2,4) |\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\xhh" | Character with hex value *hh* | ' + '(3,4) |\n' + '+-------------------+-----------------------------------+---------+\n' + '\n' + 'Escape sequences only recognized in string literals are:\n' + '\n' + 
'+-------------------+-----------------------------------+---------+\n' + '| Escape Sequence | Meaning | Notes ' + '|\n' + '|===================|===================================|=========|\n' + '| "\\N{name}" | Character named *name* in the | ' + '(5) |\n' + '| | Unicode database | ' + '|\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\uxxxx" | Character with 16-bit hex value | ' + '(6) |\n' + '| | *xxxx* | ' + '|\n' + '+-------------------+-----------------------------------+---------+\n' + '| "\\Uxxxxxxxx" | Character with 32-bit hex value | ' + '(7) |\n' + '| | *xxxxxxxx* | ' + '|\n' + '+-------------------+-----------------------------------+---------+\n' + '\n' + 'Notes:\n' + '\n' + '1. A backslash can be added at the end of a line to ignore the\n' + ' newline:\n' + '\n' + " >>> 'This string will not include \\\n" + " ... backslashes or newline characters.'\n" + " 'This string will not include backslashes or newline " + "characters.'\n" + '\n' + ' The same result can be achieved using triple-quoted strings, ' + 'or\n' + ' parentheses and string literal concatenation.\n' + '\n' + '2. As in Standard C, up to three octal digits are accepted.\n' + '\n' + '3. Unlike in Standard C, exactly two hex digits are required.\n' + '\n' + '4. In a bytes literal, hexadecimal and octal escapes denote the ' + 'byte\n' + ' with the given value. In a string literal, these escapes ' + 'denote a\n' + ' Unicode character with the given value.\n' + '\n' + '5. Changed in version 3.3: Support for name aliases [1] has been\n' + ' added.\n' + '\n' + '6. Exactly four hex digits are required.\n' + '\n' + '7. Any Unicode character can be encoded this way. Exactly eight ' + 'hex\n' + ' digits are required.\n' + '\n' + 'Unlike Standard C, all unrecognized escape sequences are left in ' + 'the\n' + 'string unchanged, i.e., *the backslash is left in the result*. 
' + '(This\n' + 'behavior is useful when debugging: if an escape sequence is ' + 'mistyped,\n' + 'the resulting output is more easily recognized as broken.) It is ' + 'also\n' + 'important to note that the escape sequences only recognized in ' + 'string\n' + 'literals fall into the category of unrecognized escapes for ' + 'bytes\n' + 'literals.\n' + '\n' + ' Changed in version 3.6: Unrecognized escape sequences produce ' + 'a\n' + ' "DeprecationWarning". In a future Python version they will be ' + 'a\n' + ' "SyntaxWarning" and eventually a "SyntaxError".\n' + '\n' + 'Even in a raw literal, quotes can be escaped with a backslash, ' + 'but the\n' + 'backslash remains in the result; for example, "r"\\""" is a ' + 'valid\n' + 'string literal consisting of two characters: a backslash and a ' + 'double\n' + 'quote; "r"\\"" is not a valid string literal (even a raw string ' + 'cannot\n' + 'end in an odd number of backslashes). Specifically, *a raw ' + 'literal\n' + 'cannot end in a single backslash* (since the backslash would ' + 'escape\n' + 'the following quote character). Note also that a single ' + 'backslash\n' + 'followed by a newline is interpreted as those two characters as ' + 'part\n' + 'of the literal, *not* as a line continuation.\n', + 'subscriptions': 'Subscriptions\n' + '*************\n' + '\n' + 'The subscription of an instance of a container class will ' + 'generally\n' + 'select an element from the container. The subscription of a ' + '*generic\n' + 'class* will generally return a GenericAlias object.\n' + '\n' + ' subscription ::= primary "[" expression_list "]"\n' + '\n' + 'When an object is subscripted, the interpreter will ' + 'evaluate the\n' + 'primary and the expression list.\n' + '\n' + 'The primary must evaluate to an object that supports ' + 'subscription. An\n' + 'object may support subscription through defining one or ' + 'both of\n' + '"__getitem__()" and "__class_getitem__()". 
When the primary ' + 'is\n' + 'subscripted, the evaluated result of the expression list ' + 'will be\n' + 'passed to one of these methods. For more details on when\n' + '"__class_getitem__" is called instead of "__getitem__", ' + 'see\n' + '__class_getitem__ versus __getitem__.\n' + '\n' + 'If the expression list contains at least one comma, it will ' + 'evaluate\n' + 'to a "tuple" containing the items of the expression list. ' + 'Otherwise,\n' + 'the expression list will evaluate to the value of the ' + 'list’s sole\n' + 'member.\n' + '\n' + 'For built-in objects, there are two types of objects that ' + 'support\n' + 'subscription via "__getitem__()":\n' + '\n' + '1. Mappings. If the primary is a *mapping*, the expression ' + 'list must\n' + ' evaluate to an object whose value is one of the keys of ' + 'the\n' + ' mapping, and the subscription selects the value in the ' + 'mapping that\n' + ' corresponds to that key. An example of a builtin mapping ' + 'class is\n' + ' the "dict" class.\n' + '\n' + '2. Sequences. If the primary is a *sequence*, the ' + 'expression list must\n' + ' evaluate to an "int" or a "slice" (as discussed in the ' + 'following\n' + ' section). Examples of builtin sequence classes include ' + 'the "str",\n' + ' "list" and "tuple" classes.\n' + '\n' + 'The formal syntax makes no special provision for negative ' + 'indices in\n' + '*sequences*. However, built-in sequences all provide a ' + '"__getitem__()"\n' + 'method that interprets negative indices by adding the ' + 'length of the\n' + 'sequence to the index so that, for example, "x[-1]" selects ' + 'the last\n' + 'item of "x". The resulting value must be a nonnegative ' + 'integer less\n' + 'than the number of items in the sequence, and the ' + 'subscription selects\n' + 'the item whose index is that value (counting from zero). 
' + 'Since the\n' + 'support for negative indices and slicing occurs in the ' + 'object’s\n' + '"__getitem__()" method, subclasses overriding this method ' + 'will need to\n' + 'explicitly add that support.\n' + '\n' + 'A "string" is a special kind of sequence whose items are ' + '*characters*.\n' + 'A character is not a separate data type but a string of ' + 'exactly one\n' + 'character.\n', + 'truth': 'Truth Value Testing\n' + '*******************\n' + '\n' + 'Any object can be tested for truth value, for use in an "if" or\n' + '"while" condition or as operand of the Boolean operations below.\n' + '\n' + 'By default, an object is considered true unless its class defines\n' + 'either a "__bool__()" method that returns "False" or a "__len__()"\n' + 'method that returns zero, when called with the object. [1] Here ' + 'are\n' + 'most of the built-in objects considered false:\n' + '\n' + '* constants defined to be false: "None" and "False".\n' + '\n' + '* zero of any numeric type: "0", "0.0", "0j", "Decimal(0)",\n' + ' "Fraction(0, 1)"\n' + '\n' + '* empty sequences and collections: "\'\'", "()", "[]", "{}", ' + '"set()",\n' + ' "range(0)"\n' + '\n' + 'Operations and built-in functions that have a Boolean result ' + 'always\n' + 'return "0" or "False" for false and "1" or "True" for true, unless\n' + 'otherwise stated. (Important exception: the Boolean operations ' + '"or"\n' + 'and "and" always return one of their operands.)\n', + 'try': 'The "try" statement\n' + '*******************\n' + '\n' + 'The "try" statement specifies exception handlers and/or cleanup code\n' + 'for a group of statements:\n' + '\n' + ' try_stmt ::= try1_stmt | try2_stmt\n' + ' try1_stmt ::= "try" ":" suite\n' + ' ("except" [expression ["as" identifier]] ":" ' + 'suite)+\n' + ' ["else" ":" suite]\n' + ' ["finally" ":" suite]\n' + ' try2_stmt ::= "try" ":" suite\n' + ' "finally" ":" suite\n' + '\n' + 'The "except" clause(s) specify one or more exception handlers. 
When ' + 'no\n' + 'exception occurs in the "try" clause, no exception handler is\n' + 'executed. When an exception occurs in the "try" suite, a search for ' + 'an\n' + 'exception handler is started. This search inspects the except ' + 'clauses\n' + 'in turn until one is found that matches the exception. An ' + 'expression-\n' + 'less except clause, if present, must be last; it matches any\n' + 'exception. For an except clause with an expression, that expression\n' + 'is evaluated, and the clause matches the exception if the resulting\n' + 'object is “compatible” with the exception. An object is compatible\n' + 'with an exception if the object is the class or a *non-virtual base\n' + 'class* of the exception object, or a tuple containing an item that ' + 'is\n' + 'the class or a non-virtual base class of the exception object.\n' + '\n' + 'If no except clause matches the exception, the search for an ' + 'exception\n' + 'handler continues in the surrounding code and on the invocation ' + 'stack.\n' + '[1]\n' + '\n' + 'If the evaluation of an expression in the header of an except clause\n' + 'raises an exception, the original search for a handler is canceled ' + 'and\n' + 'a search starts for the new exception in the surrounding code and on\n' + 'the call stack (it is treated as if the entire "try" statement ' + 'raised\n' + 'the exception).\n' + '\n' + 'When a matching except clause is found, the exception is assigned to\n' + 'the target specified after the "as" keyword in that except clause, ' + 'if\n' + 'present, and the except clause’s suite is executed. All except\n' + 'clauses must have an executable block. 
When the end of this block ' + 'is\n' + 'reached, execution continues normally after the entire try ' + 'statement.\n' + '(This means that if two nested handlers exist for the same ' + 'exception,\n' + 'and the exception occurs in the try clause of the inner handler, the\n' + 'outer handler will not handle the exception.)\n' + '\n' + 'When an exception has been assigned using "as target", it is cleared\n' + 'at the end of the except clause. This is as if\n' + '\n' + ' except E as N:\n' + ' foo\n' + '\n' + 'was translated to\n' + '\n' + ' except E as N:\n' + ' try:\n' + ' foo\n' + ' finally:\n' + ' del N\n' + '\n' + 'This means the exception must be assigned to a different name to be\n' + 'able to refer to it after the except clause. Exceptions are cleared\n' + 'because with the traceback attached to them, they form a reference\n' + 'cycle with the stack frame, keeping all locals in that frame alive\n' + 'until the next garbage collection occurs.\n' + '\n' + 'Before an except clause’s suite is executed, details about the\n' + 'exception are stored in the "sys" module and can be accessed via\n' + '"sys.exc_info()". "sys.exc_info()" returns a 3-tuple consisting of ' + 'the\n' + 'exception class, the exception instance and a traceback object (see\n' + 'section The standard type hierarchy) identifying the point in the\n' + 'program where the exception occurred. The details about the ' + 'exception\n' + 'accessed via "sys.exc_info()" are restored to their previous values\n' + 'when leaving an exception handler:\n' + '\n' + ' >>> print(sys.exc_info())\n' + ' (None, None, None)\n' + ' >>> try:\n' + ' ... raise TypeError\n' + ' ... except:\n' + ' ... print(sys.exc_info())\n' + ' ... try:\n' + ' ... raise ValueError\n' + ' ... except:\n' + ' ... print(sys.exc_info())\n' + ' ... 
print(sys.exc_info())\n' + ' ...\n' + " (, TypeError(), )\n' + " (, ValueError(), )\n' + " (, TypeError(), )\n' + ' >>> print(sys.exc_info())\n' + ' (None, None, None)\n' + '\n' + 'The optional "else" clause is executed if the control flow leaves ' + 'the\n' + '"try" suite, no exception was raised, and no "return", "continue", ' + 'or\n' + '"break" statement was executed. Exceptions in the "else" clause are\n' + 'not handled by the preceding "except" clauses.\n' + '\n' + 'If "finally" is present, it specifies a ‘cleanup’ handler. The ' + '"try"\n' + 'clause is executed, including any "except" and "else" clauses. If ' + 'an\n' + 'exception occurs in any of the clauses and is not handled, the\n' + 'exception is temporarily saved. The "finally" clause is executed. ' + 'If\n' + 'there is a saved exception it is re-raised at the end of the ' + '"finally"\n' + 'clause. If the "finally" clause raises another exception, the saved\n' + 'exception is set as the context of the new exception. If the ' + '"finally"\n' + 'clause executes a "return", "break" or "continue" statement, the ' + 'saved\n' + 'exception is discarded:\n' + '\n' + ' >>> def f():\n' + ' ... try:\n' + ' ... 1/0\n' + ' ... finally:\n' + ' ... return 42\n' + ' ...\n' + ' >>> f()\n' + ' 42\n' + '\n' + 'The exception information is not available to the program during\n' + 'execution of the "finally" clause.\n' + '\n' + 'When a "return", "break" or "continue" statement is executed in the\n' + '"try" suite of a "try"…"finally" statement, the "finally" clause is\n' + 'also executed ‘on the way out.’\n' + '\n' + 'The return value of a function is determined by the last "return"\n' + 'statement executed. Since the "finally" clause always executes, a\n' + '"return" statement executed in the "finally" clause will always be ' + 'the\n' + 'last one executed:\n' + '\n' + ' >>> def foo():\n' + ' ... try:\n' + " ... return 'try'\n" + ' ... finally:\n' + " ... 
return 'finally'\n" + ' ...\n' + ' >>> foo()\n' + " 'finally'\n" + '\n' + 'Additional information on exceptions can be found in section\n' + 'Exceptions, and information on using the "raise" statement to ' + 'generate\n' + 'exceptions may be found in section The raise statement.\n' + '\n' + 'Changed in version 3.8: Prior to Python 3.8, a "continue" statement\n' + 'was illegal in the "finally" clause due to a problem with the\n' + 'implementation.\n', + 'types': 'The standard type hierarchy\n' + '***************************\n' + '\n' + 'Below is a list of the types that are built into Python. ' + 'Extension\n' + 'modules (written in C, Java, or other languages, depending on the\n' + 'implementation) can define additional types. Future versions of\n' + 'Python may add types to the type hierarchy (e.g., rational ' + 'numbers,\n' + 'efficiently stored arrays of integers, etc.), although such ' + 'additions\n' + 'will often be provided via the standard library instead.\n' + '\n' + 'Some of the type descriptions below contain a paragraph listing\n' + '‘special attributes.’ These are attributes that provide access to ' + 'the\n' + 'implementation and are not intended for general use. Their ' + 'definition\n' + 'may change in the future.\n' + '\n' + 'None\n' + ' This type has a single value. There is a single object with ' + 'this\n' + ' value. This object is accessed through the built-in name "None". ' + 'It\n' + ' is used to signify the absence of a value in many situations, ' + 'e.g.,\n' + ' it is returned from functions that don’t explicitly return\n' + ' anything. Its truth value is false.\n' + '\n' + 'NotImplemented\n' + ' This type has a single value. There is a single object with ' + 'this\n' + ' value. This object is accessed through the built-in name\n' + ' "NotImplemented". Numeric methods and rich comparison methods\n' + ' should return this value if they do not implement the operation ' + 'for\n' + ' the operands provided. 
(The interpreter will then try the\n' + ' reflected operation, or some other fallback, depending on the\n' + ' operator.) It should not be evaluated in a boolean context.\n' + '\n' + ' See Implementing the arithmetic operations for more details.\n' + '\n' + ' Changed in version 3.9: Evaluating "NotImplemented" in a ' + 'boolean\n' + ' context is deprecated. While it currently evaluates as true, it\n' + ' will emit a "DeprecationWarning". It will raise a "TypeError" in ' + 'a\n' + ' future version of Python.\n' + '\n' + 'Ellipsis\n' + ' This type has a single value. There is a single object with ' + 'this\n' + ' value. This object is accessed through the literal "..." or the\n' + ' built-in name "Ellipsis". Its truth value is true.\n' + '\n' + '"numbers.Number"\n' + ' These are created by numeric literals and returned as results ' + 'by\n' + ' arithmetic operators and arithmetic built-in functions. ' + 'Numeric\n' + ' objects are immutable; once created their value never changes.\n' + ' Python numbers are of course strongly related to mathematical\n' + ' numbers, but subject to the limitations of numerical ' + 'representation\n' + ' in computers.\n' + '\n' + ' The string representations of the numeric classes, computed by\n' + ' "__repr__()" and "__str__()", have the following properties:\n' + '\n' + ' * They are valid numeric literals which, when passed to their ' + 'class\n' + ' constructor, produce an object having the value of the ' + 'original\n' + ' numeric.\n' + '\n' + ' * The representation is in base 10, when possible.\n' + '\n' + ' * Leading zeros, possibly excepting a single zero before a ' + 'decimal\n' + ' point, are not shown.\n' + '\n' + ' * Trailing zeros, possibly excepting a single zero after a ' + 'decimal\n' + ' point, are not shown.\n' + '\n' + ' * A sign is shown only when the number is negative.\n' + '\n' + ' Python distinguishes between integers, floating point numbers, ' + 'and\n' + ' complex numbers:\n' + '\n' + ' "numbers.Integral"\n' + ' 
These represent elements from the mathematical set of ' + 'integers\n' + ' (positive and negative).\n' + '\n' + ' There are two types of integers:\n' + '\n' + ' Integers ("int")\n' + ' These represent numbers in an unlimited range, subject to\n' + ' available (virtual) memory only. For the purpose of ' + 'shift\n' + ' and mask operations, a binary representation is assumed, ' + 'and\n' + ' negative numbers are represented in a variant of 2’s\n' + ' complement which gives the illusion of an infinite string ' + 'of\n' + ' sign bits extending to the left.\n' + '\n' + ' Booleans ("bool")\n' + ' These represent the truth values False and True. The two\n' + ' objects representing the values "False" and "True" are ' + 'the\n' + ' only Boolean objects. The Boolean type is a subtype of ' + 'the\n' + ' integer type, and Boolean values behave like the values 0 ' + 'and\n' + ' 1, respectively, in almost all contexts, the exception ' + 'being\n' + ' that when converted to a string, the strings ""False"" or\n' + ' ""True"" are returned, respectively.\n' + '\n' + ' The rules for integer representation are intended to give ' + 'the\n' + ' most meaningful interpretation of shift and mask operations\n' + ' involving negative integers.\n' + '\n' + ' "numbers.Real" ("float")\n' + ' These represent machine-level double precision floating ' + 'point\n' + ' numbers. You are at the mercy of the underlying machine\n' + ' architecture (and C or Java implementation) for the accepted\n' + ' range and handling of overflow. 
Python does not support ' + 'single-\n' + ' precision floating point numbers; the savings in processor ' + 'and\n' + ' memory usage that are usually the reason for using these are\n' + ' dwarfed by the overhead of using objects in Python, so there ' + 'is\n' + ' no reason to complicate the language with two kinds of ' + 'floating\n' + ' point numbers.\n' + '\n' + ' "numbers.Complex" ("complex")\n' + ' These represent complex numbers as a pair of machine-level\n' + ' double precision floating point numbers. The same caveats ' + 'apply\n' + ' as for floating point numbers. The real and imaginary parts ' + 'of a\n' + ' complex number "z" can be retrieved through the read-only\n' + ' attributes "z.real" and "z.imag".\n' + '\n' + 'Sequences\n' + ' These represent finite ordered sets indexed by non-negative\n' + ' numbers. The built-in function "len()" returns the number of ' + 'items\n' + ' of a sequence. When the length of a sequence is *n*, the index ' + 'set\n' + ' contains the numbers 0, 1, …, *n*-1. Item *i* of sequence *a* ' + 'is\n' + ' selected by "a[i]".\n' + '\n' + ' Sequences also support slicing: "a[i:j]" selects all items with\n' + ' index *k* such that *i* "<=" *k* "<" *j*. When used as an\n' + ' expression, a slice is a sequence of the same type. This ' + 'implies\n' + ' that the index set is renumbered so that it starts at 0.\n' + '\n' + ' Some sequences also support “extended slicing” with a third ' + '“step”\n' + ' parameter: "a[i:j:k]" selects all items of *a* with index *x* ' + 'where\n' + ' "x = i + n*k", *n* ">=" "0" and *i* "<=" *x* "<" *j*.\n' + '\n' + ' Sequences are distinguished according to their mutability:\n' + '\n' + ' Immutable sequences\n' + ' An object of an immutable sequence type cannot change once it ' + 'is\n' + ' created. 
(If the object contains references to other ' + 'objects,\n' + ' these other objects may be mutable and may be changed; ' + 'however,\n' + ' the collection of objects directly referenced by an ' + 'immutable\n' + ' object cannot change.)\n' + '\n' + ' The following types are immutable sequences:\n' + '\n' + ' Strings\n' + ' A string is a sequence of values that represent Unicode ' + 'code\n' + ' points. All the code points in the range "U+0000 - ' + 'U+10FFFF"\n' + ' can be represented in a string. Python doesn’t have a ' + '"char"\n' + ' type; instead, every code point in the string is ' + 'represented\n' + ' as a string object with length "1". The built-in ' + 'function\n' + ' "ord()" converts a code point from its string form to an\n' + ' integer in the range "0 - 10FFFF"; "chr()" converts an\n' + ' integer in the range "0 - 10FFFF" to the corresponding ' + 'length\n' + ' "1" string object. "str.encode()" can be used to convert ' + 'a\n' + ' "str" to "bytes" using the given text encoding, and\n' + ' "bytes.decode()" can be used to achieve the opposite.\n' + '\n' + ' Tuples\n' + ' The items of a tuple are arbitrary Python objects. Tuples ' + 'of\n' + ' two or more items are formed by comma-separated lists of\n' + ' expressions. A tuple of one item (a ‘singleton’) can be\n' + ' formed by affixing a comma to an expression (an expression ' + 'by\n' + ' itself does not create a tuple, since parentheses must be\n' + ' usable for grouping of expressions). An empty tuple can ' + 'be\n' + ' formed by an empty pair of parentheses.\n' + '\n' + ' Bytes\n' + ' A bytes object is an immutable array. The items are ' + '8-bit\n' + ' bytes, represented by integers in the range 0 <= x < 256.\n' + ' Bytes literals (like "b\'abc\'") and the built-in ' + '"bytes()"\n' + ' constructor can be used to create bytes objects. 
Also, ' + 'bytes\n' + ' objects can be decoded to strings via the "decode()" ' + 'method.\n' + '\n' + ' Mutable sequences\n' + ' Mutable sequences can be changed after they are created. ' + 'The\n' + ' subscription and slicing notations can be used as the target ' + 'of\n' + ' assignment and "del" (delete) statements.\n' + '\n' + ' There are currently two intrinsic mutable sequence types:\n' + '\n' + ' Lists\n' + ' The items of a list are arbitrary Python objects. Lists ' + 'are\n' + ' formed by placing a comma-separated list of expressions ' + 'in\n' + ' square brackets. (Note that there are no special cases ' + 'needed\n' + ' to form lists of length 0 or 1.)\n' + '\n' + ' Byte Arrays\n' + ' A bytearray object is a mutable array. They are created ' + 'by\n' + ' the built-in "bytearray()" constructor. Aside from being\n' + ' mutable (and hence unhashable), byte arrays otherwise ' + 'provide\n' + ' the same interface and functionality as immutable "bytes"\n' + ' objects.\n' + '\n' + ' The extension module "array" provides an additional example ' + 'of a\n' + ' mutable sequence type, as does the "collections" module.\n' + '\n' + 'Set types\n' + ' These represent unordered, finite sets of unique, immutable\n' + ' objects. As such, they cannot be indexed by any subscript. ' + 'However,\n' + ' they can be iterated over, and the built-in function "len()"\n' + ' returns the number of items in a set. Common uses for sets are ' + 'fast\n' + ' membership testing, removing duplicates from a sequence, and\n' + ' computing mathematical operations such as intersection, union,\n' + ' difference, and symmetric difference.\n' + '\n' + ' For set elements, the same immutability rules apply as for\n' + ' dictionary keys. 
Note that numeric types obey the normal rules ' + 'for\n' + ' numeric comparison: if two numbers compare equal (e.g., "1" and\n' + ' "1.0"), only one of them can be contained in a set.\n' + '\n' + ' There are currently two intrinsic set types:\n' + '\n' + ' Sets\n' + ' These represent a mutable set. They are created by the ' + 'built-in\n' + ' "set()" constructor and can be modified afterwards by ' + 'several\n' + ' methods, such as "add()".\n' + '\n' + ' Frozen sets\n' + ' These represent an immutable set. They are created by the\n' + ' built-in "frozenset()" constructor. As a frozenset is ' + 'immutable\n' + ' and *hashable*, it can be used again as an element of ' + 'another\n' + ' set, or as a dictionary key.\n' + '\n' + 'Mappings\n' + ' These represent finite sets of objects indexed by arbitrary ' + 'index\n' + ' sets. The subscript notation "a[k]" selects the item indexed by ' + '"k"\n' + ' from the mapping "a"; this can be used in expressions and as ' + 'the\n' + ' target of assignments or "del" statements. The built-in ' + 'function\n' + ' "len()" returns the number of items in a mapping.\n' + '\n' + ' There is currently a single intrinsic mapping type:\n' + '\n' + ' Dictionaries\n' + ' These represent finite sets of objects indexed by nearly\n' + ' arbitrary values. 
The only types of values not acceptable ' + 'as\n' + ' keys are values containing lists or dictionaries or other\n' + ' mutable types that are compared by value rather than by ' + 'object\n' + ' identity, the reason being that the efficient implementation ' + 'of\n' + ' dictionaries requires a key’s hash value to remain constant.\n' + ' Numeric types used for keys obey the normal rules for ' + 'numeric\n' + ' comparison: if two numbers compare equal (e.g., "1" and ' + '"1.0")\n' + ' then they can be used interchangeably to index the same\n' + ' dictionary entry.\n' + '\n' + ' Dictionaries preserve insertion order, meaning that keys will ' + 'be\n' + ' produced in the same order they were added sequentially over ' + 'the\n' + ' dictionary. Replacing an existing key does not change the ' + 'order,\n' + ' however removing a key and re-inserting it will add it to ' + 'the\n' + ' end instead of keeping its old place.\n' + '\n' + ' Dictionaries are mutable; they can be created by the "{...}"\n' + ' notation (see section Dictionary displays).\n' + '\n' + ' The extension modules "dbm.ndbm" and "dbm.gnu" provide\n' + ' additional examples of mapping types, as does the ' + '"collections"\n' + ' module.\n' + '\n' + ' Changed in version 3.7: Dictionaries did not preserve ' + 'insertion\n' + ' order in versions of Python before 3.6. In CPython 3.6,\n' + ' insertion order was preserved, but it was considered an\n' + ' implementation detail at that time rather than a language\n' + ' guarantee.\n' + '\n' + 'Callable types\n' + ' These are the types to which the function call operation (see\n' + ' section Calls) can be applied:\n' + '\n' + ' User-defined functions\n' + ' A user-defined function object is created by a function\n' + ' definition (see section Function definitions). 
It should be\n' + ' called with an argument list containing the same number of ' + 'items\n' + ' as the function’s formal parameter list.\n' + '\n' + ' Special attributes:\n' + '\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | Attribute | Meaning ' + '| |\n' + ' ' + '|===========================|=================================|=============|\n' + ' | "__doc__" | The function’s documentation ' + '| Writable |\n' + ' | | string, or "None" if ' + '| |\n' + ' | | unavailable; not inherited by ' + '| |\n' + ' | | subclasses. ' + '| |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__name__" | The function’s name. ' + '| Writable |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__qualname__" | The function’s *qualified ' + '| Writable |\n' + ' | | name*. New in version 3.3. ' + '| |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__module__" | The name of the module the ' + '| Writable |\n' + ' | | function was defined in, or ' + '| |\n' + ' | | "None" if unavailable. ' + '| |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__defaults__" | A tuple containing default ' + '| Writable |\n' + ' | | argument values for those ' + '| |\n' + ' | | arguments that have defaults, ' + '| |\n' + ' | | or "None" if no arguments have ' + '| |\n' + ' | | a default value. ' + '| |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__code__" | The code object representing ' + '| Writable |\n' + ' | | the compiled function body. 
' + '| |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__globals__" | A reference to the dictionary ' + '| Read-only |\n' + ' | | that holds the function’s ' + '| |\n' + ' | | global variables — the global ' + '| |\n' + ' | | namespace of the module in ' + '| |\n' + ' | | which the function was defined. ' + '| |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__dict__" | The namespace supporting ' + '| Writable |\n' + ' | | arbitrary function attributes. ' + '| |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__closure__" | "None" or a tuple of cells that ' + '| Read-only |\n' + ' | | contain bindings for the ' + '| |\n' + ' | | function’s free variables. See ' + '| |\n' + ' | | below for information on the ' + '| |\n' + ' | | "cell_contents" attribute. ' + '| |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__annotations__" | A dict containing annotations ' + '| Writable |\n' + ' | | of parameters. The keys of the ' + '| |\n' + ' | | dict are the parameter names, ' + '| |\n' + ' | | and "\'return\'" for the ' + 'return | |\n' + ' | | annotation, if provided. For ' + '| |\n' + ' | | more information on working ' + '| |\n' + ' | | with this attribute, see ' + '| |\n' + ' | | Annotations Best Practices. ' + '| |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + ' | "__kwdefaults__" | A dict containing defaults for ' + '| Writable |\n' + ' | | keyword-only parameters. 
' + '| |\n' + ' ' + '+---------------------------+---------------------------------+-------------+\n' + '\n' + ' Most of the attributes labelled “Writable” check the type of ' + 'the\n' + ' assigned value.\n' + '\n' + ' Function objects also support getting and setting arbitrary\n' + ' attributes, which can be used, for example, to attach ' + 'metadata\n' + ' to functions. Regular attribute dot-notation is used to get ' + 'and\n' + ' set such attributes. *Note that the current implementation ' + 'only\n' + ' supports function attributes on user-defined functions. ' + 'Function\n' + ' attributes on built-in functions may be supported in the\n' + ' future.*\n' + '\n' + ' A cell object has the attribute "cell_contents". This can be\n' + ' used to get the value of the cell, as well as set the value.\n' + '\n' + ' Additional information about a function’s definition can be\n' + ' retrieved from its code object; see the description of ' + 'internal\n' + ' types below. The "cell" type can be accessed in the "types"\n' + ' module.\n' + '\n' + ' Instance methods\n' + ' An instance method object combines a class, a class instance ' + 'and\n' + ' any callable object (normally a user-defined function).\n' + '\n' + ' Special read-only attributes: "__self__" is the class ' + 'instance\n' + ' object, "__func__" is the function object; "__doc__" is the\n' + ' method’s documentation (same as "__func__.__doc__"); ' + '"__name__"\n' + ' is the method name (same as "__func__.__name__"); ' + '"__module__"\n' + ' is the name of the module the method was defined in, or ' + '"None"\n' + ' if unavailable.\n' + '\n' + ' Methods also support accessing (but not setting) the ' + 'arbitrary\n' + ' function attributes on the underlying function object.\n' + '\n' + ' User-defined method objects may be created when getting an\n' + ' attribute of a class (perhaps via an instance of that class), ' + 'if\n' + ' that attribute is a user-defined function object or a class\n' + ' method object.\n' + 
'\n' + ' When an instance method object is created by retrieving a ' + 'user-\n' + ' defined function object from a class via one of its ' + 'instances,\n' + ' its "__self__" attribute is the instance, and the method ' + 'object\n' + ' is said to be bound. The new method’s "__func__" attribute ' + 'is\n' + ' the original function object.\n' + '\n' + ' When an instance method object is created by retrieving a ' + 'class\n' + ' method object from a class or instance, its "__self__" ' + 'attribute\n' + ' is the class itself, and its "__func__" attribute is the\n' + ' function object underlying the class method.\n' + '\n' + ' When an instance method object is called, the underlying\n' + ' function ("__func__") is called, inserting the class ' + 'instance\n' + ' ("__self__") in front of the argument list. For instance, ' + 'when\n' + ' "C" is a class which contains a definition for a function ' + '"f()",\n' + ' and "x" is an instance of "C", calling "x.f(1)" is equivalent ' + 'to\n' + ' calling "C.f(x, 1)".\n' + '\n' + ' When an instance method object is derived from a class ' + 'method\n' + ' object, the “class instance” stored in "__self__" will ' + 'actually\n' + ' be the class itself, so that calling either "x.f(1)" or ' + '"C.f(1)"\n' + ' is equivalent to calling "f(C,1)" where "f" is the ' + 'underlying\n' + ' function.\n' + '\n' + ' Note that the transformation from function object to ' + 'instance\n' + ' method object happens each time the attribute is retrieved ' + 'from\n' + ' the instance. In some cases, a fruitful optimization is to\n' + ' assign the attribute to a local variable and call that local\n' + ' variable. Also notice that this transformation only happens ' + 'for\n' + ' user-defined functions; other callable objects (and all non-\n' + ' callable objects) are retrieved without transformation. 
It ' + 'is\n' + ' also important to note that user-defined functions which are\n' + ' attributes of a class instance are not converted to bound\n' + ' methods; this *only* happens when the function is an ' + 'attribute\n' + ' of the class.\n' + '\n' + ' Generator functions\n' + ' A function or method which uses the "yield" statement (see\n' + ' section The yield statement) is called a *generator ' + 'function*.\n' + ' Such a function, when called, always returns an *iterator*\n' + ' object which can be used to execute the body of the ' + 'function:\n' + ' calling the iterator’s "iterator.__next__()" method will ' + 'cause\n' + ' the function to execute until it provides a value using the\n' + ' "yield" statement. When the function executes a "return"\n' + ' statement or falls off the end, a "StopIteration" exception ' + 'is\n' + ' raised and the iterator will have reached the end of the set ' + 'of\n' + ' values to be returned.\n' + '\n' + ' Coroutine functions\n' + ' A function or method which is defined using "async def" is\n' + ' called a *coroutine function*. Such a function, when ' + 'called,\n' + ' returns a *coroutine* object. It may contain "await"\n' + ' expressions, as well as "async with" and "async for" ' + 'statements.\n' + ' See also the Coroutine Objects section.\n' + '\n' + ' Asynchronous generator functions\n' + ' A function or method which is defined using "async def" and\n' + ' which uses the "yield" statement is called a *asynchronous\n' + ' generator function*. Such a function, when called, returns ' + 'an\n' + ' *asynchronous iterator* object which can be used in an ' + '"async\n' + ' for" statement to execute the body of the function.\n' + '\n' + ' Calling the asynchronous iterator’s "aiterator.__anext__" ' + 'method\n' + ' will return an *awaitable* which when awaited will execute ' + 'until\n' + ' it provides a value using the "yield" expression. 
When the\n' + ' function executes an empty "return" statement or falls off ' + 'the\n' + ' end, a "StopAsyncIteration" exception is raised and the\n' + ' asynchronous iterator will have reached the end of the set ' + 'of\n' + ' values to be yielded.\n' + '\n' + ' Built-in functions\n' + ' A built-in function object is a wrapper around a C function.\n' + ' Examples of built-in functions are "len()" and "math.sin()"\n' + ' ("math" is a standard built-in module). The number and type ' + 'of\n' + ' the arguments are determined by the C function. Special ' + 'read-\n' + ' only attributes: "__doc__" is the function’s documentation\n' + ' string, or "None" if unavailable; "__name__" is the ' + 'function’s\n' + ' name; "__self__" is set to "None" (but see the next item);\n' + ' "__module__" is the name of the module the function was ' + 'defined\n' + ' in or "None" if unavailable.\n' + '\n' + ' Built-in methods\n' + ' This is really a different disguise of a built-in function, ' + 'this\n' + ' time containing an object passed to the C function as an\n' + ' implicit extra argument. An example of a built-in method is\n' + ' "alist.append()", assuming *alist* is a list object. In this\n' + ' case, the special read-only attribute "__self__" is set to ' + 'the\n' + ' object denoted by *alist*.\n' + '\n' + ' Classes\n' + ' Classes are callable. These objects normally act as ' + 'factories\n' + ' for new instances of themselves, but variations are possible ' + 'for\n' + ' class types that override "__new__()". 
The arguments of the\n' + ' call are passed to "__new__()" and, in the typical case, to\n' + ' "__init__()" to initialize the new instance.\n' + '\n' + ' Class Instances\n' + ' Instances of arbitrary classes can be made callable by ' + 'defining\n' + ' a "__call__()" method in their class.\n' + '\n' + 'Modules\n' + ' Modules are a basic organizational unit of Python code, and are\n' + ' created by the import system as invoked either by the "import"\n' + ' statement, or by calling functions such as\n' + ' "importlib.import_module()" and built-in "__import__()". A ' + 'module\n' + ' object has a namespace implemented by a dictionary object (this ' + 'is\n' + ' the dictionary referenced by the "__globals__" attribute of\n' + ' functions defined in the module). Attribute references are\n' + ' translated to lookups in this dictionary, e.g., "m.x" is ' + 'equivalent\n' + ' to "m.__dict__["x"]". A module object does not contain the code\n' + ' object used to initialize the module (since it isn’t needed ' + 'once\n' + ' the initialization is done).\n' + '\n' + ' Attribute assignment updates the module’s namespace dictionary,\n' + ' e.g., "m.x = 1" is equivalent to "m.__dict__["x"] = 1".\n' + '\n' + ' Predefined (writable) attributes:\n' + '\n' + ' "__name__"\n' + ' The module’s name.\n' + '\n' + ' "__doc__"\n' + ' The module’s documentation string, or "None" if ' + 'unavailable.\n' + '\n' + ' "__file__"\n' + ' The pathname of the file from which the module was loaded, ' + 'if\n' + ' it was loaded from a file. The "__file__" attribute may ' + 'be\n' + ' missing for certain types of modules, such as C modules ' + 'that\n' + ' are statically linked into the interpreter. For ' + 'extension\n' + ' modules loaded dynamically from a shared library, it’s ' + 'the\n' + ' pathname of the shared library file.\n' + '\n' + ' "__annotations__"\n' + ' A dictionary containing *variable annotations* collected\n' + ' during module body execution. 
For best practices on ' + 'working\n' + ' with "__annotations__", please see Annotations Best\n' + ' Practices.\n' + '\n' + ' Special read-only attribute: "__dict__" is the module’s ' + 'namespace\n' + ' as a dictionary object.\n' + '\n' + ' **CPython implementation detail:** Because of the way CPython\n' + ' clears module dictionaries, the module dictionary will be ' + 'cleared\n' + ' when the module falls out of scope even if the dictionary still ' + 'has\n' + ' live references. To avoid this, copy the dictionary or keep ' + 'the\n' + ' module around while using its dictionary directly.\n' + '\n' + 'Custom classes\n' + ' Custom class types are typically created by class definitions ' + '(see\n' + ' section Class definitions). A class has a namespace implemented ' + 'by\n' + ' a dictionary object. Class attribute references are translated ' + 'to\n' + ' lookups in this dictionary, e.g., "C.x" is translated to\n' + ' "C.__dict__["x"]" (although there are a number of hooks which ' + 'allow\n' + ' for other means of locating attributes). When the attribute name ' + 'is\n' + ' not found there, the attribute search continues in the base\n' + ' classes. This search of the base classes uses the C3 method\n' + ' resolution order which behaves correctly even in the presence ' + 'of\n' + ' ‘diamond’ inheritance structures where there are multiple\n' + ' inheritance paths leading back to a common ancestor. Additional\n' + ' details on the C3 MRO used by Python can be found in the\n' + ' documentation accompanying the 2.3 release at\n' + ' https://www.python.org/download/releases/2.3/mro/.\n' + '\n' + ' When a class attribute reference (for class "C", say) would ' + 'yield a\n' + ' class method object, it is transformed into an instance method\n' + ' object whose "__self__" attribute is "C". When it would yield ' + 'a\n' + ' static method object, it is transformed into the object wrapped ' + 'by\n' + ' the static method object. 
See section Implementing Descriptors ' + 'for\n' + ' another way in which attributes retrieved from a class may ' + 'differ\n' + ' from those actually contained in its "__dict__".\n' + '\n' + ' Class attribute assignments update the class’s dictionary, ' + 'never\n' + ' the dictionary of a base class.\n' + '\n' + ' A class object can be called (see above) to yield a class ' + 'instance\n' + ' (see below).\n' + '\n' + ' Special attributes:\n' + '\n' + ' "__name__"\n' + ' The class name.\n' + '\n' + ' "__module__"\n' + ' The name of the module in which the class was defined.\n' + '\n' + ' "__dict__"\n' + ' The dictionary containing the class’s namespace.\n' + '\n' + ' "__bases__"\n' + ' A tuple containing the base classes, in the order of ' + 'their\n' + ' occurrence in the base class list.\n' + '\n' + ' "__doc__"\n' + ' The class’s documentation string, or "None" if undefined.\n' + '\n' + ' "__annotations__"\n' + ' A dictionary containing *variable annotations* collected\n' + ' during class body execution. For best practices on ' + 'working\n' + ' with "__annotations__", please see Annotations Best\n' + ' Practices.\n' + '\n' + 'Class instances\n' + ' A class instance is created by calling a class object (see ' + 'above).\n' + ' A class instance has a namespace implemented as a dictionary ' + 'which\n' + ' is the first place in which attribute references are searched.\n' + ' When an attribute is not found there, and the instance’s class ' + 'has\n' + ' an attribute by that name, the search continues with the class\n' + ' attributes. If a class attribute is found that is a ' + 'user-defined\n' + ' function object, it is transformed into an instance method ' + 'object\n' + ' whose "__self__" attribute is the instance. Static method and\n' + ' class method objects are also transformed; see above under\n' + ' “Classes”. 
See section Implementing Descriptors for another way ' + 'in\n' + ' which attributes of a class retrieved via its instances may ' + 'differ\n' + ' from the objects actually stored in the class’s "__dict__". If ' + 'no\n' + ' class attribute is found, and the object’s class has a\n' + ' "__getattr__()" method, that is called to satisfy the lookup.\n' + '\n' + ' Attribute assignments and deletions update the instance’s\n' + ' dictionary, never a class’s dictionary. If the class has a\n' + ' "__setattr__()" or "__delattr__()" method, this is called ' + 'instead\n' + ' of updating the instance dictionary directly.\n' + '\n' + ' Class instances can pretend to be numbers, sequences, or ' + 'mappings\n' + ' if they have methods with certain special names. See section\n' + ' Special method names.\n' + '\n' + ' Special attributes: "__dict__" is the attribute dictionary;\n' + ' "__class__" is the instance’s class.\n' + '\n' + 'I/O objects (also known as file objects)\n' + ' A *file object* represents an open file. Various shortcuts are\n' + ' available to create file objects: the "open()" built-in ' + 'function,\n' + ' and also "os.popen()", "os.fdopen()", and the "makefile()" ' + 'method\n' + ' of socket objects (and perhaps by other functions or methods\n' + ' provided by extension modules).\n' + '\n' + ' The objects "sys.stdin", "sys.stdout" and "sys.stderr" are\n' + ' initialized to file objects corresponding to the interpreter’s\n' + ' standard input, output and error streams; they are all open in ' + 'text\n' + ' mode and therefore follow the interface defined by the\n' + ' "io.TextIOBase" abstract class.\n' + '\n' + 'Internal types\n' + ' A few types used internally by the interpreter are exposed to ' + 'the\n' + ' user. Their definitions may change with future versions of the\n' + ' interpreter, but they are mentioned here for completeness.\n' + '\n' + ' Code objects\n' + ' Code objects represent *byte-compiled* executable Python ' + 'code,\n' + ' or *bytecode*. 
The difference between a code object and a\n' + ' function object is that the function object contains an ' + 'explicit\n' + ' reference to the function’s globals (the module in which it ' + 'was\n' + ' defined), while a code object contains no context; also the\n' + ' default argument values are stored in the function object, ' + 'not\n' + ' in the code object (because they represent values calculated ' + 'at\n' + ' run-time). Unlike function objects, code objects are ' + 'immutable\n' + ' and contain no references (directly or indirectly) to ' + 'mutable\n' + ' objects.\n' + '\n' + ' Special read-only attributes: "co_name" gives the function ' + 'name;\n' + ' "co_argcount" is the total number of positional arguments\n' + ' (including positional-only arguments and arguments with ' + 'default\n' + ' values); "co_posonlyargcount" is the number of ' + 'positional-only\n' + ' arguments (including arguments with default values);\n' + ' "co_kwonlyargcount" is the number of keyword-only arguments\n' + ' (including arguments with default values); "co_nlocals" is ' + 'the\n' + ' number of local variables used by the function (including\n' + ' arguments); "co_varnames" is a tuple containing the names of ' + 'the\n' + ' local variables (starting with the argument names);\n' + ' "co_cellvars" is a tuple containing the names of local ' + 'variables\n' + ' that are referenced by nested functions; "co_freevars" is a\n' + ' tuple containing the names of free variables; "co_code" is a\n' + ' string representing the sequence of bytecode instructions;\n' + ' "co_consts" is a tuple containing the literals used by the\n' + ' bytecode; "co_names" is a tuple containing the names used by ' + 'the\n' + ' bytecode; "co_filename" is the filename from which the code ' + 'was\n' + ' compiled; "co_firstlineno" is the first line number of the\n' + ' function; "co_lnotab" is a string encoding the mapping from\n' + ' bytecode offsets to line numbers (for details see the source\n' + ' code of the 
interpreter); "co_stacksize" is the required ' + 'stack\n' + ' size; "co_flags" is an integer encoding a number of flags ' + 'for\n' + ' the interpreter.\n' + '\n' + ' The following flag bits are defined for "co_flags": bit ' + '"0x04"\n' + ' is set if the function uses the "*arguments" syntax to accept ' + 'an\n' + ' arbitrary number of positional arguments; bit "0x08" is set ' + 'if\n' + ' the function uses the "**keywords" syntax to accept ' + 'arbitrary\n' + ' keyword arguments; bit "0x20" is set if the function is a\n' + ' generator.\n' + '\n' + ' Future feature declarations ("from __future__ import ' + 'division")\n' + ' also use bits in "co_flags" to indicate whether a code ' + 'object\n' + ' was compiled with a particular feature enabled: bit "0x2000" ' + 'is\n' + ' set if the function was compiled with future division ' + 'enabled;\n' + ' bits "0x10" and "0x1000" were used in earlier versions of\n' + ' Python.\n' + '\n' + ' Other bits in "co_flags" are reserved for internal use.\n' + '\n' + ' If a code object represents a function, the first item in\n' + ' "co_consts" is the documentation string of the function, or\n' + ' "None" if undefined.\n' + '\n' + ' Frame objects\n' + ' Frame objects represent execution frames. 
They may occur in\n' + ' traceback objects (see below), and are also passed to ' + 'registered\n' + ' trace functions.\n' + '\n' + ' Special read-only attributes: "f_back" is to the previous ' + 'stack\n' + ' frame (towards the caller), or "None" if this is the bottom\n' + ' stack frame; "f_code" is the code object being executed in ' + 'this\n' + ' frame; "f_locals" is the dictionary used to look up local\n' + ' variables; "f_globals" is used for global variables;\n' + ' "f_builtins" is used for built-in (intrinsic) names; ' + '"f_lasti"\n' + ' gives the precise instruction (this is an index into the\n' + ' bytecode string of the code object).\n' + '\n' + ' Accessing "f_code" raises an auditing event ' + '"object.__getattr__"\n' + ' with arguments "obj" and ""f_code"".\n' + '\n' + ' Special writable attributes: "f_trace", if not "None", is a\n' + ' function called for various events during code execution ' + '(this\n' + ' is used by the debugger). Normally an event is triggered for\n' + ' each new source line - this can be disabled by setting\n' + ' "f_trace_lines" to "False".\n' + '\n' + ' Implementations *may* allow per-opcode events to be requested ' + 'by\n' + ' setting "f_trace_opcodes" to "True". Note that this may lead ' + 'to\n' + ' undefined interpreter behaviour if exceptions raised by the\n' + ' trace function escape to the function being traced.\n' + '\n' + ' "f_lineno" is the current line number of the frame — writing ' + 'to\n' + ' this from within a trace function jumps to the given line ' + '(only\n' + ' for the bottom-most frame). A debugger can implement a Jump\n' + ' command (aka Set Next Statement) by writing to f_lineno.\n' + '\n' + ' Frame objects support one method:\n' + '\n' + ' frame.clear()\n' + '\n' + ' This method clears all references to local variables held ' + 'by\n' + ' the frame. Also, if the frame belonged to a generator, ' + 'the\n' + ' generator is finalized. 
This helps break reference ' + 'cycles\n' + ' involving frame objects (for example when catching an\n' + ' exception and storing its traceback for later use).\n' + '\n' + ' "RuntimeError" is raised if the frame is currently ' + 'executing.\n' + '\n' + ' New in version 3.4.\n' + '\n' + ' Traceback objects\n' + ' Traceback objects represent a stack trace of an exception. ' + 'A\n' + ' traceback object is implicitly created when an exception ' + 'occurs,\n' + ' and may also be explicitly created by calling\n' + ' "types.TracebackType".\n' + '\n' + ' For implicitly created tracebacks, when the search for an\n' + ' exception handler unwinds the execution stack, at each ' + 'unwound\n' + ' level a traceback object is inserted in front of the current\n' + ' traceback. When an exception handler is entered, the stack\n' + ' trace is made available to the program. (See section The try\n' + ' statement.) It is accessible as the third item of the tuple\n' + ' returned by "sys.exc_info()", and as the "__traceback__"\n' + ' attribute of the caught exception.\n' + '\n' + ' When the program contains no suitable handler, the stack ' + 'trace\n' + ' is written (nicely formatted) to the standard error stream; ' + 'if\n' + ' the interpreter is interactive, it is also made available to ' + 'the\n' + ' user as "sys.last_traceback".\n' + '\n' + ' For explicitly created tracebacks, it is up to the creator ' + 'of\n' + ' the traceback to determine how the "tb_next" attributes ' + 'should\n' + ' be linked to form a full stack trace.\n' + '\n' + ' Special read-only attributes: "tb_frame" points to the ' + 'execution\n' + ' frame of the current level; "tb_lineno" gives the line ' + 'number\n' + ' where the exception occurred; "tb_lasti" indicates the ' + 'precise\n' + ' instruction. 
The line number and last instruction in the\n' + ' traceback may differ from the line number of its frame object ' + 'if\n' + ' the exception occurred in a "try" statement with no matching\n' + ' except clause or with a finally clause.\n' + '\n' + ' Accessing "tb_frame" raises an auditing event\n' + ' "object.__getattr__" with arguments "obj" and ""tb_frame"".\n' + '\n' + ' Special writable attribute: "tb_next" is the next level in ' + 'the\n' + ' stack trace (towards the frame where the exception occurred), ' + 'or\n' + ' "None" if there is no next level.\n' + '\n' + ' Changed in version 3.7: Traceback objects can now be ' + 'explicitly\n' + ' instantiated from Python code, and the "tb_next" attribute ' + 'of\n' + ' existing instances can be updated.\n' + '\n' + ' Slice objects\n' + ' Slice objects are used to represent slices for ' + '"__getitem__()"\n' + ' methods. They are also created by the built-in "slice()"\n' + ' function.\n' + '\n' + ' Special read-only attributes: "start" is the lower bound; ' + '"stop"\n' + ' is the upper bound; "step" is the step value; each is "None" ' + 'if\n' + ' omitted. These attributes can have any type.\n' + '\n' + ' Slice objects support one method:\n' + '\n' + ' slice.indices(self, length)\n' + '\n' + ' This method takes a single integer argument *length* and\n' + ' computes information about the slice that the slice ' + 'object\n' + ' would describe if applied to a sequence of *length* ' + 'items.\n' + ' It returns a tuple of three integers; respectively these ' + 'are\n' + ' the *start* and *stop* indices and the *step* or stride\n' + ' length of the slice. Missing or out-of-bounds indices are\n' + ' handled in a manner consistent with regular slices.\n' + '\n' + ' Static method objects\n' + ' Static method objects provide a way of defeating the\n' + ' transformation of function objects to method objects ' + 'described\n' + ' above. 
A static method object is a wrapper around any other\n' + ' object, usually a user-defined method object. When a static\n' + ' method object is retrieved from a class or a class instance, ' + 'the\n' + ' object actually returned is the wrapped object, which is not\n' + ' subject to any further transformation. Static method objects ' + 'are\n' + ' also callable. Static method objects are created by the ' + 'built-in\n' + ' "staticmethod()" constructor.\n' + '\n' + ' Class method objects\n' + ' A class method object, like a static method object, is a ' + 'wrapper\n' + ' around another object that alters the way in which that ' + 'object\n' + ' is retrieved from classes and class instances. The behaviour ' + 'of\n' + ' class method objects upon such retrieval is described above,\n' + ' under “User-defined methods”. Class method objects are ' + 'created\n' + ' by the built-in "classmethod()" constructor.\n', + 'typesfunctions': 'Functions\n' + '*********\n' + '\n' + 'Function objects are created by function definitions. The ' + 'only\n' + 'operation on a function object is to call it: ' + '"func(argument-list)".\n' + '\n' + 'There are really two flavors of function objects: built-in ' + 'functions\n' + 'and user-defined functions. Both support the same ' + 'operation (to call\n' + 'the function), but the implementation is different, hence ' + 'the\n' + 'different object types.\n' + '\n' + 'See Function definitions for more information.\n', + 'typesmapping': 'Mapping Types — "dict"\n' + '**********************\n' + '\n' + 'A *mapping* object maps *hashable* values to arbitrary ' + 'objects.\n' + 'Mappings are mutable objects. There is currently only one ' + 'standard\n' + 'mapping type, the *dictionary*. (For other containers see ' + 'the built-\n' + 'in "list", "set", and "tuple" classes, and the "collections" ' + 'module.)\n' + '\n' + 'A dictionary’s keys are *almost* arbitrary values. 
Values ' + 'that are\n' + 'not *hashable*, that is, values containing lists, ' + 'dictionaries or\n' + 'other mutable types (that are compared by value rather than ' + 'by object\n' + 'identity) may not be used as keys. Values that compare equal ' + '(such as\n' + '"1", "1.0", and "True") can be used interchangeably to index ' + 'the same\n' + 'dictionary entry.\n' + '\n' + 'class dict(**kwargs)\n' + 'class dict(mapping, **kwargs)\n' + 'class dict(iterable, **kwargs)\n' + '\n' + ' Return a new dictionary initialized from an optional ' + 'positional\n' + ' argument and a possibly empty set of keyword arguments.\n' + '\n' + ' Dictionaries can be created by several means:\n' + '\n' + ' * Use a comma-separated list of "key: value" pairs within ' + 'braces:\n' + ' "{\'jack\': 4098, \'sjoerd\': 4127}" or "{4098: ' + "'jack', 4127:\n" + ' \'sjoerd\'}"\n' + '\n' + ' * Use a dict comprehension: "{}", "{x: x ** 2 for x in ' + 'range(10)}"\n' + '\n' + ' * Use the type constructor: "dict()", "dict([(\'foo\', ' + "100), ('bar',\n" + ' 200)])", "dict(foo=100, bar=200)"\n' + '\n' + ' If no positional argument is given, an empty dictionary ' + 'is created.\n' + ' If a positional argument is given and it is a mapping ' + 'object, a\n' + ' dictionary is created with the same key-value pairs as ' + 'the mapping\n' + ' object. Otherwise, the positional argument must be an ' + '*iterable*\n' + ' object. Each item in the iterable must itself be an ' + 'iterable with\n' + ' exactly two objects. The first object of each item ' + 'becomes a key\n' + ' in the new dictionary, and the second object the ' + 'corresponding\n' + ' value. If a key occurs more than once, the last value ' + 'for that key\n' + ' becomes the corresponding value in the new dictionary.\n' + '\n' + ' If keyword arguments are given, the keyword arguments and ' + 'their\n' + ' values are added to the dictionary created from the ' + 'positional\n' + ' argument. 
If a key being added is already present, the ' + 'value from\n' + ' the keyword argument replaces the value from the ' + 'positional\n' + ' argument.\n' + '\n' + ' To illustrate, the following examples all return a ' + 'dictionary equal\n' + ' to "{"one": 1, "two": 2, "three": 3}":\n' + '\n' + ' >>> a = dict(one=1, two=2, three=3)\n' + " >>> b = {'one': 1, 'two': 2, 'three': 3}\n" + " >>> c = dict(zip(['one', 'two', 'three'], [1, 2, 3]))\n" + " >>> d = dict([('two', 2), ('one', 1), ('three', 3)])\n" + " >>> e = dict({'three': 3, 'one': 1, 'two': 2})\n" + " >>> f = dict({'one': 1, 'three': 3}, two=2)\n" + ' >>> a == b == c == d == e == f\n' + ' True\n' + '\n' + ' Providing keyword arguments as in the first example only ' + 'works for\n' + ' keys that are valid Python identifiers. Otherwise, any ' + 'valid keys\n' + ' can be used.\n' + '\n' + ' These are the operations that dictionaries support (and ' + 'therefore,\n' + ' custom mapping types should support too):\n' + '\n' + ' list(d)\n' + '\n' + ' Return a list of all the keys used in the dictionary ' + '*d*.\n' + '\n' + ' len(d)\n' + '\n' + ' Return the number of items in the dictionary *d*.\n' + '\n' + ' d[key]\n' + '\n' + ' Return the item of *d* with key *key*. Raises a ' + '"KeyError" if\n' + ' *key* is not in the map.\n' + '\n' + ' If a subclass of dict defines a method "__missing__()" ' + 'and *key*\n' + ' is not present, the "d[key]" operation calls that ' + 'method with\n' + ' the key *key* as argument. The "d[key]" operation ' + 'then returns\n' + ' or raises whatever is returned or raised by the\n' + ' "__missing__(key)" call. No other operations or ' + 'methods invoke\n' + ' "__missing__()". If "__missing__()" is not defined, ' + '"KeyError"\n' + ' is raised. "__missing__()" must be a method; it cannot ' + 'be an\n' + ' instance variable:\n' + '\n' + ' >>> class Counter(dict):\n' + ' ... def __missing__(self, key):\n' + ' ... 
return 0\n' + ' >>> c = Counter()\n' + " >>> c['red']\n" + ' 0\n' + " >>> c['red'] += 1\n" + " >>> c['red']\n" + ' 1\n' + '\n' + ' The example above shows part of the implementation of\n' + ' "collections.Counter". A different "__missing__" ' + 'method is used\n' + ' by "collections.defaultdict".\n' + '\n' + ' d[key] = value\n' + '\n' + ' Set "d[key]" to *value*.\n' + '\n' + ' del d[key]\n' + '\n' + ' Remove "d[key]" from *d*. Raises a "KeyError" if ' + '*key* is not\n' + ' in the map.\n' + '\n' + ' key in d\n' + '\n' + ' Return "True" if *d* has a key *key*, else "False".\n' + '\n' + ' key not in d\n' + '\n' + ' Equivalent to "not key in d".\n' + '\n' + ' iter(d)\n' + '\n' + ' Return an iterator over the keys of the dictionary. ' + 'This is a\n' + ' shortcut for "iter(d.keys())".\n' + '\n' + ' clear()\n' + '\n' + ' Remove all items from the dictionary.\n' + '\n' + ' copy()\n' + '\n' + ' Return a shallow copy of the dictionary.\n' + '\n' + ' classmethod fromkeys(iterable[, value])\n' + '\n' + ' Create a new dictionary with keys from *iterable* and ' + 'values set\n' + ' to *value*.\n' + '\n' + ' "fromkeys()" is a class method that returns a new ' + 'dictionary.\n' + ' *value* defaults to "None". All of the values refer ' + 'to just a\n' + ' single instance, so it generally doesn’t make sense ' + 'for *value*\n' + ' to be a mutable object such as an empty list. To get ' + 'distinct\n' + ' values, use a dict comprehension instead.\n' + '\n' + ' get(key[, default])\n' + '\n' + ' Return the value for *key* if *key* is in the ' + 'dictionary, else\n' + ' *default*. If *default* is not given, it defaults to ' + '"None", so\n' + ' that this method never raises a "KeyError".\n' + '\n' + ' items()\n' + '\n' + ' Return a new view of the dictionary’s items ("(key, ' + 'value)"\n' + ' pairs). See the documentation of view objects.\n' + '\n' + ' keys()\n' + '\n' + ' Return a new view of the dictionary’s keys. 
See the\n' + ' documentation of view objects.\n' + '\n' + ' pop(key[, default])\n' + '\n' + ' If *key* is in the dictionary, remove it and return ' + 'its value,\n' + ' else return *default*. If *default* is not given and ' + '*key* is\n' + ' not in the dictionary, a "KeyError" is raised.\n' + '\n' + ' popitem()\n' + '\n' + ' Remove and return a "(key, value)" pair from the ' + 'dictionary.\n' + ' Pairs are returned in LIFO (last-in, first-out) ' + 'order.\n' + '\n' + ' "popitem()" is useful to destructively iterate over a\n' + ' dictionary, as often used in set algorithms. If the ' + 'dictionary\n' + ' is empty, calling "popitem()" raises a "KeyError".\n' + '\n' + ' Changed in version 3.7: LIFO order is now guaranteed. ' + 'In prior\n' + ' versions, "popitem()" would return an arbitrary ' + 'key/value pair.\n' + '\n' + ' reversed(d)\n' + '\n' + ' Return a reverse iterator over the keys of the ' + 'dictionary. This\n' + ' is a shortcut for "reversed(d.keys())".\n' + '\n' + ' New in version 3.8.\n' + '\n' + ' setdefault(key[, default])\n' + '\n' + ' If *key* is in the dictionary, return its value. If ' + 'not, insert\n' + ' *key* with a value of *default* and return *default*. ' + '*default*\n' + ' defaults to "None".\n' + '\n' + ' update([other])\n' + '\n' + ' Update the dictionary with the key/value pairs from ' + '*other*,\n' + ' overwriting existing keys. Return "None".\n' + '\n' + ' "update()" accepts either another dictionary object or ' + 'an\n' + ' iterable of key/value pairs (as tuples or other ' + 'iterables of\n' + ' length two). If keyword arguments are specified, the ' + 'dictionary\n' + ' is then updated with those key/value pairs: ' + '"d.update(red=1,\n' + ' blue=2)".\n' + '\n' + ' values()\n' + '\n' + ' Return a new view of the dictionary’s values. See ' + 'the\n' + ' documentation of view objects.\n' + '\n' + ' An equality comparison between one "dict.values()" ' + 'view and\n' + ' another will always return "False". 
This also applies ' + 'when\n' + ' comparing "dict.values()" to itself:\n' + '\n' + " >>> d = {'a': 1}\n" + ' >>> d.values() == d.values()\n' + ' False\n' + '\n' + ' d | other\n' + '\n' + ' Create a new dictionary with the merged keys and ' + 'values of *d*\n' + ' and *other*, which must both be dictionaries. The ' + 'values of\n' + ' *other* take priority when *d* and *other* share ' + 'keys.\n' + '\n' + ' New in version 3.9.\n' + '\n' + ' d |= other\n' + '\n' + ' Update the dictionary *d* with keys and values from ' + '*other*,\n' + ' which may be either a *mapping* or an *iterable* of ' + 'key/value\n' + ' pairs. The values of *other* take priority when *d* ' + 'and *other*\n' + ' share keys.\n' + '\n' + ' New in version 3.9.\n' + '\n' + ' Dictionaries compare equal if and only if they have the ' + 'same "(key,\n' + ' value)" pairs (regardless of ordering). Order comparisons ' + '(‘<’,\n' + ' ‘<=’, ‘>=’, ‘>’) raise "TypeError".\n' + '\n' + ' Dictionaries preserve insertion order. Note that ' + 'updating a key\n' + ' does not affect the order. Keys added after deletion are ' + 'inserted\n' + ' at the end.\n' + '\n' + ' >>> d = {"one": 1, "two": 2, "three": 3, "four": 4}\n' + ' >>> d\n' + " {'one': 1, 'two': 2, 'three': 3, 'four': 4}\n" + ' >>> list(d)\n' + " ['one', 'two', 'three', 'four']\n" + ' >>> list(d.values())\n' + ' [1, 2, 3, 4]\n' + ' >>> d["one"] = 42\n' + ' >>> d\n' + " {'one': 42, 'two': 2, 'three': 3, 'four': 4}\n" + ' >>> del d["two"]\n' + ' >>> d["two"] = None\n' + ' >>> d\n' + " {'one': 42, 'three': 3, 'four': 4, 'two': None}\n" + '\n' + ' Changed in version 3.7: Dictionary order is guaranteed to ' + 'be\n' + ' insertion order. 
This behavior was an implementation ' + 'detail of\n' + ' CPython from 3.6.\n' + '\n' + ' Dictionaries and dictionary views are reversible.\n' + '\n' + ' >>> d = {"one": 1, "two": 2, "three": 3, "four": 4}\n' + ' >>> d\n' + " {'one': 1, 'two': 2, 'three': 3, 'four': 4}\n" + ' >>> list(reversed(d))\n' + " ['four', 'three', 'two', 'one']\n" + ' >>> list(reversed(d.values()))\n' + ' [4, 3, 2, 1]\n' + ' >>> list(reversed(d.items()))\n' + " [('four', 4), ('three', 3), ('two', 2), ('one', 1)]\n" + '\n' + ' Changed in version 3.8: Dictionaries are now reversible.\n' + '\n' + 'See also:\n' + '\n' + ' "types.MappingProxyType" can be used to create a read-only ' + 'view of a\n' + ' "dict".\n' + '\n' + '\n' + 'Dictionary view objects\n' + '=======================\n' + '\n' + 'The objects returned by "dict.keys()", "dict.values()" and\n' + '"dict.items()" are *view objects*. They provide a dynamic ' + 'view on the\n' + 'dictionary’s entries, which means that when the dictionary ' + 'changes,\n' + 'the view reflects these changes.\n' + '\n' + 'Dictionary views can be iterated over to yield their ' + 'respective data,\n' + 'and support membership tests:\n' + '\n' + 'len(dictview)\n' + '\n' + ' Return the number of entries in the dictionary.\n' + '\n' + 'iter(dictview)\n' + '\n' + ' Return an iterator over the keys, values or items ' + '(represented as\n' + ' tuples of "(key, value)") in the dictionary.\n' + '\n' + ' Keys and values are iterated over in insertion order. ' + 'This allows\n' + ' the creation of "(value, key)" pairs using "zip()": ' + '"pairs =\n' + ' zip(d.values(), d.keys())". 
Another way to create the ' + 'same list is\n' + ' "pairs = [(v, k) for (k, v) in d.items()]".\n' + '\n' + ' Iterating views while adding or deleting entries in the ' + 'dictionary\n' + ' may raise a "RuntimeError" or fail to iterate over all ' + 'entries.\n' + '\n' + ' Changed in version 3.7: Dictionary order is guaranteed to ' + 'be\n' + ' insertion order.\n' + '\n' + 'x in dictview\n' + '\n' + ' Return "True" if *x* is in the underlying dictionary’s ' + 'keys, values\n' + ' or items (in the latter case, *x* should be a "(key, ' + 'value)"\n' + ' tuple).\n' + '\n' + 'reversed(dictview)\n' + '\n' + ' Return a reverse iterator over the keys, values or items ' + 'of the\n' + ' dictionary. The view will be iterated in reverse order of ' + 'the\n' + ' insertion.\n' + '\n' + ' Changed in version 3.8: Dictionary views are now ' + 'reversible.\n' + '\n' + 'dictview.mapping\n' + '\n' + ' Return a "types.MappingProxyType" that wraps the ' + 'original\n' + ' dictionary to which the view refers.\n' + '\n' + ' New in version 3.10.\n' + '\n' + 'Keys views are set-like since their entries are unique and ' + '*hashable*.\n' + 'If all values are hashable, so that "(key, value)" pairs are ' + 'unique\n' + 'and hashable, then the items view is also set-like. (Values ' + 'views are\n' + 'not treated as set-like since the entries are generally not ' + 'unique.)\n' + 'For set-like views, all of the operations defined for the ' + 'abstract\n' + 'base class "collections.abc.Set" are available (for example, ' + '"==",\n' + '"<", or "^").\n' + '\n' + 'An example of dictionary view usage:\n' + '\n' + " >>> dishes = {'eggs': 2, 'sausage': 1, 'bacon': 1, " + "'spam': 500}\n" + ' >>> keys = dishes.keys()\n' + ' >>> values = dishes.values()\n' + '\n' + ' >>> # iteration\n' + ' >>> n = 0\n' + ' >>> for val in values:\n' + ' ... 
n += val\n' + ' >>> print(n)\n' + ' 504\n' + '\n' + ' >>> # keys and values are iterated over in the same order ' + '(insertion order)\n' + ' >>> list(keys)\n' + " ['eggs', 'sausage', 'bacon', 'spam']\n" + ' >>> list(values)\n' + ' [2, 1, 1, 500]\n' + '\n' + ' >>> # view objects are dynamic and reflect dict changes\n' + " >>> del dishes['eggs']\n" + " >>> del dishes['sausage']\n" + ' >>> list(keys)\n' + " ['bacon', 'spam']\n" + '\n' + ' >>> # set operations\n' + " >>> keys & {'eggs', 'bacon', 'salad'}\n" + " {'bacon'}\n" + " >>> keys ^ {'sausage', 'juice'}\n" + " {'juice', 'sausage', 'bacon', 'spam'}\n" + '\n' + ' >>> # get back a read-only proxy for the original ' + 'dictionary\n' + ' >>> values.mapping\n' + " mappingproxy({'bacon': 1, 'spam': 500})\n" + " >>> values.mapping['spam']\n" + ' 500\n', + 'typesmethods': 'Methods\n' + '*******\n' + '\n' + 'Methods are functions that are called using the attribute ' + 'notation.\n' + 'There are two flavors: built-in methods (such as "append()" ' + 'on lists)\n' + 'and class instance methods. Built-in methods are described ' + 'with the\n' + 'types that support them.\n' + '\n' + 'If you access a method (a function defined in a class ' + 'namespace)\n' + 'through an instance, you get a special object: a *bound ' + 'method* (also\n' + 'called *instance method*) object. When called, it will add ' + 'the "self"\n' + 'argument to the argument list. Bound methods have two ' + 'special read-\n' + 'only attributes: "m.__self__" is the object on which the ' + 'method\n' + 'operates, and "m.__func__" is the function implementing the ' + 'method.\n' + 'Calling "m(arg-1, arg-2, ..., arg-n)" is completely ' + 'equivalent to\n' + 'calling "m.__func__(m.__self__, arg-1, arg-2, ..., arg-n)".\n' + '\n' + 'Like function objects, bound method objects support getting ' + 'arbitrary\n' + 'attributes. 
However, since method attributes are actually ' + 'stored on\n' + 'the underlying function object ("meth.__func__"), setting ' + 'method\n' + 'attributes on bound methods is disallowed. Attempting to ' + 'set an\n' + 'attribute on a method results in an "AttributeError" being ' + 'raised. In\n' + 'order to set a method attribute, you need to explicitly set ' + 'it on the\n' + 'underlying function object:\n' + '\n' + ' >>> class C:\n' + ' ... def method(self):\n' + ' ... pass\n' + ' ...\n' + ' >>> c = C()\n' + " >>> c.method.whoami = 'my name is method' # can't set on " + 'the method\n' + ' Traceback (most recent call last):\n' + ' File "", line 1, in \n' + " AttributeError: 'method' object has no attribute " + "'whoami'\n" + " >>> c.method.__func__.whoami = 'my name is method'\n" + ' >>> c.method.whoami\n' + " 'my name is method'\n" + '\n' + 'See The standard type hierarchy for more information.\n', + 'typesmodules': 'Modules\n' + '*******\n' + '\n' + 'The only special operation on a module is attribute access: ' + '"m.name",\n' + 'where *m* is a module and *name* accesses a name defined in ' + '*m*’s\n' + 'symbol table. Module attributes can be assigned to. (Note ' + 'that the\n' + '"import" statement is not, strictly speaking, an operation ' + 'on a module\n' + 'object; "import foo" does not require a module object named ' + '*foo* to\n' + 'exist, rather it requires an (external) *definition* for a ' + 'module\n' + 'named *foo* somewhere.)\n' + '\n' + 'A special attribute of every module is "__dict__". This is ' + 'the\n' + 'dictionary containing the module’s symbol table. Modifying ' + 'this\n' + 'dictionary will actually change the module’s symbol table, ' + 'but direct\n' + 'assignment to the "__dict__" attribute is not possible (you ' + 'can write\n' + '"m.__dict__[\'a\'] = 1", which defines "m.a" to be "1", but ' + 'you can’t\n' + 'write "m.__dict__ = {}"). 
Modifying "__dict__" directly is ' + 'not\n' + 'recommended.\n' + '\n' + 'Modules built into the interpreter are written like this: ' + '"". If loaded from a file, they are ' + 'written as\n' + '"".\n', + 'typesseq': 'Sequence Types — "list", "tuple", "range"\n' + '*****************************************\n' + '\n' + 'There are three basic sequence types: lists, tuples, and range\n' + 'objects. Additional sequence types tailored for processing of ' + 'binary\n' + 'data and text strings are described in dedicated sections.\n' + '\n' + '\n' + 'Common Sequence Operations\n' + '==========================\n' + '\n' + 'The operations in the following table are supported by most ' + 'sequence\n' + 'types, both mutable and immutable. The ' + '"collections.abc.Sequence" ABC\n' + 'is provided to make it easier to correctly implement these ' + 'operations\n' + 'on custom sequence types.\n' + '\n' + 'This table lists the sequence operations sorted in ascending ' + 'priority.\n' + 'In the table, *s* and *t* are sequences of the same type, *n*, ' + '*i*,\n' + '*j* and *k* are integers and *x* is an arbitrary object that ' + 'meets any\n' + 'type and value restrictions imposed by *s*.\n' + '\n' + 'The "in" and "not in" operations have the same priorities as ' + 'the\n' + 'comparison operations. The "+" (concatenation) and "*" ' + '(repetition)\n' + 'operations have the same priority as the corresponding numeric\n' + 'operations. 
[3]\n' + '\n' + '+----------------------------+----------------------------------+------------+\n' + '| Operation | Result ' + '| Notes |\n' + '|============================|==================================|============|\n' + '| "x in s" | "True" if an item of *s* is ' + '| (1) |\n' + '| | equal to *x*, else "False" ' + '| |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "x not in s" | "False" if an item of *s* is ' + '| (1) |\n' + '| | equal to *x*, else "True" ' + '| |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "s + t" | the concatenation of *s* and *t* ' + '| (6)(7) |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "s * n" or "n * s" | equivalent to adding *s* to ' + '| (2)(7) |\n' + '| | itself *n* times ' + '| |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "s[i]" | *i*th item of *s*, origin 0 ' + '| (3) |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "s[i:j]" | slice of *s* from *i* to *j* ' + '| (3)(4) |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "s[i:j:k]" | slice of *s* from *i* to *j* ' + '| (3)(5) |\n' + '| | with step *k* ' + '| |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "len(s)" | length of *s* ' + '| |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "min(s)" | smallest item of *s* ' + '| |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "max(s)" | largest item of *s* ' + '| |\n' + '+----------------------------+----------------------------------+------------+\n' + '| "s.index(x[, i[, j]])" | index of the first occurrence of ' + '| (8) |\n' + '| | *x* in *s* (at or after index ' + '| |\n' + '| | *i* and before index *j*) ' + '| 
|\n' + '+----------------------------+----------------------------------+------------+\n' + '| "s.count(x)" | total number of occurrences of ' + '| |\n' + '| | *x* in *s* ' + '| |\n' + '+----------------------------+----------------------------------+------------+\n' + '\n' + 'Sequences of the same type also support comparisons. In ' + 'particular,\n' + 'tuples and lists are compared lexicographically by comparing\n' + 'corresponding elements. This means that to compare equal, every\n' + 'element must compare equal and the two sequences must be of the ' + 'same\n' + 'type and have the same length. (For full details see ' + 'Comparisons in\n' + 'the language reference.)\n' + '\n' + 'Forward and reversed iterators over mutable sequences access ' + 'values\n' + 'using an index. That index will continue to march forward (or\n' + 'backward) even if the underlying sequence is mutated. The ' + 'iterator\n' + 'terminates only when an "IndexError" or a "StopIteration" is\n' + 'encountered (or when the index drops below zero).\n' + '\n' + 'Notes:\n' + '\n' + '1. While the "in" and "not in" operations are used only for ' + 'simple\n' + ' containment testing in the general case, some specialised ' + 'sequences\n' + ' (such as "str", "bytes" and "bytearray") also use them for\n' + ' subsequence testing:\n' + '\n' + ' >>> "gg" in "eggs"\n' + ' True\n' + '\n' + '2. Values of *n* less than "0" are treated as "0" (which yields ' + 'an\n' + ' empty sequence of the same type as *s*). Note that items in ' + 'the\n' + ' sequence *s* are not copied; they are referenced multiple ' + 'times.\n' + ' This often haunts new Python programmers; consider:\n' + '\n' + ' >>> lists = [[]] * 3\n' + ' >>> lists\n' + ' [[], [], []]\n' + ' >>> lists[0].append(3)\n' + ' >>> lists\n' + ' [[3], [3], [3]]\n' + '\n' + ' What has happened is that "[[]]" is a one-element list ' + 'containing\n' + ' an empty list, so all three elements of "[[]] * 3" are ' + 'references\n' + ' to this single empty list. 
Modifying any of the elements of\n' + ' "lists" modifies this single list. You can create a list of\n' + ' different lists this way:\n' + '\n' + ' >>> lists = [[] for i in range(3)]\n' + ' >>> lists[0].append(3)\n' + ' >>> lists[1].append(5)\n' + ' >>> lists[2].append(7)\n' + ' >>> lists\n' + ' [[3], [5], [7]]\n' + '\n' + ' Further explanation is available in the FAQ entry How do I ' + 'create a\n' + ' multidimensional list?.\n' + '\n' + '3. If *i* or *j* is negative, the index is relative to the end ' + 'of\n' + ' sequence *s*: "len(s) + i" or "len(s) + j" is substituted. ' + 'But\n' + ' note that "-0" is still "0".\n' + '\n' + '4. The slice of *s* from *i* to *j* is defined as the sequence ' + 'of\n' + ' items with index *k* such that "i <= k < j". If *i* or *j* ' + 'is\n' + ' greater than "len(s)", use "len(s)". If *i* is omitted or ' + '"None",\n' + ' use "0". If *j* is omitted or "None", use "len(s)". If *i* ' + 'is\n' + ' greater than or equal to *j*, the slice is empty.\n' + '\n' + '5. The slice of *s* from *i* to *j* with step *k* is defined as ' + 'the\n' + ' sequence of items with index "x = i + n*k" such that "0 <= n ' + '<\n' + ' (j-i)/k". In other words, the indices are "i", "i+k", ' + '"i+2*k",\n' + ' "i+3*k" and so on, stopping when *j* is reached (but never\n' + ' including *j*). When *k* is positive, *i* and *j* are ' + 'reduced to\n' + ' "len(s)" if they are greater. When *k* is negative, *i* and ' + '*j* are\n' + ' reduced to "len(s) - 1" if they are greater. If *i* or *j* ' + 'are\n' + ' omitted or "None", they become “end” values (which end ' + 'depends on\n' + ' the sign of *k*). Note, *k* cannot be zero. If *k* is ' + '"None", it\n' + ' is treated like "1".\n' + '\n' + '6. 
Concatenating immutable sequences always results in a new ' + 'object.\n' + ' This means that building up a sequence by repeated ' + 'concatenation\n' + ' will have a quadratic runtime cost in the total sequence ' + 'length.\n' + ' To get a linear runtime cost, you must switch to one of the\n' + ' alternatives below:\n' + '\n' + ' * if concatenating "str" objects, you can build a list and ' + 'use\n' + ' "str.join()" at the end or else write to an "io.StringIO"\n' + ' instance and retrieve its value when complete\n' + '\n' + ' * if concatenating "bytes" objects, you can similarly use\n' + ' "bytes.join()" or "io.BytesIO", or you can do in-place\n' + ' concatenation with a "bytearray" object. "bytearray" ' + 'objects are\n' + ' mutable and have an efficient overallocation mechanism\n' + '\n' + ' * if concatenating "tuple" objects, extend a "list" instead\n' + '\n' + ' * for other types, investigate the relevant class ' + 'documentation\n' + '\n' + '7. Some sequence types (such as "range") only support item ' + 'sequences\n' + ' that follow specific patterns, and hence don’t support ' + 'sequence\n' + ' concatenation or repetition.\n' + '\n' + '8. "index" raises "ValueError" when *x* is not found in *s*. Not ' + 'all\n' + ' implementations support passing the additional arguments *i* ' + 'and\n' + ' *j*. These arguments allow efficient searching of subsections ' + 'of\n' + ' the sequence. 
Passing the extra arguments is roughly ' + 'equivalent to\n' + ' using "s[i:j].index(x)", only without copying any data and ' + 'with the\n' + ' returned index being relative to the start of the sequence ' + 'rather\n' + ' than the start of the slice.\n' + '\n' + '\n' + 'Immutable Sequence Types\n' + '========================\n' + '\n' + 'The only operation that immutable sequence types generally ' + 'implement\n' + 'that is not also implemented by mutable sequence types is ' + 'support for\n' + 'the "hash()" built-in.\n' + '\n' + 'This support allows immutable sequences, such as "tuple" ' + 'instances, to\n' + 'be used as "dict" keys and stored in "set" and "frozenset" ' + 'instances.\n' + '\n' + 'Attempting to hash an immutable sequence that contains ' + 'unhashable\n' + 'values will result in "TypeError".\n' + '\n' + '\n' + 'Mutable Sequence Types\n' + '======================\n' + '\n' + 'The operations in the following table are defined on mutable ' + 'sequence\n' + 'types. The "collections.abc.MutableSequence" ABC is provided to ' + 'make\n' + 'it easier to correctly implement these operations on custom ' + 'sequence\n' + 'types.\n' + '\n' + 'In the table *s* is an instance of a mutable sequence type, *t* ' + 'is any\n' + 'iterable object and *x* is an arbitrary object that meets any ' + 'type and\n' + 'value restrictions imposed by *s* (for example, "bytearray" ' + 'only\n' + 'accepts integers that meet the value restriction "0 <= x <= ' + '255").\n' + '\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| Operation | ' + 'Result | Notes |\n' + '|================================|==================================|=======================|\n' + '| "s[i] = x" | item *i* of *s* is replaced ' + 'by | |\n' + '| | ' + '*x* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s[i:j] = t" | slice of *s* from *i* to *j* ' + 'is | |\n' + '| | replaced by 
the contents of ' + 'the | |\n' + '| | iterable ' + '*t* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "del s[i:j]" | same as "s[i:j] = ' + '[]" | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s[i:j:k] = t" | the elements of "s[i:j:k]" ' + 'are | (1) |\n' + '| | replaced by those of ' + '*t* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "del s[i:j:k]" | removes the elements ' + 'of | |\n' + '| | "s[i:j:k]" from the ' + 'list | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.append(x)" | appends *x* to the end of ' + 'the | |\n' + '| | sequence (same ' + 'as | |\n' + '| | "s[len(s):len(s)] = ' + '[x]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.clear()" | removes all items from *s* ' + '(same | (5) |\n' + '| | as "del ' + 's[:]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.copy()" | creates a shallow copy of ' + '*s* | (5) |\n' + '| | (same as ' + '"s[:]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.extend(t)" or "s += t" | extends *s* with the contents ' + 'of | |\n' + '| | *t* (for the most part the ' + 'same | |\n' + '| | as "s[len(s):len(s)] = ' + 't") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s *= n" | updates *s* with its ' + 'contents | (6) |\n' + '| | repeated *n* ' + 'times | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.insert(i, x)" | inserts *x* into *s* at ' + 'the | |\n' + '| | index given by *i* (same ' + 'as | |\n' + '| | "s[i:i] = ' + '[x]") | 
|\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.pop()" or "s.pop(i)" | retrieves the item at *i* ' + 'and | (2) |\n' + '| | also removes it from ' + '*s* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.remove(x)" | remove the first item from ' + '*s* | (3) |\n' + '| | where "s[i]" is equal to ' + '*x* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.reverse()" | reverses the items of *s* ' + 'in | (4) |\n' + '| | ' + 'place | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '\n' + 'Notes:\n' + '\n' + '1. *t* must have the same length as the slice it is replacing.\n' + '\n' + '2. The optional argument *i* defaults to "-1", so that by ' + 'default the\n' + ' last item is removed and returned.\n' + '\n' + '3. "remove()" raises "ValueError" when *x* is not found in *s*.\n' + '\n' + '4. The "reverse()" method modifies the sequence in place for ' + 'economy\n' + ' of space when reversing a large sequence. To remind users ' + 'that it\n' + ' operates by side effect, it does not return the reversed ' + 'sequence.\n' + '\n' + '5. "clear()" and "copy()" are included for consistency with the\n' + ' interfaces of mutable containers that don’t support slicing\n' + ' operations (such as "dict" and "set"). "copy()" is not part ' + 'of the\n' + ' "collections.abc.MutableSequence" ABC, but most concrete ' + 'mutable\n' + ' sequence classes provide it.\n' + '\n' + ' New in version 3.3: "clear()" and "copy()" methods.\n' + '\n' + '6. The value *n* is an integer, or an object implementing\n' + ' "__index__()". 
Zero and negative values of *n* clear the ' + 'sequence.\n' + ' Items in the sequence are not copied; they are referenced ' + 'multiple\n' + ' times, as explained for "s * n" under Common Sequence ' + 'Operations.\n' + '\n' + '\n' + 'Lists\n' + '=====\n' + '\n' + 'Lists are mutable sequences, typically used to store collections ' + 'of\n' + 'homogeneous items (where the precise degree of similarity will ' + 'vary by\n' + 'application).\n' + '\n' + 'class list([iterable])\n' + '\n' + ' Lists may be constructed in several ways:\n' + '\n' + ' * Using a pair of square brackets to denote the empty list: ' + '"[]"\n' + '\n' + ' * Using square brackets, separating items with commas: "[a]", ' + '"[a,\n' + ' b, c]"\n' + '\n' + ' * Using a list comprehension: "[x for x in iterable]"\n' + '\n' + ' * Using the type constructor: "list()" or "list(iterable)"\n' + '\n' + ' The constructor builds a list whose items are the same and in ' + 'the\n' + ' same order as *iterable*’s items. *iterable* may be either ' + 'a\n' + ' sequence, a container that supports iteration, or an ' + 'iterator\n' + ' object. If *iterable* is already a list, a copy is made and\n' + ' returned, similar to "iterable[:]". For example, ' + '"list(\'abc\')"\n' + ' returns "[\'a\', \'b\', \'c\']" and "list( (1, 2, 3) )" ' + 'returns "[1, 2,\n' + ' 3]". If no argument is given, the constructor creates a new ' + 'empty\n' + ' list, "[]".\n' + '\n' + ' Many other operations also produce lists, including the ' + '"sorted()"\n' + ' built-in.\n' + '\n' + ' Lists implement all of the common and mutable sequence ' + 'operations.\n' + ' Lists also provide the following additional method:\n' + '\n' + ' sort(*, key=None, reverse=False)\n' + '\n' + ' This method sorts the list in place, using only "<" ' + 'comparisons\n' + ' between items. 
Exceptions are not suppressed - if any ' + 'comparison\n' + ' operations fail, the entire sort operation will fail (and ' + 'the\n' + ' list will likely be left in a partially modified state).\n' + '\n' + ' "sort()" accepts two arguments that can only be passed by\n' + ' keyword (keyword-only arguments):\n' + '\n' + ' *key* specifies a function of one argument that is used ' + 'to\n' + ' extract a comparison key from each list element (for ' + 'example,\n' + ' "key=str.lower"). The key corresponding to each item in ' + 'the list\n' + ' is calculated once and then used for the entire sorting ' + 'process.\n' + ' The default value of "None" means that list items are ' + 'sorted\n' + ' directly without calculating a separate key value.\n' + '\n' + ' The "functools.cmp_to_key()" utility is available to ' + 'convert a\n' + ' 2.x style *cmp* function to a *key* function.\n' + '\n' + ' *reverse* is a boolean value. If set to "True", then the ' + 'list\n' + ' elements are sorted as if each comparison were reversed.\n' + '\n' + ' This method modifies the sequence in place for economy of ' + 'space\n' + ' when sorting a large sequence. To remind users that it ' + 'operates\n' + ' by side effect, it does not return the sorted sequence ' + '(use\n' + ' "sorted()" to explicitly request a new sorted list ' + 'instance).\n' + '\n' + ' The "sort()" method is guaranteed to be stable. A sort ' + 'is\n' + ' stable if it guarantees not to change the relative order ' + 'of\n' + ' elements that compare equal — this is helpful for sorting ' + 'in\n' + ' multiple passes (for example, sort by department, then by ' + 'salary\n' + ' grade).\n' + '\n' + ' For sorting examples and a brief sorting tutorial, see ' + 'Sorting\n' + ' HOW TO.\n' + '\n' + ' **CPython implementation detail:** While a list is being ' + 'sorted,\n' + ' the effect of attempting to mutate, or even inspect, the ' + 'list is\n' + ' undefined. 
The C implementation of Python makes the list ' + 'appear\n' + ' empty for the duration, and raises "ValueError" if it can ' + 'detect\n' + ' that the list has been mutated during a sort.\n' + '\n' + '\n' + 'Tuples\n' + '======\n' + '\n' + 'Tuples are immutable sequences, typically used to store ' + 'collections of\n' + 'heterogeneous data (such as the 2-tuples produced by the ' + '"enumerate()"\n' + 'built-in). Tuples are also used for cases where an immutable ' + 'sequence\n' + 'of homogeneous data is needed (such as allowing storage in a ' + '"set" or\n' + '"dict" instance).\n' + '\n' + 'class tuple([iterable])\n' + '\n' + ' Tuples may be constructed in a number of ways:\n' + '\n' + ' * Using a pair of parentheses to denote the empty tuple: ' + '"()"\n' + '\n' + ' * Using a trailing comma for a singleton tuple: "a," or ' + '"(a,)"\n' + '\n' + ' * Separating items with commas: "a, b, c" or "(a, b, c)"\n' + '\n' + ' * Using the "tuple()" built-in: "tuple()" or ' + '"tuple(iterable)"\n' + '\n' + ' The constructor builds a tuple whose items are the same and ' + 'in the\n' + ' same order as *iterable*’s items. *iterable* may be either ' + 'a\n' + ' sequence, a container that supports iteration, or an ' + 'iterator\n' + ' object. If *iterable* is already a tuple, it is returned\n' + ' unchanged. For example, "tuple(\'abc\')" returns "(\'a\', ' + '\'b\', \'c\')"\n' + ' and "tuple( [1, 2, 3] )" returns "(1, 2, 3)". If no argument ' + 'is\n' + ' given, the constructor creates a new empty tuple, "()".\n' + '\n' + ' Note that it is actually the comma which makes a tuple, not ' + 'the\n' + ' parentheses. 
The parentheses are optional, except in the ' + 'empty\n' + ' tuple case, or when they are needed to avoid syntactic ' + 'ambiguity.\n' + ' For example, "f(a, b, c)" is a function call with three ' + 'arguments,\n' + ' while "f((a, b, c))" is a function call with a 3-tuple as the ' + 'sole\n' + ' argument.\n' + '\n' + ' Tuples implement all of the common sequence operations.\n' + '\n' + 'For heterogeneous collections of data where access by name is ' + 'clearer\n' + 'than access by index, "collections.namedtuple()" may be a more\n' + 'appropriate choice than a simple tuple object.\n' + '\n' + '\n' + 'Ranges\n' + '======\n' + '\n' + 'The "range" type represents an immutable sequence of numbers and ' + 'is\n' + 'commonly used for looping a specific number of times in "for" ' + 'loops.\n' + '\n' + 'class range(stop)\n' + 'class range(start, stop[, step])\n' + '\n' + ' The arguments to the range constructor must be integers ' + '(either\n' + ' built-in "int" or any object that implements the ' + '"__index__()"\n' + ' special method). If the *step* argument is omitted, it ' + 'defaults to\n' + ' "1". If the *start* argument is omitted, it defaults to "0". ' + 'If\n' + ' *step* is zero, "ValueError" is raised.\n' + '\n' + ' For a positive *step*, the contents of a range "r" are ' + 'determined\n' + ' by the formula "r[i] = start + step*i" where "i >= 0" and ' + '"r[i] <\n' + ' stop".\n' + '\n' + ' For a negative *step*, the contents of the range are still\n' + ' determined by the formula "r[i] = start + step*i", but the\n' + ' constraints are "i >= 0" and "r[i] > stop".\n' + '\n' + ' A range object will be empty if "r[0]" does not meet the ' + 'value\n' + ' constraint. 
Ranges do support negative indices, but these ' + 'are\n' + ' interpreted as indexing from the end of the sequence ' + 'determined by\n' + ' the positive indices.\n' + '\n' + ' Ranges containing absolute values larger than "sys.maxsize" ' + 'are\n' + ' permitted but some features (such as "len()") may raise\n' + ' "OverflowError".\n' + '\n' + ' Range examples:\n' + '\n' + ' >>> list(range(10))\n' + ' [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]\n' + ' >>> list(range(1, 11))\n' + ' [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n' + ' >>> list(range(0, 30, 5))\n' + ' [0, 5, 10, 15, 20, 25]\n' + ' >>> list(range(0, 10, 3))\n' + ' [0, 3, 6, 9]\n' + ' >>> list(range(0, -10, -1))\n' + ' [0, -1, -2, -3, -4, -5, -6, -7, -8, -9]\n' + ' >>> list(range(0))\n' + ' []\n' + ' >>> list(range(1, 0))\n' + ' []\n' + '\n' + ' Ranges implement all of the common sequence operations ' + 'except\n' + ' concatenation and repetition (due to the fact that range ' + 'objects\n' + ' can only represent sequences that follow a strict pattern ' + 'and\n' + ' repetition and concatenation will usually violate that ' + 'pattern).\n' + '\n' + ' start\n' + '\n' + ' The value of the *start* parameter (or "0" if the ' + 'parameter was\n' + ' not supplied)\n' + '\n' + ' stop\n' + '\n' + ' The value of the *stop* parameter\n' + '\n' + ' step\n' + '\n' + ' The value of the *step* parameter (or "1" if the parameter ' + 'was\n' + ' not supplied)\n' + '\n' + 'The advantage of the "range" type over a regular "list" or ' + '"tuple" is\n' + 'that a "range" object will always take the same (small) amount ' + 'of\n' + 'memory, no matter the size of the range it represents (as it ' + 'only\n' + 'stores the "start", "stop" and "step" values, calculating ' + 'individual\n' + 'items and subranges as needed).\n' + '\n' + 'Range objects implement the "collections.abc.Sequence" ABC, and\n' + 'provide features such as containment tests, element index ' + 'lookup,\n' + 'slicing and support for negative indices (see Sequence Types — ' + 'list,\n' + 
'tuple, range):\n' + '\n' + '>>> r = range(0, 20, 2)\n' + '>>> r\n' + 'range(0, 20, 2)\n' + '>>> 11 in r\n' + 'False\n' + '>>> 10 in r\n' + 'True\n' + '>>> r.index(10)\n' + '5\n' + '>>> r[5]\n' + '10\n' + '>>> r[:5]\n' + 'range(0, 10, 2)\n' + '>>> r[-1]\n' + '18\n' + '\n' + 'Testing range objects for equality with "==" and "!=" compares ' + 'them as\n' + 'sequences. That is, two range objects are considered equal if ' + 'they\n' + 'represent the same sequence of values. (Note that two range ' + 'objects\n' + 'that compare equal might have different "start", "stop" and ' + '"step"\n' + 'attributes, for example "range(0) == range(2, 1, 3)" or ' + '"range(0, 3,\n' + '2) == range(0, 4, 2)".)\n' + '\n' + 'Changed in version 3.2: Implement the Sequence ABC. Support ' + 'slicing\n' + 'and negative indices. Test "int" objects for membership in ' + 'constant\n' + 'time instead of iterating through all items.\n' + '\n' + 'Changed in version 3.3: Define ‘==’ and ‘!=’ to compare range ' + 'objects\n' + 'based on the sequence of values they define (instead of ' + 'comparing\n' + 'based on object identity).\n' + '\n' + 'New in version 3.3: The "start", "stop" and "step" attributes.\n' + '\n' + 'See also:\n' + '\n' + ' * The linspace recipe shows how to implement a lazy version of ' + 'range\n' + ' suitable for floating point applications.\n', + 'typesseq-mutable': 'Mutable Sequence Types\n' + '**********************\n' + '\n' + 'The operations in the following table are defined on ' + 'mutable sequence\n' + 'types. 
The "collections.abc.MutableSequence" ABC is ' + 'provided to make\n' + 'it easier to correctly implement these operations on ' + 'custom sequence\n' + 'types.\n' + '\n' + 'In the table *s* is an instance of a mutable sequence ' + 'type, *t* is any\n' + 'iterable object and *x* is an arbitrary object that ' + 'meets any type and\n' + 'value restrictions imposed by *s* (for example, ' + '"bytearray" only\n' + 'accepts integers that meet the value restriction "0 <= x ' + '<= 255").\n' + '\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| Operation | ' + 'Result | Notes ' + '|\n' + '|================================|==================================|=======================|\n' + '| "s[i] = x" | item *i* of *s* is ' + 'replaced by | |\n' + '| | ' + '*x* | ' + '|\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s[i:j] = t" | slice of *s* from *i* ' + 'to *j* is | |\n' + '| | replaced by the ' + 'contents of the | |\n' + '| | iterable ' + '*t* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "del s[i:j]" | same as "s[i:j] = ' + '[]" | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s[i:j:k] = t" | the elements of ' + '"s[i:j:k]" are | (1) |\n' + '| | replaced by those of ' + '*t* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "del s[i:j:k]" | removes the elements ' + 'of | |\n' + '| | "s[i:j:k]" from the ' + 'list | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.append(x)" | appends *x* to the ' + 'end of the | |\n' + '| | sequence (same ' + 'as | |\n' + '| | "s[len(s):len(s)] = ' + '[x]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| 
"s.clear()" | removes all items ' + 'from *s* (same | (5) |\n' + '| | as "del ' + 's[:]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.copy()" | creates a shallow ' + 'copy of *s* | (5) |\n' + '| | (same as ' + '"s[:]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.extend(t)" or "s += t" | extends *s* with the ' + 'contents of | |\n' + '| | *t* (for the most ' + 'part the same | |\n' + '| | as "s[len(s):len(s)] ' + '= t") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s *= n" | updates *s* with its ' + 'contents | (6) |\n' + '| | repeated *n* ' + 'times | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.insert(i, x)" | inserts *x* into *s* ' + 'at the | |\n' + '| | index given by *i* ' + '(same as | |\n' + '| | "s[i:i] = ' + '[x]") | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.pop()" or "s.pop(i)" | retrieves the item at ' + '*i* and | (2) |\n' + '| | also removes it from ' + '*s* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.remove(x)" | remove the first item ' + 'from *s* | (3) |\n' + '| | where "s[i]" is equal ' + 'to *x* | |\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '| "s.reverse()" | reverses the items of ' + '*s* in | (4) |\n' + '| | ' + 'place | ' + '|\n' + '+--------------------------------+----------------------------------+-----------------------+\n' + '\n' + 'Notes:\n' + '\n' + '1. *t* must have the same length as the slice it is ' + 'replacing.\n' + '\n' + '2. The optional argument *i* defaults to "-1", so that ' + 'by default the\n' + ' last item is removed and returned.\n' + '\n' + '3. 
"remove()" raises "ValueError" when *x* is not found ' + 'in *s*.\n' + '\n' + '4. The "reverse()" method modifies the sequence in place ' + 'for economy\n' + ' of space when reversing a large sequence. To remind ' + 'users that it\n' + ' operates by side effect, it does not return the ' + 'reversed sequence.\n' + '\n' + '5. "clear()" and "copy()" are included for consistency ' + 'with the\n' + ' interfaces of mutable containers that don’t support ' + 'slicing\n' + ' operations (such as "dict" and "set"). "copy()" is ' + 'not part of the\n' + ' "collections.abc.MutableSequence" ABC, but most ' + 'concrete mutable\n' + ' sequence classes provide it.\n' + '\n' + ' New in version 3.3: "clear()" and "copy()" methods.\n' + '\n' + '6. The value *n* is an integer, or an object ' + 'implementing\n' + ' "__index__()". Zero and negative values of *n* clear ' + 'the sequence.\n' + ' Items in the sequence are not copied; they are ' + 'referenced multiple\n' + ' times, as explained for "s * n" under Common Sequence ' + 'Operations.\n', + 'unary': 'Unary arithmetic and bitwise operations\n' + '***************************************\n' + '\n' + 'All unary arithmetic and bitwise operations have the same ' + 'priority:\n' + '\n' + ' u_expr ::= power | "-" u_expr | "+" u_expr | "~" u_expr\n' + '\n' + 'The unary "-" (minus) operator yields the negation of its numeric\n' + 'argument; the operation can be overridden with the "__neg__()" ' + 'special\n' + 'method.\n' + '\n' + 'The unary "+" (plus) operator yields its numeric argument ' + 'unchanged;\n' + 'the operation can be overridden with the "__pos__()" special ' + 'method.\n' + '\n' + 'The unary "~" (invert) operator yields the bitwise inversion of ' + 'its\n' + 'integer argument. The bitwise inversion of "x" is defined as\n' + '"-(x+1)". 
It only applies to integral numbers or to custom ' + 'objects\n' + 'that override the "__invert__()" special method.\n' + '\n' + 'In all three cases, if the argument does not have the proper type, ' + 'a\n' + '"TypeError" exception is raised.\n', + 'while': 'The "while" statement\n' + '*********************\n' + '\n' + 'The "while" statement is used for repeated execution as long as an\n' + 'expression is true:\n' + '\n' + ' while_stmt ::= "while" assignment_expression ":" suite\n' + ' ["else" ":" suite]\n' + '\n' + 'This repeatedly tests the expression and, if it is true, executes ' + 'the\n' + 'first suite; if the expression is false (which may be the first ' + 'time\n' + 'it is tested) the suite of the "else" clause, if present, is ' + 'executed\n' + 'and the loop terminates.\n' + '\n' + 'A "break" statement executed in the first suite terminates the ' + 'loop\n' + 'without executing the "else" clause’s suite. A "continue" ' + 'statement\n' + 'executed in the first suite skips the rest of the suite and goes ' + 'back\n' + 'to testing the expression.\n', + 'with': 'The "with" statement\n' + '********************\n' + '\n' + 'The "with" statement is used to wrap the execution of a block with\n' + 'methods defined by a context manager (see section With Statement\n' + 'Context Managers). This allows common "try"…"except"…"finally" ' + 'usage\n' + 'patterns to be encapsulated for convenient reuse.\n' + '\n' + ' with_stmt ::= "with" ( "(" with_stmt_contents ","? ")" | ' + 'with_stmt_contents ) ":" suite\n' + ' with_stmt_contents ::= with_item ("," with_item)*\n' + ' with_item ::= expression ["as" target]\n' + '\n' + 'The execution of the "with" statement with one “item” proceeds as\n' + 'follows:\n' + '\n' + '1. The context expression (the expression given in the "with_item") ' + 'is\n' + ' evaluated to obtain a context manager.\n' + '\n' + '2. The context manager’s "__enter__()" is loaded for later use.\n' + '\n' + '3. 
The context manager’s "__exit__()" is loaded for later use.\n' + '\n' + '4. The context manager’s "__enter__()" method is invoked.\n' + '\n' + '5. If a target was included in the "with" statement, the return ' + 'value\n' + ' from "__enter__()" is assigned to it.\n' + '\n' + ' Note:\n' + '\n' + ' The "with" statement guarantees that if the "__enter__()" ' + 'method\n' + ' returns without an error, then "__exit__()" will always be\n' + ' called. Thus, if an error occurs during the assignment to the\n' + ' target list, it will be treated the same as an error occurring\n' + ' within the suite would be. See step 7 below.\n' + '\n' + '6. The suite is executed.\n' + '\n' + '7. The context manager’s "__exit__()" method is invoked. If an\n' + ' exception caused the suite to be exited, its type, value, and\n' + ' traceback are passed as arguments to "__exit__()". Otherwise, ' + 'three\n' + ' "None" arguments are supplied.\n' + '\n' + ' If the suite was exited due to an exception, and the return ' + 'value\n' + ' from the "__exit__()" method was false, the exception is ' + 'reraised.\n' + ' If the return value was true, the exception is suppressed, and\n' + ' execution continues with the statement following the "with"\n' + ' statement.\n' + '\n' + ' If the suite was exited for any reason other than an exception, ' + 'the\n' + ' return value from "__exit__()" is ignored, and execution ' + 'proceeds\n' + ' at the normal location for the kind of exit that was taken.\n' + '\n' + 'The following code:\n' + '\n' + ' with EXPRESSION as TARGET:\n' + ' SUITE\n' + '\n' + 'is semantically equivalent to:\n' + '\n' + ' manager = (EXPRESSION)\n' + ' enter = type(manager).__enter__\n' + ' exit = type(manager).__exit__\n' + ' value = enter(manager)\n' + ' hit_except = False\n' + '\n' + ' try:\n' + ' TARGET = value\n' + ' SUITE\n' + ' except:\n' + ' hit_except = True\n' + ' if not exit(manager, *sys.exc_info()):\n' + ' raise\n' + ' finally:\n' + ' if not hit_except:\n' + ' exit(manager, None, 
None, None)\n' + '\n' + 'With more than one item, the context managers are processed as if\n' + 'multiple "with" statements were nested:\n' + '\n' + ' with A() as a, B() as b:\n' + ' SUITE\n' + '\n' + 'is semantically equivalent to:\n' + '\n' + ' with A() as a:\n' + ' with B() as b:\n' + ' SUITE\n' + '\n' + 'You can also write multi-item context managers in multiple lines if\n' + 'the items are surrounded by parentheses. For example:\n' + '\n' + ' with (\n' + ' A() as a,\n' + ' B() as b,\n' + ' ):\n' + ' SUITE\n' + '\n' + 'Changed in version 3.1: Support for multiple context expressions.\n' + '\n' + 'Changed in version 3.10: Support for using grouping parentheses to\n' + 'break the statement in multiple lines.\n' + '\n' + 'See also:\n' + '\n' + ' **PEP 343** - The “with” statement\n' + ' The specification, background, and examples for the Python ' + '"with"\n' + ' statement.\n', + 'yield': 'The "yield" statement\n' + '*********************\n' + '\n' + ' yield_stmt ::= yield_expression\n' + '\n' + 'A "yield" statement is semantically equivalent to a yield ' + 'expression.\n' + 'The yield statement can be used to omit the parentheses that would\n' + 'otherwise be required in the equivalent yield expression ' + 'statement.\n' + 'For example, the yield statements\n' + '\n' + ' yield \n' + ' yield from \n' + '\n' + 'are equivalent to the yield expression statements\n' + '\n' + ' (yield )\n' + ' (yield from )\n' + '\n' + 'Yield expressions and statements are only used when defining a\n' + '*generator* function, and are only used in the body of the ' + 'generator\n' + 'function. 
Using yield in a function definition is sufficient to ' + 'cause\n' + 'that definition to create a generator function instead of a normal\n' + 'function.\n' + '\n' + 'For full details of "yield" semantics, refer to the Yield ' + 'expressions\n' + 'section.\n'} diff --git a/parrot/lib/python3.10/unittest/__init__.py b/parrot/lib/python3.10/unittest/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..348dc471f4c3d44af8cd984fc5d08b1d2072f30b --- /dev/null +++ b/parrot/lib/python3.10/unittest/__init__.py @@ -0,0 +1,95 @@ +""" +Python unit testing framework, based on Erich Gamma's JUnit and Kent Beck's +Smalltalk testing framework (used with permission). + +This module contains the core framework classes that form the basis of +specific test cases and suites (TestCase, TestSuite etc.), and also a +text-based utility class for running the tests and reporting the results + (TextTestRunner). + +Simple usage: + + import unittest + + class IntegerArithmeticTestCase(unittest.TestCase): + def testAdd(self): # test method names begin with 'test' + self.assertEqual((1 + 2), 3) + self.assertEqual(0 + 1, 1) + def testMultiply(self): + self.assertEqual((0 * 10), 0) + self.assertEqual((5 * 8), 40) + + if __name__ == '__main__': + unittest.main() + +Further information is available in the bundled documentation, and from + + http://docs.python.org/library/unittest.html + +Copyright (c) 1999-2003 Steve Purcell +Copyright (c) 2003-2010 Python Software Foundation +This module is free software, and you may redistribute it and/or modify +it under the same terms as Python itself, so long as this copyright message +and disclaimer are retained in their original form. + +IN NO EVENT SHALL THE AUTHOR BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, +SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OF +THIS CODE, EVEN IF THE AUTHOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH +DAMAGE. 
def load_tests(loader, tests, pattern):
    """load_tests protocol hook: discover unittest's own test suite.

    There are no tests defined in this module itself, so instead of letting
    discovery introspect the symbols re-exported here (e.g. FunctionTestCase),
    delegate to ``loader.discover`` rooted at this package's directory.
    """
    import os.path
    package_dir = os.path.dirname(__file__)
    return loader.discover(start_dir=package_dir, pattern=pattern)
def __dir__():
    # Advertise the lazily-imported attribute alongside the real globals so
    # dir(unittest) lists IsolatedAsyncioTestCase before first access.
    return globals().keys() | {'IsolatedAsyncioTestCase'}

def __getattr__(name):
    """PEP 562 module hook: import IsolatedAsyncioTestCase on first use.

    Importing it eagerly would drag in asyncio, which is relatively heavy
    and unneeded by most test runs.
    """
    if name != 'IsolatedAsyncioTestCase':
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
    global IsolatedAsyncioTestCase
    from .async_case import IsolatedAsyncioTestCase
    return IsolatedAsyncioTestCase
import logging
import collections

from .case import _BaseTestCaseContext


# Parallel lists of captured LogRecord objects and their formatted string
# renderings; shared between the capturing handler and the context manager.
_LoggingWatcher = collections.namedtuple("_LoggingWatcher",
                                         ["records", "output"])

class _CapturingHandler(logging.Handler):
    """
    A logging handler capturing all (raw and formatted) logging output.
    """

    def __init__(self):
        logging.Handler.__init__(self)
        self.watcher = _LoggingWatcher([], [])

    def flush(self):
        # Nothing is buffered; emit() appends synchronously.
        pass

    def emit(self, record):
        # Keep both the raw record and its formatted text for assertions.
        self.watcher.records.append(record)
        msg = self.format(record)
        self.watcher.output.append(msg)


class _AssertLogsContext(_BaseTestCaseContext):
    """A context manager for assertLogs() and assertNoLogs() """

    LOGGING_FORMAT = "%(levelname)s:%(name)s:%(message)s"

    def __init__(self, test_case, logger_name, level, no_logs):
        _BaseTestCaseContext.__init__(self, test_case)
        self.logger_name = logger_name
        if level:
            # Accept either a level name ("INFO") or a numeric level; fall
            # back to the value itself when it is not a known name.
            self.level = logging._nameToLevel.get(level, level)
        else:
            self.level = logging.INFO
        self.msg = None
        # True selects assertNoLogs semantics, False assertLogs semantics.
        self.no_logs = no_logs

    def __enter__(self):
        if isinstance(self.logger_name, logging.Logger):
            logger = self.logger = self.logger_name
        else:
            logger = self.logger = logging.getLogger(self.logger_name)
        formatter = logging.Formatter(self.LOGGING_FORMAT)
        handler = _CapturingHandler()
        handler.setLevel(self.level)
        handler.setFormatter(formatter)
        self.watcher = handler.watcher
        # Save the logger's state so __exit__ can restore it exactly.
        self.old_handlers = logger.handlers[:]
        self.old_level = logger.level
        self.old_propagate = logger.propagate
        # Replace all handlers so only the capturing handler sees records.
        logger.handlers = [handler]
        logger.setLevel(self.level)
        logger.propagate = False
        if self.no_logs:
            # assertNoLogs exposes no watcher to the caller.
            return
        return handler.watcher

    def __exit__(self, exc_type, exc_value, tb):
        # Restore the logger exactly as __enter__ found it.
        self.logger.handlers = self.old_handlers
        self.logger.propagate = self.old_propagate
        self.logger.setLevel(self.old_level)

        if exc_type is not None:
            # let unexpected exceptions pass through
            return False

        if self.no_logs:
            # assertNoLogs
            if len(self.watcher.records) > 0:
                self._raiseFailure(
                    "Unexpected logs found: {!r}".format(
                        self.watcher.output
                    )
                )

        else:
            # assertLogs
            if len(self.watcher.records) == 0:
                self._raiseFailure(
                    "no logs of level {} or higher triggered on {}"
                    .format(logging.getLevelName(self.level), self.logger.name))
    separator1 = '=' * 70  # heavy rule above each error/failure report
    separator2 = '-' * 70  # light rule between report header and traceback

    def __init__(self, stream, descriptions, verbosity):
        super(TextTestResult, self).__init__(stream, descriptions, verbosity)
        self.stream = stream
        # verbosity > 1: one labelled line per test; verbosity == 1: dots.
        self.showAll = verbosity > 1
        self.dots = verbosity == 1
        self.descriptions = descriptions

    def getDescription(self, test):
        # Combine str(test) with the first docstring line when available
        # and descriptions were requested.
        doc_first_line = test.shortDescription()
        if self.descriptions and doc_first_line:
            return '\n'.join((str(test), doc_first_line))
        else:
            return str(test)

    def startTest(self, test):
        super(TextTestResult, self).startTest(test)
        if self.showAll:
            self.stream.write(self.getDescription(test))
            self.stream.write(" ... ")
            self.stream.flush()

    def addSuccess(self, test):
        super(TextTestResult, self).addSuccess(test)
        if self.showAll:
            self.stream.writeln("ok")
            self.stream.flush()
        elif self.dots:
            self.stream.write('.')
            self.stream.flush()

    def addError(self, test, err):
        super(TextTestResult, self).addError(test, err)
        if self.showAll:
            self.stream.writeln("ERROR")
            self.stream.flush()
        elif self.dots:
            self.stream.write('E')
            self.stream.flush()

    def addFailure(self, test, err):
        super(TextTestResult, self).addFailure(test, err)
        if self.showAll:
            self.stream.writeln("FAIL")
            self.stream.flush()
        elif self.dots:
            self.stream.write('F')
            self.stream.flush()

    def addSkip(self, test, reason):
        super(TextTestResult, self).addSkip(test, reason)
        if self.showAll:
            self.stream.writeln("skipped {0!r}".format(reason))
            self.stream.flush()
        elif self.dots:
            self.stream.write("s")
            self.stream.flush()

    def addExpectedFailure(self, test, err):
        super(TextTestResult, self).addExpectedFailure(test, err)
        if self.showAll:
            self.stream.writeln("expected failure")
            self.stream.flush()
        elif self.dots:
            self.stream.write("x")
            self.stream.flush()

    def addUnexpectedSuccess(self, test):
        super(TextTestResult, self).addUnexpectedSuccess(test)
        if self.showAll:
            self.stream.writeln("unexpected success")
            self.stream.flush()
        elif self.dots:
            self.stream.write("u")
            self.stream.flush()
class TextTestRunner(object):
    """A test runner class that displays results in textual form.

    It prints out the names of tests as they are run, errors as they
    occur, and a summary of the results at the end of the test run.
    """
    resultclass = TextTestResult

    def __init__(self, stream=None, descriptions=True, verbosity=1,
                 failfast=False, buffer=False, resultclass=None, warnings=None,
                 *, tb_locals=False):
        """Construct a TextTestRunner.

        Subclasses should accept **kwargs to ensure compatibility as the
        interface changes.
        """
        if stream is None:
            stream = sys.stderr
        # Wrap the stream so the result classes can call writeln().
        self.stream = _WritelnDecorator(stream)
        self.descriptions = descriptions
        self.verbosity = verbosity
        self.failfast = failfast
        self.buffer = buffer
        self.tb_locals = tb_locals
        self.warnings = warnings
        if resultclass is not None:
            self.resultclass = resultclass

    def _makeResult(self):
        # Factory hook; subclasses may override resultclass instead.
        return self.resultclass(self.stream, self.descriptions, self.verbosity)

    def run(self, test):
        "Run the given test case or test suite."
        result = self._makeResult()
        registerResult(result)
        result.failfast = self.failfast
        result.buffer = self.buffer
        result.tb_locals = self.tb_locals
        with warnings.catch_warnings():
            if self.warnings:
                # if self.warnings is set, use it to filter all the warnings
                warnings.simplefilter(self.warnings)
                # if the filter is 'default' or 'always', special-case the
                # warnings from the deprecated unittest methods to show them
                # no more than once per module, because they can be fairly
                # noisy.  The -Wd and -Wa flags can be used to bypass this
                # only when self.warnings is None.
                if self.warnings in ['default', 'always']:
                    warnings.filterwarnings('module',
                            category=DeprecationWarning,
                            message=r'Please use assert\w+ instead.')
            startTime = time.perf_counter()
            # startTestRun/stopTestRun are optional on result objects.
            startTestRun = getattr(result, 'startTestRun', None)
            if startTestRun is not None:
                startTestRun()
            try:
                test(result)
            finally:
                stopTestRun = getattr(result, 'stopTestRun', None)
                if stopTestRun is not None:
                    stopTestRun()
            stopTime = time.perf_counter()
        timeTaken = stopTime - startTime
        result.printErrors()
        if hasattr(result, 'separator2'):
            self.stream.writeln(result.separator2)
        run = result.testsRun
        self.stream.writeln("Ran %d test%s in %.3fs" %
                            (run, run != 1 and "s" or "", timeTaken))
        self.stream.writeln()

        # These counters only exist on TestResult subclasses that track
        # them; default to 0 when absent.
        expectedFails = unexpectedSuccesses = skipped = 0
        try:
            results = map(len, (result.expectedFailures,
                                result.unexpectedSuccesses,
                                result.skipped))
        except AttributeError:
            pass
        else:
            expectedFails, unexpectedSuccesses, skipped = results

        infos = []
        if not result.wasSuccessful():
            self.stream.write("FAILED")
            failed, errored = len(result.failures), len(result.errors)
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        else:
            self.stream.write("OK")
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expectedFails:
            infos.append("expected failures=%d" % expectedFails)
        if unexpectedSuccesses:
            infos.append("unexpected successes=%d" % unexpectedSuccesses)
        if infos:
            self.stream.writeln(" (%s)" % (", ".join(infos),))
        else:
            self.stream.write("\n")
        self.stream.flush()
        return result
+# 1999-02-10 fl Fixed problem with empty responses (from Skip Montanaro) +# 1999-06-20 fl Speed improvements, pluggable parsers/transports (0.9.8) +# 2000-11-28 fl Changed boolean to check the truth value of its argument +# 2001-02-24 fl Added encoding/Unicode/SafeTransport patches +# 2001-02-26 fl Added compare support to wrappers (0.9.9/1.0b1) +# 2001-03-28 fl Make sure response tuple is a singleton +# 2001-03-29 fl Don't require empty params element (from Nicholas Riley) +# 2001-06-10 fl Folded in _xmlrpclib accelerator support (1.0b2) +# 2001-08-20 fl Base xmlrpclib.Error on built-in Exception (from Paul Prescod) +# 2001-09-03 fl Allow Transport subclass to override getparser +# 2001-09-10 fl Lazy import of urllib, cgi, xmllib (20x import speedup) +# 2001-10-01 fl Remove containers from memo cache when done with them +# 2001-10-01 fl Use faster escape method (80% dumps speedup) +# 2001-10-02 fl More dumps microtuning +# 2001-10-04 fl Make sure import expat gets a parser (from Guido van Rossum) +# 2001-10-10 sm Allow long ints to be passed as ints if they don't overflow +# 2001-10-17 sm Test for int and long overflow (allows use on 64-bit systems) +# 2001-11-12 fl Use repr() to marshal doubles (from Paul Felix) +# 2002-03-17 fl Avoid buffered read when possible (from James Rucker) +# 2002-04-07 fl Added pythondoc comments +# 2002-04-16 fl Added __str__ methods to datetime/binary wrappers +# 2002-05-15 fl Added error constants (from Andrew Kuchling) +# 2002-06-27 fl Merged with Python CVS version +# 2002-10-22 fl Added basic authentication (based on code from Phillip Eby) +# 2003-01-22 sm Add support for the bool type +# 2003-02-27 gvr Remove apply calls +# 2003-04-24 sm Use cStringIO if available +# 2003-04-25 ak Add support for nil +# 2003-06-15 gn Add support for time.struct_time +# 2003-07-12 gp Correct marshalling of Faults +# 2003-10-31 mvl Add multicall support +# 2004-08-20 mvl Bump minimum supported Python version to 2.1 +# 2014-12-02 ch/doko Add 
workaround for gzip bomb vulnerability +# +# Copyright (c) 1999-2002 by Secret Labs AB. +# Copyright (c) 1999-2002 by Fredrik Lundh. +# +# info@pythonware.com +# http://www.pythonware.com +# +# -------------------------------------------------------------------- +# The XML-RPC client interface is +# +# Copyright (c) 1999-2002 by Secret Labs AB +# Copyright (c) 1999-2002 by Fredrik Lundh +# +# By obtaining, using, and/or copying this software and/or its +# associated documentation, you agree that you have read, understood, +# and will comply with the following terms and conditions: +# +# Permission to use, copy, modify, and distribute this software and +# its associated documentation for any purpose and without fee is +# hereby granted, provided that the above copyright notice appears in +# all copies, and that both that copyright notice and this permission +# notice appear in supporting documentation, and that the name of +# Secret Labs AB or the author not be used in advertising or publicity +# pertaining to distribution of the software without specific, written +# prior permission. +# +# SECRET LABS AB AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD +# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT- +# ABILITY AND FITNESS. IN NO EVENT SHALL SECRET LABS AB OR THE AUTHOR +# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY +# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, +# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS +# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE +# OF THIS SOFTWARE. +# -------------------------------------------------------------------- + +""" +An XML-RPC client interface for Python. + +The marshalling and response parser code can also be used to +implement XML-RPC servers. 
+ +Exported exceptions: + + Error Base class for client errors + ProtocolError Indicates an HTTP protocol error + ResponseError Indicates a broken response package + Fault Indicates an XML-RPC fault package + +Exported classes: + + ServerProxy Represents a logical connection to an XML-RPC server + + MultiCall Executor of boxcared xmlrpc requests + DateTime dateTime wrapper for an ISO 8601 string or time tuple or + localtime integer value to generate a "dateTime.iso8601" + XML-RPC value + Binary binary data wrapper + + Marshaller Generate an XML-RPC params chunk from a Python data structure + Unmarshaller Unmarshal an XML-RPC response from incoming XML event message + Transport Handles an HTTP transaction to an XML-RPC server + SafeTransport Handles an HTTPS transaction to an XML-RPC server + +Exported constants: + + (none) + +Exported functions: + + getparser Create instance of the fastest available parser & attach + to an unmarshalling object + dumps Convert an argument tuple or a Fault instance to an XML-RPC + request (or response, if the methodresponse option is used). + loads Convert an XML-RPC packet to unmarshalled data plus a method + name (None if not present). 
def escape(s):
    """Return *s* with the XML-special characters replaced by character
    entities, suitable for embedding in XML-RPC element text.

    "&" must be replaced first so that entities introduced by the later
    replacements are not themselves escaped.
    """
    s = s.replace("&", "&amp;")
    s = s.replace("<", "&lt;")
    return s.replace(">", "&gt;",)
class ProtocolError(Error):
    """Indicates an HTTP protocol error.

    Raised by the transport layer when the server returns an HTTP status
    other than 200 (OK).
    """

    def __init__(self, url, errcode, errmsg, headers):
        super().__init__()
        self.url = url
        self.errcode = errcode
        self.errmsg = errmsg
        self.headers = headers

    def __repr__(self):
        return (
            f"<{type(self).__name__} for {self.url}:"
            f" {self.errcode} {self.errmsg}>"
        )


class ResponseError(Error):
    """Indicates a broken response package."""
    pass


class Fault(Error):
    """Indicates an XML-RPC fault package."""

    def __init__(self, faultCode, faultString, **extra):
        super().__init__()
        self.faultCode = faultCode
        self.faultString = faultString

    def __repr__(self):
        return f"<{type(self).__name__} {self.faultCode}: {self.faultString!r}>"

# --------------------------------------------------------------------
# Special values

# Backwards compatibility: booleans were once wrapped in a dedicated type.
boolean = Boolean = bool

# Issue #13305: different format codes across platforms
_day0 = datetime(1, 1, 1)
def _try(fmt):
    # Probe whether this platform's strftime zero-pads the year for *fmt*.
    try:
        return _day0.strftime(fmt) == '0001'
    except ValueError:
        return False
if _try('%Y'):  # Mac OS X
    def _iso8601_format(value):
        return value.strftime("%Y%m%dT%H:%M:%S")
elif _try('%4Y'):  # Linux
    def _iso8601_format(value):
        return value.strftime("%4Y%m%dT%H:%M:%S")
else:
    def _iso8601_format(value):
        return value.strftime("%Y%m%dT%H:%M:%S").zfill(17)
del _day0
del _try


def _strftime(value):
    """Render *value* as an XML-RPC dateTime.iso8601 string.

    *value* may be a datetime, a 9-item time tuple / struct_time, or an
    integer seconds-since-epoch (0 meaning "now", via time.localtime).
    """
    if isinstance(value, datetime):
        return _iso8601_format(value)

    if not isinstance(value, (tuple, time.struct_time)):
        if value == 0:
            value = time.time()
        value = time.localtime(value)

    return "%04d%02d%02dT%02d:%02d:%02d" % value[:6]

class DateTime:
    """DateTime wrapper for an ISO 8601 string or time tuple or
    localtime integer value to generate 'dateTime.iso8601' XML-RPC
    value.
    """

    def __init__(self, value=0):
        # Strings are stored verbatim; everything else goes through
        # _strftime (datetime, time tuple, or epoch integer).
        if isinstance(value, str):
            self.value = value
        else:
            self.value = _strftime(value)

    def make_comparable(self, other):
        """Coerce *self* and *other* to a comparable (s, o) pair.

        Returns NotImplemented as *o* when no comparison is possible.
        """
        if isinstance(other, DateTime):
            s = self.value
            o = other.value
        elif isinstance(other, datetime):
            s = self.value
            o = _iso8601_format(other)
        elif isinstance(other, str):
            s = self.value
            o = other
        elif hasattr(other, "timetuple"):
            s = self.timetuple()
            o = other.timetuple()
        else:
            s = self
            o = NotImplemented
        return s, o

    def __lt__(self, other):
        s, o = self.make_comparable(other)
        if o is NotImplemented:
            return NotImplemented
        return s < o

    def __le__(self, other):
        s, o = self.make_comparable(other)
        if o is NotImplemented:
            return NotImplemented
        return s <= o

    def __gt__(self, other):
        s, o = self.make_comparable(other)
        if o is NotImplemented:
            return NotImplemented
        return s > o

    def __ge__(self, other):
        s, o = self.make_comparable(other)
        if o is NotImplemented:
            return NotImplemented
        return s >= o

    def __eq__(self, other):
        s, o = self.make_comparable(other)
        if o is NotImplemented:
            return NotImplemented
        return s == o

    def timetuple(self):
        return time.strptime(self.value, "%Y%m%dT%H:%M:%S")

    ##
    # Get date/time value.
    #
    # @return Date/time value, as an ISO 8601 string.

    def __str__(self):
        return self.value

    def __repr__(self):
        return "<%s %r at %#x>" % (self.__class__.__name__, self.value, id(self))

    def decode(self, data):
        # *data* is the text content of a dateTime.iso8601 element.
        self.value = str(data).strip()

    def encode(self, out):
        # Emit the full dateTime.iso8601 value element required by the
        # XML-RPC spec (the element markup was missing here).
        out.write("<value><dateTime.iso8601>")
        out.write(self.value)
        out.write("</dateTime.iso8601></value>\n")

def _datetime(data):
    # decode xml element contents into a DateTime structure.
    value = DateTime()
    value.decode(data)
    return value

def _datetime_type(data):
    # Variant used when use_builtin_types is requested: plain datetime.
    return datetime.strptime(data, "%Y%m%dT%H:%M:%S")
class Binary:
    """Wrapper for binary data.

    Transports arbitrary bytes over XML-RPC using BASE64 encoding.
    """

    def __init__(self, data=None):
        if data is None:
            data = b""
        else:
            if not isinstance(data, (bytes, bytearray)):
                raise TypeError("expected bytes or bytearray, not %s" %
                                data.__class__.__name__)
            data = bytes(data)  # Make a copy of the bytes!
        self.data = data

    ##
    # Get buffer contents.
    #
    # @return Buffer contents, as an 8-bit string.

    def __str__(self):
        return str(self.data, "latin-1")  # XXX encoding?!

    def __eq__(self, other):
        # Compares equal to other Binary wrappers and to raw bytes alike.
        if isinstance(other, Binary):
            other = other.data
        return self.data == other

    def decode(self, data):
        # *data* is the base64-encoded payload of a <base64> element.
        self.data = base64.decodebytes(data)

    def encode(self, out):
        # Emit the full base64 value element required by the XML-RPC spec
        # (the element markup was missing here).
        out.write("<value><base64>\n")
        encoded = base64.encodebytes(self.data)
        out.write(encoded.decode('ascii'))
        out.write("</base64></value>\n")

def _binary(data):
    # decode xml element contents into a Binary structure
    value = Binary()
    value.decode(data)
    return value

WRAPPERS = (DateTime, Binary)

# --------------------------------------------------------------------
# XML parsers

class ExpatParser:
    # fast expat parser for Python 2.0 and later.
    def __init__(self, target):
        self._parser = parser = expat.ParserCreate(None, None)
        self._target = target
        parser.StartElementHandler = target.start
        parser.EndElementHandler = target.end
        parser.CharacterDataHandler = target.data
        encoding = None
        target.xml(encoding, None)

    def feed(self, data):
        self._parser.Parse(data, False)

    def close(self):
        try:
            parser = self._parser
        except AttributeError:
            pass
        else:
            del self._target, self._parser  # get rid of circular references
            parser.Parse(b"", True)  # end of data
+# @see dumps + +class Marshaller: + """Generate an XML-RPC params chunk from a Python data structure. + + Create a Marshaller instance for each set of parameters, and use + the "dumps" method to convert your data (represented as a tuple) + to an XML-RPC params chunk. To write a fault response, pass a + Fault instance instead. You may prefer to use the "dumps" module + function for this purpose. + """ + + # by the way, if you don't understand what's going on in here, + # that's perfectly ok. + + def __init__(self, encoding=None, allow_none=False): + self.memo = {} + self.data = None + self.encoding = encoding + self.allow_none = allow_none + + dispatch = {} + + def dumps(self, values): + out = [] + write = out.append + dump = self.__dump + if isinstance(values, Fault): + # fault instance + write("\n") + dump({'faultCode': values.faultCode, + 'faultString': values.faultString}, + write) + write("\n") + else: + # parameter block + # FIXME: the xml-rpc specification allows us to leave out + # the entire block if there are no parameters. + # however, changing this may break older code (including + # old versions of xmlrpclib.py), so this is better left as + # is for now. See @XMLRPC3 for more information. /F + write("\n") + for v in values: + write("\n") + dump(v, write) + write("\n") + write("\n") + result = "".join(out) + return result + + def __dump(self, value, write): + try: + f = self.dispatch[type(value)] + except KeyError: + # check if this object can be marshalled as a structure + if not hasattr(value, '__dict__'): + raise TypeError("cannot marshal %s objects" % type(value)) + # check if this class is a sub-class of a basic type, + # because we don't know how to marshal these types + # (e.g. 
a string sub-class) + for type_ in type(value).__mro__: + if type_ in self.dispatch.keys(): + raise TypeError("cannot marshal %s objects" % type(value)) + # XXX(twouters): using "_arbitrary_instance" as key as a quick-fix + # for the p3yk merge, this should probably be fixed more neatly. + f = self.dispatch["_arbitrary_instance"] + f(self, value, write) + + def dump_nil (self, value, write): + if not self.allow_none: + raise TypeError("cannot marshal None unless allow_none is enabled") + write("") + dispatch[type(None)] = dump_nil + + def dump_bool(self, value, write): + write("") + write(value and "1" or "0") + write("\n") + dispatch[bool] = dump_bool + + def dump_long(self, value, write): + if value > MAXINT or value < MININT: + raise OverflowError("int exceeds XML-RPC limits") + write("") + write(str(int(value))) + write("\n") + dispatch[int] = dump_long + + # backward compatible + dump_int = dump_long + + def dump_double(self, value, write): + write("") + write(repr(value)) + write("\n") + dispatch[float] = dump_double + + def dump_unicode(self, value, write, escape=escape): + write("") + write(escape(value)) + write("\n") + dispatch[str] = dump_unicode + + def dump_bytes(self, value, write): + write("\n") + encoded = base64.encodebytes(value) + write(encoded.decode('ascii')) + write("\n") + dispatch[bytes] = dump_bytes + dispatch[bytearray] = dump_bytes + + def dump_array(self, value, write): + i = id(value) + if i in self.memo: + raise TypeError("cannot marshal recursive sequences") + self.memo[i] = None + dump = self.__dump + write("\n") + for v in value: + dump(v, write) + write("\n") + del self.memo[i] + dispatch[tuple] = dump_array + dispatch[list] = dump_array + + def dump_struct(self, value, write, escape=escape): + i = id(value) + if i in self.memo: + raise TypeError("cannot marshal recursive dictionaries") + self.memo[i] = None + dump = self.__dump + write("\n") + for k, v in value.items(): + write("\n") + if not isinstance(k, str): + raise 
TypeError("dictionary key must be string") + write("%s\n" % escape(k)) + dump(v, write) + write("\n") + write("\n") + del self.memo[i] + dispatch[dict] = dump_struct + + def dump_datetime(self, value, write): + write("") + write(_strftime(value)) + write("\n") + dispatch[datetime] = dump_datetime + + def dump_instance(self, value, write): + # check for special wrappers + if value.__class__ in WRAPPERS: + self.write = write + value.encode(self) + del self.write + else: + # store instance attributes as a struct (really?) + self.dump_struct(value.__dict__, write) + dispatch[DateTime] = dump_instance + dispatch[Binary] = dump_instance + # XXX(twouters): using "_arbitrary_instance" as key as a quick-fix + # for the p3yk merge, this should probably be fixed more neatly. + dispatch["_arbitrary_instance"] = dump_instance + +## +# XML-RPC unmarshaller. +# +# @see loads + +class Unmarshaller: + """Unmarshal an XML-RPC response, based on incoming XML event + messages (start, data, end). Call close() to get the resulting + data structure. + + Note that this reader is fairly tolerant, and gladly accepts bogus + XML-RPC data without complaining (but not bogus XML). + """ + + # and again, if you don't understand what's going on in here, + # that's perfectly ok. 
+ + def __init__(self, use_datetime=False, use_builtin_types=False): + self._type = None + self._stack = [] + self._marks = [] + self._data = [] + self._value = False + self._methodname = None + self._encoding = "utf-8" + self.append = self._stack.append + self._use_datetime = use_builtin_types or use_datetime + self._use_bytes = use_builtin_types + + def close(self): + # return response tuple and target method + if self._type is None or self._marks: + raise ResponseError() + if self._type == "fault": + raise Fault(**self._stack[0]) + return tuple(self._stack) + + def getmethodname(self): + return self._methodname + + # + # event handlers + + def xml(self, encoding, standalone): + self._encoding = encoding + # FIXME: assert standalone == 1 ??? + + def start(self, tag, attrs): + # prepare to handle this element + if ':' in tag: + tag = tag.split(':')[-1] + if tag == "array" or tag == "struct": + self._marks.append(len(self._stack)) + self._data = [] + if self._value and tag not in self.dispatch: + raise ResponseError("unknown tag %r" % tag) + self._value = (tag == "value") + + def data(self, text): + self._data.append(text) + + def end(self, tag): + # call the appropriate end tag handler + try: + f = self.dispatch[tag] + except KeyError: + if ':' not in tag: + return # unknown tag ? + try: + f = self.dispatch[tag.split(':')[-1]] + except KeyError: + return # unknown tag ? + return f(self, "".join(self._data)) + + # + # accelerator support + + def end_dispatch(self, tag, data): + # dispatch data + try: + f = self.dispatch[tag] + except KeyError: + if ':' not in tag: + return # unknown tag ? + try: + f = self.dispatch[tag.split(':')[-1]] + except KeyError: + return # unknown tag ? 
+ return f(self, data) + + # + # element decoders + + dispatch = {} + + def end_nil (self, data): + self.append(None) + self._value = 0 + dispatch["nil"] = end_nil + + def end_boolean(self, data): + if data == "0": + self.append(False) + elif data == "1": + self.append(True) + else: + raise TypeError("bad boolean value") + self._value = 0 + dispatch["boolean"] = end_boolean + + def end_int(self, data): + self.append(int(data)) + self._value = 0 + dispatch["i1"] = end_int + dispatch["i2"] = end_int + dispatch["i4"] = end_int + dispatch["i8"] = end_int + dispatch["int"] = end_int + dispatch["biginteger"] = end_int + + def end_double(self, data): + self.append(float(data)) + self._value = 0 + dispatch["double"] = end_double + dispatch["float"] = end_double + + def end_bigdecimal(self, data): + self.append(Decimal(data)) + self._value = 0 + dispatch["bigdecimal"] = end_bigdecimal + + def end_string(self, data): + if self._encoding: + data = data.decode(self._encoding) + self.append(data) + self._value = 0 + dispatch["string"] = end_string + dispatch["name"] = end_string # struct keys are always strings + + def end_array(self, data): + mark = self._marks.pop() + # map arrays to Python lists + self._stack[mark:] = [self._stack[mark:]] + self._value = 0 + dispatch["array"] = end_array + + def end_struct(self, data): + mark = self._marks.pop() + # map structs to Python dictionaries + dict = {} + items = self._stack[mark:] + for i in range(0, len(items), 2): + dict[items[i]] = items[i+1] + self._stack[mark:] = [dict] + self._value = 0 + dispatch["struct"] = end_struct + + def end_base64(self, data): + value = Binary() + value.decode(data.encode("ascii")) + if self._use_bytes: + value = value.data + self.append(value) + self._value = 0 + dispatch["base64"] = end_base64 + + def end_dateTime(self, data): + value = DateTime() + value.decode(data) + if self._use_datetime: + value = _datetime_type(data) + self.append(value) + dispatch["dateTime.iso8601"] = end_dateTime + + def 
end_value(self, data): + # if we stumble upon a value element with no internal + # elements, treat it as a string element + if self._value: + self.end_string(data) + dispatch["value"] = end_value + + def end_params(self, data): + self._type = "params" + dispatch["params"] = end_params + + def end_fault(self, data): + self._type = "fault" + dispatch["fault"] = end_fault + + def end_methodName(self, data): + if self._encoding: + data = data.decode(self._encoding) + self._methodname = data + self._type = "methodName" # no params + dispatch["methodName"] = end_methodName + +## Multicall support +# + +class _MultiCallMethod: + # some lesser magic to store calls made to a MultiCall object + # for batch execution + def __init__(self, call_list, name): + self.__call_list = call_list + self.__name = name + def __getattr__(self, name): + return _MultiCallMethod(self.__call_list, "%s.%s" % (self.__name, name)) + def __call__(self, *args): + self.__call_list.append((self.__name, args)) + +class MultiCallIterator: + """Iterates over the results of a multicall. Exceptions are + raised in response to xmlrpc faults.""" + + def __init__(self, results): + self.results = results + + def __getitem__(self, i): + item = self.results[i] + if type(item) == type({}): + raise Fault(item['faultCode'], item['faultString']) + elif type(item) == type([]): + return item[0] + else: + raise ValueError("unexpected type in multicall result") + +class MultiCall: + """server -> an object used to boxcar method calls + + server should be a ServerProxy object. 
+ + Methods can be added to the MultiCall using normal + method call syntax e.g.: + + multicall = MultiCall(server_proxy) + multicall.add(2,3) + multicall.get_address("Guido") + + To execute the multicall, call the MultiCall object e.g.: + + add_result, address = multicall() + """ + + def __init__(self, server): + self.__server = server + self.__call_list = [] + + def __repr__(self): + return "<%s at %#x>" % (self.__class__.__name__, id(self)) + + def __getattr__(self, name): + return _MultiCallMethod(self.__call_list, name) + + def __call__(self): + marshalled_list = [] + for name, args in self.__call_list: + marshalled_list.append({'methodName' : name, 'params' : args}) + + return MultiCallIterator(self.__server.system.multicall(marshalled_list)) + +# -------------------------------------------------------------------- +# convenience functions + +FastMarshaller = FastParser = FastUnmarshaller = None + +## +# Create a parser object, and connect it to an unmarshalling instance. +# This function picks the fastest available XML parser. +# +# return A (parser, unmarshaller) tuple. + +def getparser(use_datetime=False, use_builtin_types=False): + """getparser() -> parser, unmarshaller + + Create an instance of the fastest available parser, and attach it + to an unmarshalling object. Return both objects. + """ + if FastParser and FastUnmarshaller: + if use_builtin_types: + mkdatetime = _datetime_type + mkbytes = base64.decodebytes + elif use_datetime: + mkdatetime = _datetime_type + mkbytes = _binary + else: + mkdatetime = _datetime + mkbytes = _binary + target = FastUnmarshaller(True, False, mkbytes, mkdatetime, Fault) + parser = FastParser(target) + else: + target = Unmarshaller(use_datetime=use_datetime, use_builtin_types=use_builtin_types) + if FastParser: + parser = FastParser(target) + else: + parser = ExpatParser(target) + return parser, target + +## +# Convert a Python tuple or a Fault instance to an XML-RPC packet. 
+# +# @def dumps(params, **options) +# @param params A tuple or Fault instance. +# @keyparam methodname If given, create a methodCall request for +# this method name. +# @keyparam methodresponse If given, create a methodResponse packet. +# If used with a tuple, the tuple must be a singleton (that is, +# it must contain exactly one element). +# @keyparam encoding The packet encoding. +# @return A string containing marshalled data. + +def dumps(params, methodname=None, methodresponse=None, encoding=None, + allow_none=False): + """data [,options] -> marshalled data + + Convert an argument tuple or a Fault instance to an XML-RPC + request (or response, if the methodresponse option is used). + + In addition to the data object, the following options can be given + as keyword arguments: + + methodname: the method name for a methodCall packet + + methodresponse: true to create a methodResponse packet. + If this option is used with a tuple, the tuple must be + a singleton (i.e. it can contain only one element). + + encoding: the packet encoding (default is UTF-8) + + All byte strings in the data structure are assumed to use the + packet encoding. Unicode strings are automatically converted, + where necessary. 
+ """ + + assert isinstance(params, (tuple, Fault)), "argument must be tuple or Fault instance" + if isinstance(params, Fault): + methodresponse = 1 + elif methodresponse and isinstance(params, tuple): + assert len(params) == 1, "response tuple must be a singleton" + + if not encoding: + encoding = "utf-8" + + if FastMarshaller: + m = FastMarshaller(encoding) + else: + m = Marshaller(encoding, allow_none) + + data = m.dumps(params) + + if encoding != "utf-8": + xmlheader = "\n" % str(encoding) + else: + xmlheader = "\n" # utf-8 is default + + # standard XML-RPC wrappings + if methodname: + # a method call + data = ( + xmlheader, + "\n" + "", methodname, "\n", + data, + "\n" + ) + elif methodresponse: + # a method response, or a fault structure + data = ( + xmlheader, + "\n", + data, + "\n" + ) + else: + return data # return as is + return "".join(data) + +## +# Convert an XML-RPC packet to a Python object. If the XML-RPC packet +# represents a fault condition, this function raises a Fault exception. +# +# @param data An XML-RPC packet, given as an 8-bit string. +# @return A tuple containing the unpacked data, and the method name +# (None if not present). +# @see Fault + +def loads(data, use_datetime=False, use_builtin_types=False): + """data -> unmarshalled data, method name + + Convert an XML-RPC packet to unmarshalled data plus a method + name (None if not present). + + If the XML-RPC packet represents a fault condition, this function + raises a Fault exception. 
+ """ + p, u = getparser(use_datetime=use_datetime, use_builtin_types=use_builtin_types) + p.feed(data) + p.close() + return u.close(), u.getmethodname() + +## +# Encode a string using the gzip content encoding such as specified by the +# Content-Encoding: gzip +# in the HTTP header, as described in RFC 1952 +# +# @param data the unencoded data +# @return the encoded data + +def gzip_encode(data): + """data -> gzip encoded data + + Encode data using the gzip content encoding as described in RFC 1952 + """ + if not gzip: + raise NotImplementedError + f = BytesIO() + with gzip.GzipFile(mode="wb", fileobj=f, compresslevel=1) as gzf: + gzf.write(data) + return f.getvalue() + +## +# Decode a string using the gzip content encoding such as specified by the +# Content-Encoding: gzip +# in the HTTP header, as described in RFC 1952 +# +# @param data The encoded data +# @keyparam max_decode Maximum bytes to decode (20 MiB default), use negative +# values for unlimited decoding +# @return the unencoded data +# @raises ValueError if data is not correctly coded. +# @raises ValueError if max gzipped payload length exceeded + +def gzip_decode(data, max_decode=20971520): + """gzip encoded data -> unencoded data + + Decode data using the gzip content encoding as described in RFC 1952 + """ + if not gzip: + raise NotImplementedError + with gzip.GzipFile(mode="rb", fileobj=BytesIO(data)) as gzf: + try: + if max_decode < 0: # no limit + decoded = gzf.read() + else: + decoded = gzf.read(max_decode + 1) + except OSError: + raise ValueError("invalid data") + if max_decode >= 0 and len(decoded) > max_decode: + raise ValueError("max gzipped payload length exceeded") + return decoded + +## +# Return a decoded file-like object for the gzip encoding +# as described in RFC 1952. 
+# +# @param response A stream supporting a read() method +# @return a file-like object that the decoded data can be read() from + +class GzipDecodedResponse(gzip.GzipFile if gzip else object): + """a file-like object to decode a response encoded with the gzip + method, as described in RFC 1952. + """ + def __init__(self, response): + #response doesn't support tell() and read(), required by + #GzipFile + if not gzip: + raise NotImplementedError + self.io = BytesIO(response.read()) + gzip.GzipFile.__init__(self, mode="rb", fileobj=self.io) + + def close(self): + try: + gzip.GzipFile.close(self) + finally: + self.io.close() + + +# -------------------------------------------------------------------- +# request dispatcher + +class _Method: + # some magic to bind an XML-RPC method to an RPC server. + # supports "nested" methods (e.g. examples.getStateName) + def __init__(self, send, name): + self.__send = send + self.__name = name + def __getattr__(self, name): + return _Method(self.__send, "%s.%s" % (self.__name, name)) + def __call__(self, *args): + return self.__send(self.__name, args) + +## +# Standard transport class for XML-RPC over HTTP. +#

+# You can create custom transports by subclassing this method, and +# overriding selected methods. + +class Transport: + """Handles an HTTP transaction to an XML-RPC server.""" + + # client identifier (may be overridden) + user_agent = "Python-xmlrpc/%s" % __version__ + + #if true, we'll request gzip encoding + accept_gzip_encoding = True + + # if positive, encode request using gzip if it exceeds this threshold + # note that many servers will get confused, so only use it if you know + # that they can decode such a request + encode_threshold = None #None = don't encode + + def __init__(self, use_datetime=False, use_builtin_types=False, + *, headers=()): + self._use_datetime = use_datetime + self._use_builtin_types = use_builtin_types + self._connection = (None, None) + self._headers = list(headers) + self._extra_headers = [] + + ## + # Send a complete request, and parse the response. + # Retry request if a cached connection has disconnected. + # + # @param host Target host. + # @param handler Target PRC handler. + # @param request_body XML-RPC request body. + # @param verbose Debugging flag. + # @return Parsed response. + + def request(self, host, handler, request_body, verbose=False): + #retry request once if cached connection has gone cold + for i in (0, 1): + try: + return self.single_request(host, handler, request_body, verbose) + except http.client.RemoteDisconnected: + if i: + raise + except OSError as e: + if i or e.errno not in (errno.ECONNRESET, errno.ECONNABORTED, + errno.EPIPE): + raise + + def single_request(self, host, handler, request_body, verbose=False): + # issue XML-RPC request + try: + http_conn = self.send_request(host, handler, request_body, verbose) + resp = http_conn.getresponse() + if resp.status == 200: + self.verbose = verbose + return self.parse_response(resp) + + except Fault: + raise + except Exception: + #All unexpected errors leave connection in + # a strange state, so we clear it. + self.close() + raise + + #We got an error response. 
+ #Discard any response data and raise exception + if resp.getheader("content-length", ""): + resp.read() + raise ProtocolError( + host + handler, + resp.status, resp.reason, + dict(resp.getheaders()) + ) + + + ## + # Create parser. + # + # @return A 2-tuple containing a parser and an unmarshaller. + + def getparser(self): + # get parser and unmarshaller + return getparser(use_datetime=self._use_datetime, + use_builtin_types=self._use_builtin_types) + + ## + # Get authorization info from host parameter + # Host may be a string, or a (host, x509-dict) tuple; if a string, + # it is checked for a "user:pw@host" format, and a "Basic + # Authentication" header is added if appropriate. + # + # @param host Host descriptor (URL or (URL, x509 info) tuple). + # @return A 3-tuple containing (actual host, extra headers, + # x509 info). The header and x509 fields may be None. + + def get_host_info(self, host): + + x509 = {} + if isinstance(host, tuple): + host, x509 = host + + auth, host = urllib.parse._splituser(host) + + if auth: + auth = urllib.parse.unquote_to_bytes(auth) + auth = base64.encodebytes(auth).decode("utf-8") + auth = "".join(auth.split()) # get rid of whitespace + extra_headers = [ + ("Authorization", "Basic " + auth) + ] + else: + extra_headers = [] + + return host, extra_headers, x509 + + ## + # Connect to server. + # + # @param host Target host. + # @return An HTTPConnection object + + def make_connection(self, host): + #return an existing connection if possible. This allows + #HTTP/1.1 keep-alive. + if self._connection and host == self._connection[0]: + return self._connection[1] + # create a HTTP connection object from a host descriptor + chost, self._extra_headers, x509 = self.get_host_info(host) + self._connection = host, http.client.HTTPConnection(chost) + return self._connection[1] + + ## + # Clear any cached connection object. + # Used in the event of socket errors. 
+ # + def close(self): + host, connection = self._connection + if connection: + self._connection = (None, None) + connection.close() + + ## + # Send HTTP request. + # + # @param host Host descriptor (URL or (URL, x509 info) tuple). + # @param handler Target RPC handler (a path relative to host) + # @param request_body The XML-RPC request body + # @param debug Enable debugging if debug is true. + # @return An HTTPConnection. + + def send_request(self, host, handler, request_body, debug): + connection = self.make_connection(host) + headers = self._headers + self._extra_headers + if debug: + connection.set_debuglevel(1) + if self.accept_gzip_encoding and gzip: + connection.putrequest("POST", handler, skip_accept_encoding=True) + headers.append(("Accept-Encoding", "gzip")) + else: + connection.putrequest("POST", handler) + headers.append(("Content-Type", "text/xml")) + headers.append(("User-Agent", self.user_agent)) + self.send_headers(connection, headers) + self.send_content(connection, request_body) + return connection + + ## + # Send request headers. + # This function provides a useful hook for subclassing + # + # @param connection httpConnection. + # @param headers list of key,value pairs for HTTP headers + + def send_headers(self, connection, headers): + for key, val in headers: + connection.putheader(key, val) + + ## + # Send request body. + # This function provides a useful hook for subclassing + # + # @param connection httpConnection. + # @param request_body XML-RPC request body. + + def send_content(self, connection, request_body): + #optionally encode the request + if (self.encode_threshold is not None and + self.encode_threshold < len(request_body) and + gzip): + connection.putheader("Content-Encoding", "gzip") + request_body = gzip_encode(request_body) + + connection.putheader("Content-Length", str(len(request_body))) + connection.endheaders(request_body) + + ## + # Parse response. + # + # @param file Stream. + # @return Response tuple and target method. 
+ + def parse_response(self, response): + # read response data from httpresponse, and parse it + # Check for new http response object, otherwise it is a file object. + if hasattr(response, 'getheader'): + if response.getheader("Content-Encoding", "") == "gzip": + stream = GzipDecodedResponse(response) + else: + stream = response + else: + stream = response + + p, u = self.getparser() + + while 1: + data = stream.read(1024) + if not data: + break + if self.verbose: + print("body:", repr(data)) + p.feed(data) + + if stream is not response: + stream.close() + p.close() + + return u.close() + +## +# Standard transport class for XML-RPC over HTTPS. + +class SafeTransport(Transport): + """Handles an HTTPS transaction to an XML-RPC server.""" + + def __init__(self, use_datetime=False, use_builtin_types=False, + *, headers=(), context=None): + super().__init__(use_datetime=use_datetime, + use_builtin_types=use_builtin_types, + headers=headers) + self.context = context + + # FIXME: mostly untested + + def make_connection(self, host): + if self._connection and host == self._connection[0]: + return self._connection[1] + + if not hasattr(http.client, "HTTPSConnection"): + raise NotImplementedError( + "your version of http.client doesn't support HTTPS") + # create a HTTPS connection object from a host descriptor + # host may be a string, or a (host, x509-dict) tuple + chost, self._extra_headers, x509 = self.get_host_info(host) + self._connection = host, http.client.HTTPSConnection(chost, + None, context=self.context, **(x509 or {})) + return self._connection[1] + +## +# Standard server proxy. This class establishes a virtual connection +# to an XML-RPC server. +#

+# This class is available as ServerProxy and Server. New code should +# use ServerProxy, to avoid confusion. +# +# @def ServerProxy(uri, **options) +# @param uri The connection point on the server. +# @keyparam transport A transport factory, compatible with the +# standard transport class. +# @keyparam encoding The default encoding used for 8-bit strings +# (default is UTF-8). +# @keyparam verbose Use a true value to enable debugging output. +# (printed to standard output). +# @see Transport + +class ServerProxy: + """uri [,options] -> a logical connection to an XML-RPC server + + uri is the connection point on the server, given as + scheme://host/target. + + The standard implementation always supports the "http" scheme. If + SSL socket support is available (Python 2.0), it also supports + "https". + + If the target part and the slash preceding it are both omitted, + "/RPC2" is assumed. + + The following options can be given as keyword arguments: + + transport: a transport factory + encoding: the request encoding (default is UTF-8) + + All 8-bit strings passed to the server proxy are assumed to use + the given encoding. 
+ """ + + def __init__(self, uri, transport=None, encoding=None, verbose=False, + allow_none=False, use_datetime=False, use_builtin_types=False, + *, headers=(), context=None): + # establish a "logical" server connection + + # get the url + p = urllib.parse.urlsplit(uri) + if p.scheme not in ("http", "https"): + raise OSError("unsupported XML-RPC protocol") + self.__host = p.netloc + self.__handler = urllib.parse.urlunsplit(["", "", *p[2:]]) + if not self.__handler: + self.__handler = "/RPC2" + + if transport is None: + if p.scheme == "https": + handler = SafeTransport + extra_kwargs = {"context": context} + else: + handler = Transport + extra_kwargs = {} + transport = handler(use_datetime=use_datetime, + use_builtin_types=use_builtin_types, + headers=headers, + **extra_kwargs) + self.__transport = transport + + self.__encoding = encoding or 'utf-8' + self.__verbose = verbose + self.__allow_none = allow_none + + def __close(self): + self.__transport.close() + + def __request(self, methodname, params): + # call a method on the remote server + + request = dumps(params, methodname, encoding=self.__encoding, + allow_none=self.__allow_none).encode(self.__encoding, 'xmlcharrefreplace') + + response = self.__transport.request( + self.__host, + self.__handler, + request, + verbose=self.__verbose + ) + + if len(response) == 1: + response = response[0] + + return response + + def __repr__(self): + return ( + "<%s for %s%s>" % + (self.__class__.__name__, self.__host, self.__handler) + ) + + def __getattr__(self, name): + # magic method dispatcher + return _Method(self.__request, name) + + # note: to call a remote object with a non-standard name, use + # result getattr(server, "strange-python-name")(args) + + def __call__(self, attr): + """A workaround to get special attributes on the ServerProxy + without interfering with the magic __getattr__ + """ + if attr == "close": + return self.__close + elif attr == "transport": + return self.__transport + raise 
AttributeError("Attribute %r not found" % (attr,)) + + def __enter__(self): + return self + + def __exit__(self, *args): + self.__close() + +# compatibility + +Server = ServerProxy + +# -------------------------------------------------------------------- +# test code + +if __name__ == "__main__": + + # simple test program (from the XML-RPC specification) + + # local server, available from Lib/xmlrpc/server.py + server = ServerProxy("http://localhost:8000") + + try: + print(server.currentTime.getCurrentTime()) + except Error as v: + print("ERROR", v) + + multi = MultiCall(server) + multi.getData() + multi.pow(2,9) + multi.add(1,2) + try: + for response in multi(): + print(response) + except Error as v: + print("ERROR", v) diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h new file mode 100644 index 0000000000000000000000000000000000000000..d1c2c829d660c31efacea6ed612a8ad69298726d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/compilation_unit.h @@ -0,0 +1,351 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +struct Def; +struct Property; +struct ClassDef; +struct SugaredValue; +struct Resolver; + +using ResolverPtr = std::shared_ptr; +struct Self { + virtual ~Self() = default; + virtual std::shared_ptr makeSugared(Value* v) const = 0; + virtual ClassTypePtr getClassType() const = 0; +}; + +// A CompilationUnit is a list of named Functions +// with helper methods to iterate the list or invoke the function. 
+// Classes have a CompilationUnit holding the class methods, +// and Modules have a CompilationUnit holding the Functions that +// are used to implement their Methods + +struct TORCH_API CompilationUnit { + enum class FunctionType { Method, Hook, PreHook }; + // constructor that takes a set of functions to compile using the native + // resolver + explicit CompilationUnit(const std::string& source); + CompilationUnit() = default; + + CompilationUnit& operator=(CompilationUnit&&) = default; + CompilationUnit(CompilationUnit&&) = default; + CompilationUnit& operator=(const CompilationUnit&) = delete; + CompilationUnit(const CompilationUnit&) = delete; + + Function* find_function(const c10::QualifiedName& name) const { + auto it = dict_.find(name); + if (it == dict_.end()) { + return nullptr; + } + return functions_[it->second].get(); + } + + Function& get_function(const c10::QualifiedName& name) const { + if (auto r = find_function(name)) { + return *r; + } + TORCH_CHECK(false, "attempted to get undefined function ", name.name()); + } + + void set_optimized(bool o) { + TORCH_WARN( + "CompilationUnit::set_optimized() is deprecated and has no effect. " + "Please use setGraphExecutorOptimize()"); + } + + bool is_optimized() const { + TORCH_WARN( + "CompilationUnit::is_optimized() is deprecated and always returns true. " + "Please use getGraphExecutorOptimize()"); + return true; + } + + // for historic reasons, these are defined in ir_emitter.cpp + // Returns the list of Functions just defined. 
+ std::vector define( + const std::optional& prefix, + const std::vector& properties, + const std::vector& propResolvers, + const std::vector& definitions, + const std::vector& + defResolvers, /* determines how we handle free + variables in each definition*/ + // if non-null, the first argument to each def, is bound to this value + const Self* self, + // see [name mangling] + bool shouldMangle = false, + std::optional operator_set_version = std::nullopt); + + void define_hooks( + const std::optional& prefix, + const std::vector& hookDefs, + const std::vector& hookResolvers, + const std::vector& preHookDefs, + const std::vector& preHookResolvers, + const Self* self, + bool shouldMangle = false); + + // same as above but parse the definitions from source + // Returns the list of Functions just defined. + std::vector define( + // prefix namespace to put all the defined functions into + const std::optional& prefix, + const std::string& source, + const ResolverPtr& resolver, + const Self* self); + + void define_interface( + const c10::QualifiedName& qualifiedName, + const ClassDef& classDef, + ResolverPtr rcb, + bool is_module = false); + + Function* create_function( + c10::QualifiedName name, + std::shared_ptr graph, + bool shouldMangle = false) { + if (shouldMangle) { + name = mangle(name); + } + auto fn = std::make_unique( + std::move(name), std::move(graph), nullptr); + auto ret = fn.get(); + register_function(std::move(fn)); + return ret; + } + + std::vector get_functions() const { + return fmap(functions_, [](const std::unique_ptr& fn) { + return fn.get(); + }); + } + + /// Run a method from this compilation. 
+ /// + /// For example: + /// @code + /// IValue output = module->run("relu_script", a, b); + /// @endcode + /// + /// To get a compile a module from a source string, see torch::jit::compile + /// + /// @param method_name The name of the method to run + /// @param args Arguments to be passed to the method + /// @return An IValue containing the return value (or values if it is a tuple) + /// from the method + template + IValue run_method(const c10::QualifiedName& method_name, Types&&... args) { + return get_function(method_name)({IValue(std::forward(args))...}); + } + + void drop_all_functions() { + dict_.clear(); + functions_.clear(); + } + + /** + * Register a class as being owned by this compilation unit. + */ + void register_type(c10::NamedTypePtr namedType) { + // TODO: class types cannot be redefined because we have no way right now + // of invalidating their methods. NamedTuples are fine though, since they + // don't have methods. + TORCH_CHECK( + 0 == classDict_.count(*namedType->name()), + "class '", + namedType->name()->qualifiedName(), + "' already defined."); + classes_.push_back(std::move(namedType)); + classDict_[*classes_.back()->name()] = classes_.size() - 1; + }; + + c10::ClassTypePtr get_class(const c10::QualifiedName& name) const { + auto type = get_type(name); + if (!type) { + return nullptr; + } + return type->cast(); + } + + c10::InterfaceTypePtr get_interface(const c10::QualifiedName& name) const { + auto type = get_type(name); + if (!type) { + return nullptr; + } + return type->cast(); + } + + c10::TupleTypePtr get_named_tuple(const c10::QualifiedName& name) const { + for (const auto& cls : classes_) { + if (cls->name()->qualifiedName() == name.qualifiedName()) { + return cls->expect(); + } + } + return nullptr; + } + + c10::NamedTypePtr get_type(const c10::QualifiedName& name) const { + auto it = classDict_.find(name); + if (it == classDict_.end()) { + return nullptr; + } + return classes_[it->second]; + } + + // For testing: clear all 
Python-defined classes to ensure that unit tests + // have isolation. + void _clear_python_cu() { + // Delete all the associated class methods + for (const auto& type : classes_) { + if (auto cls = type->cast()) { + for (auto method : cls->methods()) { + // Tombstone the method in the compilation unit. + // Don't erase because the dict_ + auto it = dict_.find(method->qualname()); + if (it != dict_.end()) { + functions_[it->second] = nullptr; + // Erase in our big lookup table + dict_.erase(it); + } + } + // Classes can have multiple pointers to the same hook, + // need to make sure to not delete it twice + std::unordered_set hooks_to_delete; + for (const auto& hook : cls->getForwardHooks()) { + hooks_to_delete.insert(hook); + } + for (const auto& pre_hook : cls->getForwardPreHooks()) { + hooks_to_delete.insert(pre_hook); + } + for (const auto& hook : hooks_to_delete) { + // Tombstone the hook in the compilation unit. + auto it = dict_.find(hook->qualname()); + if (it != dict_.end()) { + functions_[it->second] = nullptr; + // Erase in our big lookup table + dict_.erase(it); + } + } + } + } + classes_.clear(); + classDict_.clear(); + } + + // [Internal Only] Remove method. + // Note Used for freezing. + void unsafeRemoveMethod(const c10::QualifiedName& method_name) { + auto it = dict_.find(method_name); + TORCH_CHECK( + it != dict_.end(), + "method '", + method_name.qualifiedName(), + "' does not exist."); + functions_[it->second] = nullptr; + dict_.erase(it); + } + + // [name mangling] All code objects must have a unique qualified name in a + // CompilationUnit. In Python, sometimes functions won't have unique qualified + // name (for example, nested functions). So we mangle Python functions to + // ensure that they are uniquely named. + // + // We also use mangling to distinguish different Module instances. Since each + // Module is a singleton class instance, different instances of the same + // Python Module will have different types but the same qualified name. 
+ c10::QualifiedName mangle(const c10::QualifiedName& name) const { + auto mangled = name; + while (get_type(mangled) || find_function(mangled)) { + mangled = mangler_.mangle(mangled); + } + return mangled; + } + + private: + std::unique_ptr define( + const std::optional& prefix, + const Def& def, + const ResolverPtr& resolver, + const Self* self, + const std::unordered_map& function_table, + bool shouldMangle = false, + FunctionType type = FunctionType::Method, + std::optional version = std::nullopt) const; + + // Define a property on \p self. + struct PropertyPair; + PropertyPair define_property( + const std::optional& prefix, + const Property& prop, + const ResolverPtr& resolver, + const Self* self, + const std::unordered_map& function_table, + bool shouldMangle = false) const; + + Function& register_function(std::unique_ptr fn) { + TORCH_CHECK( + 0 == dict_.count(fn->qualname().qualifiedName()), + "method '", + fn->qualname().qualifiedName(), + "' already defined."); + functions_.emplace_back(std::move(fn)); + dict_[functions_.back()->qualname()] = functions_.size() - 1; + return *functions_.back(); + } + std::vector> functions_; + // for fast lookup + std::unordered_map dict_; + std::unordered_map classDict_; + + // [class ownership] Right now there are two relationships between classes + // and compilation units: + // 1. Classes have compilation units internally that hold their methods. + // 2. On load, the TypePtrs of any imported classes are owned by the main + // module's compilation unit. + std::vector classes_; + + mutable NameMangler mangler_; +}; + +// An owning pointer to a Function. Just a pair of a raw Function ptr and it's +// owning CU. We need this because pybind requires a ref-counted way to refer to +// Functions. 
+struct StrongFunctionPtr { + StrongFunctionPtr(std::shared_ptr cu, Function* function) + : cu_(std::move(cu)), function_(function) { + TORCH_INTERNAL_ASSERT(cu_); + TORCH_INTERNAL_ASSERT(function_); + } + std::shared_ptr cu_; + Function* function_; +}; + +namespace script { +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. +using CompilationUnit = ::torch::jit::CompilationUnit; +} // namespace script +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..b5d336db2b6ec2495779c0063340b7348a82321f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/function_impl.h @@ -0,0 +1,180 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +struct TORCH_API GraphFunction : public Function { + GraphFunction( + c10::QualifiedName name, + std::shared_ptr graph, + std::function function_creator, + std::optional executor_execution_mode = + std::nullopt) + : name_(std::move(name)), + graph_(std::move(graph)), + executor_execution_mode_(executor_execution_mode), + function_creator_(std::move(function_creator)) {} + + bool isGraphFunction() const override { + return true; + } + + void run(Stack& stack) override; + + std::function function_creator() const { + return function_creator_; + } + + c10::intrusive_ptr runAsync( + Stack& stack, + TaskLauncher taskLauncher = at::launch) override; + + std::shared_ptr graph() const { + return graph_; + } + + std::shared_ptr optimized_graph() const; + + const c10::QualifiedName& qualname() const override { + return name_; + } + + // private/unstable api. 
sets the initial execution mode + // will not affect executor if there is an existing executor + // created for this function + void _set_initial_executor_execution_mode(ExecutorExecutionMode mode) { + executor_execution_mode_ = mode; + } + // private/unstable api. sets flag of whether or not to ignore amp. + // will not affect executor if there is an existing executor + // created for this function + void _set_ignore_amp(bool ignore_amp) { + force_no_amp_ = ignore_amp; + } + + // if this isn't yet defined, run its method_creator function + void ensure_defined() override; + + size_t num_inputs() const override { + return graph()->inputs().size(); + } + + Function& setSchema(FunctionSchema schema) override { + schema_ = std::make_unique(std::move(schema)); + return *this; + } + + const FunctionSchema& getSchema() const override; + + GraphExecutorState getDebugState() { + return get_executor().getDebugState(); + } + + bool is_optimized() const { + TORCH_WARN( + "GraphFunction::is_optimized() is deprecated and always returns true. " + "Please use getGraphExecutorOptimize()"); + return true; + } + + void check_single_output() { + TORCH_CHECK( + graph()->outputs().size() == 1, + "Method (but not graphs in general) require a single output. 
Use None/Tuple for 0 or 2+ outputs"); + } + + GraphExecutor& get_executor() { + ensure_defined(); + std::lock_guard lock(compile_mutex); + auto& executor = executors_[currentSpecialization()]; + if (executor) { + return *executor; + } + check_single_output(); + const std::string& name = name_.name(); + std::shared_ptr opt_graph = optimized_graph(); + if (!executor_execution_mode_) { + executor = GraphExecutor(opt_graph, name); + } else { + executor = GraphExecutor(opt_graph, name, *executor_execution_mode_); + } + return *executor; + } + + using Function::call; + bool call( + Stack& stack, + std::optional bailOut, + c10::function_ref f) override { + f(get_executor().getPlanFor(stack, bailOut).code); + return true; + } + + void clear_optimized_graphs() { + optimized_graphs_.fill(nullptr); + } + + private: + enum SpecializationKey { + AutocastOff, + CpuAutocastOn, + GpuAutocastOn, + CpuGpuAutocastOn, + + // This provides the number of specializations + // (Must be last entry) + TotalCount + }; + + SpecializationKey currentSpecialization() const; + + private: + c10::QualifiedName name_; + // The original, non-optimized graph + std::shared_ptr graph_; // for debugging and for inlining + + // allows users to specify Simple/Profiling Executor for function + // TODO: add more executors + mutable std::optional executor_execution_mode_; + + // if invoked on a graph that has already traced through amp + // don't invoke amp pass + mutable bool force_no_amp_ = false; + // Optimized graph, computed lazily. Used for inlining. + mutable std::array, SpecializationKey::TotalCount> + optimized_graphs_; + + // GraphFunctions are invokable from multiple threads, so this lock needs to + // be held when we're initializing graph executor for the first time or + // computing the optimized graph. We're using reentrant mutex so that we don't + // need to worry about causing a deadlock by calling one method from another + // (e.g. optimized_graph() from get_executor()). 
+ mutable std::recursive_mutex compile_mutex; + + // executor_[0] - autocast off + // executor_[1] - autocast cpu on + // executor_[2] - autocast gpu on + // executor_[3] - autocast cpu & gpu on + std::array, SpecializationKey::TotalCount> + executors_; + + // an optional function that actually creates the method when + // ensure_defined() is called. This is used by the compiler so + // that it can construct methods out of order + std::function function_creator_; + + // if absent, then we generate a default schema based on the graph + // mutable because getSchema caches the default schema if one is requested + // before a call to setSchema + mutable std::unique_ptr schema_; +}; + +// Short hands for dynamic_cast. +TORCH_API GraphFunction* tryToGraphFunction(Function&) noexcept; +TORCH_API GraphFunction& toGraphFunction(Function&); +TORCH_API const GraphFunction& toGraphFunction(const Function&); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h new file mode 100644 index 0000000000000000000000000000000000000000..28675e5bd059f5e876e1b55c94b2c0a705aca28c --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/method.h @@ -0,0 +1,84 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace torch::jit { + +using ObjectPtr = c10::intrusive_ptr; + +// A method in a module, e.g. f in: +// +// class M(ScriptModule): +// @script_method +// def f(self, x): +// ... +// Note: because Method/Module are exposed to python these +// classes use python method naming conventions +struct TORCH_API Method : public torch::IMethod { + Method(ObjectPtr owner, Function* function); + + // the module that contains this method. + Module owner() const; + // the raw objectptr that owns this method, for when the method is owned by a + // torchbind object. 
+ ObjectPtr raw_owner() const; + void run(Stack& stack); + void run(Stack&& stack) { + run(stack); + } + + c10::IValue operator()( + std::vector stack, + const Kwargs& kwargs = Kwargs()) const override; + + // Run method async. Invocation on this function would invokes a JIT + // interpreter that executes ops inline, one by one, on caller's thread. A + // model can utilize async op, i.e. `fork`, to launch an asynchronous task + // which will be launched on provided `taskLauncher`. + c10::intrusive_ptr run_async( + std::vector stack, + const Kwargs& kwargs = Kwargs(), + TaskLauncher taskLauncher = at::launch); + + std::shared_ptr graph() const { + return toGraphFunction(*function_).graph(); + } + + const std::string& name() const override { + return function_->name(); + } + + size_t num_inputs() const { + return function_->num_inputs(); + } + + GraphExecutor& get_executor() { + return toGraphFunction(*function_).get_executor(); + } + + Function& function() const { + return *function_; + } + + private: + void setArgumentNames(std::vector&) const override; + + // Methods are uniqued onwed by a single module. This raw pointer allows + // looking up the module. + ObjectPtr owner_; + + // Underlying unbound function + Function* function_; +}; + +namespace script { +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. 
+using Method = ::torch::jit::Method; +} // namespace script + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h new file mode 100644 index 0000000000000000000000000000000000000000..558dcdee57af281e5c1eb865d889bd1cc3ac6289 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/module.h @@ -0,0 +1,685 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// This file contains classes which assist in desugaring Python style +// modules and their methods into flattened graphs which don't have any +// function calls. + +namespace torch::jit { + +using ::c10::Argument; +using ::c10::FunctionSchema; +using ::c10::QualifiedName; +// Map which stores filename to content. 
+using ExtraFilesMap = std::unordered_map; + +using ModulePtr = c10::intrusive_ptr; + +struct Module; + +template +struct slot_list_impl; + +template +struct Named { + std::string name; + T value; +}; + +using NameModule = Named; +using NameValue = Named; +using NameTensor = Named; + +namespace detail { +struct TORCH_API ModulePolicy; +struct TORCH_API ParameterPolicy; +struct TORCH_API AttributePolicy; +struct TORCH_API BufferPolicy; +template +struct NamedPolicy; +} // namespace detail + +using module_list = slot_list_impl; +using named_module_list = + slot_list_impl>; + +using parameter_list = slot_list_impl; +using named_parameter_list = + slot_list_impl>; + +using attribute_list = slot_list_impl; +using named_attribute_list = + slot_list_impl>; + +using buffer_list = slot_list_impl; +using named_buffer_list = + slot_list_impl>; + +using ModuleLookup = std::function&)>; + +struct TORCH_API Module : public Object { + explicit Module(c10::QualifiedName class_name); + Module(std::shared_ptr cu, const c10::ClassTypePtr& type); + Module() = default; + Module(const Module&) = default; + Module& operator=(const Module&) = default; + Module(Module&&) noexcept = default; + Module& operator=(Module&&) noexcept = default; + Module( + c10::QualifiedName, + std::shared_ptr cu, + bool shouldMangle = false); + Module(ModulePtr module_value) : Object(std::move(module_value)) {} + ~Module() = default; + + void set_optimized(bool o) { + TORCH_WARN( + "Module::set_optimized() is deprecated and has no effect. " + "Please use setGraphExecutorOptimize()"); + } + + bool is_optimized() const { + TORCH_WARN( + "Module::is_optimized() is deprecated and always returns true. " + "Please use getGraphExecutorOptimize()"); + return true; + } + + IValue forward(std::vector inputs, const Kwargs& kwargs = Kwargs()) { + return get_method("forward")(std::move(inputs), kwargs); + } + + // In script modules, buffers are Tensors attribute that are _not_ registered + // as parameters. 
This is different than in nn.Module where there is a special + // register_buffer method. With this simplification, we only need to track + // whether a slot is a parameter to be able to classify it. + void register_buffer(const std::string& name, at::Tensor v) { + bool is_param = false; + bool is_buffer = true; + std::lock_guard lock(*register_mutex_); + type()->addOrCheckAttribute(name, TensorType::get(), is_param, is_buffer); + _ivalue()->setAttr(name, std::move(v)); + } + + void register_parameter( + const std::string& name, + at::Tensor v, + bool is_buffer) { + std::lock_guard lock(*register_mutex_); + type()->addOrCheckAttribute(name, TensorType::get(), !is_buffer, is_buffer); + _ivalue()->setAttr(name, std::move(v)); + } + + void register_attribute( + const std::string& name, + const TypePtr& t, + IValue v, + bool is_param = false, + bool is_buffer = false) { + type()->addOrCheckAttribute(name, t, is_param, is_buffer); + _ivalue()->setAttr(name, std::move(v)); + } + + void register_module(const std::string& name, const Module& module) { + type()->addOrCheckAttribute(name, module.type()); + _ivalue()->setAttr(name, module._ivalue()); + } + + void apply(const std::function& fn); + + buffer_list buffers(bool recurse = true) const; + named_buffer_list named_buffers(bool recurse = true) const; + + module_list children() const; // direct modules + named_module_list named_children() const; + module_list modules() const; // all modules, including this one, recursively + named_module_list named_modules() const; + + // all tensors involved in gradient optimization + parameter_list parameters(bool recurse = true) const; + named_parameter_list named_parameters(bool recurse = true) const; + + // all members of the object, similar to iterating over dir(obj) in python + attribute_list attributes(bool recurse = true) const; + named_attribute_list named_attributes(bool recurse = true) const; + + void dump( + bool print_method_bodies, + bool print_attr_values, + bool 
print_param_values) const; + + std::string dump_to_str( + bool print_method_bodies, + bool print_attr_values, + bool print_param_values) const; + + /// Enables "training" mode. + void train(bool on = true); + /// Calls train(false) to enable "eval" mode. + /// Do not override this method, override `train()` instead. + void eval() { + train(/*on=*/false); + } + /// True if the module is in training mode. + bool is_training() const { + return attr("training", true).toBool(); + } + + /// Recursively casts all parameters to the given `dtype` and `device`. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + void to(at::Device device, at::ScalarType dtype, bool non_blocking = false); + + /// Recursively casts all parameters to the given dtype. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. + void to(at::ScalarType dtype, bool non_blocking = false); + + /// Recursively moves all parameters to the given device. + /// + /// If `non_blocking` is true and the source is in pinned memory and + /// destination is on the GPU or vice versa, the copy is performed + /// asynchronously with respect to the host. Otherwise, the argument has no + /// effect. 
+ void to(at::Device device, bool non_blocking = false); + + void save( + std::ostream& out, + const ExtraFilesMap& extra_files = ExtraFilesMap()) const; + + void save( + const std::string& filename, + const ExtraFilesMap& extra_files = ExtraFilesMap()) const; + + void _save_for_mobile( + std::ostream& out, + const ExtraFilesMap& extra_files = ExtraFilesMap(), + bool save_mobile_debug_info = false, + bool use_flatbuffer = false) const; + + void _save_for_mobile( + const std::string& filename, + const ExtraFilesMap& extra_files = ExtraFilesMap(), + bool save_mobile_debug_info = false, + bool use_flatbuffer = false) const; + + Module copy() const; + + Module deepcopy(std::optional device = std::nullopt) const; + + // Clones both the underlying `ClassType` and the module instance(data), this + // function creates a new `ClassType` and returns a new instance that has the + // same data as the current instance but with the new type, shared ClassType + // will be preserved as well + Module clone(bool inplace = false) const; + + // Clones both the underlying `ClassType` and the module instance(data), this + // function creates a new `ClassType` and returns a new instance that has the + // same data as the current instance but with the new type, shared ClassType + // will be preserved as well. Also allows the caller to specify a set of + // method and attribute names to not clone. + Module clone( + bool inplace, + const std::unordered_set& ignored_method, + const std::unordered_set& ignored_attributes) const; + + void clone_method(const Module& orig, const std::string& name); + + IValue operator()(std::vector inputs); + + template + IValue create_class(const c10::QualifiedName& name, Types&&... 
args) const { + return create_class(name, {IValue(std::forward(args))...}); + } + + IValue create_class(const c10::QualifiedName& name, Stack stack) const; + + inline bool operator==(const Module& y) const noexcept { + return _ivalue() == y._ivalue(); + } + + void set_delete_memory(std::shared_ptr delete_mem) { + mem_to_delete_ = std::move(delete_mem); + } + + // A set of functions to maintain input shapes through torch.jit.save and + // torch.jit.load. It only works on tensors and lists/dicts of tensors + // because tracing is only supported by these types. + void store_traced_inputs( + const std::string& func_name, + std::vector inputs) { + if (inputs.empty()) { + return; + } + auto c10_inputs = c10::impl::GenericList(AnyType::get()); + for (IValue& value : inputs) { + // Not checking whether this is traceable type as that is already checked + // higher up in the stack and changing that would require a larger + // restructuring. + c10_inputs.emplace_back(std::move(value)); + } + traced_inputs_.insert_or_assign(func_name, c10_inputs); + } + + c10::Dict retrieve_traced_inputs() + const { + return traced_inputs_; + } + + private: + Module clone_impl( + std::unordered_map& type_remap, + bool inplace, + IValue::HashIdentityIValueMap memo, + const std::unordered_set& ignored_methods, + const std::unordered_set& ignored_attributes) const; + + void clone_method( + const Module& orig, + const Function& method, + const std::unordered_map& type_remap); + + c10::QualifiedName getNameForMethod(std::string basename) const { + return QualifiedName(*type()->name(), std::move(basename)); + } + + void to_impl( + const std::optional& device, + const std::optional& dtype, + bool non_blocking); + + // Extra handle for the module to delete when itself is deleted + std::shared_ptr mem_to_delete_; + + // Map of function names to the traced inputs that they have been traced with + c10::Dict traced_inputs_; + + // Mutex to keep registring buffer or parameter thread safe. 
+ std::shared_ptr register_mutex_ = std::make_shared(); +}; + +// C++ equivalent api of `torch.jit.freeze`. See documentation there for +// details. +TORCH_API Module freeze( + const Module& module, + const std::optional>& preserved_attrs = + std::nullopt, + bool optimize_numerics = true); + +// C++ equivalent api of `torch.jit.optimize_for_inference`. See documentation +// there for details. +TORCH_API Module optimize_for_inference( + Module& module, + const std::vector& other_methods = {}); + +enum class FusionBehavior { STATIC, DYNAMIC }; + +using FusionStrategy = std::vector>; +// clang-format off +/* +Sets the type and number of specializations that can occur during fusion. + +Usage: provide a list of pairs (type, depth) where type is one of STATIC or DYNAMIC +and depth is an integer. + +Behavior - static vs dynamic: + In STATIC fusion, fused ops are compiled to have fixed input shapes. The shape is determined + based on some initial profiling runs. + In DYNAMIC fusion, fused ops are compiled to have variable input shapes, so that multiple + shapes are possible. + +In both cases, we also recompile on new striding behavior, device, or dtype. + +Behavior - fallback functions & depth: + When an input doesn't match the format required by the specialized compiled op, it will run + a fallback function. Fallback functions are recursively be compiled and specialized based + on the observed tensor shapes. Since compilation can be slow, the "depth" parameter is provided to + limit the number of specializations that can be compiled, before giving up on recompiling and + falling back to a completely un-fused, un-specialized implementation. + +The list of (type, depth) pairs controls the type of specializations and the number of +specializations. 
For example: [(STATIC, 2), (DYNAMIC, 2)] indicates that the first +two specializations will use static fusions, the following two specializations will use +dynamic fusion, and any inputs that satisfy none of the 4 options will run an +unfused implementation. + +NB: in the future, if more as more fusion backends are added there may be more granular +apis for specific fusers. +*/ +// clang-format on +TORCH_API FusionStrategy getFusionStrategy(); +// returns previous strategy +TORCH_API FusionStrategy setFusionStrategy(FusionStrategy& fusion_strategy); + +namespace detail { + +struct TORCH_API SlotCursor { + Module module_; + int64_t i_; // slot offset, -1 indicates the module itself +}; + +} // namespace detail + +// This iterator allows the (optionally recursive) enumeration of +// the members of a Module. It performs a depth-first pre-order +// traversal of the module. The Policy template parameter determines +// which slots of the object should be included. For instance, +// when iterating parameters, we return the parameter tensors, +// but skip modules, buffers, and other attributes. +// See ModulePolicy for comments about Policy object's API. +template +struct slot_iterator_impl { + using SlotCursor = detail::SlotCursor; + using value_type = typename Policy::value_type; + slot_iterator_impl( + Module root, + bool recurse, // if true, do a depth-first search, otherwise, just look at + // slots of root + bool return_module) // if true include root itself as the first thing + // visited (used in modules()) + : cursors_({SlotCursor{std::move(root), return_module ? 
-1 : 0}}), + recurse_(recurse) { + // advance iterator to first valid element (or the end, if empty) + while_not_valid_next(); + } + // empty cursors_, represents end of iteration + slot_iterator_impl() : recurse_(false) {} + value_type operator*() const { + return Policy::create(cursors_, cur()); + } + value_type operator->() const { + return **this; + } + slot_iterator_impl& operator++() { + next_valid(); + return *this; + } + slot_iterator_impl operator++(int) { + // this is really expensive, should we delete it so people don't use it + // instead of prefix? + slot_iterator_impl old = *this; + ++(*this); + return old; + } + + private: + // return_module() is a corner case where instead of returning a submodule + // of root, we are returning root itself, because we are iterating modules(), + // which contains the root module itself. + // It is represented with a single SlotCursor whose index is -1. + bool return_module() const { + return top().i_ == -1; + } + const SlotCursor& top() const { + return cursors_.back(); + } + SlotCursor& top() { + return cursors_.back(); + } + IValue cur() const { + return return_module() ? top().module_._ivalue() + : top().module_._ivalue()->getSlot(top().i_); + } + + // advance to the next slot in a depth first pre-order traversal of the + // modules slots. This function does not guarantee the next slot is a + // valid element of the iteration. That is done by valid(). + // invariant: !cursors_.empty() + void next() { + // we just returned the module itself, advance i_ to 0 so we are now + // at the first slot of the module. + if (return_module()) { + ++top().i_; + return; + } + // the last traversal action advanced beyond the number of slots in the + // module so continue the iteration in the parent. 
+ if (top().i_ >= int64_t(top().module_._ivalue()->type()->numAttributes())) { + cursors_.pop_back(); + if (!cursors_.empty()) { + ++top().i_; + } + return; + } + // if the current thing is a module, we have to scan it for recursive + // traversals. We do this by adding a new SlotCursor to track the traversal. + if (recurse_ && + top().module_._ivalue()->type()->getAttribute(top().i_)->is_module()) { + cursors_.emplace_back(SlotCursor{cur().toModule(), 0}); + return; + } + // common case: advance to the next slot. + ++top().i_; + } + // is the current position of the iterator a valid one? + // otherwise, we have to continue advancing. + bool valid() const { + return top().i_ < + int64_t(top().module_._ivalue()->type()->numAttributes()) && + Policy::valid( + top().module_._ivalue()->type(), + top().i_, + top().module_._ivalue()->getSlot(top().i_)); + } + void while_not_valid_next() { + // advance iteration until we are either at the end (cursors_.empty()) + // or in a valid state. return_module() is a special case, + // and is always considered valid, regardless of Policy, because it is + // it is only true when we are iterating modules. + while (!cursors_.empty() && !return_module() && !valid()) { + next(); + } + } + void next_valid() { + // avoid crashing if this is empty + if (cursors_.empty()) { + return; + } + // advance to next element, which is maybe not valid + next(); + while_not_valid_next(); + } + + std::vector cursors_; + bool recurse_; + + friend inline bool operator!=( + const slot_iterator_impl& a, + const slot_iterator_impl& b) { + // we are finished iteration when we have no more iteration SlotCursors. + // end is always an empty iterator with no cursors. + return (a.cursors_.empty() != b.cursors_.empty()); + } +}; + +// This type represents lists of parameters, attributes, and +// submodules contained in the module. It is abstract because +// they are not stored directly in std::vectors but inside the +// module's IValue object itself. 
+template +struct slot_list_impl { + using iterator = slot_iterator_impl; + using const_iterator = slot_iterator_impl; + using value_type = typename iterator::value_type; + slot_iterator_impl begin() const { + return slot_iterator_impl(module_, recurse_, return_module_); + } + slot_iterator_impl end() const { + return slot_iterator_impl(); + } + size_t size() const { + if (!size_) { + size_ = size_t(0); + for ([[maybe_unused]] const value_type& _ : *(this)) { + ++*size_; + } + } + return *size_; + } + + slot_list_impl(Module module, bool recurse, bool return_module) + : module_(std::move(module)), + recurse_(recurse), + return_module_(return_module), + size_(std::nullopt) { + if (!recurse && !return_module && Policy::all_slots) { + size_ = module_.num_slots(); + } + } + + private: + Module module_; + bool recurse_; + bool return_module_; + // size of this list, cached on first request + // when we need to filter the slot list + mutable std::optional size_; + friend struct Module; +}; + +namespace detail { + +// slot_iterator_impl always iterate over all the slots in a module, +// the Policy template argument determines slots should be returned and their +// types +struct TORCH_API ModulePolicy { + // the type of the value being returned + using value_type = Module; + + // the logic for creating the type being returned, given the raw IValue + // of that object. + static value_type create( + const std::vector& cursors, + IValue v) { + return Module(std::move(v).toObject()); + } + // is slot i in typ something that this iterator should return, otherwise, + // we skip it. + static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { + return typ->getAttribute(i)->is_module(); + } + // are we going to return everything? If so, we can optimize the calculate + // of the size of the list. 
+ static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false; +}; + +struct TORCH_API ParameterPolicy { + using value_type = at::Tensor; + static value_type create( + const std::vector& cursors, + IValue v) { + return std::move(v).toTensor(); + } + static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { + return typ->is_parameter(i) && v.isTensor(); + } + static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false; +}; + +struct TORCH_API BufferPolicy { + using value_type = at::Tensor; + static value_type create( + const std::vector& cursors, + IValue v) { + return std::move(v).toTensor(); + } + static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { + return typ->getAttribute(i)->isSubtypeOf(*TensorType::get()) && + typ->is_buffer(i); + } + static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = false; +}; + +struct TORCH_API AttributePolicy { + using value_type = IValue; + static value_type create( + const std::vector& cursors, + IValue v) { + return v; + } + static bool valid(const ClassTypePtr& typ, size_t i, const IValue& v) { + return true; + } + static CONSTEXPR_EXCEPT_WIN_CUDA bool all_slots = true; +}; + +// take a Policy object, and make a version of it that returns the slot. +// along with the fully qualified name of that slot. This is used for the named_ +// variants like named_parameters(). +template +struct NamedPolicy { + using value_type = Named; + static value_type create( + const std::vector& cursors, + IValue v) { + std::string name; + if (cursors.size() == 1) { + name = (cursors.back().i_ == -1) ? 
"" : nameFragment(cursors.back()); + } else { + std::ostringstream ss; + for (const auto i : c10::irange(cursors.size())) { + if (i > 0) { + ss << "."; + } + ss << nameFragment(cursors[i]); + } + name = ss.str(); + } + return value_type{std::move(name), Policy::create(cursors, std::move(v))}; + } + static bool valid(const ClassTypePtr& t, size_t i, const IValue& v) { + return Policy::valid(t, i, v); + } + static constexpr bool all_slots = Policy::all_slots; + + private: + static std::string nameFragment(const detail::SlotCursor& f) { + return f.module_.type()->getAttributeName(f.i_); + } +}; + +} // namespace detail + +TORCH_API bool& getInlineEverythingMode(); + +namespace script { +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. +using Module = ::torch::jit::Module; +using ExtraFilesMap = ::torch::jit::ExtraFilesMap; +} // namespace script + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h new file mode 100644 index 0000000000000000000000000000000000000000..2c0f7e3b164f054b23c2ac5d58af28a7fa4fbe3b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/api/object.h @@ -0,0 +1,200 @@ +#pragma once + +#include +#include +#include +#include + +#include + +namespace torch::jit { + +struct Resolver; +using ResolverPtr = std::shared_ptr; + +using ObjectPtr = c10::intrusive_ptr; + +// Throw this in C++ land if `attr` fails. 
This will be converted to a Python +// AttributeError by the Python binding code +class ObjectAttributeError : public std::runtime_error { + public: + ObjectAttributeError(const std::string& what) : std::runtime_error(what) {} +}; + +struct TORCH_API Object { + Object() = default; + Object(const Object&) = default; + Object& operator=(const Object&) = default; + Object(Object&&) noexcept = default; + Object& operator=(Object&&) noexcept = default; + Object(ObjectPtr _ivalue) : _ivalue_(std::move(_ivalue)) {} + Object(std::shared_ptr cu, const c10::ClassTypePtr& type); + Object( + c10::QualifiedName, + std::shared_ptr cu, + bool shouldMangle = false); + + ObjectPtr _ivalue() const { + TORCH_INTERNAL_ASSERT(_ivalue_); + return _ivalue_; + } + + c10::ClassTypePtr type() const { + return _ivalue()->type(); + } + + struct Property { + std::string name; + Method getter_func; + std::optional setter_func; + }; + + void setattr(const std::string& name, c10::IValue v) { + if (_ivalue()->type()->hasConstant(name)) { + TORCH_CHECK( + false, + "Can't set constant '", + name, + "' which has value:", + _ivalue()->type()->getConstant(name)); + } else if (auto slot = _ivalue()->type()->findAttributeSlot(name)) { + const c10::TypePtr& expected = _ivalue()->type()->getAttribute(*slot); + TORCH_CHECK( + v.type()->isSubtypeOf(*expected), + "Expected a value of type '", + expected->repr_str(), + "' for field '", + name, + "', but found '", + v.type()->repr_str(), + "'"); + _ivalue()->setSlot(*slot, std::move(v)); + } else { + TORCH_CHECK(false, "Module has no attribute '", name, "'"); + } + } + + c10::IValue attr(const std::string& name) const { + if (auto r = _ivalue()->type()->findAttributeSlot(name)) { + return _ivalue()->getSlot(*r); + } + if (auto r = _ivalue()->type()->findConstantSlot(name)) { + return _ivalue()->type()->getConstant(*r); + } + std::stringstream err; + err << _ivalue()->type()->repr_str() << " does not have a field with name '" + << name.c_str() << "'"; + throw 
ObjectAttributeError(err.str()); + } + + c10::IValue attr(const std::string& name, c10::IValue or_else) const { + if (auto r = _ivalue()->type()->findAttributeSlot(name)) { + return _ivalue()->getSlot(*r); + } + if (auto r = _ivalue()->type()->findConstantSlot(name)) { + return _ivalue()->type()->getConstant(*r); + } + return or_else; + } + + bool hasattr(const std::string& name) const { + return _ivalue()->type()->hasAttribute(name) || + _ivalue()->type()->hasConstant(name); + } + + // each object owns its methods. The reference returned here + // is guaranteed to stay valid until this module has been destroyed + Method get_method(const std::string& name) const { + if (auto method = find_method(name)) { + return *method; + } + AT_ERROR("Method '", name, "' is not defined."); + } + + const std::vector get_methods() const { + return c10::fmap(type()->methods(), [&](Function* func) { + return Method(_ivalue(), func); + }); + } + + bool has_property(const std::string& name) const { + for (const auto& prop : type()->properties()) { + if (prop.name == name) { + return true; + } + } + return false; + } + + const Property get_property(const std::string& name) const { + for (const auto& prop : type()->properties()) { + if (prop.name == name) { + std::optional setter = std::nullopt; + if (prop.setter) { + setter = Method(_ivalue(), prop.setter); + } + return Property{ + prop.name, Method(_ivalue(), prop.getter), std::move(setter)}; + } + } + AT_ERROR("Property '", name, "' is not defined."); + } + + const std::vector get_properties() const { + return c10::fmap(type()->properties(), [&](ClassType::Property prop) { + std::optional setter = std::nullopt; + if (prop.setter) { + setter = Method(_ivalue(), prop.setter); + } + return Property{ + std::move(prop.name), + Method(_ivalue(), prop.getter), + std::move(setter)}; + }); + } + + std::optional find_method(const std::string& basename) const; + + /// Run a method from this module. 
+ /// + /// For example: + /// @code + /// IValue output = module->run("relu_script", a, b); + /// @endcode + /// + /// To get a compile a module from a source string, see torch::jit::compile + /// + /// @param method_name The name of the method to run + /// @param args Arguments to be passed to the method + /// @return An IValue containing the return value (or values if it is a tuple) + /// from the method + template + IValue run_method(const std::string& method_name, Types&&... args) { + return get_method(method_name)({IValue(std::forward(args))...}); + } + + // so that C++ users can easily add methods + void define(const std::string& src, const ResolverPtr& resolver = nullptr); + + size_t num_slots() const { + return _ivalue()->slots().size(); + } + + // shallow copy the object + Object copy() const; + + // Copies all the attributes of the object recursively without creating new + // `ClassType`, including deepcopy of Tensors + Object deepcopy() const; + + private: + // mutable be we lazily initialize in module_object. + mutable ObjectPtr _ivalue_; +}; + +namespace script { +// We once had a `script::` namespace that was deleted. This is for backcompat +// of the public API; new code should not use this type alias. 
+using Object = ::torch::jit::Object; +} // namespace script +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/builtin_functions.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/builtin_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..27e190a78a5a8eb6c82f9a9807b203e9cdf33a60 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/builtin_functions.h @@ -0,0 +1,9 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +TORCH_API const std::vector& getAllBuiltinFunctionsFor(Symbol name); +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h new file mode 100644 index 0000000000000000000000000000000000000000..9ea8bc8cb3819fba75941d4b7f4224436b55aeaf --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/convert_to_ssa.h @@ -0,0 +1,14 @@ +#pragma once +#include +#include +#include + +#include +#include + +namespace torch::jit { + +// Convert a graph with Loads & Stores into SSA form +TORCH_API void ConvertToSSA(std::shared_ptr& graph); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h new file mode 100644 index 0000000000000000000000000000000000000000..a4aee2b7e281ef94c3383b783db370d19ecd0984 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/ir_emitter.h @@ -0,0 +1,19 @@ +#pragma once +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +TORCH_API void runCleanupPasses(std::shared_ptr& to_clean); + +TORCH_API bool meaningfulName(const 
std::string& name); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h new file mode 100644 index 0000000000000000000000000000000000000000..447bf66a0572e01e758f61aee0a2d16583e05e82 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/lexer.h @@ -0,0 +1,562 @@ +#pragma once +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +// single character tokens are just the character itself '+' +// multi-character tokens need an entry here +// if the third entry is not the empty string, it is used +// in the lexer to match this token. + +// These kinds are also used in Tree.h as the kind of the AST node. +// Some kinds TK_APPLY, TK_LIST are only used in the AST and are not seen in the +// lexer. + +#define TC_FORALL_TOKEN_KINDS(_) \ + _(TK_EOF, "eof", "") \ + _(TK_WHITESPACE, "whitespace", "") \ + _(TK_WHITESPACE_EOF, "whitespace_eof", "") \ + _(TK_NUMBER, "number", "") \ + _(TK_NEWLINE, "newline", "") \ + _(TK_INDENT, "indent", "") \ + _(TK_DEDENT, "dedent", "") \ + _(TK_DEF, "def", "def") \ + _(TK_EQUIVALENT, "equivalent", "<=>") \ + _(TK_IDENT, "ident", "") \ + _(TK_STRING, "string", "") \ + _(TK_STRINGLITERAL, "string_literal", "") \ + _(TK_CONST, "const", "") \ + _(TK_LIST, "list", "") \ + _(TK_DICT, "dict", "") \ + _(TK_OPTION, "option", "") \ + _(TK_APPLY, "apply", "") \ + _(TK_COMPREHENSION, "comprehension", "") \ + _(TK_RANGE_CONSTRAINT, "range_constraint", "") \ + _(TK_PARAM, "param", "") \ + _(TK_INFERRED, "inferred", "") \ + _(TK_ACCESS, "access", "") \ + _(TK_ASSIGN, "assign", "") \ + _(TK_AUG_ASSIGN, "aug_assign", "") \ + _(TK_ATTRIBUTE, "attribute", "") \ + _(TK_IF, "if", "if") \ + _(TK_ELSE, "else", "else") \ + _(TK_ELIF, "elif", "elif") \ + _(TK_WHILE, "while", 
"while") \ + _(TK_EXPR_STMT, "expression statement", "") \ + _(TK_RETURN, "return", "return") \ + _(TK_IS, "is", "is") \ + _(TK_ISNOT, "is not", "is not") \ + _(TK_NE, "ne", "!=") \ + _(TK_EQ, "eq", "==") \ + _(TK_LE, "le", "<=") \ + _(TK_GE, "ge", ">=") \ + _(TK_FLOOR_DIV, "floordiv", "//") \ + _(TK_IF_EXPR, "if", "") \ + _(TK_TRUE, "True", "True") \ + _(TK_FALSE, "False", "False") \ + _(TK_NONE, "None", "None") \ + _(TK_AND, "and", "and") \ + _(TK_OR, "or", "or") \ + _(TK_NOT, "not", "not") \ + _(TK_LSHIFT, "<<", "<<") \ + _(TK_RSHIFT, ">>", ">>") \ + _(TK_CAST, "cast", "") \ + _(TK_PLUS_EQ, "+=", "+=") \ + _(TK_MINUS_EQ, "-=", "-=") \ + _(TK_TIMES_EQ, "*=", "*=") \ + _(TK_DIV_EQ, "/=", "/=") \ + _(TK_MOD_EQ, "%=", "%=") \ + _(TK_BIT_OR_EQ, "|=", "|=") \ + _(TK_BIT_AND_EQ, "&=", "&=") \ + _(TK_BIT_XOR_EQ, "^=", "^=") \ + _(TK_LSHIFT_EQ, "<<=", "<<=") \ + _(TK_RSHIFT_EQ, ">>=", ">>=") \ + _(TK_POW_EQ, "**=", "**=") \ + _(TK_GLOBAL, "global", "global") \ + _(TK_BUILT_IN, "built-in", "") \ + _(TK_SUBSCRIPT, "subscript", "") \ + _(TK_VAR, "variable", "") \ + _(TK_NOTHING, "nothing", "") \ + _(TK_DICT_LITERAL, "dict-literal", "") \ + _(TK_LIST_LITERAL, "list-literal", "") \ + _(TK_TUPLE_LITERAL, "tuple-literal", "") \ + _(TK_FOR, "for", "for") \ + _(TK_IN, "in", "in") \ + _(TK_NOTIN, "not in", "not in") \ + _(TK_STARRED, "starred", "") \ + _(TK_UNARY_MINUS, "unary minus", "") \ + _(TK_POW, "pow operator", "**") \ + _(TK_ARROW, "arrow", "->") \ + _(TK_DECL, "decl", "") \ + _(TK_SLICE_EXPR, "slice expr", "") \ + _(TK_TYPE_COMMENT, "type comment", "# type:") \ + _(TK_RAISE, "raise", "raise") \ + _(TK_ASSERT, "assert", "assert") \ + _(TK_DOTS, "dots", "...") \ + _(TK_LIST_COMP, "list comprehension", "") \ + _(TK_DICT_COMP, "dict comprehension", "") \ + _(TK_BREAK, "break", "break") \ + _(TK_CONTINUE, "continue", "continue") \ + _(TK_DELETE, "del", "del") \ + _(TK_PASS, "pass", "pass") \ + _(TK_CLASS_DEF, "class", "class") \ + _(TK_IMPORT, "import", "import") \ + 
_(TK_WITH, "with", "with") \ + _(TK_WITH_ITEM, "withitem", "") \ + _(TK_AS, "as", "as") \ + _(TK_PROP, "property", "") \ + _(TK_ELLIPSIS, "Ellipsis", "Ellipsis") \ + _(TK_NONE_TYPE, "NoneType", "NoneType") + +enum TokenKind { + // we use characters to represent themselves so skip all valid characters + // before + // assigning enum values to multi-char tokens. + TK_DUMMY_START = 256, +#define DEFINE_TOKEN(tok, _, _2) tok, + TC_FORALL_TOKEN_KINDS(DEFINE_TOKEN) +#undef DEFINE_TOKEN +}; + +TORCH_API std::string kindToString(int kind); +TORCH_API int stringToKind(const std::string& str); + +// nested hash tables that indicate char-by-char what is a valid token. +struct TokenTrie; +using TokenTrieRef = std::unique_ptr; +struct TokenTrie { + TokenTrie() = default; + void insert(const char* str, int tok) { + if (*str == '\0') { + AT_ASSERT(kind == 0); + kind = tok; + return; + } + + for (size_t i = 0, e = child_chars.size(); i < e; ++i) { + if (child_chars[i] == *str) { + child_tries[i]->insert(str + 1, tok); + return; + } + } + + child_chars.emplace_back(*str); + child_tries.emplace_back(std::make_unique()); + child_tries.back()->insert(str + 1, tok); + } + int kind{0}; // 0 == invalid token + + std::vector child_chars; + std::vector child_tries; +}; + +// stuff that is shared against all TC lexers/parsers and is initialized only +// once. +struct TORCH_API SharedParserData { + SharedParserData() : head(new TokenTrie()) { + for (const char* c = valid_single_char_tokens; *c; c++) { + std::string str(1, *c); + head->insert(str.c_str(), *c); + } + +#define ADD_CASE(tok, _, tokstring) \ + if (*(tokstring) != '\0') { \ + head->insert((tokstring), (tok)); \ + } + TC_FORALL_TOKEN_KINDS(ADD_CASE) +#undef ADD_CASE + } + + bool match( + StringCordView::Iterator pos, + bool continuation, // are we inside a scope where newlines don't count + // (e.g. 
inside parens) + bool whitespace_token, // should we treat whitespace as a token + int* kind, + StringCordView::Iterator* start, + StringCordView::Iterator* end) { + *start = pos; + // skip whitespace + while (pos.has_next() && isblank(*pos)) { + ++pos; + } + + // special handling + if (pos.has_next()) { + if (*pos == '#' && !isTypeComment(pos)) { + // skip comments + while (pos.has_next() && *pos != '\n') + ++pos; + // tail call, handle whitespace and more comments + return match(pos, continuation, whitespace_token, kind, start, end); + } + if (*pos == '\\') { + auto newiter = pos; + ++newiter; + if (newiter.has_next() && *newiter == '\n' && !whitespace_token) { + ++newiter; + return match(newiter, continuation, false, kind, start, end); + } + } + if (*pos == '\n') { + return match(++pos, continuation, !continuation, kind, start, end); + } + } + // we handle white space before EOF because in the case we have something + // like the following where we need to generate the dedent token if foo: + // ... + // else: + // pass + if (whitespace_token) { + *kind = !pos.has_next() ? TK_WHITESPACE_EOF : TK_WHITESPACE; + *end = pos; + return true; + } + if (!pos.has_next()) { + *kind = TK_EOF; + *start = pos; + *end = *start; + return true; + } + // invariant: the next token is not whitespace or newline + *start = pos; + // check for a valid number + size_t len = 0; + if (isNumber(pos.rest_line(), 0, &len)) { + *end = *start; + *end += len; + *kind = TK_NUMBER; + return true; + } + // check for string + if (isString(pos.rest_line(), 0, &len)) { + *kind = TK_STRINGLITERAL; + *end = *start; + *end += len; + return true; + } + + // check for either an ident or a token + // ident tracks whether what we have scanned so far could be an identifier + // matched indicates if we have found any match. 
+ bool matched = false; + bool ident = true; + TokenTrie* cur = head.get(); + // for (size_t i = 0; pos + i < str.size() && (ident || cur != nullptr); + // i++) + for (size_t i = 0; pos.has_next() && (ident || cur != nullptr); + ++pos, ++i) { + ident = ident && validIdent(i, *pos); + if (ident) { + matched = true; + *end = pos.next_iter(); + *kind = TK_IDENT; + } + // check for token second, so that e.g. 'max' matches the token TK_MAX + // rather the + // identifier 'max' + if (cur) { + const auto begin_it = cur->child_chars.begin(); + const auto end_it = cur->child_chars.end(); + const auto ch_it = std::find(begin_it, end_it, *pos); + + cur = (ch_it == end_it) ? nullptr + : cur->child_tries[ch_it - begin_it].get(); + + if (cur && cur->kind != 0) { + matched = true; + *end = pos.next_iter(); + *kind = cur->kind; + } + } + } + return matched; + } + + bool isUnary(int kind, int* prec); + bool isBinary(int kind, int* prec); + bool isRightAssociative(int kind) { + switch (kind) { + case '?': + case TK_POW: + case TK_IF: + return true; + default: + return false; + } + } + + private: + bool validIdent(size_t i, char n) { + return isalpha(n) || n == '_' || (i > 0 && isdigit(n)); + } + + // 1. skip whitespace + // 2. 
handle comment or newline + // + bool isNumber(c10::string_view str, size_t start, size_t* len) { + char first = str[start]; + // strtod allows numbers to start with + or - or nan or inf + // http://en.cppreference.com/w/cpp/string/byte/strtof + // but we want only the number part, otherwise 1+3 will turn into two + // adjacent numbers in the lexer + if (first == '-' || first == '+' || isalpha(first)) + return false; + const char* startptr = str.data() + start; + char* endptr = nullptr; + torch::jit::strtod_c(startptr, &endptr); + *len = endptr - startptr; + // check if the number is complex valued + // access is safe because string is assumed to be null terminated + if (endptr != nullptr && *endptr == 'j') { + *len += 1; + } + return *len > 0; + } + + bool isCharCount(char c, c10::string_view str, size_t start, int len) { + // count checks from [start, start + len) + return start + len <= str.size() && + std::count(str.begin() + start, str.begin() + start + len, c) == len; + } + + // python concatenates all adjacent strings "a" "b" == "ab" + // strings can be enclosed with 1 or 3 single or double quotes + // if enclosed with 3 quotes newlines are valid + // as elsewhere, backslash and new line should be ignored + bool isString(c10::string_view str, size_t start, size_t* len) { + char quote = str[start]; + if (quote != '\"' && quote != '\'') + return false; + int quote_len = isCharCount(quote, str, start, 3) ? 3 : 1; + + // end is now set past the opening quotation marks + size_t end = start + quote_len; + while (end < str.size() && !isCharCount(quote, str, end, quote_len)) { + if (str[end] == '\n' && quote_len != 3) { + return false; + } + // handle escaped characters. 
advances past escaped quotation marks, + // escaped newlines and escaped backslashes + // multi-char escapes like \x1A are handled fine here because the + // remainder of the escape are valid string characters anyway + if (str[end] == '\\') { + end++; + } + end++; + } + // set length equal to the complete string including quotations + *len = end - start + quote_len; + // if end finished without going past the last character of the string than + // there is a match + return end < str.size(); + } + + bool isblank(int n) { + return isspace(n) && n != '\n'; + } + + bool isTypeComment(StringCordView::Iterator str_iter) { + c10::string_view rest_line = str_iter.rest_line(); + const std::string type_string = "# type:"; + if (rest_line.size() < type_string.length()) { + return false; + } + auto match_string = rest_line.substr(0, type_string.size()); + return match_string == type_string; + } + + // Make an exception ignoring comments for type annotation comments + bool isTypeComment(const StringCordView& str, size_t pos) { + const std::string type_string = "# type:"; + if (str.size() < pos + type_string.length()) { + return false; + } + auto match_string = str.substr(pos, type_string.size()); + return match_string == type_string; + } + + TokenTrieRef head; +}; + +TORCH_API SharedParserData& sharedParserData(); + +struct Token { + int kind; + SourceRange range; + Token(int kind, SourceRange range) : kind(kind), range(std::move(range)) {} + std::string text() { + return std::string(range.token_text()); + } + std::string kindString() const { + return kindToString(kind); + } +}; + +struct Lexer { + explicit Lexer(std::shared_ptr source) + : source(std::move(source)), + + indent_stack(), + next_tokens(), + shared(sharedParserData()) { + auto first_indent = lexRaw(true); + indent_stack.push_back(first_indent.range.size()); + lex(); + } + // Return the current token, and then move to the next one + Token next() { + if (next_tokens.empty()) + reportError("Lexer invariant violated: 
empty token queue"); + Token r = std::move(next_tokens.front()); + next_tokens.erase(next_tokens.begin()); + if (next_tokens.empty()) { + lex(); + } + return r; + } + // Skip the current token if it matches the given kind + bool nextIf(int kind) { + if (cur().kind != kind) + return false; + next(); + return true; + } + + [[noreturn]] void reportError(const std::string& what) { + reportError(what, cur()); + } + [[noreturn]] void reportError(const std::string& what, const Token& t) { + std::stringstream ss; + ss << what << ":\n"; + t.range.highlight(ss); + throw std::runtime_error(ss.str()); + } + [[noreturn]] void expected(const std::string& what, const Token& t) { + std::stringstream ss; + ss << "expected " << what << " but found '" << t.kindString() + << "' here:\n"; + t.range.highlight(ss); + throw std::runtime_error(ss.str()); + } + [[noreturn]] void expected(const std::string& what) { + expected(what, cur()); + } + // Check that the current token has a given kind, return the current token, + // and advance to the next one. + Token expect(int kind) { + if (cur().kind != kind) { + expected(kindToString(kind)); + } + return next(); + } + Token& lookahead() { + if (next_tokens.size() < 2) { + lex(); + } + return next_tokens[1]; + } + Token& cur() { + return next_tokens.front(); + } + + private: + void lex() { + auto r = lexRaw(); + switch (r.kind) { + case '(': + case '[': + case '{': + nesting++; + break; + case ')': + case ']': + case '}': + nesting--; + break; + case TK_WHITESPACE: + case TK_WHITESPACE_EOF: { + const auto depth = + r.kind == TK_WHITESPACE_EOF ? indent_stack.front() : r.range.size(); + // note: TK_WHITESPACE_EOF is whitespace right before the EOF token + // just like we allow the code to be indented to a particular initial + // indent level, we allow the final indent to be anything and set + // it back to the initial indent level. 
This allows the code to be + // put into string literals inside code without worrying about final + // whitespace + if (depth > indent_stack.back()) { + indent_stack.push_back(depth); + r.kind = TK_INDENT; + } else if (depth == indent_stack.back()) { + r.kind = TK_NEWLINE; + } else { + next_tokens.emplace_back(TK_NEWLINE, r.range); + while (indent_stack.back() != depth) { + indent_stack.pop_back(); + next_tokens.emplace_back(TK_DEDENT, r.range); + if (indent_stack.empty()) { + reportError("invalid indent level " + std::to_string(depth), r); + } + } + return; // We've already queued the tokens + } + } break; + default: + break; + } + next_tokens.push_back(std::move(r)); + } + Token lexRaw(bool whitespace_token = false) { + AT_ASSERT(source); + if (current == nullptr) { + AT_ASSERT(pos == 0); + current = std::make_unique( + source->text_str().begin()); + } + + StringCordView::Iterator start_iter = *current; + StringCordView::Iterator end_iter = *current; + int kind = 0; + if (!shared.match( + *current, + nesting > 0, + whitespace_token, + &kind, + &start_iter, + &end_iter)) { + expected( + "a valid token", + Token( + **current, + SourceRange(source, start_iter, start_iter.pos() + 1))); + } + + auto t = Token(kind, SourceRange(source, start_iter, end_iter.pos())); + pos = end_iter.pos(); + *current = end_iter; + return t; + } + + std::shared_ptr source; + std::unique_ptr current; + size_t pos{0}; + size_t nesting{0}; // depth of ( [ { nesting... 
+ std::vector indent_stack; // stack of indentation level of blocks + // Invariant: this should always contain at least a single element + std::vector next_tokens; + // NOLINTNEXTLINE(cppcoreguidelines-avoid-const-or-ref-data-members) + SharedParserData& shared; +}; +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/mini_environment.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/mini_environment.h new file mode 100644 index 0000000000000000000000000000000000000000..1b71927ffd594c80abd6f0a9eab7f938723cd9d7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/mini_environment.h @@ -0,0 +1,55 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +// Simple data structure for containing a type T in nested control blocks +// Should only be used after initial compilation where type checking and +// loads and stores are emitted + +template +struct MiniEnvironment { + MiniEnvironment(Block* b, std::shared_ptr next = nullptr) + : next(std::move(next)) {} + + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::shared_ptr> next; + + T findInThisFrame(const std::string& name) { + auto it = table.find(name); + if (it != table.end()) { + return it->second; + } + return nullptr; + } + + T findInAnyFrame(const std::string& name) { + for (auto runner = this; runner; runner = runner->next.get()) { + if (auto r = runner->findInThisFrame(name)) { + return r; + } + } + return nullptr; + } + + void setVar(const std::string& name, T value) { + table[name] = value; + } + + std::vector definedVariables() { + std::vector result; + result.reserve(table.size()); + for (auto& kv : table) { + result.push_back(kv.first); + } + std::sort(result.begin(), result.end()); + return result; + } + + private: + std::unordered_map table; +}; + +} // namespace torch::jit diff --git 
a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parse_string_literal.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parse_string_literal.h new file mode 100644 index 0000000000000000000000000000000000000000..5139ae9ec790ad96868535c2329d6128754d6f07 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/parse_string_literal.h @@ -0,0 +1,87 @@ +#pragma once +#include +#include +#include + +namespace torch::jit { + +inline bool isCharCount(char c, const std::string& str, size_t start, int len) { + // count checks from [start, start + len) + return start + len <= str.size() && + std::count( + str.begin() + static_cast(start), + str.begin() + static_cast(start + len), + c) == len; +} + +inline std::optional parseOctal(const std::string& str, size_t pos) { + //\xxx where x are 0-7 + if (pos + 3 >= str.size()) + return std::nullopt; + size_t c = 0; + for (size_t i = 1, b = 64; i < 4; ++i, b /= 8) { + auto d = str[pos + i]; + if (d < '0' || d > '7') + return std::nullopt; + c += b * (d - '0'); + } + if (c >= 256) + return std::nullopt; + return c; +} + +inline std::string parseStringLiteral( + const SourceRange& range, + const std::string& str) { + size_t quote_len = isCharCount(str[0], str, 0, 3) ? 
3 : 1; + auto ret_str = str.substr(quote_len, str.size() - quote_len * 2); + size_t pos = ret_str.find('\\'); + while (pos != std::string::npos) { + // invariant: pos has to escape a character because it is a valid string + char c = ret_str[pos + 1]; + size_t to_erase = 2; + switch (ret_str[pos + 1]) { + case '\\': + case '\'': + case '\"': + case '\n': + break; + case 'a': + c = '\a'; + break; + case 'b': + c = '\b'; + break; + case 'f': + c = '\f'; + break; + case 'n': + c = '\n'; + break; + case 'v': + c = '\v'; + break; + case 't': + c = '\t'; + break; + case 'x': + throw(ErrorReport(range) << "unsupported hex specifier"); + case 'u': + case 'U': + throw(ErrorReport(range) << "unsupported unicode specifier"); + default: + // octal value in format \nnn, n is [0-7] + if (auto v = parseOctal(ret_str, pos)) { + to_erase = 4; + c = *v; + } else { + throw(ErrorReport(range) << " ill formed octal specifier"); + } + } + ret_str.replace(pos, to_erase, /* num copies */ 1, c); + pos = ret_str.find('\\', pos + 1); + } + return ret_str; +} + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..205727fe6d6546d6887bc3221ac4215a79b7415e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/script_type_parser.h @@ -0,0 +1,53 @@ +#pragma once +#include +#include +#include +#include + +namespace torch::jit { + +/** + * class ScriptTypeParser + * + * Parses expressions in our typed AST format (TreeView) into types and + * typenames. 
+ */ +class TORCH_API ScriptTypeParser { + public: + explicit ScriptTypeParser() = default; + explicit ScriptTypeParser(ResolverPtr resolver) + : resolver_(std::move(resolver)) {} + + c10::TypePtr parseTypeFromExpr(const Expr& expr) const; + + std::optional> parseBroadcastList( + const Expr& expr) const; + + c10::TypePtr parseType(const std::string& str); + + FunctionSchema parseSchemaFromDef(const Def& def, bool skip_self); + + c10::IValue parseClassConstant(const Assign& assign); + + private: + c10::TypePtr parseTypeFromExprImpl(const Expr& expr) const; + + std::optional parseBaseTypeName(const Expr& expr) const; + at::TypePtr subscriptToType( + const std::string& typeName, + const Subscript& subscript) const; + std::vector evaluateDefaults( + const SourceRange& r, + const std::vector& default_types, + const std::vector& default_exprs); + std::vector parseArgsFromDecl(const Decl& decl, bool skip_self); + + std::vector parseReturnFromDecl(const Decl& decl); + + ResolverPtr resolver_ = nullptr; + + // Need to use `evaluateDefaults` in serialization + friend struct ConstantTableValue; + friend struct SourceImporterImpl; +}; +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h new file mode 100644 index 0000000000000000000000000000000000000000..161b25342e258c75129229f76f016c2e8ffdcc2d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/sugared_value.h @@ -0,0 +1,861 @@ +#pragma once +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +using SugaredValuePtr = std::shared_ptr; + +// The AST can contain nodes like `self`, `self.b` or `python_fn` that +// are not first-class values in the graph representation, but instead +// will be desugared based on how they are used in the AST. 
+ +// SugaredValue is used to temporarily represent these values in a way +// that separates their behavior from the AST -> IR converter itself. +// This allows us to keep dependencies on python minimal. + +struct TORCH_API SugaredValue + : public std::enable_shared_from_this { + // what is this node? for error reporting (e.g. Module, python function) + virtual std::string kind() const = 0; + + // what can we do with this thing? + // use it as a value e.g. `this + 4` + virtual Value* asValue(const SourceRange& loc, GraphFunction& m) { + throw(ErrorReport(loc) << kind() << " cannot be used as a value"); + } + + // select an attribute on it, e.g. `this.field` + virtual std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) { + throw(ErrorReport(loc) << "attribute lookup is not defined on " << kind()); + } + + virtual bool hasAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) { + throw(ErrorReport(loc) << "attribute lookup is not defined on " << kind()); + } + + // assign an attribute on it, e.g. `this.field = newValue` + virtual void setAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field, + Value* newValue) { + throw( + ErrorReport(loc) << "attribute assignment is not defined on " + << kind()); + } + + // use it as a vector of values, e.g. 
a tuple of values as return value from + // a method invocation + virtual std::vector> asTuple( + const SourceRange& loc, + GraphFunction& m, + const std::optional& size_hint = {}) { + throw(ErrorReport(loc) << kind() << " cannot be used as a tuple"); + } + + // TODO @wconstab refactor to use ModuleValue::asTuple instead of new API + virtual SugaredValuePtr asTupleValue( + const SourceRange& loc, + GraphFunction& m) { + throw(ErrorReport(loc) << kind() << " cannot be used as a tuplevalue"); + } + + virtual std::vector> asType( + const SourceRange& loc, + Method& m) { + throw(ErrorReport(loc) << kind() << " cannot be used as a type"); + } + + // call it like a function, e.g. `outputs = this(inputs)` + virtual std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + // note: names for args will be 'argument 0', 'argument 1', etc.. + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) { + // n_binders is always set to the number of variables an expression is + // syntactically bound to: + // a = foo() # 1 binder (note in this case the single binder might be a + // tuple) a, * b = foo() # 1 binder a, b = foo() # 2 binders foo() # 0 + // binders + // + // In subexpressions, like bar() in foo(bar()), n_binders is always set to + // 1. n_binders is used as a hint to subexpressions to determine how many + // values they should return when that number is ambiguous statically. In + // particular it is currently used to decide how many tensors a call to a + // python function will return. It is only a hint, functions do not have to + // check that n_binders match the number of things they are returning, the + // assignment logic will do that anyway. + + throw(ErrorReport(loc) << "cannot call a " << kind()); + } + + // This function is called when to convert a SugaredValue to its iterator. 
+ // For example, when iterating through a Dict we iterate over its keys + virtual std::shared_ptr iter( + const SourceRange& loc, + GraphFunction& m) { + throw(ErrorReport(loc) << kind() << " cannot be used as an iterable"); + } + + // If we are iterating over a Sugared Value and it returns a value from this + // function, then we emit an unrolled loop over the variable. This allows us + // to support containers of Heterogenous types, like Module Containers & + // Tuples + virtual std::optional staticLen() { + return std::nullopt; + } + + // When iterating over this SugaredValue, should we emit the for loop as an + // unrolled loop. + bool shouldEmitUnrolled() { + return staticLen() != std::nullopt; + } + + // return length of this thing, if not then it can't be iterated. + // If it does not have a statically-determinable length, then it cannot + // be iterated over with a modulelist. If it does it must return a constant + // Value * + virtual Value* len(const SourceRange& loc, GraphFunction& m) { + throw( + ErrorReport(loc) << "'" << kind() << "'" + << " object is not iterable"); + } + + // expression for ith elemement for iterable value + virtual std::shared_ptr getitem( + const SourceRange& loc, + GraphFunction& m, + Value* idx, + TypePtr type_hint = nullptr) { + throw( + ErrorReport(loc) << "'" << kind() << "'" + << " object is not subscriptable"); + } + + virtual ~SugaredValue() = default; +}; + +// most things in the environment are just simple value types +// and not special python syntax sugar types +struct TORCH_API SimpleValue : public SugaredValue { + SimpleValue(Value* value) : value_(value) {} + std::string kind() const override { + std::stringstream ss; + // NOLINTNEXTLINE(clang-analyzer-core.CallAndMessage) + ss << "value of type '" << value_->type()->annotation_str() << "'"; + return ss.str(); + } + Value* asValue(const SourceRange& range, GraphFunction& m) override { + return value_; + } + std::vector> asTuple( + const SourceRange& loc, + 
GraphFunction& m, + const std::optional& size_hint = {}) override; + std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + bool hasAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + void setAttr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field, + Value* newValue) override; + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + // note: names for args will be 'argument 0', 'argument 1', etc.. + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + std::shared_ptr iter(const SourceRange& loc, GraphFunction& m) + override; + + Value* getValue() const { + return value_; + } + + Value* len(const SourceRange& loc, GraphFunction& m) override; + SugaredValuePtr getitem( + const SourceRange& loc, + GraphFunction& m, + Value* idx, + TypePtr type_hint = nullptr) override; + + private: + Value* value_; +}; + +struct TORCH_API BuiltinFunction : public SugaredValue { + BuiltinFunction(Symbol symbol, std::optional self) + : symbol(symbol), self(std::move(self)) {} + + // The symbol of the function (e.g. `aten::relu`). + Symbol symbol; + + // if this is method, then this is the self argument. + std::optional self; + std::string kind() const override { + return "builtin"; + } + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + // try to create this builtin but if it doesn't exist or the self argument + // cannot possibly match, then return nullptr. 
Use in situations where it is + // not clear if it is a valid builtin + static std::shared_ptr tryCreate( + Symbol symbol, + std::optional self); +}; + +struct TORCH_API SugaredTupleValue : public SugaredValue { + explicit SugaredTupleValue(std::vector> tup) + : tup_(std::move(tup)){}; + + std::vector> asTuple( + const SourceRange& loc, + GraphFunction& m, + const std::optional& size_hint = {}) override { + return tup_; + }; + + Value* asValue(const SourceRange& loc, GraphFunction& m) override { + std::vector vec; + vec.reserve(tup_.size()); + for (const auto& sv : tup_) { + vec.push_back(sv->asValue(loc, m)); + } + Graph& g = *m.graph(); + return g.insertNode(g.createTuple(vec))->output(); + } + + std::string kind() const override { + return "Tuple"; + } + + SugaredValuePtr getitem( + const SourceRange& loc, + GraphFunction& m, + Value* idx, + TypePtr type_hint = nullptr) override { + if (!(idx->type()->cast() && toIValue(idx))) { + throw( + ErrorReport(loc) + << "Expected integer literal for index but got a variable or non-integer. " + << "ModuleList/Sequential indexing is only supported with integer literals. " + << "For example, 'i = 4; self.layers[i](x)' will fail because i is not a literal. " + << "Enumeration is supported, e.g. 'for index, v in enumerate(self): out = v(inp)'"); + } + auto index = toIValue(idx)->toInt(); + int64_t adj_index = + (index < 0) ? index + static_cast(tup_.size()) : index; + if (!(adj_index >= 0 && adj_index < static_cast(tup_.size()))) { + throw( + ErrorReport(loc) << "Index " << index << " out of range of length " + << tup_.size()); + } + return tup_.at(adj_index); + } + + // This function is called when a SugaredValue is used to convert a + // SugaredValue to its iterator. 
For example, when iterating through a Dict we + // iterate over its keys + std::shared_ptr iter(const SourceRange& loc, GraphFunction& m) + override { + return shared_from_this(); + }; + + // Because this is used to contain SugaredValues of Heterogenous types, + // we define staticLen() so that when this is iterated over it is emitted + // as an unrolled loop. + std::optional staticLen() override { + return static_cast(tup_.size()); + } + + std::vector> tup_; +}; + +struct TORCH_API BuiltinModule : public SugaredValue { + BuiltinModule(std::string name, std::optional version = std::nullopt) + : name(std::move(name)), version(version) {} + + std::string kind() const override { + return "builtin module"; + } + std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override { + if (field == "autograd") { + // When refering torch.autograd, it is also considered to be a + // BuiltinModule and we will dispatch to the aten operators for the + // methods under its module. + return std::make_shared("aten", version); + } + + auto sym = Symbol::fromQualString(name + "::" + field); + return std::make_shared(sym, std::nullopt); + } + + private: + std::string name; + // when we add operator versioning, emit this op as it exising at 'version' + // if not set, use the latest version + std::optional version; +}; + +// Represents a class, analagous to `int` or `dict`. 
Instances of classes, +// like `1` or `{"foo": 5}`, are represented as SimpleValues +struct TORCH_API ClassValue : public SugaredValue { + explicit ClassValue(ClassTypePtr type) : type_(std::move(type)) {} + + // Call the type's constructor, as in: + // n = Foo(constructor_arg) + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + std::shared_ptr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + std::string kind() const override { + return type_->str(); + } + + ClassTypePtr type_; +}; + +struct TORCH_API NamedTupleConstructor : public SugaredValue { + explicit NamedTupleConstructor(TupleTypePtr type) : type_(std::move(type)) {} + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + std::string kind() const override { + return type_->str(); + } + + TupleTypePtr type_; +}; + +struct FunctionValue : public SugaredValue { + FunctionValue(Function* callee) : callees_({callee}) {} + FunctionValue(const StrongFunctionPtr& p) + : callees_({p.function_}), cu_(p.cu_) {} + FunctionValue(const std::vector& callees) { + for (const StrongFunctionPtr& callee : callees) { + cu_ = cu_ ? cu_ : callee.cu_; + TORCH_INTERNAL_ASSERT(callee.cu_ == cu_); + callees_.push_back(callee.function_); + } + } + + std::string kind() const override { + return "function"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& f, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + std::vector schemas; + for (Function* callee : callees_) { + try { + callee->ensure_defined(); + } catch (const RecursiveMethodCallError&) { + throw( + ErrorReport(loc) + << " function '" << callee->name() << "' is called recursively. 
" + << "Recursive calls are not supported"); + } + schemas.push_back(&callee->getSchema()); + } + auto match = matchSchemas(schemas, loc, *f.graph(), args, kwargs); + Value* output = + f.graph()->insertFunctionCall(callees_[match.first], match.second); + output->node()->setSourceRange(loc); + return std::make_shared(output); + } + + const std::vector& callees() { + return callees_; + } + + private: + std::vector callees_; + // TODO holding this thing is creepy + std::shared_ptr cu_; +}; + +struct TORCH_API ClosureValue : public SugaredValue { + ClosureValue(Value* value) : value_(value) { + TORCH_INTERNAL_ASSERT(value_->node()->kind() == prim::Closure); + } + std::string kind() const override { + return "closure"; + } + Value* asValue(const SourceRange& range, GraphFunction& m) override { + return value_; + } + Value* value_; +}; + +// defines how a method obtained from a module/class/interface behaves in script +struct MethodValue : public SugaredValue { + MethodValue(Value* self, std::vector method_names) + : self_(self), method_names_(std::move(method_names)) {} + MethodValue(Value* self, std::string method_name) + : MethodValue(self, std::vector({std::move(method_name)})) {} + + std::string kind() const override { + return "method"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& f, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + std::vector argsWithSelf = {self_}; + argsWithSelf.insert(argsWithSelf.end(), args.begin(), args.end()); + std::vector schemas; + for (const std::string& method_name : method_names_) { + if (auto class_type = self_->type()->cast()) { + Function& method = class_type->getMethod(method_name); + try { + method.ensure_defined(); + } catch (const RecursiveMethodCallError&) { + throw( + ErrorReport(loc) + << " method '" << method.name() << "' is called recursively. 
" + << "Recursive calls are not supported"); + } + schemas.push_back(&method.getSchema()); + } else if (auto interface_type = self_->type()->cast()) { + schemas.push_back(interface_type->getMethod(method_name)); + } else { + TORCH_INTERNAL_ASSERT( + false, "method constructed that is not a class or interface"); + } + } + auto match = matchSchemas(schemas, loc, *f.graph(), argsWithSelf, kwargs); + Value* output = + f.graph()->insertMethodCall(method_names_[match.first], match.second); + output->node()->setSourceRange(loc); + return std::make_shared(output); + } + + private: + Value* self_; + std::vector method_names_; +}; + +struct TORCH_API PrintValue : public SugaredValue { + std::string kind() const override { + return "print"; + } + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; +}; + +// expressions like int(x) +// these are the same as call prim::Int or equivalent except it +// is a noop when the input is a subtype of 'type' +struct TORCH_API CastValue : public BuiltinFunction { + CastValue(TypePtr type, c10::Symbol method) + : BuiltinFunction(method, std::nullopt), type_(std::move(type)) {} + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + if (args.size() == 1 && kwargs.empty()) { + auto len_op = std::make_shared(aten::len, std::nullopt); + auto gt_op = std::make_shared(aten::gt, std::nullopt); + auto zero = m.graph()->insertConstant(0); + + auto v = args[0].value(*m.graph()); + if (v->type()->isSubtypeOf(*type_)) { + return std::make_shared(v); + } else if ( + *type_ == *BoolType::get() && + (v->type()->isSubtypeOf(*AnyListType::get()) || + v->type()->isSubtypeOf(*StringType::get()) || + v->type()->cast())) { + auto len = len_op->call(loc, m, {v}, {}, 1); + return gt_op->call(loc, m, {len->asValue(loc, m), zero}, {}, 1); + } + } + return BuiltinFunction::call(loc, m, 
args, kwargs, n_binders); + } + + private: + TypePtr type_; +}; + +struct TORCH_API TensorCastValue : public SugaredValue { + TensorCastValue(at::ScalarType type, NamedValue self) + : dtype_(type), self_(std::move(self)) {} + + std::string kind() const override { + return "Cast"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override { + TORCH_INTERNAL_ASSERT(args.empty() && kwargs.empty()); + Value* dtype_const = m.graph()->insertConstant(dtype_, loc); + std::vector kwargs_{ + self_, NamedValue(loc, "dtype", dtype_const)}; + Value* casted_val = m.graph()->insert( + /*opname=*/Symbol::fromQualString("aten::to"), + /*args=*/args, + /*kwargs=*/kwargs_, + /*range=*/loc); + return std::make_shared(casted_val); + } + + at::ScalarType dtype_; + NamedValue self_; +}; + +// builtins operators and functions that call a method if it exists +// on a class type, like 'len(x)' and 'x + y' +struct TORCH_API MagicMethod : public SugaredValue { + MagicMethod(std::string desugared_name, SugaredValuePtr base) + : base_value_(std::move(base)), + desugared_name_(std::move(desugared_name)) {} + + std::string kind() const override { + return desugared_name_; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef kwargs, + size_t n_binders) override; + + private: + SugaredValuePtr base_value_; + std::string desugared_name_; +}; + +// things that look like function applications, but +// perform non-standard evaluation are represented +// with SpecialFormValues, e.g. 
+// isinstance(x, int) +// fork(fn) +// annotate(int, 3) +// The implementation of each value is handled by a case inside emitApplyExpr +struct TORCH_API SpecialFormValue : public SugaredValue { + SpecialFormValue(Symbol form) : form_(form) {} + std::string kind() const override { + return form_.toUnqualString(); + } + Symbol form() const { + return form_; + } + static std::shared_ptr create(Symbol form) { + return std::make_shared(form); + } + + private: + Symbol form_; +}; + +struct TORCH_API LegacyTensorConstructor : public SpecialFormValue { + LegacyTensorConstructor(Symbol form, at::ScalarType dtype, at::Device device) + : SpecialFormValue(form), device_(device), dtype_(dtype) {} + + static std::shared_ptr create( + Symbol form, + at::ScalarType dtype, + at::Device device) { + return std::make_shared(form, dtype, device); + } + at::ScalarType dtype() const { + return dtype_; + } + + private: + at::Device device_; + at::ScalarType dtype_; +}; + +// matched against for special handling of range expressions +struct TORCH_API RangeValue : SugaredValue { + RangeValue( + const SourceRange& loc, + GraphFunction& m, + std::vector input, + std::optional static_len = std::nullopt); + + std::string kind() const override { + return "range"; + } + Value* len(const SourceRange& loc, GraphFunction& m) override; + SugaredValuePtr getitem( + const SourceRange& loc, + GraphFunction& m, + Value* idx, + TypePtr type_hint = nullptr) override; + std::shared_ptr iter(const SourceRange& loc, GraphFunction& m) + override; + + // When Range is instantiated via enumerate(iterable_with_static_len), + // then it takes the static length of the iterable + std::optional staticLen() override { + return static_len_; + } + + private: + Value* start_{}; + Value* end_{}; + Value* step_{}; + // a flag to determine if it's a simple range() call with only end_ from + // arguments If true, we will not insert length calculation and index + // derivation nodes to simplify the graph and enable more 
possible + // optimizations + bool has_only_end_{}; + std::optional static_len_; +}; + +// Specialized Tree structure to matched against for special handling +// of builtin functions iterables expressions like zip(), enumerate(), etc. +// zip and enumerate can be modeled as a tree of SimpleValue/RangeValue: +// zip(x, y) -> (x, y) with tuple assignment to each loop target +// enumerate(x) -> (range(0, math.inf, 1), x) +// So a complicated expression like zip(a, enumerate(b), range(0, 100)) will be: +// (a, (range(0, math.inf, 1), b), range(0, 100)) +// We use those base iterables to fill in the loop information like +// max_trip_count and set the value table for loop targets +// Iterables can contain lists of SugaredValues like ModuleLists. If it +// does, then we emit it unrolled and require that all values it contains +// have a statically-determinable length. +struct TORCH_API IterableTree : SugaredValue { + IterableTree() = default; + IterableTree( + const SourceRange& range, + GraphFunction& m, + at::ArrayRef children) { + for (const auto& child : children) { + addChild(range, m, child); + } + } + std::string kind() const override { + return "iterabletree"; + } + + std::shared_ptr iter(const SourceRange& loc, GraphFunction& m) + override { + return shared_from_this(); + } + + void addChild( + const SourceRange& range, + GraphFunction& m, + const SugaredValuePtr& iter_value); + + std::vector get_children() { + return children_; + } + + // If this iterable contains a ModuleList or Tuple, then it will have a + // static length, and we will emit it as an unrolled for loop. + std::optional staticLen() override { + return unroll_length_; + } + + // given a IterableTree node, get all the base iterables/leaves under the + // IterableTree node. 
This enables + // us to get all the basic SugaredValues that contains valid loop information + // with len() and getitem() + std::vector get_base_iterables(); + + Value* len(const SourceRange& loc, GraphFunction& m) override; + SugaredValuePtr getitem( + const SourceRange& loc, + GraphFunction& m, + Value* idx, + TypePtr type_hint = nullptr) override; + + private: + std::optional unroll_length_ = std::nullopt; + std::vector children_; +}; + +static inline std::vector toValues( + Graph& g, + at::ArrayRef nvs) { + return fmap(nvs, [&](const NamedValue& v) { return v.value(g); }); +} + +struct SimpleSelf : public Self { + explicit SimpleSelf(ClassTypePtr classType) + : Self(), classType_(std::move(classType)) {} + std::shared_ptr makeSugared(Value* v) const override { + v->setType(classType_); + return std::make_shared(v); + } + ClassTypePtr getClassType() const override { + return classType_; + } + + private: + ClassTypePtr classType_; +}; + +// This is not a SimpleValue so it can not pass through the code paths that +// expect a SimpleValue as a sugared value. 
+struct TORCH_API ExceptionMessageValue : public SugaredValue { + explicit ExceptionMessageValue( + Value* value, + Value* qualified_class_name = nullptr) + : value_(value), qualified_class_name_(qualified_class_name) {} + + std::string kind() const override { + return "exception message"; + } + + Value* getValue() { + return value_; + } + + // qualified python class name + Value* getQualifiedClassName() { + return qualified_class_name_; + } + + private: + Value* value_; + Value* qualified_class_name_; +}; + +struct TORCH_API ExceptionValue : public SugaredValue { + explicit ExceptionValue(std::string message) : message_(std::move(message)) {} + + std::string kind() const override { + return "exception"; + } + + std::shared_ptr call( + const SourceRange& loc, + GraphFunction& m, + at::ArrayRef args, + at::ArrayRef /*attributes*/, + size_t /*n_binders*/) override { + auto exception_message = insertConstant(*m.graph(), message_ + ": ", loc); + for (auto& input : args) { + auto input_str = input.value(*m.graph()); + if (!input_str->type()->isSubtypeOf(*StringType::get())) { + input_str = + emitBuiltinCall(loc, *m.graph(), aten::str, {input_str}, {}); + } + exception_message = emitBuiltinCall( + loc, *m.graph(), aten::add, {exception_message, input_str}, {}); + } + return std::make_shared(exception_message); + } + + std::string message_; +}; + +struct TORCH_API SugaredEnumClass : public SugaredValue { + explicit SugaredEnumClass(EnumTypePtr enum_type) + : enum_type_(std::move(enum_type)) {} + + std::string kind() const override { + return "EnumClass"; + } + + SugaredValuePtr attr( + const SourceRange& loc, + GraphFunction& m, + const std::string& field) override; + + SugaredValuePtr iter(const SourceRange& loc, GraphFunction& m) override; + + private: + EnumTypePtr enum_type_; +}; + +struct TORCH_API SliceValue : public SugaredValue { + explicit SliceValue(Value* start, Value* stop, Value* step) + : start_(start), stop_(stop), step_(step) {} + + std::string kind() 
const override { + return "Python slice value"; + } + + Value* start() { + return start_; + }; + Value* stop() { + return stop_; + }; + Value* step() { + return step_; + }; + + private: + Value* start_; + Value* stop_; + Value* step_; +}; + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h new file mode 100644 index 0000000000000000000000000000000000000000..84e5e7755fef798c9dc107c0d524c73dd110693e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree.h @@ -0,0 +1,218 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include + +namespace torch::jit { + +// Trees are used to represent all forms of TC IR, pre- and post-typechecking. +// Rather than have a full class hierarchy for all TC statements, trees are a +// slight variation of Lisp s-expressions. For instance, the expression a*b+1 +// is represented as: +// (+ (* (ident a) (ident b)) (const 1)) +// Atoms like 'a', 'b', and '1' are represented by subclasses of Tree which +// define stringValue(). Everything else is a Compound object, which has a +// 'kind' that is a token from lexer.h's TokenKind enum. Single-character +// operators like '+' are represented using the character itself (so, add.kind() +// would be '+'). Each Compound object also contains a list of subtrees and is +// associated with a SourceRange for error reporting. +// Memory management of trees is done using intrusive_ptr. 
+ +struct Tree; +using TreeRef = c10::intrusive_ptr; +using TreeList = at::SmallVector; + +struct Tree : c10::intrusive_ptr_target { + Tree(int kind_) : kind_(kind_) {} + int kind() const { + return kind_; + } + virtual bool isAtom() const { + return true; + } + virtual const SourceRange& range() const { + throw std::runtime_error("is an Atom"); + } + virtual const std::string& stringValue() const { + throw std::runtime_error("stringValue can only be called on TK_STRING"); + } + virtual const TreeList& trees() const { + static const TreeList empty_trees = {}; + return empty_trees; + } + const TreeRef& tree(size_t i) const { + return trees().at(i); + } + virtual TreeRef map(const std::function& fn) { + (void)fn; + c10::raw::intrusive_ptr::incref(this); // we are creating a new pointer + // from a raw `this` pointer + // so we need to bump the refcount + // to account for this ownership + return TreeRef::reclaim(this); + } + template + void match(int k, Args&... args) const { + matchD(k, "unknown", 0, args...); + } + template + void matchD(int k, const char* filename, int lineno, Args&... 
args) const { + std::initializer_list vars = {args...}; + matchNumSubtreesD(k, filename, lineno, vars.size(), true); + size_t i = 0; + for (TreeRef* v : vars) { + *v = trees()[i++]; + } + } + void matchNumSubtrees(int k, size_t expected_subtrees) { + return matchNumSubtreesD(k, "unknown", 0, expected_subtrees, false); + } + void matchNumSubtreesD( + int k, + const char* filename, + int lineno, + size_t expected_subtrees, + bool allow_more) const { + if (kind() != k) { + std::stringstream ss; + ss << filename << ":" << lineno << ": expecting kind '" << kindToString(k) + << "' but found '" << kindToString(kind()) << "'\n"; + range().highlight(ss); + throw std::runtime_error(ss.str()); + } + if (trees().size() < expected_subtrees || + (!allow_more && trees().size() != expected_subtrees)) { + std::stringstream ss; + ss << filename << ":" << lineno << ": expected at least " + << expected_subtrees << " subtrees, but found only " << trees().size() + << "\n"; + range().highlight(ss); + throw std::runtime_error(ss.str()); + } + } + ~Tree() override = default; + + private: + int kind_; +}; + +struct String : public Tree { + String(std::string value) : Tree(TK_STRING), value_(std::move(value)) {} + const std::string& stringValue() const override { + return value_; + } + template + static TreeRef create(Args&&... 
args) { + return c10::make_intrusive(std::forward(args)...); + } + + private: + std::string value_; +}; + +static SourceRange mergeRanges(SourceRange c, const TreeList& others) { + for (const auto& t : others) { + if (t->isAtom()) + continue; + size_t s = std::min(c.start(), t->range().start()); + size_t e = std::max(c.end(), t->range().end()); + c = SourceRange(c.source(), s, e); + } + return c; +} + +struct Compound : public Tree { + Compound(int kind, SourceRange range) + : Tree(kind), range_(std::move(range)) {} + Compound(int kind, const SourceRange& range_, TreeList&& trees_) + : Tree(kind), + range_(mergeRanges(range_, trees_)), + trees_(std::move(trees_)) {} + const TreeList& trees() const override { + return trees_; + } + static TreeRef create( + int kind, + const SourceRange& range_, + TreeList&& trees_) { + return c10::make_intrusive(kind, range_, std::move(trees_)); + } + bool isAtom() const override { + return false; + } + TreeRef map(const std::function& fn) override { + TreeList ret; + for (auto& t : trees()) { + ret.push_back(fn(t)); + } + return Compound::create(kind(), range(), std::move(ret)); + } + + const SourceRange& range() const override { + return range_; + } + + private: + SourceRange range_; + TreeList trees_; +}; + +// tree pretty printer +struct pretty_tree { + pretty_tree(const TreeRef& tree, size_t col = 40) : tree(tree), col(col) {} + const TreeRef& tree; + size_t col; + std::unordered_map flat_strings; + const std::string& get_flat(const TreeRef& t) { + auto it = flat_strings.find(t); + if (it != flat_strings.end()) + return it->second; + + std::stringstream out; + switch (t->kind()) { + case TK_STRING: + out << t->stringValue(); + break; + default: + out << "(" << kindToString(t->kind()); + for (const auto& e : t->trees()) { + out << " " << get_flat(e); + } + out << ")"; + break; + } + auto it_ = flat_strings.emplace(t, out.str()); + return it_.first->second; + } + void print(std::ostream& out, const TreeRef& t, int indent) { + 
const std::string& s = get_flat(t); + if (indent + s.size() < col || t->isAtom()) { + out << s; + return; + } + std::string k = kindToString(t->kind()); + out << "(" << k; + for (const auto& e : t->trees()) { + out << "\n" << std::string(indent + 2, ' '); + print(out, e, indent + 2); + } + out << ")"; + } +}; + +static inline std::ostream& operator<<(std::ostream& out, pretty_tree t_) { + t_.print(out, t_.tree, 0); + return out << '\n'; +} + +static inline std::ostream& operator<<(std::ostream& out, const TreeRef& t) { + return out << pretty_tree(t); +} + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h new file mode 100644 index 0000000000000000000000000000000000000000..d7e758e34e38b8cce5abd0ccecec71500fe33b2f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/frontend/tree_views.h @@ -0,0 +1,1276 @@ +#pragma once +#include +#include +#include + +#include +#include +#include +#include +#include + +namespace torch::jit { + +// clang-format off +// TreeView provides a statically-typed way to traverse the tree, which should +// be formed according to the grammar below. 
+// +// A few notes on types and their aliases: +// - List is really a Tree with kind TK_LIST and elements as subtrees +// - Maybe is really a Tree with kind TK_OPTION that has 0 or 1 subtree of type T +// - Builtin types are: Ident (TK_IDENT), String (TK_STRING) +// +// Param = Param(Maybe type, Ident name) TK_PARAM +// +// Decl = Decl(List params, Maybe return_type) TK_DECL +// Def = Def(Ident name, Decl decl, List body) TK_DEF +// ClassDef = ClassDef(Ident name, TK_CLASS_DEF +// Maybe superclass, +// List body) +// +// Stmt = If(Expr cond, List true_body, List false_body) TK_IF +// | For(List targets, List iters, List body) TK_FOR +// | While(Expr cond, List body) TK_WHILE +// | Global(List idents) TK_GLOBAL +// -- NB: the only type of Expr's allowed on lhs are Var +// Or a tuple containing Var with an optional terminating Starred +// | Assign(Expr lhs, Maybe rhs, Maybe type) TK_ASSIGN +// | AugAssign(Expr lhs, AugAssignKind aug_op, Expr rhs) TK_AUG_ASSIGN +// | Return(List values) TK_RETURN +// | ExprStmt(List expr) TK_EXPR_STMT +// | Raise(Expr expr) TK_RAISE +// | Def TK_DEF +// | With(List targets, List body) TK_WITH +// +// Expr = TernaryIf(Expr cond, Expr true_expr, Expr false_expr) TK_IF_EXPR +// | BinOp(Expr lhs, Expr rhs) +// | And TK_AND +// | Or TK_OR +// | Lt '<' +// | Gt '>' +// | Eq TK_EQ +// | Le TK_LE +// | Ge TK_GE +// | Ne TK_NE +// | Is TK_IS +// | IsNot TK_ISNOT +// | Add '+' +// | Sub '-' +// | Mul '*' +// | Div '/' +// | Mod '%' +// | MatMult '@' +// | Pow TK_POW +// | UnaryOp(Expr expr) +// | Not TK_NOT +// | USub '-' +// | Const(String value) TK_CONST +// -- NB: x.name(y) is desugared into name(x, y) +// | Apply(Ident name, List args, List kwargs) TK_APPLY +// | Select(Expr value, Ident selector) '.' 
+// | Subscript(Expr value, List subscript_exprs) TK_SUBSCRIPT +// | SliceExpr(Maybe start, Maybe end) TK_SLICE_EXPR +// | Var(Ident name) TK_VAR +// | ListLiteral(List inputs) TK_LIST_LITERAL +// | TupleLiteral(List inputs) TK_TUPLE_LITERAL +// | Starred(Expr expr) TK_STARRED +// | WithItem(Expr target, Maybe var) TK_WITH_ITEM +// -- NB: only allowed expressions are Const or List(Const) +// (List as a value, not type constructor) +// Attribute = Attribute(Ident name, Expr value) TK_ATTRIBUTE +// +// AugAssignKind = +// | Add() TK_PLUS_EQ +// | Sub() TK_MINUS_EQ +// | Mul() TK_TIMES_EQ +// | Div() TK_DIV_EQ +// | Mod() TK_MOD_EQ +// + +// Each subclass of TreeView should provide: +// 1. Constructor that takes a TreeRef, and checks that it's of the right type. +// 2. Accessors that get underlying information out of the object. If they +// return subtrees, they should wrap them in appropriate views too. +// 3. Static method 'create' that creates the underlying TreeRef object +// for every TreeRef kind that has a TreeView, the parser always uses +// (e.g.) Ident::create rather than Compound::Create, this means that +// changes to the structure of Ident are always made right here rather +// than both in the parser and in this code. 
+// XXX: these structs should have no fields to prevent slicing when passing by value +// clang-format on +struct TreeView { + explicit TreeView(TreeRef tree) : tree_(std::move(tree)) {} + TreeRef tree() const { + return tree_; + } + const SourceRange& range() const { + return tree_->range(); + } + operator TreeRef() const { + return tree_; + } + const TreeRef& get() const { + return tree_; + } + int kind() const { + return tree_->kind(); + } + void dump() const { + std::cout << tree_; + } + + protected: + const TreeRef& subtree(size_t i) const { + return tree_->trees().at(i); + } + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + TreeRef tree_; +}; + +template +struct ListIterator { + ListIterator(TreeList::const_iterator it) : it(it) {} + bool operator!=(const ListIterator& rhs) const { + return it != rhs.it; + } + bool operator==(const ListIterator& rhs) const { + return it == rhs.it; + } + T operator*() const { + return T(*it); + } + ListIterator& operator+=(std::ptrdiff_t n) { + it += n; + return *this; + } + ListIterator& operator++() { + ++it; + return *this; + } + ListIterator& operator--() { + --it; + return *this; + } + + private: + TreeList::const_iterator it; +}; + +template +struct List : public TreeView { + using iterator = ListIterator; + using const_iterator = ListIterator; + + List(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_LIST); + // Iterate over list to temporarily instantiate Ts that will check the type + for (const T& elem : *this) { + (void)elem; // silence unused warning + } + } + iterator begin() const { + return iterator(tree_->trees().begin()); + } + iterator end() const { + return iterator(tree_->trees().end()); + } + bool empty() const { + return tree_->trees().begin() == tree_->trees().end(); + } + T operator[](size_t i) const { + return T(subtree(i)); + } + TreeRef map(const std::function& fn) { + return tree_->map([&](TreeRef v) { return fn(T(v)); }); + } + static List create(const 
SourceRange& range, const std::vector& subtrees) { + TreeList type_erased_sub{subtrees.begin(), subtrees.end()}; + return List(Compound::create(TK_LIST, range, std::move(type_erased_sub))); + } + static List unsafeCreate(const SourceRange& range, TreeList&& subtrees) { + return List(Compound::create(TK_LIST, range, std::move(subtrees))); + } + size_t size() const { + return tree_->trees().size(); + } +}; + +template +struct Maybe : public TreeView { + explicit Maybe(const TreeRef& tree) : TreeView(tree) { + tree_->match(TK_OPTION); + if (tree_->trees().size() > 1) + throw(ErrorReport(tree) << "Maybe trees can have at most one subtree"); + } + /* implicit */ Maybe(const T& tree) : TreeView(tree) {} + bool present() const { + return tree_->trees().size() > 0; + } + T get() const { + return T(tree_->trees().at(0)); + } + TreeRef map(const std::function& fn) { + return tree_->map([&](TreeRef v) { return fn(T(v)); }); + } + static Maybe create(const SourceRange& range) { + return Maybe(Compound::create(TK_OPTION, range, {})); + } + static Maybe create(const SourceRange& range, const T& value) { + return Maybe(Compound::create(TK_OPTION, range, {value})); + } +}; + +struct Ident : public TreeView { + explicit Ident(const TreeRef& tree) : TreeView(tree) { + tree_->match(TK_IDENT); + } + const std::string& name() const { + return subtree(0)->stringValue(); + } + static Ident create(const SourceRange& range, std::string name) { + return Ident( + Compound::create(TK_IDENT, range, {String::create(std::move(name))})); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Base types (production LHS) +//////////////////////////////////////////////////////////////////////////////// + +struct Stmt : public TreeView { + explicit Stmt(const TreeRef& tree) : TreeView(tree) { + switch (tree->kind()) { + case TK_IF: + case TK_FOR: + case TK_WHILE: + case TK_GLOBAL: + case TK_ASSIGN: + case TK_AUG_ASSIGN: + case TK_RETURN: + case TK_EXPR_STMT: + 
case TK_RAISE: + case TK_ASSERT: + case TK_PASS: + case TK_BREAK: + case TK_DELETE: + case TK_CONTINUE: + case TK_DEF: + case TK_WITH: + return; + default: + throw( + ErrorReport(tree) + << kindToString(tree->kind()) << " is not a valid Stmt"); + } + } +}; + +struct Expr : public TreeView { + explicit Expr(const TreeRef& tree) : TreeView(tree) { + switch (tree->kind()) { + case TK_IF_EXPR: + case TK_AND: + case TK_OR: + case '<': + case '>': + case TK_IS: + case TK_ISNOT: + case TK_EQ: + case TK_LE: + case TK_GE: + case TK_NE: + case '+': + case '-': + case TK_UNARY_MINUS: + case '~': + case '*': + case TK_STARRED: + case '/': + case '%': + case TK_NOT: + case TK_CONST: + case TK_STRINGLITERAL: + case TK_TRUE: + case TK_FALSE: + case TK_NONE: + case TK_NONE_TYPE: + case TK_CAST: + case TK_APPLY: + case '.': + case TK_SUBSCRIPT: + case TK_SLICE_EXPR: + case TK_VAR: + case TK_LIST_LITERAL: + case TK_TUPLE_LITERAL: + case TK_DICT_LITERAL: + case '@': + case TK_POW: + case TK_LSHIFT: + case TK_RSHIFT: + case TK_FLOOR_DIV: + case '&': + case '^': + case '|': + case TK_LIST_COMP: + case TK_DICT_COMP: + case TK_DOTS: + case TK_IN: + case TK_WITH_ITEM: + return; + default: + throw( + ErrorReport(tree) + << kindToString(tree->kind()) << " is not a valid Expr"); + } + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Helper nodes (mostly for function arguments) +//////////////////////////////////////////////////////////////////////////////// + +struct Attribute : public TreeView { + explicit Attribute(const TreeRef& tree) : TreeView(tree) { + tree_->match(TK_ATTRIBUTE); + } + Ident name() const { + return Ident(subtree(0)); + } + Expr value() const { + return Expr(subtree(1)); + } + static Attribute create( + const SourceRange& range, + const Ident& name, + const TreeRef& value) { + return Attribute(Compound::create(TK_ATTRIBUTE, range, {name, value})); + } +}; + +struct Param : public TreeView { + explicit Param(const TreeRef& 
tree) : TreeView(tree) { + tree_->match(TK_PARAM); + } + static Param create( + const SourceRange& range, + const Ident& ident, + const Maybe& type, + const Maybe& def, + bool kwarg_only) { + TreeRef kwarg_only_tree = + Compound::create(kwarg_only ? TK_TRUE : TK_FALSE, range, {}); + return Param(Compound::create( + TK_PARAM, range, {ident, type, def, std::move(kwarg_only_tree)})); + } + Ident ident() const { + return Ident(subtree(0)); + } + Maybe type() const { + return Maybe(subtree(1)); + } + Maybe defaultValue() const { + return Maybe(subtree(2)); + } + bool kwarg_only() const { + return TK_TRUE == subtree(3)->kind(); + } + Param withType(const Maybe& typ) const { + return Param::create(range(), ident(), typ, defaultValue(), kwarg_only()); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Top level definitions +//////////////////////////////////////////////////////////////////////////////// + +struct Decl : public TreeView { + explicit Decl(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_DECL); + } + List params() const { + return List(subtree(0)); + } + Maybe return_type() const { + return Maybe(subtree(1)); + } + static Decl create( + const SourceRange& range, + const List& params, + const Maybe& return_type) { + return Decl(Compound::create(TK_DECL, range, {params, return_type})); + } +}; + +struct Def : public TreeView { + explicit Def(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_DEF); + } + Def withName(std::string new_name) const { + auto new_ident = Ident::create(name().range(), std::move(new_name)); + return create(range(), new_ident, decl(), statements()); + } + Def withDecl(const Decl& decl) const { + return create(range(), name(), decl, statements()); + } + Ident name() const { + return Ident(subtree(0)); + } + Decl decl() const { + return Decl(subtree(1)); + } + List statements() const { + return List(subtree(2)); + } + static Def create( + const SourceRange& range, + const Ident& 
name, + const Decl& decl, + const List& stmts) { + return Def(Compound::create(TK_DEF, range, {name, decl, stmts})); + } +}; + +// Property represents a named attribute combined with a getter and setter +// method to access and mutate that attribute. +struct Property : public TreeView { + explicit Property(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_PROP); + } + Ident name() const { + return Ident(subtree(0)); + } + Def getter() const { + return Def(subtree(1)); + } + Maybe setter() const { + return Maybe(subtree(2)); + } + static Property create( + const SourceRange& range, + const Ident& name, + const Def& getter, + const Maybe& setter) { + return Property(Compound::create(TK_PROP, range, {name, getter, setter})); + } +}; + +struct Assign; + +struct ClassDef : public TreeView { + explicit ClassDef(const TreeRef& tree) : TreeView(tree) { + tree->match(TK_CLASS_DEF); + } + explicit ClassDef(TreeRef&& tree) : TreeView(std::move(tree)) { + tree_->match(TK_CLASS_DEF); + } + ClassDef withName(std::string new_name) const { + auto new_ident = Ident::create(name().range(), std::move(new_name)); + return create(range(), new_ident, superclass(), body()); + } + Ident name() const { + return Ident(subtree(0)); + } + Maybe superclass() const { + return Maybe(subtree(1)); + } + List body() const { + return List(subtree(2)); + } + Maybe> properties() const { + return Maybe>(subtree(3)); + } + Maybe> assigns() const { + return Maybe>(subtree(4)); + } + static ClassDef create( + const SourceRange& range, + const Ident& name, + const Maybe& superclass, + const List& body) { + return ClassDef(Compound::create( + TK_CLASS_DEF, + range, + {name, + superclass, + body, + Maybe>::create(range), + Maybe>::create(range)})); + } + static ClassDef create( + const SourceRange& range, + const Ident& name, + const Maybe& superclass, + const List& body, + const List& properties, + const List& assigns); +}; + +TORCH_API std::vector getUnresolvedClassAttributes( + const ClassDef& 
def); + +//////////////////////////////////////////////////////////////////////////////// +// Statements +//////////////////////////////////////////////////////////////////////////////// + +struct If : public Stmt { + explicit If(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_IF); + } + Expr cond() const { + return Expr(subtree(0)); + } + List trueBranch() const { + return List(subtree(1)); + } + List falseBranch() const { + return List(subtree(2)); + } + If withNewBranches( + const List& true_branch, + const List& false_branch) const { + return create(range(), cond(), true_branch, false_branch); + } + static If create( + const SourceRange& range, + const Expr& cond, + const List& true_branch, + const List& false_branch) { + return If( + Compound::create(TK_IF, range, {cond, true_branch, false_branch})); + } +}; + +struct While : public Stmt { + explicit While(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_WHILE); + } + Expr cond() const { + return Expr(subtree(0)); + } + List body() const { + return List(subtree(1)); + } + static While create( + const SourceRange& range, + const Expr& cond, + const List& body) { + return While(Compound::create(TK_WHILE, range, {cond, body})); + } +}; + +struct For : public Stmt { + explicit For(const TreeRef& tree) : Stmt(tree) { + tree->match(TK_FOR); + } + List targets() const { + return List(subtree(0)); + } + List itrs() const { + return List(subtree(1)); + } + List body() const { + return List(subtree(2)); + } + static For create( + const SourceRange& range, + const List& targets, + const List& itrs, + const List& body) { + return For(Compound::create(TK_FOR, range, {targets, itrs, body})); + } +}; + +// TODO: supports only single comprehension for now +struct ListComp : public Expr { + explicit ListComp(const TreeRef& tree) : Expr(tree) { + tree->match(TK_LIST_COMP); + } + Expr elt() const { + return Expr(subtree(0)); + } + Expr target() const { + return Expr(subtree(1)); + } + Expr iter() const { + return 
Expr(subtree(2)); + } + // TODO: no ifs for now + static ListComp create( + const SourceRange& range, + const Expr& elt, + const Expr& target, + const Expr& iter) { + return ListComp(Compound::create(TK_LIST_COMP, range, {elt, target, iter})); + } +}; + +// TODO: supports only single comprehension for now +struct DictComp : public Expr { + explicit DictComp(const TreeRef& tree) : Expr(tree) { + tree->match(TK_DICT_COMP); + } + Expr key() const { + return Expr(subtree(0)); + } + Expr value() const { + return Expr(subtree(1)); + } + Expr target() const { + return Expr(subtree(2)); + } + Expr iter() const { + return Expr(subtree(3)); + } + // TODO: no ifs for now + static DictComp create( + const SourceRange& range, + const Expr& key, + const Expr& value, + const Expr& target, + const Expr& iter) { + return DictComp( + Compound::create(TK_DICT_COMP, range, {key, value, target, iter})); + } +}; + +struct Global : public Stmt { + explicit Global(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_GLOBAL); + } + List names() { + return List(subtree(0)); + } + static Global create(const SourceRange& range, const List& names) { + return Global(Compound::create(TK_GLOBAL, range, {names})); + } +}; + +struct AugAssignKind : public TreeView { + explicit AugAssignKind(const TreeRef& tree) : TreeView(tree) { + switch (tree->kind()) { + case '+': + case '-': + case '*': + case '/': + case '%': + case '|': + case '&': + case '^': + case TK_POW: + case TK_LSHIFT: + case TK_RSHIFT: + return; + default: + throw(ErrorReport(tree) << "is not a valid AugAssignKind"); + } + } +}; + +// Augmented assignment, like "foo += bar" +struct AugAssign : public Stmt { + explicit AugAssign(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_AUG_ASSIGN); + } + static AugAssign create( + const SourceRange& range, + const Expr& lhs, + const AugAssignKind& aug_op, + const Expr& rhs) { + return AugAssign( + Compound::create(TK_AUG_ASSIGN, range, {lhs, aug_op, rhs})); + } + Expr lhs() const { + 
return Expr(subtree(0)); + } + int aug_op() const { + return subtree(1)->kind(); + } + Expr rhs() const { + return Expr(subtree(2)); + } +}; + +struct Assign : public Stmt { + explicit Assign(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_ASSIGN); + } + static Assign create( + const SourceRange& range, + const List& lhs, + const Maybe& rhs, + const Maybe& type) { + return Assign(Compound::create(TK_ASSIGN, range, {lhs, rhs, type})); + } + + List lhs_list() const { + return List(subtree(0)); + } + + Expr lhs() const { + const auto& li = lhs_list(); + TORCH_INTERNAL_ASSERT(li.size() == 1); + return *li.begin(); + } + + Maybe rhs() const { + return Maybe(subtree(1)); + } + + Maybe type() const { + return Maybe(subtree(2)); + } +}; + +struct Return : public Stmt { + explicit Return(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_RETURN); + } + Expr expr() const { + return Expr(subtree(0)); + } + static Return create(const SourceRange& range, const Expr& value) { + return Return(Compound::create(TK_RETURN, range, {value})); + } +}; + +struct Raise : public Stmt { + explicit Raise(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_RAISE); + } + Expr expr() const { + return Expr(subtree(0)); + } + static Raise create(const SourceRange& range, const Expr& expr) { + return Raise(Compound::create(TK_RAISE, range, {expr})); + } +}; + +struct Assert : public Stmt { + explicit Assert(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_ASSERT); + } + Expr test() const { + return Expr(subtree(0)); + } + Maybe msg() const { + return Maybe(subtree(1)); + } + static Assert create( + const SourceRange& range, + const Expr& test, + const Maybe& msg) { + return Assert(Compound::create(TK_ASSERT, range, {test, msg})); + } +}; + +struct Pass : public Stmt { + explicit Pass(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_PASS); + } + static Pass create(const SourceRange& range) { + return Pass(Compound::create(TK_PASS, range, {})); + } +}; + +struct Dots : public 
Expr { + explicit Dots(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_DOTS); + } + static Dots create(const SourceRange& range) { + return Dots(Compound::create(TK_DOTS, range, {})); + } +}; + +struct Break : public Stmt { + explicit Break(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_BREAK); + } + static Break create(const SourceRange& range) { + return Break(Compound::create(TK_BREAK, range, {})); + } +}; + +struct Continue : public Stmt { + explicit Continue(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_CONTINUE); + } + static Continue create(const SourceRange& range) { + return Continue(Compound::create(TK_CONTINUE, range, {})); + } +}; + +struct ExprStmt : public Stmt { + explicit ExprStmt(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_EXPR_STMT); + } + Expr expr() { + return Expr(subtree(0)); + } + static ExprStmt create(const SourceRange& range, const Expr& list) { + return ExprStmt(Compound::create(TK_EXPR_STMT, range, {list})); + } +}; + +//////////////////////////////////////////////////////////////////////////////// +// Expressions +//////////////////////////////////////////////////////////////////////////////// + +struct BinOp : public Expr { + explicit BinOp(const TreeRef& tree) : Expr(tree) { + switch (tree->kind()) { + case TK_AND: + case TK_OR: + case '<': + case '>': + case TK_IS: + case TK_ISNOT: + case TK_EQ: + case TK_LE: + case TK_GE: + case TK_NE: + case '+': + case '*': + case '/': + case '-': + case '@': + case TK_POW: + case TK_LSHIFT: + case TK_RSHIFT: + case '%': + case '&': + case '^': + case '|': + case TK_FLOOR_DIV: + case TK_IN: + if (tree->trees().size() != 2) + throw( + ErrorReport(tree) + << "BinOp expected 2 subtrees, found " << tree->trees().size()); + return; + default: + throw( + ErrorReport(tree) + << kindToString(tree->kind()) << " is not a valid BinOp"); + } + } + Expr lhs() const { + return Expr(subtree(0)); + } + Expr rhs() const { + return Expr(subtree(1)); + } + static BinOp create( + const 
SourceRange& range, + int kind, + const Expr& lhs, + const Expr& rhs) { + return BinOp(Compound::create(kind, range, {lhs, rhs})); + } +}; + +struct UnaryOp : public Expr { + explicit UnaryOp(const TreeRef& tree) : Expr(tree) { + switch (tree->kind()) { + case TK_UNARY_MINUS: + case '~': + case TK_NOT: + if (tree->trees().size() != 1) + throw( + ErrorReport(tree) + << "UnaryOp expected 1 subtree, found " << tree->trees().size()); + return; + default: + throw( + ErrorReport(tree) + << kindToString(tree->kind()) << " is not a valid UnaryOp"); + } + } + static UnaryOp create(const SourceRange& range, int kind, const Expr& expr) { + return UnaryOp(Compound::create(kind, range, {expr})); + } +}; + +struct Const : public Expr { + explicit Const(const TreeRef& tree) : Expr(tree) { + tree_->matchNumSubtrees(TK_CONST, 1); + } + bool isFloatingPoint() const { + if (isComplex()) + return false; + + bool is_inf = subtree(0)->stringValue() == "inf"; + return is_inf || + subtree(0)->stringValue().find_first_of(".eE") != std::string::npos; + } + bool isIntegral() const { + return !isFloatingPoint() && !isComplex(); + } + bool isComplex() const { + return subtree(0)->stringValue().find_first_of('j') != std::string::npos; + } + int64_t asIntegral() const { + try { + return std::stoll(subtree(0)->stringValue(), nullptr, 0); + } catch (const std::out_of_range&) { + throw( + ErrorReport(range()) << "Integral constant out of range " + "(must fit in a signed 64 bit integer)"); + } + } + double asFloatingPoint() const { + // We can't pass in nullptr as the dummy pointer gets dereferenced for + // Android version of strtod_c(). + char* dummy = nullptr; + return torch::jit::strtod_c(subtree(0)->stringValue().c_str(), &dummy); + } + c10::complex asComplex() const { + char* dummy = nullptr; + auto str = subtree(0)->stringValue(); + // Complex numbers (a+bj, where a is non-zero) are parsed as an addition + // between float/int a and a complex number "bj". 
When a is 0, a complex + // number bj is created as above. So, while parsing the string, we don't + // have to worry about the real component of the complex number. + auto imag = + torch::jit::strtod_c(str.substr(0, str.size() - 1).c_str(), &dummy); + return c10::complex(0, imag); + } + const std::string& text() const { + return subtree(0)->stringValue(); + } + static Const create(const SourceRange& range, const std::string& value) { + return Const(Compound::create(TK_CONST, range, {String::create(value)})); + } +}; + +struct StringLiteral : public Expr { + explicit StringLiteral(const TreeRef& tree) : Expr(tree) { + tree_->matchNumSubtrees(TK_STRINGLITERAL, 1); + } + const std::string& text() const { + return subtree(0)->stringValue(); + } + static StringLiteral create( + const SourceRange& range, + const std::string& value) { + return StringLiteral( + Compound::create(TK_STRINGLITERAL, range, {String::create(value)})); + } +}; + +struct Apply : public Expr { + explicit Apply(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_APPLY); + } + Expr callee() const { + return Expr(subtree(0)); + } + List inputs() const { + return List(subtree(1)); + } + List attributes() const { + return List(subtree(2)); + } + static Apply create( + const SourceRange& range, + const Expr& callee, + const List& inputs, + const List& attributes) { + return Apply( + Compound::create(TK_APPLY, range, {callee, inputs, attributes})); + } +}; + +struct Select : public Expr { + explicit Select(const TreeRef& tree) : Expr(tree) { + tree_->match('.'); + } + Expr value() const { + return Expr(subtree(0)); + } + Ident selector() const { + return Ident(subtree(1)); + } + static Select create( + const SourceRange& range, + const Expr& value, + const Ident& selector) { + return Select(Compound::create('.', range, {value, selector})); + } +}; + +struct SliceExpr : public Expr { + explicit SliceExpr(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_SLICE_EXPR); + } + Maybe start() const { + 
return Maybe(subtree(0)); + } + Maybe end() const { + return Maybe(subtree(1)); + } + Maybe step() const { + return Maybe(subtree(2)); + } + Expr startOr(int64_t alternative) const { + const auto startOption = start(); + return startOption.present() ? startOption.get() : createInt(alternative); + } + Expr endOr(int64_t alternative) const { + const auto endOption = end(); + return endOption.present() ? endOption.get() : createInt(alternative); + } + Expr stepOr(int64_t alternative) const { + const auto stepOption = step(); + return stepOption.present() ? stepOption.get() : createInt(alternative); + } + static SliceExpr create( + const SourceRange& range, + const Maybe& start, + const Maybe& end, + const Maybe& step) { + return SliceExpr( + Compound::create(TK_SLICE_EXPR, range, {start, end, step})); + } + + private: + Expr createInt(int64_t value) const { + return Expr(Const::create(range(), std::to_string(value))); + } +}; + +struct Subscript : public Expr { + explicit Subscript(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_SUBSCRIPT); + } + Expr value() const { + return Expr(subtree(0)); + } + List subscript_exprs() const { + return List(subtree(1)); + } + static Subscript create( + const SourceRange& range, + const Expr& value, + const List& subscript_exprs) { + auto whole_range = SourceRange( + range.source(), range.start(), subscript_exprs.range().end() + 1); + return Subscript( + Compound::create(TK_SUBSCRIPT, whole_range, {value, subscript_exprs})); + } +}; + +struct Var : public Expr { + explicit Var(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_VAR); + }; + Ident name() const { + return Ident(subtree(0)); + } + static Var create(const SourceRange& range, const Ident& name) { + return Var(Compound::create(TK_VAR, range, {name})); + } +}; + +// WithItem represents an item using with a WithStmt. 
+struct WithItem : public Expr { + explicit WithItem(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_WITH_ITEM); + } + + Expr target() const { + return Expr(subtree(0)); + } + + Maybe var() const { + return Maybe(subtree(1)); + } + + static WithItem create( + const SourceRange& range, + const Expr& target, + const Maybe& var) { + return WithItem(Compound::create(TK_WITH_ITEM, range, {target, var})); + } +}; + +// With represents a with statement consisting of a list of with items and a +// body of statements. +struct With : public Stmt { + explicit With(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_WITH); + } + + List targets() const { + return List(subtree(0)); + } + + List body() const { + return List(subtree(1)); + } + + static With create( + const SourceRange& range, + const List& targets, + const List& body) { + return With(Compound::create(TK_WITH, range, {targets, body})); + } +}; + +struct TernaryIf : public Expr { + explicit TernaryIf(const TreeRef& tree) : Expr(tree) { + tree_->matchNumSubtrees(TK_IF_EXPR, 3); + }; + Expr cond() const { + return Expr(subtree(0)); + } + Expr true_expr() const { + return Expr(subtree(1)); + } + Expr false_expr() const { + return Expr(subtree(2)); + } + static TernaryIf create( + const SourceRange& range, + const Expr& cond, + const Expr& true_expr, + const Expr& false_expr) { + return TernaryIf( + Compound::create(TK_IF_EXPR, range, {cond, true_expr, false_expr})); + }; +}; + +struct ListLiteral : public Expr { + explicit ListLiteral(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_LIST_LITERAL); + } + List inputs() const { + return subtree(0); + } + static ListLiteral create( + const SourceRange& range, + const List& inputs) { + return ListLiteral(Compound::create(TK_LIST_LITERAL, range, {inputs})); + } +}; + +struct TupleLiteral : public Expr { + explicit TupleLiteral(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_TUPLE_LITERAL); + } + List inputs() const { + return subtree(0); + } + static 
TupleLiteral create( + const SourceRange& range, + const List& inputs) { + return TupleLiteral(Compound::create(TK_TUPLE_LITERAL, range, {inputs})); + } +}; + +struct DictLiteral : public Expr { + explicit DictLiteral(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_DICT_LITERAL); + } + List key_inputs() const { + return subtree(0); + } + List value_inputs() const { + return subtree(1); + } + static DictLiteral create( + const SourceRange& range, + const List& keys, + const List& values) { + return DictLiteral( + Compound::create(TK_DICT_LITERAL, range, {keys, values})); + } +}; + +struct Starred : public Expr { + explicit Starred(const TreeRef& tree) : Expr(tree) { + tree_->match(TK_STARRED); + } + Expr expr() const { + return Expr(subtree(0)); + } + static Starred create(const SourceRange& range, const Expr& expr) { + return Starred(Compound::create(TK_STARRED, range, {expr})); + } +}; + +struct Delete : public Stmt { + explicit Delete(const TreeRef& tree) : Stmt(tree) { + tree_->match(TK_DELETE); + } + List targets() const { + return subtree(0); + } + static Delete create(const SourceRange& range, const List& targets) { + return Delete(Compound::create(TK_DELETE, range, {targets})); + } +}; + +/* + * NOTE: transforming PEP 604 union into equivalent union type + * + * NOTE: Union[int, float] parses into: + * expr:(subscript + * (variable (ident Union)) + * (list + * (variable (ident int)) + * (variable (ident float)))) + * subscript + * + * NOTE: (int | float) parses into: + * expr:(| + * (variable (ident int)) + * (variable (ident float))) + * | + */ + +inline void _flatten_pep604_union( + const torch::jit::Expr& node, + std::vector* result) { + // flatten possibly nested union expressions like (int | (float | str)) + // into a flat list of expressions like [int, float, str] + if (node.kind() == '|') { + auto as_binop = torch::jit::BinOp(node); + _flatten_pep604_union(as_binop.lhs(), result); + _flatten_pep604_union(as_binop.rhs(), result); + } else { + 
result->push_back(node); + } +} + +inline std::vector get_pep604_union_members(const Expr& node) { + std::vector result; + _flatten_pep604_union(node, &result); + return result; +} + +// Flattens a PEP 604 union into a classical union. +// For example, ((x | y) | z) is transformed into Union[x, y, z]. +inline Expr pep604union_to_union(const Expr& expr) { + // noop if not a pep604 union + if (expr.kind() != '|') + return expr; + + // In order to support unions with more than 2 operands ((x|y)|z), we need to + // recursively flatten the tree of | expressions. + auto members = get_pep604_union_members(expr); + auto synthesised_union = Subscript::create( + expr.range(), + Var::create(expr.range(), Ident::create(expr.range(), "Union")), + List::create(expr.range(), members)); + return std::move(synthesised_union); +} + +} // namespace torch::jit + +namespace std { + +template +struct iterator_traits> + : std::iterator_traits {}; + +} // namespace std diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h new file mode 100644 index 0000000000000000000000000000000000000000..73c100094f2c04982a2a563e4ae76d74d545469b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/code.h @@ -0,0 +1,34 @@ +#pragma once + +#include + +#include +#include +#include + +namespace torch::jit::mobile { + +using Stack = std::vector; +using DebugHandle = int64_t; + +class Function; + +struct Code { + std::vector instructions_; + std::vector debug_handles_; + std::vector op_names_; + std::vector operator_input_sizes_; + std::vector> operators_; + std::vector constants_; + std::vector types_; + // TODO After we actually export CALL instructions we can remove this. + // We may need a two-stage importing scheme, where we firstly construct all + // function objects, and then append referenced function pointers. This could + // be done in parseMethods(). 
+ std::vector functions_; + size_t register_size_ = 0; // Aggregated output size. + // initialized means operators_ array is filled with operators + bool initialized = false; +}; + +} // namespace torch::jit::mobile diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h new file mode 100644 index 0000000000000000000000000000000000000000..0cf4b42508b26bceb619bdea9fc481b181157638 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/debug_info.h @@ -0,0 +1,55 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace torch::jit { +/* + * MobileDebugTable: + * Deserializes debug_pkl and callstack_map records from PT model's zip archive + * and stores them in a map of debug handles to DebugInfoPair. Debug handles are + * unique per model and runtime, be in lite interpreter or delegate, an + * exception of BackendRuntimeException should raised using debug handles. + * getSourceDebugString method is responsible for translating debug + * handles to correspond debug information. + * This debug informatin includes stack trace of model level source code and + * module hierarchy where the exception occurred. 
+ */ +class MobileDebugTable { + public: + MobileDebugTable() = default; + MobileDebugTable( + std::unique_ptr& reader, + const std::shared_ptr& cu); + + template + MobileDebugTable(It begin, It end) : callstack_ptr_map_(begin, end) {} + + std::string getSourceDebugString( + const int64_t debug_handle, + const std::string& top_module_type_name = "ModuleTypeUnknown") const; + std::string getSourceDebugString( + const std::vector& debug_handles, + const std::string& top_module_type_name = "ModuleTypeUnknown") const; + std::string getModuleHierarchyInfo( + const int64_t debug_handle, + const std::string& top_module_type_name = "ModuleTypeUnknown") const; + std::string getModuleHierarchyInfo( + const std::vector& debug_handles, + const std::string& top_module_type_name = "ModuleTypeUnknown") const; + + const ska::flat_hash_map& getCallStackPtrMap() + const { + return callstack_ptr_map_; + } + + private: + std::pair getSourceDebugModuleHierarchyInfo( + const std::vector& debug_handles, + const std::string& top_module_type_name = "ModuleTypeUnknown") const; + ska::flat_hash_map callstack_ptr_map_; +}; + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h new file mode 100644 index 0000000000000000000000000000000000000000..73869a85a5019417527edf761aaa2d25b8c6acf1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/file_format.h @@ -0,0 +1,194 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#if defined(HAVE_MMAP) +#include +#include +#include +#include +#include +#endif + +/** + * @file + * + * Helpers for identifying file formats when reading serialized data. + * + * Note that these functions are declared inline because they will typically + * only be called from one or two locations per binary. 
+ */ + +namespace torch::jit { + +/** + * The format of a file or data stream. + */ +enum class FileFormat { + UnknownFileFormat = 0, + FlatbufferFileFormat, + ZipFileFormat, +}; + +/// The size of the buffer to pass to #getFileFormat(), in bytes. +constexpr size_t kFileFormatHeaderSize = 8; +constexpr size_t kMaxAlignment = 16; + +/** + * Returns the likely file format based on the magic header bytes in @p header, + * which should contain the first bytes of a file or data stream. + */ +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static inline FileFormat getFileFormat(const char* data) { + // The size of magic strings to look for in the buffer. + static constexpr size_t kMagicSize = 4; + + // Bytes 4..7 of a Flatbuffer-encoded file produced by + // `flatbuffer_serializer.h`. (The first four bytes contain an offset to the + // actual Flatbuffer data.) + static constexpr std::array kFlatbufferMagicString = { + 'P', 'T', 'M', 'F'}; + static constexpr size_t kFlatbufferMagicOffset = 4; + + // The first four bytes of a ZIP file. + static constexpr std::array kZipMagicString = { + 'P', 'K', '\x03', '\x04'}; + + // Note that we check for Flatbuffer magic first. Since the first four bytes + // of flatbuffer data contain an offset to the root struct, it's theoretically + // possible to construct a file whose offset looks like the ZIP magic. On the + // other hand, bytes 4-7 of ZIP files are constrained to a small set of values + // that do not typically cross into the printable ASCII range, so a ZIP file + // should never have a header that looks like a Flatbuffer file. + if (std::memcmp( + data + kFlatbufferMagicOffset, + kFlatbufferMagicString.data(), + kMagicSize) == 0) { + // Magic header for a binary file containing a Flatbuffer-serialized mobile + // Module. 
+ return FileFormat::FlatbufferFileFormat; + } else if (std::memcmp(data, kZipMagicString.data(), kMagicSize) == 0) { + // Magic header for a zip file, which we use to store pickled sub-files. + return FileFormat::ZipFileFormat; + } + return FileFormat::UnknownFileFormat; +} + +/** + * Returns the likely file format based on the magic header bytes of @p data. + * If the stream position changes while inspecting the data, this function will + * restore the stream position to its original offset before returning. + */ +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static inline FileFormat getFileFormat(std::istream& data) { + FileFormat format = FileFormat::UnknownFileFormat; + std::streampos orig_pos = data.tellg(); + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + std::array header; + data.read(header.data(), header.size()); + if (data.good()) { + format = getFileFormat(header.data()); + } + data.seekg(orig_pos, data.beg); + return format; +} + +/** + * Returns the likely file format based on the magic header bytes of the file + * named @p filename. + */ +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static inline FileFormat getFileFormat(const std::string& filename) { + std::ifstream data(filename, std::ifstream::binary); + return getFileFormat(data); +} + +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static void file_not_found_error() { + std::stringstream message; + message << "Error while opening file: "; + if (errno == ENOENT) { + message << "no such file or directory" << '\n'; + } else { + message << "error no is: " << errno << '\n'; + } + TORCH_CHECK(false, message.str()); +} + +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static inline std::tuple, size_t> get_file_content( + const char* filename) { +#if defined(HAVE_MMAP) + int fd = open(filename, O_RDONLY); + if (fd < 0) { + // failed to open file, chances are it's no such file or directory. 
+ file_not_found_error(); + } + struct stat statbuf {}; + fstat(fd, &statbuf); + size_t size = statbuf.st_size; + void* ptr = mmap(nullptr, statbuf.st_size, PROT_READ, MAP_PRIVATE, fd, 0); + close(fd); + auto deleter = [statbuf](char* ptr) { munmap(ptr, statbuf.st_size); }; + std::shared_ptr data(reinterpret_cast(ptr), deleter); +#else + FILE* f = fopen(filename, "rb"); + if (f == nullptr) { + file_not_found_error(); + } + fseek(f, 0, SEEK_END); + size_t size = ftell(f); + fseek(f, 0, SEEK_SET); + // make sure buffer size is multiple of alignment + size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment; + std::shared_ptr data( + static_cast(c10::alloc_cpu(buffer_size)), c10::free_cpu); + fread(data.get(), size, 1, f); + fclose(f); +#endif + return std::make_tuple(data, size); +} + +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static inline std::tuple, size_t> get_stream_content( + std::istream& in) { + // get size of the stream and reset to orig + std::streampos orig_pos = in.tellg(); + in.seekg(orig_pos, std::ios::end); + const long size = in.tellg(); + in.seekg(orig_pos, in.beg); + + // read stream + // NOLINT make sure buffer size is multiple of alignment + size_t buffer_size = (size / kMaxAlignment + 1) * kMaxAlignment; + std::shared_ptr data( + static_cast(c10::alloc_cpu(buffer_size)), c10::free_cpu); + in.read(data.get(), size); + + // reset stream to original position + in.seekg(orig_pos, in.beg); + return std::make_tuple(data, size); +} + +// NOLINTNEXTLINE(facebook-hte-NamespaceScopedStaticDeclaration) +static inline std::tuple, size_t> get_rai_content( + caffe2::serialize::ReadAdapterInterface* rai) { + size_t buffer_size = (rai->size() / kMaxAlignment + 1) * kMaxAlignment; + std::shared_ptr data( + static_cast(c10::alloc_cpu(buffer_size)), c10::free_cpu); + rai->read( + 0, data.get(), rai->size(), "Loading ReadAdapterInterface to bytes"); + return std::make_tuple(data, buffer_size); +} + +} // namespace torch::jit diff --git 
a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h new file mode 100644 index 0000000000000000000000000000000000000000..c1e062edf98348b2aa13dff79d944b6f6218b87e --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/flatbuffer_loader.h @@ -0,0 +1,134 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +/** + * Defines the public API for loading flatbuffer-serialized mobile modules. + * Note that this header must not include or depend on flatbuffer-defined + * types, to avoid leaking those details to PyTorch clients. + */ + +namespace torch::jit { + +/// All non-copied data pointers provided to `parse_and_initialize_*` functions +/// must be aligned to this boundary. Since the Module will point directly into +/// the data, this alignment is necessary to ensure that certain types/structs +/// are properly aligned. +constexpr size_t kFlatbufferDataAlignmentBytes = 16; + +/// Maps file names to file contents. +using ExtraFilesMap = std::unordered_map; + +// On high level, to produce a Module from a file on disk, we need to go +// through the follow steps: +// 1. Read: Read the file from disk -> memory +// 2. Deserialize: Parse the bytes to produce some in memory manipulable +// structure +// 3. Module initialization: Produce mobile::Module out of the structure +// produced in 2. +// Under this context, the structure described in 2. is the flatbuffer-defined +// type mobile::serialization::Module. However, this step/type is not visible in +// the public API. + +// Parse a mobile::Module from raw bytes. +// +// This function does steps 2+3 described above. +// +// Does not take ownership of `data`; if you want it to take ownership, see the +// shared_ptr overload of this function. 
+// +// If should_copy_tensor_memory is true, then the returned module will NOT have +// refences to `data`, so `data` can be freed immediately. +// +// If should_copy_tensor_memory is false, then returned module will have tensors +// that points inside of `data`; the caller will need to make sure that `data` +// outlives the returned Module. Also, `data` must be aligned to +// kFlatbufferDataAlignmentBytes. +TORCH_API mobile::Module parse_and_initialize_mobile_module( + void* data, + size_t size, // of `data`, in bytes. + std::optional device = std::nullopt, + ExtraFilesMap* extra_files = nullptr, + bool should_copy_tensor_memory = false); + +// Parse a mobile::Module from raw bytes. +// +// This function does steps 2+3 described above. +// +// The returned Module holds a reference to `data`, which must be aligned to +// kFlatbufferDataAlignmentBytes. +// +// If you do not want the Module to hold a reference to `data`, see the raw +// pointer overload of this function. +TORCH_API mobile::Module parse_and_initialize_mobile_module( + std::shared_ptr data, + size_t size, // of `data`, in bytes. + std::optional device = std::nullopt, + ExtraFilesMap* extra_files = nullptr); + +// Parse a mobile::Module from raw bytes, also returning JIT-related metadata. +// +// This is the same as parse_and_initialize_mobile_module() except that it also +// extracts JIT source files and constants. Can be used to construct a +// jit::Module. +TORCH_API mobile::Module parse_and_initialize_mobile_module_for_jit( + void* data, + size_t size, // of `data`, in bytes. + ExtraFilesMap& jit_sources, + std::vector& jit_constants, + std::optional device = std::nullopt, + ExtraFilesMap* extra_files = nullptr); + +// Load a mobile::Module from a filepath. +// +// This function does steps 1+2+3 described above. +// +// We need to have this as a convienience because Python API will need to wrap +// this. 
C++ clients should use one of the versions of +// parse_and_initialize_mobile_module() so they can manage the raw data more +// directly. +TORCH_API mobile::Module load_mobile_module_from_file( + const std::string& filename, + std::optional device = std::nullopt, + ExtraFilesMap* extra_files = nullptr); + +TORCH_API uint64_t get_bytecode_version(std::istream& in); +TORCH_API uint64_t get_bytecode_version(const std::string& filename); +TORCH_API uint64_t get_bytecode_version_from_bytes(char* flatbuffer_content); + +TORCH_API mobile::ModuleInfo get_module_info_from_flatbuffer( + char* flatbuffer_content); + +// The methods below are less efficient because it need to read the stream in +// its entirity to a buffer +TORCH_API mobile::Module load_mobile_module_from_stream_with_copy( + std::istream& in, + std::optional device = std::nullopt, + ExtraFilesMap* extra_files = nullptr); + +TORCH_API mobile::Module parse_flatbuffer_no_object( + std::shared_ptr data, + size_t size, + std::optional device); + +TORCH_API mobile::Module parse_and_initialize_mobile_module( + void* data, + size_t, + std::optional, + ExtraFilesMap* extra_files, + bool should_copy_tensor_memory); + +// no op, TODO(qihan) delete +TORCH_API bool register_flatbuffer_loader(); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h new file mode 100644 index 0000000000000000000000000000000000000000..3304f030613be6b73b0dd41acff02b0ed04d6528 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/frame.h @@ -0,0 +1,49 @@ +#pragma once + +#include + +#include +#include + +namespace torch::jit::mobile { + +class Frame { + public: + explicit Frame(const Code& code) : code_(code) {} + const Code& getCode() const { + return code_; + } + + void step() { + pc_++; + } + + void jump(size_t n) { + pc_ += n; + } + + size_t getPC() const { + 
return pc_; + } + + const Instruction& getInstruction() const { + return code_.instructions_.at(pc_); + } + + std::optional getDebugHandle() const { + return getDebugHandle(pc_); + } + + std::optional getDebugHandle(size_t pc) const { + if (pc >= code_.debug_handles_.size()) { + return {}; + } + return code_.debug_handles_[pc]; + } + + private: + const Code& code_; + size_t pc_{0}; +}; + +} // namespace torch::jit::mobile diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h new file mode 100644 index 0000000000000000000000000000000000000000..5e0824f880b2db0857d82579e1613c958adbc65f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/function.h @@ -0,0 +1,84 @@ +#pragma once + +#include + +#include +#include +#include +#include + +namespace torch::jit { +enum OpCode : uint8_t; +struct Instruction; +struct OperatorString; + +namespace mobile { + +class TORCH_API Function : public torch::jit::Function { + public: + explicit Function(c10::QualifiedName name); + Function( + c10::QualifiedName name, + Code code, + std::optional schema); + void run(Stack& stack) override; + at::IValue operator()(Stack& stack); + void ensure_defined() override {} + size_t num_inputs() const override; + const c10::QualifiedName& qualname() const override; + bool call(Stack&, c10::function_ref) override; + + // NOTE: the APIs below is dangerous: if you call append_instruction with + // dbg_handle and then call it without; then the dbg_handle will become + // misaligned. Therefore only use ONE variant at time. 
+ void append_instruction(OpCode op, int64_t X, int64_t N, int64_t dbg_handle); + void append_instruction(OpCode op, int64_t X, int64_t N); + void append_operator( + const std::string& name, + const std::string& overload_name, + const std::optional& num_specified_args); + void append_constant(const c10::IValue& constant); + void append_type(const c10::TypePtr& type); + void append_function(mobile::Function& func); + + void set_register_size(size_t size); + + int64_t get_debug_handle(size_t pc) const; + const Code& get_code() const; + Code& get_code(); + + torch::jit::Function& setSchema(c10::FunctionSchema schema) override; + bool hasSchema() const; + const c10::FunctionSchema& getSchema() const override; + + // Returns the debug handle corresponding to where the execution + // is halted due to exception. + // If no corresponding debug handle is found then -1 is returned. + const std::vector& getExceptionDebugHandles() const; + static Function& registerFunc( + const std::string& qualified_name, + const std::vector& instructions, + const std::vector& constants, + const std::vector& types, + const size_t register_size); + + // if not initialize, initialize by loading operators. + // return true of all op loaded, return false if some op is not found + // in the current runtime. 
Then, the ops that did not found will be filled + // in unsupported_op_names + bool initialize_operators(bool should_check_operators); + + private: + c10::QualifiedName name_; + Code code_; + std::optional schema_; // (byte-code version 4+) +}; + +std::optional> makeOperatorFunction( + const c10::OperatorName& opname, + std::optional num_specified_args); + +TORCH_API std::string operator_str(const c10::OperatorName& opname); + +} // namespace mobile +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import.h new file mode 100644 index 0000000000000000000000000000000000000000..bb754430a9b88c9fe2f3a7ab605569cf5c4232a6 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import.h @@ -0,0 +1,108 @@ +#pragma once +#include +#include + +#include +#include + +#include + +namespace torch::jit { +using caffe2::serialize::ReadAdapterInterface; +using ExtraFilesMap = std::unordered_map; + +constexpr const char* kArchiveNameBytecode = "bytecode"; +constexpr const char* kArchiveNameConstants = "constants"; +constexpr const char* kArchiveNameVersion = "version"; + +// The family of methods below load a serialized Mobile Module +// into a mobile::Module object. 
+TORCH_API mobile::Module _load_for_mobile( + std::istream& in, + std::optional device, + ExtraFilesMap& extra_file, + uint64_t module_load_options = kDefaultMobileLoadOptions); + +TORCH_API mobile::Module _load_for_mobile( + const std::string& filename, + std::optional device, + ExtraFilesMap& extra_files); + +TORCH_API mobile::Module _load_for_mobile( + std::unique_ptr rai, + std::optional device, + ExtraFilesMap& extra_files, + uint64_t module_load_options = kDefaultMobileLoadOptions); + +TORCH_API mobile::Module _load_for_mobile( + const std::string& filename, + std::optional device, + ExtraFilesMap& extra_files, + uint64_t module_load_options); + +TORCH_API mobile::Module _load_for_mobile( + std::istream& in, + std::optional device = std::nullopt); + +TORCH_API mobile::Module _load_for_mobile( + const std::string& filename, + std::optional device = std::nullopt); + +TORCH_API mobile::Module _load_for_mobile( + std::unique_ptr rai, + std::optional device = std::nullopt); + +/** + * Load only the contents of the "extra/" files whose names are + * passed in the map (extra_files). Populate the corresponding values + * with the contents of those files. Do not attempt to load the entire + * model, and stop once the extra files have been extracted. + * + * This API is needed to be able to load GPU models on linux CPU + * machines and extract only the extra files so that we can inspect + * the metadata that was added to the .ptl archive when it was + * generated. + * + */ +void _load_extra_only_for_mobile( + const std::string& filename, + std::optional device, + ExtraFilesMap& extra_files); + +// Currently used by both mobile/import.cpp and model_compatibility.cpp. +// Should be removed after model_compatibility.cpp start using simplified +// version type_resolver and obj_loader. 
+at::TypePtr resolveTypeNameMobile( + const c10::QualifiedName& qn, + const std::shared_ptr& compilation_unit); +c10::StrongTypePtr typeResolverMobile( + const c10::QualifiedName& qn, + const std::shared_ptr& compilation_unit); +c10::intrusive_ptr objLoaderMobile( + const at::StrongTypePtr& type, + const at::IValue& input, + mobile::CompilationUnit& mobile_compilation_unit); + +// Given a reader, which has access to a model file, +// return true if there exists tensors in `bytecode` archive +bool isTensorInBytecodeArchive( + caffe2::serialize::PyTorchStreamReader& stream_reader); + +namespace mobile { + +/** + * Given a torch::jit::mobile::Module, return a set of operator names + * (with overload name) that are used by any method in this mobile + * Mobile. This method runs through the bytecode for all methods + * in the specified model (module), and extracts all the root + * operator names. Root operators are operators that are called + * directly by the model (as opposed to non-root operators, which + * may be called transitively by the root operators). + * + */ +TORCH_API std::set _export_operator_list( + torch::jit::mobile::Module& module); + +} // namespace mobile + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h new file mode 100644 index 0000000000000000000000000000000000000000..bdb7f1e57de0de44bc47066af360e479951f2926 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/import_export_common.h @@ -0,0 +1,15 @@ +#pragma once + +/** + * @file + * Declarations shared between import_data.cpp and export_data.cpp + */ + +namespace torch::jit::mobile::internal { +/** + * The name of the mobile::Module attribute which contains saved parameters, as + * a Dict of names to Tensors. Only used for Flatbuffer serialization. 
+ */ +// NOLINTNEXTLINE(cppcoreguidelines-avoid-c-arrays,modernize-avoid-c-arrays) +constexpr char kSavedParametersAttributeName[] = "data"; +} // namespace torch::jit::mobile::internal diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h new file mode 100644 index 0000000000000000000000000000000000000000..a05e2c44f452783102b8133be72ce15279960a83 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/method.h @@ -0,0 +1,41 @@ +#pragma once + +#include +#include + +namespace torch::jit::mobile { + +class Module; + +struct TORCH_API Method { + Method(const Module* owner, Function* function); + + void run(Stack& stack) const; + void run(Stack&& stack) const { + run(stack); + } + + c10::IValue operator()(std::vector stack) const; + + const std::string& name() const { + return function_->name(); + } + + int64_t get_debug_handle(size_t pc) const { + return function_->get_debug_handle(pc); + } + + Function& function() const { + return *function_; + } + + private: + // Methods are uniquely owned by a single module. + // This raw pointer allows referencing the module + const Module* owner_; + + // Underlying unbound function + Function* function_; +}; + +} // namespace torch::jit::mobile diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/module.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/module.h new file mode 100644 index 0000000000000000000000000000000000000000..ec41744e53bb6d6767baca8d4caa9a519dda7bcf --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/module.h @@ -0,0 +1,193 @@ +#pragma once +#include +#include +#include +#include +#include + +#include + +namespace torch::jit::mobile { +using Stack = std::vector; + +// A CompilationUnit object is the one that gets executed by the lite +// interpreter. 
+// +// A CompilationUnit object contains a list of Method Objects. These are methods +// that appear in the original PyTorch Model. These method correspond to Python +// member functions of the Model class. +// +// Methods in turn contain a Function, and a back-pointer to the Module that +// owns this Method instance. +// +// A Function contains a Code Object (code_) which is defined in interpreter.h +// +// A Code object contains the following: +// +// std::vector instructions_; +// std::vector op_names_; +// std::vector> operators_; +// std::vector constants_; +// std::vector types_; +// size_t register_size_; // Aggregated output size. +// +class CompilationUnit { + public: + void register_function(std::unique_ptr fn); + std::vector>& methods() { + return methods_; + } + const std::vector>& methods() const { + return methods_; + } + Function* find_function(const c10::QualifiedName& qn); + const Function* find_function(const c10::QualifiedName& qn) const; + + void unsafeRemoveFunction(const int64_t index) { + methods_.erase(methods_.begin() + index); + } + + private: + std::vector> methods_; +}; + +// A Torch Mobile Module is a representation of the model (trained in case +// of inference). A Mobile Module contains +// +// 1. data (object_) +// 2. metadata (optional) about the model (metadata_ from the metadata.pkl +// file added after training) +// 3. Compilation Unit (cu_) +// +class TORCH_API Module { + public: + Module( + c10::intrusive_ptr object, + std::shared_ptr cu) + : object_(std::move(object)), cu_(std::move(cu)) {} + Module() = default; + Method get_method(const std::string& method_name) const; + template + c10::IValue run_method(const std::string& method_name, Types&&... 
args) { + return get_method(method_name)({IValue(std::forward(args))...}); + } + c10::IValue forward(std::vector inputs) { + return get_method("forward")(std::move(inputs)); + } + std::optional find_method(const std::string& basename) const; + + const std::string name() const { + return object_->name(); + } + const std::vector& slots() const { + return object_->slots(); + } + const c10::intrusive_ptr _ivalue() const { + return object_; + } + const std::vector parameters() const; + const std::map named_parameters() const; + std::string get_forward_method_debug_info(int64_t debug_handle) const; + std::string getModuleHierarchy(const int64_t debug_handle) const; + std::string getCallStack(const int64_t debug_handle) const; + /// Enables "training" mode. + void train(bool on = true); + /// Calls train(false) to enable "eval" mode. + void eval() { + train(/*on=*/false); + } + /// True if the module is in training mode. + bool is_training() const; + const std::unordered_map getMetadata() const { + return metadata_; + } + void setMetadata( + const std::unordered_map& metadata) { + metadata_ = metadata; + } + const std::vector get_methods() const; + + c10::IValue attr(const std::string& name, c10::IValue or_else) const { + if (auto r = object_->type()->findAttributeSlot(name)) { + return object_->getSlot(*r); + } + if (auto r = object_->type()->findConstantSlot(name)) { + return object_->type()->getConstant(*r); + } + return or_else; + } + + void setDebugTable(MobileDebugTable&& debug_table) { + debug_table_ = std::move(debug_table); + } + const MobileDebugTable& getDebugTable() const { + return debug_table_; + } + + void setHasDebugHandles(bool has_debug_handles) { + has_debug_handles_ = has_debug_handles; + } + + bool hasDebugHandles() const { + return has_debug_handles_; + } + + const CompilationUnit& compilation_unit() const { + return *cu_; + } + + void set_delete_memory(std::shared_ptr delete_mem) { + mem_to_delete_ = std::move(delete_mem); + } + + void 
set_min_operator_version(int64_t version) { + min_operator_version_ = version; + } + + int64_t min_operator_version() const { + return min_operator_version_; + } + + void set_bytecode_version(int64_t version) { + bytecode_version_ = version; + } + + int64_t bytecode_version() const { + return bytecode_version_; + } + + private: + friend class quantization::PTQQuanizationHelper; + + bool compareMethodSchemas( + const std::string& name_1, + const std::string& name_2); + + void unsafeRemoveMethod(const std::string& basename); + + void unsafeCopyMethod( + const std::string& new_method_name, + const Function& to_be_copied); + + c10::intrusive_ptr object_; + std::unordered_map metadata_; + std::shared_ptr cu_; + MobileDebugTable debug_table_; + bool has_debug_handles_ = false; + int64_t min_operator_version_ = 4; + int64_t bytecode_version_ = 4; + + // Extra handle for the module to delete when itself is deleted + std::shared_ptr mem_to_delete_; +}; + +struct TORCH_API ModuleInfo { + uint64_t bytecode_version; + uint64_t operator_version; + std::unordered_map opname_to_num_args; + std::unordered_set function_names; + std::unordered_set type_names; +}; +TORCH_API ModuleInfo get_module_info(const mobile::Module& module); + +} // namespace torch::jit::mobile diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h new file mode 100644 index 0000000000000000000000000000000000000000..694fe1df82c10a4227fd585282f2dd78af6c8ce8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/observer.h @@ -0,0 +1,110 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch { + +class MobileDebugInfo : public c10::DebugInfoBase { + public: + const std::string& getModelName() { + return model_name_; + } + + void setModelName(const std::string& model_name) { + model_name_ = model_name; + } + + const std::string& 
getMethodName() { + return method_name_; + } + + void setMethodName(const std::string& method_name) { + method_name_ = method_name; + } + + size_t getOpIdx() { + return op_idx_; + } + + void setOpIdx(size_t op_idx) { + op_idx_ = op_idx; + } + + private: + std::string model_name_; + std::string method_name_; + // TODO: Kimish + // If we launch a thread such as for at::launch, interepter continuation + // and if the caching allocator is enabled in the base thread + // then, in order to propagate this information, that is caching allocator + // is enabled, across thread boundaries we can use the mechanism provided + // by ThreadLocalDebugInfo + // Once the thread local MobileDebugInfo is accessible in the launched + // thread, it can be accessed in that thread and that thread can set + // its own thread local CachingAllocatorInfo. + // However, we cannot expect every launched thread to extract and set + // its own thread local copy of CachingAllocatorInfo. + // But this can be done in lite interpreter, where in the run method + // it can do info = + // c10::ThreadLocalDebugInfo::get(c10::DebugInfoKind::MOBILE_RUNTIME_INFO)) + // .get_caching_allocator_info(); + // GetThreadLocalCachingAllocatorInfo() = info; + // Other option is to have MobileDebugInfo itself be the place where thread + // local copy of CachingAllocatorInfo is stored. Then + // DefaultMobileCPUAllocator inspects this to decide if to use + // CachingAllocator. However, current lite interpreter does not support FORK, + // thus from the run method of lite interpreter we are not really gonna launch + // another instance of lite interpreter in a different thread. So for now not + // getting bothered about passing CachingAllocatorInfo across thread + // boundaries. 
c10::CachingAllocatorInfo caching_allocator_info; + size_t op_idx_ = 0; +}; + +class MobileModuleObserver { + public: + virtual ~MobileModuleObserver() = default; + + virtual void onEnterRunMethod(const int32_t) {} + virtual void onExitRunMethod( + const std::unordered_map&, + const std::string&, + const int32_t) {} + virtual void onFailRunMethod( + const std::unordered_map&, + const std::string&, + const int32_t, + const char*) {} + virtual void onEnterLoadModel(const int32_t) {} + virtual void onExitLoadModel( + const int32_t, + const std::unordered_map&) { + } // key: filename, value: file content + virtual void onFailLoadModel(const int32_t, const char*) {} + virtual void onFailLoadModel( + const int32_t, + const char*, + const std::unordered_map&) {} + virtual std::vector getDefaultExtraFiles() = 0; + virtual std::unordered_map processMetadataFromExtra( + const std::unordered_map&) = 0; +}; + +class MobileObserverConfig { + public: + void setModuleObserver(std::unique_ptr reporter) { + module_observer_ = std::move(reporter); + } + MobileModuleObserver* getModuleObserver() { + return module_observer_.get(); + } + + private: + std::unique_ptr module_observer_; +}; + +MobileObserverConfig& observerConfig(); + +} // namespace torch diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h new file mode 100644 index 0000000000000000000000000000000000000000..d7fe9521fd320f7910b1a245fa873b08aa0f6251 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/prim_ops_registery.h @@ -0,0 +1,28 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit::mobile { + +using Stack = std::vector; + +void registerPrimOpsFunction( + const std::string& name, + const std::function& fn); + +bool hasPrimOpsFn(const std::string& name); + +std::function& getPrimOpsFn(const std::string& name); + +class 
prim_op_fn_register { + public: + prim_op_fn_register( + const std::string& name, + const std::function& fn) { + registerPrimOpsFunction(name, fn); + } +}; + +} // namespace torch::jit::mobile diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h new file mode 100644 index 0000000000000000000000000000000000000000..117b8b595daa8ca7a9800403e2a338036b4352c2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/profiler_edge.h @@ -0,0 +1,115 @@ +#pragma once +#include +#include + +namespace torch::jit::mobile { + +// If we dont have kineto available then edge profiler does not +// work since it relies on Kineto +#ifdef USE_KINETO +class TORCH_API KinetoEdgeCPUProfiler { + public: + // This profiler only profiles KINETO events + // No GPU_FALLBACK or NVTX + /* + * @param m is the instance of mobile Module which is being profiled. + * Note that this implies that KinetoEdgeCPUProfiler can be used + * to profile specific Module (see usage below), unliked ProfilerKineto + * which can profile pytorch runtime in arbitrary scope. + * @param fname is the name of the file to which chrome trace is written. + * @param report_input_shapes: whether to record shapes of op's inputs. + * @param with_stack: whether to record model's python stacktrace for the op. + * @param with_flops: whether to report flops corresponding to the op. + * @param with_modules: whether to report original python module + * hierarchy to which the op belongs. 
+ * @param events + * @param adjust_vulkan_timestamps: whether to adjust vulkan timestamps from + * query pool to align with cpu event times + * + * Usage pattern for this profiler must be as follows: + * + * { + * KinetoEdgeCPUProfiler(m, filename, args); + * m.forward(...); + * } + * + * The reason being that KinetoEdgeCPUProfiler has a dependency on Module + * and thus it must not outlive it. + * + * Thus, when KinetoEdgeCPUProfiler is used as RAII to do profiling + * within certain scope. In that scope, the captured reference to + * Module will outlive KinetoEdgeCPUProfiler. This is gauranteed because + * KinetoEdgeCPUProfiler must be constructed later than Module, on stack. + * + * An example of the anti-pattern and wrong usage is: + * + * std::shared_ptr profiler(m, filename, args); + * m.forward(...); + * + * Since KinetoEdgeCPUProfiler object would then be constructed on heap + * with its lifetime managed manually or via smart pointers. + */ + KinetoEdgeCPUProfiler( + const torch::jit::mobile::Module& m, + const std::string& fname, + const bool report_input_shapes = false, + const bool profile_memory = false, + const bool with_stack = false, + const bool with_flops = false, + const bool with_modules = false, + std::vector events = {}, + const bool adjust_vulkan_timestamps = false); + + const std::unique_ptr& + disableProfiler(); + const std::unique_ptr& + getProfilerResult(); + void recordBackendEvent( + const int64_t start_time_us, + const int64_t end_time_us, + const int64_t debug_handle, + const std::string& event_name, + const std::string& backend_name); + void recordBackendMemoryEvent( + void* ptr, + int64_t alloc_size, + size_t total_allocated, + size_t total_reserved, + c10::Device device); + + ~KinetoEdgeCPUProfiler(); + + private: + /* + * We store a reference to Module to make such dependency explicit, since + * a Module reference is already stored in a functor. 
+ */ + const mobile::Module& m_; + std::string trace_file_name_; + std::unique_ptr profiler_result_; +}; + +TORCH_API KinetoEdgeCPUProfiler* getCurrentEdgeProfiler(); + +#define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER( \ + start_time_us, end_time_us, debug_handle, event_name, backend_name) \ + if (mobile::getCurrentEdgeProfiler()) { \ + mobile::getCurrentEdgeProfiler()->recordBackendEvent( \ + start_time_us, end_time_us, debug_handle, event_name, backend_name); \ + } + +#define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER( \ + ptr, alloc_size, total_allocated, total_reserved, device) \ + if (mobile::getCurrentEdgeProfiler()) { \ + mobile::getCurrentEdgeProfiler()->recordBackendMemoryEvent( \ + ptr, alloc_size, total_allocated, total_reserved, device); \ + } +#else + +#define RECORD_BACKEND_EVENT_TO_EDGE_PROFILER( \ + start_time_us, end_time_us, debug_handle, event_name, backend_name) + +#define RECORD_BACKEND_MEMORY_EVENT_TO_EDGE_PROFILER( \ + ptr, alloc_size, total_allocated, total_reserved, device) +#endif +} // namespace torch::jit::mobile diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/register_ops_common_utils.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/register_ops_common_utils.h new file mode 100644 index 0000000000000000000000000000000000000000..344b4dd25b858b171fee2db360dc7875759cb48b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/register_ops_common_utils.h @@ -0,0 +1,53 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +inline void noop(Stack& n) {} + +int64_t normalizeIndex(int64_t idx, int64_t list_size); + +// reference function THPVariable_to in python_variable_methods.cpp +static C10_UNUSED at::Tensor to_dispatch( + at::Tensor self, + std::optional device, + std::optional scalarType, + bool non_blocking, + bool copy) { + if (device && device->is_cuda()) { + 
at::globalContext().lazyInitCUDA(); + } + if (!device && !scalarType && !copy) { + return self; + } else if (!device) { + return self.to(*scalarType, non_blocking, copy); + } else if (!scalarType) { + return self.to(*device, non_blocking, copy); + } else { + return self.to(*device, *scalarType, non_blocking, copy); + } +} + +// Convert the tensor pointed to by \p data to a nested list. \p dim is the +// number of dimensions in the tensor and \p cur_dim is the dimension being +// processed by the current invocation. \p ty is the expected output IR type of +// the operation. \p is the scalar type of \p data. \p sizes and \p strides are +// the sizes and strides of the tensor operand and \p element_size is the size +// in bytes of one tensor element. +IValue tensorToListRecursive( + char* data, + int64_t cur_dim, + int64_t num_tensor_dims, + at::TypePtr ty, + at::ScalarType scalar_ty, + at::IntArrayRef sizes, + at::IntArrayRef strides, + size_t element_size); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/type_parser.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/type_parser.h new file mode 100644 index 0000000000000000000000000000000000000000..420e43a5c406e6d0350feb906a6896394fbde922 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/type_parser.h @@ -0,0 +1,54 @@ +#pragma once + +#include +#include +#include + +namespace c10 { + +class TORCH_API TypeParser { + public: + explicit TypeParser(std::string pythonStr); + explicit TypeParser(std::vector& pythonStrs); + + TypePtr parse(); + std::vector parseList(); + static const std::unordered_set& getNonSimpleType(); + static const std::unordered_set& getCustomType(); + std::unordered_set getContainedTypes(); + + private: + TypePtr parseNamedTuple(const std::string& qualified_name); + TypePtr parseCustomType(); + TypePtr parseTorchbindClassType(); + TypePtr parseNonSimple(const std::string& 
token); + + void expect(const char* s); + void expectChar(char c); + template + TypePtr parseSingleElementType(); + + void lex(); + + std::string next(); + c10::string_view nextView(); + void advance(); + C10_NODISCARD c10::string_view cur() const; + + std::string pythonStr_; + size_t start_; + c10::string_view next_token_; + + // Used for parsing string list + std::vector pythonStrs_; + std::unordered_map str_type_ptr_map_; + + // Store all contained types when parsing a string + std::unordered_set contained_types_; +}; + +TORCH_API TypePtr parseType(const std::string& pythonStr); + +TORCH_API std::vector parseType(std::vector& pythonStr); + +} // namespace c10 diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/upgrader_mobile.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/upgrader_mobile.h new file mode 100644 index 0000000000000000000000000000000000000000..6966b778c858351dd57a684102bf9bf124354552 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/mobile/upgrader_mobile.h @@ -0,0 +1,39 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include + +namespace torch::jit { +struct Instruction; +struct Upgrader { + int min_version; + int max_version; + std::string upgrader_name; + int index; +}; + +// From operator_versions.yaml +TORCH_API const std::unordered_map> +getOperatorVersionMapForMobile(); + +struct OperatorString { + const std::string name; + const std::string overload_name; + const std::optional num_specified_args; +}; + +struct ByteCodeFunctionWithOperator { + mobile::Function& function; + std::vector operators; +}; + +TORCH_API const std::vector& +getUpgraderBytecodeList(); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h new file mode 100644 index 
0000000000000000000000000000000000000000..2a66cc3228470c56fa09a8ac086bfe5c3c676d09 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/init.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +namespace torch::jit { + +void initJITBindings(PyObject* module); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h new file mode 100644 index 0000000000000000000000000000000000000000..eb9b59d08d854742ed9a00f072c56598d91e8b96 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/pybind.h @@ -0,0 +1,213 @@ +#pragma once + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +namespace py = pybind11; + +namespace torch::jit { + +// This is a variant of shared_ptr that "sees through" a wrapper. +// We use it to convert Value, Node, Block and node to "wrapped" Python +// values. When we destruct the C++ object, the wrapper's pointer will +// be set to 0 and any future dereferencing will throw. We need this +// because the Python objects may hang around after the C++ object +// has already been destroyed. 
+// This also needs the magic type_caster below, which is from the +// workaround offered in https://github.com/pybind/pybind11/issues/2751 +template +class unwrapping_shared_ptr { + static_assert( + std::is_same_v || + std::is_same_v || + std::is_same_v, + "unwrapping type only defined for Graph object types"); + + private: + std::shared_ptr> impl; + + public: + unwrapping_shared_ptr() : impl({}) {} + explicit unwrapping_shared_ptr(T* p) : impl(p->wrap()) { + impl->clear_cb = &clear_registered_instances; + } + T* get() const { + if (!impl->elem) { + throw std::logic_error("has been invalidated"); + } + return impl->elem; + } + // we need to disable the overloaded & for PyBind11 < 2.3 due. + // see https://github.com/pybind/pybind11/pull/1435 +#if (PYBIND11_VERSION_MAJOR > 2) || \ + ((PYBIND11_VERSION_MAJOR == 2) && (PYBIND11_VERSION_MINOR >= 3)) + T** operator&() { + if (!impl->elem) { + throw std::logic_error("has been invalidated"); + } + return &(impl->elem); + } +#endif +}; + +} // namespace torch::jit + +PYBIND11_DECLARE_HOLDER_TYPE(T, torch::jit::unwrapping_shared_ptr, true); + +namespace pybind11::detail { + +#define CREATE_UNWRAPPING_CASTER(Class) \ + template <> \ + struct type_caster : public type_caster_base { \ + public: \ + using type = Class; \ + using holder_type = torch::jit::unwrapping_shared_ptr; \ + \ + bool load(handle src, bool convert) { \ + return load_impl>(src, convert); \ + } \ + \ + explicit operator type*() { \ + return static_cast(value); \ + } \ + explicit operator type&() { \ + return *static_cast(value); \ + } \ + \ + protected: \ + friend class type_caster_generic; \ + \ + bool load_value(const value_and_holder& v_h) { \ + if (v_h.holder_constructed()) { \ + value = v_h.template holder().get(); \ + return true; \ + } else { \ + throw cast_error( \ + "Unable to cast from non-held to held instance (#Class& to Holder<#Class>)"); \ + } \ + } \ + } + +CREATE_UNWRAPPING_CASTER(torch::jit::Node); 
+CREATE_UNWRAPPING_CASTER(torch::jit::Value); +CREATE_UNWRAPPING_CASTER(torch::jit::Block); + +#undef CREATE_UNWRAPPING_CASTER + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(torch::jit::IValue, _("IValue")); + + bool load(handle src, bool) { + try { + value = torch::jit::toTypeInferredIValue(src); + return true; + } catch (std::exception& e) { + return false; + } + } + + static handle cast( + torch::jit::IValue src, + return_value_policy /* policy */, + handle /* parent */) { + return torch::jit::toPyObject(std::move(src)).release(); + } +}; + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(torch::jit::Symbol, _("Symbol")); + + bool load(handle src, bool) { + // TODO: Is there a way to py::cast that doesn't raise an exception on + // failure? Can we catch pybind11::cast_error here instead? 
+ std::string src_str; + try { + src_str = py::cast(src); + } catch (std::exception& e) { + return false; + } + value = torch::jit::Symbol::fromQualString(src_str); + return true; + } + + static handle cast( + torch::jit::Symbol src, + return_value_policy /* policy */, + handle /* parent */) { + return py::cast(std::string(src.toQualString()), return_value_policy::copy) + .release(); + } +}; + +template <> +struct type_caster { + public: + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + PYBIND11_TYPE_CASTER(torch::jit::AttributeKind, _("AttributeKind")); + + bool load(handle src, bool) { + return false; + } + + static handle cast( + torch::jit::AttributeKind src, + return_value_policy /* policy */, + handle /* parent */) { + return py::cast( + std::string(torch::jit::toString(src)), + return_value_policy::copy) + .release(); + } +}; + +// See https://github.com/pybind/pybind11/issues/637 +using ListCasterBase = pybind11::detail:: + list_caster, torch::jit::Node*>; +template <> +struct type_caster> : ListCasterBase { + static handle cast( + const std::vector& src, + return_value_policy, + handle parent) { + return ListCasterBase::cast(src, return_value_policy::reference, parent); + } + static handle cast( + const std::vector* src, + return_value_policy pol, + handle parent) { + return cast(*src, pol, parent); + } +}; + +} // namespace pybind11::detail + +namespace torch::jit { + +static inline py::tuple tuple_tail(const py::tuple& tup) { + py::tuple r(tup.size() - 1); + for (const auto i : c10::irange(1, tup.size())) { + r[i - 1] = tup[i]; + } + return r; +} + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_arg_flatten.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_arg_flatten.h new file mode 100644 index 0000000000000000000000000000000000000000..232f5b6ea08129b9ec29c4940a85d682c301d2c0 --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_arg_flatten.h @@ -0,0 +1,119 @@ +#pragma once + +#include +#include +#include +#include + +#include +#include +#include +#include + +namespace torch::jit::python { + +struct IODescriptor { + struct VariableMetadata { + VariableMetadata(const autograd::Variable& var) + : sizes(var.sizes().vec()), + type(var.scalar_type()), + device(var.device()), + requires_grad(var.requires_grad()) {} + + bool operator==(const VariableMetadata& o) const { + return std::tie(device, requires_grad, type, sizes) == + std::tie(o.device, o.requires_grad, o.type, o.sizes); + } + + static size_t hash(const VariableMetadata& m) { + return c10::get_hash(m.sizes, m.device, m.requires_grad, m.type); + } + + std::vector sizes; + at::ScalarType type; + at::Device device; + bool requires_grad; + }; + + bool operator==(const IODescriptor& o) const { + return std::tie(structure, metadata, grad_enabled) == + std::tie(o.structure, o.metadata, o.grad_enabled); + } + + static size_t hash(const IODescriptor& o) { + return c10::get_hash(o.structure, o.metadata, o.grad_enabled); + } + + void extend(const autograd::variable_list& list) { + metadata.reserve(metadata.size() + list.size()); + for (auto& var : list) + metadata.emplace_back(var); + } + + // Description of argument structure. Variables are replaced with + // different characters, depending on their flags, beginnings and + // ends of tuples and lists are denoted by a pair of parenthesis + // of their corresponding kind. They should always be paired. + // Example desc: (vv[v(v)v]) + // NOTE: if extend() was ever called then metadata.size() can be + // different than the number of 'v's in structure. 
+ std::string structure; + std::vector strings; + std::vector metadata; + bool grad_enabled = false; +}; + +static inline std::ostream& operator<<( + std::ostream& out, + const IODescriptor::VariableMetadata& meta) { + at::Device meta_device = meta.device; + auto& t = at::getDeprecatedTypeProperties( + meta_device.is_cpu() ? at::Backend::CPU : at::Backend::CUDA, meta.type); + out << t << "(requires_grad=" << meta.requires_grad; + if (meta_device.is_cuda()) { + out << ", device=" << meta_device.index(); + } + out << ") {"; + for (const auto i : c10::irange(meta.sizes.size())) { + if (i > 0) + out << ", "; + out << meta.sizes[i]; + } + out << "}"; + return out; +} + +static inline std::ostream& operator<<( + std::ostream& out, + const IODescriptor& desc) { + out << desc.structure << "\n"; + out << " with grad_enabled=" << desc.grad_enabled << "\n"; + for (const auto i : c10::irange(desc.metadata.size())) { + out << " with v" << i << " having type " << desc.metadata[i] << "\n"; + } + return out; +} + +struct ParsedArgs { + // Flat vector of Variables found in arguments + autograd::variable_list vars; + // Metadata describing nesting of objects received from Python and + // metadata of vars and whether grad is enabled. 
+ IODescriptor desc; + + void extend(const autograd::variable_list& list) { + if (list.empty()) + return; + vars.reserve(vars.size() + list.size()); + for (auto& var : list) + vars.emplace_back(var); + desc.extend(list); + } +}; + +ParsedArgs flatten(py::handle obj); +PyObject* unflatten( + at::ArrayRef vars, + const IODescriptor& structure); + +} // namespace torch::jit::python diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_custom_class.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_custom_class.h new file mode 100644 index 0000000000000000000000000000000000000000..3c2c58efbde0c904b82b17623ff087db8bf824d8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_custom_class.h @@ -0,0 +1,19 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +void initPythonCustomClassBindings(PyObject* module); + +struct ScriptClass { + ScriptClass(c10::StrongTypePtr class_type) + : class_type_(std::move(class_type)) {} + + py::object __call__(const py::args& args, const py::kwargs& kwargs); + + c10::StrongTypePtr class_type_; +}; + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_dict.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_dict.h new file mode 100644 index 0000000000000000000000000000000000000000..c8433a7df6cdd92ea8d3a0c3ff7b240c73b6cf49 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_dict.h @@ -0,0 +1,127 @@ +#pragma once + +#include +#include +#include +#include + +namespace torch::jit { + +void initScriptDictBindings(PyObject* module); + +/// An iterator over the keys of ScriptDict. This is used to support +/// .keys() and iteration. 
+class ScriptDictKeyIterator final { + public: + ScriptDictKeyIterator( + c10::impl::GenericDict::iterator iter, + c10::impl::GenericDict::iterator end) + : iter_(std::move(iter)), end_(std::move(end)) {} + at::IValue next(); + + private: + c10::impl::GenericDict::iterator iter_; + c10::impl::GenericDict::iterator end_; +}; + +/// An iterator over the key-value pairs of ScriptDict. This is used to support +/// .items(). +class ScriptDictIterator final { + public: + ScriptDictIterator( + c10::impl::GenericDict::iterator iter, + c10::impl::GenericDict::iterator end) + : iter_(std::move(iter)), end_(std::move(end)) {} + at::IValue next(); + + private: + c10::impl::GenericDict::iterator iter_; + c10::impl::GenericDict::iterator end_; +}; + +/// A wrapper around c10::Dict that can be exposed in Python via pybind +/// with an API identical to the Python dictionary class. This allows +/// dictionaries to have reference semantics across the Python/TorchScript +/// boundary. +class ScriptDict final { + public: + // Constructor. + ScriptDict(const at::IValue& data) + : dict_(at::AnyType::get(), at::AnyType::get()) { + TORCH_INTERNAL_ASSERT(data.isGenericDict()); + dict_ = data.toGenericDict(); + } + + // Get the type of the dictionary. + at::DictTypePtr type() const { + return at::DictType::create(dict_.keyType(), dict_.valueType()); + } + + // Return a string representation that can be used + // to reconstruct the instance. + std::string repr() const { + std::ostringstream s; + s << '{'; + bool f = false; + for (auto const& kv : dict_) { + if (f) { + s << ", "; + } + s << kv.key() << ": " << kv.value(); + f = true; + } + s << '}'; + return s.str(); + } + + // Return an iterator over the keys of the dictionary. + ScriptDictKeyIterator iter() const { + auto begin = dict_.begin(); + auto end = dict_.end(); + return ScriptDictKeyIterator(begin, end); + } + + // Return an iterator over the key-value pairs of the dictionary. 
+ ScriptDictIterator items() const { + auto begin = dict_.begin(); + auto end = dict_.end(); + return ScriptDictIterator(begin, end); + } + + // Interpret the dictionary as a boolean; empty means false, non-empty means + // true. + bool toBool() const { + return !(dict_.empty()); + } + + // Get the value for the given key. Throws std::out_of_range if the key does + // not exist. + at::IValue getItem(const at::IValue& key) { + return dict_.at(key); + }; + + // Set the value for the given key. + void setItem(const at::IValue& key, const at::IValue& value) { + dict_.insert_or_assign(key, value); + }; + + // Check whether the dictionary contains the given key. + bool contains(const at::IValue& key) { + return dict_.contains(key); + } + + // Delete the given key from the dictionary. + bool delItem(const at::IValue& key) { + return dict_.erase(key); + } + + // Get the size of the dictionary. + int64_t len() const { + return dict_.size(); + } + + // A c10::Dict instance that holds the actual data. 
+ c10::impl::GenericDict dict_; +}; + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ir.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ir.h new file mode 100644 index 0000000000000000000000000000000000000000..26adf8c0e49419b578c233d369f6c79ef43f6796 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ir.h @@ -0,0 +1,50 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +void initPythonIRBindings(PyObject* module); + +// execute a Python function, used for Ops we can't optimize but that we want to +// optimize around +struct ConcretePythonOp : public PythonOp { + static Symbol Kind; + + ConcretePythonOp(Graph* graph) : PythonOp(graph, ::c10::prim::PythonOp) {} + ConcretePythonOp* init( + THPObjectPtr&& pyobj, + const std::string& cconv, + pyobj_list&& scalar_args) { + this->pyobj = std::move(pyobj); + this->scalar_args = std::move(scalar_args); + this->cconv = cconv; + return this; + } + // The Python object which contains the implementation of this function. + // This is either a class (non-legacy) or an object (legacy). See + // TraceInterpreterState for execution semantics. + THPObjectPtr pyobj; + // The calling convention for the Python function. + // 'c' -- constant argument + // 'd' -- dynamic argument + std::string cconv; + // Scalar arguments to the Python function. Not necessarily passed to + // the function in this order; see cconv for the correct order. 
+ std::vector scalar_args; + + std::string name() const override; + void cloneFrom(Node* other_) override; + Node* allocNewInstance(Graph* g) override { + return new ConcretePythonOp(g); + } + // recover the autograd.Function instance, if this PythonOp's function + // was originally SomeFunction.apply + // used in ONNX for discovering symbolics + std::optional autogradFunction() const override; + void writeScalars(std::ostream& out) const override; + void lint_python() const override; +}; + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ivalue.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ivalue.h new file mode 100644 index 0000000000000000000000000000000000000000..a5475bfb849960276142804633c15f14ea623393 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/python_ivalue.h @@ -0,0 +1,97 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace py = pybind11; + +namespace c10::ivalue { + +// concrete ivalue Holder that hold a py::object +struct C10_EXPORT ConcretePyObjectHolder final : PyObjectHolder { + public: + static c10::intrusive_ptr create(py::object py_obj) { + return c10::make_intrusive(std::move(py_obj)); + } + + static c10::intrusive_ptr create(const py::handle& handle) { + py::gil_scoped_acquire ag; + return c10::make_intrusive( + handle.cast()); + } + + PyObject* getPyObject() override { + return py_obj_.ptr(); + } + + InferredType tryToInferType() override { + pybind11::gil_scoped_acquire ag; + return torch::jit::tryToInferType(py_obj_); + } + + IValue toIValue(const TypePtr& type, std::optional N = std::nullopt) + override { + pybind11::gil_scoped_acquire ag; + return torch::jit::toIValue(py_obj_, type, N); + } + + std::string toStr() override { + pybind11::gil_scoped_acquire ag; + return py::str(py_obj_); + } + + std::vector extractTensors() override { + // We could implement this entirely 
in C++ via pybind11 but it turns out to + // be substantially slower. Namely, the total time taken by markCompleted on + // a CUDAFuture is 21.5us with this implementation, but goes up to 58.7us + // when using C++. The reason is unclear. + try { + pybind11::gil_scoped_acquire ag; + static py::object& extractorFn = *new py::object( + py::module::import("torch._jit_internal").attr("_extract_tensors")); + return extractorFn(py_obj_).cast>(); + } catch (py::error_already_set& e) { + auto err = std::runtime_error( + c10::str("Cannot extract tensors from value: ", e.what())); + { + pybind11::gil_scoped_acquire ag; + e.restore(); + PyErr_Clear(); + } + throw std::runtime_error(err); + } + } + + // Note [Destructing py::object] + // ~~~~~~~~~~~~~~~~~~~~~~~~~~ + // + // (1) Why py_obj_ = py::none(); does not work. Because we also need to + // acquire GIL when destructing py::object of None that de-references None. + // https://docs.python.org/3/c-api/none.html#c.Py_RETURN_NONE + // + // https://stackoverflow.com/questions/15287590/why-should-py-increfpy-none-be-required-before-returning-py-none-in-c + // + // (2) Why we need to call dec_ref() explicitly. Because py::object of + // nullptr, on destruction, effectively does nothing because of it calls + // Py_XDECREF(NULL) underlying. + // https://docs.python.org/3/c-api/refcounting.html#c.Py_XDECREF + ~ConcretePyObjectHolder() override { + pybind11::gil_scoped_acquire ag; + py_obj_.dec_ref(); + // explicitly setting PyObject* to nullptr to prevent py::object's dtor to + // decref on the PyObject again. 
+ py_obj_.ptr() = nullptr; + } + + // explicit construction to avoid errornous implicit conversion and + // copy-initialization + explicit ConcretePyObjectHolder(py::object py_obj) + : py_obj_(std::move(py_obj)) {} + + private: + py::object py_obj_; +}; + +} // namespace c10::ivalue diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/script_init.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/script_init.h new file mode 100644 index 0000000000000000000000000000000000000000..65c8ad3be6850e6629c52238b9d64a20062c5c0a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/script_init.h @@ -0,0 +1,7 @@ +#pragma once + +#include + +namespace torch::jit { +void initJitScriptBindings(PyObject* module); +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/utf8_decoding_ignore.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/utf8_decoding_ignore.h new file mode 100644 index 0000000000000000000000000000000000000000..3b50bce86ff5b9cb5984f16b3e2c248cded67569 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/python/utf8_decoding_ignore.h @@ -0,0 +1,6 @@ +#pragma once +#include +namespace torch::jit { +TORCH_API void setUTF8DecodingIgnore(bool o); +TORCH_API bool getUTF8DecodingIgnore(); +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/autodiff.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/autodiff.h new file mode 100644 index 0000000000000000000000000000000000000000..32a8166caf0e5936f3eea292aa7a895ad6ddbc58 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/autodiff.h @@ -0,0 +1,94 @@ +#pragma once + +#include +#include + +#include +#include + +namespace torch::jit { + +using value_list = std::vector; +// clang-format off +// Example showcasing 
how Gradient is constructed: +// +// Let's assume we have a function f, `m` and `n` do not require grad +// (`n` can depend only on `m`): +// y, n = f(x, m) +// +// Now, let's assume that the reverse of f (called f') needs to use values of `x`, `t` and `y`. +// `t` is an intermediate value produced in the body of f, and let's assume that it requires +// grad too. +// +// In this case differentiate(f) will return this: +// y, n, t = f(x, m) // `t` is appended to the output list +// dx = f'(dy, dt, x, t, y) // No `dm` or `dn` because they do not require gradient +// // All needed values from f are prepended to the input list +// +// f_real_outputs = 2 // Only first two outputs were present in f originally +// df_input_vjps = {0, 2} // i.e. connect grad_fn of y and t variables produced by f, +// y t // with y's output_nr = 0 and t's output_nr = 1 +// df_input_captures = {I0, O2, O0} // Order matches the prefix of inputs to df +// x t y +// df_output_vjps = {0} // i.e. connect next_edge[0] of grad_fn to x's (grad_fn, output_nr). +// +// Terminology: vjp = vector-jacobian product +// clang-format on + +struct Gradient { + explicit operator bool() const { + return df != nullptr; + } + std::shared_ptr f; + std::shared_ptr df; + + // Describes how to construct outputs of f from what its graph will return. + // This is necessary because some trailing outputs are intermediates produced + // only to be saved for df (and should be ignored). + size_t f_real_outputs = 0; // initialized for safety. + + // df inputs are split into two sections: vjps (aka grad_outputs) and + // captures. VJPs are "seeds" for the gradient computation given for each + // input capture of an Output kind. Captures are values the need to be saved + // when f is run. We handle inputs specially, because this allows us to avoid + // adding extra vjps as df inputs. + + std::vector df_input_vjps; // Offsets into f's outputs. 
+ // capture can come from inputs or outputs + std::vector df_input_captured_inputs; // Offsets into f's inputs + std::vector df_input_captured_outputs; // Offsets into f's outputs + + // df will produce vjps for a subset of inputs of f that required grad. + // df_output_vjps[idx] == inp_idx means that idx-th output of df produces a + // vjp for inp_idx-th input of f. + std::vector df_output_vjps; // Offsets into f's inputs. + + // How to use gradient to implement a differentiable autograd function: + // When running f: + // - Unwrap input Variables + // - Run f's graph + // - Create grad_fn + // - Wrap outputs in Variables (assume we have a tensor_outputs array): + // outputs = map(Variable, tensor_output) + // for i, offset in enumerate(df_input_vjps): + // outputs[offset].set_grad_fn(grad_fn, output_nr=i) + // - Use df_output_vjps to connect next_edges of grad_fn: + // for idx in df_output_vjps: + // grad_fn.add_next_edge(inputs[idx].gradient_edge()) + // - Save captures for df (care needs to be taken to use SavedVariables for + // inputs and outputs that we will actually return) + // - Return outputs[:f_real_outputs] + // + // When running df: + // - Concatenate received vjps and captured Variables + // - Interpret df + // - Wrap outputs of df into Variables (that don't require grad) +}; +TORCH_API Gradient differentiate(std::shared_ptr& graph); + +// can we take a derivative of this node symbolically? 
+TORCH_API bool isDifferentiable(const Node* n); +TORCH_API bool isDifferentiable(Graph& g); +TORCH_API bool isZero(Value* v); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/calculate_necessary_args.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/calculate_necessary_args.h new file mode 100644 index 0000000000000000000000000000000000000000..e1aff151f35e421e1d06be6de259953b83c23ba1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/calculate_necessary_args.h @@ -0,0 +1,69 @@ +#pragma once + +#include +#include +#include + +namespace torch::jit { + +// Calculates the number of args that need to be passed in. +// Less args may be needed if defaults are provided. +// Returns: {number args needed, number of out args} +inline std::pair CalculateNecessaryArgs( + const std::vector& schema_args, + at::ArrayRef actual_inputs, + bool allow_trailing_out_args) { + if (schema_args.empty()) { + return std::make_pair(0, 0); + } + + // count number of out arguments + int64_t schema_idx = static_cast(schema_args.size()) - 1; + if (allow_trailing_out_args) { + // skip over out arguments in the end. 
+ while (schema_idx >= 0) { + const auto& current_arg = schema_args.at(schema_idx); + if (!current_arg.is_out()) { + break; + } + schema_idx--; + } + } + + int64_t num_out = static_cast(schema_args.size()) - schema_idx - 1; + + if (schema_args.size() < actual_inputs.size()) { + return std::make_pair(actual_inputs.size(), num_out); + } + + // if it is the default args, we reset the index to the last element + if (!allow_trailing_out_args) { + schema_idx = schema_args.size() - 1; + } + // keeps track of trailing unnecessary args + while (schema_idx >= 0) { + // this means it is not default argument, so it is necessary + if (!schema_args.at(schema_idx).default_value().has_value()) { + return std::make_pair(schema_idx + 1, num_out); + } else { + auto schema_value = + schema_args.at(schema_idx).default_value().value().toIValue(); + // non-const value will become nullptr here, so will be marked necessary + // non-const would include prim::ListConstruct, prim::DictConstruct as + // well. + auto actual_value = toIValue(actual_inputs[schema_idx]); + if (!actual_value.has_value()) { + return std::make_pair(schema_idx + 1, num_out); + } + // if the IR has same value as default value of the schema, + // it is not necessary argument. + if (schema_value != actual_value.value()) { + return std::make_pair(schema_idx + 1, num_out); + } + } + schema_idx--; + } + return std::make_pair(0, num_out); +} + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..59f5aa796f76cf4080135be5de9c83488035f23d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/decomposition_registry.h @@ -0,0 +1,33 @@ +#pragma once +// This file is temporary until native_functions.yaml and derivatives.yaml are +// merged. 
Ideally this should all go into native_functions.yaml + +#include +#include + +namespace torch::jit { + +TORCH_API std::optional> GetDecomposition( + const FunctionSchema& schema); + +TORCH_API void RegisterDecomposition( + const FunctionSchema& schema, + std::shared_ptr g); + +TORCH_API void RunDecompositions(std::shared_ptr g); + +TORCH_API std::optional GetDecompositionFunction( + const FunctionSchema& schema); + +// For invocation in C++, recommended is to assign to static local variable +TORCH_API Function* GetDecompositionExecutor(const char* schema_literal); + +TORCH_API Function* GetDecompositionExecutor(const FunctionSchema& schema); + +TORCH_API void run_jit_decomposition( + const c10::OperatorHandle& op, + torch::jit::Stack* stack); + +TORCH_API bool has_jit_decomposition(const FunctionSchema& schema); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/exception_message.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/exception_message.h new file mode 100644 index 0000000000000000000000000000000000000000..e3f00272a999f3d9431528db7d8e74ff0cc3d823 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/exception_message.h @@ -0,0 +1,29 @@ +#pragma once +#include +#include + +namespace torch::jit { + +struct ExceptionMessage { + ExceptionMessage(const std::exception& e) : e_(e) {} + + private: + const std::exception& e_; + friend std::ostream& operator<<( + std::ostream& out, + const ExceptionMessage& msg); +}; + +inline std::ostream& operator<<( + std::ostream& out, + const ExceptionMessage& msg) { + auto c10_error = dynamic_cast(&msg.e_); + if (c10_error) { + out << c10_error->what_without_backtrace(); + } else { + out << msg.e_.what(); + } + return out; +} + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor.h 
b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor.h new file mode 100644 index 0000000000000000000000000000000000000000..971e45e818ca6df32c774823e3ca802721f61079 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor.h @@ -0,0 +1,148 @@ +#pragma once + +#include +#include + +#include +#include +#include +#include +#include + +C10_DECLARE_bool(torch_jit_enable_new_executor); + +C10_DECLARE_bool(torch_jit_execution_plan_reuse_code_graph); + +namespace torch::jit { +struct GraphExecutorState; +struct Code; + +enum ExecutorExecutionMode { + SIMPLE, + PROFILING, +}; + +struct ExecutionPlan { + ExecutionPlan() = default; + ExecutionPlan(std::shared_ptr graph, std::string function_name) + : code(graph, std::move(function_name)), + graph( + FLAGS_torch_jit_execution_plan_reuse_code_graph + ? code.graph() + : std::move(graph)) {} + + operator bool() const { + return static_cast(graph); + } + + Code code; + std::shared_ptr graph; +}; + +// Notice that those structs don't manage lifetime of their members. +// They are only valid only right after you call getDebugState() and should +// never be used again once another GraphExecutor function is called. 
+ +// NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) +struct GraphExecutorState { + const Graph* graph = nullptr; + ExecutionPlan fallback; // XXX: members of this field are optional + std::unordered_map execution_plans; +}; + +struct TORCH_API EnableProfilingGuard { + EnableProfilingGuard(); + ~EnableProfilingGuard(); + + private: + bool old_executor_mode = false; + bool old_get_optimize = false; +}; + +struct GraphExecutorImplBase; +struct TORCH_API GraphExecutor { + GraphExecutor() = default; + GraphExecutor(const std::shared_ptr& graph, std::string function_name); + + GraphExecutor( + const std::shared_ptr& graph, + std::string function_name, + ExecutorExecutionMode executor_mode); + + void run(Stack& inputs); + c10::intrusive_ptr runAsync( + Stack& stack, + TaskLauncher taskLauncher = at::launch); + + // `remaining_bailout_depth` stands for the maximum number of profiled and + // specialized recompilations allowed for the current `GraphExecutor`. if + // remaining_bailout_depth is equal to 0, `GraphExecutor` won't perform any + // profiling and specialization. This is also equivalent to the + // SIMPLE_EXECUTOR mode. if remaining_bailout_depth is greater than 0, + // `GraphExecutor` will profile and specialize its input graph based on the + // profiled information whenever a bailout check is failed/triggered, a new + // `GraphExecutor` will be created. This new `GraphExecutor`'s + // remaining_bailout_depth will be reduced by 1. + // If no bailout depth is passed, the depth will be initialized from the + // current global fusion strategy settings. 
+ const ExecutionPlan& getPlanFor( + Stack& inputs, + std::optional remaining_bailout_depth = std::nullopt); + GraphExecutorState getDebugState(); + + void debugFlushCompilationCache(); + + bool isOptimized() const; + + private: + std::shared_ptr pImpl; +}; + +TORCH_API Node* replaceBlockWithFallbackGraph( + Block* b, + ArrayRef inputs); + +// These passes need to run before it is valid to pass to the interpreter +// regardless of whether sizes have been specialized or not. +TORCH_API void runRequiredPasses(const std::shared_ptr& g); + +TORCH_API void debugSetFusionGroupInlining(bool state); +TORCH_API bool getFusionGroupInlining(); + +TORCH_API void debugSetAutodiffSubgraphInlining(bool state); +TORCH_API std::shared_ptr lastExecutedOptimizedGraph(); + +TORCH_API std::atomic& getProfilingMode(); +TORCH_API std::atomic& getExecutorMode(); +TORCH_API std::atomic& getNumProfiledRuns(); +TORCH_API size_t getBailoutDepth(); +TORCH_API bool IsNewExecutorEnabled(); + +struct TORCH_API GraphOptimizerEnabledGuard { + GraphOptimizerEnabledGuard(bool state) + : old_state_(getGraphExecutorOptimize()) { + setGraphExecutorOptimize(state); + } + + ~GraphOptimizerEnabledGuard() { + setGraphExecutorOptimize(old_state_); + } + + bool old_state_; +}; + +namespace detail { + +GraphExecutor* getGradExecutor(Operation& op); + +GraphExecutor* getDifferentiableGraphOpExecutor(Operation& op); + +// for debugging information we expose a way to get the last actually +// run graph. Previous approaches allowed querying the GraphExecutor +// for what graph it would run in certain circumstances (graphFor), but +// this is fragile because we sometimes change how these decisions are made. +// This interface still allows our tests to look at optimized graphs, but +// with less plumbing. 
+} // namespace detail + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor_impl.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..70069ac1907b0f54fa425ceecee20d48801b13ed --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_executor_impl.h @@ -0,0 +1,113 @@ +#pragma once +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +void packGradient(const Gradient& gradient, Node* dnode); +bool needsGradient(const std::shared_ptr& graph); +void runOptimization( + std::shared_ptr& graph, + bool unroll_non_constant_loops = true, + bool const_prop_user_classes = true); +void runNondiffOptimization( + std::shared_ptr& graph, + bool strict_fuser_check = false); +void debugSetAutodiffSubgraphInlining(bool state); +bool TORCH_API getAutodiffSubgraphInlining(); + +void debugSetFusionGroupInlining(bool state); +bool getFusionGroupInlining(); + +// Tunable parameters for deciding when to create/keep subgraphs of +// differentiable code +const size_t autodiffSubgraphNodeThreshold = 2; +const size_t autodiffSubgraphInlineThreshold = 5; + +// a Graph can be created via tracing, or via a language-based frontend +// GraphExecutor runs it. It can run the same graph on many different sizes +// and different requires_grad states, and handles specializations for each +// situation. GraphExecutor is completely unaware of tracing or module +// parameters to keep the tracing concerns separated. 
+struct GraphExecutorImplBase { + static std::shared_ptr prepareGraph( + const std::shared_ptr& graph) { + auto copy = graph->copy(); + EraseShapeInformation(copy); + return copy; + } + + GraphExecutorImplBase( + const std::shared_ptr& graph, + std::string function_name) + : graph(prepareGraph(graph)), + function_name_(std::move(function_name)), + num_inputs(this->graph->inputs().size()), + num_outputs(this->graph->outputs().size()) {} + + // entry point where execution begins + void run(Stack& stack); + c10::intrusive_ptr runAsync( + Stack& stack, + TaskLauncher taskLauncher = at::launch); + + virtual const ExecutionPlan& getPlanFor( + Stack& stack, + std::optional remaining_bailout_depth = std::nullopt) = 0; + virtual GraphExecutorState getDebugState() = 0; + virtual ~GraphExecutorImplBase() = default; + + virtual bool isOptimized() const { + return false; + } + + protected: + friend struct GraphExecutor; + + // The unoptimized starting graph. This field is effectively const, but we + // can't make it so because Graph::copy() is not const (and making it const is + // not that easy at this point). + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::shared_ptr graph; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::string function_name_; + + // If false, we'll run the graph as we get it, without any optimizations. + // Useful for debugging. + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const size_t num_inputs; + // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + const size_t num_outputs; + + // GraphExecutors can be accessed from multiple threads, so this thread needs + // to be held every time we access the fallback or plan_cache. 
+ // NOLINTNEXTLINE(cppcoreguidelines-non-private-member-variables-in-classes) + std::mutex compile_mutex; +}; + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_iterator.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_iterator.h new file mode 100644 index 0000000000000000000000000000000000000000..c008902a2e8f71f1cb9eb2ce58b250971f488b50 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/graph_iterator.h @@ -0,0 +1,147 @@ +#include + +namespace torch::jit { + +// This class facilitates depth-first iteration over all nodes in a graph. +class DepthFirstGraphNodeIterator { + Node* current_; + + public: + // Constructor. + explicit DepthFirstGraphNodeIterator(std::shared_ptr& graph) + : current_(*(graph->block()->nodes().begin())) {} + + // Moves up and to the next node (may move up recursively). + void move_up() { + if (current_ == nullptr) { + return; + } + // Basically we start from the child block (which is current_) + // and we try to find the block that owns it. Now we need to check + // if that block is the graph root block, or if it is an If/Loop/etc + // block. + // + // If it's the graph root block we can stop because there is no "up" + // but if it is a node (e.g. If/Loop/etc) we need to apply logic + // based on where we are coming from to move to the next block. + // This might mean that we need to traverse up again (e.g. if we've + // reached the end of the else clause in an if block we need to go) + // up to the parent block that contains the if. + // + // Similarly if we've reached the end of the parent block containing + // the else clause we might need to go up again so this is a recursive + // function. + // + // BlockNode (if/loop/with) + // | + // [Block1] ... 
[Block2] + // | + // [ Node1, Node2, Node3, FromNode] + // + auto parent_block = current_->owningBlock(); + TORCH_INTERNAL_ASSERT(parent_block, "Every node must be owned by a block"); + + // Get the node that owns the parent block. This node has to be an if, + // loop, or with. + auto parent_node = parent_block->owningNode(); + if (parent_node == nullptr) { + // If there's no node that owns this current block then we're at the + // top of the graph and since we're trying to move up we have reached + // the end of the traversal. + current_ = nullptr; + return; + } + + // Check the type of node this root is. + if (parent_node->kind() == prim::If) { + // Need to check if we came from the `then` branch or the `else` branch. + auto* then_block = parent_node->blocks().at(0); + auto* else_block = parent_node->blocks().at(1); + + if (parent_block == else_block) { + // If else block then we move to the next node in the parent block. + current_ = parent_node->next(); + if (current_->kind() == prim::Return) { + move_up(); + } + } else { + // If then block then move to the else block if it is not empty. + TORCH_INTERNAL_ASSERT(parent_block == then_block); + bool else_block_empty = + else_block->nodes().begin() == else_block->nodes().end(); + + if (!else_block_empty) { + current_ = *(else_block->nodes().begin()); + } else { + // Since it's empty we move to the next node. + current_ = parent_node->next(); + if (current_->kind() == prim::Return) { + move_up(); + } + } + } + } else if ( + parent_node->kind() == prim::Loop || + parent_node->kind() == prim::With) { + current_ = parent_node->next(); + if (current_->kind() == prim::Return) { + move_up(); + } + } else { + TORCH_INTERNAL_ASSERT( + false, "Only if/loop/with nodes should have child blocks"); + } + } + + // Moves to the next adjacent node or up in to the parent if that is not + // possible. + void move_next() { + if (current_ == nullptr) { + return; + } + + // Increment to the next node in the current block. 
+ current_ = current_->next(); + + // Check if we're at the end of the block. If so we need + // to move upwards (if it makes sense to). + if (current_->kind() == prim::Return) { + move_up(); + } + } + + // Moves to the next node in the graph into children if it can. + void move_into() { + if (current_ == nullptr) { + return; + } + + // Check if we're currently on a node that contains sub-nodes. + if (current_->kind() == prim::If || current_->kind() == prim::Loop || + current_->kind() == prim::With) { + auto* first_block = current_->blocks().at(0); + current_ = first_block->param_node(); + // Move next will move up and out of the current node if the block is + // empty. `move_up` which is called by `move_next` will handle the + // difference between If, Loop, and With blocks appropriately. + move_next(); + } else { + move_next(); + } + } + + // Get the next Node in the graph. \returns nullptr if there are no nodes + // left. + Node* next() { + auto result = current_; + + // Try move into the existing node to set the next node to be returned. + // This will move to the next node if not possible, or move upwards and + // to the next. 
+ move_into(); + + return result; + } +}; + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/interpreter.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/interpreter.h new file mode 100644 index 0000000000000000000000000000000000000000..ffafd3ab096a9bd8e2eea5ea2f4a2cacb46f6f9d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/interpreter.h @@ -0,0 +1,159 @@ +#pragma once +#include +#include +#include + +#include +#include +#include +#include +#include + +C10_DECLARE_bool(torch_jit_disable_warning_prints); +C10_DECLARE_bool(torch_jit_enable_rethrow_caught_exception); + +namespace at { +class Tensor; +TORCH_API void launch(std::function func); +} // namespace at +namespace c10 { +struct IValue; +struct OperatorName; +} // namespace c10 + +namespace torch::jit { + +// The interpreter run Graphs with Tensor inputs and Tensor outputs +// a separate component in the autograd handles unwrapping and wrapping +// variable objects for use in the interpreter. 
+namespace interpreter { +struct CodeImpl; +} + +struct Node; +struct GraphExecutor; +struct InterpreterStateImpl; +struct Graph; +struct Node; +struct Instruction; +using Stack = std::vector; +using c10::ivalue::Future; +using TaskLauncher = std::function)>; + +struct TORCH_API Code { + Code() = default; + explicit Code(interpreter::CodeImpl* pImpl); + // remaining_bailout_depth is irrelevant in a `Code` object unless the `Code` + // is directly created by `GraphExecutor` in which case it's likely to contain + // `prim::BailOut`s to control the maximum depth of bailout chains + explicit Code( + const std::shared_ptr& graph, + std::string function_name, + size_t remaining_bailout_depth = 0); + + const std::vector& grad_executors(); + const std::vector& diff_graph_op_executors(); + + explicit operator bool() const { + return pImpl != nullptr; + } + size_t num_inputs() const; + size_t num_outputs() const; + size_t num_bailouts() const; + const std::vector& constant_table() const; + const std::vector& type_table() const; + const std::vector& instructions() const; + const std::unordered_map& op_to_num_specified_args() + const; + const std::vector& instructions_source() const; + void request_bailout(size_t index); + size_t register_size() const; + std::shared_ptr graph() const; + + private: + std::shared_ptr pImpl; + friend struct InterpreterStateImpl; + friend std::ostream& operator<<(std::ostream& out, const Code& code); +}; + +struct TORCH_API MobileCode : Code { + explicit MobileCode( + const std::shared_ptr& graph, + std::string function_name, + bool emit_default_input_instructions = true, + bool support_default_args_before_out = true, + bool emit_promoted_ops = true, + size_t remaining_bailout_depth = 0); +}; + +struct InterpreterState { + TORCH_API InterpreterState( + const Code& code, + TaskLauncher taskLauncher = at::launch); + TORCH_API void run(Stack& stack); + TORCH_API c10::intrusive_ptr runAsync(Stack& stack); + c10::intrusive_ptr getFuture(); + + private: 
+ InterpreterState(c10::intrusive_ptr pImpl); + // Ideally we should use c10::intrusive_ptr for pImpl; + // but intrusive_ptr requires full definition of InterpreterStateImpl, + // which we need to hide in the header. + c10::intrusive_ptr pImpl; + friend struct InterpreterStateImpl; +}; + +// Created by wait() +struct Suspend : public std::exception { + const char* what() const noexcept override { + return "Suspend"; + } + + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + explicit Suspend(c10::intrusive_ptr future_) + : future(std::move(future_)) {} + + c10::intrusive_ptr future; +}; + +// InterpreterContinuation propagates dist_autograd_context_id +// through (and only through) the forward pass manually, other +// thread local settings are propagated with ThreadLocalState +struct InterpreterContinuation { + InterpreterContinuation( + InterpreterState state_, + Stack stack_, + int64_t dist_autograd_context_id = 0, + std::optional tls_state = std::nullopt) + : state(std::move(state_)), + stack(std::move(stack_)), + tls_state_(std::move(tls_state)) +#ifdef USE_DISTRIBUTED + , + dist_autograd_context_id_(dist_autograd_context_id) +#endif + { + } + + void operator()(); + + private: + InterpreterState state; + Stack stack; + std::optional tls_state_ = std::nullopt; +#ifdef USE_DISTRIBUTED + int64_t dist_autograd_context_id_; +#endif +}; + +// what is the tensors type, including state from the current execution context +// that modifies how the tensor behaves. For instance if no_grad is enabled +// this will cause the TensorType to have requires_grad=False. 
+TORCH_API at::TensorTypePtr tensorTypeInCurrentExecutionContext( + const at::Tensor& t); + +// current (TLS) TorchScript interpreter callstack +TORCH_API std::vector currentCallstack(); +TORCH_API std::vector currentModuleHierarchy(); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_exception.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_exception.h new file mode 100644 index 0000000000000000000000000000000000000000..cb4f572a8bd3c0b0e076143a36c6e2af7a4885f8 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/jit_exception.h @@ -0,0 +1,38 @@ +#pragma once + +#include + +#include +#include +#include + +namespace torch::jit { + +struct TORCH_API JITException : public std::runtime_error { + explicit JITException( + const std::string& msg, + std::optional python_class_name = std::nullopt, + std::optional original_msg = std::nullopt); + + std::optional getPythonClassName() const { + return python_class_name_; + } + + // the original msg if this is from a python exception. The interpretor has + // changed the original message by adding "The following operation failed in + // the TorchScript interpreter." in front of it in the handleError function. 
+ std::optional getOriginalMsg() const { + return original_msg_; + } + + static const std::string& getCaughtOriginalMsg(); + static const std::string& getCaughtPythonClassName(); + static void setCaughtOriginalMsg(const std::string& msg); + static void setCaughtPythonClassName(const std::string& pythonClassName); + + private: + std::optional python_class_name_; + std::optional original_msg_; +}; + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/logging.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/logging.h new file mode 100644 index 0000000000000000000000000000000000000000..44561a8006cfdfd1751fb44172572429ccd39f77 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/logging.h @@ -0,0 +1,89 @@ +#pragma once + +#include +#include +#include +#include +#include + +#include + +namespace torch::jit::logging { + +class LoggerBase { + public: + TORCH_API virtual void addStatValue( + const std::string& stat_name, + int64_t val) = 0; + virtual ~LoggerBase() = default; +}; + +TORCH_API LoggerBase* getLogger(); +TORCH_API LoggerBase* setLogger(LoggerBase* logger); + +// No-op logger. This is the default and is meant to incur almost no runtime +// overhead. + +class NoopLogger : public LoggerBase { + public: + void addStatValue( + const std::string& stat_name [[maybe_unused]], + int64_t val [[maybe_unused]]) override {} + ~NoopLogger() override = default; +}; + +// Trivial locking logger. Pass in an instance of this to setLogger() to use it. +// This keeps track of the sum of all statistics. +// +// NOTE: this is not written in a scalable way and should probably only be used +// in the single-threaded case or for testing. 
+class TORCH_API LockingLogger : public LoggerBase { + public: + void addStatValue(const std::string& stat_name, int64_t val) override; + virtual int64_t getCounterValue(const std::string& name) const; + enum class AggregationType { SUM = 0, AVG = 1 }; + void setAggregationType(const std::string& stat_name, AggregationType type); + ~LockingLogger() override = default; + + private: + mutable std::mutex m; + struct RawCounter { + RawCounter() : sum(0), count(0) {} + int64_t sum; + size_t count; + }; + std::unordered_map raw_counters; + std::unordered_map agg_types; +}; + +// Make this struct so the timer internals are opaque to the user. +struct JITTimePoint { + std::chrono::time_point point; +}; + +TORCH_API JITTimePoint timePoint(); +TORCH_API void recordDurationSince( + const std::string& name, + const JITTimePoint& tp); + +namespace runtime_counters { +constexpr const char* GRAPH_EXECUTORS_CONSTRUCTED = + "pytorch_runtime.graph_executors_constructed"; +constexpr const char* GRAPH_EXECUTOR_INVOCATIONS = + "pytorch_runtime.graph_executor_invocations"; +constexpr const char* EXECUTION_PLAN_CACHE_HIT = + "pytorch_runtime.execution_plan_cache_hit"; +constexpr const char* EXECUTION_PLAN_CACHE_MISS = + "pytorch_runtime.execution_plan_cache_miss"; + +inline std::vector allRuntimeCounters() { + return { + GRAPH_EXECUTORS_CONSTRUCTED, + GRAPH_EXECUTOR_INVOCATIONS, + EXECUTION_PLAN_CACHE_HIT, + EXECUTION_PLAN_CACHE_MISS}; +} + +} // namespace runtime_counters + +} // namespace torch::jit::logging diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator.h new file mode 100644 index 0000000000000000000000000000000000000000..2e609f18ecc07489863ba59518bb4af558f9c7b7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/operator.h @@ -0,0 +1,345 @@ +// in memory description of all ATen Ops similar to Caffe2 schema +// once 
C10 exists this can be removed, or stubbed out, but we need +// it now to implement correct semantic checking for script +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace torch::jit { + +struct Node; +using ::c10::Argument; +using ::c10::FunctionSchema; +using ::c10::Symbol; + +using OperationCreator = Operation (*)(const Node*); + +namespace { +const std::array kJitOnlyOperatorTags = { + at::Tag::pt2_compliant_tag}; +} + +/* + * Note: JIT relies on Operator instances having static lifetime, because + * it for example stores a non-owning FunctionSchema* pointer in the Node class, + * which points to the function schema stored in the Operator instance. + * Also, jit::Operator is meant to store more operator related information like + * symbolic derivatives, which also requires them to have static lifetime + * so that changes to symbolic derivatives are remembered. + * + * Currently, the JIT operator library contains a jit::Operator instance + * with a wrapper for each c10 operator. The c10 operator library registers + * those wrappers using listeners in register_c10_ops.cpp. + * TODO Instead of doing it this way, we should only have pure-jit ops in + * the jit library but have the JIT operator lookup look into the c10 library + * too. + */ + +// An Operator is a thin wrapper around either a pure JIT operator (e.g. prim +// ops) or a c10 operator, allowing some common operations and abstracting away +// the concrete operator nature. 
+struct TORCH_API Operator { + private: + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + struct C10Operator final { + c10::OperatorHandle handle_; + Operation op_; + }; + struct UnparsedFunctionSchema final { + std::string schema_string_; + mutable std::optional alias_analysis_; + }; + // NOLINTNEXTLINE(cppcoreguidelines-pro-type-member-init) + struct JitOnlyOperator final { + // The only valid transition for schema_ is from right->left, i.e. + // when the schema gets parsed. + mutable std::variant schema_; + + std::variant op_; + }; + + public: + Operator(c10::OperatorHandle opHandle, Operation operation) + : op_(C10Operator{std::move(opHandle), std::move(operation)}) {} + + Operator( + std::string schema, + Operation op, + c10::AliasAnalysisKind alias_analysis) + : op_(JitOnlyOperator{ + UnparsedFunctionSchema{std::move(schema), alias_analysis}, + Operation(std::move(op))}) {} + + Operator( + std::string name, + std::string overload_name, + std::vector arguments, + std::vector returns, + Operation op, + c10::AliasAnalysisKind alias_analysis) + : op_(JitOnlyOperator{ + FunctionSchema(varArgSchemaWithName( + std::move(name), + std::move(overload_name), + std::move(arguments), + std::move(returns), + alias_analysis)), + std::move(op)}) {} + + Operator( + std::string schema, + OperationCreator op_creator, + c10::AliasAnalysisKind alias_analysis) + : op_(JitOnlyOperator{ + UnparsedFunctionSchema{std::move(schema), alias_analysis}, + op_creator}) {} + + // Helper constructor to register `op` to run + // run for _every_ IR Node where n.kind() == name, regardless of arguments. + // This is accomplished by marking the schema varargs and having no required + // arguments. 
+ Operator( + Symbol name, + OperationCreator op_creator, + c10::AliasAnalysisKind alias_analysis) + : op_(JitOnlyOperator{ + FunctionSchema(varArgSchemaWithName(name, alias_analysis)), + op_creator}) {} + + Operation getOperation(const Node* node = nullptr) const { + return std::visit( + c10::overloaded( + [](const C10Operator& op) { return op.op_; }, + [node](const JitOnlyOperator& op) { + return std::visit( + c10::overloaded( + [](const Operation& op) { return op; }, + [node](const OperationCreator& op_creator) { + return op_creator(node); + }), + op.op_); + }), + op_); + } + + Operation getOperationForDispatchKey(c10::DispatchKey dk) const { + // TODO: some sort of caching mechanism? + return std::visit( + c10::overloaded( + [dk](const C10Operator& op) { + return Operation([op, dk](Stack& stack) { + op.handle_.callBoxedForDispatchKey(dk, stack); + }); + }, + [](const JitOnlyOperator& op) { + TORCH_CHECK( + false, + "calling a JIT operator for dispatch key is not supported"); + return Operation(nullptr); + }), + op_); + } + + const FunctionSchema& schema() const { + return std::visit( + c10::overloaded( + [](const C10Operator& op) -> const FunctionSchema& { + return op.handle_.schema(); + }, + [](const JitOnlyOperator& op) -> const FunctionSchema& { + // we lazily parse schema initialized from strings so that + // we do less work during static operator registration + if (op.schema_.index() == 1) { + auto& unmaterializedSchema = + std::get(op.schema_); + FunctionSchema schema = + parseSchema(unmaterializedSchema.schema_string_); + if (unmaterializedSchema.alias_analysis_.has_value()) { + // TODO What if it gets set later? 
+ schema.setAliasAnalysis( + *unmaterializedSchema.alias_analysis_); + } + op.schema_ = std::move(schema); + } + return std::get(op.schema_); + }), + op_); + } + + c10::ArrayRef getTags() const { + return std::visit( + c10::overloaded( + [](const C10Operator& op) { return op.handle_.getTags(); }, + [](const JitOnlyOperator& op) { + // JitOnlyOperators don't have an c10::OperatorHandle or a way to + // specify tags. We're grandfathering them all into + // pt2_compliant_tag, but for anything else, please just stop + // using JitOnlyOperator. + return c10::ArrayRef(kJitOnlyOperatorTags); + }), + op_); + } + + bool isC10Op() const { + return op_.index() == 0; + } + + c10::AliasAnalysisKind aliasAnalysisKind() const { + const FunctionSchema& schemaRef = schema(); + c10::AliasAnalysisKind alias_analysis = schemaRef.aliasAnalysis(); + + TORCH_CHECK( + alias_analysis == AliasAnalysisKind::FROM_SCHEMA || + !schemaRef.hasAnyAliasInfo(), + "In operator registration: Tried to register operator ", + schemaRef, + " with aliasing information in the schema but without AliasAnalysisKind::FROM_SCHEMA."); + return alias_analysis; + } + + bool hasOperation() const { + return std::visit( + c10::overloaded( + [](const C10Operator&) { return true; }, + [](const JitOnlyOperator& op) { return op.op_.index() == 0; }), + op_); + } + + private: + static FunctionSchema varArgSchemaWithName( + Symbol name, + AliasAnalysisKind alias_analysis) { + auto result = FunctionSchema( + name, + "", + {}, + {}, + /*is_vararg*/ true, + /*is_varret*/ true); + result.setAliasAnalysis(alias_analysis); + return result; + } + + static FunctionSchema varArgSchemaWithName( + std::string name, + std::string overload_name, + std::vector arguments, + std::vector returns, + AliasAnalysisKind alias_analysis) { + auto result = FunctionSchema( + std::move(name), + std::move(overload_name), + std::move(arguments), + std::move(returns), + /*is_vararg*/ false, + /*is_varret*/ false); + 
result.setAliasAnalysis(alias_analysis); + return result; + } + + std::variant op_; +}; + +TORCH_API std::string canonicalSchemaString(const FunctionSchema& schema); + +TORCH_API const std::vector> getAllOperators(); +TORCH_API const std::vector>& getAllOperatorsFor( + Symbol name); +// Returns operators in the order which OpOverloadPacket resolves them. +TORCH_API std::vector> getAllSortedOperatorsFor( + Symbol name); + +// given a operator with an overload name, find the specific operator related to +// it, may return nullptr if no operator exists. +TORCH_API std::shared_ptr findOperatorFor( + const c10::OperatorName& full_name); + +TORCH_API std::vector findSimilarOperators(Symbol input_op); + +TORCH_API void registerOperator(Operator&& op); +TORCH_API void deregisterOperator(const FunctionSchema& schema); + +// XXX: this function is meant to be used with string literals only! +TORCH_API std::shared_ptr getOperatorForLiteral( + const char* signature); + +// Ensure the thing that registers c10 ops is defined. +// Otherwise, our registry will not have c10 ops. You can run into this +// scenario if you're querying registered ops during static init. +// +// This fn is defined in register_c10_ops.cpp +TORCH_API void ensure_c10_registerer_defined(); + +// Used to assert that unschematized operators have an analysis method written +TORCH_API bool aliasAnalysisHasSpecialCaseFor(c10::Symbol sym); + +// A factory function to generate an optional operator. It has two +// instantiations depending on the template bool arg value. The arg can be a +// compile-time function for the selective op registration based on schema +// string. 
+template +std::optional OperatorGenerator( + const char* schema_str, + Func&& op, + AliasAnalysisKind alias_analysis) { + return std::optional(Operator( + std::string(schema_str), std::forward(op), alias_analysis)); +} + +template +std::optional OperatorGenerator( + torch::detail::SelectiveStr schema_str, + Func&& op, + AliasAnalysisKind alias_analysis) { + return OperatorGenerator( + static_cast(schema_str), + std::forward(op), + alias_analysis); +} + +template +std::optional OperatorGenerator( + torch::detail::SelectiveStr schema_str, + Func&& op, + AliasAnalysisKind alias_analysis) { + return std::nullopt; +} + +template +std::optional OperatorGenerator( + const std::string name, + const std::string overload_name, + const std::vector arguments, + const std::vector returns, + Func&& op, + AliasAnalysisKind alias_analysis) { + return std::optional(Operator( + name, + overload_name, + arguments, + returns, + std::forward(op), + alias_analysis)); +} + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_graph_executor_impl.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_graph_executor_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..a49ef18e2fa42db8f694d64df998791624c0503d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_graph_executor_impl.h @@ -0,0 +1,73 @@ +#pragma once +#include +#include +#include + +C10_DECLARE_bool(torch_jit_static_then_dynamic); + +C10_DECLARE_bool(torch_jit_always_dynamic); + +namespace torch::jit { + +TORCH_API void runNooptPassPipeline(std::shared_ptr& graph); + +struct TORCH_API ProfilingGraphExecutorImpl : public GraphExecutorImplBase { + ProfilingGraphExecutorImpl( + const std::shared_ptr& graph, + std::string function_name); + + const ExecutionPlan& getPlanFor( + Stack& stack, + std::optional remaining_bailout_depth) override; + GraphExecutorState 
getDebugState() override; + ~ProfilingGraphExecutorImpl() override = default; + + void debugFlushCompilationCache(); + + bool isOptimized() const override { + return optimized_plan_.has_value(); + } + + private: + const ExecutionPlan& getOptimizedPlanFor( + Stack& stack, + std::optional remaining_bailout_depth); + void runProfilingInsensitiveOptimizations(std::shared_ptr& graph); + void runProfilingOptimizations( + std::shared_ptr& graph, + size_t remaining_depth); + void replaceFallbackGraphWithFallbackFunction(Block* b); + FusionBehavior getCurrentBehavior(size_t remaining_depth); + size_t getInstantiatedBailoutDepth(); + void runNoGradOptimizations( + std::shared_ptr& graph, + size_t remaining_bailout_depth); + void runFinalOptimizations(std::shared_ptr& graph); + + void clearTheGraphCompilationIntermediateGraphs(); + + std::unique_ptr pr_; + std::optional + profiling_plan_; // plan to run in order to profiling the code + std::optional optimized_plan_; + FusionStrategy fusion_strategy_; + + // this plan is used if getGraphExecutorOptimize is unset + std::optional fallback_plan_; + // fallback functions are inserted for tensorexpr fusion groups + // and by specialize_autogradzero. Whenever, at runtime, input + // tensor don't match profiled properties, fallback functions are called + // They are the deoptimized version of the logic in fusion groups + // and/or autograd. + // The fallback functions are owned by a GraphExecutor instance + // They only exist in the optimized graph which is a private property + // of the GraphExecutor and only shared with InterpreterState + std::vector> fallback_functions_; + std::optional remaining_bailout_depth_; + // The time the optimized_plan_ is created. + int32_t time_optimized_plan_created_ = 0; + // Has the extra memory used by the graph for profiling is released? 
+ bool is_graph_extra_memory_released_ = false; +}; + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_record.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_record.h new file mode 100644 index 0000000000000000000000000000000000000000..c45dcde7b0bf0ea2314eb676ea87e958499ff7a2 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/profiling_record.h @@ -0,0 +1,205 @@ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +// We would like to assign each position/axis of a tensor an abstract size +// * For each `tensor` we have a profiled `Value` of a `TensorType` describing +// the properties of the `tensor`. +// * `TensorType` has a property called `symbolic_sizes_` to describe observed +// `tensor.sizes()` +// * `symbolic_sizes_` is a vector of abstract sizes (or +// `std::vector`) where +// * `ShapeSymbol`at `symbolic_sizes_[i]` describes the size value +// (`Dimension`) at `tensor.sizes()[i]` +// * We may see the same `Dimension` at different positions `i` in +// `tensor.sizes()` or even in different `tensor` +// * First, we would like associate the same `ShapeSymbol` to the same +// `Dimension` across **one** profiling execution or run of a TorchScript +// function. +// * The same `ShapeSymbol`s in different positions of `symbolic_shapes_` in +// possibly different `TensorType`s (i.e. `TensorType`s for different +// profiled values) form an implicit set. The elements of such a set are +// called *dimension locations*. 
+// * These sets allow us to track how the shapes of input arguments of some +// operation relate to operation's output shapes as the input and output +// shapes might share the same `ShapeSymbol`s +// * For **every** profiling run, we would like to maintain the invariant that +// *the same `ShapeSymbol` is always associated with the same `Dimension`*. +// * To maintain this invariant we merge the profiling information from all +// profiling runs, +// * For every two runs, we iterate over all `symbic_shapes_` and compare +// their `ShapeSymbol`s in the same position. +// * if we observe that for every dimension location that has +// the`ShapeSymbol S1` in run #1 there is **only one** `ShapeSymbol S2` in +// the same dimension location in run #2, we conclude that the invariant +// holds. +// * However, if we observe some dimension locations in run #2 have +// `ShapeSymbol S2` and the other ones have `ShapeSymbol S3` we would like +// to partition the virtual set of dimension locations associated with +// `ShapeSymbol S1` into two new subsets, so the invariant holds. +// * The partitioning works by assigning a new symbol to the dimension +// locations (associated with `ShapeSymbol S1`) that have `ShapeSymbol S2` +// and another new symbol to the dimension locations that have `ShapeSymbol +// S3`. In other words, +// * Subset #1 will consist of the dimension locations that in run #2 have +// `ShapeSymbol S2` and will have `ShapeSymbol S4` in those dimension +// locations +// * Subset #2 will consist of the dimension locations that in run #2 have +// `ShapeSymbol S4` and will have `ShapeSymbol S5` in those dimension +// locations +// * The effective result of merging the profiling information from two runs +// is new `TensorTypes` whose `symbolic_sizes_` /dimension locations have +// either `ShapeSymbol S4` or `ShapeSymbol S5`. 
+// * Partitioning can be done even before we have seen all the dimension +// locations associated with `ShapeSymbol S1` +// * We use `getSymbolInSet` of `ShapeSymbolTable` to remember all +// `ShapeSymbols` from run #2 we observed in the dimension locations +// associated with `ShapeSymbol S1` . +// * For every `ShapeSymbol` from run #2 in the dimension location +// associated with `ShapeSymbol S1` `getSymbolInSet` returns a symbol +// that we assign to the dimension location in a new TensorType. +// * It's important to point out that the same `ShapeSymbol S2` from run +// #2 in two dimension locations that have different `ShapeSymbol`s in +// run #1 are different! These dimension locations will belong to +// different subsets and have different `ShapeSymbol`s after merge. +// * On the other hand, for the same `ShapeSymbol S2` in two dimension +// locations that have `ShapeSymbol S1` in run #1`getSymbolInSet` will +// return the same symbol. + +namespace torch::jit { + +using ::c10::TensorTypePtr; +using Dimension = int64_t; + +TORCH_API void RegisterProfilingNode(const std::function&); + +struct ProfilingRecord; + +// `SetPartitioningHelper` is used to maintain the following invariant: +// For **every** profiling run, *the same `ShapeSymbol` is always associated +// with the same `Dimension`*. +// while merging the profiling information from multiple runs. +struct SetPartitioningHelper { + std::map> + sets2subsets_; + + // `partitionSetByDimension` partitions a virtual set + // of dimension locations associated with ShapeSymbol `symbol` into subsets. + // Partitioning is equivalent to giving (or renaming) a particular + // dimension location a new `ShapeSymbol`. + // The same `Dimension` value in different dimension locations + // that used to have `symbol` will receive the same + // new `ShapeSymbol`, effectively forming a new set. 
+ c10::ShapeSymbol partitionSetByDimension( + Dimension new_size, + c10::ShapeSymbol symbol) { + auto& dims2symbols = getSetForSymbol(symbol); + + if (dims2symbols.count(new_size) == 0) { + auto new_sym = c10::ShapeSymbol::newSymbol(); + dims2symbols[new_size] = new_sym; + return new_sym; + } + + return dims2symbols[new_size]; + } + + private: + std::map& getSetForSymbol(c10::ShapeSymbol s) { + auto& set = sets2subsets_[s]; + // N.B. adding a mapping { s.static_size(), s } + // makes sure we preserve the fact that + // some dimension values remain the same + // across all profiled runs + if (s.is_static()) { + set.insert({s.static_size(), s}); + } + return set; + } +}; + +// ShapeSymbolTable is used by Interpreter +// to assign dimension values to ShapeSymbols +// and fail a guard if the same symbol +// is assigned more than one dimension value. +struct ShapeSymbolTable { + // N.B. we treat static symbols as always assigned + // to themselves + bool isBound(c10::ShapeSymbol s) { + if (s.is_static()) { + return true; + } + return data_.count(s) != 0; + } + + // N.B. we treat static symbols as always assigned + // to themselves + Dimension getValue(c10::ShapeSymbol s) { + if (s.is_static()) { + return s.static_size(); + } + return data_[s]; + } + void assign(c10::ShapeSymbol s, Dimension v) { + TORCH_INTERNAL_ASSERT(!s.is_static()); + data_[s] = v; + } + std::map data_; + // Tries to assign dimension values from `new_sizes` to + // `ShapeSymbol`s `sym_shapes`. + // Returns `true` if every dimension value from `new_sizes` + // can be assigned to the corresponding `ShapeSymbol` from + // `sym_shapes` + // A dimension value can be assigned to a `ShapeSymbol` + // * if the symbol isn't assigned yet any dimension value + // * if the symbol is assigned and its value is equal to + // the dimension value from `new_sizes` + bool bindSymbolicShapes( + at::IntArrayRef new_sizes, + const c10::SymbolicShape& sym_shapes); +}; + +struct ProfilingRecord { + // N.B. 
ProfilingRecord's copy and move c-tor are disabled, so we won't + // end up accidentally copying or moving ProfilingRecords whose addresses + // are captured in callbacks_ + ProfilingRecord(const ProfilingRecord&) = delete; + ProfilingRecord(ProfilingRecord&&) noexcept = delete; + TORCH_API static std::unique_ptr instrumentGraph( + const std::shared_ptr& graph); + TORCH_API static void removeProfilingNodes(Block* b); + TORCH_API static void removeProfileCounter(Block* b); + + std::shared_ptr profiled_graph_; + mutable std::mutex mutex_; + size_t profiling_count_; + + bool ready() const; + + std::shared_ptr graph() const { + return profiled_graph_; + } + + TORCH_API ProfileIValueOp* createProfileIValueNode(Value* in_val); + TORCH_API ProfileIValueOp* createProfileIValueNode(ArrayRef inputs); + + private: + ProfileOp* createProfileNode( + const std::function& fp, + at::ArrayRef inputs); + void instrumentBlock(Block* block); + void insertShapeProfile(Node* n, size_t offset, const TypePtr& input_type); + ProfilingRecord(std::shared_ptr g); +}; + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/script_profile.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/script_profile.h new file mode 100644 index 0000000000000000000000000000000000000000..7abaf5d73f83e3edbf68cdccd44061907847ad9d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/script_profile.h @@ -0,0 +1,103 @@ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +namespace torch::jit { +namespace profiling { + +struct Datapoint { + using Timepoint = std::chrono::time_point; + SourceRange sourceRange; + Timepoint start; + Timepoint end; + + explicit Datapoint(SourceRange sr) + : sourceRange(std::move(sr)), start(std::chrono::steady_clock::now()) {} +}; + +class TORCH_API InstructionSpan { + public: + explicit InstructionSpan(Node&); + 
~InstructionSpan(); + InstructionSpan(InstructionSpan&&) = delete; + InstructionSpan& operator=(InstructionSpan&&) = delete; + + private: + std::unique_ptr datapoint_; +}; + +bool TORCH_API isProfilingOngoing(); + +} // namespace profiling + +struct TORCH_API InstructionStats : public CustomClassHolder { + int64_t count{0}; + std::chrono::nanoseconds duration{0}; +}; + +class TORCH_API SourceStats : public CustomClassHolder { + public: + using LineMap = c10::Dict>; + + SourceStats(SourceRef source, LineMap lineMap) + : source_(std::move(source)), lineMap_(std::move(lineMap)) {} + + const SourceRef& getSourceRef() const { + return source_; + } + + const LineMap& getLineMap() const { + return lineMap_; + } + + private: + SourceRef source_; + LineMap lineMap_; +}; + +/** + * ScriptProfile is an underlying C++ implementation for TorchScript profiling. + * The profiling section is specified by calling enable() and disable(): + * + * ... + * scriptProfile.enable(); + * ... + * (scripts) + * ... + * scriptProfile.disable(); + * ... + * + * NOTE: you cannot attach the profiler while the script is running. + * + * To retrieve collected runtime data, users may call dumpStats() and do + * arbitrary filtering on the data they want. Note that dumpStats() should + * not be called inside a profiling section. + * In general, stats are aggregated per source function body, and then by line + * number. + */ +class TORCH_API ScriptProfile : public CustomClassHolder { + // Aggregates datapoints by function source id, then by line number. 
+ using LineMap = std::map; + using SourceMap = std::map>; + + public: + void enable(); + void disable(); + const SourceMap& dumpStats(); + void addDatapoint(std::shared_ptr); + ~ScriptProfile() override; + + private: + bool enabled_{false}; + std::vector> datapoints_; + SourceMap sourceMap_; +}; + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/serialized_shape_function_registry.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/serialized_shape_function_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..e822f3f93e3d29d533f27e8565d7a0de787f33b5 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/serialized_shape_function_registry.h @@ -0,0 +1,15 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +TORCH_API const std::string& GetSerializedShapeFunctions(); + +TORCH_API const OperatorMap& GetShapeFunctionMappings(); + +TORCH_API const OperatorMap>& +GetBoundedShapeMappings(); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/shape_function_registry.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/shape_function_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..533b1f11020763e2d6d1d05734c6a4b09bcc44aa --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/shape_function_registry.h @@ -0,0 +1,12 @@ +#pragma once + +#include +#include + +namespace torch::jit { + +TORCH_API const std::string& GetSerializedFuncs(); + +TORCH_API const OperatorMap& GetFuncMapping(); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_script.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_script.h new file mode 100644 index 
0000000000000000000000000000000000000000..0715f0deeb1208ce0cdd0606598f5cc8fd3d4bde --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_script.h @@ -0,0 +1,18 @@ +#pragma once +// This file is temporary until native_functions.yaml and derivatives.yaml are +// merged. Ideally this should all go into native_functions.yaml + +#include +#include +#include + +namespace torch::jit { +struct GradientPair { + std::shared_ptr forward; + std::shared_ptr backward; +}; + +TORCH_API std::optional gradientInfoForSchema( + const FunctionSchema& schema); +TORCH_API bool hasGradientInfoForSchema(const FunctionSchema& schema); +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry.h new file mode 100644 index 0000000000000000000000000000000000000000..7222fd8bca326930a4d891038183a073b0e0232b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry.h @@ -0,0 +1,69 @@ +#pragma once +// This file is temporary until native_functions.yaml and derivatives.yaml are +// merged. Ideally this should all go into native_functions.yaml + +#include +#include + +namespace torch::jit { + +/* +ADDING A NEW SHAPE GRAPH: +- For one node schema, there is one corresponding registered shape compute +graph. The schema of the graph should be the same except for Tensor arguments. +For every Tensor input in operator schema, there should be a List[int] +corresponding to that Tensor's shape. For example: "aten::linear(Tensor input, +Tensor weight, Tensor? bias=None) -> Tensor" ==> def linear(input: List[int], +weight: List[int], bias: Optional[List[int]]) + +Additionally, arguments which are unused at the end of the schema may be left +off. 
This allows sharing a single graph for multiple function schemas, such as +unary operators with different trailing arguments that do not affect the output +shape. + +The shape graph should return a new, unaliased List[int] (or tuple of lists for +multiple returns) and should not modify any input lists. This allows the shape +graphs to be composed and executed. + +The shape analysis (particularly for non-complete, or symbolic shapes) works by +partially evaluating the JIT IR. It may be possible for a Graph to be registered +that we cannot currently partially evaluate. If this happens, please file an +issue. There are lints registered to avoid particular known patterns (continue +or break or early return in a loop). Those may be improved in the future, please +file an issue if necessary. + +To debug (and write initially) the recommended flow is to define these functions +in python and iterate there. Functions should be added to +torch/jit/_shape_functions. + +To test operators, the preferred flow is through OpInfos, with +`assert_jit_shape_analysis=True`. If this is not feasible, you can look at tests +in `test_symbolic_shape_analysis.py` such as `test_adaptive_avg_pool2d`. + +Operators which take in a list of tensors, such as concat, are not yet +supported. Concat has been special cased and could be generalized as needed. +Please file an issue. 
+*/ + +struct BoundedShapeGraphs { + std::shared_ptr lower_bound; + std::shared_ptr upper_bound; +}; + +TORCH_API void RegisterShapeComputeGraphForSchema( + const FunctionSchema& schema, + const std::shared_ptr& g); + +TORCH_API std::optional> shapeComputeGraphForSchema( + const FunctionSchema& schema); + +TORCH_API std::optional boundedGraphsForSchema( + const FunctionSchema& schema); + +TORCH_API std::vector RegisteredShapeComputeSchemas(); + +TORCH_API void LintShapeComputeGraph( + const FunctionSchema* schema, + const std::shared_ptr& graph); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry_util.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry_util.h new file mode 100644 index 0000000000000000000000000000000000000000..e1280504e5c914f51809e300d0d46bc182ae9789 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/symbolic_shape_registry_util.h @@ -0,0 +1,12 @@ +#pragma once +// This file is temporary until native_functions.yaml and derivatives.yaml are +// merged. 
Ideally this should all go into native_functions.yaml + +#include +#include + +namespace torch::jit { + +TORCH_API const OperatorMap& get_tensorexpr_elementwise_set(); + +} // namespace torch::jit diff --git a/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/vararg_functions.h b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/vararg_functions.h new file mode 100644 index 0000000000000000000000000000000000000000..0be53d4ffeb28b910e1c3f9d3eb1115a7e527784 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/torch/include/torch/csrc/jit/runtime/vararg_functions.h @@ -0,0 +1,41 @@ +#pragma once +#include +#include +#include +#include +#include + +namespace torch::jit { + +void tupleUnpack(Stack& stack); + +void format(Stack& stack, size_t num_inputs); + +void einsum(Stack& stack, size_t num_inputs); + +void percentFormat(Stack& stack, size_t num_inputs); + +void listUnpack(Stack& stack, size_t num_outputs); + +void tupleConstruct(Stack& stack, size_t num_inputs); + +void namedTupleConstruct(Stack& stack, c10::TypePtr type, size_t num_inputs); + +void listConstruct(Stack& stack, const c10::Type& list_type, size_t num_inputs); + +void dictConstruct(Stack& stack, const c10::Type& type, size_t num_inputs); + +// as weak_ref will create a Object with a non-owning CompilationUnit reference, +// for use as a constant in the Graph to avoid a reference cycle +void createObject( + Stack& stack, + const at::ClassTypePtr& type, + bool as_weak_ref = false); + +void isinstance(Stack& stack, at::ArrayRef types); + +void tupleSlice(Stack& stack, size_t begin, size_t end); + +void dequantize(Stack& stack); + +} // namespace torch::jit