` ().
+
+ Example::
+
+ num = Word(nums).set_parse_action(lambda toks: int(toks[0]))
+ na = one_of("N/A NA").set_parse_action(replace_with(math.nan))
+ term = na | num
+
+ term[1, ...].parse_string("324 234 N/A 234") # -> [324, 234, nan, 234]
+ """
+ return lambda s, l, t: [repl_str]
+
+
+def remove_quotes(s, l, t):
+ """
+ Helper parse action for removing quotation marks from parsed
+ quoted strings.
+
+ Example::
+
+ # by default, quotation marks are included in parsed results
+ quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["'Now is the Winter of our Discontent'"]
+
+ # use remove_quotes to strip quotation marks from parsed results
+ quoted_string.set_parse_action(remove_quotes)
+ quoted_string.parse_string("'Now is the Winter of our Discontent'") # -> ["Now is the Winter of our Discontent"]
+ """
+ return t[0][1:-1]
+
+
+def with_attribute(*args, **attr_dict):
+ """
+ Helper to create a validating parse action to be used with start
+ tags created with :class:`make_xml_tags` or
+ :class:`make_html_tags`. Use ``with_attribute`` to qualify
+ a starting tag with a required attribute value, to avoid false
+ matches on common tags such as ``<TD>`` or ``<DIV>``.
+
+ Call ``with_attribute`` with a series of attribute names and
+ values. Specify the list of filter attributes names and values as:
+
+ - keyword arguments, as in ``(align="right")``, or
+ - as an explicit dict with ``**`` operator, when an attribute
+ name is also a Python reserved word, as in ``**{"class":"Customer", "align":"right"}``
+ - a list of name-value tuples, as in ``(("ns1:class", "Customer"), ("ns2:align", "right"))``
+
+ For attribute names with a namespace prefix, you must use the second
+ form. Attribute names are matched insensitive to upper/lower case.
+
+ If just testing for ``class`` (with or without a namespace), use
+ :class:`with_class`.
+
+ To verify that the attribute exists, but without specifying a value,
+ pass ``with_attribute.ANY_VALUE`` as the value.
+
+ Example::
+
+ html = '''
+ <div>
+ Some text
+ <div type="grid">1 4 0 1 0</div>
+ <div type="graph">1,3 2,3 1,1</div>
+ <div>this has no type</div>
+ </div>
+ '''
+ div,div_end = make_html_tags("div")
+
+ # only match div tag having a type attribute with value "grid"
+ div_grid = div().set_parse_action(with_attribute(type="grid"))
+ grid_expr = div_grid + SkipTo(div | div_end)("body")
+ for grid_header in grid_expr.search_string(html):
+ print(grid_header.body)
+
+ # construct a match with any div tag having a type attribute, regardless of the value
+ div_any_type = div().set_parse_action(with_attribute(type=with_attribute.ANY_VALUE))
+ div_expr = div_any_type + SkipTo(div | div_end)("body")
+ for div_header in div_expr.search_string(html):
+ print(div_header.body)
+
+ prints::
+
+ 1 4 0 1 0
+
+ 1 4 0 1 0
+ 1,3 2,3 1,1
+ """
+ if args:
+ attrs = args[:]
+ else:
+ attrs = attr_dict.items()
+ attrs = [(k, v) for k, v in attrs]
+
+ def pa(s, l, tokens):
+ for attrName, attrValue in attrs:
+ if attrName not in tokens:
+ raise ParseException(s, l, "no matching attribute " + attrName)
+ if attrValue != with_attribute.ANY_VALUE and tokens[attrName] != attrValue:
+ raise ParseException(
+ s,
+ l,
+ f"attribute {attrName!r} has value {tokens[attrName]!r}, must be {attrValue!r}",
+ )
+
+ return pa
+
+
+with_attribute.ANY_VALUE = object() # type: ignore [attr-defined]
+
+
+def with_class(classname, namespace=""):
+ """
+ Simplified version of :class:`with_attribute` when
+ matching on a div class - made difficult because ``class`` is
+ a reserved word in Python.
+
+ Example::
+
+ html = '''
+ <div>
+ Some text
+ <div class="grid">1 4 0 1 0</div>
+ <div class="graph">1,3 2,3 1,1</div>
+ <div>this &lt;div&gt; has no class</div>
+ </div>
+
+ '''
+ div,div_end = make_html_tags("div")
+ div_grid = div().set_parse_action(with_class("grid"))
+
+ grid_expr = div_grid + SkipTo(div | div_end)("body")
+ for grid_header in grid_expr.search_string(html):
+ print(grid_header.body)
+
+ div_any_type = div().set_parse_action(with_class(withAttribute.ANY_VALUE))
+ div_expr = div_any_type + SkipTo(div | div_end)("body")
+ for div_header in div_expr.search_string(html):
+ print(div_header.body)
+
+ prints::
+
+ 1 4 0 1 0
+
+ 1 4 0 1 0
+ 1,3 2,3 1,1
+ """
+ classattr = f"{namespace}:class" if namespace else "class"
+ return with_attribute(**{classattr: classname})
+
+
+# Compatibility synonyms
+# fmt: off
+replaceWith = replaced_by_pep8("replaceWith", replace_with)
+removeQuotes = replaced_by_pep8("removeQuotes", remove_quotes)
+withAttribute = replaced_by_pep8("withAttribute", with_attribute)
+withClass = replaced_by_pep8("withClass", with_class)
+matchOnlyAtCol = replaced_by_pep8("matchOnlyAtCol", match_only_at_col)
+# fmt: on
diff --git a/evalkit_tf437/lib/python3.10/site-packages/pyparsing/common.py b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..649aad009617467e066437eb816bd45d2928785a
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/common.py
@@ -0,0 +1,434 @@
+# common.py
+from .core import *
+from .helpers import DelimitedList, any_open_tag, any_close_tag
+from datetime import datetime
+
+
+# some other useful expressions - using lower-case class name since we are really using this as a namespace
+class pyparsing_common:
+ """Here are some common low-level expressions that may be useful in
+ jump-starting parser development:
+
+ - numeric forms (:class:`integers<integer>`, :class:`reals<real>`,
+ :class:`scientific notation<sci_real>`)
+ - common :class:`programming identifiers<identifier>`
+ - network addresses (:class:`MAC<mac_address>`,
+ :class:`IPv4<ipv4_address>`, :class:`IPv6<ipv6_address>`)
+ - ISO8601 :class:`dates<iso8601_date>` and
+ :class:`datetime<iso8601_datetime>`
+ - :class:`UUID<uuid>`
+ - :class:`comma-separated list<comma_separated_list>`
+ - :class:`url<url>`
+
+ Parse actions:
+
+ - :class:`convert_to_integer`
+ - :class:`convert_to_float`
+ - :class:`convert_to_date`
+ - :class:`convert_to_datetime`
+ - :class:`strip_html_tags`
+ - :class:`upcase_tokens`
+ - :class:`downcase_tokens`
+
+ Example::
+
+ pyparsing_common.number.run_tests('''
+ # any int or real number, returned as the appropriate type
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ ''')
+
+ pyparsing_common.fnumber.run_tests('''
+ # any int or real number, returned as float
+ 100
+ -100
+ +100
+ 3.14159
+ 6.02e23
+ 1e-12
+ ''')
+
+ pyparsing_common.hex_integer.run_tests('''
+ # hex numbers
+ 100
+ FF
+ ''')
+
+ pyparsing_common.fraction.run_tests('''
+ # fractions
+ 1/2
+ -3/4
+ ''')
+
+ pyparsing_common.mixed_integer.run_tests('''
+ # mixed fractions
+ 1
+ 1/2
+ -3/4
+ 1-3/4
+ ''')
+
+ import uuid
+ pyparsing_common.uuid.set_parse_action(token_map(uuid.UUID))
+ pyparsing_common.uuid.run_tests('''
+ # uuid
+ 12345678-1234-5678-1234-567812345678
+ ''')
+
+ prints::
+
+ # any int or real number, returned as the appropriate type
+ 100
+ [100]
+
+ -100
+ [-100]
+
+ +100
+ [100]
+
+ 3.14159
+ [3.14159]
+
+ 6.02e23
+ [6.02e+23]
+
+ 1e-12
+ [1e-12]
+
+ # any int or real number, returned as float
+ 100
+ [100.0]
+
+ -100
+ [-100.0]
+
+ +100
+ [100.0]
+
+ 3.14159
+ [3.14159]
+
+ 6.02e23
+ [6.02e+23]
+
+ 1e-12
+ [1e-12]
+
+ # hex numbers
+ 100
+ [256]
+
+ FF
+ [255]
+
+ # fractions
+ 1/2
+ [0.5]
+
+ -3/4
+ [-0.75]
+
+ # mixed fractions
+ 1
+ [1]
+
+ 1/2
+ [0.5]
+
+ -3/4
+ [-0.75]
+
+ 1-3/4
+ [1.75]
+
+ # uuid
+ 12345678-1234-5678-1234-567812345678
+ [UUID('12345678-1234-5678-1234-567812345678')]
+ """
+
+ convert_to_integer = token_map(int)
+ """
+ Parse action for converting parsed integers to Python int
+ """
+
+ convert_to_float = token_map(float)
+ """
+ Parse action for converting parsed numbers to Python float
+ """
+
+ integer = Word(nums).set_name("integer").set_parse_action(convert_to_integer)
+ """expression that parses an unsigned integer, returns an int"""
+
+ hex_integer = (
+ Word(hexnums).set_name("hex integer").set_parse_action(token_map(int, 16))
+ )
+ """expression that parses a hexadecimal integer, returns an int"""
+
+ signed_integer = (
+ Regex(r"[+-]?\d+")
+ .set_name("signed integer")
+ .set_parse_action(convert_to_integer)
+ )
+ """expression that parses an integer with optional leading sign, returns an int"""
+
+ fraction = (
+ signed_integer().set_parse_action(convert_to_float)
+ + "/"
+ + signed_integer().set_parse_action(convert_to_float)
+ ).set_name("fraction")
+ """fractional expression of an integer divided by an integer, returns a float"""
+ fraction.add_parse_action(lambda tt: tt[0] / tt[-1])
+
+ mixed_integer = (
+ fraction | signed_integer + Opt(Opt("-").suppress() + fraction)
+ ).set_name("fraction or mixed integer-fraction")
+ """mixed integer of the form 'integer - fraction', with optional leading integer, returns float"""
+ mixed_integer.add_parse_action(sum)
+
+ real = (
+ Regex(r"[+-]?(?:\d+\.\d*|\.\d+)")
+ .set_name("real number")
+ .set_parse_action(convert_to_float)
+ )
+ """expression that parses a floating point number and returns a float"""
+
+ sci_real = (
+ Regex(r"[+-]?(?:\d+(?:[eE][+-]?\d+)|(?:\d+\.\d*|\.\d+)(?:[eE][+-]?\d+)?)")
+ .set_name("real number with scientific notation")
+ .set_parse_action(convert_to_float)
+ )
+ """expression that parses a floating point number with optional
+ scientific notation and returns a float"""
+
+ # streamlining this expression makes the docs nicer-looking
+ number = (sci_real | real | signed_integer).set_name("number").streamline()
+ """any numeric expression, returns the corresponding Python type"""
+
+ fnumber = (
+ Regex(r"[+-]?\d+\.?\d*([eE][+-]?\d+)?")
+ .set_name("fnumber")
+ .set_parse_action(convert_to_float)
+ )
+ """any int or real number, returned as float"""
+
+ ieee_float = (
+ Regex(r"(?i)[+-]?((\d+\.?\d*(e[+-]?\d+)?)|nan|inf(inity)?)")
+ .set_name("ieee_float")
+ .set_parse_action(convert_to_float)
+ )
+ """any floating-point literal (int, real number, infinity, or NaN), returned as float"""
+
+ identifier = Word(identchars, identbodychars).set_name("identifier")
+ """typical code identifier (leading alpha or '_', followed by 0 or more alphas, nums, or '_')"""
+
+ ipv4_address = Regex(
+ r"(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})(\.(25[0-5]|2[0-4][0-9]|1?[0-9]{1,2})){3}"
+ ).set_name("IPv4 address")
+ "IPv4 address (``0.0.0.0 - 255.255.255.255``)"
+
+ _ipv6_part = Regex(r"[0-9a-fA-F]{1,4}").set_name("hex_integer")
+ _full_ipv6_address = (_ipv6_part + (":" + _ipv6_part) * 7).set_name(
+ "full IPv6 address"
+ )
+ _short_ipv6_address = (
+ Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
+ + "::"
+ + Opt(_ipv6_part + (":" + _ipv6_part) * (0, 6))
+ ).set_name("short IPv6 address")
+ _short_ipv6_address.add_condition(
+ lambda t: sum(1 for tt in t if pyparsing_common._ipv6_part.matches(tt)) < 8
+ )
+ _mixed_ipv6_address = ("::ffff:" + ipv4_address).set_name("mixed IPv6 address")
+ ipv6_address = Combine(
+ (_full_ipv6_address | _mixed_ipv6_address | _short_ipv6_address).set_name(
+ "IPv6 address"
+ )
+ ).set_name("IPv6 address")
+ "IPv6 address (long, short, or mixed form)"
+
+ mac_address = Regex(
+ r"[0-9a-fA-F]{2}([:.-])[0-9a-fA-F]{2}(?:\1[0-9a-fA-F]{2}){4}"
+ ).set_name("MAC address")
+ "MAC address xx:xx:xx:xx:xx (may also have '-' or '.' delimiters)"
+
+ @staticmethod
+ def convert_to_date(fmt: str = "%Y-%m-%d"):
+ """
+ Helper to create a parse action for converting parsed date string to Python datetime.date
+
+ Params -
+ - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%d"``)
+
+ Example::
+
+ date_expr = pyparsing_common.iso8601_date.copy()
+ date_expr.set_parse_action(pyparsing_common.convert_to_date())
+ print(date_expr.parse_string("1999-12-31"))
+
+ prints::
+
+ [datetime.date(1999, 12, 31)]
+ """
+
+ def cvt_fn(ss, ll, tt):
+ try:
+ return datetime.strptime(tt[0], fmt).date()
+ except ValueError as ve:
+ raise ParseException(ss, ll, str(ve))
+
+ return cvt_fn
+
+ @staticmethod
+ def convert_to_datetime(fmt: str = "%Y-%m-%dT%H:%M:%S.%f"):
+ """Helper to create a parse action for converting parsed
+ datetime string to Python datetime.datetime
+
+ Params -
+ - fmt - format to be passed to datetime.strptime (default= ``"%Y-%m-%dT%H:%M:%S.%f"``)
+
+ Example::
+
+ dt_expr = pyparsing_common.iso8601_datetime.copy()
+ dt_expr.set_parse_action(pyparsing_common.convert_to_datetime())
+ print(dt_expr.parse_string("1999-12-31T23:59:59.999"))
+
+ prints::
+
+ [datetime.datetime(1999, 12, 31, 23, 59, 59, 999000)]
+ """
+
+ def cvt_fn(s, l, t):
+ try:
+ return datetime.strptime(t[0], fmt)
+ except ValueError as ve:
+ raise ParseException(s, l, str(ve))
+
+ return cvt_fn
+
+ iso8601_date = Regex(
+ r"(?P<year>\d{4})(?:-(?P<month>\d\d)(?:-(?P<day>\d\d))?)?"
+ ).set_name("ISO8601 date")
+ "ISO8601 date (``yyyy-mm-dd``)"
+
+ iso8601_datetime = Regex(
+ r"(?P<year>\d{4})-(?P<month>\d\d)-(?P<day>\d\d)[T ](?P<hour>\d\d):(?P<minute>\d\d)(:(?P<second>\d\d(\.\d*)?)?)?(?P<tz>Z|[+-]\d\d:?\d\d)?"
+ ).set_name("ISO8601 datetime")
+ "ISO8601 datetime (``yyyy-mm-ddThh:mm:ss.s(Z|+-00:00)``) - trailing seconds, milliseconds, and timezone optional; accepts separating ``'T'`` or ``' '``"
+
+ uuid = Regex(r"[0-9a-fA-F]{8}(-[0-9a-fA-F]{4}){3}-[0-9a-fA-F]{12}").set_name("UUID")
+ "UUID (``xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx``)"
+
+ _html_stripper = any_open_tag.suppress() | any_close_tag.suppress()
+
+ @staticmethod
+ def strip_html_tags(s: str, l: int, tokens: ParseResults):
+ """Parse action to remove HTML tags from web page HTML source
+
+ Example::
+
+ # strip HTML links from normal text
+ text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
+ td, td_end = make_html_tags("TD")
+ table_text = td + SkipTo(td_end).set_parse_action(pyparsing_common.strip_html_tags)("body") + td_end
+ print(table_text.parse_string(text).body)
+
+ Prints::
+
+ More info at the pyparsing wiki page
+ """
+ return pyparsing_common._html_stripper.transform_string(tokens[0])
+
+ _commasepitem = (
+ Combine(
+ OneOrMore(
+ ~Literal(",")
+ + ~LineEnd()
+ + Word(printables, exclude_chars=",")
+ + Opt(White(" \t") + ~FollowedBy(LineEnd() | ","))
+ )
+ )
+ .streamline()
+ .set_name("commaItem")
+ )
+ comma_separated_list = DelimitedList(
+ Opt(quoted_string.copy() | _commasepitem, default="")
+ ).set_name("comma separated list")
+ """Predefined expression of 1 or more printable words or quoted strings, separated by commas."""
+
+ upcase_tokens = staticmethod(token_map(lambda t: t.upper()))
+ """Parse action to convert tokens to upper case."""
+
+ downcase_tokens = staticmethod(token_map(lambda t: t.lower()))
+ """Parse action to convert tokens to lower case."""
+
+ # fmt: off
+ url = Regex(
+ # https://mathiasbynens.be/demo/url-regex
+ # https://gist.github.com/dperini/729294
+ r"(?P<url>" +
+ # protocol identifier (optional)
+ # short syntax // still required
+ r"(?:(?:(?P<scheme>https?|ftp):)?\/\/)" +
+ # user:pass BasicAuth (optional)
+ r"(?:(?P<auth>\S+(?::\S*)?)@)?" +
+ r"(?P<host>" +
+ # IP address exclusion
+ # private & local networks
+ r"(?!(?:10|127)(?:\.\d{1,3}){3})" +
+ r"(?!(?:169\.254|192\.168)(?:\.\d{1,3}){2})" +
+ r"(?!172\.(?:1[6-9]|2\d|3[0-1])(?:\.\d{1,3}){2})" +
+ # IP address dotted notation octets
+ # excludes loopback network 0.0.0.0
+ # excludes reserved space >= 224.0.0.0
+ # excludes network & broadcast addresses
+ # (first & last IP address of each class)
+ r"(?:[1-9]\d?|1\d\d|2[01]\d|22[0-3])" +
+ r"(?:\.(?:1?\d{1,2}|2[0-4]\d|25[0-5])){2}" +
+ r"(?:\.(?:[1-9]\d?|1\d\d|2[0-4]\d|25[0-4]))" +
+ r"|" +
+ # host & domain names, may end with dot
+ # can be replaced by a shortest alternative
+ # (?![-_])(?:[-\w\u00a1-\uffff]{0,63}[^-_]\.)+
+ r"(?:" +
+ r"(?:" +
+ r"[a-z0-9\u00a1-\uffff]" +
+ r"[a-z0-9\u00a1-\uffff_-]{0,62}" +
+ r")?" +
+ r"[a-z0-9\u00a1-\uffff]\." +
+ r")+" +
+ # TLD identifier name, may end with dot
+ r"(?:[a-z\u00a1-\uffff]{2,}\.?)" +
+ r")" +
+ # port number (optional)
+ r"(:(?P<port>\d{2,5}))?" +
+ # resource path (optional)
+ r"(?P<path>\/[^?# ]*)?" +
+ # query string (optional)
+ r"(\?(?P<query>[^#]*))?" +
+ # fragment (optional)
+ r"(#(?P<fragment>\S*))?" +
+ r")"
+ ).set_name("url")
+ """URL (http/https/ftp scheme)"""
+ # fmt: on
+
+ # pre-PEP8 compatibility names
+ # fmt: off
+ convertToInteger = staticmethod(replaced_by_pep8("convertToInteger", convert_to_integer))
+ convertToFloat = staticmethod(replaced_by_pep8("convertToFloat", convert_to_float))
+ convertToDate = staticmethod(replaced_by_pep8("convertToDate", convert_to_date))
+ convertToDatetime = staticmethod(replaced_by_pep8("convertToDatetime", convert_to_datetime))
+ stripHTMLTags = staticmethod(replaced_by_pep8("stripHTMLTags", strip_html_tags))
+ upcaseTokens = staticmethod(replaced_by_pep8("upcaseTokens", upcase_tokens))
+ downcaseTokens = staticmethod(replaced_by_pep8("downcaseTokens", downcase_tokens))
+ # fmt: on
+
+
+_builtin_exprs = [
+ v for v in vars(pyparsing_common).values() if isinstance(v, ParserElement)
+]
diff --git a/evalkit_tf437/lib/python3.10/site-packages/pyparsing/diagram/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/diagram/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..4387036c8498fe4f8a74d3673440f9206e131a32
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/diagram/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/pyparsing/exceptions.py b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/exceptions.py
new file mode 100644
index 0000000000000000000000000000000000000000..57a1579d121e46e407efe73f5cc827d29eb352e8
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/exceptions.py
@@ -0,0 +1,314 @@
+# exceptions.py
+from __future__ import annotations
+
+import copy
+import re
+import sys
+import typing
+from functools import cached_property
+
+from .unicode import pyparsing_unicode as ppu
+from .util import (
+ _collapse_string_to_ranges,
+ col,
+ line,
+ lineno,
+ replaced_by_pep8,
+)
+
+
+class _ExceptionWordUnicodeSet(
+ ppu.Latin1, ppu.LatinA, ppu.LatinB, ppu.Greek, ppu.Cyrillic
+):
+ pass
+
+
+_extract_alphanums = _collapse_string_to_ranges(_ExceptionWordUnicodeSet.alphanums)
+_exception_word_extractor = re.compile("([" + _extract_alphanums + "]{1,16})|.")
+
+
+class ParseBaseException(Exception):
+ """base exception class for all parsing runtime exceptions"""
+
+ loc: int
+ msg: str
+ pstr: str
+ parser_element: typing.Any # "ParserElement"
+ args: tuple[str, int, typing.Optional[str]]
+
+ __slots__ = (
+ "loc",
+ "msg",
+ "pstr",
+ "parser_element",
+ "args",
+ )
+
+ # Performance tuning: we construct a *lot* of these, so keep this
+ # constructor as small and fast as possible
+ def __init__(
+ self,
+ pstr: str,
+ loc: int = 0,
+ msg: typing.Optional[str] = None,
+ elem=None,
+ ):
+ if msg is None:
+ msg, pstr = pstr, ""
+
+ self.loc = loc
+ self.msg = msg
+ self.pstr = pstr
+ self.parser_element = elem
+ self.args = (pstr, loc, msg)
+
+ @staticmethod
+ def explain_exception(exc: Exception, depth: int = 16) -> str:
+ """
+ Method to take an exception and translate the Python internal traceback into a list
+ of the pyparsing expressions that caused the exception to be raised.
+
+ Parameters:
+
+ - exc - exception raised during parsing (need not be a ParseException, in support
+ of Python exceptions that might be raised in a parse action)
+ - depth (default=16) - number of levels back in the stack trace to list expression
+ and function names; if None, the full stack trace names will be listed; if 0, only
+ the failing input line, marker, and exception string will be shown
+
+ Returns a multi-line string listing the ParserElements and/or function names in the
+ exception's stack trace.
+ """
+ import inspect
+ from .core import ParserElement
+
+ if depth is None:
+ depth = sys.getrecursionlimit()
+ ret: list[str] = []
+ if isinstance(exc, ParseBaseException):
+ ret.append(exc.line)
+ ret.append(f"{' ' * (exc.column - 1)}^")
+ ret.append(f"{type(exc).__name__}: {exc}")
+
+ if depth <= 0 or exc.__traceback__ is None:
+ return "\n".join(ret)
+
+ callers = inspect.getinnerframes(exc.__traceback__, context=depth)
+ seen: set[int] = set()
+ for ff in callers[-depth:]:
+ frm = ff[0]
+
+ f_self = frm.f_locals.get("self", None)
+ if isinstance(f_self, ParserElement):
+ if not frm.f_code.co_name.startswith(("parseImpl", "_parseNoCache")):
+ continue
+ if id(f_self) in seen:
+ continue
+ seen.add(id(f_self))
+
+ self_type = type(f_self)
+ ret.append(f"{self_type.__module__}.{self_type.__name__} - {f_self}")
+
+ elif f_self is not None:
+ self_type = type(f_self)
+ ret.append(f"{self_type.__module__}.{self_type.__name__}")
+
+ else:
+ code = frm.f_code
+ if code.co_name in ("wrapper", "<module>"):
+ continue
+
+ ret.append(code.co_name)
+
+ depth -= 1
+ if not depth:
+ break
+
+ return "\n".join(ret)
+
+ @classmethod
+ def _from_exception(cls, pe) -> ParseBaseException:
+ """
+ internal factory method to simplify creating one type of ParseException
+ from another - avoids having __init__ signature conflicts among subclasses
+ """
+ return cls(pe.pstr, pe.loc, pe.msg, pe.parser_element)
+
+ @cached_property
+ def line(self) -> str:
+ """
+ Return the line of text where the exception occurred.
+ """
+ return line(self.loc, self.pstr)
+
+ @cached_property
+ def lineno(self) -> int:
+ """
+ Return the 1-based line number of text where the exception occurred.
+ """
+ return lineno(self.loc, self.pstr)
+
+ @cached_property
+ def col(self) -> int:
+ """
+ Return the 1-based column on the line of text where the exception occurred.
+ """
+ return col(self.loc, self.pstr)
+
+ @cached_property
+ def column(self) -> int:
+ """
+ Return the 1-based column on the line of text where the exception occurred.
+ """
+ return col(self.loc, self.pstr)
+
+ @cached_property
+ def found(self) -> str:
+ if not self.pstr:
+ return ""
+
+ if self.loc >= len(self.pstr):
+ return "end of text"
+
+ # pull out next word at error location
+ found_match = _exception_word_extractor.match(self.pstr, self.loc)
+ if found_match is not None:
+ found_text = found_match.group(0)
+ else:
+ found_text = self.pstr[self.loc : self.loc + 1]
+
+ return repr(found_text).replace(r"\\", "\\")
+
+ # pre-PEP8 compatibility
+ @property
+ def parserElement(self):
+ return self.parser_element
+
+ @parserElement.setter
+ def parserElement(self, elem):
+ self.parser_element = elem
+
+ def copy(self):
+ return copy.copy(self)
+
+ def formatted_message(self) -> str:
+ found_phrase = f", found {self.found}" if self.found else ""
+ return f"{self.msg}{found_phrase} (at char {self.loc}), (line:{self.lineno}, col:{self.column})"
+
+ def __str__(self) -> str:
+ return self.formatted_message()
+
+ def __repr__(self):
+ return str(self)
+
+ def mark_input_line(
+ self, marker_string: typing.Optional[str] = None, *, markerString: str = ">!<"
+ ) -> str:
+ """
+ Extracts the exception line from the input string, and marks
+ the location of the exception with a special symbol.
+ """
+ markerString = marker_string if marker_string is not None else markerString
+ line_str = self.line
+ line_column = self.column - 1
+ if markerString:
+ line_str = f"{line_str[:line_column]}{markerString}{line_str[line_column:]}"
+ return line_str.strip()
+
+ def explain(self, depth: int = 16) -> str:
+ """
+ Method to translate the Python internal traceback into a list
+ of the pyparsing expressions that caused the exception to be raised.
+
+ Parameters:
+
+ - depth (default=16) - number of levels back in the stack trace to list expression
+ and function names; if None, the full stack trace names will be listed; if 0, only
+ the failing input line, marker, and exception string will be shown
+
+ Returns a multi-line string listing the ParserElements and/or function names in the
+ exception's stack trace.
+
+ Example::
+
+ # an expression to parse 3 integers
+ expr = pp.Word(pp.nums) * 3
+ try:
+ # a failing parse - the third integer is prefixed with "A"
+ expr.parse_string("123 456 A789")
+ except pp.ParseException as pe:
+ print(pe.explain(depth=0))
+
+ prints::
+
+ 123 456 A789
+ ^
+ ParseException: Expected W:(0-9), found 'A' (at char 8), (line:1, col:9)
+
+ Note: the diagnostic output will include string representations of the expressions
+ that failed to parse. These representations will be more helpful if you use `set_name` to
+ give identifiable names to your expressions. Otherwise they will use the default string
+ forms, which may be cryptic to read.
+
+ Note: pyparsing's default truncation of exception tracebacks may also truncate the
+ stack of expressions that are displayed in the ``explain`` output. To get the full listing
+ of parser expressions, you may have to set ``ParserElement.verbose_stacktrace = True``
+ """
+ return self.explain_exception(self, depth)
+
+ # Compatibility synonyms
+ # fmt: off
+ markInputline = replaced_by_pep8("markInputline", mark_input_line)
+ # fmt: on
+
+
+class ParseException(ParseBaseException):
+ """
+ Exception thrown when a parse expression doesn't match the input string
+
+ Example::
+
+ integer = Word(nums).set_name("integer")
+ try:
+ integer.parse_string("ABC")
+ except ParseException as pe:
+ print(pe)
+ print(f"column: {pe.column}")
+
+ prints::
+
+ Expected integer (at char 0), (line:1, col:1) column: 1
+
+ """
+
+
+class ParseFatalException(ParseBaseException):
+ """
+ User-throwable exception thrown when inconsistent parse content
+ is found; stops all parsing immediately
+ """
+
+
+class ParseSyntaxException(ParseFatalException):
+ """
+ Just like :class:`ParseFatalException`, but thrown internally
+ when an :class:`ErrorStop` ('-' operator) indicates
+ that parsing is to stop immediately because an unbacktrackable
+ syntax error has been found.
+ """
+
+
+class RecursiveGrammarException(Exception):
+ """
+ Exception thrown by :class:`ParserElement.validate` if the
+ grammar could be left-recursive; parser may need to enable
+ left recursion using :class:`ParserElement.enable_left_recursion`
+
+ Deprecated: only used by deprecated method ParserElement.validate.
+ """
+
+ def __init__(self, parseElementList):
+ self.parseElementTrace = parseElementList
+
+ def __str__(self) -> str:
+ return f"RecursiveGrammarException: {self.parseElementTrace}"
diff --git a/evalkit_tf437/lib/python3.10/site-packages/pyparsing/helpers.py b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/helpers.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2bd05f3d39a6652ed8dfb44769cdc4bd5a17754
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/helpers.py
@@ -0,0 +1,1086 @@
+# helpers.py
+import html.entities
+import operator
+import re
+import sys
+import typing
+
+from . import __diag__
+from .core import *
+from .util import (
+ _bslash,
+ _flatten,
+ _escape_regex_range_chars,
+ make_compressed_re,
+ replaced_by_pep8,
+)
+
+
+#
+# global helpers
+#
def counted_array(
    expr: ParserElement,
    int_expr: typing.Optional[ParserElement] = None,
    *,
    intExpr: typing.Optional[ParserElement] = None,
) -> ParserElement:
    """Helper to define a counted list of expressions.

    This helper defines a pattern of the form::

        integer expr expr expr...

    where the leading integer tells how many expr expressions follow.
    The matched tokens returns the array of expr tokens as a list - the
    leading count token is suppressed.

    If ``int_expr`` is specified, it should be a pyparsing expression
    that produces an integer value.

    Example::

        counted_array(Word(alphas)).parse_string('2 ab cd ef')  # -> ['ab', 'cd']

        # in this parser, the leading integer value is given in binary,
        # '10' indicating that 2 values are in the array
        binary_constant = Word('01').set_parse_action(lambda t: int(t[0], 2))
        counted_array(Word(alphas), int_expr=binary_constant).parse_string('10 ab cd ef')  # -> ['ab', 'cd']

        # if other fields must be parsed after the count but before the
        # list items, give the fields results names and they will
        # be preserved in the returned ParseResults:
        count_with_metadata = integer + Word(alphas)("type")
        typed_array = counted_array(Word(alphanums), int_expr=count_with_metadata)("items")
        result = typed_array.parse_string("3 bool True True False")
        print(result.dump())

        # prints
        # ['True', 'True', 'False']
        # - items: ['True', 'True', 'False']
        # - type: 'bool'
    """
    # prefer the legacy keyword when given, else fall back to the PEP8 name
    intExpr = intExpr or int_expr
    array_expr = Forward()

    def count_field_parse_action(s, l, t):
        # once the count is parsed, rebind the Forward to exactly n copies of expr
        nonlocal array_expr
        n = t[0]
        array_expr <<= (expr * n) if n else Empty()
        # clear list contents, but keep any named results
        del t[:]

    if intExpr is None:
        # default count expression: a plain decimal integer
        intExpr = Word(nums).set_parse_action(lambda t: int(t[0]))
    else:
        # copy so the caller's expression is not mutated by the parse action added below
        intExpr = intExpr.copy()
    intExpr.set_name("arrayLen")
    # call_during_try=True: the Forward must be rebound even during lookahead/backtracking
    intExpr.add_parse_action(count_field_parse_action, call_during_try=True)
    return (intExpr + array_expr).set_name(f"(len) {expr}...")
+
+
def match_previous_literal(expr: ParserElement) -> ParserElement:
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks for
    a 'repeat' of a previous expression. For example::

        first = Word(nums)
        second = match_previous_literal(first)
        match_expr = first + ":" + second

    will match ``"1:1"``, but not ``"1:2"``. Because this
    matches a previous literal, will also match the leading
    ``"1:1"`` in ``"1:10"``. If this is not desired, use
    :class:`match_previous_expr`. Do *not* use with packrat parsing
    enabled.
    """
    rep = Forward()

    def copy_token_to_repeater(s, l, t):
        # rebind rep to literally match whatever tokens were just parsed
        if not t:
            # nothing was matched - the repeat matches empty
            rep << Empty()
            return

        if len(t) == 1:
            # single token - match it directly as a literal
            rep << t[0]
            return

        # multiple tokens - flatten any nesting and require each literal in sequence
        tflat = _flatten(t.as_list())
        rep << And(Literal(tt) for tt in tflat)

    # callDuringTry=True: rep must track expr even while alternatives are being tried
    expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
    rep.set_name("(prev) " + str(expr))
    return rep
+
+
def match_previous_expr(expr: ParserElement) -> ParserElement:
    """Helper to define an expression that is indirectly defined from
    the tokens matched in a previous expression, that is, it looks for
    a 'repeat' of a previous expression. For example::

        first = Word(nums)
        second = match_previous_expr(first)
        match_expr = first + ":" + second

    will match ``"1:1"``, but not ``"1:2"``. Because this
    matches by expressions, will *not* match the leading ``"1:1"``
    in ``"1:10"``; the expressions are evaluated first, and then
    compared, so ``"1"`` is compared with ``"10"``. Do *not* use
    with packrat parsing enabled.
    """
    rep = Forward()
    # reparse with a copy of expr, then verify the new tokens equal the earlier ones
    e2 = expr.copy()
    rep <<= e2

    def copy_token_to_repeater(s, l, t):
        matchTokens = _flatten(t.as_list())

        def must_match_these_tokens(s, l, t):
            theseTokens = _flatten(t.as_list())
            if theseTokens != matchTokens:
                # fix: insert the missing space between "found" and the token list
                raise ParseException(
                    s, l, f"Expected {matchTokens}, found {theseTokens}"
                )

        # callDuringTry=True: the check must run even during lookahead/backtracking
        rep.set_parse_action(must_match_these_tokens, callDuringTry=True)

    expr.add_parse_action(copy_token_to_repeater, callDuringTry=True)
    rep.set_name("(prev) " + str(expr))
    return rep
+
+
def one_of(
    strs: Union[typing.Iterable[str], str],
    caseless: bool = False,
    use_regex: bool = True,
    as_keyword: bool = False,
    *,
    useRegex: bool = True,
    asKeyword: bool = False,
) -> ParserElement:
    """Helper to quickly define a set of alternative :class:`Literal` s,
    and makes sure to do longest-first testing when there is a conflict,
    regardless of the input order, but returns
    a :class:`MatchFirst` for best performance.

    Parameters:

    - ``strs`` - a string of space-delimited literals, or a collection of
      string literals
    - ``caseless`` - treat all literals as caseless - (default= ``False``)
    - ``use_regex`` - as an optimization, will
      generate a :class:`Regex` object; otherwise, will generate
      a :class:`MatchFirst` object (if ``caseless=True`` or ``as_keyword=True``, or if
      creating a :class:`Regex` raises an exception) - (default= ``True``)
    - ``as_keyword`` - enforce :class:`Keyword`-style matching on the
      generated expressions - (default= ``False``)
    - ``asKeyword`` and ``useRegex`` are retained for pre-PEP8 compatibility,
      but will be removed in a future release

    Example::

        comp_oper = one_of("< = > <= >= !=")
        var = Word(alphas)
        number = Word(nums)
        term = var | number
        comparison_expr = term + comp_oper + term
        print(comparison_expr.search_string("B = 12  AA=23 B<=AA AA>12"))

    prints::

        [['B', '=', '12'], ['AA', '=', '23'], ['B', '<=', 'AA'], ['AA', '>', '12']]
    """
    # merge PEP8 and legacy argument spellings
    asKeyword = asKeyword or as_keyword
    useRegex = useRegex and use_regex

    # a string passed for `caseless` suggests the caller passed multiple string
    # args positionally instead of one space-delimited string
    if (
        isinstance(caseless, str_type)
        and __diag__.warn_on_multiple_string_args_to_oneof
    ):
        warnings.warn(
            "More than one string argument passed to one_of, pass"
            " choices as a list or space-delimited string",
            stacklevel=2,
        )

    if caseless:
        is_equal = lambda a, b: a.upper() == b.upper()
        masks = lambda a, b: b.upper().startswith(a.upper())
        parse_element_class = CaselessKeyword if asKeyword else CaselessLiteral
    else:
        is_equal = operator.eq
        masks = lambda a, b: b.startswith(a)
        parse_element_class = Keyword if asKeyword else Literal

    symbols: list[str]
    if isinstance(strs, str_type):
        strs = typing.cast(str, strs)
        symbols = strs.split()
    elif isinstance(strs, Iterable):
        symbols = list(strs)
    else:
        raise TypeError("Invalid argument to one_of, expected string or iterable")
    if not symbols:
        return NoMatch()

    # reorder given symbols to take care to avoid masking longer choices with shorter ones
    # (but only if the given symbols are not just single characters)
    i = 0
    while i < len(symbols) - 1:
        cur = symbols[i]
        for j, other in enumerate(symbols[i + 1 :]):
            if is_equal(other, cur):
                # drop duplicate symbol
                del symbols[i + j + 1]
                break
            if len(other) > len(cur) and masks(cur, other):
                # `other` would be masked by the shorter `cur` - move it ahead of cur
                del symbols[i + j + 1]
                symbols.insert(i, other)
                break
        else:
            # no change made - advance to next symbol
            i += 1

    if useRegex:
        re_flags: int = re.IGNORECASE if caseless else 0

        try:
            if all(len(sym) == 1 for sym in symbols):
                # symbols are just single characters, create range regex pattern
                patt = f"[{''.join(_escape_regex_range_chars(sym) for sym in symbols)}]"
            else:
                patt = "|".join(re.escape(sym) for sym in symbols)

            # wrap with \b word break markers if defining as keywords
            if asKeyword:
                patt = rf"\b(?:{patt})\b"

            ret = Regex(patt, flags=re_flags).set_name(" | ".join(symbols))

            if caseless:
                # add parse action to return symbols as specified, not in random
                # casing as found in input string
                symbol_map = {sym.lower(): sym for sym in symbols}
                ret.add_parse_action(lambda s, l, t: symbol_map[t[0].lower()])

            return ret

        except re.error:
            warnings.warn(
                "Exception creating Regex for one_of, building MatchFirst", stacklevel=2
            )

    # last resort, just use MatchFirst
    return MatchFirst(parse_element_class(sym) for sym in symbols).set_name(
        " | ".join(symbols)
    )
+
+
def dict_of(key: ParserElement, value: ParserElement) -> ParserElement:
    """Helper to easily and clearly define a dictionary by specifying
    the respective patterns for the key and value.

    Equivalent to ``Dict(OneOrMore(Group(key + value)))`` - it takes care
    of composing :class:`Dict`, :class:`OneOrMore`, and :class:`Group` in
    the proper order. The key pattern can include delimiting markers or
    punctuation, as long as they are suppressed, thereby leaving the
    significant key text. The value pattern can include named results, so
    that the :class:`Dict` results can include named token fields.

    Example::

        text = "shape: SQUARE posn: upper left color: light blue texture: burlap"
        attr_label = label
        attr_value = Suppress(':') + OneOrMore(data_word, stop_on=label).set_parse_action(' '.join)

        # similar to Dict, but simpler call format
        result = dict_of(attr_label, attr_value).parse_string(text)
        print(result.dump())
        print(result['shape'])
        print(result.shape)  # object attribute access works too
        print(result.as_dict())

    prints::

        [['shape', 'SQUARE'], ['posn', 'upper left'], ['color', 'light blue'], ['texture', 'burlap']]
        - color: 'light blue'
        - posn: 'upper left'
        - shape: 'SQUARE'
        - texture: 'burlap'
        SQUARE
        SQUARE
        {'color': 'light blue', 'shape': 'SQUARE', 'posn': 'upper left', 'texture': 'burlap'}
    """
    entry = Group(key + value)
    return Dict(OneOrMore(entry))
+
+
def original_text_for(
    expr: ParserElement, as_string: bool = True, *, asString: bool = True
) -> ParserElement:
    """Helper to return the original, untokenized text for a given
    expression. Useful to restore the parsed fields of an HTML start
    tag into the raw tag text itself, or to revert separate tokens with
    intervening whitespace back to the original matching input text. By
    default, returns a string containing the original parsed text.

    If the optional ``as_string`` argument is passed as
    ``False``, then the return value is
    a :class:`ParseResults` containing any results names that
    were originally matched, and a single token containing the original
    matched text from the input string. So if the expression passed to
    :class:`original_text_for` contains expressions with defined
    results names, you must set ``as_string`` to ``False`` if you
    want to preserve those results name values.

    The ``asString`` pre-PEP8 argument is retained for compatibility,
    but will be removed in a future release.

    Example::

        src = "this is test <b> bold <i>text</i> </b> normal text "
        for tag in ("b", "i"):
            opener, closer = make_html_tags(tag)
            patt = original_text_for(opener + ... + closer)
            print(patt.search_string(src)[0])

    prints::

        ['<b> bold <i>text</i> </b>']
        ['<i>text</i>']
    """
    # both flags must be truthy for string output (legacy + PEP8 spellings)
    asString = asString and as_string

    # markers capture the start/end character offsets of the match
    locMarker = Empty().set_parse_action(lambda s, loc, t: loc)
    endlocMarker = locMarker.copy()
    # don't skip leading whitespace before recording the end location
    endlocMarker.callPreparse = False
    matchExpr = locMarker("_original_start") + expr + endlocMarker("_original_end")
    if asString:
        # return just the original slice of the input string
        extractText = lambda s, l, t: s[t._original_start : t._original_end]
    else:

        def extractText(s, l, t):
            # replace the tokens with the original slice, dropping the marker names
            t[:] = [s[t.pop("_original_start") : t.pop("_original_end")]]

    matchExpr.set_parse_action(extractText)
    # propagate the wrapped expression's ignore expressions
    matchExpr.ignoreExprs = expr.ignoreExprs
    matchExpr.suppress_warning(Diagnostics.warn_ungrouped_named_tokens_in_collection)
    return matchExpr
+
+
def ungroup(expr: ParserElement) -> ParserElement:
    """
    Helper to undo pyparsing's default grouping of And expressions,
    even if all but one are non-empty.
    """

    def _take_first(t):
        return t[0]

    return TokenConverter(expr).add_parse_action(_take_first)
+
+
def locatedExpr(expr: ParserElement) -> ParserElement:
    """
    (DEPRECATED - future code should use the :class:`Located` class)
    Helper to decorate a returned token with its starting and ending
    locations in the input string.

    This helper adds the following results names:

    - ``locn_start`` - location where matched expression begins
    - ``locn_end`` - location where matched expression ends
    - ``value`` - the actual parsed results

    Be careful if the input text contains tab characters, you
    may want to call :class:`ParserElement.parse_with_tabs`

    Example::

        wd = Word(alphas)
        for match in locatedExpr(wd).search_string("ljsdf123lksdjjf123lkkjj1222"):
            print(match)

    prints::

        [[0, 'ljsdf', 5]]
        [[8, 'lksdjjf', 15]]
        [[18, 'lkkjj', 23]]
    """
    # zero-width marker whose parse action records the current location
    locator = Empty().set_parse_action(lambda ss, ll, tt: ll)
    return Group(
        locator("locn_start")
        + expr("value")
        # leaveWhitespace so the end location is recorded before whitespace skipping
        + locator.copy().leaveWhitespace()("locn_end")
    )
+
+
def nested_expr(
    opener: Union[str, ParserElement] = "(",
    closer: Union[str, ParserElement] = ")",
    content: typing.Optional[ParserElement] = None,
    ignore_expr: ParserElement = quoted_string(),
    *,
    ignoreExpr: ParserElement = quoted_string(),
) -> ParserElement:
    """Helper method for defining nested lists enclosed in opening and
    closing delimiters (``"("`` and ``")"`` are the default).

    Parameters:

    - ``opener`` - opening character for a nested list
      (default= ``"("``); can also be a pyparsing expression
    - ``closer`` - closing character for a nested list
      (default= ``")"``); can also be a pyparsing expression
    - ``content`` - expression for items within the nested lists
      (default= ``None``)
    - ``ignore_expr`` - expression for ignoring opening and closing delimiters
      (default= :class:`quoted_string`)
    - ``ignoreExpr`` - this pre-PEP8 argument is retained for compatibility
      but will be removed in a future release

    If an expression is not provided for the content argument, the
    nested expression will capture all whitespace-delimited content
    between delimiters as a list of separate values.

    Use the ``ignore_expr`` argument to define expressions that may
    contain opening or closing characters that should not be treated as
    opening or closing characters for nesting, such as quoted_string or
    a comment expression. Specify multiple expressions using an
    :class:`Or` or :class:`MatchFirst`. The default is
    :class:`quoted_string`, but if no expressions are to be ignored, then
    pass ``None`` for this argument.

    Example::

        data_type = one_of("void int short long char float double")
        decl_data_type = Combine(data_type + Opt(Word('*')))
        ident = Word(alphas+'_', alphanums+'_')
        number = pyparsing_common.number
        arg = Group(decl_data_type + ident)
        LPAR, RPAR = map(Suppress, "()")

        code_body = nested_expr('{', '}', ignore_expr=(quoted_string | c_style_comment))

        c_function = (decl_data_type("type")
                      + ident("name")
                      + LPAR + Opt(DelimitedList(arg), [])("args") + RPAR
                      + code_body("body"))
        c_function.ignore(c_style_comment)

        source_code = '''
            int is_odd(int x) {
                return (x%2);
            }

            int dec_to_hex(char hchar) {
                if (hchar >= '0' && hchar <= '9') {
                    return (ord(hchar)-ord('0'));
                } else {
                    return (10+ord(hchar)-ord('A'));
                }
            }
        '''
        for func in c_function.search_string(source_code):
            print("%(name)s (%(type)s) args: %(args)s" % func)


    prints::

        is_odd (int) args: [['int', 'x']]
        dec_to_hex (int) args: [['char', 'hchar']]
    """
    # Reconcile the legacy and PEP8 spellings: if they differ, prefer the PEP8
    # argument unless the legacy one was explicitly overridden.
    # NOTE(review): both defaults are evaluated once at import time, and the
    # reconciliation relies on ``quoted_string()`` instances comparing equal -
    # confirm this holds for the core's __eq__ semantics before refactoring.
    if ignoreExpr != ignore_expr:
        ignoreExpr = ignore_expr if ignoreExpr == quoted_string() else ignoreExpr
    if opener == closer:
        raise ValueError("opening and closing strings cannot be the same")
    if content is None:
        # no content expression given - build a default one from the delimiters
        if isinstance(opener, str_type) and isinstance(closer, str_type):
            opener = typing.cast(str, opener)
            closer = typing.cast(str, closer)
            if len(opener) == 1 and len(closer) == 1:
                # single-character delimiters: exclude them (and whitespace) by character class
                if ignoreExpr is not None:
                    content = Combine(
                        OneOrMore(
                            ~ignoreExpr
                            + CharsNotIn(
                                opener + closer + ParserElement.DEFAULT_WHITE_CHARS,
                                exact=1,
                            )
                        )
                    ).set_parse_action(lambda t: t[0].strip())
                else:
                    content = empty.copy() + CharsNotIn(
                        opener + closer + ParserElement.DEFAULT_WHITE_CHARS
                    ).set_parse_action(lambda t: t[0].strip())
            else:
                # multi-character delimiters: exclude them by negative lookahead
                if ignoreExpr is not None:
                    content = Combine(
                        OneOrMore(
                            ~ignoreExpr
                            + ~Literal(opener)
                            + ~Literal(closer)
                            + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
                        )
                    ).set_parse_action(lambda t: t[0].strip())
                else:
                    content = Combine(
                        OneOrMore(
                            ~Literal(opener)
                            + ~Literal(closer)
                            + CharsNotIn(ParserElement.DEFAULT_WHITE_CHARS, exact=1)
                        )
                    ).set_parse_action(lambda t: t[0].strip())
        else:
            raise ValueError(
                "opening and closing arguments must be strings if no content expression is given"
            )
    # recursive definition: a group is opener + (ignored | nested group | content)* + closer
    ret = Forward()
    if ignoreExpr is not None:
        ret <<= Group(
            Suppress(opener) + ZeroOrMore(ignoreExpr | ret | content) + Suppress(closer)
        )
    else:
        ret <<= Group(Suppress(opener) + ZeroOrMore(ret | content) + Suppress(closer))
    ret.set_name(f"nested {opener}{closer} expression")
    # don't override error message from content expressions
    ret.errmsg = None
    return ret
+
+
def _makeTags(tagStr, xml, suppress_LT=Suppress("<"), suppress_GT=Suppress(">")):
    """Internal helper to construct opening and closing tag expressions, given a tag name"""
    if isinstance(tagStr, str_type):
        resname = tagStr
        tagStr = Keyword(tagStr, caseless=not xml)
    else:
        resname = tagStr.name

    tagAttrName = Word(alphas, alphanums + "_-:")
    if xml:
        # XML mode: attribute values must be double-quoted
        tagAttrValue = dbl_quoted_string.copy().set_parse_action(remove_quotes)
        openTag = (
            suppress_LT
            + tagStr("tag")
            + Dict(ZeroOrMore(Group(tagAttrName + Suppress("=") + tagAttrValue)))
            + Opt("/", default=[False])("empty").set_parse_action(
                lambda s, l, t: t[0] == "/"
            )
            + suppress_GT
        )
    else:
        # HTML mode: attribute values may be quoted or unquoted; names lower-cased
        tagAttrValue = quoted_string.copy().set_parse_action(remove_quotes) | Word(
            printables, exclude_chars=">"
        )
        openTag = (
            suppress_LT
            + tagStr("tag")
            + Dict(
                ZeroOrMore(
                    Group(
                        tagAttrName.set_parse_action(lambda t: t[0].lower())
                        + Opt(Suppress("=") + tagAttrValue)
                    )
                )
            )
            + Opt("/", default=[False])("empty").set_parse_action(
                lambda s, l, t: t[0] == "/"
            )
            + suppress_GT
        )
    # fix: the closing tag must match "</tag>" - the "</" literal had been lost
    # (Literal("") is invalid, and the tag_body/SkipTo usage below requires a
    # real closing-tag expression)
    closeTag = Combine(Literal("</") + tagStr + ">", adjacent=False)

    openTag.set_name(f"<{resname}>")
    # add start<tag> results name in parse action now that ungrouped names are not reported at two levels
    openTag.add_parse_action(
        lambda t: t.__setitem__(
            "start" + "".join(resname.replace(":", " ").title().split()), t.copy()
        )
    )
    closeTag = closeTag(
        "end" + "".join(resname.replace(":", " ").title().split())
    ).set_name(f"</{resname}>")
    openTag.tag = resname
    closeTag.tag = resname
    openTag.tag_body = SkipTo(closeTag())
    return openTag, closeTag
+
+
def make_html_tags(
    tag_str: Union[str, ParserElement]
) -> tuple[ParserElement, ParserElement]:
    """Helper to construct opening and closing tag expressions for HTML,
    given a tag name. Matches tags in either upper or lower case,
    attributes with namespaces and with quoted or unquoted values.

    Example::

        text = '<td>More info at the <a href="https://github.com/pyparsing/pyparsing/wiki">pyparsing</a> wiki page</td>'
        # make_html_tags returns pyparsing expressions for the opening and
        # closing tags as a 2-tuple
        a, a_end = make_html_tags("A")
        link_expr = a + SkipTo(a_end)("link_text") + a_end

        for link in link_expr.search_string(text):
            # attributes in the <A> tag (like "href" shown here) are
            # also accessible as named results
            print(link.link_text, '->', link.href)

    prints::

        pyparsing -> https://github.com/pyparsing/pyparsing/wiki
    """
    # xml=False: caseless tag matching, relaxed attribute syntax
    return _makeTags(tag_str, False)
+
+
def make_xml_tags(
    tag_str: Union[str, ParserElement]
) -> tuple[ParserElement, ParserElement]:
    """Helper to construct opening and closing tag expressions for XML,
    given a tag name. Matches tags only in the given upper/lower case.

    Example: similar to :class:`make_html_tags`
    """
    # xml=True: case-sensitive tag matching, double-quoted attribute values only
    return _makeTags(tag_str, True)
+
+
any_open_tag: ParserElement
any_close_tag: ParserElement
# generic open/close tag expressions matching any tag name
any_open_tag, any_close_tag = make_html_tags(
    Word(alphas, alphanums + "_:").set_name("any tag")
)

# map HTML5 entity names (sans trailing ";") to their replacement characters
_htmlEntityMap = {k.rstrip(";"): v for k, v in html.entities.html5.items()}
_most_common_entities = "nbsp lt gt amp quot apos cent pound euro copy".replace(
    " ", "|"
)
# fix: restore the "entity" named group, which had been lost; replace_html_entity
# reads the matched name via ``t.entity``, so the group name is required
common_html_entity = Regex(
    lambda: f"&(?P<entity>{_most_common_entities}|{make_compressed_re(_htmlEntityMap)});"
).set_name("common HTML entity")
+
+
def replace_html_entity(s, l, t):
    """Helper parser action to replace common HTML entities with their special characters"""
    # look up the matched entity name (expected as the "entity" named result);
    # unknown names map to None
    return _htmlEntityMap.get(t.entity)
+
+
class OpAssoc(Enum):
    """Enumeration of operator associativity
    - used in constructing InfixNotationOperatorSpec for :class:`infix_notation`"""

    # LEFT: operators group left-to-right; RIGHT: right-to-left
    LEFT = 1
    RIGHT = 2
+
+
# The operator element of an infix_notation operator spec: an expression, a
# string (converted to a Literal), or - for ternary operators - a 2-tuple of
# the two separating operators.
InfixNotationOperatorArgType = Union[
    ParserElement, str, tuple[Union[ParserElement, str], Union[ParserElement, str]]
]
# One precedence level: (op_expr, num_operands, associativity[, parse_action])
InfixNotationOperatorSpec = Union[
    tuple[
        InfixNotationOperatorArgType,
        int,
        OpAssoc,
        typing.Optional[ParseAction],
    ],
    tuple[
        InfixNotationOperatorArgType,
        int,
        OpAssoc,
    ],
]
+
+
def infix_notation(
    base_expr: ParserElement,
    op_list: list[InfixNotationOperatorSpec],
    lpar: Union[str, ParserElement] = Suppress("("),
    rpar: Union[str, ParserElement] = Suppress(")"),
) -> ParserElement:
    """Helper method for constructing grammars of expressions made up of
    operators working in a precedence hierarchy. Operators may be unary
    or binary, left- or right-associative. Parse actions can also be
    attached to operator expressions. The generated parser will also
    recognize the use of parentheses to override operator precedences
    (see example below).

    Note: if you define a deep operator list, you may see performance
    issues when using infix_notation. See
    :class:`ParserElement.enable_packrat` for a mechanism to potentially
    improve your parser performance.

    Parameters:

    - ``base_expr`` - expression representing the most basic operand to
      be used in the expression
    - ``op_list`` - list of tuples, one for each operator precedence level
      in the expression grammar; each tuple is of the form ``(op_expr,
      num_operands, right_left_assoc, (optional)parse_action)``, where:

      - ``op_expr`` is the pyparsing expression for the operator; may also
        be a string, which will be converted to a Literal; if ``num_operands``
        is 3, ``op_expr`` is a tuple of two expressions, for the two
        operators separating the 3 terms
      - ``num_operands`` is the number of terms for this operator (must be 1,
        2, or 3)
      - ``right_left_assoc`` is the indicator whether the operator is right
        or left associative, using the pyparsing-defined constants
        ``OpAssoc.RIGHT`` and ``OpAssoc.LEFT``.
      - ``parse_action`` is the parse action to be associated with
        expressions matching this operator expression (the parse action
        tuple member may be omitted); if the parse action is passed
        a tuple or list of functions, this is equivalent to calling
        ``set_parse_action(*fn)``
        (:class:`ParserElement.set_parse_action`)
    - ``lpar`` - expression for matching left-parentheses; if passed as a
      str, then will be parsed as ``Suppress(lpar)``. If lpar is passed as
      an expression (such as ``Literal('(')``), then it will be kept in
      the parsed results, and grouped with them. (default= ``Suppress('(')``)
    - ``rpar`` - expression for matching right-parentheses; if passed as a
      str, then will be parsed as ``Suppress(rpar)``. If rpar is passed as
      an expression (such as ``Literal(')')``), then it will be kept in
      the parsed results, and grouped with them. (default= ``Suppress(')')``)

    Example::

        # simple example of four-function arithmetic with ints and
        # variable names
        integer = pyparsing_common.signed_integer
        varname = pyparsing_common.identifier

        arith_expr = infix_notation(integer | varname,
            [
            ('-', 1, OpAssoc.RIGHT),
            (one_of('* /'), 2, OpAssoc.LEFT),
            (one_of('+ -'), 2, OpAssoc.LEFT),
            ])

        arith_expr.run_tests('''
            5+3*6
            (5+3)*6
            -2--11
            ''', full_dump=False)

    prints::

        5+3*6
        [[5, '+', [3, '*', 6]]]

        (5+3)*6
        [[[5, '+', 3], '*', 6]]

        (5+x)*y
        [[[5, '+', 'x'], '*', 'y']]

        -2--11
        [[['-', 2], '-', ['-', 11]]]
    """

    # captive version of FollowedBy that does not do parse actions or capture results names
    class _FB(FollowedBy):
        def parseImpl(self, instring, loc, doActions=True):
            self.expr.try_parse(instring, loc)
            return loc, []

    _FB.__name__ = "FollowedBy>"

    ret = Forward()
    if isinstance(lpar, str):
        lpar = Suppress(lpar)
    if isinstance(rpar, str):
        rpar = Suppress(rpar)

    # if lpar and rpar are not suppressed, wrap in group
    if not (isinstance(lpar, Suppress) and isinstance(rpar, Suppress)):
        lastExpr = base_expr | Group(lpar + ret + rpar).set_name(
            f"nested_{base_expr.name}"
        )
    else:
        lastExpr = base_expr | (lpar + ret + rpar).set_name(f"nested_{base_expr.name}")
    root_expr = lastExpr

    arity: int
    # consistency fix: annotate with the canonical OpAssoc name (annotation only,
    # never evaluated at runtime) instead of the legacy opAssoc synonym
    rightLeftAssoc: OpAssoc
    pa: typing.Optional[ParseAction]
    opExpr1: ParserElement
    opExpr2: ParserElement
    # build each precedence level on top of the previous (tighter) one
    for operDef in op_list:
        opExpr, arity, rightLeftAssoc, pa = (operDef + (None,))[:4]  # type: ignore[assignment]
        if isinstance(opExpr, str_type):
            opExpr = ParserElement._literalStringClass(opExpr)
        opExpr = typing.cast(ParserElement, opExpr)
        if arity == 3:
            if not isinstance(opExpr, (tuple, list)) or len(opExpr) != 2:
                raise ValueError(
                    "if numterms=3, opExpr must be a tuple or list of two expressions"
                )
            opExpr1, opExpr2 = opExpr
            term_name = f"{opExpr1}{opExpr2} term"
        else:
            term_name = f"{opExpr} term"

        if not 1 <= arity <= 3:
            raise ValueError("operator must be unary (1), binary (2), or ternary (3)")

        if rightLeftAssoc not in (OpAssoc.LEFT, OpAssoc.RIGHT):
            raise ValueError("operator must indicate right or left associativity")

        thisExpr: ParserElement = Forward().set_name(term_name)
        thisExpr = typing.cast(Forward, thisExpr)
        if rightLeftAssoc is OpAssoc.LEFT:
            if arity == 1:
                matchExpr = _FB(lastExpr + opExpr) + Group(lastExpr + opExpr[1, ...])
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = _FB(lastExpr + opExpr + lastExpr) + Group(
                        lastExpr + (opExpr + lastExpr)[1, ...]
                    )
                else:
                    matchExpr = _FB(lastExpr + lastExpr) + Group(lastExpr[2, ...])
            elif arity == 3:
                matchExpr = _FB(
                    lastExpr + opExpr1 + lastExpr + opExpr2 + lastExpr
                ) + Group(lastExpr + OneOrMore(opExpr1 + lastExpr + opExpr2 + lastExpr))
        elif rightLeftAssoc is OpAssoc.RIGHT:
            if arity == 1:
                # try to avoid LR with this extra test
                if not isinstance(opExpr, Opt):
                    opExpr = Opt(opExpr)
                matchExpr = _FB(opExpr.expr + thisExpr) + Group(opExpr + thisExpr)
            elif arity == 2:
                if opExpr is not None:
                    matchExpr = _FB(lastExpr + opExpr + thisExpr) + Group(
                        lastExpr + (opExpr + thisExpr)[1, ...]
                    )
                else:
                    matchExpr = _FB(lastExpr + thisExpr) + Group(
                        lastExpr + thisExpr[1, ...]
                    )
            elif arity == 3:
                matchExpr = _FB(
                    lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr
                ) + Group(lastExpr + opExpr1 + thisExpr + opExpr2 + thisExpr)
        if pa:
            if isinstance(pa, (tuple, list)):
                matchExpr.set_parse_action(*pa)
            else:
                matchExpr.set_parse_action(pa)
        # consistency fix: use set_name (PEP8 form used throughout this module)
        # instead of the legacy setName synonym
        thisExpr <<= (matchExpr | lastExpr).set_name(term_name)
        lastExpr = thisExpr
    ret <<= lastExpr
    root_expr.set_name("base_expr")
    return ret
+
+
def indentedBlock(blockStatementExpr, indentStack, indent=True, backup_stacks=[]):
    """
    (DEPRECATED - use :class:`IndentedBlock` class instead)
    Helper method for defining space-delimited indentation blocks,
    such as those used to define block statements in Python source code.

    Parameters:

    - ``blockStatementExpr`` - expression defining syntax of statement that
      is repeated within the indented block
    - ``indentStack`` - list created by caller to manage indentation stack
      (multiple ``statementWithIndentedBlock`` expressions within a single
      grammar should share a common ``indentStack``)
    - ``indent`` - boolean indicating whether block must be indented beyond
      the current level; set to ``False`` for block of left-most statements
      (default= ``True``)

    A valid block must contain at least one ``blockStatement``.

    (Note that indentedBlock uses internal parse actions which make it
    incompatible with packrat parsing.)

    Example::

        data = '''
        def A(z):
          A1
          B = 100
          G = A2
          A2
          A3
        B
        def BB(a,b,c):
          BB1
          def BBA():
            bba1
            bba2
            bba3
        C
        D
        def spam(x,y):
             def eggs(z):
                 pass
        '''


        indentStack = [1]
        stmt = Forward()

        identifier = Word(alphas, alphanums)
        funcDecl = ("def" + identifier + Group("(" + Opt(delimitedList(identifier)) + ")") + ":")
        func_body = indentedBlock(stmt, indentStack)
        funcDef = Group(funcDecl + func_body)

        rvalue = Forward()
        funcCall = Group(identifier + "(" + Opt(delimitedList(rvalue)) + ")")
        rvalue << (funcCall | identifier | Word(nums))
        assignment = Group(identifier + "=" + rvalue)
        stmt << (funcDef | assignment | identifier)

        module_body = stmt[1, ...]

        parseTree = module_body.parseString(data)
        parseTree.pprint()

    prints::

        [['def',
          'A',
          ['(', 'z', ')'],
          ':',
          [['A1'], [['B', '=', '100']], [['G', '=', 'A2']], ['A2'], ['A3']]],
         'B',
         ['def',
          'BB',
          ['(', 'a', 'b', 'c', ')'],
          ':',
          [['BB1'], [['def', 'BBA', ['(', ')'], ':', [['bba1'], ['bba2'], ['bba3']]]]]],
         'C',
         'D',
         ['def',
          'spam',
          ['(', 'x', 'y', ')'],
          ':',
          [[['def', 'eggs', ['(', 'z', ')'], ':', [['pass']]]]]]]
    """
    # NOTE(review): ``backup_stacks=[]`` is a mutable default shared across
    # calls; the parse/fail actions below push and pop from it, so the sharing
    # appears to be load-bearing for grammars with multiple indentedBlocks -
    # confirm before "fixing" it.
    backup_stacks.append(indentStack[:])

    def reset_stack():
        # on parse failure, restore the caller's indent stack from the backup
        indentStack[:] = backup_stacks[-1]

    def checkPeerIndent(s, l, t):
        if l >= len(s):
            return
        curCol = col(l, s)
        if curCol != indentStack[-1]:
            if curCol > indentStack[-1]:
                raise ParseException(s, l, "illegal nesting")
            raise ParseException(s, l, "not a peer entry")

    def checkSubIndent(s, l, t):
        curCol = col(l, s)
        if curCol > indentStack[-1]:
            # deeper indentation - push a new level
            indentStack.append(curCol)
        else:
            raise ParseException(s, l, "not a subentry")

    def checkUnindent(s, l, t):
        if l >= len(s):
            return
        curCol = col(l, s)
        if not (indentStack and curCol in indentStack):
            raise ParseException(s, l, "not an unindent")
        if curCol < indentStack[-1]:
            indentStack.pop()

    NL = OneOrMore(LineEnd().set_whitespace_chars("\t ").suppress())
    INDENT = (Empty() + Empty().set_parse_action(checkSubIndent)).set_name("INDENT")
    PEER = Empty().set_parse_action(checkPeerIndent).set_name("")
    UNDENT = Empty().set_parse_action(checkUnindent).set_name("UNINDENT")
    if indent:
        smExpr = Group(
            Opt(NL)
            + INDENT
            + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
            + UNDENT
        )
    else:
        smExpr = Group(
            Opt(NL)
            + OneOrMore(PEER + Group(blockStatementExpr) + Opt(NL))
            + Opt(UNDENT)
        )

    # add a parse action to remove backup_stack from list of backups
    smExpr.add_parse_action(
        lambda: backup_stacks.pop(-1) and None if backup_stacks else None
    )
    smExpr.set_fail_action(lambda a, b, c, d: reset_stack())
    # allow backslash line-continuations within block statements
    blockStatementExpr.ignore(_bslash + LineEnd())
    return smExpr.set_name("indented block")
+
+
# it's easy to get these comment structures wrong - they're very common, so may as well make them available
c_style_comment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/").set_name(
    "C style comment"
)
"Comment of the form ``/* ... */``"

# fix: restore the HTML comment pattern - it had been lost, leaving an empty
# regex that matches the empty string everywhere instead of "<!-- ... -->"
html_comment = Regex(r"<!--[\s\S]*?-->").set_name("HTML comment")
"Comment of the form ``<!-- ... -->``"

rest_of_line = Regex(r".*").leave_whitespace().set_name("rest of line")
dbl_slash_comment = Regex(r"//(?:\\\n|[^\n])*").set_name("// comment")
"Comment of the form ``// ... (to end of line)``"

cpp_style_comment = Combine(
    Regex(r"/\*(?:[^*]|\*(?!/))*") + "*/" | dbl_slash_comment
).set_name("C++ style comment")
"Comment of either form :class:`c_style_comment` or :class:`dbl_slash_comment`"

java_style_comment = cpp_style_comment
"Same as :class:`cpp_style_comment`"

python_style_comment = Regex(r"#.*").set_name("Python style comment")
"Comment of the form ``# ... (to end of line)``"
+
+
# build list of built-in expressions, for future reference if a global default value
# gets updated
# (collected by scanning this module's globals at import time for ParserElement instances)
_builtin_exprs: list[ParserElement] = [
    v for v in vars().values() if isinstance(v, ParserElement)
]
+
+
# compatibility function, superseded by DelimitedList class
def delimited_list(
    expr: Union[str, ParserElement],
    delim: Union[str, ParserElement] = ",",
    combine: bool = False,
    min: typing.Optional[int] = None,
    max: typing.Optional[int] = None,
    *,
    allow_trailing_delim: bool = False,
) -> ParserElement:
    """(DEPRECATED - use :class:`DelimitedList` class)

    Thin wrapper forwarding all arguments to :class:`DelimitedList`.
    """
    # NOTE(review): this def is later rebound by the
    # ``delimited_list = replaced_by_pep8("delimited_list", DelimitedList)``
    # synonym in the compatibility section below - confirm the shadowing is intended.
    return DelimitedList(
        expr, delim, combine, min, max, allow_trailing_delim=allow_trailing_delim
    )
+
+
# Compatibility synonyms
# (pre-PEP8 camelCase names aliased to their PEP 8 replacements; function-style
# helpers are wrapped via replaced_by_pep8)
# fmt: off
opAssoc = OpAssoc
anyOpenTag = any_open_tag
anyCloseTag = any_close_tag
commonHTMLEntity = common_html_entity
cStyleComment = c_style_comment
htmlComment = html_comment
restOfLine = rest_of_line
dblSlashComment = dbl_slash_comment
cppStyleComment = cpp_style_comment
javaStyleComment = java_style_comment
pythonStyleComment = python_style_comment
delimitedList = replaced_by_pep8("delimitedList", DelimitedList)
delimited_list = replaced_by_pep8("delimited_list", DelimitedList)
countedArray = replaced_by_pep8("countedArray", counted_array)
matchPreviousLiteral = replaced_by_pep8("matchPreviousLiteral", match_previous_literal)
matchPreviousExpr = replaced_by_pep8("matchPreviousExpr", match_previous_expr)
oneOf = replaced_by_pep8("oneOf", one_of)
dictOf = replaced_by_pep8("dictOf", dict_of)
originalTextFor = replaced_by_pep8("originalTextFor", original_text_for)
nestedExpr = replaced_by_pep8("nestedExpr", nested_expr)
makeHTMLTags = replaced_by_pep8("makeHTMLTags", make_html_tags)
makeXMLTags = replaced_by_pep8("makeXMLTags", make_xml_tags)
replaceHTMLEntity = replaced_by_pep8("replaceHTMLEntity", replace_html_entity)
infixNotation = replaced_by_pep8("infixNotation", infix_notation)
# fmt: on
diff --git a/evalkit_tf437/lib/python3.10/site-packages/pyparsing/py.typed b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_tf437/lib/python3.10/site-packages/pyparsing/results.py b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/results.py
new file mode 100644
index 0000000000000000000000000000000000000000..245847832a8f5a975a8c3d56d390385923508dcd
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/results.py
@@ -0,0 +1,815 @@
+# results.py
+from __future__ import annotations
+
+import collections
+from collections.abc import (
+ MutableMapping,
+ Mapping,
+ MutableSequence,
+ Iterator,
+ Iterable,
+)
+import pprint
+from typing import Any
+
+from .util import replaced_by_pep8
+
+
# Types accepted as "string" in isinstance checks below (both str and bytes).
str_type: tuple[type, ...] = (str, bytes)
# Concrete type of generator objects, used to detect generator inputs
# in ParseResults.__new__.
_generator_type = type((_ for _ in ()))
+
+
class _ParseResultsWithOffset:
    # Internal pairing of a named result value with the integer offset of that
    # value within its parent's token list; stored in ParseResults._tokdict.
    tup: tuple[ParseResults, int]
    __slots__ = ["tup"]

    def __init__(self, p1: ParseResults, p2: int):
        self.tup: tuple[ParseResults, int] = (p1, p2)

    def __getitem__(self, i):
        # tuple-style access: [0] -> value, [1] -> offset
        return self.tup[i]

    def __getstate__(self):
        # pickle as the bare tuple
        return self.tup

    def __setstate__(self, *args):
        self.tup = args[0]
+
+
class ParseResults:
    """Structured parse results, to provide multiple means of access to
    the parsed data:

    - as a list (``len(results)``)
    - by list index (``results[0], results[1]``, etc.)
    - by attribute (``results.<results_name>`` - see :class:`ParserElement.set_results_name`)

    Example::

        integer = Word(nums)
        date_str = (integer.set_results_name("year") + '/'
                    + integer.set_results_name("month") + '/'
                    + integer.set_results_name("day"))
        # equivalent form:
        # date_str = (integer("year") + '/'
        #             + integer("month") + '/'
        #             + integer("day"))

        # parse_string returns a ParseResults object
        result = date_str.parse_string("1999/12/31")

        def test(s, fn=repr):
            print(f"{s} -> {fn(eval(s))}")
        test("list(result)")
        test("result[0]")
        test("result['month']")
        test("result.day")
        test("'month' in result")
        test("'minutes' in result")
        test("result.dump()", str)

    prints::

        list(result) -> ['1999', '/', '12', '/', '31']
        result[0] -> '1999'
        result['month'] -> '12'
        result.day -> '31'
        'month' in result -> True
        'minutes' in result -> False
        result.dump() -> ['1999', '/', '12', '/', '31']
        - day: '31'
        - month: '12'
        - year: '1999'
    """

    # values treated as "empty" when deciding whether a named toklist
    # contributes anything in __init__
    _null_values: tuple[Any, ...] = (None, [], ())

    _name: str
    _parent: ParseResults
    _all_names: set[str]
    _modal: bool
    _toklist: list[Any]
    _tokdict: dict[str, Any]

    __slots__ = (
        "_name",
        "_parent",
        "_all_names",
        "_modal",
        "_toklist",
        "_tokdict",
    )

    class List(list):
        """
        Simple wrapper class to distinguish parsed list results that should be preserved
        as actual Python lists, instead of being converted to :class:`ParseResults`::

            LBRACK, RBRACK = map(pp.Suppress, "[]")
            element = pp.Forward()
            item = ppc.integer
            element_list = LBRACK + pp.DelimitedList(element) + RBRACK

            # add parse actions to convert from ParseResults to actual Python collection types
            def as_python_list(t):
                return pp.ParseResults.List(t.as_list())
            element_list.add_parse_action(as_python_list)

            element <<= item | element_list

            element.run_tests('''
                100
                [2,3,4]
                [[2, 1],3,4]
                [(2, 1),3,4]
                (2,3,4)
                ''', post_parse=lambda s, r: (r[0], type(r[0])))

        prints::

            100
            (100, <class 'int'>)

            [2,3,4]
            ([2, 3, 4], <class 'list'>)

            [[2, 1],3,4]
            ([[2, 1], 3, 4], <class 'list'>)

        (Used internally by :class:`Group` when `aslist=True`.)
        """

        def __new__(cls, contained=None):
            # only a real list may be wrapped; reject other iterables explicitly
            if contained is None:
                contained = []

            if not isinstance(contained, list):
                raise TypeError(
                    f"{cls.__name__} may only be constructed with a list, not {type(contained).__name__}"
                )

            return list.__new__(cls)

    def __new__(cls, toklist=None, name=None, **kwargs):
        # passing an existing ParseResults returns it unchanged (no copy)
        if isinstance(toklist, ParseResults):
            return toklist
        self = object.__new__(cls)
        self._name = None
        self._parent = None
        self._all_names = set()

        if toklist is None:
            self._toklist = []
        elif isinstance(toklist, (list, _generator_type)):
            # a ParseResults.List is kept as a single (list-valued) token;
            # ordinary lists/generators are expanded into the token list
            self._toklist = (
                [toklist[:]]
                if isinstance(toklist, ParseResults.List)
                else list(toklist)
            )
        else:
            # any other scalar becomes a one-element token list
            self._toklist = [toklist]
        self._tokdict = dict()
        return self

    # Performance tuning: we construct a *lot* of these, so keep this
    # constructor as small and fast as possible
    def __init__(
        self, toklist=None, name=None, asList=True, modal=True, isinstance=isinstance
    ) -> None:
        # NOTE: isinstance is rebound as a default arg for fast local lookup
        self._tokdict: dict[str, _ParseResultsWithOffset]
        self._modal = modal

        if name is None or name == "":
            return

        if isinstance(name, int):
            name = str(name)

        if not modal:
            # non-modal ("list all matches") names accumulate every value
            self._all_names = {name}

        self._name = name

        if toklist in self._null_values:
            return

        if isinstance(toklist, (str_type, type)):
            toklist = [toklist]

        if asList:
            # store the named value as a nested ParseResults at offset 0
            if isinstance(toklist, ParseResults):
                self[name] = _ParseResultsWithOffset(ParseResults(toklist._toklist), 0)
            else:
                self[name] = _ParseResultsWithOffset(ParseResults(toklist[0]), 0)
            self[name]._name = name
            return

        try:
            self[name] = toklist[0]
        except (KeyError, TypeError, IndexError):
            if toklist is not self:
                self[name] = toklist
            else:
                self._name = name

    def __getitem__(self, i):
        # int/slice -> positional access into the token list
        if isinstance(i, (int, slice)):
            return self._toklist[i]

        # modal name -> most recently assigned value
        if i not in self._all_names:
            return self._tokdict[i][-1][0]

        # "list all matches" name -> every accumulated value
        return ParseResults([v[0] for v in self._tokdict[i]])

    def __setitem__(self, k, v, isinstance=isinstance):
        # named values accumulate in _tokdict; positional values replace in _toklist
        if isinstance(v, _ParseResultsWithOffset):
            self._tokdict[k] = self._tokdict.get(k, list()) + [v]
            sub = v[0]
        elif isinstance(k, (int, slice)):
            self._toklist[k] = v
            sub = v
        else:
            self._tokdict[k] = self._tokdict.get(k, []) + [
                _ParseResultsWithOffset(v, 0)
            ]
            sub = v
        if isinstance(sub, ParseResults):
            sub._parent = self

    def __delitem__(self, i):
        # deleting by name only removes the named entry, not its tokens
        if not isinstance(i, (int, slice)):
            del self._tokdict[i]
            return

        mylen = len(self._toklist)
        del self._toklist[i]

        # convert int to slice
        if isinstance(i, int):
            if i < 0:
                i += mylen
            i = slice(i, i + 1)
        # get removed indices
        removed = list(range(*i.indices(mylen)))
        removed.reverse()
        # fixup indices in token dictionary
        for occurrences in self._tokdict.values():
            for j in removed:
                for k, (value, position) in enumerate(occurrences):
                    occurrences[k] = _ParseResultsWithOffset(
                        value, position - (position > j)
                    )

    def __contains__(self, k) -> bool:
        # membership tests results *names*, not token values
        return k in self._tokdict

    def __len__(self) -> int:
        return len(self._toklist)

    def __bool__(self) -> bool:
        # truthy if there are any tokens OR any named results
        return not not (self._toklist or self._tokdict)

    def __iter__(self) -> Iterator:
        return iter(self._toklist)

    def __reversed__(self) -> Iterator:
        return iter(self._toklist[::-1])

    def keys(self):
        # iterates results names (dict-like view)
        return iter(self._tokdict)

    def values(self):
        return (self[k] for k in self.keys())

    def items(self):
        return ((k, self[k]) for k in self.keys())

    def haskeys(self) -> bool:
        """
        Since ``keys()`` returns an iterator, this method is helpful in bypassing
        code that looks for the existence of any defined results names."""
        return not not self._tokdict

    def pop(self, *args, **kwargs):
        """
        Removes and returns item at specified index (default= ``last``).
        Supports both ``list`` and ``dict`` semantics for ``pop()``. If
        passed no argument or an integer argument, it will use ``list``
        semantics and pop tokens from the list of parsed tokens. If passed
        a non-integer argument (most likely a string), it will use ``dict``
        semantics and pop the corresponding value from any defined results
        names. A second default return value argument is supported, just as in
        ``dict.pop()``.

        Example::

            numlist = Word(nums)[...]
            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']

            def remove_first(tokens):
                tokens.pop(0)
            numlist.add_parse_action(remove_first)
            print(numlist.parse_string("0 123 321")) # -> ['123', '321']

            label = Word(alphas)
            patt = label("LABEL") + Word(nums)[1, ...]
            print(patt.parse_string("AAB 123 321").dump())

            # Use pop() in a parse action to remove named result (note that corresponding value is not
            # removed from list form of results)
            def remove_LABEL(tokens):
                tokens.pop("LABEL")
                return tokens
            patt.add_parse_action(remove_LABEL)
            print(patt.parse_string("AAB 123 321").dump())

        prints::

            ['AAB', '123', '321']
            - LABEL: 'AAB'

            ['AAB', '123', '321']
        """
        # only the "default" keyword is accepted, mirroring dict.pop(key, default)
        if not args:
            args = [-1]
        for k, v in kwargs.items():
            if k == "default":
                args = (args[0], v)
            else:
                raise TypeError(f"pop() got an unexpected keyword argument {k!r}")
        if isinstance(args[0], int) or len(args) == 1 or args[0] in self:
            index = args[0]
            ret = self[index]
            del self[index]
            return ret
        else:
            # named key not present: fall back to the supplied default
            defaultvalue = args[1]
            return defaultvalue

    def get(self, key, default_value=None):
        """
        Returns named result matching the given key, or if there is no
        such name, then returns the given ``default_value`` or ``None`` if no
        ``default_value`` is specified.

        Similar to ``dict.get()``.

        Example::

            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

            result = date_str.parse_string("1999/12/31")
            print(result.get("year")) # -> '1999'
            print(result.get("hour", "not specified")) # -> 'not specified'
            print(result.get("hour")) # -> None
        """
        if key in self:
            return self[key]
        else:
            return default_value

    def insert(self, index, ins_string):
        """
        Inserts new element at location index in the list of parsed tokens.

        Similar to ``list.insert()``.

        Example::

            numlist = Word(nums)[...]
            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']

            # use a parse action to insert the parse location in the front of the parsed results
            def insert_locn(locn, tokens):
                tokens.insert(0, locn)
            numlist.add_parse_action(insert_locn)
            print(numlist.parse_string("0 123 321")) # -> [0, '0', '123', '321']
        """
        self._toklist.insert(index, ins_string)
        # fixup indices in token dictionary
        for occurrences in self._tokdict.values():
            for k, (value, position) in enumerate(occurrences):
                occurrences[k] = _ParseResultsWithOffset(
                    value, position + (position > index)
                )

    def append(self, item):
        """
        Add single element to end of ``ParseResults`` list of elements.

        Example::

            numlist = Word(nums)[...]
            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321']

            # use a parse action to compute the sum of the parsed integers, and add it to the end
            def append_sum(tokens):
                tokens.append(sum(map(int, tokens)))
            numlist.add_parse_action(append_sum)
            print(numlist.parse_string("0 123 321")) # -> ['0', '123', '321', 444]
        """
        self._toklist.append(item)

    def extend(self, itemseq):
        """
        Add sequence of elements to end of ``ParseResults`` list of elements.

        Example::

            patt = Word(alphas)[1, ...]

            # use a parse action to append the reverse of the matched strings, to make a palindrome
            def make_palindrome(tokens):
                tokens.extend(reversed([t[::-1] for t in tokens]))
                return ''.join(tokens)
            patt.add_parse_action(make_palindrome)
            print(patt.parse_string("lskdj sdlkjf lksd")) # -> 'lskdjsdlkjflksddsklfjkldsjdksl'
        """
        # extending with another ParseResults merges its names too (see __iadd__)
        if isinstance(itemseq, ParseResults):
            self.__iadd__(itemseq)
        else:
            self._toklist.extend(itemseq)

    def clear(self):
        """
        Clear all elements and results names.
        """
        del self._toklist[:]
        self._tokdict.clear()

    def __getattr__(self, name):
        # attribute access falls back to named-result lookup; a missing name
        # reads as "" so `results.foo` is safe, but dunder lookups must raise
        # AttributeError to keep Python protocols (copy, pickle, ...) working
        try:
            return self[name]
        except KeyError:
            if name.startswith("__"):
                raise AttributeError(name)
            return ""

    def __add__(self, other: ParseResults) -> ParseResults:
        ret = self.copy()
        ret += other
        return ret

    def __iadd__(self, other: ParseResults) -> ParseResults:
        if not other:
            return self

        if other._tokdict:
            # shift other's named-result offsets past our current tokens
            offset = len(self._toklist)
            addoffset = lambda a: offset if a < 0 else a + offset
            otheritems = other._tokdict.items()
            otherdictitems = [
                (k, _ParseResultsWithOffset(v[0], addoffset(v[1])))
                for k, vlist in otheritems
                for v in vlist
            ]
            for k, v in otherdictitems:
                self[k] = v
                if isinstance(v[0], ParseResults):
                    v[0]._parent = self

        self._toklist += other._toklist
        self._all_names |= other._all_names
        return self

    def __radd__(self, other) -> ParseResults:
        if isinstance(other, int) and other == 0:
            # useful for merging many ParseResults using sum() builtin
            return self.copy()
        else:
            # this may raise a TypeError - so be it
            return other + self

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self._toklist!r}, {self.as_dict()})"

    def __str__(self) -> str:
        # nested ParseResults render with str(), plain tokens with repr()
        return (
            "["
            + ", ".join(
                [
                    str(i) if isinstance(i, ParseResults) else repr(i)
                    for i in self._toklist
                ]
            )
            + "]"
        )

    def _asStringList(self, sep=""):
        # recursively flatten tokens to a flat list of strings
        out = []
        for item in self._toklist:
            if out and sep:
                out.append(sep)
            if isinstance(item, ParseResults):
                out += item._asStringList()
            else:
                out.append(str(item))
        return out

    def as_list(self, *, flatten: bool = False) -> list:
        """
        Returns the parse results as a nested list of matching tokens, all converted to strings.
        If flatten is True, all the nesting levels in the returned list are collapsed.

        Example::

            patt = Word(alphas)[1, ...]
            result = patt.parse_string("sldkj lsdkj sldkj")
            # even though the result prints in string-like form, it is actually a pyparsing ParseResults
            print(type(result), result) # -> <class 'pyparsing.ParseResults'> ['sldkj', 'lsdkj', 'sldkj']

            # Use as_list() to create an actual list
            result_list = result.as_list()
            print(type(result_list), result_list) # -> <class 'list'> ['sldkj', 'lsdkj', 'sldkj']
        """
        # NOTE(review): flattened() ignores its `pr` argument and iterates
        # `self` directly; it is only ever called as flattened(self), so the
        # behavior is correct as written.
        def flattened(pr):
            # iterative breadth-limited walk: expand nested ParseResults in place
            to_visit = collections.deque([*self])
            while to_visit:
                to_do = to_visit.popleft()
                if isinstance(to_do, ParseResults):
                    to_visit.extendleft(to_do[::-1])
                else:
                    yield to_do

        if flatten:
            return [*flattened(self)]
        else:
            return [
                res.as_list() if isinstance(res, ParseResults) else res
                for res in self._toklist
            ]

    def as_dict(self) -> dict:
        """
        Returns the named parse results as a nested dictionary.

        Example::

            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

            result = date_str.parse_string('12/31/1999')
            print(type(result), repr(result)) # -> <class 'pyparsing.ParseResults'> (['12', '/', '31', '/', '1999'], {'day': [('1999', 4)], 'year': [('12', 0)], 'month': [('31', 2)]})

            result_dict = result.as_dict()
            print(type(result_dict), repr(result_dict)) # -> <class 'dict'> {'day': '1999', 'year': '12', 'month': '31'}

            # even though a ParseResults supports dict-like access, sometime you just need to have a dict
            import json
            print(json.dumps(result)) # -> Exception: TypeError: ... is not JSON serializable
            print(json.dumps(result.as_dict())) # -> {"month": "31", "day": "1999", "year": "12"}
        """

        def to_item(obj):
            # named sub-results become dicts; unnamed ones become lists
            if isinstance(obj, ParseResults):
                return obj.as_dict() if obj.haskeys() else [to_item(v) for v in obj]
            else:
                return obj

        return dict((k, to_item(v)) for k, v in self.items())

    def copy(self) -> ParseResults:
        """
        Returns a new shallow copy of a :class:`ParseResults` object. `ParseResults`
        items contained within the source are shared with the copy. Use
        :class:`ParseResults.deepcopy()` to create a copy with its own separate
        content values.
        """
        ret = ParseResults(self._toklist)
        ret._tokdict = self._tokdict.copy()
        ret._parent = self._parent
        ret._all_names |= self._all_names
        ret._name = self._name
        return ret

    def deepcopy(self) -> ParseResults:
        """
        Returns a new deep copy of a :class:`ParseResults` object.
        """
        ret = self.copy()
        # replace values with copies if they are of known mutable types
        for i, obj in enumerate(self._toklist):
            if isinstance(obj, ParseResults):
                ret._toklist[i] = obj.deepcopy()
            elif isinstance(obj, (str, bytes)):
                # immutable - safe to share
                pass
            elif isinstance(obj, MutableMapping):
                ret._toklist[i] = dest = type(obj)()
                for k, v in obj.items():
                    dest[k] = v.deepcopy() if isinstance(v, ParseResults) else v
            elif isinstance(obj, Iterable):
                ret._toklist[i] = type(obj)(
                    v.deepcopy() if isinstance(v, ParseResults) else v for v in obj  # type: ignore[call-arg]
                )
        return ret

    def get_name(self) -> str | None:
        r"""
        Returns the results name for this token expression. Useful when several
        different expressions might match at a particular location.

        Example::

            integer = Word(nums)
            ssn_expr = Regex(r"\d\d\d-\d\d-\d\d\d\d")
            house_number_expr = Suppress('#') + Word(nums, alphanums)
            user_data = (Group(house_number_expr)("house_number")
                        | Group(ssn_expr)("ssn")
                        | Group(integer)("age"))
            user_info = user_data[1, ...]

            result = user_info.parse_string("22 111-22-3333 #221B")
            for item in result:
                print(item.get_name(), ':', item[0])

        prints::

            age : 22
            ssn : 111-22-3333
            house_number : 221B
        """
        if self._name:
            return self._name
        elif self._parent:
            # search the parent's named results for an entry that is this object
            par: ParseResults = self._parent
            parent_tokdict_items = par._tokdict.items()
            return next(
                (
                    k
                    for k, vlist in parent_tokdict_items
                    for v, loc in vlist
                    if v is self
                ),
                None,
            )
        elif (
            # single token with a single name at offset 0 or -1: use that name
            len(self) == 1
            and len(self._tokdict) == 1
            and next(iter(self._tokdict.values()))[0][1] in (0, -1)
        ):
            return next(iter(self._tokdict.keys()))
        else:
            return None

    def dump(self, indent="", full=True, include_list=True, _depth=0) -> str:
        """
        Diagnostic method for listing out the contents of
        a :class:`ParseResults`. Accepts an optional ``indent`` argument so
        that this string can be embedded in a nested display of other data.

        Example::

            integer = Word(nums)
            date_str = integer("year") + '/' + integer("month") + '/' + integer("day")

            result = date_str.parse_string('1999/12/31')
            print(result.dump())

        prints::

            ['1999', '/', '12', '/', '31']
            - day: '31'
            - month: '12'
            - year: '1999'
        """
        out = []
        NL = "\n"
        out.append(indent + str(self.as_list()) if include_list else "")

        if not full:
            return "".join(out)

        # first list the named results, sorted by name...
        if self.haskeys():
            items = sorted((str(k), v) for k, v in self.items())
            for k, v in items:
                if out:
                    out.append(NL)
                out.append(f"{indent}{(' ' * _depth)}- {k}: ")
                if not isinstance(v, ParseResults):
                    out.append(repr(v))
                    continue

                if not v:
                    out.append(str(v))
                    continue

                out.append(
                    v.dump(
                        indent=indent,
                        full=full,
                        include_list=include_list,
                        _depth=_depth + 1,
                    )
                )
        # ...then recurse into any unnamed nested ParseResults by index
        if not any(isinstance(vv, ParseResults) for vv in self):
            return "".join(out)

        v = self
        incr = " "
        nl = "\n"
        for i, vv in enumerate(v):
            if isinstance(vv, ParseResults):
                vv_dump = vv.dump(
                    indent=indent,
                    full=full,
                    include_list=include_list,
                    _depth=_depth + 1,
                )
                out.append(
                    f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv_dump}"
                )
            else:
                out.append(
                    f"{nl}{indent}{incr * _depth}[{i}]:{nl}{indent}{incr * (_depth + 1)}{vv}"
                )

        return "".join(out)

    def pprint(self, *args, **kwargs):
        """
        Pretty-printer for parsed results as a list, using the
        `pprint <https://docs.python.org/3/library/pprint.html>`_ module.
        Accepts additional positional or keyword args as defined for
        `pprint.pprint <https://docs.python.org/3/library/pprint.html#pprint.pprint>`_ .

        Example::

            ident = Word(alphas, alphanums)
            num = Word(nums)
            func = Forward()
            term = ident | num | Group('(' + func + ')')
            func <<= ident + Group(Optional(DelimitedList(term)))
            result = func.parse_string("fna a,b,(fnb c,d,200),100")
            result.pprint(width=40)

        prints::

            ['fna',
             ['a',
              'b',
              ['(', 'fnb', ['c', 'd', '200'], ')'],
              '100']]
        """
        pprint.pprint(self.as_list(), *args, **kwargs)

    # add support for pickle protocol
    def __getstate__(self):
        # the parent link is dropped (pickled as None) to avoid cycles
        return (
            self._toklist,
            (
                self._tokdict.copy(),
                None,
                self._all_names,
                self._name,
            ),
        )

    def __setstate__(self, state):
        self._toklist, (self._tokdict, par, inAccumNames, self._name) = state
        self._all_names = set(inAccumNames)
        self._parent = None

    def __getnewargs__(self):
        return self._toklist, self._name

    def __dir__(self):
        # expose results names as attributes for introspection/completion
        return dir(type(self)) + list(self.keys())

    @classmethod
    def from_dict(cls, other, name=None) -> ParseResults:
        """
        Helper classmethod to construct a ``ParseResults`` from a ``dict``, preserving the
        name-value relations as results names. If an optional ``name`` argument is
        given, a nested ``ParseResults`` will be returned.
        """

        def is_iterable(obj):
            try:
                iter(obj)
            except Exception:
                return False
            # str's are iterable, but in pyparsing, we don't want to iterate over them
            else:
                return not isinstance(obj, str_type)

        ret = cls([])
        for k, v in other.items():
            if isinstance(v, Mapping):
                ret += cls.from_dict(v, name=k)
            else:
                ret += cls([v], name=k, asList=is_iterable(v))
        if name is not None:
            ret = cls([ret], name=name)
        return ret

    asList = as_list
    """Deprecated - use :class:`as_list`"""
    asDict = as_dict
    """Deprecated - use :class:`as_dict`"""
    getName = get_name
    """Deprecated - use :class:`get_name`"""
+
+
+MutableMapping.register(ParseResults)
+MutableSequence.register(ParseResults)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/pyparsing/testing.py b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..836b2f86fbeb0ad7c12f36996f4099bcf96e492a
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/testing.py
@@ -0,0 +1,362 @@
+# testing.py
+
+from contextlib import contextmanager
+import re
+import typing
+
+
+from .core import (
+ ParserElement,
+ ParseException,
+ Keyword,
+ __diag__,
+ __compat__,
+)
+
+
class pyparsing_test:
    """
    Namespace class for classes useful in writing unit tests.
    """
+
+ class reset_pyparsing_context:
+ """
+ Context manager to be used when writing unit tests that modify pyparsing config values:
+ - packrat parsing
+ - bounded recursion parsing
+ - default whitespace characters.
+ - default keyword characters
+ - literal string auto-conversion class
+ - __diag__ settings
+
+ Example::
+
+ with reset_pyparsing_context():
+ # test that literals used to construct a grammar are automatically suppressed
+ ParserElement.inlineLiteralsUsing(Suppress)
+
+ term = Word(alphas) | Word(nums)
+ group = Group('(' + term[...] + ')')
+
+ # assert that the '()' characters are not included in the parsed tokens
+ self.assertParseAndCheckList(group, "(abc 123 def)", ['abc', '123', 'def'])
+
+ # after exiting context manager, literals are converted to Literal expressions again
+ """
+
+ def __init__(self):
+ self._save_context = {}
+
+ def save(self):
+ self._save_context["default_whitespace"] = ParserElement.DEFAULT_WHITE_CHARS
+ self._save_context["default_keyword_chars"] = Keyword.DEFAULT_KEYWORD_CHARS
+
+ self._save_context["literal_string_class"] = (
+ ParserElement._literalStringClass
+ )
+
+ self._save_context["verbose_stacktrace"] = ParserElement.verbose_stacktrace
+
+ self._save_context["packrat_enabled"] = ParserElement._packratEnabled
+ if ParserElement._packratEnabled:
+ self._save_context["packrat_cache_size"] = (
+ ParserElement.packrat_cache.size
+ )
+ else:
+ self._save_context["packrat_cache_size"] = None
+ self._save_context["packrat_parse"] = ParserElement._parse
+ self._save_context["recursion_enabled"] = (
+ ParserElement._left_recursion_enabled
+ )
+
+ self._save_context["__diag__"] = {
+ name: getattr(__diag__, name) for name in __diag__._all_names
+ }
+
+ self._save_context["__compat__"] = {
+ "collect_all_And_tokens": __compat__.collect_all_And_tokens
+ }
+
+ return self
+
+ def restore(self):
+ # reset pyparsing global state
+ if (
+ ParserElement.DEFAULT_WHITE_CHARS
+ != self._save_context["default_whitespace"]
+ ):
+ ParserElement.set_default_whitespace_chars(
+ self._save_context["default_whitespace"]
+ )
+
+ ParserElement.verbose_stacktrace = self._save_context["verbose_stacktrace"]
+
+ Keyword.DEFAULT_KEYWORD_CHARS = self._save_context["default_keyword_chars"]
+ ParserElement.inlineLiteralsUsing(
+ self._save_context["literal_string_class"]
+ )
+
+ for name, value in self._save_context["__diag__"].items():
+ (__diag__.enable if value else __diag__.disable)(name)
+
+ ParserElement._packratEnabled = False
+ if self._save_context["packrat_enabled"]:
+ ParserElement.enable_packrat(self._save_context["packrat_cache_size"])
+ else:
+ ParserElement._parse = self._save_context["packrat_parse"]
+ ParserElement._left_recursion_enabled = self._save_context[
+ "recursion_enabled"
+ ]
+
+ __compat__.collect_all_And_tokens = self._save_context["__compat__"]
+
+ return self
+
+ def copy(self):
+ ret = type(self)()
+ ret._save_context.update(self._save_context)
+ return ret
+
+ def __enter__(self):
+ return self.save()
+
+ def __exit__(self, *args):
+ self.restore()
+
+ class TestParseResultsAsserts:
+ """
+ A mixin class to add parse results assertion methods to normal unittest.TestCase classes.
+ """
+
+ def assertParseResultsEquals(
+ self, result, expected_list=None, expected_dict=None, msg=None
+ ):
+ """
+ Unit test assertion to compare a :class:`ParseResults` object with an optional ``expected_list``,
+ and compare any defined results names with an optional ``expected_dict``.
+ """
+ if expected_list is not None:
+ self.assertEqual(expected_list, result.as_list(), msg=msg)
+ if expected_dict is not None:
+ self.assertEqual(expected_dict, result.as_dict(), msg=msg)
+
+ def assertParseAndCheckList(
+ self, expr, test_string, expected_list, msg=None, verbose=True
+ ):
+ """
+ Convenience wrapper assert to test a parser element and input string, and assert that
+ the resulting ``ParseResults.asList()`` is equal to the ``expected_list``.
+ """
+ result = expr.parse_string(test_string, parse_all=True)
+ if verbose:
+ print(result.dump())
+ else:
+ print(result.as_list())
+ self.assertParseResultsEquals(result, expected_list=expected_list, msg=msg)
+
+ def assertParseAndCheckDict(
+ self, expr, test_string, expected_dict, msg=None, verbose=True
+ ):
+ """
+ Convenience wrapper assert to test a parser element and input string, and assert that
+ the resulting ``ParseResults.asDict()`` is equal to the ``expected_dict``.
+ """
+ result = expr.parse_string(test_string, parseAll=True)
+ if verbose:
+ print(result.dump())
+ else:
+ print(result.as_list())
+ self.assertParseResultsEquals(result, expected_dict=expected_dict, msg=msg)
+
+ def assertRunTestResults(
+ self, run_tests_report, expected_parse_results=None, msg=None
+ ):
+ """
+ Unit test assertion to evaluate output of ``ParserElement.runTests()``. If a list of
+ list-dict tuples is given as the ``expected_parse_results`` argument, then these are zipped
+ with the report tuples returned by ``runTests`` and evaluated using ``assertParseResultsEquals``.
+ Finally, asserts that the overall ``runTests()`` success value is ``True``.
+
+ :param run_tests_report: tuple(bool, [tuple(str, ParseResults or Exception)]) returned from runTests
+ :param expected_parse_results (optional): [tuple(str, list, dict, Exception)]
+ """
+ run_test_success, run_test_results = run_tests_report
+
+ if expected_parse_results is None:
+ self.assertTrue(
+ run_test_success, msg=msg if msg is not None else "failed runTests"
+ )
+ return
+
+ merged = [
+ (*rpt, expected)
+ for rpt, expected in zip(run_test_results, expected_parse_results)
+ ]
+ for test_string, result, expected in merged:
+ # expected should be a tuple containing a list and/or a dict or an exception,
+ # and optional failure message string
+ # an empty tuple will skip any result validation
+ fail_msg = next((exp for exp in expected if isinstance(exp, str)), None)
+ expected_exception = next(
+ (
+ exp
+ for exp in expected
+ if isinstance(exp, type) and issubclass(exp, Exception)
+ ),
+ None,
+ )
+ if expected_exception is not None:
+ with self.assertRaises(
+ expected_exception=expected_exception, msg=fail_msg or msg
+ ):
+ if isinstance(result, Exception):
+ raise result
+ else:
+ expected_list = next(
+ (exp for exp in expected if isinstance(exp, list)), None
+ )
+ expected_dict = next(
+ (exp for exp in expected if isinstance(exp, dict)), None
+ )
+ if (expected_list, expected_dict) != (None, None):
+ self.assertParseResultsEquals(
+ result,
+ expected_list=expected_list,
+ expected_dict=expected_dict,
+ msg=fail_msg or msg,
+ )
+ else:
+ # warning here maybe?
+ print(f"no validation for {test_string!r}")
+
+ # do this last, in case some specific test results can be reported instead
+ self.assertTrue(
+ run_test_success, msg=msg if msg is not None else "failed runTests"
+ )
+
+ @contextmanager
+ def assertRaisesParseException(
+ self, exc_type=ParseException, expected_msg=None, msg=None
+ ):
+ if expected_msg is not None:
+ if isinstance(expected_msg, str):
+ expected_msg = re.escape(expected_msg)
+ with self.assertRaisesRegex(exc_type, expected_msg, msg=msg) as ctx:
+ yield ctx
+
+ else:
+ with self.assertRaises(exc_type, msg=msg) as ctx:
+ yield ctx
+
    @staticmethod
    def with_line_numbers(
        s: str,
        start_line: typing.Optional[int] = None,
        end_line: typing.Optional[int] = None,
        expand_tabs: bool = True,
        eol_mark: str = "|",
        mark_spaces: typing.Optional[str] = None,
        mark_control: typing.Optional[str] = None,
        *,
        indent: typing.Union[str, int] = "",
        base_1: bool = True,
    ) -> str:
        """
        Helpful method for debugging a parser - prints a string with line and column numbers.
        (Line and column numbers are 1-based by default - if debugging a parse action,
        pass base_1=False, to correspond to the loc value passed to the parse action.)

        :param s: tuple(bool, str - string to be printed with line and column numbers
        :param start_line: int - (optional) starting line number in s to print (default=1)
        :param end_line: int - (optional) ending line number in s to print (default=len(s))
        :param expand_tabs: bool - (optional) expand tabs to spaces, to match the pyparsing default
        :param eol_mark: str - (optional) string to mark the end of lines, helps visualize trailing spaces (default="|")
        :param mark_spaces: str - (optional) special character to display in place of spaces
        :param mark_control: str - (optional) convert non-printing control characters to a placeholding
            character; valid values:
            - "unicode" - replaces control chars with Unicode symbols, such as "␍" and "␊"
            - any single character string - replace control characters with given string
            - None (default) - string is displayed as-is
        :param indent: str | int - (optional) string to indent with line and column numbers; if an int
            is passed, converted to " " * indent
        :param base_1: bool - (optional) whether to label string using base 1; if False, string will be
            labeled based at 0 (default=True)

        :return: str - input string with leading line numbers and column number headers
        """
        if expand_tabs:
            s = s.expandtabs()
        if isinstance(indent, int):
            indent = " " * indent
        indent = indent.expandtabs()
        if mark_control is not None:
            mark_control = typing.cast(str, mark_control)
            if mark_control == "unicode":
                # map control chars 0-32 to the Unicode "control picture" range
                transtable_map = {
                    c: u for c, u in zip(range(0, 33), range(0x2400, 0x2433))
                }
                transtable_map[127] = 0x2421
                tbl = str.maketrans(transtable_map)
                eol_mark = ""
            else:
                # replace all control chars with the single given character
                ord_mark_control = ord(mark_control)
                tbl = str.maketrans(
                    {c: ord_mark_control for c in list(range(0, 32)) + [127]}
                )
            s = s.translate(tbl)
        if mark_spaces is not None and mark_spaces != " ":
            if mark_spaces == "unicode":
                tbl = str.maketrans({9: 0x2409, 32: 0x2423})
                s = s.translate(tbl)
            else:
                s = s.replace(" ", mark_spaces)
        # clamp requested line window to the available lines
        if start_line is None:
            start_line = 0
        if end_line is None:
            end_line = len(s)
        end_line = min(end_line, len(s))
        start_line = min(max(0, start_line), end_line)

        if mark_control != "unicode":
            s_lines = s.splitlines()[start_line - base_1 : end_line]
        else:
            # newlines were translated to "␊" above, so split on that instead
            s_lines = [
                line + "␊" for line in s.split("␊")[start_line - base_1 : end_line]
            ]
        if not s_lines:
            return ""

        lineno_width = len(str(end_line))
        max_line_len = max(len(line) for line in s_lines)
        lead = indent + " " * (lineno_width + 1)
        # header0/1/2 build the hundreds/tens/ones column-number ruler rows
        if max_line_len >= 99:
            header0 = (
                lead
                + ("" if base_1 else " ")
                + "".join(
                    f"{' ' * 99}{(i + 1) % 100}"
                    for i in range(1 if base_1 else 0, max(max_line_len // 100, 1))
                )
                + "\n"
            )
        else:
            header0 = ""
        header1 = (
            ("" if base_1 else " ")
            + lead
            # NOTE(review): upstream pyparsing pads 9 spaces before each tens
            # digit here; this copy appears to have collapsed that run of
            # spaces to one inside the f-string - verify against upstream.
            + "".join(f" {(i + 1) % 10}" for i in range(-(-max_line_len // 10)))
            + "\n"
        )
        digits = "1234567890"
        header2 = (
            lead + ("" if base_1 else "0") + digits * (-(-max_line_len // 10)) + "\n"
        )
        return (
            header1
            + header2
            + "\n".join(
                f"{indent}{i:{lineno_width}d}:{line}{eol_mark}"
                for i, line in enumerate(s_lines, start=start_line + base_1)
            )
            + "\n"
        )
diff --git a/evalkit_tf437/lib/python3.10/site-packages/pyparsing/unicode.py b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/unicode.py
new file mode 100644
index 0000000000000000000000000000000000000000..066486c28eea020d420c2e90fdda76f69f1c9ead
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/pyparsing/unicode.py
@@ -0,0 +1,356 @@
+# unicode.py
+
+import sys
+from itertools import filterfalse
+from typing import Union
+
+
+class _lazyclassproperty:
+ def __init__(self, fn):
+ self.fn = fn
+ self.__doc__ = fn.__doc__
+ self.__name__ = fn.__name__
+
+ def __get__(self, obj, cls):
+ if cls is None:
+ cls = type(obj)
+ if not hasattr(cls, "_intern") or any(
+ cls._intern is getattr(superclass, "_intern", [])
+ for superclass in cls.__mro__[1:]
+ ):
+ cls._intern = {}
+ attrname = self.fn.__name__
+ if attrname not in cls._intern:
+ cls._intern[attrname] = self.fn(cls)
+ return cls._intern[attrname]
+
+
+UnicodeRangeList = list[Union[tuple[int, int], tuple[int]]]
+
+
+class unicode_set:
+ """
+ A set of Unicode characters, for language-specific strings for
+ ``alphas``, ``nums``, ``alphanums``, and ``printables``.
+ A unicode_set is defined by a list of ranges in the Unicode character
+ set, in a class attribute ``_ranges``. Ranges can be specified using
+ 2-tuples or a 1-tuple, such as::
+
+ _ranges = [
+ (0x0020, 0x007e),
+ (0x00a0, 0x00ff),
+ (0x0100,),
+ ]
+
+ Ranges are left- and right-inclusive. A 1-tuple of (x,) is treated as (x, x).
+
+ A unicode set can also be defined using multiple inheritance of other unicode sets::
+
+ class CJK(Chinese, Japanese, Korean):
+ pass
+ """
+
+ _ranges: UnicodeRangeList = []
+
+ @_lazyclassproperty
+ def _chars_for_ranges(cls) -> list[str]:
+ ret: list[int] = []
+ for cc in cls.__mro__: # type: ignore[attr-defined]
+ if cc is unicode_set:
+ break
+ for rr in getattr(cc, "_ranges", ()):
+ ret.extend(range(rr[0], rr[-1] + 1))
+ return sorted(chr(c) for c in set(ret))
+
+ @_lazyclassproperty
+ def printables(cls) -> str:
+ """all non-whitespace characters in this range"""
+ return "".join(filterfalse(str.isspace, cls._chars_for_ranges))
+
+ @_lazyclassproperty
+ def alphas(cls) -> str:
+ """all alphabetic characters in this range"""
+ return "".join(filter(str.isalpha, cls._chars_for_ranges))
+
+ @_lazyclassproperty
+ def nums(cls) -> str:
+ """all numeric digit characters in this range"""
+ return "".join(filter(str.isdigit, cls._chars_for_ranges))
+
+ @_lazyclassproperty
+ def alphanums(cls) -> str:
+ """all alphanumeric characters in this range"""
+ return cls.alphas + cls.nums
+
+ @_lazyclassproperty
+ def identchars(cls) -> str:
+ """all characters in this range that are valid identifier characters, plus underscore '_'"""
+ return "".join(
+ sorted(
+ set(filter(str.isidentifier, cls._chars_for_ranges))
+ | set(
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyzªµº"
+ "ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖØÙÚÛÜÝÞßàáâãäåæçèéêëìíîïðñòóôõöøùúûüýþÿ"
+ "_"
+ )
+ )
+ )
+
+ @_lazyclassproperty
+ def identbodychars(cls) -> str:
+ """
+ all characters in this range that are valid identifier body characters,
+ plus the digits 0-9, and · (Unicode MIDDLE DOT)
+ """
+ identifier_chars = set(
+ c for c in cls._chars_for_ranges if ("_" + c).isidentifier()
+ )
+ return "".join(
+ sorted(identifier_chars | set(cls.identchars) | set("0123456789·"))
+ )
+
+ @_lazyclassproperty
+ def identifier(cls):
+ """
+ a pyparsing Word expression for an identifier using this range's definitions for
+ identchars and identbodychars
+ """
+ from pyparsing import Word
+
+ return Word(cls.identchars, cls.identbodychars)
+
+
+class pyparsing_unicode(unicode_set):
+ """
+ A namespace class for defining common language unicode_sets.
+ """
+
+ # fmt: off
+
+ # define ranges in language character sets
+ _ranges: UnicodeRangeList = [
+ (0x0020, sys.maxunicode),
+ ]
+
+ class BasicMultilingualPlane(unicode_set):
+ """Unicode set for the Basic Multilingual Plane"""
+ _ranges: UnicodeRangeList = [
+ (0x0020, 0xFFFF),
+ ]
+
+ class Latin1(unicode_set):
+ """Unicode set for Latin-1 Unicode Character Range"""
+ _ranges: UnicodeRangeList = [
+ (0x0020, 0x007E),
+ (0x00A0, 0x00FF),
+ ]
+
+ class LatinA(unicode_set):
+ """Unicode set for Latin-A Unicode Character Range"""
+ _ranges: UnicodeRangeList = [
+ (0x0100, 0x017F),
+ ]
+
+ class LatinB(unicode_set):
+ """Unicode set for Latin-B Unicode Character Range"""
+ _ranges: UnicodeRangeList = [
+ (0x0180, 0x024F),
+ ]
+
+ class Greek(unicode_set):
+ """Unicode set for Greek Unicode Character Ranges"""
+ _ranges: UnicodeRangeList = [
+ (0x0342, 0x0345),
+ (0x0370, 0x0377),
+ (0x037A, 0x037F),
+ (0x0384, 0x038A),
+ (0x038C,),
+ (0x038E, 0x03A1),
+ (0x03A3, 0x03E1),
+ (0x03F0, 0x03FF),
+ (0x1D26, 0x1D2A),
+ (0x1D5E,),
+ (0x1D60,),
+ (0x1D66, 0x1D6A),
+ (0x1F00, 0x1F15),
+ (0x1F18, 0x1F1D),
+ (0x1F20, 0x1F45),
+ (0x1F48, 0x1F4D),
+ (0x1F50, 0x1F57),
+ (0x1F59,),
+ (0x1F5B,),
+ (0x1F5D,),
+ (0x1F5F, 0x1F7D),
+ (0x1F80, 0x1FB4),
+ (0x1FB6, 0x1FC4),
+ (0x1FC6, 0x1FD3),
+ (0x1FD6, 0x1FDB),
+ (0x1FDD, 0x1FEF),
+ (0x1FF2, 0x1FF4),
+ (0x1FF6, 0x1FFE),
+ (0x2129,),
+ (0x2719, 0x271A),
+ (0xAB65,),
+ (0x10140, 0x1018D),
+ (0x101A0,),
+ (0x1D200, 0x1D245),
+ (0x1F7A1, 0x1F7A7),
+ ]
+
+ class Cyrillic(unicode_set):
+ """Unicode set for Cyrillic Unicode Character Range"""
+ _ranges: UnicodeRangeList = [
+ (0x0400, 0x052F),
+ (0x1C80, 0x1C88),
+ (0x1D2B,),
+ (0x1D78,),
+ (0x2DE0, 0x2DFF),
+ (0xA640, 0xA672),
+ (0xA674, 0xA69F),
+ (0xFE2E, 0xFE2F),
+ ]
+
+ class Chinese(unicode_set):
+ """Unicode set for Chinese Unicode Character Range"""
+ _ranges: UnicodeRangeList = [
+ (0x2E80, 0x2E99),
+ (0x2E9B, 0x2EF3),
+ (0x31C0, 0x31E3),
+ (0x3400, 0x4DB5),
+ (0x4E00, 0x9FEF),
+ (0xA700, 0xA707),
+ (0xF900, 0xFA6D),
+ (0xFA70, 0xFAD9),
+ (0x16FE2, 0x16FE3),
+ (0x1F210, 0x1F212),
+ (0x1F214, 0x1F23B),
+ (0x1F240, 0x1F248),
+ (0x20000, 0x2A6D6),
+ (0x2A700, 0x2B734),
+ (0x2B740, 0x2B81D),
+ (0x2B820, 0x2CEA1),
+ (0x2CEB0, 0x2EBE0),
+ (0x2F800, 0x2FA1D),
+ ]
+
+ class Japanese(unicode_set):
+ """Unicode set for Japanese Unicode Character Range, combining Kanji, Hiragana, and Katakana ranges"""
+
+ class Kanji(unicode_set):
+ "Unicode set for Kanji Unicode Character Range"
+ _ranges: UnicodeRangeList = [
+ (0x4E00, 0x9FBF),
+ (0x3000, 0x303F),
+ ]
+
+ class Hiragana(unicode_set):
+ """Unicode set for Hiragana Unicode Character Range"""
+ _ranges: UnicodeRangeList = [
+ (0x3041, 0x3096),
+ (0x3099, 0x30A0),
+ (0x30FC,),
+ (0xFF70,),
+ (0x1B001,),
+ (0x1B150, 0x1B152),
+ (0x1F200,),
+ ]
+
+ class Katakana(unicode_set):
+ """Unicode set for Katakana Unicode Character Range"""
+ _ranges: UnicodeRangeList = [
+ (0x3099, 0x309C),
+ (0x30A0, 0x30FF),
+ (0x31F0, 0x31FF),
+ (0x32D0, 0x32FE),
+ (0xFF65, 0xFF9F),
+ (0x1B000,),
+ (0x1B164, 0x1B167),
+ (0x1F201, 0x1F202),
+ (0x1F213,),
+ ]
+
+ 漢字 = Kanji
+ カタカナ = Katakana
+ ひらがな = Hiragana
+
+ _ranges = (
+ Kanji._ranges
+ + Hiragana._ranges
+ + Katakana._ranges
+ )
+
+ class Hangul(unicode_set):
+ """Unicode set for Hangul (Korean) Unicode Character Range"""
+ _ranges: UnicodeRangeList = [
+ (0x1100, 0x11FF),
+ (0x302E, 0x302F),
+ (0x3131, 0x318E),
+ (0x3200, 0x321C),
+ (0x3260, 0x327B),
+ (0x327E,),
+ (0xA960, 0xA97C),
+ (0xAC00, 0xD7A3),
+ (0xD7B0, 0xD7C6),
+ (0xD7CB, 0xD7FB),
+ (0xFFA0, 0xFFBE),
+ (0xFFC2, 0xFFC7),
+ (0xFFCA, 0xFFCF),
+ (0xFFD2, 0xFFD7),
+ (0xFFDA, 0xFFDC),
+ ]
+
+ Korean = Hangul
+
+ class CJK(Chinese, Japanese, Hangul):
+ """Unicode set for combined Chinese, Japanese, and Korean (CJK) Unicode Character Range"""
+
+ class Thai(unicode_set):
+ """Unicode set for Thai Unicode Character Range"""
+ _ranges: UnicodeRangeList = [
+ (0x0E01, 0x0E3A),
+ (0x0E3F, 0x0E5B)
+ ]
+
+ class Arabic(unicode_set):
+ """Unicode set for Arabic Unicode Character Range"""
+ _ranges: UnicodeRangeList = [
+ (0x0600, 0x061B),
+ (0x061E, 0x06FF),
+ (0x0700, 0x077F),
+ ]
+
+ class Hebrew(unicode_set):
+ """Unicode set for Hebrew Unicode Character Range"""
+ _ranges: UnicodeRangeList = [
+ (0x0591, 0x05C7),
+ (0x05D0, 0x05EA),
+ (0x05EF, 0x05F4),
+ (0xFB1D, 0xFB36),
+ (0xFB38, 0xFB3C),
+ (0xFB3E,),
+ (0xFB40, 0xFB41),
+ (0xFB43, 0xFB44),
+ (0xFB46, 0xFB4F),
+ ]
+
+ class Devanagari(unicode_set):
+ """Unicode set for Devanagari Unicode Character Range"""
+ _ranges: UnicodeRangeList = [
+ (0x0900, 0x097F),
+ (0xA8E0, 0xA8FF)
+ ]
+
+ BMP = BasicMultilingualPlane
+
+ # add language identifiers using language Unicode
+ العربية = Arabic
+ 中文 = Chinese
+ кириллица = Cyrillic
+ Ελληνικά = Greek
+ עִברִית = Hebrew
+ 日本語 = Japanese
+ 한국어 = Korean
+ ไทย = Thai
+ देवनागरी = Devanagari
+
+ # fmt: on
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sniffio/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sniffio/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..63f2f19e409c7f4b3c6c064022e8f104227873aa
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sniffio/__init__.py
@@ -0,0 +1,17 @@
+"""Top-level package for sniffio."""
+
+__all__ = [
+ "current_async_library",
+ "AsyncLibraryNotFoundError",
+ "current_async_library_cvar",
+ "thread_local",
+]
+
+from ._version import __version__
+
+from ._impl import (
+ current_async_library,
+ AsyncLibraryNotFoundError,
+ current_async_library_cvar,
+ thread_local,
+)
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7f58da2f2263dc48dae0b330b271d18318e5dbe5
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/_impl.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/_impl.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..3b475dbfc4a5890a04cf09a235c73b7e309b8fe1
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/_impl.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/_version.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/_version.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dc52f38c53e23e85b72175c6ac23c0561fd03606
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sniffio/__pycache__/_version.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sniffio/_impl.py b/evalkit_tf437/lib/python3.10/site-packages/sniffio/_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1a7bbf218ba985b87cd1d9b23da69222894c1dd
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sniffio/_impl.py
@@ -0,0 +1,95 @@
+from contextvars import ContextVar
+from typing import Optional
+import sys
+import threading
+
+current_async_library_cvar = ContextVar(
+ "current_async_library_cvar", default=None
+) # type: ContextVar[Optional[str]]
+
+
+class _ThreadLocal(threading.local):
+ # Since threading.local provides no explicit mechanism is for setting
+ # a default for a value, a custom class with a class attribute is used
+ # instead.
+ name = None # type: Optional[str]
+
+
+thread_local = _ThreadLocal()
+
+
+class AsyncLibraryNotFoundError(RuntimeError):
+ pass
+
+
+def current_async_library() -> str:
+ """Detect which async library is currently running.
+
+ The following libraries are currently supported:
+
+ ================ =========== ============================
+ Library Requires Magic string
+ ================ =========== ============================
+ **Trio** Trio v0.6+ ``"trio"``
+ **Curio** - ``"curio"``
+ **asyncio** ``"asyncio"``
+ **Trio-asyncio** v0.8.2+ ``"trio"`` or ``"asyncio"``,
+ depending on current mode
+ ================ =========== ============================
+
+ Returns:
+ A string like ``"trio"``.
+
+ Raises:
+ AsyncLibraryNotFoundError: if called from synchronous context,
+ or if the current async library was not recognized.
+
+ Examples:
+
+ .. code-block:: python3
+
+ from sniffio import current_async_library
+
+ async def generic_sleep(seconds):
+ library = current_async_library()
+ if library == "trio":
+ import trio
+ await trio.sleep(seconds)
+ elif library == "asyncio":
+ import asyncio
+ await asyncio.sleep(seconds)
+ # ... and so on ...
+ else:
+ raise RuntimeError(f"Unsupported library {library!r}")
+
+ """
+ value = thread_local.name
+ if value is not None:
+ return value
+
+ value = current_async_library_cvar.get()
+ if value is not None:
+ return value
+
+ # Need to sniff for asyncio
+ if "asyncio" in sys.modules:
+ import asyncio
+ try:
+ current_task = asyncio.current_task # type: ignore[attr-defined]
+ except AttributeError:
+ current_task = asyncio.Task.current_task # type: ignore[attr-defined]
+ try:
+ if current_task() is not None:
+ return "asyncio"
+ except RuntimeError:
+ pass
+
+ # Sniff for curio (for now)
+ if 'curio' in sys.modules:
+ from curio.meta import curio_running
+ if curio_running():
+ return 'curio'
+
+ raise AsyncLibraryNotFoundError(
+ "unknown async library, or not in async context"
+ )
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/__pycache__/__init__.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/__pycache__/__init__.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..26be1c4e147e98189ffc6a1532c9604ec75b4ca9
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/__pycache__/__init__.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/test_sniffio.py b/evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/test_sniffio.py
new file mode 100644
index 0000000000000000000000000000000000000000..02945a947527a9bfb396d0cd35ffee22eda665bb
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sniffio/_tests/test_sniffio.py
@@ -0,0 +1,84 @@
+import os
+import sys
+
+import pytest
+
+from .. import (
+ current_async_library, AsyncLibraryNotFoundError,
+ current_async_library_cvar, thread_local
+)
+
+
+def test_basics_cvar():
+ with pytest.raises(AsyncLibraryNotFoundError):
+ current_async_library()
+
+ token = current_async_library_cvar.set("generic-lib")
+ try:
+ assert current_async_library() == "generic-lib"
+ finally:
+ current_async_library_cvar.reset(token)
+
+ with pytest.raises(AsyncLibraryNotFoundError):
+ current_async_library()
+
+
+def test_basics_tlocal():
+ with pytest.raises(AsyncLibraryNotFoundError):
+ current_async_library()
+
+ old_name, thread_local.name = thread_local.name, "generic-lib"
+ try:
+ assert current_async_library() == "generic-lib"
+ finally:
+ thread_local.name = old_name
+
+ with pytest.raises(AsyncLibraryNotFoundError):
+ current_async_library()
+
+
+def test_asyncio():
+ import asyncio
+
+ with pytest.raises(AsyncLibraryNotFoundError):
+ current_async_library()
+
+ ran = []
+
+ async def this_is_asyncio():
+ assert current_async_library() == "asyncio"
+ # Call it a second time to exercise the caching logic
+ assert current_async_library() == "asyncio"
+ ran.append(True)
+
+ asyncio.run(this_is_asyncio())
+ assert ran == [True]
+
+ with pytest.raises(AsyncLibraryNotFoundError):
+ current_async_library()
+
+
+@pytest.mark.skipif(
+ sys.version_info >= (3, 12),
+ reason=
+ "curio broken on 3.12 (https://github.com/python-trio/sniffio/pull/42)",
+)
+def test_curio():
+ import curio
+
+ with pytest.raises(AsyncLibraryNotFoundError):
+ current_async_library()
+
+ ran = []
+
+ async def this_is_curio():
+ assert current_async_library() == "curio"
+ # Call it a second time to exercise the caching logic
+ assert current_async_library() == "curio"
+ ran.append(True)
+
+ curio.run(this_is_curio)
+ assert ran == [True]
+
+ with pytest.raises(AsyncLibraryNotFoundError):
+ current_async_library()
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sniffio/_version.py b/evalkit_tf437/lib/python3.10/site-packages/sniffio/_version.py
new file mode 100644
index 0000000000000000000000000000000000000000..0495d10545c9fd515ed51e890309d2b66e2c30bb
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/sniffio/_version.py
@@ -0,0 +1,3 @@
+# This file is imported from __init__.py and exec'd from setup.py
+
+__version__ = "1.3.1"
diff --git a/evalkit_tf437/lib/python3.10/site-packages/sniffio/py.typed b/evalkit_tf437/lib/python3.10/site-packages/sniffio/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/traitlets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e27ecf1a4511657bc1cb79991a2594644e371e1d
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/traitlets/__init__.py
@@ -0,0 +1,35 @@
+"""Traitlets Python configuration system"""
+from __future__ import annotations
+
+import typing as _t
+
+from . import traitlets
+from ._version import __version__, version_info
+from .traitlets import *
+from .utils.bunch import Bunch
+from .utils.decorators import signature_has_traits
+from .utils.importstring import import_item
+from .utils.warnings import warn
+
+__all__ = [
+ "traitlets",
+ "__version__",
+ "version_info",
+ "Bunch",
+ "signature_has_traits",
+ "import_item",
+ "Sentinel",
+]
+
+
+class Sentinel(traitlets.Sentinel): # type:ignore[name-defined, misc]
+ def __init__(self, *args: _t.Any, **kwargs: _t.Any) -> None:
+ super().__init__(*args, **kwargs)
+ warn(
+ """
+ Sentinel is not a public part of the traitlets API.
+ It was published by mistake, and may be removed in the future.
+ """,
+ DeprecationWarning,
+ stacklevel=2,
+ )
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/__pycache__/_version.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/traitlets/__pycache__/_version.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..69bcabd4c9b838cf713640d5ec691a3f5e100bd7
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/traitlets/__pycache__/_version.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/__pycache__/log.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/traitlets/__pycache__/log.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..841a92a10954107e53037353f96b487b3f24f65e
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/traitlets/__pycache__/log.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e51a4219a01873baa8820a0979636ff011064d76
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+from __future__ import annotations
+
+from .application import *
+from .configurable import *
+from .loader import Config
+
+__all__ = [ # noqa: F405
+ "Config",
+ "Application",
+ "ApplicationError",
+ "LevelFormatter",
+ "configurable",
+ "Configurable",
+ "ConfigurableError",
+ "MultipleInstanceError",
+ "LoggingConfigurable",
+ "SingletonConfigurable",
+]
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/__pycache__/application.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/__pycache__/application.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..bc7050be8e7a5a31a39dd230d6e64af607233053
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/__pycache__/application.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/__pycache__/argcomplete_config.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/__pycache__/argcomplete_config.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e2785573544698b249ff848723043df33c4b4c50
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/__pycache__/argcomplete_config.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/application.py b/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/application.py
new file mode 100644
index 0000000000000000000000000000000000000000..b01a11e518db0613bc2fa4a36701fe26a02a4043
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/application.py
@@ -0,0 +1,1129 @@
+"""A base class for a configurable application."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+from __future__ import annotations
+
+import functools
+import json
+import logging
+import os
+import pprint
+import re
+import sys
+import typing as t
+from collections import OrderedDict, defaultdict
+from contextlib import suppress
+from copy import deepcopy
+from logging.config import dictConfig
+from textwrap import dedent
+
+from traitlets.config.configurable import Configurable, SingletonConfigurable
+from traitlets.config.loader import (
+ ArgumentError,
+ Config,
+ ConfigFileNotFound,
+ DeferredConfigString,
+ JSONFileConfigLoader,
+ KVArgParseConfigLoader,
+ PyFileConfigLoader,
+)
+from traitlets.traitlets import (
+ Bool,
+ Dict,
+ Enum,
+ Instance,
+ List,
+ TraitError,
+ Unicode,
+ default,
+ observe,
+ observe_compat,
+)
+from traitlets.utils.bunch import Bunch
+from traitlets.utils.nested_update import nested_update
+from traitlets.utils.text import indent, wrap_paragraphs
+
+from ..utils import cast_unicode
+from ..utils.importstring import import_item
+
+# -----------------------------------------------------------------------------
+# Descriptions for the various sections
+# -----------------------------------------------------------------------------
+# merge flags&aliases into options
+option_description = """
+The options below are convenience aliases to configurable class-options,
+as listed in the "Equivalent to" description-line of the aliases.
+To see all configurable class-options for some , use:
+ --help-all
+""".strip() # trim newlines of front and back
+
+keyvalue_description = """
+The command-line option below sets the respective configurable class-parameter:
+ --Class.parameter=value
+This line is evaluated in Python, so simple expressions are allowed.
+For instance, to set `C.a=[0,1,2]`, you may type this:
+ --C.a='range(3)'
+""".strip() # trim newlines of front and back
+
+# sys.argv can be missing, for example when python is embedded. See the docs
+# for details: http://docs.python.org/2/c-api/intro.html#embedding-python
+if not hasattr(sys, "argv"):
+ sys.argv = [""]
+
+subcommand_description = """
+Subcommands are launched as `{app} cmd [args]`. For information on using
+subcommand 'cmd', do: `{app} cmd -h`.
+"""
+# get running program name
+
+# -----------------------------------------------------------------------------
+# Application class
+# -----------------------------------------------------------------------------
+
+
+_envvar = os.environ.get("TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR", "")
+if _envvar.lower() in {"1", "true"}:
+ TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR = True
+elif _envvar.lower() in {"0", "false", ""}:
+ TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR = False
+else:
+ raise ValueError(
+ "Unsupported value for environment variable: 'TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR' is set to '%s' which is none of {'0', '1', 'false', 'true', ''}."
+ % _envvar
+ )
+
+
+IS_PYTHONW = sys.executable and sys.executable.endswith("pythonw.exe")
+
+T = t.TypeVar("T", bound=t.Callable[..., t.Any])
+AnyLogger = t.Union[logging.Logger, "logging.LoggerAdapter[t.Any]"]
+StrDict = t.Dict[str, t.Any]
+ArgvType = t.Optional[t.List[str]]
+ClassesType = t.List[t.Type[Configurable]]
+
+
+def catch_config_error(method: T) -> T:
+ """Method decorator for catching invalid config (Trait/ArgumentErrors) during init.
+
+ On a TraitError (generally caused by bad config), this will print the trait's
+ message, and exit the app.
+
+ For use on init methods, to prevent invoking excepthook on invalid input.
+ """
+
+ @functools.wraps(method)
+ def inner(app: Application, *args: t.Any, **kwargs: t.Any) -> t.Any:
+ try:
+ return method(app, *args, **kwargs)
+ except (TraitError, ArgumentError) as e:
+ app.log.fatal("Bad config encountered during initialization: %s", e)
+ app.log.debug("Config at the time: %s", app.config)
+ app.exit(1)
+
+ return t.cast(T, inner)
+
+
+class ApplicationError(Exception):
+ pass
+
+
+class LevelFormatter(logging.Formatter):
+ """Formatter with additional `highlevel` record
+
+ This field is empty if log level is less than highlevel_limit,
+ otherwise it is formatted with self.highlevel_format.
+
+ Useful for adding 'WARNING' to warning messages,
+ without adding 'INFO' to info, etc.
+ """
+
+ highlevel_limit = logging.WARN
+ highlevel_format = " %(levelname)s |"
+
+ def format(self, record: logging.LogRecord) -> str:
+ if record.levelno >= self.highlevel_limit:
+ record.highlevel = self.highlevel_format % record.__dict__
+ else:
+ record.highlevel = ""
+ return super().format(record)
+
+
+class Application(SingletonConfigurable):
+ """A singleton application with full configuration support."""
+
+ # The name of the application, will usually match the name of the command
+ # line application
+ name: str | Unicode[str, str | bytes] = Unicode("application")
+
+ # The description of the application that is printed at the beginning
+ # of the help.
+ description: str | Unicode[str, str | bytes] = Unicode("This is an application.")
+ # default section descriptions
+ option_description: str | Unicode[str, str | bytes] = Unicode(option_description)
+ keyvalue_description: str | Unicode[str, str | bytes] = Unicode(keyvalue_description)
+ subcommand_description: str | Unicode[str, str | bytes] = Unicode(subcommand_description)
+
+ python_config_loader_class = PyFileConfigLoader
+ json_config_loader_class = JSONFileConfigLoader
+
+ # The usage and example string that goes at the end of the help string.
+ examples: str | Unicode[str, str | bytes] = Unicode()
+
+ # A sequence of Configurable subclasses whose config=True attributes will
+ # be exposed at the command line.
+ classes: ClassesType = []
+
+ def _classes_inc_parents(
+ self, classes: ClassesType | None = None
+ ) -> t.Generator[type[Configurable], None, None]:
+ """Iterate through configurable classes, including configurable parents
+
+ :param classes:
+ The list of classes to iterate; if not set, uses :attr:`classes`.
+
+ Children should always be after parents, and each class should only be
+ yielded once.
+ """
+ if classes is None:
+ classes = self.classes
+
+ seen = set()
+ for c in classes:
+ # We want to sort parents before children, so we reverse the MRO
+ for parent in reversed(c.mro()):
+ if issubclass(parent, Configurable) and (parent not in seen):
+ seen.add(parent)
+ yield parent
+
+ # The version string of this application.
+ version: str | Unicode[str, str | bytes] = Unicode("0.0")
+
+ # the argv used to initialize the application
+ argv: list[str] | List[str] = List()
+
+ # Whether failing to load config files should prevent startup
+ raise_config_file_errors = Bool(TRAITLETS_APPLICATION_RAISE_CONFIG_FILE_ERROR)
+
+ # The log level for the application
+ log_level = Enum(
+ (0, 10, 20, 30, 40, 50, "DEBUG", "INFO", "WARN", "ERROR", "CRITICAL"),
+ default_value=logging.WARN,
+ help="Set the log level by value or name.",
+ ).tag(config=True)
+
+ _log_formatter_cls = LevelFormatter
+
+ log_datefmt = Unicode(
+ "%Y-%m-%d %H:%M:%S", help="The date format used by logging formatters for %(asctime)s"
+ ).tag(config=True)
+
+ log_format = Unicode(
+ "[%(name)s]%(highlevel)s %(message)s",
+ help="The Logging format template",
+ ).tag(config=True)
+
+ def get_default_logging_config(self) -> StrDict:
+ """Return the base logging configuration.
+
+ The default is to log to stderr using a StreamHandler, if no default
+ handler already exists.
+
+ The log handler level starts at logging.WARN, but this can be adjusted
+ by setting the ``log_level`` attribute.
+
+ The ``logging_config`` trait is merged into this allowing for finer
+ control of logging.
+
+ """
+ config: StrDict = {
+ "version": 1,
+ "handlers": {
+ "console": {
+ "class": "logging.StreamHandler",
+ "formatter": "console",
+ "level": logging.getLevelName(self.log_level), # type:ignore[arg-type]
+ "stream": "ext://sys.stderr",
+ },
+ },
+ "formatters": {
+ "console": {
+ "class": (
+ f"{self._log_formatter_cls.__module__}"
+ f".{self._log_formatter_cls.__name__}"
+ ),
+ "format": self.log_format,
+ "datefmt": self.log_datefmt,
+ },
+ },
+ "loggers": {
+ self.__class__.__name__: {
+ "level": "DEBUG",
+ "handlers": ["console"],
+ }
+ },
+ "disable_existing_loggers": False,
+ }
+
+ if IS_PYTHONW:
+ # disable logging
+ # (this should really go to a file, but file-logging is only
+ # hooked up in parallel applications)
+ del config["handlers"]
+ del config["loggers"]
+
+ return config
+
+ @observe("log_datefmt", "log_format", "log_level", "logging_config")
+ def _observe_logging_change(self, change: Bunch) -> None:
+ # convert log level strings to ints
+ log_level = self.log_level
+ if isinstance(log_level, str):
+ self.log_level = t.cast(int, getattr(logging, log_level))
+ self._configure_logging()
+
+ @observe("log", type="default")
+ def _observe_logging_default(self, change: Bunch) -> None:
+ self._configure_logging()
+
+ def _configure_logging(self) -> None:
+ config = self.get_default_logging_config()
+ nested_update(config, self.logging_config or {})
+ dictConfig(config)
+ # make a note that we have configured logging
+ self._logging_configured = True
+
+ @default("log")
+ def _log_default(self) -> AnyLogger:
+ """Start logging for this application."""
+ log = logging.getLogger(self.__class__.__name__)
+ log.propagate = False
+ _log = log # copied from Logger.hasHandlers() (new in Python 3.2)
+ while _log is not None:
+ if _log.handlers:
+ return log
+ if not _log.propagate:
+ break
+ _log = _log.parent # type:ignore[assignment]
+ return log
+
+ logging_config = Dict(
+ help="""
+ Configure additional log handlers.
+
+ The default stderr logs handler is configured by the
+ log_level, log_datefmt and log_format settings.
+
+ This configuration can be used to configure additional handlers
+ (e.g. to output the log to a file) or for finer control over the
+ default handlers.
+
+ If provided this should be a logging configuration dictionary, for
+ more information see:
+ https://docs.python.org/3/library/logging.config.html#logging-config-dictschema
+
+ This dictionary is merged with the base logging configuration which
+ defines the following:
+
+ * A logging formatter intended for interactive use called
+ ``console``.
+ * A logging handler that writes to stderr called
+ ``console`` which uses the formatter ``console``.
+ * A logger with the name of this application set to ``DEBUG``
+ level.
+
+ This example adds a new handler that writes to a file:
+
+ .. code-block:: python
+
+ c.Application.logging_config = {
+ "handlers": {
+ "file": {
+ "class": "logging.FileHandler",
+ "level": "DEBUG",
+ "filename": "",
+ }
+ },
+ "loggers": {
+ "": {
+ "level": "DEBUG",
+ # NOTE: if you don't list the default "console"
+ # handler here then it will be disabled
+ "handlers": ["console", "file"],
+ },
+ },
+ }
+
+ """,
+ ).tag(config=True)
+
+ #: the alias map for configurables
+ #: Keys may be strings or tuples for additional options; a single-letter alias is accessed like `-v`.
+ #: Values may be two-tuples of (Class.trait, help-text),
+ # or just the "Class.trait" string, in which case the help text is inferred from the
+ # corresponding trait
+ aliases: StrDict = {"log-level": "Application.log_level"}
+
+ # flags for loading Configurables or store_const style flags
+ # flags are loaded from this dict by '--key' flags
+ # this must be a dict of two-tuples, the first element being the Config/dict
+ # and the second being the help string for the flag
+ flags: StrDict = {
+ "debug": (
+ {
+ "Application": {
+ "log_level": logging.DEBUG,
+ },
+ },
+ "Set log-level to debug, for the most verbose logging.",
+ ),
+ "show-config": (
+ {
+ "Application": {
+ "show_config": True,
+ },
+ },
+ "Show the application's configuration (human-readable format)",
+ ),
+ "show-config-json": (
+ {
+ "Application": {
+ "show_config_json": True,
+ },
+ },
+ "Show the application's configuration (json format)",
+ ),
+ }
+
+ # subcommands for launching other applications
+ # if this is not empty, this will be a parent Application
+ # this must be a dict of two-tuples,
+ # the first element being the application class/import string
+ # and the second being the help string for the subcommand
+ subcommands: dict[str, t.Any] | Dict[str, t.Any] = Dict()
+ # parse_command_line will initialize a subapp, if requested
+ subapp = Instance("traitlets.config.application.Application", allow_none=True)
+
+ # extra command-line arguments that don't set config values
+ extra_args = List(Unicode())
+
+ cli_config = Instance(
+ Config,
+ (),
+ {},
+ help="""The subset of our configuration that came from the command-line
+
+ We re-load this configuration after loading config files,
+ to ensure that it maintains highest priority.
+ """,
+ )
+
+ _loaded_config_files: List[str] = List()
+
+ show_config = Bool(
+ help="Instead of starting the Application, dump configuration to stdout"
+ ).tag(config=True)
+
+ show_config_json = Bool(
+ help="Instead of starting the Application, dump configuration to stdout (as JSON)"
+ ).tag(config=True)
+
+ @observe("show_config_json")
+ def _show_config_json_changed(self, change: Bunch) -> None:
+ self.show_config = change.new
+
+ @observe("show_config")
+ def _show_config_changed(self, change: Bunch) -> None:
+ if change.new:
+ self._save_start = self.start
+ self.start = self.start_show_config # type:ignore[method-assign]
+
+ def __init__(self, **kwargs: t.Any) -> None:
+ SingletonConfigurable.__init__(self, **kwargs)
+ # Ensure my class is in self.classes, so my attributes appear in command line
+ # options and config files.
+ cls = self.__class__
+ if cls not in self.classes:
+ if self.classes is cls.classes:
+ # class attr, assign instead of insert
+ self.classes = [cls, *self.classes]
+ else:
+ self.classes.insert(0, self.__class__)
+
+ @observe("config")
+ @observe_compat
+ def _config_changed(self, change: Bunch) -> None:
+ super()._config_changed(change)
+ self.log.debug("Config changed: %r", change.new)
+
+ @catch_config_error
+ def initialize(self, argv: ArgvType = None) -> None:
+ """Do the basic steps to configure me.
+
+ Override in subclasses.
+ """
+ self.parse_command_line(argv)
+
+ def start(self) -> None:
+ """Start the app mainloop.
+
+ Override in subclasses.
+ """
+ if self.subapp is not None:
+ assert isinstance(self.subapp, Application)
+ return self.subapp.start()
+
+ def start_show_config(self) -> None:
+ """start function used when show_config is True"""
+ config = self.config.copy()
+ # exclude show_config flags from displayed config
+ for cls in self.__class__.mro():
+ if cls.__name__ in config:
+ cls_config = config[cls.__name__]
+ cls_config.pop("show_config", None)
+ cls_config.pop("show_config_json", None)
+
+ if self.show_config_json:
+ json.dump(config, sys.stdout, indent=1, sort_keys=True, default=repr)
+ # add trailing newline
+ sys.stdout.write("\n")
+ return
+
+ if self._loaded_config_files:
+ print("Loaded config files:")
+ for f in self._loaded_config_files:
+ print(" " + f)
+ print()
+
+ for classname in sorted(config):
+ class_config = config[classname]
+ if not class_config:
+ continue
+ print(classname)
+ pformat_kwargs: StrDict = dict(indent=4, compact=True) # noqa: C408
+
+ for traitname in sorted(class_config):
+ value = class_config[traitname]
+ print(f" .{traitname} = {pprint.pformat(value, **pformat_kwargs)}")
+
+ def print_alias_help(self) -> None:
+ """Print the alias parts of the help."""
+ print("\n".join(self.emit_alias_help()))
+
+ def emit_alias_help(self) -> t.Generator[str, None, None]:
+ """Yield the lines for alias part of the help."""
+ if not self.aliases:
+ return
+
+ classdict: dict[str, type[Configurable]] = {}
+ for cls in self.classes:
+ # include all parents (up to, but excluding Configurable) in available names
+ for c in cls.mro()[:-3]:
+ classdict[c.__name__] = t.cast(t.Type[Configurable], c)
+
+ fhelp: str | None
+ for alias, longname in self.aliases.items():
+ try:
+ if isinstance(longname, tuple):
+ longname, fhelp = longname
+ else:
+ fhelp = None
+ classname, traitname = longname.split(".")[-2:]
+ longname = classname + "." + traitname
+ cls = classdict[classname]
+
+ trait = cls.class_traits(config=True)[traitname]
+ fhelp_lines = cls.class_get_trait_help(trait, helptext=fhelp).splitlines()
+
+ if not isinstance(alias, tuple): # type:ignore[unreachable]
+ alias = (alias,) # type:ignore[assignment]
+ alias = sorted(alias, key=len) # type:ignore[assignment]
+ alias = ", ".join(("--%s" if len(m) > 1 else "-%s") % m for m in alias)
+
+ # reformat first line
+ fhelp_lines[0] = fhelp_lines[0].replace("--" + longname, alias)
+ yield from fhelp_lines
+ yield indent("Equivalent to: [--%s]" % longname)
+ except Exception as ex:
+ self.log.error("Failed collecting help-message for alias %r, due to: %s", alias, ex)
+ raise
+
+ def print_flag_help(self) -> None:
+ """Print the flag part of the help."""
+ print("\n".join(self.emit_flag_help()))
+
+ def emit_flag_help(self) -> t.Generator[str, None, None]:
+ """Yield the lines for the flag part of the help."""
+ if not self.flags:
+ return
+
+ for flags, (cfg, fhelp) in self.flags.items():
+ try:
+ if not isinstance(flags, tuple): # type:ignore[unreachable]
+ flags = (flags,) # type:ignore[assignment]
+ flags = sorted(flags, key=len) # type:ignore[assignment]
+ flags = ", ".join(("--%s" if len(m) > 1 else "-%s") % m for m in flags)
+ yield flags
+ yield indent(dedent(fhelp.strip()))
+ cfg_list = " ".join(
+ f"--{clname}.{prop}={val}"
+ for clname, props_dict in cfg.items()
+ for prop, val in props_dict.items()
+ )
+ cfg_txt = "Equivalent to: [%s]" % cfg_list
+ yield indent(dedent(cfg_txt))
+ except Exception as ex:
+ self.log.error("Failed collecting help-message for flag %r, due to: %s", flags, ex)
+ raise
+
+ def print_options(self) -> None:
+ """Print the options part of the help."""
+ print("\n".join(self.emit_options_help()))
+
+ def emit_options_help(self) -> t.Generator[str, None, None]:
+ """Yield the lines for the options part of the help."""
+ if not self.flags and not self.aliases:
+ return
+ header = "Options"
+ yield header
+ yield "=" * len(header)
+ for p in wrap_paragraphs(self.option_description):
+ yield p
+ yield ""
+
+ yield from self.emit_flag_help()
+ yield from self.emit_alias_help()
+ yield ""
+
+ def print_subcommands(self) -> None:
+ """Print the subcommand part of the help."""
+ print("\n".join(self.emit_subcommands_help()))
+
+ def emit_subcommands_help(self) -> t.Generator[str, None, None]:
+ """Yield the lines for the subcommand part of the help."""
+ if not self.subcommands:
+ return
+
+ header = "Subcommands"
+ yield header
+ yield "=" * len(header)
+ for p in wrap_paragraphs(self.subcommand_description.format(app=self.name)):
+ yield p
+ yield ""
+ for subc, (_, help) in self.subcommands.items():
+ yield subc
+ if help:
+ yield indent(dedent(help.strip()))
+ yield ""
+
+ def emit_help_epilogue(self, classes: bool) -> t.Generator[str, None, None]:
+ """Yield the very bottom lines of the help message.
+
+ If classes=False (the default), print `--help-all` msg.
+ """
+ if not classes:
+ yield "To see all available configurables, use `--help-all`."
+ yield ""
+
+ def print_help(self, classes: bool = False) -> None:
+ """Print the help for each Configurable class in self.classes.
+
+ If classes=False (the default), only flags and aliases are printed.
+ """
+ print("\n".join(self.emit_help(classes=classes)))
+
+ def emit_help(self, classes: bool = False) -> t.Generator[str, None, None]:
+ """Yield the help-lines for each Configurable class in self.classes.
+
+ If classes=False (the default), only flags and aliases are printed.
+ """
+ yield from self.emit_description()
+ yield from self.emit_subcommands_help()
+ yield from self.emit_options_help()
+
+ if classes:
+ help_classes = self._classes_with_config_traits()
+ if help_classes is not None:
+ yield "Class options"
+ yield "============="
+ for p in wrap_paragraphs(self.keyvalue_description):
+ yield p
+ yield ""
+
+ for cls in help_classes:
+ yield cls.class_get_help()
+ yield ""
+ yield from self.emit_examples()
+
+ yield from self.emit_help_epilogue(classes)
+
+ def document_config_options(self) -> str:
+ """Generate rST format documentation for the config options this application
+
+ Returns a multiline string.
+ """
+ return "\n".join(c.class_config_rst_doc() for c in self._classes_inc_parents())
+
+ def print_description(self) -> None:
+ """Print the application description."""
+ print("\n".join(self.emit_description()))
+
+ def emit_description(self) -> t.Generator[str, None, None]:
+ """Yield lines with the application description."""
+ for p in wrap_paragraphs(self.description or self.__doc__ or ""):
+ yield p
+ yield ""
+
+ def print_examples(self) -> None:
+ """Print usage and examples (see `emit_examples()`)."""
+ print("\n".join(self.emit_examples()))
+
+ def emit_examples(self) -> t.Generator[str, None, None]:
+ """Yield lines with the usage and examples.
+
+ This usage string goes at the end of the command line help string
+ and should contain examples of the application's usage.
+ """
+ if self.examples:
+ yield "Examples"
+ yield "--------"
+ yield ""
+ yield indent(dedent(self.examples.strip()))
+ yield ""
+
+ def print_version(self) -> None:
+ """Print the version string."""
+ print(self.version)
+
+ @catch_config_error
+ def initialize_subcommand(self, subc: str, argv: ArgvType = None) -> None:
+ """Initialize a subcommand with argv."""
+ val = self.subcommands.get(subc)
+ assert val is not None
+ subapp, _ = val
+
+ if isinstance(subapp, str):
+ subapp = import_item(subapp)
+
+ # Cannot issubclass() on a non-type (SO: http://stackoverflow.com/questions/8692430)
+ if isinstance(subapp, type) and issubclass(subapp, Application):
+ # Clear existing instances before...
+ self.__class__.clear_instance()
+ # instantiating subapp...
+ self.subapp = subapp.instance(parent=self)
+ elif callable(subapp):
+ # or ask factory to create it...
+ self.subapp = subapp(self)
+ else:
+ raise AssertionError("Invalid mappings for subcommand '%s'!" % subc)
+
+ # ... and finally initialize subapp.
+ self.subapp.initialize(argv)
+
+ def flatten_flags(self) -> tuple[dict[str, t.Any], dict[str, t.Any]]:
+ """Flatten flags and aliases for loaders, so cl-args override as expected.
+
+ This prevents issues such as an alias pointing to InteractiveShell,
+ but a config file setting the same trait in TerminalInteractiveShell
+ getting inappropriate priority over the command-line arg.
+ Also, loaders expect ``(key: longname)`` and not ``key: (longname, help)`` items.
+
+ Only aliases with exactly one descendant in the class list
+ will be promoted.
+
+ """
+ # build a tree of classes in our list that inherit from a particular class;
+ # it will be a dict, keyed by parent classname, of classes in our list
+ # that are descendants
+ mro_tree = defaultdict(list)
+ for cls in self.classes:
+ clsname = cls.__name__
+ for parent in cls.mro()[1:-3]:
+ # exclude cls itself and Configurable,HasTraits,object
+ mro_tree[parent.__name__].append(clsname)
+ # flatten aliases, which have the form:
+ # { 'alias' : 'Class.trait' }
+ aliases: dict[str, str] = {}
+ for alias, longname in self.aliases.items():
+ if isinstance(longname, tuple):
+ longname, _ = longname
+ cls, trait = longname.split(".", 1)
+ children = mro_tree[cls] # type:ignore[index]
+ if len(children) == 1:
+ # exactly one descendant, promote alias
+ cls = children[0] # type:ignore[assignment]
+ if not isinstance(aliases, tuple): # type:ignore[unreachable]
+ alias = (alias,) # type:ignore[assignment]
+ for al in alias:
+ aliases[al] = ".".join([cls, trait]) # type:ignore[list-item]
+
+ # flatten flags, which are of the form:
+ # { 'key' : ({'Cls' : {'trait' : value}}, 'help')}
+ flags = {}
+ for key, (flagdict, help) in self.flags.items():
+ newflag: dict[t.Any, t.Any] = {}
+ for cls, subdict in flagdict.items():
+ children = mro_tree[cls] # type:ignore[index]
+ # exactly one descendant, promote flag section
+ if len(children) == 1:
+ cls = children[0] # type:ignore[assignment]
+
+ if cls in newflag:
+ newflag[cls].update(subdict)
+ else:
+ newflag[cls] = subdict
+
+ if not isinstance(key, tuple): # type:ignore[unreachable]
+ key = (key,) # type:ignore[assignment]
+ for k in key:
+ flags[k] = (newflag, help)
+ return flags, aliases
+
+ def _create_loader(
+ self,
+ argv: list[str] | None,
+ aliases: StrDict,
+ flags: StrDict,
+ classes: ClassesType | None,
+ ) -> KVArgParseConfigLoader:
+ return KVArgParseConfigLoader(
+ argv, aliases, flags, classes=classes, log=self.log, subcommands=self.subcommands
+ )
+
+ @classmethod
+ def _get_sys_argv(cls, check_argcomplete: bool = False) -> list[str]:
+ """Get `sys.argv` or equivalent from `argcomplete`
+
+ `argcomplete`'s strategy is to call the python script with no arguments,
+ so ``len(sys.argv) == 1``, and run until the `ArgumentParser` is constructed
+ and determine what completions are available.
+
+ On the other hand, `traitlets`' subcommand-handling strategy is to check
+ ``sys.argv[1]`` and see if it matches a subcommand, and if so then dynamically
+ load the subcommand app and initialize it with ``sys.argv[1:]``.
+
+ This helper method helps to take the current tokens for `argcomplete` and pass
+ them through as `argv`.
+ """
+ if check_argcomplete and "_ARGCOMPLETE" in os.environ:
+ try:
+ from traitlets.config.argcomplete_config import get_argcomplete_cwords
+
+ cwords = get_argcomplete_cwords()
+ assert cwords is not None
+ return cwords
+ except (ImportError, ModuleNotFoundError):
+ pass
+ return sys.argv
+
+ @classmethod
+ def _handle_argcomplete_for_subcommand(cls) -> None:
+ """Helper for `argcomplete` to recognize `traitlets` subcommands
+
+ `argcomplete` does not know that `traitlets` has already consumed subcommands,
+ as it only "sees" the final `argparse.ArgumentParser` that is constructed.
+ (Indeed `KVArgParseConfigLoader` does not get passed subcommands at all currently.)
+ We explicitly manipulate the environment variables used internally by `argcomplete`
+ to get it to skip over the subcommand tokens.
+ """
+ if "_ARGCOMPLETE" not in os.environ:
+ return
+
+ try:
+ from traitlets.config.argcomplete_config import increment_argcomplete_index
+
+ increment_argcomplete_index()
+ except (ImportError, ModuleNotFoundError):
+ pass
+
+ @catch_config_error
+ def parse_command_line(self, argv: ArgvType = None) -> None:
+ """Parse the command line arguments."""
+ assert not isinstance(argv, str)
+ if argv is None:
+ argv = self._get_sys_argv(check_argcomplete=bool(self.subcommands))[1:]
+ self.argv = [cast_unicode(arg) for arg in argv]
+
+ if argv and argv[0] == "help":
+ # turn `ipython help notebook` into `ipython notebook -h`
+ argv = argv[1:] + ["-h"]
+
+ if self.subcommands and len(argv) > 0:
+ # we have subcommands, and one may have been specified
+ subc, subargv = argv[0], argv[1:]
+ if re.match(r"^\w(\-?\w)*$", subc) and subc in self.subcommands:
+ # it's a subcommand, and *not* a flag or class parameter
+ self._handle_argcomplete_for_subcommand()
+ return self.initialize_subcommand(subc, subargv)
+
+ # Arguments after a '--' argument are for the script IPython may be
+ # about to run, not IPython itself. For arguments parsed here (help and
+ # version), we want to only search the arguments up to the first
+ # occurrence of '--', which we're calling interpreted_argv.
+ try:
+ interpreted_argv = argv[: argv.index("--")]
+ except ValueError:
+ interpreted_argv = argv
+
+ if any(x in interpreted_argv for x in ("-h", "--help-all", "--help")):
+ self.print_help("--help-all" in interpreted_argv)
+ self.exit(0)
+
+ if "--version" in interpreted_argv or "-V" in interpreted_argv:
+ self.print_version()
+ self.exit(0)
+
+ # flatten flags&aliases, so cl-args get appropriate priority:
+ flags, aliases = self.flatten_flags()
+ classes = list(self._classes_with_config_traits())
+ loader = self._create_loader(argv, aliases, flags, classes=classes)
+ try:
+ self.cli_config = deepcopy(loader.load_config())
+ except SystemExit:
+ # traitlets 5: no longer print help output on error
+ # help output is huge, and comes after the error
+ raise
+ self.update_config(self.cli_config)
+ # store unparsed args in extra_args
+ self.extra_args = loader.extra_args
+
+ @classmethod
+ def _load_config_files(
+ cls,
+ basefilename: str,
+ path: str | t.Sequence[str | None] | None,
+ log: AnyLogger | None = None,
+ raise_config_file_errors: bool = False,
+ ) -> t.Generator[t.Any, None, None]:
+ """Load config files (py,json) by filename and path.
+
+ yield each config object in turn.
+ """
+ if isinstance(path, str) or path is None:
+ path = [path]
+ for current in reversed(path):
+ # path list is in descending priority order, so load files backwards:
+ pyloader = cls.python_config_loader_class(basefilename + ".py", path=current, log=log)
+ if log:
+ log.debug("Looking for %s in %s", basefilename, current or os.getcwd())
+ jsonloader = cls.json_config_loader_class(basefilename + ".json", path=current, log=log)
+ loaded: list[t.Any] = []
+ filenames: list[str] = []
+ for loader in [pyloader, jsonloader]:
+ config = None
+ try:
+ config = loader.load_config()
+ except ConfigFileNotFound:
+ pass
+ except Exception:
+ # try to get the full filename, but it will be empty in the
+ # unlikely event that the error raised before filefind finished
+ filename = loader.full_filename or basefilename
+ # problem while running the file
+ if raise_config_file_errors:
+ raise
+ if log:
+ log.error("Exception while loading config file %s", filename, exc_info=True) # noqa: G201
+ else:
+ if log:
+ log.debug("Loaded config file: %s", loader.full_filename)
+ if config:
+ for filename, earlier_config in zip(filenames, loaded):
+ collisions = earlier_config.collisions(config)
+ if collisions and log:
+ log.warning(
+ "Collisions detected in {0} and {1} config files." # noqa: G001
+ " {1} has higher priority: {2}".format(
+ filename,
+ loader.full_filename,
+ json.dumps(collisions, indent=2),
+ )
+ )
+ yield (config, loader.full_filename)
+ loaded.append(config)
+ filenames.append(loader.full_filename)
+
+ @property
+ def loaded_config_files(self) -> list[str]:
+ """Currently loaded configuration files"""
+ return self._loaded_config_files[:]
+
+ @catch_config_error
+ def load_config_file(
+ self, filename: str, path: str | t.Sequence[str | None] | None = None
+ ) -> None:
+ """Load config files by filename and path."""
+ filename, ext = os.path.splitext(filename)
+ new_config = Config()
+ for config, fname in self._load_config_files(
+ filename,
+ path=path,
+ log=self.log,
+ raise_config_file_errors=self.raise_config_file_errors,
+ ):
+ new_config.merge(config)
+ if (
+ fname not in self._loaded_config_files
+ ): # only add to list of loaded files if not previously loaded
+ self._loaded_config_files.append(fname)
+ # add self.cli_config to preserve CLI config priority
+ new_config.merge(self.cli_config)
+ self.update_config(new_config)
+
+ @catch_config_error
+ def load_config_environ(self) -> None:
+ """Load config files by environment."""
+ PREFIX = self.name.upper().replace("-", "_")
+ new_config = Config()
+
+ self.log.debug('Looping through config variables with prefix "%s"', PREFIX)
+
+ for k, v in os.environ.items():
+ if k.startswith(PREFIX):
+ self.log.debug('Seeing environ "%s"="%s"', k, v)
+ # use __ instead of . as separator in env variable.
+ # Warning, case sensitive !
+ _, *path, key = k.split("__")
+ section = new_config
+ for p in path:
+ section = section[p]
+ setattr(section, key, DeferredConfigString(v))
+
+ new_config.merge(self.cli_config)
+ self.update_config(new_config)
+
+ def _classes_with_config_traits(
+ self, classes: ClassesType | None = None
+ ) -> t.Generator[type[Configurable], None, None]:
+ """
+ Yields only classes with configurable traits, and their subclasses.
+
+ :param classes:
+ The list of classes to iterate; if not set, uses :attr:`classes`.
+
+ Thus, produced sample config-file will contain all classes
+ on which a trait-value may be overridden:
+
+ - either on the class owning the trait,
+ - or on its subclasses, even if those subclasses do not define
+ any traits themselves.
+ """
+ if classes is None:
+ classes = self.classes
+
+ cls_to_config = OrderedDict(
+ (cls, bool(cls.class_own_traits(config=True)))
+ for cls in self._classes_inc_parents(classes)
+ )
+
+ def is_any_parent_included(cls: t.Any) -> bool:
+ return any(b in cls_to_config and cls_to_config[b] for b in cls.__bases__)
+
+ # Mark "empty" classes for inclusion if their parents own-traits,
+ # and loop until no more classes gets marked.
+ #
+ while True:
+ to_incl_orig = cls_to_config.copy()
+ cls_to_config = OrderedDict(
+ (cls, inc_yes or is_any_parent_included(cls))
+ for cls, inc_yes in cls_to_config.items()
+ )
+ if cls_to_config == to_incl_orig:
+ break
+ for cl, inc_yes in cls_to_config.items():
+ if inc_yes:
+ yield cl
+
+ def generate_config_file(self, classes: ClassesType | None = None) -> str:
+ """generate default config file from Configurables"""
+ lines = ["# Configuration file for %s." % self.name]
+ lines.append("")
+ lines.append("c = get_config() #" + "noqa")
+ lines.append("")
+ classes = self.classes if classes is None else classes
+ config_classes = list(self._classes_with_config_traits(classes))
+ for cls in config_classes:
+ lines.append(cls.class_config_section(config_classes))
+ return "\n".join(lines)
+
+ def close_handlers(self) -> None:
+ if getattr(self, "_logging_configured", False):
+ # don't attempt to close handlers unless they have been opened
+ # (note accessing self.log.handlers will create handlers if they
+ # have not yet been initialised)
+ for handler in self.log.handlers:
+ with suppress(Exception):
+ handler.close()
+ self._logging_configured = False
+
+ def exit(self, exit_status: int | str | None = 0) -> None:
+ self.log.debug("Exiting application: %s", self.name)
+ self.close_handlers()
+ sys.exit(exit_status)
+
+ def __del__(self) -> None:
+ self.close_handlers()
+
+ @classmethod
+ def launch_instance(cls, argv: ArgvType = None, **kwargs: t.Any) -> None:
+ """Launch a global instance of this Application
+
+ If a global instance already exists, this reinitializes and starts it
+ """
+ app = cls.instance(**kwargs)
+ app.initialize(argv)
+ app.start()
+
+
+# -----------------------------------------------------------------------------
+# utility functions, for convenience
+# -----------------------------------------------------------------------------
+
+default_aliases = Application.aliases
+default_flags = Application.flags
+
+
+def boolean_flag(name: str, configurable: str, set_help: str = "", unset_help: str = "") -> StrDict:
+ """Helper for building basic --trait, --no-trait flags.
+
+ Parameters
+ ----------
+ name : str
+ The name of the flag.
+ configurable : str
+ The 'Class.trait' string of the trait to be set/unset with the flag
+ set_help : unicode
+ help string for --name flag
+ unset_help : unicode
+ help string for --no-name flag
+
+ Returns
+ -------
+ cfg : dict
+ A dict with two keys: 'name', and 'no-name', for setting and unsetting
+ the trait, respectively.
+ """
+ # default helpstrings
+ set_help = set_help or "set %s=True" % configurable
+ unset_help = unset_help or "set %s=False" % configurable
+
+ cls, trait = configurable.split(".")
+
+ setter = {cls: {trait: True}}
+ unsetter = {cls: {trait: False}}
+ return {name: (setter, set_help), "no-" + name: (unsetter, unset_help)}
+
+
+def get_config() -> Config:
+ """Get the config object for the global Application instance, if there is one
+
+ otherwise return an empty config object
+ """
+ if Application.initialized():
+ return Application.instance().config
+ else:
+ return Config()
+
+
+if __name__ == "__main__":
+ Application.launch_instance()
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/loader.py b/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..f9eb5fe191e6119d22d1a376f62619d6e3c63615
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/loader.py
@@ -0,0 +1,1179 @@
+"""A simple configuration system."""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+from __future__ import annotations
+
+import argparse
+import copy
+import functools
+import json
+import os
+import re
+import sys
+import typing as t
+from logging import Logger
+
+from traitlets.traitlets import Any, Container, Dict, HasTraits, List, TraitType, Undefined
+
+from ..utils import cast_unicode, filefind, warnings
+
+# -----------------------------------------------------------------------------
+# Exceptions
+# -----------------------------------------------------------------------------
+
+
+class ConfigError(Exception):
+ pass
+
+
+class ConfigLoaderError(ConfigError):
+ pass
+
+
+class ConfigFileNotFound(ConfigError):
+ pass
+
+
+class ArgumentError(ConfigLoaderError):
+ pass
+
+
+# -----------------------------------------------------------------------------
+# Argparse fix
+# -----------------------------------------------------------------------------
+
+# Unfortunately argparse by default prints help messages to stderr instead of
+# stdout. This makes it annoying to capture long help screens at the command
+# line, since one must know how to pipe stderr, which many users don't know how
+# to do. So we override the print_help method with one that defaults to
+# stdout and use our class instead.
+
+
+class _Sentinel:
+ def __repr__(self) -> str:
+ return ""
+
+ def __str__(self) -> str:
+ return ""
+
+
+_deprecated = _Sentinel()
+
+
+class ArgumentParser(argparse.ArgumentParser):
+ """Simple argparse subclass that prints help to stdout by default."""
+
+ def print_help(self, file: t.Any = None) -> None:
+ if file is None:
+ file = sys.stdout
+ return super().print_help(file)
+
+ print_help.__doc__ = argparse.ArgumentParser.print_help.__doc__
+
+
+# -----------------------------------------------------------------------------
+# Config class for holding config information
+# -----------------------------------------------------------------------------
+
+
+def execfile(fname: str, glob: dict[str, Any]) -> None:
+ with open(fname, "rb") as f:
+ exec(compile(f.read(), fname, "exec"), glob, glob) # noqa: S102
+
+
+class LazyConfigValue(HasTraits):
+ """Proxy object for exposing methods on configurable containers
+
+ These methods allow appending/extending/updating
+ to add to non-empty defaults instead of clobbering them.
+
+ Exposes:
+
+ - append, extend, insert on lists
+ - update on dicts
+ - update, add on sets
+ """
+
+ _value = None
+
+ # list methods
+ _extend: List[t.Any] = List()
+ _prepend: List[t.Any] = List()
+ _inserts: List[t.Any] = List()
+
+ def append(self, obj: t.Any) -> None:
+ """Append an item to a List"""
+ self._extend.append(obj)
+
+ def extend(self, other: t.Any) -> None:
+ """Extend a list"""
+ self._extend.extend(other)
+
+ def prepend(self, other: t.Any) -> None:
+ """like list.extend, but for the front"""
+ self._prepend[:0] = other
+
+ def merge_into(self, other: t.Any) -> t.Any:
+ """
+ Merge with another earlier LazyConfigValue or an earlier container.
+ This is useful when having global system-wide configuration files.
+
+ Self is expected to have higher precedence.
+
+ Parameters
+ ----------
+ other : LazyConfigValue or container
+
+ Returns
+ -------
+ LazyConfigValue
+ if ``other`` is also lazy, a reified container otherwise.
+ """
+ if isinstance(other, LazyConfigValue):
+ other._extend.extend(self._extend)
+ self._extend = other._extend
+
+ self._prepend.extend(other._prepend)
+
+ other._inserts.extend(self._inserts)
+ self._inserts = other._inserts
+
+ if self._update:
+ other.update(self._update)
+ self._update = other._update
+ return self
+ else:
+ # other is a container, reify now.
+ return self.get_value(other)
+
+ def insert(self, index: int, other: t.Any) -> None:
+ if not isinstance(index, int):
+ raise TypeError("An integer is required")
+ self._inserts.append((index, other))
+
+ # dict methods
+ # update is used for both dict and set
+ _update = Any()
+
+ def update(self, other: t.Any) -> None:
+ """Update either a set or dict"""
+ if self._update is None:
+ if isinstance(other, dict):
+ self._update = {}
+ else:
+ self._update = set()
+ self._update.update(other)
+
+ # set methods
+ def add(self, obj: t.Any) -> None:
+ """Add an item to a set"""
+ self.update({obj})
+
+ def get_value(self, initial: t.Any) -> t.Any:
+ """construct the value from the initial one
+
+ after applying any insert / extend / update changes
+ """
+ if self._value is not None:
+ return self._value # type:ignore[unreachable]
+ value = copy.deepcopy(initial)
+ if isinstance(value, list):
+ for idx, obj in self._inserts:
+ value.insert(idx, obj)
+ value[:0] = self._prepend
+ value.extend(self._extend)
+
+ elif isinstance(value, dict):
+ if self._update:
+ value.update(self._update)
+ elif isinstance(value, set):
+ if self._update:
+ value.update(self._update)
+ self._value = value
+ return value
+
+ def to_dict(self) -> dict[str, t.Any]:
+ """return JSONable dict form of my data
+
+ Currently update as dict or set, extend, prepend as lists, and inserts as list of tuples.
+ """
+ d = {}
+ if self._update:
+ d["update"] = self._update
+ if self._extend:
+ d["extend"] = self._extend
+ if self._prepend:
+ d["prepend"] = self._prepend
+ elif self._inserts:
+ d["inserts"] = self._inserts
+ return d
+
+ def __repr__(self) -> str:
+ if self._value is not None:
+ return f"<{self.__class__.__name__} value={self._value!r}>"
+ else:
+ return f"<{self.__class__.__name__} {self.to_dict()!r}>"
+
+
+def _is_section_key(key: str) -> bool:
+ """Is a Config key a section name (does it start with a capital)?"""
+ return bool(key and key[0].upper() == key[0] and not key.startswith("_"))
+
+
class Config(dict): # type:ignore[type-arg]
    """An attribute-based dict that can do smart merges.

    Accessing a field on a config object for the first time populates the key
    with either a nested Config object for keys starting with capitals
    or :class:`.LazyConfigValue` for lowercase keys,
    allowing quick assignments such as::

        c = Config()
        c.Class.int_trait = 5
        c.Class.list_trait.append("x")

    """

    def __init__(self, *args: t.Any, **kwds: t.Any) -> None:
        dict.__init__(self, *args, **kwds)
        # promote plain sub-dicts under section keys to Config objects
        self._ensure_subconfig()

    def _ensure_subconfig(self) -> None:
        """ensure that sub-dicts that should be Config objects are

        casts dicts that are under section keys to Config objects,
        which is necessary for constructing Config objects from dict literals.
        """
        for key in self:
            obj = self[key]
            if _is_section_key(key) and isinstance(obj, dict) and not isinstance(obj, Config):
                setattr(self, key, Config(obj))

    def _merge(self, other: t.Any) -> None:
        """deprecated alias, use Config.merge()"""
        self.merge(other)

    def merge(self, other: t.Any) -> None:
        """merge another config object into this one

        ``other`` wins on plain-value conflicts; nested Configs are merged
        recursively and LazyConfigValues are applied via ``merge_into``.
        """
        to_update = {}
        for k, v in other.items():
            if k not in self:
                to_update[k] = v
            else:  # I have this key
                if isinstance(v, Config) and isinstance(self[k], Config):
                    # Recursively merge common sub Configs
                    self[k].merge(v)
                elif isinstance(v, LazyConfigValue):
                    self[k] = v.merge_into(self[k])
                else:
                    # Plain updates for non-Configs
                    to_update[k] = v

        self.update(to_update)

    def collisions(self, other: Config) -> dict[str, t.Any]:
        """Check for collisions between two config objects.

        Returns a dict of the form {"Class": {"trait": "collision message"}}`,
        indicating which values have been ignored.

        An empty dict indicates no collisions.
        """
        collisions: dict[str, t.Any] = {}
        for section in self:
            if section not in other:
                continue
            mine = self[section]
            theirs = other[section]
            for key in mine:
                # differing values for the same trait: `other` would win
                if key in theirs and mine[key] != theirs[key]:
                    collisions.setdefault(section, {})
                    collisions[section][key] = f"{mine[key]!r} ignored, using {theirs[key]!r}"
        return collisions

    def __contains__(self, key: t.Any) -> bool:
        # allow nested contains of the form `"Section.key" in config`
        if "." in key:
            first, remainder = key.split(".", 1)
            if first not in self:
                return False
            return remainder in self[first]

        return super().__contains__(key)

    # .has_key is deprecated for dictionaries.
    has_key = __contains__

    def _has_section(self, key: str) -> bool:
        # True only for an *existing* section (capitalized key)
        return _is_section_key(key) and key in self

    def copy(self) -> dict[str, t.Any]:
        # shallow copy that preserves the Config subclass
        return type(self)(dict.copy(self))

    def __copy__(self) -> dict[str, t.Any]:
        return self.copy()

    def __deepcopy__(self, memo: t.Any) -> Config:
        new_config = type(self)()
        for key, value in self.items():
            if isinstance(value, (Config, LazyConfigValue)):
                # deep copy config objects
                value = copy.deepcopy(value, memo)
            elif type(value) in {dict, list, set, tuple}:
                # shallow copy plain container traits
                value = copy.copy(value)
            new_config[key] = value
        return new_config

    def __getitem__(self, key: str) -> t.Any:
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            if _is_section_key(key):
                # auto-create empty sub-Config sections on first access
                c = Config()
                dict.__setitem__(self, key, c)
                return c
            elif not key.startswith("_"):
                # undefined, create lazy value, used for container methods
                v = LazyConfigValue()
                dict.__setitem__(self, key, v)
                return v
            else:
                raise

    def __setitem__(self, key: str, value: t.Any) -> None:
        if _is_section_key(key):
            if not isinstance(value, Config):
                raise ValueError(
                    "values whose keys begin with an uppercase "
                    f"char must be Config instances: {key!r}, {value!r}"
                )
        dict.__setitem__(self, key, value)

    def __getattr__(self, key: str) -> t.Any:
        # dunder lookups bypass the item magic to avoid recursion
        if key.startswith("__"):
            return dict.__getattr__(self, key) # type:ignore[attr-defined]
        try:
            return self.__getitem__(key)
        except KeyError as e:
            raise AttributeError(e) from e

    def __setattr__(self, key: str, value: t.Any) -> None:
        if key.startswith("__"):
            return dict.__setattr__(self, key, value)
        try:
            self.__setitem__(key, value)
        except KeyError as e:
            raise AttributeError(e) from e

    def __delattr__(self, key: str) -> None:
        if key.startswith("__"):
            return dict.__delattr__(self, key)
        try:
            dict.__delitem__(self, key)
        except KeyError as e:
            raise AttributeError(e) from e
+
+
class DeferredConfig:
    """Class for deferred-evaluation of config from CLI"""

    def get_value(self, trait: TraitType[t.Any, t.Any]) -> t.Any:
        """Resolve this deferred value against ``trait``; subclasses implement."""
        raise NotImplementedError("Implement in subclasses")

    def _super_repr(self) -> str:
        # explicitly call super on direct parent
        # (the str/list base of the concrete subclass), so the repr shows the
        # raw underlying value rather than this mixin's repr.
        # NOTE(review): super(self.__class__, ...) misbehaves if the concrete
        # Deferred* classes are subclassed further -- confirm none are.
        return super(self.__class__, self).__repr__()
+
+
class DeferredConfigString(str, DeferredConfig):
    """Config value for loading config from a string

    Interpretation is deferred until the value is loaded into a trait.

    Subclasses ``str`` for backward compatibility; used only for values that
    are not listed in the configurable classes.  At load time
    ``trait.from_string`` is applied, and if that raises, the raw string is
    returned so that a more informative error can surface when the config is
    actually loaded.

    .. versionadded:: 5.0
    """

    def get_value(self, trait: TraitType[t.Any, t.Any]) -> t.Any:
        """Get the value stored in this string"""
        raw = str(self)
        try:
            parsed = trait.from_string(raw)
        except Exception:
            # Casting failed -- keep the raw string so that loading the
            # config later raises a more informative error.
            return raw
        return parsed

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self._super_repr()})"
+
+
class DeferredConfigList(t.List[t.Any], DeferredConfig):
    """Config value for loading config from a list of strings

    Interpretation is deferred until load time, via ``trait.from_string_list``
    (or ``trait.from_string`` for traits that only accept a single value).
    If casting fails, the original value is returned so that a more
    informative error can be raised when the config is loaded.

    .. versionadded:: 5.0
    """

    def get_value(self, trait: TraitType[t.Any, t.Any]) -> t.Any:
        """Get the value stored in this list"""
        if hasattr(trait, "from_string_list"):
            source: t.Any = list(self)
            caster = trait.from_string_list
        else:
            # scalar trait: only a single item is permitted
            if len(self) > 1:
                raise ValueError(
                    f"{trait.name} only accepts one value, got {len(self)}: {list(self)}"
                )
            source = self[0]
            caster = trait.from_string

        try:
            return caster(source)
        except Exception:
            # Casting failed -- keep the original value so that loading the
            # config later raises a more informative error.
            return source

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self._super_repr()})"
+
+
+# -----------------------------------------------------------------------------
+# Config loading classes
+# -----------------------------------------------------------------------------
+
+
class ConfigLoader:
    """A object for loading configurations from just about anywhere.

    The resulting configuration is packaged as a :class:`Config`.

    Notes
    -----
    A :class:`ConfigLoader` does exactly one job: read configuration from
    some source (a file, command line arguments, ...) and hand it back as a
    :class:`Config` object.  Locating config files, applying default values
    and merging multiple configs are deliberately out of scope and must be
    handled elsewhere.
    """

    def _log_default(self) -> Logger:
        # local import -- presumably avoids a circular import at module load
        from traitlets.log import get_logger

        return t.cast(Logger, get_logger())

    def __init__(self, log: Logger | None = None) -> None:
        """A base class for config loaders.

        log : instance of :class:`logging.Logger` to use.
            By default logger of :meth:`traitlets.config.application.Application.instance()`
            will be used

        Examples
        --------
        >>> cl = ConfigLoader()
        >>> config = cl.load_config()
        >>> config
        {}
        """
        self.clear()
        if log is not None:
            self.log = log
        else:
            self.log = self._log_default()
            self.log.debug("Using default logger")

    def clear(self) -> None:
        """Reset to an empty :class:`Config`."""
        self.config = Config()

    def load_config(self) -> Config:
        """Load a config from somewhere, return a :class:`Config` instance.

        Usually, this will cause self.config to be set and then returned.
        However, in most cases, :meth:`ConfigLoader.clear` should be called
        to erase any previous state.
        """
        self.clear()
        return self.config
+
+
class FileConfigLoader(ConfigLoader):
    """A base class for file based configurations.

    As we add more file based config loaders, the common logic should go
    here.
    """

    def __init__(self, filename: str, path: str | None = None, **kw: t.Any) -> None:
        """Build a config loader for a filename and path.

        Parameters
        ----------
        filename : str
            The file name of the config file.
        path : str, list, tuple
            The path to search for the config file on, or a sequence of
            paths to try in order.
        """
        super().__init__(**kw)
        # resolved by _find_file(); empty until then
        self.full_filename = ""
        self.filename = filename
        self.path = path

    def _find_file(self) -> None:
        """Try to find the file by searching the paths."""
        self.full_filename = filefind(self.filename, self.path)
+
+
class JSONFileConfigLoader(FileConfigLoader):
    """A JSON file loader for config

    Can also act as a context manager that rewrites the configuration file to
    disk on exit.

    Example::

        with JSONFileConfigLoader('myapp.json','/home/jupyter/configurations/') as c:
            c.MyNewConfigurable.new_value = 'Updated'

    """

    def load_config(self) -> Config:
        """Load the config from a file and return it as a Config object."""
        self.clear()
        try:
            self._find_file()
        except OSError as e:
            raise ConfigFileNotFound(str(e)) from e
        self.config = self._convert_to_config(self._read_file_as_dict())
        return self.config

    def _read_file_as_dict(self) -> dict[str, t.Any]:
        # NOTE(review): opens with the locale's default encoding; JSON is
        # conventionally UTF-8 -- confirm before adding an explicit encoding.
        with open(self.full_filename) as f:
            return t.cast("dict[str, t.Any]", json.load(f))

    def _convert_to_config(self, dictionary: dict[str, t.Any]) -> Config:
        # "version" is metadata, not a config section; strip it first
        version = dictionary.pop("version", 1)
        if version != 1:
            raise ValueError(f"Unknown version of JSON config file: {version}")
        return Config(dictionary)

    def __enter__(self) -> Config:
        self.load_config()
        return self.config

    def __exit__(self, exc_type: object, exc_value: object, traceback: object) -> None:
        """
        Exit the context manager but do not handle any errors.

        In case of any error, we do not want to write the potentially broken
        configuration to disk.
        """
        # NOTE(review): the file is rewritten unconditionally, even when an
        # exception occurred in the `with` body -- confirm against the
        # docstring's stated intent.
        self.config.version = 1
        serialized = json.dumps(self.config, indent=2)
        with open(self.full_filename, "w") as f:
            f.write(serialized)
+
+
class PyFileConfigLoader(FileConfigLoader):
    """A config loader for pure python files.

    This is responsible for locating a Python config file by filename and
    path, then executing it to construct a Config object.
    """

    def load_config(self) -> Config:
        """Load the config from a file and return it as a Config object."""
        self.clear()
        try:
            self._find_file()
        except OSError as e:
            raise ConfigFileNotFound(str(e)) from e
        # executing the file mutates self.config in place
        self._read_file_as_dict()
        return self.config

    def load_subconfig(self, fname: str, path: str | None = None) -> None:
        """Injected into config file namespace as load_subconfig"""
        if path is None:
            path = self.path

        loader = self.__class__(fname, path)
        try:
            sub_config = loader.load_config()
        except ConfigFileNotFound:
            # Pass silently if the sub config is not there,
            # treat it as an empty config file.
            pass
        else:
            self.config.merge(sub_config)

    def _read_file_as_dict(self) -> None:
        """Load the config file into self.config, with recursive loading."""

        def get_config() -> Config:
            """Unnecessary now, but a deprecation warning is more trouble than it's worth."""
            return self.config

        # namespace the config file executes in: `c` is the live Config
        namespace = dict( # noqa: C408
            c=self.config,
            load_subconfig=self.load_subconfig,
            get_config=get_config,
            __file__=self.full_filename,
        )
        conf_filename = self.full_filename
        # NOTE(review): exec runs arbitrary code from the config file;
        # config files are assumed trusted input here.
        with open(conf_filename, "rb") as f:
            exec(compile(f.read(), conf_filename, "exec"), namespace, namespace) # noqa: S102
+
+
class CommandLineConfigLoader(ConfigLoader):
    """A config loader for command line arguments.

    As we add more command line based loaders, the common logic should go
    here.
    """

    def _exec_config_str(
        self, lhs: t.Any, rhs: t.Any, trait: TraitType[t.Any, t.Any] | None = None
    ) -> None:
        """execute ``self.config.<lhs> = <rhs>``

        * expands ~ with expanduser
        * interprets value with trait if available
        """
        value = rhs
        if isinstance(value, DeferredConfig):
            if trait:
                # trait available, reify config immediately
                value = value.get_value(trait)
            elif isinstance(rhs, DeferredConfigList) and len(rhs) == 1:
                # single item, make it a deferred str
                value = DeferredConfigString(os.path.expanduser(rhs[0]))
        else:
            if trait:
                value = trait.from_string(value)
            else:
                value = DeferredConfigString(value)

        # walk the dotted path; Config.__getitem__ auto-creates the
        # intermediate sections as needed
        *path, key = lhs.split(".")
        section = self.config
        for part in path:
            section = section[part]
        section[key] = value
        return

    def _load_flag(self, cfg: t.Any) -> None:
        """update self.config from a flag, which can be a dict or Config"""
        if isinstance(cfg, (dict, Config)):
            # don't clobber whole config sections, update
            # each section from config:
            for sec, c in cfg.items():
                self.config[sec].update(c)
        else:
            raise TypeError("Invalid flag: %r" % cfg)
+
+
# match --Class.trait keys for argparse
# matches:
# --Class.trait
# --x
# -x

class_trait_opt_pattern = re.compile(r"^\-?\-[A-Za-z][\w]*(\.[\w]+)*$")

# argparse dest names cannot contain "." and "-" is ambiguous on the command
# line; these sentinels stand in during parsing and are undone afterwards.
_DOT_REPLACEMENT = "__DOT__"
_DASH_REPLACEMENT = "__DASH__"
+
+
class _KVAction(argparse.Action):
    """Custom argparse action for handling --Class.trait=x

    Always appends the parsed values onto a :class:`DeferredConfigList`,
    so repeated occurrences of the same option accumulate.
    """

    def __call__( # type:ignore[override]
        self,
        parser: argparse.ArgumentParser,
        namespace: dict[str, t.Any],
        values: t.Sequence[t.Any],
        option_string: str | None = None,
    ) -> None:
        if isinstance(values, str):
            values = [values]
        # undo the "-" placeholder substituted before parsing
        values = ["-" if v is _DASH_REPLACEMENT else v for v in values]
        items = getattr(namespace, self.dest, None)
        if items is None:
            items = DeferredConfigList()
        else:
            # re-wrap existing items in a fresh DeferredConfigList before extending
            items = DeferredConfigList(items)
        items.extend(values)
        setattr(namespace, self.dest, items)
+
+
class _DefaultOptionDict(dict): # type:ignore[type-arg]
    """Like the default options dict

    but acts as if all --Class.trait options are predefined
    """

    def _add_kv_action(self, key: str) -> None:
        # register a lazily-created action for this --Class.trait option
        dest = key.lstrip("-").replace(".", _DOT_REPLACEMENT)
        self[key] = _KVAction(
            option_strings=[key],
            dest=dest,
            # use metavar for display purposes
            metavar=key.lstrip("-"),
        )

    def __contains__(self, key: t.Any) -> bool:
        if "=" in key:
            return False
        if super().__contains__(key):
            return True
        # anything shaped like --Class.trait springs into existence on demand
        if key.startswith("-") and class_trait_opt_pattern.match(key):
            self._add_kv_action(key)
            return True
        return False

    def __getitem__(self, key: str) -> t.Any:
        # the membership test may lazily create the action (see __contains__)
        if key not in self:
            raise KeyError(key)
        return super().__getitem__(key)

    def get(self, key: str, default: t.Any = None) -> t.Any:
        try:
            return self[key]
        except KeyError:
            return default
+
+
class _KVArgParser(argparse.ArgumentParser):
    """subclass of ArgumentParser where any --Class.trait option is implicitly defined"""

    def parse_known_args( # type:ignore[override]
        self, args: t.Sequence[str] | None = None, namespace: argparse.Namespace | None = None
    ) -> tuple[argparse.Namespace | None, list[str]]:
        # must be done immediately prior to parsing because if we do it in init,
        # registration of explicit actions via parser.add_option will fail during setup
        # (_option_string_actions is an argparse implementation detail; wrapping it
        # in _DefaultOptionDict makes unknown --Class.trait options auto-register)
        for container in (self, self._optionals):
            container._option_string_actions = _DefaultOptionDict(container._option_string_actions)
        return super().parse_known_args(args, namespace)
+
+
# type aliases
# NOTE(review): values are subcommand specs consumed by Application/argcomplete;
# the exact shape is defined by the Application class -- confirm there.
SubcommandsDict = t.Dict[str, t.Any]
+
+
class ArgParseConfigLoader(CommandLineConfigLoader):
    """A loader that uses the argparse module to load from the command line."""

    # subclasses may substitute their own parser class (see _KVArgParser)
    parser_class = ArgumentParser

    def __init__(
        self,
        argv: list[str] | None = None,
        aliases: dict[str, str] | None = None,
        flags: dict[str, str] | None = None,
        log: t.Any = None,
        classes: list[type[t.Any]] | None = None,
        subcommands: SubcommandsDict | None = None,
        *parser_args: t.Any,
        **parser_kw: t.Any,
    ) -> None:
        """Create a config loader for use with argparse.

        Parameters
        ----------
        classes : optional, list
            The classes to scan for *container* config-traits and decide
            for their "multiplicity" when adding them as *argparse* arguments.
        argv : optional, list
            If given, used to read command-line arguments from, otherwise
            sys.argv[1:] is used.
        *parser_args : tuple
            A tuple of positional arguments that will be passed to the
            constructor of :class:`argparse.ArgumentParser`.
        **parser_kw : dict
            A tuple of keyword arguments that will be passed to the
            constructor of :class:`argparse.ArgumentParser`.
        aliases : dict of str to str
            Dict of aliases to full traitlets names for CLI parsing
        flags : dict of str to str
            Dict of flags to full traitlets names for CLI parsing
        log
            Passed to `ConfigLoader`
        """
        classes = classes or []
        # skip CommandLineConfigLoader in the MRO: invoke ConfigLoader.__init__
        # directly (CommandLineConfigLoader defines no __init__ of its own)
        super(CommandLineConfigLoader, self).__init__(log=log)
        self.clear()
        if argv is None:
            argv = sys.argv[1:]
        self.argv = argv
        self.aliases = aliases or {}
        self.flags = flags or {}
        self.classes = classes
        self.subcommands = subcommands # only used for argcomplete currently

        self.parser_args = parser_args
        self.version = parser_kw.pop("version", None)
        # unset options should simply be absent from parsed_data, not None
        kwargs = dict(argument_default=argparse.SUPPRESS) # noqa: C408
        kwargs.update(parser_kw)
        self.parser_kw = kwargs

    def load_config(
        self,
        argv: list[str] | None = None,
        aliases: t.Any = None,
        flags: t.Any = _deprecated,
        classes: t.Any = None,
    ) -> Config:
        """Parse command line arguments and return as a Config object.

        Parameters
        ----------
        argv : optional, list
            If given, a list with the structure of sys.argv[1:] to parse
            arguments from. If not given, the instance's self.argv attribute
            (given at construction time) is used.
        flags
            Deprecated in traitlets 5.0, instantiate the config loader with the flags.

        """

        if flags is not _deprecated:
            # NOTE(review): message likely meant "pass flags *to* the ...
            # constructor" -- runtime string left unchanged here.
            warnings.warn(
                "The `flag` argument to load_config is deprecated since Traitlets "
                f"5.0 and will be ignored, pass flags the `{type(self)}` constructor.",
                DeprecationWarning,
                stacklevel=2,
            )

        self.clear()
        if argv is None:
            argv = self.argv
        if aliases is not None:
            self.aliases = aliases
        if classes is not None:
            self.classes = classes
        self._create_parser()
        self._argcomplete(self.classes, self.subcommands)
        self._parse_args(argv)
        self._convert_to_config()
        return self.config

    def get_extra_args(self) -> list[str]:
        """Return arguments after a '--' separator, if any were parsed."""
        if hasattr(self, "extra_args"):
            return self.extra_args
        else:
            return []

    def _create_parser(self) -> None:
        self.parser = self.parser_class(
            *self.parser_args,
            **self.parser_kw, # type:ignore[arg-type]
        )
        self._add_arguments(self.aliases, self.flags, self.classes)

    def _add_arguments(self, aliases: t.Any, flags: t.Any, classes: t.Any) -> None:
        raise NotImplementedError("subclasses must implement _add_arguments")

    def _argcomplete(self, classes: list[t.Any], subcommands: SubcommandsDict | None) -> None:
        """If argcomplete is enabled, allow triggering command-line autocompletion"""

    def _parse_args(self, args: t.Any) -> t.Any:
        """self.parser->self.parsed_data"""
        uargs = [cast_unicode(a) for a in args]

        # expand single-letter and long aliases to their --Class.trait targets
        unpacked_aliases: dict[str, str] = {}
        if self.aliases:
            unpacked_aliases = {}
            for alias, alias_target in self.aliases.items():
                if alias in self.flags:
                    continue
                if not isinstance(alias, tuple): # type:ignore[unreachable]
                    alias = (alias,) # type:ignore[assignment]
                for al in alias:
                    if len(al) == 1:
                        unpacked_aliases["-" + al] = "--" + alias_target
                    unpacked_aliases["--" + al] = "--" + alias_target

        def _replace(arg: str) -> str:
            # substitute sentinel for a bare "-" and rewrite aliased options,
            # both in `--opt value` and `--opt=value` forms
            if arg == "-":
                return _DASH_REPLACEMENT
            for k, v in unpacked_aliases.items():
                if arg == k:
                    return v
                if arg.startswith(k + "="):
                    return v + "=" + arg[len(k) + 1 :]
            return arg

        # everything after a literal "--" is passed through untouched
        if "--" in uargs:
            idx = uargs.index("--")
            extra_args = uargs[idx + 1 :]
            to_parse = uargs[:idx]
        else:
            extra_args = []
            to_parse = uargs
        to_parse = [_replace(a) for a in to_parse]

        self.parsed_data = self.parser.parse_args(to_parse)
        self.extra_args = extra_args

    def _convert_to_config(self) -> None:
        """self.parsed_data->self.config"""
        for k, v in vars(self.parsed_data).items():
            # dotted dest names become nested config sections
            *path, key = k.split(".")
            section = self.config
            for p in path:
                section = section[p]
            setattr(section, key, v)
+
+
class _FlagAction(argparse.Action):
    """ArgParse action to handle a flag

    May double as an alias: with a value it assigns like an alias, without
    one it records the flag for later processing.
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        self.flag = kwargs.pop("flag")
        self.alias = kwargs.pop("alias", None)
        # Undefined marks "invoked without a value" for the nargs='?' case
        kwargs["const"] = Undefined
        if not self.alias:
            kwargs["nargs"] = 0
        super().__init__(*args, **kwargs)

    def __call__(
        self, parser: t.Any, namespace: t.Any, values: t.Any, option_string: str | None = None
    ) -> None:
        if self.nargs != 0 and values is not Undefined:
            # invoked as an alias with an explicit value
            setattr(namespace, self.alias, values)
            return
        # invoked as a flag: queue it up for _load_flag
        if not hasattr(namespace, "_flags"):
            namespace._flags = []
        namespace._flags.append(self.flag)
+
+
class KVArgParseConfigLoader(ArgParseConfigLoader):
    """A config loader that loads aliases and flags with argparse,

    as well as arbitrary --Class.trait value
    """

    parser_class = _KVArgParser # type:ignore[assignment]

    def _add_arguments(self, aliases: t.Any, flags: t.Any, classes: t.Any) -> None:
        """Register aliases, flags and container traits on self.parser."""
        alias_flags: dict[str, t.Any] = {}
        argparse_kwds: dict[str, t.Any]
        argparse_traits: dict[str, t.Any]
        paa = self.parser.add_argument
        self.parser.set_defaults(_flags=[])
        paa("extra_args", nargs="*")

        # An index of all container traits collected::
        #
        #     { <argname>: (<trait>, <argparse-kwds>) }
        #
        # Used to add the correct type into the `config` tree.
        # Used also for aliases, not to re-collect them.
        self.argparse_traits = argparse_traits = {}
        for cls in classes:
            for traitname, trait in cls.class_traits(config=True).items():
                argname = f"{cls.__name__}.{traitname}"
                argparse_kwds = {"type": str}
                if isinstance(trait, (Container, Dict)):
                    # container traits either append per-occurrence or take
                    # an explicit nargs multiplicity
                    multiplicity = trait.metadata.get("multiplicity", "append")
                    if multiplicity == "append":
                        argparse_kwds["action"] = multiplicity
                    else:
                        argparse_kwds["nargs"] = multiplicity
                argparse_traits[argname] = (trait, argparse_kwds)

        for keys, (value, fhelp) in flags.items():
            if not isinstance(keys, tuple):
                keys = (keys,)
            for key in keys:
                if key in aliases:
                    # same name is both flag and alias: defer to the alias
                    # registration loop below
                    alias_flags[aliases[key]] = value
                    continue
                keys = ("-" + key, "--" + key) if len(key) == 1 else ("--" + key,)
                paa(*keys, action=_FlagAction, flag=value, help=fhelp)

        for keys, traitname in aliases.items():
            if not isinstance(keys, tuple):
                keys = (keys,)

            for key in keys:
                argparse_kwds = {
                    "type": str,
                    "dest": traitname.replace(".", _DOT_REPLACEMENT),
                    "metavar": traitname,
                }
                argcompleter = None
                if traitname in argparse_traits:
                    trait, kwds = argparse_traits[traitname]
                    argparse_kwds.update(kwds)
                    if "action" in argparse_kwds and traitname in alias_flags:
                        # flag sets 'action', so can't have flag & alias with custom action
                        # on the same name
                        # NOTE(review): stray trailing quote in this message
                        raise ArgumentError(
                            f"The alias `{key}` for the 'append' sequence "
                            f"config-trait `{traitname}` cannot be also a flag!'"
                        )
                    # For argcomplete, check if any either an argcompleter metadata tag or method
                    # is available. If so, it should be a callable which takes the command-line key
                    # string as an argument and other kwargs passed by argcomplete,
                    # and returns the a list of string completions.
                    argcompleter = trait.metadata.get("argcompleter") or getattr(
                        trait, "argcompleter", None
                    )
                if traitname in alias_flags:
                    # alias and flag.
                    # when called with 0 args: flag
                    # when called with >= 1: alias
                    argparse_kwds.setdefault("nargs", "?")
                    argparse_kwds["action"] = _FlagAction
                    argparse_kwds["flag"] = alias_flags[traitname]
                    argparse_kwds["alias"] = traitname
                keys = ("-" + key, "--" + key) if len(key) == 1 else ("--" + key,)
                action = paa(*keys, **argparse_kwds)
                if argcompleter is not None:
                    # argcomplete's completers are callables returning list of completion strings
                    action.completer = functools.partial( # type:ignore[attr-defined]
                        argcompleter, key=key
                    )

    def _convert_to_config(self) -> None:
        """self.parsed_data->self.config, parse unrecognized extra args via KVLoader."""
        extra_args = self.extra_args

        for lhs, rhs in vars(self.parsed_data).items():
            if lhs == "extra_args":
                # undo the "-" sentinel and keep args found after "--"
                self.extra_args = ["-" if a == _DASH_REPLACEMENT else a for a in rhs] + extra_args
                continue
            if lhs == "_flags":
                # _flags will be handled later
                continue

            lhs = lhs.replace(_DOT_REPLACEMENT, ".")
            if "." not in lhs:
                self._handle_unrecognized_alias(lhs)
                # NOTE(review): this assignment is dead -- `trait` is
                # unconditionally reassigned a few lines below
                trait = None

            if isinstance(rhs, list):
                rhs = DeferredConfigList(rhs)
            elif isinstance(rhs, str):
                rhs = DeferredConfigString(rhs)

            trait = self.argparse_traits.get(lhs)
            if trait:
                trait = trait[0]

            # eval the KV assignment
            try:
                self._exec_config_str(lhs, rhs, trait)
            except Exception as e:
                # cast deferred to nicer repr for the error
                # DeferredList->list, etc
                if isinstance(rhs, DeferredConfig):
                    rhs = rhs._super_repr()
                raise ArgumentError(f"Error loading argument {lhs}={rhs}, {e}") from e

        for subc in self.parsed_data._flags:
            self._load_flag(subc)

    def _handle_unrecognized_alias(self, arg: str) -> None:
        """Handling for unrecognized alias arguments

        Probably a mistyped alias. By default just log a warning,
        but users can override this to raise an error instead, e.g.
        self.parser.error("Unrecognized alias: '%s'" % arg)
        """
        self.log.warning("Unrecognized alias: '%s', it will have no effect.", arg)

    def _argcomplete(self, classes: list[t.Any], subcommands: SubcommandsDict | None) -> None:
        """If argcomplete is enabled, allow triggering command-line autocompletion"""
        try:
            import argcomplete # noqa: F401
        except ImportError:
            # argcomplete not installed: completion is simply unavailable
            return

        from . import argcomplete_config

        finder = argcomplete_config.ExtendedCompletionFinder() # type:ignore[no-untyped-call]
        finder.config_classes = classes
        finder.subcommands = list(subcommands or [])
        # for ease of testing, pass through self._argcomplete_kwargs if set
        finder(self.parser, **getattr(self, "_argcomplete_kwargs", {}))
+
+
class KeyValueConfigLoader(KVArgParseConfigLoader):
    """Deprecated in traitlets 5.0

    Use KVArgParseConfigLoader
    """

    def __init__(self, *args: t.Any, **kwargs: t.Any) -> None:
        # warn once at construction, then defer entirely to the replacement
        warnings.warn(
            "KeyValueConfigLoader is deprecated since Traitlets 5.0."
            " Use KVArgParseConfigLoader instead.",
            DeprecationWarning,
            stacklevel=2,
        )
        super().__init__(*args, **kwargs)
+
+
def load_pyconfig_files(config_files: list[str], path: str) -> Config:
    """Load multiple Python config files, merging each of them in turn.

    Parameters
    ----------
    config_files : list of str
        List of config files names to load and merge into the config.
    path : unicode
        The full path to the location of the config files.

    Returns
    -------
    Config
        The merged configuration; files that cannot be found are skipped.
    """
    config = Config()
    for cf in config_files:
        loader = PyFileConfigLoader(cf, path=path)
        try:
            next_config = loader.load_config()
        except ConfigFileNotFound:
            # missing config files are allowed; treat them as empty
            continue
        # IDIOM FIX: removed the no-op `except Exception: raise` clause --
        # immediately re-raising is identical to not catching at all.
        config.merge(next_config)
    return config
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/sphinxdoc.py b/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/sphinxdoc.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4708a22706a6d7f7205849f01dd4fb0928661f3
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/traitlets/config/sphinxdoc.py
@@ -0,0 +1,164 @@
+"""Machinery for documenting traitlets config options with Sphinx.
+
+This includes:
+
+- A Sphinx extension defining directives and roles for config options.
+- A function to generate an rst file given an Application instance.
+
+To make this documentation, first set this module as an extension in Sphinx's
+conf.py::
+
+ extensions = [
+ # ...
+ 'traitlets.config.sphinxdoc',
+ ]
+
+Autogenerate the config documentation by running code like this before
+Sphinx builds::
+
+ from traitlets.config.sphinxdoc import write_doc
+ from myapp import MyApplication
+
+    write_doc('config/options.rst', # File to write
+ 'MyApp config options', # Title
+ MyApplication()
+ )
+
+The generated rST syntax looks like this::
+
+ .. configtrait:: Application.log_datefmt
+
+ Description goes here.
+
+ Cross reference like this: :configtrait:`Application.log_datefmt`.
+"""
+from __future__ import annotations
+
+import typing as t
+from collections import defaultdict
+from textwrap import dedent
+
+from traitlets import HasTraits, Undefined
+from traitlets.config.application import Application
+from traitlets.utils.text import indent
+
+
+def setup(app: t.Any) -> dict[str, t.Any]:
+ """Registers the Sphinx extension.
+
+ You shouldn't need to call this directly; configure Sphinx to use this
+ module instead.
+ """
+ app.add_object_type("configtrait", "configtrait", objname="Config option")
+ return {"parallel_read_safe": True, "parallel_write_safe": True}
+
+
def interesting_default_value(dv: t.Any) -> bool:
    """Return True if *dv* is a default value worth documenting.

    None/Undefined and empty containers or strings are considered boring.
    """
    if dv is None or dv is Undefined:
        return False
    # containers and strings are interesting only when non-empty
    return bool(dv) if isinstance(dv, (str, list, tuple, dict, set)) else True
+
+
def format_aliases(aliases: list[str]) -> str:
    """Render CLI aliases as a comma-separated list of rST literals."""
    return ", ".join(
        "``{}{}``".format("-" if len(alias) == 1 else "--", alias) for alias in aliases
    )
+
+
def class_config_rst_doc(cls: type[HasTraits], trait_aliases: dict[str, t.Any]) -> str:
    """Generate rST documentation for this class' config options.

    Excludes traits defined on parent classes.

    Parameters
    ----------
    cls : type
        HasTraits subclass whose own configurable traits are documented.
    trait_aliases : dict
        Maps full trait names to their CLI aliases, as produced by
        :func:`reverse_aliases` (a ``defaultdict(list)``, so missing
        names index to an empty list rather than raising).
    """
    lines = []
    classname = cls.__name__
    for _, trait in sorted(cls.class_traits(config=True).items()):
        ttype = trait.__class__.__name__

        fullname = classname + "." + (trait.name or "")
        lines += [".. configtrait:: " + fullname, ""]

        help = trait.help.rstrip() or "No description"
        lines.append(indent(dedent(help)) + "\n")

        # Choices or type
        if "Enum" in ttype:
            # include Enum choices
            lines.append(indent(":options: " + ", ".join("``%r``" % x for x in trait.values))) # type:ignore[attr-defined]
        else:
            lines.append(indent(":trait type: " + ttype))

        # Default value
        # Ignore boring default values like None, [] or ''
        if interesting_default_value(trait.default_value):
            try:
                dvr = trait.default_value_repr()
            except Exception:
                dvr = None # ignore defaults we can't construct
            if dvr is not None:
                if len(dvr) > 64:
                    # truncate long reprs for readability
                    dvr = dvr[:61] + "..."
                # Double up backslashes, so they get to the rendered docs
                dvr = dvr.replace("\\n", "\\\\n")
                lines.append(indent(":default: ``%s``" % dvr))

        # Command line aliases
        if trait_aliases[fullname]:
            fmt_aliases = format_aliases(trait_aliases[fullname])
            lines.append(indent(":CLI option: " + fmt_aliases))

        # Blank line
        lines.append("")

    return "\n".join(lines)
+
+
def reverse_aliases(app: Application) -> dict[str, list[str]]:
    """Produce a mapping of trait names to lists of command line aliases."""
    result: dict[str, list[str]] = defaultdict(list)
    for alias, trait in app.aliases.items():
        result[trait].append(alias)

    # Flags that set exactly one trait to True behave like boolean aliases,
    # so record them alongside the explicit aliases.
    for flag, (cfg, _help) in app.flags.items():
        if len(cfg) != 1:
            continue
        classname = next(iter(cfg))
        cls_cfg = cfg[classname]
        if len(cls_cfg) != 1:
            continue
        traitname = next(iter(cls_cfg))
        if cls_cfg[traitname] is True:
            result[classname + "." + traitname].append(flag)

    return result
+
+
def write_doc(path: str, title: str, app: Application, preamble: str | None = None) -> None:
    """Write a rst file documenting config options for a traitlets application.

    Parameters
    ----------
    path : str
        The file to be written
    title : str
        The human-readable title of the document
    app : traitlets.config.Application
        An instance of the application class to be documented
    preamble : str
        Extra text to add just after the title (optional)
    """
    trait_aliases = reverse_aliases(app)
    with open(path, "w") as f:
        # title with an rST '=' underline, then a blank line
        f.write(f"{title}\n{'=' * len(title)}\n\n")
        if preamble is not None:
            f.write(preamble + "\n\n")

        for cls in app._classes_inc_parents():
            f.write(class_config_rst_doc(cls, trait_aliases))
            f.write("\n")
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/py.typed b/evalkit_tf437/lib/python3.10/site-packages/traitlets/py.typed
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/tests/__init__.py b/evalkit_tf437/lib/python3.10/site-packages/traitlets/tests/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/tests/__pycache__/test_traitlets.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/traitlets/tests/__pycache__/test_traitlets.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f6ae47f75fc4f713617223151f0ebf95e54044e6
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/traitlets/tests/__pycache__/test_traitlets.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/tests/test_traitlets.py b/evalkit_tf437/lib/python3.10/site-packages/traitlets/tests/test_traitlets.py
new file mode 100644
index 0000000000000000000000000000000000000000..8380059fdd28445008887f3af0c2d33aadfa561c
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/traitlets/tests/test_traitlets.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+
+from typing import Any
+from unittest import TestCase
+
+from traitlets import TraitError
+
+
+class TraitTestBase(TestCase):
+ """A base testing class for basic trait types."""
+
+ def assign(self, value: Any) -> None:
+ self.obj.value = value # type:ignore[attr-defined]
+
+ def coerce(self, value: Any) -> Any:
+ return value
+
+ def test_good_values(self) -> None:
+ if hasattr(self, "_good_values"):
+ for value in self._good_values:
+ self.assign(value)
+ self.assertEqual(self.obj.value, self.coerce(value)) # type:ignore[attr-defined]
+
+ def test_bad_values(self) -> None:
+ if hasattr(self, "_bad_values"):
+ for value in self._bad_values:
+ try:
+ self.assertRaises(TraitError, self.assign, value)
+ except AssertionError:
+ raise AssertionError(value) from None
+
+ def test_default_value(self) -> None:
+ if hasattr(self, "_default_value"):
+ self.assertEqual(self._default_value, self.obj.value) # type:ignore[attr-defined]
+
+ def test_allow_none(self) -> None:
+ if (
+ hasattr(self, "_bad_values")
+ and hasattr(self, "_good_values")
+ and None in self._bad_values
+ ):
+ trait = self.obj.traits()["value"] # type:ignore[attr-defined]
+ try:
+ trait.allow_none = True
+ self._bad_values.remove(None)
+ # skip coerce. Allow None casts None to None.
+ self.assign(None)
+ self.assertEqual(self.obj.value, None) # type:ignore[attr-defined]
+ self.test_good_values()
+ self.test_bad_values()
+ finally:
+ # tear down
+ trait.allow_none = False
+ self._bad_values.append(None)
+
+ def tearDown(self) -> None:
+ # restore default value after tests, if set
+ if hasattr(self, "_default_value"):
+ self.obj.value = self._default_value # type:ignore[attr-defined]
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/decorators.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/decorators.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..976f964891b4435a7c9bff541dc206b34f1b0fe6
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/decorators.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/descriptions.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/descriptions.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..dec1865722f6f7db6971831922458c0227959403
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/descriptions.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/importstring.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/importstring.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..c00b53cf2721c7f3634bde380dc8eace47f8bc96
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/importstring.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/nested_update.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/nested_update.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b539682d66325fb201d8321cc15819273036b5c0
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/nested_update.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/sentinel.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/sentinel.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6dc099298c7f7a556a121aa652f56d58eb124a5c
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/__pycache__/sentinel.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/bunch.py b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/bunch.py
new file mode 100644
index 0000000000000000000000000000000000000000..498563e0b5b43a5f5f291c185db4cd0dc48c991a
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/bunch.py
@@ -0,0 +1,29 @@
+"""Yet another implementation of bunch
+
+attribute-access of items on a dict.
+"""
+
+# Copyright (c) Jupyter Development Team.
+# Distributed under the terms of the Modified BSD License.
+from __future__ import annotations
+
+from typing import Any
+
+
+class Bunch(dict): # type:ignore[type-arg]
+ """A dict with attribute-access"""
+
+ def __getattr__(self, key: str) -> Any:
+ try:
+ return self.__getitem__(key)
+ except KeyError as e:
+ raise AttributeError(key) from e
+
+ def __setattr__(self, key: str, value: Any) -> None:
+ self.__setitem__(key, value)
+
+ def __dir__(self) -> list[str]:
+ # py2-compat: can't use super because dict doesn't have __dir__
+ names = dir({})
+ names.extend(self.keys())
+ return names
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/decorators.py b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/decorators.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b77d701deec4ae630c8f8e5c55814a373662ad1
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/decorators.py
@@ -0,0 +1,86 @@
+"""Useful decorators for Traitlets users."""
+from __future__ import annotations
+
+import copy
+from inspect import Parameter, Signature, signature
+from typing import Any, Type, TypeVar
+
+from ..traitlets import HasTraits, Undefined
+
+
+def _get_default(value: Any) -> Any:
+ """Get default argument value, given the trait default value."""
+ return Parameter.empty if value == Undefined else value
+
+
+T = TypeVar("T", bound=HasTraits)
+
+
+def signature_has_traits(cls: Type[T]) -> Type[T]:
+ """Return a decorated class with a constructor signature that contain Trait names as kwargs."""
+ traits = [
+ (name, _get_default(value.default_value))
+ for name, value in cls.class_traits().items()
+ if not name.startswith("_")
+ ]
+
+ # Taking the __init__ signature, as the cls signature is not initialized yet
+ old_signature = signature(cls.__init__)
+ old_parameter_names = list(old_signature.parameters)
+
+ old_positional_parameters = []
+ old_var_positional_parameter = None # This won't be None if the old signature contains *args
+ old_keyword_only_parameters = []
+ old_var_keyword_parameter = None # This won't be None if the old signature contains **kwargs
+
+ for parameter_name in old_signature.parameters:
+ # Copy the parameter
+ parameter = copy.copy(old_signature.parameters[parameter_name])
+
+ if (
+ parameter.kind is Parameter.POSITIONAL_ONLY
+ or parameter.kind is Parameter.POSITIONAL_OR_KEYWORD
+ ):
+ old_positional_parameters.append(parameter)
+
+ elif parameter.kind is Parameter.VAR_POSITIONAL:
+ old_var_positional_parameter = parameter
+
+ elif parameter.kind is Parameter.KEYWORD_ONLY:
+ old_keyword_only_parameters.append(parameter)
+
+ elif parameter.kind is Parameter.VAR_KEYWORD:
+ old_var_keyword_parameter = parameter
+
+ # Unfortunately, if the old signature does not contain **kwargs, we can't do anything,
+ # because it can't accept traits as keyword arguments
+ if old_var_keyword_parameter is None:
+ raise RuntimeError(
+ f"The {cls} constructor does not take **kwargs, which means that the signature can not be expanded with trait names"
+ )
+
+ new_parameters = []
+
+ # Append the old positional parameters (except `self` which is the first parameter)
+ new_parameters += old_positional_parameters[1:]
+
+ # Append *args if the old signature had it
+ if old_var_positional_parameter is not None:
+ new_parameters.append(old_var_positional_parameter)
+
+ # Append the old keyword only parameters
+ new_parameters += old_keyword_only_parameters
+
+ # Append trait names as keyword only parameters in the signature
+ new_parameters += [
+ Parameter(name, kind=Parameter.KEYWORD_ONLY, default=default)
+ for name, default in traits
+ if name not in old_parameter_names
+ ]
+
+ # Append **kwargs
+ new_parameters.append(old_var_keyword_parameter)
+
+ cls.__signature__ = Signature(new_parameters) # type:ignore[attr-defined]
+
+ return cls
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/sentinel.py b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/sentinel.py
new file mode 100644
index 0000000000000000000000000000000000000000..079443d83f5a1c2adaf99d2b6745e2f10f49b019
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/sentinel.py
@@ -0,0 +1,24 @@
+"""Sentinel class for constants with useful reprs"""
+
+# Copyright (c) IPython Development Team.
+# Distributed under the terms of the Modified BSD License.
+from __future__ import annotations
+
+import typing as t
+
+
+class Sentinel:
+ def __init__(self, name: str, module: t.Any, docstring: str | None = None) -> None:
+ self.name = name
+ self.module = module
+ if docstring:
+ self.__doc__ = docstring
+
+ def __repr__(self) -> str:
+ return str(self.module) + "." + self.name
+
+ def __copy__(self) -> Sentinel:
+ return self
+
+ def __deepcopy__(self, memo: t.Any) -> Sentinel:
+ return self
diff --git a/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/text.py b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/text.py
new file mode 100644
index 0000000000000000000000000000000000000000..1c1ac2081506ccb6fe2c69ade3298d262e0ee365
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/traitlets/utils/text.py
@@ -0,0 +1,40 @@
+"""
+Utilities imported from ipython_genutils
+"""
+from __future__ import annotations
+
+import re
+import textwrap
+from textwrap import dedent
+from textwrap import indent as _indent
+from typing import List
+
+
+def indent(val: str) -> str:
+ return _indent(val, " ")
+
+
+def wrap_paragraphs(text: str, ncols: int = 80) -> List[str]:
+ """Wrap multiple paragraphs to fit a specified width.
+
+ This is equivalent to textwrap.wrap, but with support for multiple
+ paragraphs, as separated by empty lines.
+
+ Returns
+ -------
+
+ list of complete paragraphs, wrapped to fill `ncols` columns.
+ """
+ paragraph_re = re.compile(r"\n(\s*\n)+", re.MULTILINE)
+ text = dedent(text).strip()
+ paragraphs = paragraph_re.split(text)[::2] # every other entry is space
+ out_ps = []
+ indent_re = re.compile(r"\n\s+", re.MULTILINE)
+ for p in paragraphs:
+ # presume indentation that survives dedent is meaningful formatting,
+ # so don't fill unless text is flush.
+ if indent_re.search(p) is None:
+ # wrap paragraph
+ p = textwrap.fill(p, ncols)
+ out_ps.append(p)
+ return out_ps
diff --git a/evalkit_tf437/lib/python3.10/site-packages/wcwidth/__pycache__/wcwidth.cpython-310.pyc b/evalkit_tf437/lib/python3.10/site-packages/wcwidth/__pycache__/wcwidth.cpython-310.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..374417e8f9f48c710e2d1657d3f7735a7c2bcce0
Binary files /dev/null and b/evalkit_tf437/lib/python3.10/site-packages/wcwidth/__pycache__/wcwidth.cpython-310.pyc differ
diff --git a/evalkit_tf437/lib/python3.10/site-packages/wcwidth/wcwidth.py b/evalkit_tf437/lib/python3.10/site-packages/wcwidth/wcwidth.py
new file mode 100644
index 0000000000000000000000000000000000000000..e924020630c96f9df09629090dd898de7b2dc643
--- /dev/null
+++ b/evalkit_tf437/lib/python3.10/site-packages/wcwidth/wcwidth.py
@@ -0,0 +1,345 @@
+"""
+This is a python implementation of wcwidth() and wcswidth().
+
+https://github.com/jquast/wcwidth
+
+from Markus Kuhn's C code, retrieved from:
+
+ http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
+
+This is an implementation of wcwidth() and wcswidth() (defined in
+IEEE Std 1003.1-2001) for Unicode.
+
+http://www.opengroup.org/onlinepubs/007904975/functions/wcwidth.html
+http://www.opengroup.org/onlinepubs/007904975/functions/wcswidth.html
+
+In fixed-width output devices, Latin characters all occupy a single
+"cell" position of equal width, whereas ideographic CJK characters
+occupy two such cells. Interoperability between terminal-line
+applications and (teletype-style) character terminals using the
+UTF-8 encoding requires agreement on which character should advance
+the cursor by how many cell positions. No established formal
+standards exist at present on which Unicode character shall occupy
+how many cell positions on character terminals. These routines are
+a first attempt of defining such behavior based on simple rules
+applied to data provided by the Unicode Consortium.
+
+For some graphical characters, the Unicode standard explicitly
+defines a character-cell width via the definition of the East Asian
+FullWidth (F), Wide (W), Half-width (H), and Narrow (Na) classes.
+In all these cases, there is no ambiguity about which width a
+terminal shall use. For characters in the East Asian Ambiguous (A)
+class, the width choice depends purely on a preference of backward
+compatibility with either historic CJK or Western practice.
+Choosing single-width for these characters is easy to justify as
+the appropriate long-term solution, as the CJK practice of
+displaying these characters as double-width comes from historic
+implementation simplicity (8-bit encoded characters were displayed
+single-width and 16-bit ones double-width, even for Greek,
+Cyrillic, etc.) and not any typographic considerations.
+
+Much less clear is the choice of width for the Not East Asian
+(Neutral) class. Existing practice does not dictate a width for any
+of these characters. It would nevertheless make sense
+typographically to allocate two character cells to characters such
+as for instance EM SPACE or VOLUME INTEGRAL, which cannot be
+represented adequately with a single-width glyph. The following
+routines at present merely assign a single-cell width to all
+neutral characters, in the interest of simplicity. This is not
+entirely satisfactory and should be reconsidered before
+establishing a formal standard in this area. At the moment, the
+decision which Not East Asian (Neutral) characters should be
+represented by double-width glyphs cannot yet be answered by
+applying a simple rule from the Unicode database content. Setting
+up a proper standard for the behavior of UTF-8 character terminals
+will require a careful analysis not only of each Unicode character,
+but also of each presentation form, something the author of these
+routines has avoided to do so far.
+
+http://www.unicode.org/unicode/reports/tr11/
+
+Latest version: http://www.cl.cam.ac.uk/~mgk25/ucs/wcwidth.c
+"""
+from __future__ import division
+
+# std imports
+import os
+import sys
+import warnings
+
+# local
+from .table_vs16 import VS16_NARROW_TO_WIDE
+from .table_wide import WIDE_EASTASIAN
+from .table_zero import ZERO_WIDTH
+from .unicode_versions import list_versions
+
+try:
+ # std imports
+ from functools import lru_cache
+except ImportError:
+ # lru_cache was added in Python 3.2
+ # 3rd party
+ from backports.functools_lru_cache import lru_cache
+
+# global cache
+_PY3 = sys.version_info[0] >= 3
+
+
+def _bisearch(ucs, table):
+ """
+ Auxiliary function for binary search in interval table.
+
+ :arg int ucs: Ordinal value of unicode character.
+ :arg list table: List of starting and ending ranges of ordinal values,
+ in form of ``[(start, end), ...]``.
+ :rtype: int
+ :returns: 1 if ordinal value ucs is found within lookup table, else 0.
+ """
+ lbound = 0
+ ubound = len(table) - 1
+
+ if ucs < table[0][0] or ucs > table[ubound][1]:
+ return 0
+ while ubound >= lbound:
+ mid = (lbound + ubound) // 2
+ if ucs > table[mid][1]:
+ lbound = mid + 1
+ elif ucs < table[mid][0]:
+ ubound = mid - 1
+ else:
+ return 1
+
+ return 0
+
+
+@lru_cache(maxsize=1000)
+def wcwidth(wc, unicode_version='auto'):
+ r"""
+ Given one Unicode character, return its printable length on a terminal.
+
+ :param str wc: A single Unicode character.
+ :param str unicode_version: A Unicode version number, such as
+ ``'6.0.0'``. A list of version levels supported by wcwidth
+ is returned by :func:`list_versions`.
+
+ Any version string may be specified without error -- the nearest
+ matching version is selected. When ``auto`` (default) or ``latest``, the
+ highest Unicode version level is used.
+ :return: The width, in cells, necessary to display the character of
+ Unicode string character, ``wc``. Returns 0 if the ``wc`` argument has
+ no printable effect on a terminal (such as NUL '\0'), -1 if ``wc`` is
+ not printable, or has an indeterminate effect on the terminal, such as
+ a control character. Otherwise, the number of column positions the
+ character occupies on a graphic terminal (1 or 2) is returned.
+ :rtype: int
+
+ See :ref:`Specification` for details of cell measurement.
+ """
+ ucs = ord(wc) if wc else 0
+
+ # small optimization: early return of 1 for printable ASCII, this provides
+ # approximately 40% performance improvement for mostly-ascii documents, with
+ # less than 1% impact to others.
+ if 32 <= ucs < 0x7f:
+ return 1
+
+ # C0/C1 control characters are -1 for compatibility with POSIX-like calls
+ if ucs and ucs < 32 or 0x07F <= ucs < 0x0A0:
+ return -1
+
+ _unicode_version = _wcmatch_version(unicode_version)
+
+ # Zero width
+ if _bisearch(ucs, ZERO_WIDTH[_unicode_version]):
+ return 0
+
+ # 1 or 2 width
+ return 1 + _bisearch(ucs, WIDE_EASTASIAN[_unicode_version])
+
+
+def wcswidth(pwcs, n=None, unicode_version='auto'):
+ """
+ Given a unicode string, return its printable length on a terminal.
+
+ :param str pwcs: Measure width of given unicode string.
+ :param int n: When ``n`` is None (default), return the length of the entire
+ string, otherwise only the first ``n`` characters are measured. This
+ argument exists only for compatibility with the C POSIX function
+ signature. It is suggested instead to use python's string slicing
+ capability, ``wcswidth(pwcs[:n])``
+ :param str unicode_version: An explicit definition of the unicode version
+ level to use for determination, may be ``auto`` (default), which uses
+ the Environment Variable, ``UNICODE_VERSION`` if defined, or the latest
+ available unicode version, otherwise.
+ :rtype: int
+ :returns: The width, in cells, needed to display the first ``n`` characters
+ of the unicode string ``pwcs``. Returns ``-1`` for C0 and C1 control
+ characters!
+
+ See :ref:`Specification` for details of cell measurement.
+ """
+ # this 'n' argument is a holdover from the POSIX function signature
+ _unicode_version = None
+ end = len(pwcs) if n is None else n
+ width = 0
+ idx = 0
+ last_measured_char = None
+ while idx < end:
+ char = pwcs[idx]
+ if char == u'\u200D':
+ # Zero Width Joiner, do not measure this or next character
+ idx += 2
+ continue
+ if char == u'\uFE0F' and last_measured_char:
+ # on variation selector 16 (VS16) following another character,
+ # conditionally add '1' to the measured width if that character is
+ # known to be converted from narrow to wide by the VS16 character.
+ if _unicode_version is None:
+ _unicode_version = _wcversion_value(_wcmatch_version(unicode_version))
+ if _unicode_version >= (9, 0, 0):
+ width += _bisearch(ord(last_measured_char), VS16_NARROW_TO_WIDE["9.0.0"])
+ last_measured_char = None
+ idx += 1
+ continue
+ # measure character at current index
+ wcw = wcwidth(char, unicode_version)
+ if wcw < 0:
+ # early return -1 on C0 and C1 control characters
+ return wcw
+ if wcw > 0:
+ # track last character measured to contain a cell, so that
+ # subsequent VS-16 modifiers may be understood
+ last_measured_char = char
+ width += wcw
+ idx += 1
+ return width
+
+
+@lru_cache(maxsize=128)
+def _wcversion_value(ver_string):
+ """
+ Integer-mapped value of given dotted version string.
+
+ :param str ver_string: Unicode version string, of form ``n.n.n``.
+ :rtype: tuple(int)
+ :returns: tuple of digit tuples, ``tuple(int, [...])``.
+ """
+ retval = tuple(map(int, (ver_string.split('.'))))
+ return retval
+
+
+@lru_cache(maxsize=8)
+def _wcmatch_version(given_version):
+ """
+ Return nearest matching supported Unicode version level.
+
+ If an exact match is not determined, the nearest lowest version level is
+ returned after a warning is emitted. For example, given supported levels
+ ``4.1.0`` and ``5.0.0``, and a version string of ``4.9.9``, then ``4.1.0``
+ is selected and returned:
+
+ >>> _wcmatch_version('4.9.9')
+ '4.1.0'
+ >>> _wcmatch_version('8.0')
+ '8.0.0'
+ >>> _wcmatch_version('1')
+ '4.1.0'
+
+ :param str given_version: given version for compare, may be ``auto``
+ (default), to select Unicode Version from Environment Variable,
+ ``UNICODE_VERSION``. If the environment variable is not set, then the
+ latest is used.
+ :rtype: str
+ :returns: unicode string, or non-unicode ``str`` type for python 2
+ when given ``version`` is also type ``str``.
+ """
+ # Design note: the choice to return the same type that is given certainly
+ # complicates it for python 2 str-type, but allows us to define an api that
+ # uses 'string-type' for unicode version level definitions, so all of our
+ # example code works with all versions of python.
+ #
+ # That, along with the string-to-numeric and comparisons of earliest,
+ # latest, matching, or nearest, greatly complicates this function.
+ # Performance is somewhat curbed by memoization.
+ _return_str = not _PY3 and isinstance(given_version, str)
+
+ if _return_str:
+ # avoid list-comprehension to work around a coverage issue:
+ # https://github.com/nedbat/coveragepy/issues/753
+ unicode_versions = list(map(lambda ucs: ucs.encode(), list_versions()))
+ else:
+ unicode_versions = list_versions()
+ latest_version = unicode_versions[-1]
+
+ if given_version in (u'auto', 'auto'):
+ given_version = os.environ.get(
+ 'UNICODE_VERSION',
+ 'latest' if not _return_str else latest_version.encode())
+
+ if given_version in (u'latest', 'latest'):
+ # default match, when given as 'latest', use the most latest unicode
+ # version specification level supported.
+ return latest_version if not _return_str else latest_version.encode()
+
+ if given_version in unicode_versions:
+ # exact match, downstream has specified an explicit matching version
+ # matching any value of list_versions().
+ return given_version if not _return_str else given_version.encode()
+
+ # The user's version is not supported by ours. We return the newest unicode
+ # version level that we support below their given value.
+ try:
+ cmp_given = _wcversion_value(given_version)
+
+ except ValueError:
+ # submitted value raises ValueError in int(), warn and use latest.
+ warnings.warn("UNICODE_VERSION value, {given_version!r}, is invalid. "
+ "Value should be in form of `integer[.]+', the latest "
+ "supported unicode version {latest_version!r} has been "
+ "inferred.".format(given_version=given_version,
+ latest_version=latest_version))
+ return latest_version if not _return_str else latest_version.encode()
+
+ # given version is less than any available version, return earliest
+ # version.
+ earliest_version = unicode_versions[0]
+ cmp_earliest_version = _wcversion_value(earliest_version)
+
+ if cmp_given <= cmp_earliest_version:
+ # this probably isn't what you wanted, the oldest wcwidth.c you will
+ # find in the wild is likely version 5 or 6, which we both support,
+ # but it's better than not saying anything at all.
+ warnings.warn("UNICODE_VERSION value, {given_version!r}, is lower "
+ "than any available unicode version. Returning lowest "
+ "version level, {earliest_version!r}".format(
+ given_version=given_version,
+ earliest_version=earliest_version))
+ return earliest_version if not _return_str else earliest_version.encode()
+
+ # create list of versions which are less than or equal to given version,
+ # and return the tail value, which is the highest level we may support,
+ # or the latest value we support, when completely unmatched or higher
+ # than any supported version.
+ #
+ # the loop always returns before exhausting the list.
+ for idx, unicode_version in enumerate(unicode_versions):
+ # look ahead to next value
+ try:
+ cmp_next_version = _wcversion_value(unicode_versions[idx + 1])
+ except IndexError:
+ # at end of list, return latest version
+ return latest_version if not _return_str else latest_version.encode()
+
+ # Maybe our given version has less parts, as in tuple(8, 0), than the
+ # next compare version tuple(8, 0, 0). Test for an exact match by
+ # comparison of only the leading dotted piece(s): (8, 0) == (8, 0).
+ if cmp_given == cmp_next_version[:len(cmp_given)]:
+ return unicode_versions[idx + 1]
+
+ # Or, if any next value is greater than our given support level
+ # version, return the current value in index. Even though it must
+ # be less than the given value, its our closest possible match. That
+ # is, 4.1 is returned for given 4.9.9, where 4.1 and 5.0 are available.
+ if cmp_next_version > cmp_given:
+ return unicode_version
+ assert False, ("Code path unreachable", given_version, unicode_versions) # pragma: no cover
|