")
+ print()
+ print(sys.argv)
+ print()
+
def print_environ_usage():
    """Dump a list of environment variables used by CGI as HTML."""
    # BUG FIX: in this copy of the file the HTML markup inside the literal
    # was stripped (along with all other angle-bracketed text), leaving a
    # garbled plain-text blob.  Restore the HTML list structure so the
    # output is valid markup again, as the docstring promises.
    print("""
<H3>These environment variables could have been set:</H3>
<UL>
<LI>AUTH_TYPE
<LI>CONTENT_LENGTH
<LI>CONTENT_TYPE
<LI>DATE_GMT
<LI>DATE_LOCAL
<LI>DOCUMENT_NAME
<LI>DOCUMENT_ROOT
<LI>DOCUMENT_URI
<LI>GATEWAY_INTERFACE
<LI>LAST_MODIFIED
<LI>PATH
<LI>PATH_INFO
<LI>PATH_TRANSLATED
<LI>QUERY_STRING
<LI>REMOTE_ADDR
<LI>REMOTE_HOST
<LI>REMOTE_IDENT
<LI>REMOTE_USER
<LI>REQUEST_METHOD
<LI>SCRIPT_NAME
<LI>SERVER_NAME
<LI>SERVER_PORT
<LI>SERVER_PROTOCOL
<LI>SERVER_ROOT
<LI>SERVER_SOFTWARE
</UL>
In addition, HTTP headers sent by the server may be passed in the
environment as well.  Here are some common variable names:
<UL>
<LI>HTTP_ACCEPT
<LI>HTTP_CONNECTION
<LI>HTTP_HOST
<LI>HTTP_PRAGMA
<LI>HTTP_REFERER
<LI>HTTP_USER_AGENT
</UL>
""")
+
+
+# Utilities
+# =========
+
def valid_boundary(s):
    """Return a match object if *s* is a syntactically valid MIME boundary.

    Accepts either str or bytes; a valid boundary is 1-201 printable ASCII
    characters whose last character is not a space.  Returns None otherwise.
    """
    import re
    # Pick the pattern flavour matching the input type (bytes vs text).
    if isinstance(s, bytes):
        pattern = b"^[ -~]{0,200}[!-~]$"
    else:
        pattern = "^[ -~]{0,200}[!-~]$"
    return re.match(pattern, s)
+
# Invoke mainline
# ===============

# Call test() when this file is run as a script (not imported as a module)
if __name__ == '__main__':
    test()  # test() is defined earlier in this module (outside this chunk)
diff --git a/llava/lib/python3.10/cmd.py b/llava/lib/python3.10/cmd.py
new file mode 100644
index 0000000000000000000000000000000000000000..859e91096d8f57d906c00023ef1a1c0e663178d6
--- /dev/null
+++ b/llava/lib/python3.10/cmd.py
@@ -0,0 +1,401 @@
+"""A generic class to build line-oriented command interpreters.
+
+Interpreters constructed with this class obey the following conventions:
+
+1. End of file on input is processed as the command 'EOF'.
+2. A command is parsed out of each line by collecting the prefix composed
+ of characters in the identchars member.
+3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method
+ is passed a single argument consisting of the remainder of the line.
+4. Typing an empty line repeats the last command. (Actually, it calls the
+ method `emptyline', which may be overridden in a subclass.)
+5. There is a predefined `help' method. Given an argument `topic', it
+ calls the command `help_topic'. With no arguments, it lists all topics
+ with defined help_ functions, broken into up to three topics; documented
+ commands, miscellaneous help topics, and undocumented commands.
+6. The command '?' is a synonym for `help'. The command '!' is a synonym
+ for `shell', if a do_shell method exists.
+7. If completion is enabled, completing commands will be done automatically,
+ and completing of commands args is done by calling complete_foo() with
+ arguments text, line, begidx, endidx. text is string we are matching
+ against, all returned matches must begin with it. line is the current
+ input line (lstripped), begidx and endidx are the beginning and end
+ indexes of the text being matched, which could be used to provide
+ different completion depending upon which position the argument is in.
+
+The `default' method may be overridden to intercept commands for which there
+is no do_ method.
+
+The `completedefault' method may be overridden to intercept completions for
+commands that have no complete_ method.
+
+The data member `self.ruler' sets the character used to draw separator lines
+in the help messages. If empty, no ruler line is drawn. It defaults to "=".
+
+If the value of `self.intro' is nonempty when the cmdloop method is called,
+it is printed out on interpreter startup. This value may be overridden
+via an optional argument to the cmdloop() method.
+
+The data members `self.doc_header', `self.misc_header', and
+`self.undoc_header' set the headers used for the help function's
+listings of documented functions, miscellaneous topics, and undocumented
+functions respectively.
+"""
+
import string, sys

# Public API of this module.
__all__ = ["Cmd"]

# Default interpreter prompt, and the characters that may appear in a
# command word (used by Cmd.parseline to split the command off its args).
PROMPT = '(Cmd) '
IDENTCHARS = string.ascii_letters + string.digits + '_'
+
class Cmd:
    """A simple framework for writing line-oriented command interpreters.

    These are often useful for test harnesses, administrative tools, and
    prototypes that will later be wrapped in a more sophisticated interface.

    A Cmd instance or subclass instance is a line-oriented interpreter
    framework.  There is no good reason to instantiate Cmd itself; rather,
    it's useful as a superclass of an interpreter class you define yourself
    in order to inherit Cmd's methods and encapsulate action methods.

    """
    prompt = PROMPT
    identchars = IDENTCHARS
    ruler = '='
    lastcmd = ''
    intro = None
    doc_leader = ""
    # BUG FIX: "<topic>" had been stripped from this string (this copy of
    # the file lost all angle-bracketed text), garbling the help banner.
    doc_header = "Documented commands (type help <topic>):"
    misc_header = "Miscellaneous help topics:"
    undoc_header = "Undocumented commands:"
    nohelp = "*** No help on %s"
    use_rawinput = 1

    def __init__(self, completekey='tab', stdin=None, stdout=None):
        """Instantiate a line-oriented interpreter framework.

        The optional argument 'completekey' is the readline name of a
        completion key; it defaults to the Tab key. If completekey is
        not None and the readline module is available, command completion
        is done automatically. The optional arguments stdin and stdout
        specify alternate input and output file objects; if not specified,
        sys.stdin and sys.stdout are used.

        """
        if stdin is not None:
            self.stdin = stdin
        else:
            self.stdin = sys.stdin
        if stdout is not None:
            self.stdout = stdout
        else:
            self.stdout = sys.stdout
        self.cmdqueue = []
        self.completekey = completekey

    def cmdloop(self, intro=None):
        """Repeatedly issue a prompt, accept input, parse an initial prefix
        off the received input, and dispatch to action methods, passing them
        the remainder of the line as argument.

        """

        self.preloop()
        if self.use_rawinput and self.completekey:
            try:
                import readline
                self.old_completer = readline.get_completer()
                readline.set_completer(self.complete)
                readline.parse_and_bind(self.completekey+": complete")
            except ImportError:
                pass
        try:
            if intro is not None:
                self.intro = intro
            if self.intro:
                self.stdout.write(str(self.intro)+"\n")
            stop = None
            while not stop:
                if self.cmdqueue:
                    line = self.cmdqueue.pop(0)
                else:
                    if self.use_rawinput:
                        try:
                            line = input(self.prompt)
                        except EOFError:
                            line = 'EOF'
                    else:
                        self.stdout.write(self.prompt)
                        self.stdout.flush()
                        line = self.stdin.readline()
                        if not len(line):
                            line = 'EOF'
                        else:
                            line = line.rstrip('\r\n')
                line = self.precmd(line)
                stop = self.onecmd(line)
                stop = self.postcmd(stop, line)
            self.postloop()
        finally:
            if self.use_rawinput and self.completekey:
                try:
                    import readline
                    readline.set_completer(self.old_completer)
                except ImportError:
                    pass

    def precmd(self, line):
        """Hook method executed just before the command line is
        interpreted, but after the input prompt is generated and issued.

        """
        return line

    def postcmd(self, stop, line):
        """Hook method executed just after a command dispatch is finished."""
        return stop

    def preloop(self):
        """Hook method executed once when the cmdloop() method is called."""
        pass

    def postloop(self):
        """Hook method executed once when the cmdloop() method is about to
        return.

        """
        pass

    def parseline(self, line):
        """Parse the line into a command name and a string containing
        the arguments.  Returns a tuple containing (command, args, line).
        'command' and 'args' may be None if the line couldn't be parsed.
        """
        line = line.strip()
        if not line:
            return None, None, line
        elif line[0] == '?':
            # '?' is a synonym for 'help'
            line = 'help ' + line[1:]
        elif line[0] == '!':
            # '!' is a synonym for 'shell', if a do_shell method exists
            if hasattr(self, 'do_shell'):
                line = 'shell ' + line[1:]
            else:
                return None, None, line
        i, n = 0, len(line)
        while i < n and line[i] in self.identchars: i = i+1
        cmd, arg = line[:i], line[i:].strip()
        return cmd, arg, line

    def onecmd(self, line):
        """Interpret the argument as though it had been typed in response
        to the prompt.

        This may be overridden, but should not normally need to be;
        see the precmd() and postcmd() methods for useful execution hooks.
        The return value is a flag indicating whether interpretation of
        commands by the interpreter should stop.

        """
        cmd, arg, line = self.parseline(line)
        if not line:
            return self.emptyline()
        if cmd is None:
            return self.default(line)
        self.lastcmd = line
        if line == 'EOF' :
            self.lastcmd = ''
        if cmd == '':
            return self.default(line)
        else:
            try:
                func = getattr(self, 'do_' + cmd)
            except AttributeError:
                return self.default(line)
            return func(arg)

    def emptyline(self):
        """Called when an empty line is entered in response to the prompt.

        If this method is not overridden, it repeats the last nonempty
        command entered.

        """
        if self.lastcmd:
            return self.onecmd(self.lastcmd)

    def default(self, line):
        """Called on an input line when the command prefix is not recognized.

        If this method is not overridden, it prints an error message and
        returns.

        """
        self.stdout.write('*** Unknown syntax: %s\n'%line)

    def completedefault(self, *ignored):
        """Method called to complete an input line when no command-specific
        complete_*() method is available.

        By default, it returns an empty list.

        """
        return []

    def completenames(self, text, *ignored):
        """Return the list of command names starting with *text*."""
        dotext = 'do_'+text
        return [a[3:] for a in self.get_names() if a.startswith(dotext)]

    def complete(self, text, state):
        """Return the next possible completion for 'text'.

        If a command has not been entered, then complete against command list.
        Otherwise try to call complete_<text> to get list of completions.
        """
        if state == 0:
            import readline
            origline = readline.get_line_buffer()
            line = origline.lstrip()
            stripped = len(origline) - len(line)
            begidx = readline.get_begidx() - stripped
            endidx = readline.get_endidx() - stripped
            if begidx>0:
                cmd, args, foo = self.parseline(line)
                if cmd == '':
                    compfunc = self.completedefault
                else:
                    try:
                        compfunc = getattr(self, 'complete_' + cmd)
                    except AttributeError:
                        compfunc = self.completedefault
            else:
                compfunc = self.completenames
            self.completion_matches = compfunc(text, line, begidx, endidx)
        try:
            return self.completion_matches[state]
        except IndexError:
            return None

    def get_names(self):
        # This method used to pull in base class attributes
        # at a time dir() didn't do it yet.
        return dir(self.__class__)

    def complete_help(self, *args):
        commands = set(self.completenames(*args))
        topics = set(a[5:] for a in self.get_names()
                     if a.startswith('help_' + args[0]))
        return list(commands | topics)

    def do_help(self, arg):
        'List available commands with "help" or detailed help with "help cmd".'
        if arg:
            # XXX check arg syntax
            try:
                func = getattr(self, 'help_' + arg)
            except AttributeError:
                try:
                    doc=getattr(self, 'do_' + arg).__doc__
                    if doc:
                        self.stdout.write("%s\n"%str(doc))
                        return
                except AttributeError:
                    pass
                self.stdout.write("%s\n"%str(self.nohelp % (arg,)))
                return
            func()
        else:
            names = self.get_names()
            cmds_doc = []
            cmds_undoc = []
            help = {}
            for name in names:
                if name[:5] == 'help_':
                    help[name[5:]]=1
            names.sort()
            # There can be duplicates if routines overridden
            prevname = ''
            for name in names:
                if name[:3] == 'do_':
                    if name == prevname:
                        continue
                    prevname = name
                    cmd=name[3:]
                    if cmd in help:
                        cmds_doc.append(cmd)
                        del help[cmd]
                    elif getattr(self, name).__doc__:
                        cmds_doc.append(cmd)
                    else:
                        cmds_undoc.append(cmd)
            self.stdout.write("%s\n"%str(self.doc_leader))
            self.print_topics(self.doc_header,   cmds_doc,   15,80)
            self.print_topics(self.misc_header,  list(help.keys()),15,80)
            self.print_topics(self.undoc_header, cmds_undoc, 15,80)

    def print_topics(self, header, cmds, cmdlen, maxcol):
        """Write a header, an optional ruler line, and the commands in columns."""
        if cmds:
            self.stdout.write("%s\n"%str(header))
            if self.ruler:
                self.stdout.write("%s\n"%str(self.ruler * len(header)))
            self.columnize(cmds, maxcol-1)
            self.stdout.write("\n")

    def columnize(self, list, displaywidth=80):
        """Display a list of strings as a compact set of columns.

        Each column is only as wide as necessary.
        Columns are separated by two spaces (one was not legible enough).
        """
        if not list:
            self.stdout.write("<empty>\n")
            return

        nonstrings = [i for i in range(len(list))
                        if not isinstance(list[i], str)]
        if nonstrings:
            raise TypeError("list[i] not a string for i in %s"
                            % ", ".join(map(str, nonstrings)))
        size = len(list)
        if size == 1:
            self.stdout.write('%s\n'%str(list[0]))
            return
        # Try every row count from 1 upwards
        for nrows in range(1, len(list)):
            ncols = (size+nrows-1) // nrows
            colwidths = []
            totwidth = -2
            for col in range(ncols):
                colwidth = 0
                for row in range(nrows):
                    i = row + nrows*col
                    if i >= size:
                        break
                    x = list[i]
                    colwidth = max(colwidth, len(x))
                colwidths.append(colwidth)
                totwidth += colwidth + 2
                if totwidth > displaywidth:
                    break
            if totwidth <= displaywidth:
                break
        else:
            nrows = len(list)
            ncols = 1
            colwidths = [0]
        for row in range(nrows):
            texts = []
            for col in range(ncols):
                i = row + nrows*col
                if i >= size:
                    x = ""
                else:
                    x = list[i]
                texts.append(x)
            while texts and not texts[-1]:
                del texts[-1]
            for col in range(len(texts)):
                texts[col] = texts[col].ljust(colwidths[col])
            # BUG FIX: this copy joined with a single space; the docstring and
            # the "+2" width arithmetic above both require a two-space gap.
            self.stdout.write("%s\n"%str("  ".join(texts)))
diff --git a/llava/lib/python3.10/enum.py b/llava/lib/python3.10/enum.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5657a6eba29c19e139bff5089e01d5f1eb362d5
--- /dev/null
+++ b/llava/lib/python3.10/enum.py
@@ -0,0 +1,1053 @@
+import sys
+from types import MappingProxyType, DynamicClassAttribute
+
+
+__all__ = [
+ 'EnumMeta',
+ 'Enum', 'IntEnum', 'Flag', 'IntFlag',
+ 'auto', 'unique',
+ ]
+
+
+def _is_descriptor(obj):
+ """
+ Returns True if obj is a descriptor, False otherwise.
+ """
+ return (
+ hasattr(obj, '__get__') or
+ hasattr(obj, '__set__') or
+ hasattr(obj, '__delete__')
+ )
+
+def _is_dunder(name):
+ """
+ Returns True if a __dunder__ name, False otherwise.
+ """
+ return (
+ len(name) > 4 and
+ name[:2] == name[-2:] == '__' and
+ name[2] != '_' and
+ name[-3] != '_'
+ )
+
+def _is_sunder(name):
+ """
+ Returns True if a _sunder_ name, False otherwise.
+ """
+ return (
+ len(name) > 2 and
+ name[0] == name[-1] == '_' and
+ name[1:2] != '_' and
+ name[-2:-1] != '_'
+ )
+
+def _is_private(cls_name, name):
+ # do not use `re` as `re` imports `enum`
+ pattern = '_%s__' % (cls_name, )
+ pat_len = len(pattern)
+ if (
+ len(name) > pat_len
+ and name.startswith(pattern)
+ and name[pat_len:pat_len+1] != ['_']
+ and (name[-1] != '_' or name[-2] != '_')
+ ):
+ return True
+ else:
+ return False
+
+def _make_class_unpicklable(cls):
+ """
+ Make the given class un-picklable.
+ """
+ def _break_on_call_reduce(self, proto):
+ raise TypeError('%r cannot be pickled' % self)
+ cls.__reduce_ex__ = _break_on_call_reduce
+ cls.__module__ = ''
+
# Sentinel meaning "no value assigned yet" for auto(); replaced with a real
# value by _EnumDict.__setitem__ via _generate_next_value_.
_auto_null = object()
class auto:
    """
    Instances are replaced with an appropriate value in Enum class suites.
    """
    # Filled in by _EnumDict when the member is defined.
    value = _auto_null
+
+
class _EnumDict(dict):
    """
    Track enum member order and ensure member names are not reused.

    EnumMeta will use the names found in self._member_names as the
    enumeration member names.
    """
    def __init__(self):
        super().__init__()
        self._member_names = []     # candidate member names, in definition order
        self._last_values = []      # values assigned so far (for _generate_next_value_)
        self._ignore = []           # names listed in _ignore_
        self._auto_called = False   # True once an auto() value has been generated

    def __setitem__(self, key, value):
        """
        Changes anything not dundered or not a descriptor.

        If an enum member name is used twice, an error is raised; duplicate
        values are not checked for.

        Single underscore (sunder) names are reserved.
        """
        # NOTE: self._cls_name is attached externally by EnumMeta.__prepare__
        # before any item is ever set.
        if _is_private(self._cls_name, key):
            import warnings
            warnings.warn(
                    "private variables, such as %r, will be normal attributes in 3.11"
                        % (key, ),
                    DeprecationWarning,
                    stacklevel=2,
                    )
        if _is_sunder(key):
            # only a small whitelist of sunder names is allowed
            if key not in (
                    '_order_', '_create_pseudo_member_',
                    '_generate_next_value_', '_missing_', '_ignore_',
                    ):
                raise ValueError('_names_ are reserved for future Enum use')
            if key == '_generate_next_value_':
                # check if members already defined as auto()
                if self._auto_called:
                    raise TypeError("_generate_next_value_ must be defined before members")
                # stored without the trailing underscore; read back below as
                # self._generate_next_value when expanding auto()
                setattr(self, '_generate_next_value', value)
            elif key == '_ignore_':
                # accept either a whitespace/comma separated string or an iterable
                if isinstance(value, str):
                    value = value.replace(',',' ').split()
                else:
                    value = list(value)
                self._ignore = value
                already = set(value) & set(self._member_names)
                if already:
                    raise ValueError(
                            '_ignore_ cannot specify already set names: %r'
                            % (already, )
                            )
        elif _is_dunder(key):
            # legacy spelling of _order_
            if key == '__order__':
                key = '_order_'
        elif key in self._member_names:
            # descriptor overwriting an enum?
            raise TypeError('Attempted to reuse key: %r' % key)
        elif key in self._ignore:
            pass
        elif not _is_descriptor(value):
            if key in self:
                # enum overwriting a descriptor?
                raise TypeError('%r already defined as: %r' % (key, self[key]))
            if isinstance(value, auto):
                # expand auto() into a concrete value now
                if value.value == _auto_null:
                    value.value = self._generate_next_value(
                            key,
                            1,
                            len(self._member_names),
                            self._last_values[:],
                            )
                    self._auto_called = True
                value = value.value
            self._member_names.append(key)
            self._last_values.append(value)
        super().__setitem__(key, value)
+
+
# Dummy value for Enum as EnumMeta explicitly checks for it, but of course
# until EnumMeta finishes running the first time the Enum class doesn't exist.
# This is also why there are checks in EnumMeta like `if Enum is not None`.
# (Rebound to the real class when `class Enum(metaclass=EnumMeta)` executes
# later in this module.)
Enum = None
+
+class EnumMeta(type):
+ """
+ Metaclass for Enum
+ """
    @classmethod
    def __prepare__(metacls, cls, bases, **kwds):
        """Return an _EnumDict namespace that records member definition order."""
        # check that previous enum members do not exist
        metacls._check_for_existing_members(cls, bases)
        # create the namespace dict
        enum_dict = _EnumDict()
        # _EnumDict.__setitem__ needs the class name for private-name checks
        enum_dict._cls_name = cls
        # inherit previous flags and _generate_next_value_ function
        member_type, first_enum = metacls._get_mixins_(cls, bases)
        if first_enum is not None:
            enum_dict['_generate_next_value_'] = getattr(
                    first_enum, '_generate_next_value_', None,
                    )
        return enum_dict
+
    def __new__(metacls, cls, bases, classdict, **kwds):
        """Create the new enum class: move members out of the class dict,
        instantiate them, resolve aliases, and wire up pickle support."""
        # an Enum class is final once enumeration items have been defined; it
        # cannot be mixed with other types (int, float, etc.) if it has an
        # inherited __new__ unless a new __new__ is defined (or the resulting
        # class will fail).
        #
        # remove any keys listed in _ignore_
        classdict.setdefault('_ignore_', []).append('_ignore_')
        ignore = classdict['_ignore_']
        for key in ignore:
            classdict.pop(key, None)
        member_type, first_enum = metacls._get_mixins_(cls, bases)
        __new__, save_new, use_args = metacls._find_new_(
                classdict, member_type, first_enum,
                )

        # save enum items into separate mapping so they don't get baked into
        # the new class
        enum_members = {k: classdict[k] for k in classdict._member_names}
        for name in classdict._member_names:
            del classdict[name]

        # adjust the sunders
        _order_ = classdict.pop('_order_', None)

        # check for illegal enum names (any others?)
        invalid_names = set(enum_members) & {'mro', ''}
        if invalid_names:
            raise ValueError('Invalid enum member name: {0}'.format(
                ','.join(invalid_names)))

        # create a default docstring if one has not been provided
        if '__doc__' not in classdict:
            classdict['__doc__'] = 'An enumeration.'

        enum_class = super().__new__(metacls, cls, bases, classdict, **kwds)
        enum_class._member_names_ = []               # names in definition order
        enum_class._member_map_ = {}                 # name->value map
        enum_class._member_type_ = member_type

        # save DynamicClassAttribute attributes from super classes so we know
        # if we can take the shortcut of storing members in the class dict
        dynamic_attributes = {
                k for c in enum_class.mro()
                for k, v in c.__dict__.items()
                if isinstance(v, DynamicClassAttribute)
                }

        # Reverse value->name map for hashable values.
        enum_class._value2member_map_ = {}

        # If a custom type is mixed into the Enum, and it does not know how
        # to pickle itself, pickle.dumps will succeed but pickle.loads will
        # fail.  Rather than have the error show up later and possibly far
        # from the source, sabotage the pickle protocol for this class so
        # that pickle.dumps also fails.
        #
        # However, if the new class implements its own __reduce_ex__, do not
        # sabotage -- it's on them to make sure it works correctly.  We use
        # __reduce_ex__ instead of any of the others as it is preferred by
        # pickle over __reduce__, and it handles all pickle protocols.
        if '__reduce_ex__' not in classdict:
            if member_type is not object:
                methods = ('__getnewargs_ex__', '__getnewargs__',
                        '__reduce_ex__', '__reduce__')
                if not any(m in member_type.__dict__ for m in methods):
                    if '__new__' in classdict:
                        # too late, sabotage
                        _make_class_unpicklable(enum_class)
                    else:
                        # final attempt to verify that pickling would work:
                        # travel mro until __new__ is found, checking for
                        # __reduce__ and friends along the way -- if any of them
                        # are found before/when __new__ is found, pickling should
                        # work
                        sabotage = None
                        for chain in bases:
                            for base in chain.__mro__:
                                if base is object:
                                    continue
                                elif any(m in base.__dict__ for m in methods):
                                    # found one, we're good
                                    sabotage = False
                                    break
                                elif '__new__' in base.__dict__:
                                    # not good
                                    sabotage = True
                                    break
                            if sabotage is not None:
                                break
                        if sabotage:
                            _make_class_unpicklable(enum_class)
        # instantiate them, checking for duplicates as we go
        # we instantiate first instead of checking for duplicates first in case
        # a custom __new__ is doing something funky with the values -- such as
        # auto-numbering ;)
        for member_name in classdict._member_names:
            value = enum_members[member_name]
            if not isinstance(value, tuple):
                args = (value, )
            else:
                args = value
            if member_type is tuple:   # special case for tuple enums
                args = (args, )     # wrap it one more time
            if not use_args:
                enum_member = __new__(enum_class)
                if not hasattr(enum_member, '_value_'):
                    enum_member._value_ = value
            else:
                enum_member = __new__(enum_class, *args)
                if not hasattr(enum_member, '_value_'):
                    if member_type is object:
                        enum_member._value_ = value
                    else:
                        enum_member._value_ = member_type(*args)
            value = enum_member._value_
            enum_member._name_ = member_name
            enum_member.__objclass__ = enum_class
            enum_member.__init__(*args)
            # If another member with the same value was already defined, the
            # new member becomes an alias to the existing one.
            for name, canonical_member in enum_class._member_map_.items():
                if canonical_member._value_ == enum_member._value_:
                    enum_member = canonical_member
                    break
            else:
                # Aliases don't appear in member names (only in __members__).
                enum_class._member_names_.append(member_name)
            # performance boost for any member that would not shadow
            # a DynamicClassAttribute
            if member_name not in dynamic_attributes:
                setattr(enum_class, member_name, enum_member)
            # now add to _member_map_
            enum_class._member_map_[member_name] = enum_member
            try:
                # This may fail if value is not hashable. We can't add the value
                # to the map, and by-value lookups for this value will be
                # linear.
                enum_class._value2member_map_[value] = enum_member
            except TypeError:
                pass

        # double check that repr and friends are not the mixin's or various
        # things break (such as pickle)
        # however, if the method is defined in the Enum itself, don't replace
        # it
        for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'):
            if name in classdict:
                continue
            class_method = getattr(enum_class, name)
            obj_method = getattr(member_type, name, None)
            enum_method = getattr(first_enum, name, None)
            if obj_method is not None and obj_method is class_method:
                setattr(enum_class, name, enum_method)

        # replace any other __new__ with our own (as long as Enum is not None,
        # anyway) -- again, this is to support pickle
        if Enum is not None:
            # if the user defined their own __new__, save it before it gets
            # clobbered in case they subclass later
            if save_new:
                enum_class.__new_member__ = __new__
            enum_class.__new__ = Enum.__new__

        # py3 support for definition order (helps keep py2/py3 code in sync)
        if _order_ is not None:
            if isinstance(_order_, str):
                _order_ = _order_.replace(',', ' ').split()
            if _order_ != enum_class._member_names_:
                raise TypeError('member order does not match _order_')

        return enum_class
+
+ def __bool__(self):
+ """
+ classes/types should always be True.
+ """
+ return True
+
    def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1):
        """
        Either returns an existing member, or creates a new enum class.

        This method is used both when an enum class is given a value to match
        to an enumeration member (i.e. Color(3)) and for the functional API
        (i.e. Color = Enum('Color', names='RED GREEN BLUE')).

        When used for the functional API:

        `value` will be the name of the new class.

        `names` should be either a string of white-space/comma delimited names
        (values will start at `start`), or an iterator/mapping of name, value pairs.

        `module` should be set to the module this class is being created in;
        if it is not set, an attempt to find that module will be made, but if
        it fails the class will not be picklable.

        `qualname` should be set to the actual location this class can be found
        at in its module; by default it is set to the global scope.  If this is
        not correct, unpickling will fail in some circumstances.

        `type`, if set, will be mixed in as the first base class.
        """
        if names is None:  # simple value lookup, e.g. Color(3)
            return cls.__new__(cls, value)
        # otherwise, functional API: we're creating a new Enum type
        return cls._create_(
                value,
                names,
                module=module,
                qualname=qualname,
                type=type,
                start=start,
                )
+
    def __contains__(cls, obj):
        """Return True when *obj* is a member of this enum.

        Non-member objects currently raise TypeError (after a
        DeprecationWarning announcing the 3.12 behavior change).
        """
        if not isinstance(obj, Enum):
            # warn first, then raise unconditionally -- the deprecation is
            # about the *future* return-True behavior, not the raise itself
            import warnings
            warnings.warn(
                    "in 3.12 __contains__ will no longer raise TypeError, but will return True if\n"
                    "obj is a member or a member's value",
                    DeprecationWarning,
                    stacklevel=2,
                    )
            raise TypeError(
                "unsupported operand type(s) for 'in': '%s' and '%s'" % (
                    type(obj).__qualname__, cls.__class__.__qualname__))
        return isinstance(obj, cls) and obj._name_ in cls._member_map_
+
    def __delattr__(cls, attr):
        """Block deletion of enum members; delegate all other deletions."""
        # nicer error message when someone tries to delete an attribute
        # (see issue19025).
        if attr in cls._member_map_:
            raise AttributeError("%s: cannot delete Enum member." % cls.__name__)
        super().__delattr__(attr)
+
    def __dir__(self):
        """Restrict dir() to a few dunders plus the canonical member names."""
        return (
                ['__class__', '__doc__', '__members__', '__module__']
                + self._member_names_
                )
+
    def __getattr__(cls, name):
        """
        Return the enum member matching `name`

        We use __getattr__ instead of descriptors or inserting into the enum
        class' __dict__ in order to support `name` and `value` being both
        properties for enum members (which live in the class' __dict__) and
        enum members themselves.
        """
        # never resolve dunders through the member map
        if _is_dunder(name):
            raise AttributeError(name)
        try:
            return cls._member_map_[name]
        except KeyError:
            raise AttributeError(name) from None
+
    def __getitem__(cls, name):
        """Return the member (aliases included) named *name*; KeyError if absent."""
        return cls._member_map_[name]
+
    def __iter__(cls):
        """
        Returns members in definition order.
        """
        # _member_names_ holds only canonical names, so aliases are skipped
        return (cls._member_map_[name] for name in cls._member_names_)
+
    def __len__(cls):
        """Return the number of canonical (non-alias) members."""
        return len(cls._member_names_)
+
    @property
    def __members__(cls):
        """
        Returns a mapping of member name->value.

        This mapping lists all enum members, including aliases. Note that this
        is a read-only view of the internal mapping.
        """
        return MappingProxyType(cls._member_map_)
+
+ def __repr__(cls):
+ return "" % cls.__name__
+
    def __reversed__(cls):
        """
        Returns members in reverse definition order.
        """
        # same canonical-names-only iteration as __iter__, reversed
        return (cls._member_map_[name] for name in reversed(cls._member_names_))
+
    def __setattr__(cls, name, value):
        """
        Block attempts to reassign Enum members.

        A simple assignment to the class namespace only changes one of the
        several possible ways to get an Enum member from the Enum class,
        resulting in an inconsistent Enumeration.
        """
        # read the class' own __dict__ directly; _member_map_ may not exist
        # yet while the class is still being created, hence the {} default
        member_map = cls.__dict__.get('_member_map_', {})
        if name in member_map:
            raise AttributeError('Cannot reassign members.')
        super().__setattr__(name, value)
+
    def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, start=1):
        """
        Convenience method to create a new Enum class.

        `names` can be:

        * A string containing member names, separated either with spaces or
          commas.  Values are incremented by 1 from `start`.
        * An iterable of member names.  Values are incremented by 1 from `start`.
        * An iterable of (member name, value) pairs.
        * A mapping of member name -> value pairs.
        """
        metacls = cls.__class__
        bases = (cls, ) if type is None else (type, cls)
        _, first_enum = cls._get_mixins_(cls, bases)
        classdict = metacls.__prepare__(class_name, bases)

        # special processing needed for names?
        if isinstance(names, str):
            names = names.replace(',', ' ').split()
        if isinstance(names, (tuple, list)) and names and isinstance(names[0], str):
            # bare names: generate values via _generate_next_value_
            original_names, names = names, []
            last_values = []
            for count, name in enumerate(original_names):
                value = first_enum._generate_next_value_(name, start, count, last_values[:])
                last_values.append(value)
                names.append((name, value))

        # Here, names is either an iterable of (name, value) or a mapping.
        for item in names:
            if isinstance(item, str):
                # mapping: the item is a key into names
                member_name, member_value = item, names[item]
            else:
                member_name, member_value = item
            classdict[member_name] = member_value
        enum_class = metacls.__new__(metacls, class_name, bases, classdict)

        # TODO: replace the frame hack if a blessed way to know the calling
        # module is ever developed
        if module is None:
            try:
                module = sys._getframe(2).f_globals['__name__']
            except (AttributeError, ValueError, KeyError):
                pass
        if module is None:
            # can't locate the caller's module: pickling would break later,
            # so break it loudly now
            _make_class_unpicklable(enum_class)
        else:
            enum_class.__module__ = module
        if qualname is not None:
            enum_class.__qualname__ = qualname

        return enum_class
+
    def _convert_(cls, name, module, filter, source=None):
        """
        Create a new Enum subclass that replaces a collection of global constants
        """
        # convert all constants from source (or module) that pass filter() to
        # a new Enum called name, and export the enum and its members back to
        # module;
        # also, replace the __reduce_ex__ method so unpickling works in
        # previous Python versions
        module_globals = vars(sys.modules[module])
        if source:
            source = vars(source)
        else:
            source = module_globals
        # _value2member_map_ is populated in the same order every time
        # for a consistent reverse mapping of number to name when there
        # are multiple names for the same number.
        members = [
                (name, value)
                for name, value in source.items()
                if filter(name)]
        try:
            # sort by value
            members.sort(key=lambda t: (t[1], t[0]))
        except TypeError:
            # unless some values aren't comparable, in which case sort by name
            members.sort(key=lambda t: t[0])
        cls = cls(name, members, module=module)
        # _reduce_ex_by_name is defined elsewhere in this module (outside
        # this chunk); it pickles members by name for cross-version support
        cls.__reduce_ex__ = _reduce_ex_by_name
        module_globals.update(cls.__members__)
        module_globals[name] = cls
        return cls
+
    @staticmethod
    def _check_for_existing_members(class_name, bases):
        """Raise TypeError if any base already defines enum members
        (enumerations with members cannot be subclassed)."""
        for chain in bases:
            for base in chain.__mro__:
                if issubclass(base, Enum) and base._member_names_:
                    raise TypeError(
                            "%s: cannot extend enumeration %r"
                            % (class_name, base.__name__)
                            )
+
    @staticmethod
    def _get_mixins_(class_name, bases):
        """
        Returns the type for creating enum members, and the first inherited
        enum class.

        bases: the tuple of bases that was given to __new__
        """
        if not bases:
            # Enum itself is being created
            return object, Enum

        def _find_data_type(bases):
            # scan each base chain for a concrete mixed-in data type
            data_types = set()
            for chain in bases:
                candidate = None
                for base in chain.__mro__:
                    if base is object:
                        continue
                    elif issubclass(base, Enum):
                        if base._member_type_ is not object:
                            data_types.add(base._member_type_)
                        break
                    elif '__new__' in base.__dict__:
                        if issubclass(base, Enum):
                            continue
                        data_types.add(candidate or base)
                        break
                    else:
                        candidate = candidate or base
            if len(data_types) > 1:
                raise TypeError('%r: too many data types: %r' % (class_name, data_types))
            elif data_types:
                return data_types.pop()
            else:
                return None

        # ensure final parent class is an Enum derivative, find any concrete
        # data type, and check that Enum has no members
        first_enum = bases[-1]
        if not issubclass(first_enum, Enum):
            raise TypeError("new enumerations should be created as "
                    "`EnumName([mixin_type, ...] [data_type,] enum_type)`")
        member_type = _find_data_type(bases) or object
        if first_enum._member_names_:
            raise TypeError("Cannot extend enumerations")
        return member_type, first_enum
+
+    @staticmethod
+    def _find_new_(classdict, member_type, first_enum):
+        """
+        Returns the __new__ to be used for creating the enum members.
+
+        classdict: the class dictionary given to __new__
+        member_type: the data type whose __new__ will be used by default
+        first_enum: enumeration to check for an overriding __new__
+
+        Returns a 3-tuple (__new__, save_new, use_args):
+        save_new is True when the user defined __new__ directly in classdict;
+        use_args is True when member values should be passed to __new__.
+        """
+        # now find the correct __new__, checking to see of one was defined
+        # by the user; also check earlier enum classes in case a __new__ was
+        # saved as __new_member__
+        __new__ = classdict.get('__new__', None)
+
+        # should __new__ be saved as __new_member__ later?
+        save_new = __new__ is not None
+
+        if __new__ is None:
+            # check all possibles for __new_member__ before falling back to
+            # __new__
+            for method in ('__new_member__', '__new__'):
+                for possible in (member_type, first_enum):
+                    target = getattr(possible, method, None)
+                    if target not in {
+                            None,
+                            None.__new__,
+                            object.__new__,
+                            Enum.__new__,
+                            }:
+                        __new__ = target
+                        break
+                if __new__ is not None:
+                    break
+            else:
+                __new__ = object.__new__
+
+        # if a non-object.__new__ is used then whatever value/tuple was
+        # assigned to the enum member name will be passed to __new__ and to the
+        # new enum member's __init__
+        if __new__ is object.__new__:
+            use_args = False
+        else:
+            use_args = True
+        return __new__, save_new, use_args
+
+
+class Enum(metaclass=EnumMeta):
+    """
+    Generic enumeration.
+
+    Derive from this class to define new enumerations.
+    """
+    def __new__(cls, value):
+        # all enum instances are actually created during class construction
+        # without calling this method; this method is called by the metaclass'
+        # __call__ (i.e. Color(3) ), and by pickle
+        if type(value) is cls:
+            # For lookups like Color(Color.RED)
+            return value
+        # by-value search for a matching enum member
+        # see if it's in the reverse mapping (for hashable values)
+        try:
+            return cls._value2member_map_[value]
+        except KeyError:
+            # Not found, no need to do long O(n) search
+            pass
+        except TypeError:
+            # value is unhashable; not there, now do long search -- O(n) behavior
+            for member in cls._member_map_.values():
+                if member._value_ == value:
+                    return member
+        # still not found -- try _missing_ hook
+        try:
+            exc = None
+            result = cls._missing_(value)
+        except Exception as e:
+            exc = e
+            result = None
+        try:
+            if isinstance(result, cls):
+                return result
+            else:
+                ve_exc = ValueError("%r is not a valid %s" % (value, cls.__qualname__))
+                if result is None and exc is None:
+                    raise ve_exc
+                elif exc is None:
+                    # _missing_ returned a non-member, non-None object
+                    exc = TypeError(
+                            'error in %s._missing_: returned %r instead of None or a valid member'
+                            % (cls.__name__, result)
+                            )
+                if not isinstance(exc, ValueError):
+                    exc.__context__ = ve_exc
+                raise exc
+        finally:
+            # ensure all variables that could hold an exception are destroyed
+            exc = None
+            ve_exc = None
+
+    def _generate_next_value_(name, start, count, last_values):
+        """
+        Generate the next value when not given.
+
+        name: the name of the member
+        start: the initial start value or None
+        count: the number of existing members
+        last_values: the list of values already assigned
+        """
+        # use the most recent value that supports + 1; otherwise fall back
+        # to the start value
+        for last_value in reversed(last_values):
+            try:
+                return last_value + 1
+            except TypeError:
+                pass
+        else:
+            return start
+
+    @classmethod
+    def _missing_(cls, value):
+        # default: no fallback lookup; subclasses may override
+        return None
+
+    def __repr__(self):
+        return "<%s.%s: %r>" % (
+                self.__class__.__name__, self._name_, self._value_)
+
+    def __str__(self):
+        return "%s.%s" % (self.__class__.__name__, self._name_)
+
+    def __dir__(self):
+        """
+        Returns all members and all public methods
+        """
+        added_behavior = [
+                m
+                for cls in self.__class__.mro()
+                for m in cls.__dict__
+                if m[0] != '_' and m not in self._member_map_
+                ] + [m for m in self.__dict__ if m[0] != '_']
+        return (['__class__', '__doc__', '__module__'] + added_behavior)
+
+    def __format__(self, format_spec):
+        """
+        Returns format using actual value type unless __str__ has been overridden.
+        """
+        # mixed-in Enums should use the mixed-in type's __format__, otherwise
+        # we can get strange results with the Enum name showing up instead of
+        # the value
+
+        # pure Enum branch, or branch with __str__ explicitly overridden
+        str_overridden = type(self).__str__ not in (Enum.__str__, Flag.__str__)
+        if self._member_type_ is object or str_overridden:
+            cls = str
+            val = str(self)
+        # mix-in branch
+        else:
+            cls = self._member_type_
+            val = self._value_
+        return cls.__format__(val, format_spec)
+
+    def __hash__(self):
+        # hash by name, not value -- values may be unhashable (see __new__)
+        return hash(self._name_)
+
+    def __reduce_ex__(self, proto):
+        # pickle by value; unpickling re-runs the by-value lookup in __new__
+        return self.__class__, (self._value_, )
+
+    # DynamicClassAttribute is used to provide access to the `name` and
+    # `value` properties of enum members while keeping some measure of
+    # protection from modification, while still allowing for an enumeration
+    # to have members named `name` and `value`. This works because enumeration
+    # members are not set directly on the enum class -- __getattr__ is
+    # used to look them up.
+
+    @DynamicClassAttribute
+    def name(self):
+        """The name of the Enum member."""
+        return self._name_
+
+    @DynamicClassAttribute
+    def value(self):
+        """The value of the Enum member."""
+        return self._value_
+
+
+class IntEnum(int, Enum):
+    """
+    Enum where members are also (and must be) ints.
+
+    Members inherit int behavior from the mixed-in int base class.
+    """
+
+
+def _reduce_ex_by_name(self, proto):
+    # Pickle by member name -- installed by EnumMeta._convert_ so that
+    # converted module constants unpickle correctly on older Pythons.
+    return self.name
+
+class Flag(Enum):
+    """
+    Support for flags
+    """
+
+    def _generate_next_value_(name, start, count, last_values):
+        """
+        Generate the next value when not given.
+
+        name: the name of the member
+        start: the initial start value or None
+        count: the number of existing members
+        last_values: the list of values already assigned
+        """
+        if not count:
+            # first member: start value, defaulting to 1 (lowest bit)
+            return start if start is not None else 1
+        for last_value in reversed(last_values):
+            try:
+                high_bit = _high_bit(last_value)
+                break
+            except Exception:
+                raise TypeError('Invalid Flag value: %r' % last_value) from None
+        # next power of two above the highest bit seen so far
+        return 2 ** (high_bit+1)
+
+    @classmethod
+    def _missing_(cls, value):
+        """
+        Returns member (possibly creating it) if one can be found for value.
+        """
+        original_value = value
+        # a negative value is treated as the complement of its inverse
+        if value < 0:
+            value = ~value
+        possible_member = cls._create_pseudo_member_(value)
+        if original_value < 0:
+            possible_member = ~possible_member
+        return possible_member
+
+    @classmethod
+    def _create_pseudo_member_(cls, value):
+        """
+        Create a composite member iff value contains only members.
+        """
+        pseudo_member = cls._value2member_map_.get(value, None)
+        if pseudo_member is None:
+            # verify all bits are accounted for
+            _, extra_flags = _decompose(cls, value)
+            if extra_flags:
+                raise ValueError("%r is not a valid %s" % (value, cls.__qualname__))
+            # construct a singleton enum pseudo-member
+            pseudo_member = object.__new__(cls)
+            pseudo_member._name_ = None
+            pseudo_member._value_ = value
+            # use setdefault in case another thread already created a composite
+            # with this value
+            pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member)
+        return pseudo_member
+
+    def __contains__(self, other):
+        """
+        Returns True if self has at least the same flags set as other.
+        """
+        if not isinstance(other, self.__class__):
+            raise TypeError(
+                "unsupported operand type(s) for 'in': '%s' and '%s'" % (
+                    type(other).__qualname__, self.__class__.__qualname__))
+        # subset test on the underlying bit masks
+        return other._value_ & self._value_ == other._value_
+
+    def __repr__(self):
+        cls = self.__class__
+        if self._name_ is not None:
+            return '<%s.%s: %r>' % (cls.__name__, self._name_, self._value_)
+        # unnamed composite: spell out the member breakdown
+        members, uncovered = _decompose(cls, self._value_)
+        return '<%s.%s: %r>' % (
+                cls.__name__,
+                '|'.join([str(m._name_ or m._value_) for m in members]),
+                self._value_,
+                )
+
+    def __str__(self):
+        cls = self.__class__
+        if self._name_ is not None:
+            return '%s.%s' % (cls.__name__, self._name_)
+        members, uncovered = _decompose(cls, self._value_)
+        if len(members) == 1 and members[0]._name_ is None:
+            return '%s.%r' % (cls.__name__, members[0]._value_)
+        else:
+            return '%s.%s' % (
+                    cls.__name__,
+                    '|'.join([str(m._name_ or m._value_) for m in members]),
+                    )
+
+    def __bool__(self):
+        # a flag is falsy iff no bits are set
+        return bool(self._value_)
+
+    def __or__(self, other):
+        if not isinstance(other, self.__class__):
+            return NotImplemented
+        return self.__class__(self._value_ | other._value_)
+
+    def __and__(self, other):
+        if not isinstance(other, self.__class__):
+            return NotImplemented
+        return self.__class__(self._value_ & other._value_)
+
+    def __xor__(self, other):
+        if not isinstance(other, self.__class__):
+            return NotImplemented
+        return self.__class__(self._value_ ^ other._value_)
+
+    def __invert__(self):
+        # complement within the defined flags: collect every member whose
+        # bits do not overlap self
+        members, uncovered = _decompose(self.__class__, self._value_)
+        inverted = self.__class__(0)
+        for m in self.__class__:
+            if m not in members and not (m._value_ & self._value_):
+                inverted = inverted | m
+        return self.__class__(inverted)
+
+
+class IntFlag(int, Flag):
+    """
+    Support for integer-based Flags
+    """
+
+    @classmethod
+    def _missing_(cls, value):
+        """
+        Returns member (possibly creating it) if one can be found for value.
+        """
+        if not isinstance(value, int):
+            raise ValueError("%r is not a valid %s" % (value, cls.__qualname__))
+        new_member = cls._create_pseudo_member_(value)
+        return new_member
+
+    @classmethod
+    def _create_pseudo_member_(cls, value):
+        """
+        Create a composite member iff value contains only members.
+        """
+        pseudo_member = cls._value2member_map_.get(value, None)
+        if pseudo_member is None:
+            need_to_create = [value]
+            # get unaccounted for bits
+            _, extra_flags = _decompose(cls, value)
+            # create single-bit pseudo-members for each unknown bit, too
+            while extra_flags:
+                bit = _high_bit(extra_flags)
+                flag_value = 2 ** bit
+                if (flag_value not in cls._value2member_map_ and
+                        flag_value not in need_to_create
+                        ):
+                    need_to_create.append(flag_value)
+                if extra_flags == -flag_value:
+                    # negative mask exhausted; nothing left to strip
+                    extra_flags = 0
+                else:
+                    extra_flags ^= flag_value
+            for value in reversed(need_to_create):
+                # construct singleton pseudo-members
+                pseudo_member = int.__new__(cls, value)
+                pseudo_member._name_ = None
+                pseudo_member._value_ = value
+                # use setdefault in case another thread already created a composite
+                # with this value
+                pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member)
+        return pseudo_member
+
+    def __or__(self, other):
+        # unlike Flag, plain ints are accepted as operands
+        if not isinstance(other, (self.__class__, int)):
+            return NotImplemented
+        result = self.__class__(self._value_ | self.__class__(other)._value_)
+        return result
+
+    def __and__(self, other):
+        if not isinstance(other, (self.__class__, int)):
+            return NotImplemented
+        return self.__class__(self._value_ & self.__class__(other)._value_)
+
+    def __xor__(self, other):
+        if not isinstance(other, (self.__class__, int)):
+            return NotImplemented
+        return self.__class__(self._value_ ^ self.__class__(other)._value_)
+
+    __ror__ = __or__
+    __rand__ = __and__
+    __rxor__ = __xor__
+
+    def __invert__(self):
+        # true int bitwise complement (may be negative), unlike Flag.__invert__
+        result = self.__class__(~self._value_)
+        return result
+
+
+def _high_bit(value):
+    """
+    returns index of highest bit, or -1 if value is zero
+
+    NOTE: for negative values the bit length of the magnitude is used,
+    so the result is only meaningful for non-negative input.
+    """
+    return value.bit_length() - 1
+
+def unique(enumeration):
+    """
+    Class decorator for enumerations ensuring unique member values.
+
+    Raises ValueError listing every alias found; returns the enumeration
+    unchanged otherwise.
+    """
+    duplicates = []
+    for name, member in enumeration.__members__.items():
+        # an alias is a name whose canonical member carries a different name
+        if name != member.name:
+            duplicates.append((name, member.name))
+    if duplicates:
+        alias_details = ', '.join(
+                ["%s -> %s" % (alias, name) for (alias, name) in duplicates])
+        raise ValueError('duplicate values found in %r: %s' %
+                (enumeration, alias_details))
+    return enumeration
+
+def _decompose(flag, value):
+    """
+    Extract all members from the value.
+
+    Returns (members, not_covered): the list of named members whose bits
+    are set in value (largest value first), and the bits of value that no
+    member accounts for.
+    """
+    # _decompose is only called if the value is not named
+    not_covered = value
+    negative = value < 0
+    members = []
+    for member in flag:
+        member_value = member.value
+        if member_value and member_value & value == member_value:
+            members.append(member)
+            not_covered &= ~member_value
+    if not negative:
+        # pick up pseudo-members (unnamed composites) for leftover bits;
+        # skipped for negative values, where bit-by-bit scanning won't end
+        tmp = not_covered
+        while tmp:
+            flag_value = 2 ** _high_bit(tmp)
+            if flag_value in flag._value2member_map_:
+                members.append(flag._value2member_map_[flag_value])
+                not_covered &= ~flag_value
+            tmp &= ~flag_value
+    if not members and value in flag._value2member_map_:
+        members.append(flag._value2member_map_[value])
+    members.sort(key=lambda m: m._value_, reverse=True)
+    if len(members) > 1 and members[0].value == value:
+        # we have the breakdown, don't need the value member itself
+        members.pop(0)
+    return members, not_covered
diff --git a/llava/lib/python3.10/fileinput.py b/llava/lib/python3.10/fileinput.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bd19906dcf5d276b9259d5e1147cde270f372de
--- /dev/null
+++ b/llava/lib/python3.10/fileinput.py
@@ -0,0 +1,462 @@
+"""Helper class to quickly write a loop over all standard input files.
+
+Typical use is:
+
+ import fileinput
+ for line in fileinput.input(encoding="utf-8"):
+ process(line)
+
+This iterates over the lines of all files listed in sys.argv[1:],
+defaulting to sys.stdin if the list is empty. If a filename is '-' it
+is also replaced by sys.stdin and the optional arguments mode and
+openhook are ignored. To specify an alternative list of filenames,
+pass it as the argument to input(). A single file name is also allowed.
+
+Functions filename(), lineno() return the filename and cumulative line
+number of the line that has just been read; filelineno() returns its
+line number in the current file; isfirstline() returns true iff the
+line just read is the first line of its file; isstdin() returns true
+iff the line was read from sys.stdin. Function nextfile() closes the
+current file so that the next iteration will read the first line from
+the next file (if any); lines not read from the file will not count
+towards the cumulative line count; the filename is not changed until
+after the first line of the next file has been read. Function close()
+closes the sequence.
+
+Before any lines have been read, filename() returns None and both line
+numbers are zero; nextfile() has no effect. After all lines have been
+read, filename() and the line number functions return the values
+pertaining to the last line read; nextfile() has no effect.
+
+All files are opened in text mode by default, you can override this by
+setting the mode parameter to input() or FileInput.__init__().
+If an I/O error occurs during opening or reading a file, the OSError
+exception is raised.
+
+If sys.stdin is used more than once, the second and further use will
+return no lines, except perhaps for interactive use, or if it has been
+explicitly reset (e.g. using sys.stdin.seek(0)).
+
+Empty files are opened and immediately closed; the only time their
+presence in the list of filenames is noticeable at all is when the
+last file opened is empty.
+
+It is possible that the last line of a file doesn't end in a newline
+character; otherwise lines are returned including the trailing
+newline.
+
+Class FileInput is the implementation; its methods filename(),
+lineno(), filelineno(), isfirstline(), isstdin(), nextfile() and close()
+correspond to the functions in the module. In addition it has a
+readline() method which returns the next input line, and a
+__getitem__() method which implements the sequence behavior. The
+sequence must be accessed in strictly sequential order; sequence
+access and readline() cannot be mixed.
+
+Optional in-place filtering: if the keyword argument inplace=1 is
+passed to input() or to the FileInput constructor, the file is moved
+to a backup file and standard output is directed to the input file.
+This makes it possible to write a filter that rewrites its input file
+in place. If the keyword argument backup="." is also
+given, it specifies the extension for the backup file, and the backup
+file remains around; by default, the extension is ".bak" and it is
+deleted when the output file is closed. In-place filtering is
+disabled when standard input is read. XXX The current implementation
+does not work for MS-DOS 8+3 filesystems.
+"""
+
+import io
+import sys, os
+from types import GenericAlias
+
+__all__ = ["input", "close", "nextfile", "filename", "lineno", "filelineno",
+ "fileno", "isfirstline", "isstdin", "FileInput", "hook_compressed",
+ "hook_encoded"]
+
+_state = None
+
+def input(files=None, inplace=False, backup="", *, mode="r", openhook=None,
+          encoding=None, errors=None):
+    """Return an instance of the FileInput class, which can be iterated.
+
+    The parameters are passed to the constructor of the FileInput class.
+    The returned instance, in addition to being an iterator,
+    keeps global state for the functions of this module.
+    """
+    global _state
+    # only one module-level input() sequence may be active at a time
+    if _state and _state._file:
+        raise RuntimeError("input() already active")
+    _state = FileInput(files, inplace, backup, mode=mode, openhook=openhook,
+                       encoding=encoding, errors=errors)
+    return _state
+
+def close():
+    """Close the sequence."""
+    global _state
+    # clear the module state before closing so a failure in close() cannot
+    # leave a stale, half-closed instance behind
+    state = _state
+    _state = None
+    if state:
+        state.close()
+
+def nextfile():
+    """
+    Close the current file so that the next iteration will read the first
+    line from the next file (if any); lines not read from the file will
+    not count towards the cumulative line count. The filename is not
+    changed until after the first line of the next file has been read.
+    Before the first line has been read, this function has no effect;
+    it cannot be used to skip the first file. After the last line of the
+    last file has been read, this function has no effect.
+    """
+    # delegate to the active module-level FileInput instance
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.nextfile()
+
+def filename():
+    """
+    Return the name of the file currently being read.
+    Before the first line has been read, returns None.
+    """
+    # delegate to the active module-level FileInput instance
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.filename()
+
+def lineno():
+    """
+    Return the cumulative line number of the line that has just been read.
+    Before the first line has been read, returns 0. After the last line
+    of the last file has been read, returns the line number of that line.
+    """
+    # delegate to the active module-level FileInput instance
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.lineno()
+
+def filelineno():
+    """
+    Return the line number in the current file. Before the first line
+    has been read, returns 0. After the last line of the last file has
+    been read, returns the line number of that line within the file.
+    """
+    # delegate to the active module-level FileInput instance
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.filelineno()
+
+def fileno():
+    """
+    Return the file number of the current file. When no file is currently
+    opened, returns -1.
+    """
+    # delegate to the active module-level FileInput instance
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.fileno()
+
+def isfirstline():
+    """
+    Returns true if the line just read is the first line of its file,
+    otherwise returns false.
+    """
+    # delegate to the active module-level FileInput instance
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.isfirstline()
+
+def isstdin():
+    """
+    Returns true if the last line was read from sys.stdin,
+    otherwise returns false.
+    """
+    # delegate to the active module-level FileInput instance
+    if not _state:
+        raise RuntimeError("no active input()")
+    return _state.isstdin()
+
+class FileInput:
+    """FileInput([files[, inplace[, backup]]], *, mode="r", openhook=None)
+
+    Class FileInput is the implementation of the module; its methods
+    filename(), lineno(), filelineno(), isfirstline(), isstdin(), fileno(),
+    nextfile() and close() correspond to the functions of the same name
+    in the module.
+    In addition it has a readline() method which returns the next
+    input line, and a __getitem__() method which implements the
+    sequence behavior. The sequence must be accessed in strictly
+    sequential order; random access and readline() cannot be mixed.
+    """
+
+    def __init__(self, files=None, inplace=False, backup="", *,
+                 mode="r", openhook=None, encoding=None, errors=None):
+        # normalize `files` to a tuple of filenames; default is sys.argv[1:],
+        # falling back to '-' (stdin) when no arguments were given
+        if isinstance(files, str):
+            files = (files,)
+        elif isinstance(files, os.PathLike):
+            files = (os.fspath(files), )
+        else:
+            if files is None:
+                files = sys.argv[1:]
+            if not files:
+                files = ('-',)
+            else:
+                files = tuple(files)
+        self._files = files
+        self._inplace = inplace
+        self._backup = backup
+        self._savestdout = None
+        self._output = None
+        self._filename = None
+        self._startlineno = 0
+        self._filelineno = 0
+        self._file = None
+        self._isstdin = False
+        self._backupfilename = None
+        self._encoding = encoding
+        self._errors = errors
+
+        # We can not use io.text_encoding() here because old openhook doesn't
+        # take encoding parameter.
+        if (sys.flags.warn_default_encoding and
+                "b" not in mode and encoding is None and openhook is None):
+            import warnings
+            warnings.warn("'encoding' argument not specified.",
+                          EncodingWarning, 2)
+
+        # restrict mode argument to reading modes
+        if mode not in ('r', 'rU', 'U', 'rb'):
+            raise ValueError("FileInput opening mode must be one of "
+                             "'r', 'rU', 'U' and 'rb'")
+        if 'U' in mode:
+            import warnings
+            warnings.warn("'U' mode is deprecated",
+                          DeprecationWarning, 2)
+        self._mode = mode
+        # matching write mode for in-place output files
+        self._write_mode = mode.replace('r', 'w') if 'U' not in mode else 'w'
+        if openhook:
+            if inplace:
+                raise ValueError("FileInput cannot use an opening hook in inplace mode")
+            if not callable(openhook):
+                raise ValueError("FileInput openhook must be callable")
+        self._openhook = openhook
+
+    def __del__(self):
+        self.close()
+
+    def close(self):
+        # emptying _files guarantees no further file is opened even if
+        # nextfile() raises
+        try:
+            self.nextfile()
+        finally:
+            self._files = ()
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, type, value, traceback):
+        self.close()
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        while True:
+            line = self._readline()
+            if line:
+                self._filelineno += 1
+                return line
+            if not self._file:
+                raise StopIteration
+            self.nextfile()
+            # repeat with next file
+
+    def __getitem__(self, i):
+        import warnings
+        warnings.warn(
+            "Support for indexing FileInput objects is deprecated. "
+            "Use iterator protocol instead.",
+            DeprecationWarning,
+            stacklevel=2
+        )
+        if i != self.lineno():
+            raise RuntimeError("accessing lines out of order")
+        try:
+            return self.__next__()
+        except StopIteration:
+            raise IndexError("end of input reached")
+
+    def nextfile(self):
+        # restore stdout first if it was redirected for in-place editing
+        savestdout = self._savestdout
+        self._savestdout = None
+        if savestdout:
+            sys.stdout = savestdout
+
+        output = self._output
+        self._output = None
+        try:
+            if output:
+                output.close()
+        finally:
+            file = self._file
+            self._file = None
+            try:
+                del self._readline  # restore FileInput._readline
+            except AttributeError:
+                pass
+            try:
+                if file and not self._isstdin:
+                    file.close()
+            finally:
+                # remove the backup file unless the caller asked to keep it
+                backupfilename = self._backupfilename
+                self._backupfilename = None
+                if backupfilename and not self._backup:
+                    try: os.unlink(backupfilename)
+                    except OSError: pass
+
+                self._isstdin = False
+
+    def readline(self):
+        while True:
+            line = self._readline()
+            if line:
+                self._filelineno += 1
+                return line
+            if not self._file:
+                # end of all files: return the empty sentinel ('' or b'')
+                return line
+            self.nextfile()
+            # repeat with next file
+
+    def _readline(self):
+        if not self._files:
+            if 'b' in self._mode:
+                return b''
+            else:
+                return ''
+        self._filename = self._files[0]
+        self._files = self._files[1:]
+        self._startlineno = self.lineno()
+        self._filelineno = 0
+        self._file = None
+        self._isstdin = False
+        self._backupfilename = 0  # falsy sentinel: no backup made yet
+
+        # EncodingWarning is emitted in __init__() already
+        if "b" not in self._mode:
+            encoding = self._encoding or "locale"
+        else:
+            encoding = None
+
+        if self._filename == '-':
+            self._filename = '<stdin>'
+            if 'b' in self._mode:
+                self._file = getattr(sys.stdin, 'buffer', sys.stdin)
+            else:
+                self._file = sys.stdin
+            self._isstdin = True
+        else:
+            if self._inplace:
+                # rename the original aside, read from the backup, and point
+                # both self._output and sys.stdout at the original name
+                self._backupfilename = (
+                    os.fspath(self._filename) + (self._backup or ".bak"))
+                try:
+                    os.unlink(self._backupfilename)
+                except OSError:
+                    pass
+                # The next few lines may raise OSError
+                os.rename(self._filename, self._backupfilename)
+                self._file = open(self._backupfilename, self._mode,
+                                  encoding=encoding, errors=self._errors)
+                try:
+                    perm = os.fstat(self._file.fileno()).st_mode
+                except OSError:
+                    self._output = open(self._filename, self._write_mode,
+                                        encoding=encoding, errors=self._errors)
+                else:
+                    # preserve the original file's permission bits
+                    mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
+                    if hasattr(os, 'O_BINARY'):
+                        mode |= os.O_BINARY
+
+                    fd = os.open(self._filename, mode, perm)
+                    self._output = os.fdopen(fd, self._write_mode,
+                                             encoding=encoding, errors=self._errors)
+                    try:
+                        os.chmod(self._filename, perm)
+                    except OSError:
+                        pass
+                self._savestdout = sys.stdout
+                sys.stdout = self._output
+            else:
+                # This may raise OSError
+                if self._openhook:
+                    # Custom hooks made previous to Python 3.10 didn't have
+                    # encoding argument
+                    if self._encoding is None:
+                        self._file = self._openhook(self._filename, self._mode)
+                    else:
+                        self._file = self._openhook(
+                            self._filename, self._mode, encoding=self._encoding, errors=self._errors)
+                else:
+                    self._file = open(self._filename, self._mode, encoding=encoding, errors=self._errors)
+        self._readline = self._file.readline  # hide FileInput._readline
+        return self._readline()
+
+    def filename(self):
+        return self._filename
+
+    def lineno(self):
+        # cumulative count: lines consumed in earlier files plus current file
+        return self._startlineno + self._filelineno
+
+    def filelineno(self):
+        return self._filelineno
+
+    def fileno(self):
+        if self._file:
+            try:
+                return self._file.fileno()
+            except ValueError:
+                # file object has no underlying descriptor (e.g. closed)
+                return -1
+        else:
+            return -1
+
+    def isfirstline(self):
+        return self._filelineno == 1
+
+    def isstdin(self):
+        return self._isstdin
+
+    __class_getitem__ = classmethod(GenericAlias)
+
+
+def hook_compressed(filename, mode, *, encoding=None, errors=None):
+    """Open hook that transparently decompresses .gz and .bz2 files.
+
+    Any other extension falls back to a plain open(); in text mode the
+    compressed stream is wrapped in a TextIOWrapper.
+    """
+    if encoding is None and "b" not in mode:  # EncodingWarning is emitted in FileInput() already.
+        encoding = "locale"
+    ext = os.path.splitext(filename)[1]
+    if ext == '.gz':
+        import gzip
+        stream = gzip.open(filename, mode)
+    elif ext == '.bz2':
+        import bz2
+        stream = bz2.BZ2File(filename, mode)
+    else:
+        return open(filename, mode, encoding=encoding, errors=errors)
+
+    # gzip and bz2 are binary mode by default.
+    if "b" not in mode:
+        stream = io.TextIOWrapper(stream, encoding=encoding, errors=errors)
+    return stream
+
+
+def hook_encoded(encoding, errors=None):
+    """Return an open hook that opens files with the given encoding/errors."""
+    def openhook(filename, mode):
+        return open(filename, mode, encoding=encoding, errors=errors)
+    return openhook
+
+
+def _test():
+    """Manual test: echo input lines annotated with line-number state.
+
+    Options: -i edits files in place; -b EXT keeps backups with extension EXT.
+    """
+    import getopt
+    inplace = False
+    backup = False
+    opts, args = getopt.getopt(sys.argv[1:], "ib:")
+    for o, a in opts:
+        if o == '-i': inplace = True
+        if o == '-b': backup = a
+    for line in input(args, inplace=inplace, backup=backup):
+        # strip a trailing newline (and carriage return) before printing
+        if line[-1:] == '\n': line = line[:-1]
+        if line[-1:] == '\r': line = line[:-1]
+        print("%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(),
+                                   isfirstline() and "*" or "", line))
+    print("%d: %s[%d]" % (lineno(), filename(), filelineno()))
+
+if __name__ == '__main__':
+    _test()
diff --git a/llava/lib/python3.10/heapq.py b/llava/lib/python3.10/heapq.py
new file mode 100644
index 0000000000000000000000000000000000000000..fabefd87f8bf8c804e8eb3155c1aacbf05dd02bd
--- /dev/null
+++ b/llava/lib/python3.10/heapq.py
@@ -0,0 +1,601 @@
+"""Heap queue algorithm (a.k.a. priority queue).
+
+Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
+all k, counting elements from 0. For the sake of comparison,
+non-existing elements are considered to be infinite. The interesting
+property of a heap is that a[0] is always its smallest element.
+
+Usage:
+
+heap = [] # creates an empty heap
+heappush(heap, item) # pushes a new item on the heap
+item = heappop(heap) # pops the smallest item from the heap
+item = heap[0] # smallest item on the heap without popping it
+heapify(x) # transforms list into a heap, in-place, in linear time
+item = heapreplace(heap, item) # pops and returns smallest item, and adds
+ # new item; the heap size is unchanged
+
+Our API differs from textbook heap algorithms as follows:
+
+- We use 0-based indexing. This makes the relationship between the
+ index for a node and the indexes for its children slightly less
+ obvious, but is more suitable since Python uses 0-based indexing.
+
+- Our heappop() method returns the smallest item, not the largest.
+
+These two make it possible to view the heap as a regular Python list
+without surprises: heap[0] is the smallest item, and heap.sort()
+maintains the heap invariant!
+"""
+
+# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger
+
+__about__ = """Heap queues
+
+[explanation by François Pinard]
+
+Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for
+all k, counting elements from 0. For the sake of comparison,
+non-existing elements are considered to be infinite. The interesting
+property of a heap is that a[0] is always its smallest element.
+
+The strange invariant above is meant to be an efficient memory
+representation for a tournament. The numbers below are `k', not a[k]:
+
+ 0
+
+ 1 2
+
+ 3 4 5 6
+
+ 7 8 9 10 11 12 13 14
+
+ 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
+
+
+In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. In
+a usual binary tournament we see in sports, each cell is the winner
+over the two cells it tops, and we can trace the winner down the tree
+to see all opponents s/he had. However, in many computer applications
+of such tournaments, we do not need to trace the history of a winner.
+To be more memory efficient, when a winner is promoted, we try to
+replace it by something else at a lower level, and the rule becomes
+that a cell and the two cells it tops contain three different items,
+but the top cell "wins" over the two topped cells.
+
+If this heap invariant is protected at all time, index 0 is clearly
+the overall winner. The simplest algorithmic way to remove it and
+find the "next" winner is to move some loser (let's say cell 30 in the
+diagram above) into the 0 position, and then percolate this new 0 down
+the tree, exchanging values, until the invariant is re-established.
+This is clearly logarithmic on the total number of items in the tree.
+By iterating over all items, you get an O(n ln n) sort.
+
+A nice feature of this sort is that you can efficiently insert new
+items while the sort is going on, provided that the inserted items are
+not "better" than the last 0'th element you extracted. This is
+especially useful in simulation contexts, where the tree holds all
+incoming events, and the "win" condition means the smallest scheduled
+time. When an event schedule other events for execution, they are
+scheduled into the future, so they can easily go into the heap. So, a
+heap is a good structure for implementing schedulers (this is what I
+used for my MIDI sequencer :-).
+
+Various structures for implementing schedulers have been extensively
+studied, and heaps are good for this, as they are reasonably speedy,
+the speed is almost constant, and the worst case is not much different
+than the average case. However, there are other representations which
+are more efficient overall, yet the worst cases might be terrible.
+
+Heaps are also very useful in big disk sorts. You most probably all
+know that a big sort implies producing "runs" (which are pre-sorted
+sequences, which size is usually related to the amount of CPU memory),
+followed by a merging passes for these runs, which merging is often
+very cleverly organised[1]. It is very important that the initial
+sort produces the longest runs possible. Tournaments are a good way
+to that. If, using all the memory available to hold a tournament, you
+replace and percolate items that happen to fit the current run, you'll
+produce runs which are twice the size of the memory for random input,
+and much better for input fuzzily ordered.
+
+Moreover, if you output the 0'th item on disk and get an input which
+may not fit in the current tournament (because the value "wins" over
+the last output value), it cannot fit in the heap, so the size of the
+heap decreases. The freed memory could be cleverly reused immediately
+for progressively building a second heap, which grows at exactly the
+same rate the first heap is melting. When the first heap completely
+vanishes, you switch heaps and start a new run. Clever and quite
+effective!
+
+In a word, heaps are useful memory structures to know. I use them in
+a few applications, and I think it is good to keep a `heap' module
+around. :-)
+
+--------------------
+[1] The disk balancing algorithms which are current, nowadays, are
+more annoying than clever, and this is a consequence of the seeking
+capabilities of the disks. On devices which cannot seek, like big
+tape drives, the story was quite different, and one had to be very
+clever to ensure (far in advance) that each tape movement will be the
+most effective possible (that is, will best participate at
+"progressing" the merge). Some tapes were even able to read
+backwards, and this was also used to avoid the rewinding time.
+Believe me, real good tape sorts were quite spectacular to watch!
+From all times, sorting has always been a Great Art! :-)
+"""
+
+__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge',
+ 'nlargest', 'nsmallest', 'heappushpop']
+
+def heappush(heap, item):
+    """Push item onto heap, maintaining the heap invariant."""
+    # New item starts at the end (a leaf) and is bubbled toward the root.
+    heap.append(item)
+    _siftdown(heap, 0, len(heap)-1)
+
+def heappop(heap):
+    """Pop the smallest item off the heap, maintaining the heap invariant."""
+    lastelt = heap.pop()    # raises appropriate IndexError if heap is empty
+    if heap:
+        returnitem = heap[0]
+        # Move the last leaf to the root and sift it down into place.
+        heap[0] = lastelt
+        _siftup(heap, 0)
+        return returnitem
+    return lastelt
+
+def heapreplace(heap, item):
+    """Pop and return the current smallest value, and add the new item.
+
+    This is more efficient than heappop() followed by heappush(), and can be
+    more appropriate when using a fixed-size heap.  Note that the value
+    returned may be larger than item!  That constrains reasonable uses of
+    this routine unless written as part of a conditional replacement:
+
+        if item > heap[0]:
+            item = heapreplace(heap, item)
+    """
+    returnitem = heap[0]    # raises appropriate IndexError if heap is empty
+    heap[0] = item
+    _siftup(heap, 0)
+    return returnitem
+
+def heappushpop(heap, item):
+    """Fast version of a heappush followed by a heappop."""
+    # Only displace the root when it is smaller than the new item;
+    # otherwise the new item would be popped right back out unchanged.
+    if heap and heap[0] < item:
+        item, heap[0] = heap[0], item
+        _siftup(heap, 0)
+    return item
+
+def heapify(x):
+    """Transform list into a heap, in-place, in O(len(x)) time."""
+    n = len(x)
+    # Transform bottom-up.  The largest index there's any point to looking at
+    # is the largest with a child index in-range, so must have 2*i + 1 < n,
+    # or i < (n-1)/2.  If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so
+    # j-1 is the largest, which is n//2 - 1.  If n is odd = 2*j+1, this is
+    # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1.
+    for i in reversed(range(n//2)):
+        _siftup(x, i)
+
+# The _*_max functions below are private max-heap mirrors of the public
+# min-heap operations; they are used by merge(reverse=True) and nsmallest().
+
+def _heappop_max(heap):
+    """Maxheap version of a heappop."""
+    lastelt = heap.pop()    # raises appropriate IndexError if heap is empty
+    if heap:
+        returnitem = heap[0]
+        heap[0] = lastelt
+        _siftup_max(heap, 0)
+        return returnitem
+    return lastelt
+
+def _heapreplace_max(heap, item):
+    """Maxheap version of a heappop followed by a heappush."""
+    returnitem = heap[0]    # raises appropriate IndexError if heap is empty
+    heap[0] = item
+    _siftup_max(heap, 0)
+    return returnitem
+
+def _heapify_max(x):
+    """Transform list into a maxheap, in-place, in O(len(x)) time."""
+    n = len(x)
+    for i in reversed(range(n//2)):
+        _siftup_max(x, i)
+
+# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos
+# is the index of a leaf with a possibly out-of-order value. Restore the
+# heap invariant.
+def _siftdown(heap, startpos, pos):
+    """Bubble heap[pos] up toward the root (never above startpos)."""
+    newitem = heap[pos]
+    # Follow the path to the root, moving parents down until finding a place
+    # newitem fits.
+    while pos > startpos:
+        parentpos = (pos - 1) >> 1
+        parent = heap[parentpos]
+        if newitem < parent:
+            # Parent is larger: shift it down and keep climbing.
+            heap[pos] = parent
+            pos = parentpos
+            continue
+        break
+    heap[pos] = newitem
+
+# The child indices of heap index pos are already heaps, and we want to make
+# a heap at index pos too. We do this by bubbling the smaller child of
+# pos up (and so on with that child's children, etc) until hitting a leaf,
+# then using _siftdown to move the oddball originally at index pos into place.
+#
+# We *could* break out of the loop as soon as we find a pos where newitem <=
+# both its children, but turns out that's not a good idea, and despite that
+# many books write the algorithm that way. During a heap pop, the last array
+# element is sifted in, and that tends to be large, so that comparing it
+# against values starting from the root usually doesn't pay (= usually doesn't
+# get us out of the loop early). See Knuth, Volume 3, where this is
+# explained and quantified in an exercise.
+#
+# Cutting the # of comparisons is important, since these routines have no
+# way to extract "the priority" from an array element, so that intelligence
+# is likely to be hiding in custom comparison methods, or in array elements
+# storing (priority, record) tuples. Comparisons are thus potentially
+# expensive.
+#
+# On random arrays of length 1000, making this change cut the number of
+# comparisons made by heapify() a little, and those made by exhaustive
+# heappop() a lot, in accord with theory. Here are typical results from 3
+# runs (3 just to demonstrate how small the variance is):
+#
+# Compares needed by heapify Compares needed by 1000 heappops
+# -------------------------- --------------------------------
+# 1837 cut to 1663 14996 cut to 8680
+# 1855 cut to 1659 14966 cut to 8678
+# 1847 cut to 1660 15024 cut to 8703
+#
+# Building the heap by using heappush() 1000 times instead required
+# 2198, 2148, and 2219 compares: heapify() is more efficient, when
+# you can use it.
+#
+# The total compares needed by list.sort() on the same lists were 8627,
+# 8627, and 8632 (this should be compared to the sum of heapify() and
+# heappop() compares): list.sort() is (unsurprisingly!) more efficient
+# for sorting.
+
+def _siftup(heap, pos):
+    """Sift heap[pos] down to a leaf, then bubble it back up into place."""
+    endpos = len(heap)
+    startpos = pos
+    newitem = heap[pos]
+    # Bubble up the smaller child until hitting a leaf.
+    childpos = 2*pos + 1    # leftmost child position
+    while childpos < endpos:
+        # Set childpos to index of smaller child.
+        rightpos = childpos + 1
+        if rightpos < endpos and not heap[childpos] < heap[rightpos]:
+            childpos = rightpos
+        # Move the smaller child up.
+        heap[pos] = heap[childpos]
+        pos = childpos
+        childpos = 2*pos + 1
+    # The leaf at pos is empty now.  Put newitem there, and bubble it up
+    # to its final resting place (by sifting its parents down).
+    heap[pos] = newitem
+    _siftdown(heap, startpos, pos)
+
+def _siftdown_max(heap, startpos, pos):
+    'Maxheap variant of _siftdown'
+    newitem = heap[pos]
+    # Follow the path to the root, moving parents down until finding a place
+    # newitem fits.  (Comparison is reversed relative to _siftdown.)
+    while pos > startpos:
+        parentpos = (pos - 1) >> 1
+        parent = heap[parentpos]
+        if parent < newitem:
+            heap[pos] = parent
+            pos = parentpos
+            continue
+        break
+    heap[pos] = newitem
+
+def _siftup_max(heap, pos):
+    'Maxheap variant of _siftup'
+    endpos = len(heap)
+    startpos = pos
+    newitem = heap[pos]
+    # Bubble up the larger child until hitting a leaf.
+    childpos = 2*pos + 1    # leftmost child position
+    while childpos < endpos:
+        # Set childpos to index of larger child.
+        rightpos = childpos + 1
+        if rightpos < endpos and not heap[rightpos] < heap[childpos]:
+            childpos = rightpos
+        # Move the larger child up.
+        heap[pos] = heap[childpos]
+        pos = childpos
+        childpos = 2*pos + 1
+    # The leaf at pos is empty now.  Put newitem there, and bubble it up
+    # to its final resting place (by sifting its parents down).
+    heap[pos] = newitem
+    _siftdown_max(heap, startpos, pos)
+
+def merge(*iterables, key=None, reverse=False):
+    '''Merge multiple sorted inputs into a single sorted output.
+
+    Similar to sorted(itertools.chain(*iterables)) but returns a generator,
+    does not pull the data into memory all at once, and assumes that each of
+    the input streams is already sorted (smallest to largest).
+
+    >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25]))
+    [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25]
+
+    If *key* is not None, applies a key function to each element to determine
+    its sort order.
+
+    >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len))
+    ['dog', 'cat', 'fish', 'horse', 'kangaroo']
+
+    '''
+
+    h = []
+    h_append = h.append
+
+    # reverse=True runs the same algorithm on the max-heap variants;
+    # 'direction' flips the per-iterable tiebreaker so ordering stays stable.
+    if reverse:
+        _heapify = _heapify_max
+        _heappop = _heappop_max
+        _heapreplace = _heapreplace_max
+        direction = -1
+    else:
+        _heapify = heapify
+        _heappop = heappop
+        _heapreplace = heapreplace
+        direction = 1
+
+    if key is None:
+        # Each heap entry is [value, order, next]; 'order' breaks ties so
+        # equal values never fall through to comparing bound methods.
+        for order, it in enumerate(map(iter, iterables)):
+            try:
+                next = it.__next__
+                h_append([next(), order * direction, next])
+            except StopIteration:
+                pass
+        _heapify(h)
+        while len(h) > 1:
+            try:
+                while True:
+                    value, order, next = s = h[0]
+                    yield value
+                    s[0] = next()           # raises StopIteration when exhausted
+                    _heapreplace(h, s)      # restore heap condition
+            except StopIteration:
+                _heappop(h)                 # remove empty iterator
+        if h:
+            # fast case when only a single iterator remains
+            value, order, next = h[0]
+            yield value
+            # next.__self__ is the iterator that 'next' is bound to.
+            yield from next.__self__
+        return
+
+    # Keyed variant: entries are [key(value), order, value, next] so the
+    # key is computed once per element.
+    for order, it in enumerate(map(iter, iterables)):
+        try:
+            next = it.__next__
+            value = next()
+            h_append([key(value), order * direction, value, next])
+        except StopIteration:
+            pass
+    _heapify(h)
+    while len(h) > 1:
+        try:
+            while True:
+                key_value, order, value, next = s = h[0]
+                yield value
+                value = next()
+                s[0] = key(value)
+                s[2] = value
+                _heapreplace(h, s)
+        except StopIteration:
+            _heappop(h)
+    if h:
+        key_value, order, value, next = h[0]
+        yield value
+        yield from next.__self__
+
+
+# Algorithm notes for nlargest() and nsmallest()
+# ==============================================
+#
+# Make a single pass over the data while keeping the k most extreme values
+# in a heap. Memory consumption is limited to keeping k values in a list.
+#
+# Measured performance for random inputs:
+#
+# number of comparisons
+# n inputs k-extreme values (average of 5 trials) % more than min()
+# ------------- ---------------- --------------------- -----------------
+# 1,000 100 3,317 231.7%
+# 10,000 100 14,046 40.5%
+# 100,000 100 105,749 5.7%
+# 1,000,000 100 1,007,751 0.8%
+# 10,000,000 100 10,009,401 0.1%
+#
+# Theoretical number of comparisons for k smallest of n random inputs:
+#
+# Step Comparisons Action
+# ---- -------------------------- ---------------------------
+# 1 1.66 * k heapify the first k-inputs
+# 2 n - k compare remaining elements to top of heap
+# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap
+# 4 k * lg2(k) - (k/2) final sort of the k most extreme values
+#
+# Combining and simplifying for a rough estimate gives:
+#
+# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k))
+#
+# Computing the number of comparisons for step 3:
+# -----------------------------------------------
+# * For the i-th new value from the iterable, the probability of being in the
+# k most extreme values is k/i. For example, the probability of the 101st
+# value seen being in the 100 most extreme values is 100/101.
+# * If the value is a new extreme value, the cost of inserting it into the
+# heap is 1 + log(k, 2).
+# * The probability times the cost gives:
+# (k/i) * (1 + log(k, 2))
+# * Summing across the remaining n-k elements gives:
+# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1))
+# * This reduces to:
+# (H(n) - H(k)) * k * (1 + log(k, 2))
+# * Where H(n) is the n-th harmonic number estimated by:
+# gamma = 0.5772156649
+# H(n) = log(n, e) + gamma + 1 / (2 * n)
+# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence
+# * Substituting the H(n) formula:
+# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2)
+#
+# Worst-case for step 3:
+# ----------------------
+# In the worst case, the input data is reversed sorted so that every new element
+# must be inserted in the heap:
+#
+# comparisons = 1.66 * k + log(k, 2) * (n - k)
+#
+# Alternative Algorithms
+# ----------------------
+# Other algorithms were not used because they:
+# 1) Took much more auxiliary memory,
+# 2) Made multiple passes over the data.
+# 3) Made more comparisons in common cases (small k, large n, semi-random input).
+# See the more detailed comparison of approach at:
+# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest
+
+def nsmallest(n, iterable, key=None):
+    """Find the n smallest elements in a dataset.
+
+    Equivalent to:  sorted(iterable, key=key)[:n]
+    """
+
+    # Short-cut for n==1 is to use min()
+    if n == 1:
+        it = iter(iterable)
+        sentinel = object()
+        result = min(it, default=sentinel, key=key)
+        return [] if result is sentinel else [result]
+
+    # When n>=size, it's faster to use sorted()
+    try:
+        size = len(iterable)
+    except (TypeError, AttributeError):
+        # Not a sized container (e.g. a generator); fall through to the
+        # single-pass heap algorithm below.
+        pass
+    else:
+        if n >= size:
+            return sorted(iterable, key=key)[:n]
+
+    # When key is none, use simpler decoration
+    if key is None:
+        it = iter(iterable)
+        # put the range(n) first so that zip() doesn't
+        # consume one too many elements from the iterator
+        result = [(elem, i) for i, elem in zip(range(n), it)]
+        if not result:
+            return result
+        # Max-heap of the n best so far: the root is the worst of the best,
+        # so each remaining element needs only one comparison against it.
+        _heapify_max(result)
+        top = result[0][0]
+        order = n
+        _heapreplace = _heapreplace_max
+        for elem in it:
+            if elem < top:
+                _heapreplace(result, (elem, order))
+                top, _order = result[0]
+                order += 1
+        result.sort()
+        return [elem for (elem, order) in result]
+
+    # General case, slowest method
+    it = iter(iterable)
+    result = [(key(elem), i, elem) for i, elem in zip(range(n), it)]
+    if not result:
+        return result
+    _heapify_max(result)
+    top = result[0][0]
+    order = n
+    _heapreplace = _heapreplace_max
+    for elem in it:
+        k = key(elem)
+        if k < top:
+            _heapreplace(result, (k, order, elem))
+            top, _order, _elem = result[0]
+            order += 1
+    result.sort()
+    return [elem for (k, order, elem) in result]
+
+def nlargest(n, iterable, key=None):
+    """Find the n largest elements in a dataset.
+
+    Equivalent to:  sorted(iterable, key=key, reverse=True)[:n]
+    """
+
+    # Short-cut for n==1 is to use max()
+    if n == 1:
+        it = iter(iterable)
+        sentinel = object()
+        result = max(it, default=sentinel, key=key)
+        return [] if result is sentinel else [result]
+
+    # When n>=size, it's faster to use sorted()
+    try:
+        size = len(iterable)
+    except (TypeError, AttributeError):
+        # Not a sized container; use the single-pass heap algorithm below.
+        pass
+    else:
+        if n >= size:
+            return sorted(iterable, key=key, reverse=True)[:n]
+
+    # When key is none, use simpler decoration
+    if key is None:
+        it = iter(iterable)
+        # Decreasing 'order' values (0, -1, -2, ...) break ties so earlier
+        # elements win, keeping the selection stable.
+        result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)]
+        if not result:
+            return result
+        # Min-heap of the n best so far: the root is the smallest kept value.
+        heapify(result)
+        top = result[0][0]
+        order = -n
+        _heapreplace = heapreplace
+        for elem in it:
+            if top < elem:
+                _heapreplace(result, (elem, order))
+                top, _order = result[0]
+                order -= 1
+        result.sort(reverse=True)
+        return [elem for (elem, order) in result]
+
+    # General case, slowest method
+    it = iter(iterable)
+    result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)]
+    if not result:
+        return result
+    heapify(result)
+    top = result[0][0]
+    order = -n
+    _heapreplace = heapreplace
+    for elem in it:
+        k = key(elem)
+        if top < k:
+            _heapreplace(result, (k, order, elem))
+            top, _order, _elem = result[0]
+            order -= 1
+    result.sort(reverse=True)
+    return [elem for (k, order, elem) in result]
+
+# If available, use C implementation
+try:
+    from _heapq import *
+except ImportError:
+    pass
+# The private max-heap helpers are imported individually (star-import does
+# not cover underscore names); each falls back to the pure-Python version
+# defined above when the C module does not provide it.
+try:
+    from _heapq import _heapreplace_max
+except ImportError:
+    pass
+try:
+    from _heapq import _heapify_max
+except ImportError:
+    pass
+try:
+    from _heapq import _heappop_max
+except ImportError:
+    pass
+
+
+# Run the docstring examples when executed as a script.
+if __name__ == "__main__":
+
+    import doctest # pragma: no cover
+    print(doctest.testmod()) # pragma: no cover
diff --git a/llava/lib/python3.10/imp.py b/llava/lib/python3.10/imp.py
new file mode 100644
index 0000000000000000000000000000000000000000..e02aaef344c6148b43ef3954689126319504dea9
--- /dev/null
+++ b/llava/lib/python3.10/imp.py
@@ -0,0 +1,346 @@
+"""This module provides the components needed to build your own __import__
+function. Undocumented functions are obsolete.
+
+In most cases it is preferred you consider using the importlib module's
+functionality over this module.
+
+"""
+# (Probably) need to stay in _imp
+from _imp import (lock_held, acquire_lock, release_lock,
+ get_frozen_object, is_frozen_package,
+ init_frozen, is_builtin, is_frozen,
+ _fix_co_filename)
+try:
+ from _imp import create_dynamic
+except ImportError:
+ # Platform doesn't support dynamic loading.
+ create_dynamic = None
+
+from importlib._bootstrap import _ERR_MSG, _exec, _load, _builtin_from_name
+from importlib._bootstrap_external import SourcelessFileLoader
+
+from importlib import machinery
+from importlib import util
+import importlib
+import os
+import sys
+import tokenize
+import types
+import warnings
+
+warnings.warn("the imp module is deprecated in favour of importlib and slated "
+ "for removal in Python 3.12; "
+ "see the module's documentation for alternative uses",
+ DeprecationWarning, stacklevel=2)
+
+# DEPRECATED
+SEARCH_ERROR = 0
+PY_SOURCE = 1
+PY_COMPILED = 2
+C_EXTENSION = 3
+PY_RESOURCE = 4
+PKG_DIRECTORY = 5
+C_BUILTIN = 6
+PY_FROZEN = 7
+PY_CODERESOURCE = 8
+IMP_HOOK = 9
+
+
+def new_module(name):
+    """**DEPRECATED**
+
+    Create a new module.
+
+    The module is not entered into sys.modules.
+
+    """
+    # Thin shim over the types module; kept only for imp's legacy API.
+    return types.ModuleType(name)
+
+
+def get_magic():
+    """**DEPRECATED**
+
+    Return the magic number for .pyc files.
+    """
+    # Delegates to importlib.util's constant.
+    return util.MAGIC_NUMBER
+
+
+def get_tag():
+    """Return the magic tag for .pyc files."""
+    return sys.implementation.cache_tag
+
+
+def cache_from_source(path, debug_override=None):
+    """**DEPRECATED**
+
+    Given the path to a .py file, return the path to its .pyc file.
+
+    The .py file does not need to exist; this simply returns the path to the
+    .pyc file calculated as if the .py file were imported.
+
+    If debug_override is not None, then it must be a boolean and is used in
+    place of sys.flags.optimize.
+
+    If sys.implementation.cache_tag is None then NotImplementedError is raised.
+
+    """
+    # Suppress any warnings emitted by the importlib implementation while
+    # delegating (the deprecated debug_override parameter triggers one).
+    with warnings.catch_warnings():
+        warnings.simplefilter('ignore')
+        return util.cache_from_source(path, debug_override)
+
+
+def source_from_cache(path):
+    """**DEPRECATED**
+
+    Given the path to a .pyc. file, return the path to its .py file.
+
+    The .pyc file does not need to exist; this simply returns the path to
+    the .py file calculated to correspond to the .pyc file.  If path does
+    not conform to PEP 3147 format, ValueError will be raised. If
+    sys.implementation.cache_tag is None then NotImplementedError is raised.
+
+    """
+    return util.source_from_cache(path)
+
+
+def get_suffixes():
+    """**DEPRECATED**"""
+    # Rebuild imp's historical (suffix, mode, type) triples from the
+    # importlib.machinery suffix lists.
+    extensions = [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES]
+    source = [(s, 'r', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES]
+    bytecode = [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES]
+
+    return extensions + source + bytecode
+
+
+class NullImporter:
+
+    """**DEPRECATED**
+
+    Null import object.
+
+    """
+
+    def __init__(self, path):
+        # Refuse empty paths and existing directories; any other path is
+        # accepted and this importer will simply never find a module.
+        if path == '':
+            raise ImportError('empty pathname', path='')
+        elif os.path.isdir(path):
+            raise ImportError('existing directory', path=path)
+
+    def find_module(self, fullname):
+        """Always returns None."""
+        return None
+
+
+class _HackedGetData:
+
+    """Compatibility support for 'file' arguments of various load_*()
+    functions.
+
+    Mixin used ahead of a loader class (see _LoadSourceCompatibility and
+    _LoadCompiledCompatibility) so that a caller-supplied file object is
+    read instead of reopening the path.
+    """
+
+    def __init__(self, fullname, path, file=None):
+        super().__init__(fullname, path)
+        self.file = file
+
+    def get_data(self, path):
+        """Gross hack to contort loader to deal w/ load_*()'s bad API."""
+        if self.file and path == self.path:
+            # The contract of get_data() requires us to return bytes. Reopen the
+            # file in binary mode if needed.
+            if not self.file.closed:
+                file = self.file
+                if 'b' not in file.mode:
+                    file.close()
+            if self.file.closed:
+                self.file = file = open(self.path, 'rb')
+
+            with file:
+                return file.read()
+        else:
+            # Different path (or no file supplied): defer to the real loader.
+            return super().get_data(path)
+
+
+class _LoadSourceCompatibility(_HackedGetData, machinery.SourceFileLoader):
+
+    """Compatibility support for implementing load_source()."""
+
+
+def load_source(name, pathname, file=None):
+    """**DEPRECATED**
+
+    Load and return a module from Python source, re-executing it into the
+    existing sys.modules entry when one is present.
+    """
+    loader = _LoadSourceCompatibility(name, pathname, file)
+    spec = util.spec_from_file_location(name, pathname, loader=loader)
+    if name in sys.modules:
+        module = _exec(spec, sys.modules[name])
+    else:
+        module = _load(spec)
+    # To allow reloading to potentially work, use a non-hacked loader which
+    # won't rely on a now-closed file object.
+    module.__loader__ = machinery.SourceFileLoader(name, pathname)
+    module.__spec__.loader = module.__loader__
+    return module
+
+
+class _LoadCompiledCompatibility(_HackedGetData, SourcelessFileLoader):
+
+    """Compatibility support for implementing load_compiled()."""
+
+
+def load_compiled(name, pathname, file=None):
+    """**DEPRECATED**"""
+    # Same shape as load_source(), but for .pyc files via SourcelessFileLoader.
+    loader = _LoadCompiledCompatibility(name, pathname, file)
+    spec = util.spec_from_file_location(name, pathname, loader=loader)
+    if name in sys.modules:
+        module = _exec(spec, sys.modules[name])
+    else:
+        module = _load(spec)
+    # To allow reloading to potentially work, use a non-hacked loader which
+    # won't rely on a now-closed file object.
+    module.__loader__ = SourcelessFileLoader(name, pathname)
+    module.__spec__.loader = module.__loader__
+    return module
+
+
+def load_package(name, path):
+    """**DEPRECATED**
+
+    Load and return a package rooted at *path* (a directory or an
+    __init__ file).  Raises ValueError if no __init__ file is found.
+    """
+    if os.path.isdir(path):
+        extensions = (machinery.SOURCE_SUFFIXES[:] +
+                      machinery.BYTECODE_SUFFIXES[:])
+        for extension in extensions:
+            init_path = os.path.join(path, '__init__' + extension)
+            if os.path.exists(init_path):
+                path = init_path
+                break
+        else:
+            # for/else: no suffix produced an existing __init__ file.
+            raise ValueError('{!r} is not a package'.format(path))
+    spec = util.spec_from_file_location(name, path,
+                                        submodule_search_locations=[])
+    if name in sys.modules:
+        # Re-execute into the existing module object.
+        return _exec(spec, sys.modules[name])
+    else:
+        return _load(spec)
+
+
+def load_module(name, file, filename, details):
+    """**DEPRECATED**
+
+    Load a module, given information returned by find_module().
+
+    The module name must include the full package name, if any.
+
+    """
+    # 'details' is the (suffix, mode, type) triple from find_module();
+    # the type code selects which legacy loader to dispatch to.
+    suffix, mode, type_ = details
+    if mode and (not mode.startswith(('r', 'U')) or '+' in mode):
+        # Only plain read modes are valid for importing.
+        raise ValueError('invalid file open mode {!r}'.format(mode))
+    elif file is None and type_ in {PY_SOURCE, PY_COMPILED}:
+        msg = 'file object required for import (type code {})'.format(type_)
+        raise ValueError(msg)
+    elif type_ == PY_SOURCE:
+        return load_source(name, filename, file)
+    elif type_ == PY_COMPILED:
+        return load_compiled(name, filename, file)
+    elif type_ == C_EXTENSION and load_dynamic is not None:
+        # load_dynamic is None on platforms without dynamic loading.
+        if file is None:
+            with open(filename, 'rb') as opened_file:
+                return load_dynamic(name, filename, opened_file)
+        else:
+            return load_dynamic(name, filename, file)
+    elif type_ == PKG_DIRECTORY:
+        return load_package(name, filename)
+    elif type_ == C_BUILTIN:
+        return init_builtin(name)
+    elif type_ == PY_FROZEN:
+        return init_frozen(name)
+    else:
+        msg = "Don't know how to import {} (type code {})".format(name, type_)
+        raise ImportError(msg, name=name)
+
+
+def find_module(name, path=None):
+    """**DEPRECATED**
+
+    Search for a module.
+
+    If path is omitted or None, search for a built-in, frozen or special
+    module and continue search in sys.path. The module name cannot
+    contain '.'; to search for a submodule of a package, pass the
+    submodule name and the package's __path__.
+
+    Returns a (file, path, (suffix, mode, type)) triple suitable for
+    load_module(); raises ImportError when nothing matches.
+    """
+    if not isinstance(name, str):
+        raise TypeError("'name' must be a str, not {}".format(type(name)))
+    elif not isinstance(path, (type(None), list)):
+        # Backwards-compatibility
+        raise RuntimeError("'path' must be None or a list, "
+                           "not {}".format(type(path)))
+
+    if path is None:
+        # Built-in and frozen modules have no file; signal them via the
+        # type code alone.
+        if is_builtin(name):
+            return None, None, ('', '', C_BUILTIN)
+        elif is_frozen(name):
+            return None, None, ('', '', PY_FROZEN)
+        else:
+            path = sys.path
+
+    for entry in path:
+        package_directory = os.path.join(entry, name)
+        # A directory containing an __init__ source/bytecode file is a package.
+        for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]:
+            package_file_name = '__init__' + suffix
+            file_path = os.path.join(package_directory, package_file_name)
+            if os.path.isfile(file_path):
+                return None, package_directory, ('', '', PKG_DIRECTORY)
+        # Otherwise look for a plain module file with any known suffix.
+        for suffix, mode, type_ in get_suffixes():
+            file_name = name + suffix
+            file_path = os.path.join(entry, file_name)
+            if os.path.isfile(file_path):
+                break
+        else:
+            continue
+        break  # Break out of outer loop when breaking out of inner loop.
+    else:
+        raise ImportError(_ERR_MSG.format(name), name=name)
+
+    encoding = None
+    if 'b' not in mode:
+        # Text mode: sniff the PEP 263 source encoding before reopening.
+        with open(file_path, 'rb') as file:
+            encoding = tokenize.detect_encoding(file.readline)[0]
+    file = open(file_path, mode, encoding=encoding)
+    return file, file_path, (suffix, mode, type_)
+
+
+def reload(module):
+    """**DEPRECATED**
+
+    Reload the module and return it.
+
+    The module must have been successfully imported before.
+
+    """
+    # Pure delegation to the supported importlib API.
+    return importlib.reload(module)
+
+
+def init_builtin(name):
+    """**DEPRECATED**
+
+    Load and return a built-in module by name, or None is such module doesn't
+    exist
+    """
+    try:
+        return _builtin_from_name(name)
+    except ImportError:
+        # Historical contract: missing built-ins yield None, not an error.
+        return None
+
+
+# load_dynamic only exists when the platform supports dynamic loading
+# (create_dynamic was importable from _imp above); otherwise it is None
+# and load_module() skips the C_EXTENSION branch.
+if create_dynamic:
+    def load_dynamic(name, path, file=None):
+        """**DEPRECATED**
+
+        Load an extension module.
+
+        The 'file' argument is accepted for API compatibility but unused.
+        """
+        import importlib.machinery
+        loader = importlib.machinery.ExtensionFileLoader(name, path)
+
+        # Issue #24748: Skip the sys.modules check in _load_module_shim;
+        # always load new extension
+        spec = importlib.machinery.ModuleSpec(
+            name=name, loader=loader, origin=path)
+        return _load(spec)
+
+else:
+    load_dynamic = None
diff --git a/llava/lib/python3.10/inspect.py b/llava/lib/python3.10/inspect.py
new file mode 100644
index 0000000000000000000000000000000000000000..2999a6019e0f60774dc4d2a1b422794cc84485a2
--- /dev/null
+++ b/llava/lib/python3.10/inspect.py
@@ -0,0 +1,3317 @@
+"""Get useful information from live Python objects.
+
+This module encapsulates the interface provided by the internal special
+attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion.
+It also provides some help for examining source code and class layout.
+
+Here are some of the useful functions provided by this module:
+
+ ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
+ isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
+ isroutine() - check object types
+ getmembers() - get members of an object that satisfy a given condition
+
+ getfile(), getsourcefile(), getsource() - find an object's source code
+ getdoc(), getcomments() - get documentation on an object
+ getmodule() - determine the module that an object came from
+ getclasstree() - arrange classes so as to represent their hierarchy
+
+ getargvalues(), getcallargs() - get info about function arguments
+ getfullargspec() - same, with support for Python 3 features
+ formatargvalues() - format an argument spec
+ getouterframes(), getinnerframes() - get info about frames
+ currentframe() - get the current stack frame
+ stack(), trace() - get info about frames on the stack or in a traceback
+
+ signature() - get a Signature object for the callable
+
+ get_annotations() - safely compute an object's annotations
+"""
+
+# This module is in the public domain. No warranties.
+
# Author credits; the e-mail addresses (angle-bracketed text) were lost
# to markup stripping in this copy and are restored from CPython.
__author__ = ('Ka-Ping Yee <ping@lfw.org>',
              'Yury Selivanov <yselivanov@sprymix.com>')
+
+import abc
+import ast
+import dis
+import collections.abc
+import enum
+import importlib.machinery
+import itertools
+import linecache
+import os
+import re
+import sys
+import tokenize
+import token
+import types
+import warnings
+import functools
+import builtins
+from operator import attrgetter
+from collections import namedtuple, OrderedDict
+
# Create constants for the compiler flags in Include/code.h
# We try to get them from dis to avoid duplication
mod_dict = globals()
# Each entry becomes a module-level int constant, e.g. CO_OPTIMIZED,
# CO_GENERATOR, CO_COROUTINE, CO_ASYNC_GENERATOR.
for k, v in dis.COMPILER_FLAG_NAMES.items():
    mod_dict["CO_" + v] = k

# See Include/object.h
# Type-flag bit tested by isabstract() below.
TPFLAGS_IS_ABSTRACT = 1 << 20
+
+
def get_annotations(obj, *, globals=None, locals=None, eval_str=False):
    """Compute the annotations dict for an object.

    obj may be a callable, class, or module.
    Passing in an object of any other type raises TypeError.

    Returns a dict.  get_annotations() returns a new dict every time
    it's called; calling it twice on the same object will return two
    different but equivalent dicts.

    This function handles several details for you:

    * If eval_str is true, values of type str will
      be un-stringized using eval().  This is intended
      for use with stringized annotations
      ("from __future__ import annotations").
    * If obj doesn't have an annotations dict, returns an
      empty dict.  (Functions and methods always have an
      annotations dict; classes, modules, and other types of
      callables may not.)
    * Ignores inherited annotations on classes.  If a class
      doesn't have its own annotations dict, returns an empty dict.
    * All accesses to object members and dict values are done
      using getattr() and dict.get() for safety.
    * Always, always, always returns a freshly-created dict.

    eval_str controls whether or not values of type str are replaced
    with the result of calling eval() on those values:

    * If eval_str is true, eval() is called on values of type str.
    * If eval_str is false (the default), values of type str are unchanged.

    globals and locals are passed in to eval(); see the documentation
    for eval() for more information.  If either globals or locals is
    None, this function may replace that value with a context-specific
    default, contingent on type(obj):

    * If obj is a module, globals defaults to obj.__dict__.
    * If obj is a class, globals defaults to
      sys.modules[obj.__module__].__dict__ and locals
      defaults to the obj class namespace.
    * If obj is a callable, globals defaults to obj.__globals__,
      although if obj is a wrapped function (using
      functools.update_wrapper()) it is first unwrapped.
    """
    if isinstance(obj, type):
        # class
        obj_dict = getattr(obj, '__dict__', None)
        if obj_dict and hasattr(obj_dict, 'get'):
            ann = obj_dict.get('__annotations__', None)
            if isinstance(ann, types.GetSetDescriptorType):
                # Found the '__annotations__' getset descriptor itself,
                # not a real dict -- treat as "no annotations of its own".
                ann = None
        else:
            ann = None

        obj_globals = None
        module_name = getattr(obj, '__module__', None)
        if module_name:
            module = sys.modules.get(module_name, None)
            if module:
                obj_globals = getattr(module, '__dict__', None)
        obj_locals = dict(vars(obj))
        unwrap = obj
    elif isinstance(obj, types.ModuleType):
        # module
        ann = getattr(obj, '__annotations__', None)
        obj_globals = getattr(obj, '__dict__')
        obj_locals = None
        unwrap = None
    elif callable(obj):
        # this includes types.Function, types.BuiltinFunctionType,
        # types.BuiltinMethodType, functools.partial, functools.singledispatch,
        # "class funclike" from Lib/test/test_inspect... on and on it goes.
        ann = getattr(obj, '__annotations__', None)
        obj_globals = getattr(obj, '__globals__', None)
        obj_locals = None
        unwrap = obj
    else:
        raise TypeError(f"{obj!r} is not a module, class, or callable.")

    if ann is None:
        return {}

    if not isinstance(ann, dict):
        raise ValueError(f"{obj!r}.__annotations__ is neither a dict nor None")

    if not ann:
        return {}

    if not eval_str:
        # No un-stringizing requested: just return a fresh copy.
        return dict(ann)

    if unwrap is not None:
        # Peel off functools wrappers/partials so eval() sees the
        # globals of the innermost function.
        while True:
            if hasattr(unwrap, '__wrapped__'):
                unwrap = unwrap.__wrapped__
                continue
            if isinstance(unwrap, functools.partial):
                unwrap = unwrap.func
                continue
            break
        if hasattr(unwrap, "__globals__"):
            obj_globals = unwrap.__globals__

    if globals is None:
        globals = obj_globals
    if locals is None:
        locals = obj_locals

    # Only string values are eval()'d; everything else passes through.
    return_value = {key:
        value if not isinstance(value, str) else eval(value, globals, locals)
        for key, value in ann.items() }
    return return_value
+
+
+# ----------------------------------------------------------- type-checking
def ismodule(object):
    """True when *object* is a module (an instance of types.ModuleType,
    as created by the import system), False otherwise."""
    return isinstance(object, types.ModuleType)
+
def isclass(object):
    """True when *object* is a class, i.e. an instance of type
    (including classes with custom metaclasses); False otherwise."""
    return isinstance(object, type)
+
def ismethod(object):
    """True for bound instance methods (types.MethodType): functions
    reached through an instance, carrying __func__ and __self__.
    Plain functions and builtins return False."""
    return isinstance(object, types.MethodType)
+
def ismethoddescriptor(object):
    """Return true if the object is a method descriptor.

    But not if ismethod() or isclass() or isfunction() are true.

    A method descriptor's type exposes __get__ but not __set__;
    int.__add__ is a classic example.  Objects already satisfying one of
    the stronger tests (classes, methods, functions) are excluded so the
    categories stay mutually exclusive -- those promise more attributes
    (e.g. __func__ for methods)."""
    if isclass(object) or ismethod(object) or isfunction(object):
        return False
    descriptor_type = type(object)
    return (hasattr(descriptor_type, "__get__")
            and not hasattr(descriptor_type, "__set__"))
+
def isdatadescriptor(object):
    """Return true if the object is a data descriptor.

    A data descriptor's type defines __set__ or __delete__ -- e.g.
    properties (defined in Python) and getsets/members (defined in C).
    Classes, methods and plain functions are excluded, mirroring
    ismethoddescriptor()."""
    if isclass(object) or ismethod(object) or isfunction(object):
        return False
    descriptor_type = type(object)
    return (hasattr(descriptor_type, "__set__")
            or hasattr(descriptor_type, "__delete__"))
+
if hasattr(types, 'MemberDescriptorType'):
    # CPython and equivalent: real member descriptors exist
    # (e.g. the entries created by __slots__).
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.MemberDescriptorType)
else:
    # Implementations without MemberDescriptorType: nothing can match.
    def ismemberdescriptor(object):
        """Return true if the object is a member descriptor.

        Member descriptors are specialized descriptors defined in extension
        modules."""
        return False
+
if hasattr(types, 'GetSetDescriptorType'):
    # CPython and equivalent: getset descriptors exist
    # (e.g. FrameType.f_locals).
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return isinstance(object, types.GetSetDescriptorType)
else:
    # Implementations without GetSetDescriptorType: nothing can match.
    def isgetsetdescriptor(object):
        """Return true if the object is a getset descriptor.

        getset descriptors are specialized descriptors defined in extension
        modules."""
        return False
+
def isfunction(object):
    """True for user-defined functions (types.FunctionType), including
    lambdas.  Builtins, bound methods and classes return False.

    Such objects carry __code__, __defaults__, __globals__,
    __annotations__ and __kwdefaults__."""
    return isinstance(object, types.FunctionType)
+
def _has_code_flag(f, flag):
    """Return true if ``f`` is a function (or a method or functools.partial
    wrapper wrapping a function) whose code object has the given ``flag``
    set in its flags."""
    # Unwrap bound methods down to the underlying function first...
    while ismethod(f):
        f = f.__func__
    # ...then strip any functools.partial layers.
    f = functools._unwrap_partial(f)
    if not (isfunction(f) or _signature_is_functionlike(f)):
        return False
    return bool(f.__code__.co_flags & flag)
+
def isgeneratorfunction(obj):
    """Return true if the object is a user-defined generator function.

    Generator function objects provide the same attributes as functions.
    See help(isfunction) for a list of attributes."""
    # CO_GENERATOR is injected into this module's globals at import time
    # from dis.COMPILER_FLAG_NAMES (see top of file).
    return _has_code_flag(obj, CO_GENERATOR)
+
def iscoroutinefunction(obj):
    """Return true if the object is a coroutine function.

    Coroutine functions are defined with "async def" syntax.
    """
    # CO_COROUTINE comes from dis.COMPILER_FLAG_NAMES (see top of file).
    return _has_code_flag(obj, CO_COROUTINE)
+
def isasyncgenfunction(obj):
    """Return true if the object is an asynchronous generator function.

    Asynchronous generator functions are defined with "async def"
    syntax and have "yield" expressions in their body.
    """
    # CO_ASYNC_GENERATOR comes from dis.COMPILER_FLAG_NAMES (see top of file).
    return _has_code_flag(obj, CO_ASYNC_GENERATOR)
+
def isasyncgen(object):
    """True for asynchronous-generator objects, i.e. the result of
    calling an "async def" function containing "yield"."""
    return isinstance(object, types.AsyncGeneratorType)
+
def isgenerator(object):
    """True for generator objects (types.GeneratorType).

    Generators support iteration and additionally provide gi_code,
    gi_frame, gi_running, plus send()/throw()/close() for driving the
    suspended frame."""
    return isinstance(object, types.GeneratorType)
+
def iscoroutine(object):
    """True for native coroutine objects produced by calling an
    "async def" function."""
    return isinstance(object, types.CoroutineType)
+
def isawaitable(object):
    """Return true if object can be passed to an ``await`` expression."""
    # Native coroutines are always awaitable.
    if isinstance(object, types.CoroutineType):
        return True
    # Generators decorated with @types.coroutine carry the
    # CO_ITERABLE_COROUTINE flag and are awaitable too.
    if (isinstance(object, types.GeneratorType) and
            bool(object.gi_code.co_flags & CO_ITERABLE_COROUTINE)):
        return True
    # Anything else must implement the Awaitable protocol (__await__).
    return isinstance(object, collections.abc.Awaitable)
+
def istraceback(object):
    """True for traceback objects (types.TracebackType), which expose
    tb_frame, tb_lasti, tb_lineno and tb_next."""
    return isinstance(object, types.TracebackType)
+
def isframe(object):
    """True for frame objects (types.FrameType), which expose f_back,
    f_builtins, f_code, f_globals, f_lasti, f_lineno, f_locals and
    f_trace."""
    return isinstance(object, types.FrameType)
+
def iscode(object):
    """True for code objects (types.CodeType), such as a function's
    __code__ attribute.

    Code objects expose the co_* attributes: co_argcount, co_code,
    co_consts, co_filename, co_firstlineno, co_flags, co_name,
    co_names, co_nlocals, co_varnames, and so on."""
    return isinstance(object, types.CodeType)
+
def isbuiltin(object):
    """True for built-in functions and built-in (C-implemented) methods,
    i.e. instances of types.BuiltinFunctionType such as len or
    [].append."""
    return isinstance(object, types.BuiltinFunctionType)
+
def isroutine(object):
    """Return true if the object is any kind of function or method:
    builtin, user-defined function, bound method, or method descriptor."""
    predicates = (isbuiltin, isfunction, ismethod, ismethoddescriptor)
    return any(check(object) for check in predicates)
+
def isabstract(object):
    """Return true if the object is an abstract base class (ABC)."""
    # Only classes can be abstract.
    if not isinstance(object, type):
        return False
    # Fast path: the type flag set by ABCMeta when abstract methods remain.
    if object.__flags__ & TPFLAGS_IS_ABSTRACT:
        return True
    if not issubclass(type(object), abc.ABCMeta):
        return False
    if hasattr(object, '__abstractmethods__'):
        # It looks like ABCMeta.__new__ has finished running;
        # TPFLAGS_IS_ABSTRACT should have been accurate.
        return False
    # It looks like ABCMeta.__new__ has not finished running yet; we're
    # probably in __init_subclass__. We'll look for abstractmethods manually.
    for name, value in object.__dict__.items():
        if getattr(value, "__isabstractmethod__", False):
            return True
    for base in object.__bases__:
        for name in getattr(base, "__abstractmethods__", ()):
            value = getattr(object, name, None)
            if getattr(value, "__isabstractmethod__", False):
                return True
    return False
+
def getmembers(object, predicate=None):
    """Return all members of an object as (name, value) pairs sorted by name.
    Optionally, only return members that satisfy a given predicate."""
    if isclass(object):
        mro = (object,) + getmro(object)
    else:
        mro = ()
    results = []
    processed = set()
    names = dir(object)
    # Add any DynamicClassAttributes to the list of names if object is a class;
    # this may result in duplicate entries if, for example, a virtual
    # attribute with the same name as a DynamicClassAttribute exists
    try:
        for base in object.__bases__:
            for k, v in base.__dict__.items():
                if isinstance(v, types.DynamicClassAttribute):
                    names.append(k)
    except AttributeError:
        pass
    for key in names:
        # First try to get the value via getattr.  Some descriptors don't
        # like calling their __get__ (see bug #1785), so fall back to
        # looking in the __dict__.
        try:
            value = getattr(object, key)
            # handle the duplicate key: force the mro fallback below so the
            # second occurrence resolves from a base's __dict__ instead
            if key in processed:
                raise AttributeError
        except AttributeError:
            for base in mro:
                if key in base.__dict__:
                    value = base.__dict__[key]
                    break
            else:
                # could be a (currently) missing slot member, or a buggy
                # __dir__; discard and move on
                continue
        if not predicate or predicate(value):
            results.append((key, value))
        processed.add(key)
    results.sort(key=lambda pair: pair[0])
    return results
+
# Result record returned by classify_class_attrs(): one entry per
# classified attribute of a class.
Attribute = namedtuple('Attribute', 'name kind defining_class object')
+
def classify_class_attrs(cls):
    """Return list of attribute-descriptor tuples.

    For each name in dir(cls), the return list contains a 4-tuple
    with these elements:

    0. The name (a string).

    1. The kind of attribute this is, one of these strings:
           'class method'    created via classmethod()
           'static method'   created via staticmethod()
           'property'        created via property()
           'method'          any other flavor of method or descriptor
           'data'            not a method

    2. The class which defined this attribute (a class).

    3. The object as obtained by calling getattr; if this fails, or if the
       resulting object does not live anywhere in the class' mro (including
       metaclasses) then the object is looked up in the defining class's
       dict (found by walking the mro).

    If one of the items in dir(cls) is stored in the metaclass it will now
    be discovered and not have None be listed as the class in which it was
    defined.  Any items whose home class cannot be discovered are skipped.
    """

    mro = getmro(cls)
    metamro = getmro(type(cls)) # for attributes stored in the metaclass
    metamro = tuple(cls for cls in metamro if cls not in (type, object))
    class_bases = (cls,) + mro
    all_bases = class_bases + metamro
    names = dir(cls)
    # Add any DynamicClassAttributes to the list of names;
    # this may result in duplicate entries if, for example, a virtual
    # attribute with the same name as a DynamicClassAttribute exists.
    for base in mro:
        for k, v in base.__dict__.items():
            if isinstance(v, types.DynamicClassAttribute) and v.fget is not None:
                names.append(k)
    result = []
    processed = set()

    for name in names:
        # Get the object associated with the name, and where it was defined.
        # Normal objects will be looked up with both getattr and directly in
        # its class' dict (in case getattr fails [bug #1785], and also to look
        # for a docstring).
        # For DynamicClassAttributes on the second pass we only look in the
        # class's dict.
        #
        # Getting an obj from the __dict__ sometimes reveals more than
        # using getattr.  Static and class methods are dramatic examples.
        homecls = None
        get_obj = None
        dict_obj = None
        if name not in processed:
            try:
                if name == '__dict__':
                    raise Exception("__dict__ is special, don't want the proxy")
                get_obj = getattr(cls, name)
            except Exception as exc:
                pass
            else:
                homecls = getattr(get_obj, "__objclass__", homecls)
                if homecls not in class_bases:
                    # if the resulting object does not live somewhere in the
                    # mro, drop it and search the mro manually
                    homecls = None
                    last_cls = None
                    # first look in the classes
                    for srch_cls in class_bases:
                        srch_obj = getattr(srch_cls, name, None)
                        if srch_obj is get_obj:
                            last_cls = srch_cls
                    # then check the metaclasses
                    for srch_cls in metamro:
                        try:
                            srch_obj = srch_cls.__getattr__(cls, name)
                        except AttributeError:
                            continue
                        if srch_obj is get_obj:
                            last_cls = srch_cls
                    if last_cls is not None:
                        homecls = last_cls
        # Second lookup: raw class/metaclass __dict__ entries, which expose
        # the original descriptor (staticmethod/classmethod/property).
        for base in all_bases:
            if name in base.__dict__:
                dict_obj = base.__dict__[name]
                if homecls not in metamro:
                    homecls = base
                break
        if homecls is None:
            # unable to locate the attribute anywhere, most likely due to
            # buggy custom __dir__; discard and move on
            continue
        obj = get_obj if get_obj is not None else dict_obj
        # Classify the object or its descriptor.
        if isinstance(dict_obj, (staticmethod, types.BuiltinMethodType)):
            kind = "static method"
            obj = dict_obj
        elif isinstance(dict_obj, (classmethod, types.ClassMethodDescriptorType)):
            kind = "class method"
            obj = dict_obj
        elif isinstance(dict_obj, property):
            kind = "property"
            obj = dict_obj
        elif isroutine(obj):
            kind = "method"
        else:
            kind = "data"
        result.append(Attribute(name, kind, homecls, obj))
        processed.add(name)
    return result
+
+# ----------------------------------------------------------- class helpers
+
def getmro(cls):
    """Return the method-resolution-order tuple for *cls*, starting with
    cls itself and ending at object."""
    return cls.__mro__
+
+# -------------------------------------------------------- function helpers
+
def unwrap(func, *, stop=None):
    """Follow the ``__wrapped__`` chain of *func* and return the last
    object in it.

    *stop* is an optional predicate over each candidate in the chain;
    when it returns a true value, unwrapping halts early and that
    candidate is returned (e.g. signature() stops at objects defining
    ``__signature__``).

    Raises ValueError if the chain contains a cycle (or exceeds the
    recursion limit in length).
    """
    if stop is None:
        def _still_wrapped(candidate):
            return hasattr(candidate, '__wrapped__')
    else:
        def _still_wrapped(candidate):
            return hasattr(candidate, '__wrapped__') and not stop(candidate)

    original = func  # for the error message
    # Track visited objects by id; keeping the objects themselves alive
    # guarantees ids are never recycled mid-walk.
    seen = {id(func): func}
    limit = sys.getrecursionlimit()
    while _still_wrapped(func):
        func = func.__wrapped__
        marker = id(func)
        if (marker in seen) or (len(seen) >= limit):
            raise ValueError('wrapper loop when unwrapping {!r}'.format(original))
        seen[marker] = func
    return func
+
+# -------------------------------------------------- source code extraction
def indentsize(line):
    """Number of columns of leading whitespace in *line*, with tabs
    expanded to spaces first."""
    expanded = line.expandtabs()
    return len(expanded) - len(expanded.lstrip())
+
def _findclass(func):
    """Walk func.__qualname__ from its defining module to locate the
    class that holds *func*; return None when the path does not end at
    a class (or the module is not importable)."""
    node = sys.modules.get(func.__module__)
    if node is None:
        return None
    for part in func.__qualname__.split('.')[:-1]:
        node = getattr(node, part)
    return node if isclass(node) else None
+
def _finddoc(obj):
    """Search the class hierarchy that *obj* belongs to for an inherited
    docstring; used by getdoc() when obj.__doc__ itself is None.
    Returns the docstring or None."""
    if isclass(obj):
        # Classes: first non-None __doc__ along the MRO (skipping object).
        for base in obj.__mro__:
            if base is not object:
                try:
                    doc = base.__doc__
                except AttributeError:
                    continue
                if doc is not None:
                    return doc
        return None

    # For the remaining cases, determine (cls, name) and fall through to
    # the MRO scan at the bottom.
    if ismethod(obj):
        name = obj.__func__.__name__
        self = obj.__self__
        if (isclass(self) and
            getattr(getattr(self, name, None), '__func__') is obj.__func__):
            # classmethod
            cls = self
        else:
            cls = self.__class__
    elif isfunction(obj):
        name = obj.__name__
        cls = _findclass(obj)
        if cls is None or getattr(cls, name) is not obj:
            return None
    elif isbuiltin(obj):
        name = obj.__name__
        self = obj.__self__
        if (isclass(self) and
            self.__qualname__ + '.' + name == obj.__qualname__):
            # classmethod
            cls = self
        else:
            cls = self.__class__
    # Should be tested before isdatadescriptor().
    elif isinstance(obj, property):
        func = obj.fget
        name = func.__name__
        cls = _findclass(func)
        if cls is None or getattr(cls, name) is not obj:
            return None
    elif ismethoddescriptor(obj) or isdatadescriptor(obj):
        name = obj.__name__
        cls = obj.__objclass__
        if getattr(cls, name) is not obj:
            return None
        if ismemberdescriptor(obj):
            # __slots__ may be a dict mapping slot name -> docstring.
            slots = getattr(cls, '__slots__', None)
            if isinstance(slots, dict) and name in slots:
                return slots[name]
    else:
        return None
    for base in cls.__mro__:
        try:
            doc = getattr(base, name).__doc__
        except AttributeError:
            continue
        if doc is not None:
            return doc
    return None
+
def getdoc(object):
    """Get the documentation string for an object.

    All tabs are expanded to spaces.  To clean up docstrings that are
    indented to line up with blocks of code, any whitespace than can be
    uniformly removed from the second line onwards is removed."""
    try:
        doc = object.__doc__
    except AttributeError:
        return None
    if doc is None:
        # Fall back to an inherited docstring from the class hierarchy.
        try:
            doc = _finddoc(object)
        except (AttributeError, TypeError):
            return None
    if not isinstance(doc, str):
        return None
    return cleandoc(doc)
+
def cleandoc(doc):
    """Normalize a docstring's indentation.

    Tabs are expanded, the common leading indentation is removed from
    every line after the first, the first line is left-stripped, and
    blank lines are trimmed from both ends.  Returns None if the string
    cannot be processed."""
    try:
        lines = doc.expandtabs().split('\n')
    except UnicodeError:
        return None

    # Smallest indent over the non-blank continuation lines.
    indents = []
    for raw in lines[1:]:
        body = raw.lstrip()
        if body:
            indents.append(len(raw) - len(body))
    margin = min(indents, default=sys.maxsize)

    # Remove indentation: first line fully, the rest by the margin.
    if lines:
        lines[0] = lines[0].lstrip()
    if margin < sys.maxsize:
        lines[1:] = [raw[margin:] for raw in lines[1:]]

    # Trim blank lines from the tail, then from the head.
    while lines and not lines[-1]:
        lines.pop()
    while lines and not lines[0]:
        lines.pop(0)
    return '\n'.join(lines)
+
def getfile(object):
    """Work out which source or compiled file an object was defined in.

    Raises TypeError for built-in objects and unsupported types; may
    raise OSError for __main__ classes with no source available."""
    if ismodule(object):
        if getattr(object, '__file__', None):
            return object.__file__
        raise TypeError('{!r} is a built-in module'.format(object))
    if isclass(object):
        if hasattr(object, '__module__'):
            module = sys.modules.get(object.__module__)
            if getattr(module, '__file__', None):
                return module.__file__
            if object.__module__ == '__main__':
                raise OSError('source code not available')
        raise TypeError('{!r} is a built-in class'.format(object))
    # Reduce methods -> functions -> code objects (and tracebacks ->
    # frames -> code objects); the code object knows its filename.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        return object.co_filename
    raise TypeError('module, class, method, function, traceback, frame, or '
                    'code object was expected, got {}'.format(
                    type(object).__name__))
+
def getmodulename(path):
    """Return the module name for a given file, or None.

    The basename must end in one of the import system's recognized
    suffixes (.py, bytecode, extension); longer suffixes are tried
    first so overlapping suffixes resolve correctly."""
    fname = os.path.basename(path)
    ordered = sorted(importlib.machinery.all_suffixes(),
                     key=len, reverse=True)
    for suffix in ordered:
        if fname.endswith(suffix):
            return fname[:-len(suffix)]
    return None
+
def getsourcefile(object):
    """Return the filename that can be used to locate an object's source.
    Return None if no way can be identified to get the source.
    """
    filename = getfile(object)
    # Map a bytecode filename back to the corresponding .py source name.
    all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:]
    all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:]
    if any(filename.endswith(s) for s in all_bytecode_suffixes):
        filename = (os.path.splitext(filename)[0] +
                    importlib.machinery.SOURCE_SUFFIXES[0])
    elif any(filename.endswith(s) for s in
                 importlib.machinery.EXTENSION_SUFFIXES):
        # Compiled extension modules have no Python source.
        return None
    if os.path.exists(filename):
        return filename
    # only return a non-existent filename if the module has a PEP 302 loader
    module = getmodule(object, filename)
    if getattr(module, '__loader__', None) is not None:
        return filename
    elif getattr(getattr(module, "__spec__", None), "loader", None) is not None:
        return filename
    # or it is in the linecache
    elif filename in linecache.cache:
        return filename
    # otherwise fall through and return None implicitly
+
def getabsfile(object, _filename=None):
    """Return a normalized, absolute path to the source or compiled file
    for an object.

    Normalizing (abspath + normcase) gives each object a single
    canonical origin path, so results can be compared and cached."""
    if _filename is None:
        _filename = getsourcefile(object) or getfile(object)
    absolute = os.path.abspath(_filename)
    return os.path.normcase(absolute)
+
# Cache: absolute (and realpath'd) source filename -> module __name__;
# filled in lazily by getmodule() while scanning sys.modules.
modulesbyfile = {}
# Cache: module name -> the raw module.__file__ last seen by getmodule(),
# used to skip modules that have already been mapped on later scans.
_filesbymodname = {}
+
def getmodule(object, _filename=None):
    """Return the module an object was defined in, or None if not found.

    _filename is an internal optimization: a pre-computed filename for
    the object, used as a cache key before doing any filesystem work."""
    if ismodule(object):
        return object
    if hasattr(object, '__module__'):
        return sys.modules.get(object.__module__)
    # Try the filename to modulename cache
    if _filename is not None and _filename in modulesbyfile:
        return sys.modules.get(modulesbyfile[_filename])
    # Try the cache again with the absolute file name
    try:
        file = getabsfile(object, _filename)
    except (TypeError, FileNotFoundError):
        return None
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Update the filename to module name cache and check yet again
    # Copy sys.modules in order to cope with changes while iterating
    for modname, module in sys.modules.copy().items():
        if ismodule(module) and hasattr(module, '__file__'):
            f = module.__file__
            if f == _filesbymodname.get(modname, None):
                # Have already mapped this module, so skip it
                continue
            _filesbymodname[modname] = f
            f = getabsfile(module)
            # Always map to the name the module knows itself by
            modulesbyfile[f] = modulesbyfile[
                os.path.realpath(f)] = module.__name__
    if file in modulesbyfile:
        return sys.modules.get(modulesbyfile[file])
    # Check the main module
    main = sys.modules['__main__']
    if not hasattr(object, '__name__'):
        return None
    if hasattr(main, object.__name__):
        mainobject = getattr(main, object.__name__)
        if mainobject is object:
            return main
    # Check builtins
    builtin = sys.modules['builtins']
    if hasattr(builtin, object.__name__):
        builtinobject = getattr(builtin, object.__name__)
        if builtinobject is object:
            return builtin
    # Nothing matched: fall through and return None implicitly.
+
+
class ClassFoundException(Exception):
    """Internal control-flow exception raised by _ClassFinder once the
    target class definition is located; args[0] carries the 0-based
    line number of the definition (or its first decorator)."""
+
+
class _ClassFinder(ast.NodeVisitor):
    """AST visitor locating the definition line of the class whose
    __qualname__ equals *qualname*.

    A name stack mirrors the qualified-name path while walking the tree;
    when a ClassDef's joined path matches, ClassFoundException is raised
    carrying the 0-based line number (findsource() catches it).
    """

    def __init__(self, qualname):
        self.stack = []
        self.qualname = qualname

    def visit_FunctionDef(self, node):
        # Qualified names of objects nested inside a function contain a
        # '<locals>' component (e.g. 'f.<locals>.A').  NOTE(review): this
        # literal had been corrupted to '' in this copy (angle-bracketed
        # text stripped); restored per CPython 3.10.
        self.stack.append(node.name)
        self.stack.append('<locals>')
        self.generic_visit(node)
        self.stack.pop()
        self.stack.pop()

    visit_AsyncFunctionDef = visit_FunctionDef

    def visit_ClassDef(self, node):
        self.stack.append(node.name)
        if self.qualname == '.'.join(self.stack):
            # Return the decorator for the class if present
            if node.decorator_list:
                line_number = node.decorator_list[0].lineno
            else:
                line_number = node.lineno

            # decrement by one since lines starts with indexing by zero
            line_number -= 1
            raise ClassFoundException(line_number)
        self.generic_visit(node)
        self.stack.pop()
+
+
def findsource(object):
    """Return the entire source file and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of all the lines
    in the file and the line number indexes a line in that list.  An OSError
    is raised if the source code cannot be retrieved."""

    file = getsourcefile(object)
    if file:
        # Invalidate cache if needed.
        linecache.checkcache(file)
    else:
        file = getfile(object)
        # Allow filenames in form of "<something>" to pass through.
        # `doctest` monkeypatches `linecache` module to enable
        # inspection, so let `linecache.getlines` to be called.
        if not (file.startswith('<') and file.endswith('>')):
            raise OSError('source code not available')

    module = getmodule(object, file)
    if module:
        lines = linecache.getlines(file, module.__dict__)
    else:
        lines = linecache.getlines(file)
    if not lines:
        raise OSError('could not get source code')

    if ismodule(object):
        return lines, 0

    if isclass(object):
        # Locate the class definition by walking the file's AST.
        qualname = object.__qualname__
        source = ''.join(lines)
        tree = ast.parse(source)
        class_finder = _ClassFinder(qualname)
        try:
            class_finder.visit(tree)
        except ClassFoundException as e:
            line_number = e.args[0]
            return lines, line_number
        else:
            raise OSError('could not find class definition')

    # Reduce everything else to a code object.
    if ismethod(object):
        object = object.__func__
    if isfunction(object):
        object = object.__code__
    if istraceback(object):
        object = object.tb_frame
    if isframe(object):
        object = object.f_code
    if iscode(object):
        if not hasattr(object, 'co_firstlineno'):
            raise OSError('could not find function definition')
        lnum = object.co_firstlineno - 1
        # Scan upward from co_firstlineno for a `def`/`async def`, a
        # bare-word `lambda`, or a decorator line.  NOTE(review): this
        # regex (and the following `while` line fused into it) had been
        # corrupted by markup stripping; restored per CPython 3.10.
        pat = re.compile(r'^(\s*def\s)|(\s*async\s+def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
        while lnum > 0:
            try:
                line = lines[lnum]
            except IndexError:
                raise OSError('lineno is out of bounds')
            if pat.match(line):
                break
            lnum = lnum - 1
        return lines, lnum
    raise OSError('could not find code object')
+
def getcomments(object):
    """Get lines of comments immediately preceding an object's source code.

    Returns None when source can't be found.
    """
    try:
        lines, lnum = findsource(object)
    except (OSError, TypeError):
        return None

    if ismodule(object):
        # Look for a comment block at the top of the file.
        start = 0
        # Skip a leading shebang line.
        if lines and lines[0][:2] == '#!': start = 1
        # Skip blank lines and bare '#' lines before the comment block.
        while start < len(lines) and lines[start].strip() in ('', '#'):
            start = start + 1
        if start < len(lines) and lines[start][:1] == '#':
            comments = []
            end = start
            # Collect the contiguous run of column-0 comment lines.
            while end < len(lines) and lines[end][:1] == '#':
                comments.append(lines[end].expandtabs())
                end = end + 1
            return ''.join(comments)

    # Look for a preceding block of comments at the same indentation.
    elif lnum > 0:
        indent = indentsize(lines[lnum])
        end = lnum - 1
        if end >= 0 and lines[end].lstrip()[:1] == '#' and \
            indentsize(lines[end]) == indent:
            # Walk upwards, prepending comment lines at the same indent.
            comments = [lines[end].expandtabs().lstrip()]
            if end > 0:
                end = end - 1
                comment = lines[end].expandtabs().lstrip()
                while comment[:1] == '#' and indentsize(lines[end]) == indent:
                    comments[:0] = [comment]
                    end = end - 1
                    if end < 0: break
                    comment = lines[end].expandtabs().lstrip()
            # Trim bare '#' lines from both ends of the collected block.
            while comments and comments[0].strip() == '#':
                comments[:1] = []
            while comments and comments[-1].strip() == '#':
                comments[-1:] = []
            return ''.join(comments)
+
class EndOfBlock(Exception): pass  # control-flow signal: BlockFinder -> getblock
+
class BlockFinder:
    """Provide a tokeneater() method to detect the end of a code block."""
    def __init__(self):
        self.indent = 0            # current INDENT/DEDENT nesting depth
        self.islambda = False      # block being scanned is a lambda
        self.started = False       # saw the def/class/lambda keyword
        self.passline = False      # skipping to the end of the current line
        self.indecorator = False   # inside a decorator preceding the block
        self.last = 1              # last line known to belong to the block
        self.body_col0 = None      # column of the block body's first token

    def tokeneater(self, type, token, srowcol, erowcol, line):
        if not self.started and not self.indecorator:
            # skip any decorators
            if token == "@":
                self.indecorator = True
            # look for the first "def", "class" or "lambda"
            elif token in ("def", "class", "lambda"):
                if token == "lambda":
                    self.islambda = True
                self.started = True
            self.passline = True    # skip to the end of the line
        elif type == tokenize.NEWLINE:
            self.passline = False   # stop skipping when a NEWLINE is seen
            self.last = srowcol[0]
            if self.islambda:       # lambdas always end at the first NEWLINE
                raise EndOfBlock
            # hitting a NEWLINE when in a decorator without args
            # ends the decorator
            if self.indecorator:
                self.indecorator = False
        elif self.passline:
            pass
        elif type == tokenize.INDENT:
            if self.body_col0 is None and self.started:
                self.body_col0 = erowcol[1]
            self.indent = self.indent + 1
            self.passline = True
        elif type == tokenize.DEDENT:
            self.indent = self.indent - 1
            # the end of matching indent/dedent pairs end a block
            # (note that this only works for "def"/"class" blocks,
            #  not e.g. for "if: else:" or "try: finally:" blocks)
            if self.indent <= 0:
                raise EndOfBlock
        elif type == tokenize.COMMENT:
            if self.body_col0 is not None and srowcol[1] >= self.body_col0:
                # Include comments if indented at least as much as the block
                self.last = srowcol[0]
        elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL):
            # any other token on the same indentation level end the previous
            # block as well, except the pseudo-tokens COMMENT and NL.
            raise EndOfBlock
+
def getblock(lines):
    """Extract the block of code at the top of the given list of lines."""
    finder = BlockFinder()
    try:
        for tok in tokenize.generate_tokens(iter(lines).__next__):
            finder.tokeneater(*tok)
    except (EndOfBlock, IndentationError):
        # EndOfBlock marks a completed block; IndentationError means the
        # source ran out mid-block -- keep whatever was consumed so far.
        pass
    return lines[:finder.last]
+
def getsourcelines(object):
    """Return a list of source lines and starting line number for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a list of the lines
    corresponding to the object and the line number indicates where in the
    original source file the first line of code was found.  An OSError is
    raised if the source code cannot be retrieved."""
    object = unwrap(object)
    lines, lnum = findsource(object)

    if istraceback(object):
        object = object.tb_frame

    # for module or frame that corresponds to module, return all source lines
    # (the "<module>" literal had been stripped to "" by text mangling;
    # module-level code objects are named "<module>", restored)
    if (ismodule(object) or
        (isframe(object) and object.f_code.co_name == "<module>")):
        return lines, 0
    else:
        return getblock(lines[lnum:]), lnum + 1
+
def getsource(object):
    """Return the text of the source code for an object.

    The argument may be a module, class, method, function, traceback, frame,
    or code object.  The source code is returned as a single string.  An
    OSError is raised if the source code cannot be retrieved."""
    lines, _ = getsourcelines(object)
    return ''.join(lines)
+
+# --------------------------------------------------- class tree extraction
def walktree(classes, children, parent):
    """Recursive helper function for getclasstree()."""
    # Deterministic ordering: sort in place by (module, name).
    classes.sort(key=attrgetter('__module__', '__name__'))
    results = []
    for klass in classes:
        results.append((klass, klass.__bases__))
        subclasses = children.get(klass)
        if subclasses is not None:
            # Nested list of the subtree rooted at this class.
            results.append(walktree(subclasses, children, klass))
    return results
+
def getclasstree(classes, unique=False):
    """Arrange the given list of classes into a hierarchy of nested lists.

    Where a nested list appears, it contains classes derived from the class
    whose entry immediately precedes the list.  Each entry is a 2-tuple
    containing a class and a tuple of its base classes.  If the 'unique'
    argument is true, exactly one entry appears in the returned structure
    for each class in the given list.  Otherwise, classes using multiple
    inheritance and their descendants will appear multiple times."""
    children = {}
    roots = []
    for klass in classes:
        bases = klass.__bases__
        if bases:
            for base in bases:
                siblings = children.setdefault(base, [])
                if klass not in siblings:
                    siblings.append(klass)
                # With unique=True, record the class under its first
                # listed base only.
                if unique and base in classes:
                    break
        elif klass not in roots:
            roots.append(klass)
    # Bases that are not themselves in 'classes' become extra roots.
    for base in children:
        if base not in classes:
            roots.append(base)
    return walktree(roots, children, None)
+
+# ------------------------------------------------ argument list extraction
Arguments = namedtuple('Arguments', 'args, varargs, varkw')

def getargs(co):
    """Get information about the arguments accepted by a code object.

    Three things are returned: (args, varargs, varkw), where
    'args' is the list of argument names.  Keyword-only arguments are
    appended.  'varargs' and 'varkw' are the names of the * and **
    arguments or None."""
    if not iscode(co):
        raise TypeError('{!r} is not a code object'.format(co))

    names = co.co_varnames
    nposargs = co.co_argcount
    nkwonly = co.co_kwonlyargcount
    args = list(names[:nposargs])
    kwonlyargs = list(names[nposargs:nposargs + nkwonly])

    nargs = nposargs + nkwonly
    varargs = None
    if co.co_flags & CO_VARARGS:
        # The *args name follows the regular and keyword-only parameters.
        varargs = names[nargs]
        nargs += 1
    varkw = None
    if co.co_flags & CO_VARKEYWORDS:
        # The **kwargs name comes right after *args, when present.
        varkw = names[nargs]
    return Arguments(args + kwonlyargs, varargs, varkw)
+
ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')

def getargspec(func):
    """Get the names and default values of a function's parameters.

    A tuple of four things is returned: (args, varargs, keywords, defaults).
    'args' is a list of the argument names, including keyword-only argument names.
    'varargs' and 'keywords' are the names of the * and ** parameters or None.
    'defaults' is an n-tuple of the default values of the last n parameters.

    This function is deprecated, as it does not support annotations or
    keyword-only parameters and will raise ValueError if either is present
    on the supplied callable.

    For a more structured introspection API, use inspect.signature() instead.

    Alternatively, use getfullargspec() for an API with a similar namedtuple
    based interface, but full support for annotations and keyword-only
    parameters.

    Deprecated since Python 3.5, use `inspect.getfullargspec()`.
    """
    warnings.warn("inspect.getargspec() is deprecated since Python 3.0, "
                  "use inspect.signature() or inspect.getfullargspec()",
                  DeprecationWarning, stacklevel=2)
    full = getfullargspec(func)
    if full.kwonlyargs or full.annotations:
        raise ValueError("Function has keyword-only parameters or annotations"
                         ", use inspect.signature() API which can support them")
    return ArgSpec(full.args, full.varargs, full.varkw, full.defaults)
+
FullArgSpec = namedtuple('FullArgSpec',
    'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations')

def getfullargspec(func):
    """Get the names and default values of a callable object's parameters.

    A tuple of seven things is returned:
    (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations).
    'args' is a list of the parameter names.
    'varargs' and 'varkw' are the names of the * and ** parameters or None.
    'defaults' is an n-tuple of the default values of the last n parameters.
    'kwonlyargs' is a list of keyword-only parameter names.
    'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults.
    'annotations' is a dictionary mapping parameter names to annotations.

    Notable differences from inspect.signature():
      - the "self" parameter is always reported, even for bound methods
      - wrapper chains defined by __wrapped__ *not* unwrapped automatically
    """
    try:
        # Re: `skip_bound_arg=False`
        #
        # There is a notable difference in behaviour between getfullargspec
        # and Signature: the former always returns 'self' parameter for bound
        # methods, whereas the Signature always shows the actual calling
        # signature of the passed object.
        #
        # To simulate this behaviour, we "unbind" bound methods, to trick
        # inspect.signature to always return their first parameter ("self",
        # usually)

        # Re: `follow_wrapper_chains=False`
        #
        # getfullargspec() historically ignored __wrapped__ attributes,
        # so we ensure that remains the case in 3.3+

        sig = _signature_from_callable(func,
                                       follow_wrapper_chains=False,
                                       skip_bound_arg=False,
                                       sigcls=Signature,
                                       eval_str=False)
    except Exception as ex:
        # Most of the times 'signature' will raise ValueError.
        # But, it can also raise AttributeError, and, maybe something
        # else. So to be fully backwards compatible, we catch all
        # possible exceptions here, and reraise a TypeError.
        raise TypeError('unsupported callable') from ex

    args = []
    varargs = None
    varkw = None
    posonlyargs = []
    kwonlyargs = []
    annotations = {}
    defaults = ()
    kwdefaults = {}

    if sig.return_annotation is not sig.empty:
        annotations['return'] = sig.return_annotation

    # Translate each Parameter back into the flat namedtuple fields.
    for param in sig.parameters.values():
        kind = param.kind
        name = param.name

        if kind is _POSITIONAL_ONLY:
            posonlyargs.append(name)
            if param.default is not param.empty:
                defaults += (param.default,)
        elif kind is _POSITIONAL_OR_KEYWORD:
            args.append(name)
            if param.default is not param.empty:
                defaults += (param.default,)
        elif kind is _VAR_POSITIONAL:
            varargs = name
        elif kind is _KEYWORD_ONLY:
            kwonlyargs.append(name)
            if param.default is not param.empty:
                kwdefaults[name] = param.default
        elif kind is _VAR_KEYWORD:
            varkw = name

        if param.annotation is not param.empty:
            annotations[name] = param.annotation

    if not kwdefaults:
        # compatibility with 'func.__kwdefaults__'
        kwdefaults = None

    if not defaults:
        # compatibility with 'func.__defaults__'
        defaults = None

    # Positional-only names are folded into 'args' (getfullargspec has no
    # separate field for them).
    return FullArgSpec(posonlyargs + args, varargs, varkw, defaults,
                       kwonlyargs, kwdefaults, annotations)
+
+
ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals')

def getargvalues(frame):
    """Get information about arguments passed into a particular frame.

    A tuple of four things is returned: (args, varargs, varkw, locals).
    'args' is a list of the argument names.
    'varargs' and 'varkw' are the names of the * and ** arguments or None.
    'locals' is the locals dictionary of the given frame."""
    arg_names, star_args, star_kwargs = getargs(frame.f_code)
    return ArgInfo(arg_names, star_args, star_kwargs, frame.f_locals)
+
def formatannotation(annotation, base_module=None):
    """Return a display string for an annotation.

    typing objects are rendered with every 'typing.' prefix removed;
    classes are shown by qualified name, prefixed with their module
    unless it is 'builtins' or equals *base_module*; generic aliases
    use str(); anything else falls back to repr().
    """
    if getattr(annotation, '__module__', None) == 'typing':
        def strip_typing(match):
            return match.group().removeprefix('typing.')
        return re.sub(r'[\w\.]+', strip_typing, repr(annotation))
    if isinstance(annotation, types.GenericAlias):
        # e.g. list[int] -- str() already gives the readable form
        return str(annotation)
    if isinstance(annotation, type):
        module = annotation.__module__
        if module in ('builtins', base_module):
            return annotation.__qualname__
        return module + '.' + annotation.__qualname__
    return repr(annotation)
+
def formatannotationrelativeto(object):
    """Return a one-argument formatannotation bound to *object*'s module,
    so annotations from that module are rendered without a module prefix."""
    module = getattr(object, '__module__', None)
    # NOTE: the inner function's name is kept, since it is observable on
    # the returned callable.
    def _formatannotation(annotation):
        return formatannotation(annotation, module)
    return _formatannotation
+
def formatargspec(args, varargs=None, varkw=None, defaults=None,
                  kwonlyargs=(), kwonlydefaults={}, annotations={},
                  formatarg=str,
                  formatvarargs=lambda name: '*' + name,
                  formatvarkw=lambda name: '**' + name,
                  formatvalue=lambda value: '=' + repr(value),
                  formatreturns=lambda text: ' -> ' + text,
                  formatannotation=formatannotation):
    """Format an argument spec from the values returned by getfullargspec.

    The first seven arguments are (args, varargs, varkw, defaults,
    kwonlyargs, kwonlydefaults, annotations).  The other five arguments
    are the corresponding optional formatting functions that are called to
    turn names and values into strings.  The last argument is an optional
    function to format the sequence of arguments.

    Deprecated since Python 3.5: use the `signature` function and `Signature`
    objects.
    """

    from warnings import warn

    warn("`formatargspec` is deprecated since Python 3.5. Use `signature` and "
         "the `Signature` object directly",
         DeprecationWarning,
         stacklevel=2)

    def formatargandannotation(arg):
        # "name: annotation" when an annotation exists, else just "name".
        result = formatarg(arg)
        if arg in annotations:
            result += ': ' + formatannotation(annotations[arg])
        return result
    specs = []
    if defaults:
        # Defaults align with the trailing positional parameters.
        firstdefault = len(args) - len(defaults)
    for i, arg in enumerate(args):
        spec = formatargandannotation(arg)
        if defaults and i >= firstdefault:
            spec = spec + formatvalue(defaults[i - firstdefault])
        specs.append(spec)
    if varargs is not None:
        specs.append(formatvarargs(formatargandannotation(varargs)))
    else:
        if kwonlyargs:
            # A bare '*' separator is needed when there are keyword-only
            # parameters but no *args.
            specs.append('*')
    if kwonlyargs:
        for kwonlyarg in kwonlyargs:
            spec = formatargandannotation(kwonlyarg)
            if kwonlydefaults and kwonlyarg in kwonlydefaults:
                spec += formatvalue(kwonlydefaults[kwonlyarg])
            specs.append(spec)
    if varkw is not None:
        specs.append(formatvarkw(formatargandannotation(varkw)))
    result = '(' + ', '.join(specs) + ')'
    if 'return' in annotations:
        result += formatreturns(formatannotation(annotations['return']))
    return result
+
def formatargvalues(args, varargs, varkw, locals,
                    formatarg=str,
                    formatvarargs=lambda name: '*' + name,
                    formatvarkw=lambda name: '**' + name,
                    formatvalue=lambda value: '=' + repr(value)):
    """Format an argument spec from the 4 values returned by getargvalues.

    The first four arguments are (args, varargs, varkw, locals).  The
    next four arguments are the corresponding optional formatting functions
    that are called to turn names and values into strings."""
    # Regular parameters, each rendered as "name=value".
    specs = [formatarg(name) + formatvalue(locals[name]) for name in args]
    if varargs:
        specs.append(formatvarargs(varargs) + formatvalue(locals[varargs]))
    if varkw:
        specs.append(formatvarkw(varkw) + formatvalue(locals[varkw]))
    return '(' + ', '.join(specs) + ')'
+
+def _missing_arguments(f_name, argnames, pos, values):
+ names = [repr(name) for name in argnames if name not in values]
+ missing = len(names)
+ if missing == 1:
+ s = names[0]
+ elif missing == 2:
+ s = "{} and {}".format(*names)
+ else:
+ tail = ", {} and {}".format(*names[-2:])
+ del names[-2:]
+ s = ", ".join(names) + tail
+ raise TypeError("%s() missing %i required %s argument%s: %s" %
+ (f_name, missing,
+ "positional" if pos else "keyword-only",
+ "" if missing == 1 else "s", s))
+
+def _too_many(f_name, args, kwonly, varargs, defcount, given, values):
+ atleast = len(args) - defcount
+ kwonly_given = len([arg for arg in kwonly if arg in values])
+ if varargs:
+ plural = atleast != 1
+ sig = "at least %d" % (atleast,)
+ elif defcount:
+ plural = True
+ sig = "from %d to %d" % (atleast, len(args))
+ else:
+ plural = len(args) != 1
+ sig = str(len(args))
+ kwonly_sig = ""
+ if kwonly_given:
+ msg = " positional argument%s (and %d keyword-only argument%s)"
+ kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given,
+ "s" if kwonly_given != 1 else ""))
+ raise TypeError("%s() takes %s positional argument%s but %d%s %s given" %
+ (f_name, sig, "s" if plural else "", given, kwonly_sig,
+ "was" if given == 1 and not kwonly_given else "were"))
+
def getcallargs(func, /, *positional, **named):
    """Get the mapping of arguments to values.

    A dict is returned, with keys the function argument names (including the
    names of the * and ** arguments, if any), and values the respective bound
    values from 'positional' and 'named'."""
    spec = getfullargspec(func)
    args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec
    f_name = func.__name__
    arg2value = {}


    if ismethod(func) and func.__self__ is not None:
        # implicit 'self' (or 'cls' for classmethods) argument
        positional = (func.__self__,) + positional
    num_pos = len(positional)
    num_args = len(args)
    num_defaults = len(defaults) if defaults else 0

    # Bind positional arguments to the leading parameter names.
    n = min(num_pos, num_args)
    for i in range(n):
        arg2value[args[i]] = positional[i]
    if varargs:
        # Surplus positionals go to *args.
        arg2value[varargs] = tuple(positional[n:])
    possible_kwargs = set(args + kwonlyargs)
    if varkw:
        arg2value[varkw] = {}
    for kw, value in named.items():
        if kw not in possible_kwargs:
            if not varkw:
                raise TypeError("%s() got an unexpected keyword argument %r" %
                                (f_name, kw))
            arg2value[varkw][kw] = value
            continue
        if kw in arg2value:
            raise TypeError("%s() got multiple values for argument %r" %
                            (f_name, kw))
        arg2value[kw] = value
    if num_pos > num_args and not varargs:
        _too_many(f_name, args, kwonlyargs, varargs, num_defaults,
                   num_pos, arg2value)
    if num_pos < num_args:
        # Fill in defaults and diagnose missing required positionals.
        req = args[:num_args - num_defaults]
        for arg in req:
            if arg not in arg2value:
                _missing_arguments(f_name, req, True, arg2value)
        for i, arg in enumerate(args[num_args - num_defaults:]):
            if arg not in arg2value:
                arg2value[arg] = defaults[i]
    missing = 0
    for kwarg in kwonlyargs:
        if kwarg not in arg2value:
            if kwonlydefaults and kwarg in kwonlydefaults:
                arg2value[kwarg] = kwonlydefaults[kwarg]
            else:
                missing += 1
    if missing:
        _missing_arguments(f_name, kwonlyargs, False, arg2value)
    return arg2value
+
ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound')

def getclosurevars(func):
    """
    Get the mapping of free variables to their current values.

    Returns a named tuple of dicts mapping the current nonlocal, global
    and builtin references as seen by the body of the function. A final
    set of unbound names that could not be resolved is also provided.
    """

    if ismethod(func):
        func = func.__func__

    if not isfunction(func):
        raise TypeError("{!r} is not a Python function".format(func))

    code = func.__code__
    # Nonlocal references are named in co_freevars and resolved
    # by looking them up in __closure__ by positional index
    closure = func.__closure__
    if closure is None:
        nonlocal_vars = {}
    else:
        nonlocal_vars = dict(zip(code.co_freevars,
                                 (cell.cell_contents for cell in closure)))

    # Global and builtin references are named in co_names and resolved
    # by looking them up in __globals__ or __builtins__
    global_ns = func.__globals__
    builtin_ns = global_ns.get("__builtins__", builtins.__dict__)
    if ismodule(builtin_ns):
        builtin_ns = builtin_ns.__dict__
    global_vars = {}
    builtin_vars = {}
    unbound_names = set()
    for name in code.co_names:
        # Because these used to be builtins instead of keywords, they
        # may still show up as name references. We ignore them.
        if name in ("None", "True", "False"):
            continue
        try:
            global_vars[name] = global_ns[name]
        except KeyError:
            try:
                builtin_vars[name] = builtin_ns[name]
            except KeyError:
                unbound_names.add(name)

    return ClosureVars(nonlocal_vars, global_vars,
                       builtin_vars, unbound_names)
+
+# -------------------------------------------------- stack frame extraction
+
Traceback = namedtuple('Traceback', 'filename lineno function code_context index')

def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.

    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    if istraceback(frame):
        # For a traceback, report where the exception occurred, then work
        # with the underlying frame.
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('{!r} is not a frame or traceback object'.format(frame))

    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        # Center the context window on the current line.
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except OSError:
            # Source unavailable: no context can be supplied.
            lines = index = None
        else:
            # Clamp the window to the bounds of the file.
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None

    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
+
def getlineno(frame):
    """Get the line number from a frame object, allowing for optimization."""
    # FrameType.f_lineno is now a descriptor that grovels co_lnotab
    # (i.e. the attribute access itself computes the current line), so
    # plain attribute access is already the right answer.
    return frame.f_lineno
+
FrameInfo = namedtuple('FrameInfo', ('frame',) + Traceback._fields)

def getouterframes(frame, context=1):
    """Get a list of records for a frame and all higher (calling) frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    framelist = []
    current = frame
    while current is not None:
        info = getframeinfo(current, context)
        framelist.append(FrameInfo(current, *info))
        current = current.f_back
    return framelist
+
def getinnerframes(tb, context=1):
    """Get a list of records for a traceback's frame and all lower frames.

    Each record contains a frame object, filename, line number, function
    name, a list of lines of context, and index within the context."""
    framelist = []
    current = tb
    while current is not None:
        info = getframeinfo(current, context)
        framelist.append(FrameInfo(current.tb_frame, *info))
        current = current.tb_next
    return framelist
+
def currentframe():
    """Return the frame of the caller or None if this is not possible."""
    # sys._getframe is a CPython implementation detail; other
    # implementations may not provide it.
    if hasattr(sys, "_getframe"):
        return sys._getframe(1)
    return None
+
def stack(context=1):
    """Return a list of records for the stack above the caller's frame."""
    caller = sys._getframe(1)
    return getouterframes(caller, context)
+
def trace(context=1):
    """Return a list of records for the stack below the current exception."""
    current_tb = sys.exc_info()[2]
    return getinnerframes(current_tb, context)
+
+
+# ------------------------------------------------ static version of getattr
+
# Unique marker distinguishing "no value found" from a legitimate None.
_sentinel = object()

def _static_getmro(klass):
    # Read the MRO through the raw type descriptor so that a metaclass
    # overriding attribute access cannot run arbitrary code.
    return type.__dict__['__mro__'].__get__(klass)
+
def _check_instance(obj, attr):
    """Look up *attr* in obj's instance __dict__ without triggering the
    descriptor protocol; return _sentinel when absent."""
    try:
        instance_dict = object.__getattribute__(obj, "__dict__")
    except AttributeError:
        # e.g. objects using __slots__ have no instance dict.
        instance_dict = {}
    # Use the unbound dict.get so a __dict__ that is a dict subclass with
    # an overridden get() cannot run arbitrary code.
    return dict.get(instance_dict, attr, _sentinel)
+
+
def _check_class(klass, attr):
    """Search the MRO of *klass* for *attr*; return _sentinel if not found.

    Entries whose type has a shadowed __dict__ are skipped, since their
    class dict cannot be trusted for a static lookup."""
    for entry in _static_getmro(klass):
        if _shadowed_dict(type(entry)) is not _sentinel:
            continue
        try:
            return entry.__dict__[attr]
        except KeyError:
            pass
    return _sentinel
+
def _is_type(obj):
    """Return True when *obj* is a class (its MRO can be read statically)."""
    try:
        _static_getmro(obj)
    except TypeError:
        return False
    else:
        return True
+
def _shadowed_dict(klass):
    # Detect whether some class in klass's MRO replaces the standard
    # '__dict__' descriptor.  Returns the shadowing object, or _sentinel
    # when '__dict__' everywhere is the genuine getset descriptor.
    dict_attr = type.__dict__["__dict__"]
    for entry in _static_getmro(klass):
        try:
            class_dict = dict_attr.__get__(entry)["__dict__"]
        except KeyError:
            pass
        else:
            # The genuine descriptor is a GetSetDescriptor named
            # '__dict__' whose objclass is the class it was found on;
            # anything else means '__dict__' has been shadowed.
            if not (type(class_dict) is types.GetSetDescriptorType and
                    class_dict.__name__ == "__dict__" and
                    class_dict.__objclass__ is entry):
                return class_dict
    return _sentinel
+
def getattr_static(obj, attr, default=_sentinel):
    """Retrieve attributes without triggering dynamic lookup via the
       descriptor protocol,  __getattr__ or __getattribute__.

       Note: this function may not be able to retrieve all attributes
       that getattr can fetch (like dynamically created attributes)
       and may find attributes that getattr can't (like descriptors
       that raise AttributeError). It can also return descriptor objects
       instead of instance members in some cases. See the
       documentation for details.
    """
    instance_result = _sentinel
    if not _is_type(obj):
        klass = type(obj)
        dict_attr = _shadowed_dict(klass)
        # Only trust the instance __dict__ when the class's '__dict__' is
        # either the genuine descriptor or a plain slot.
        if (dict_attr is _sentinel or
            type(dict_attr) is types.MemberDescriptorType):
            instance_result = _check_instance(obj, attr)
    else:
        klass = obj

    klass_result = _check_class(klass, attr)

    if instance_result is not _sentinel and klass_result is not _sentinel:
        # A data descriptor (defines both __get__ and __set__) on the
        # class takes precedence over the instance attribute.
        if (_check_class(type(klass_result), '__get__') is not _sentinel and
            _check_class(type(klass_result), '__set__') is not _sentinel):
            return klass_result

    if instance_result is not _sentinel:
        return instance_result
    if klass_result is not _sentinel:
        return klass_result

    if obj is klass:
        # for types we check the metaclass too
        for entry in _static_getmro(type(klass)):
            if _shadowed_dict(type(entry)) is _sentinel:
                try:
                    return entry.__dict__[attr]
                except KeyError:
                    pass
    if default is not _sentinel:
        return default
    raise AttributeError(attr)
+
+
+# ------------------------------------------------ generator introspection
+
GEN_CREATED = 'GEN_CREATED'
GEN_RUNNING = 'GEN_RUNNING'
GEN_SUSPENDED = 'GEN_SUSPENDED'
GEN_CLOSED = 'GEN_CLOSED'

def getgeneratorstate(generator):
    """Get current state of a generator-iterator.

    Possible states are:
      GEN_CREATED: Waiting to start execution.
      GEN_RUNNING: Currently being executed by the interpreter.
      GEN_SUSPENDED: Currently suspended at a yield expression.
      GEN_CLOSED: Execution has completed.
    """
    if generator.gi_running:
        return GEN_RUNNING
    frame = generator.gi_frame
    if frame is None:
        # The frame is released once the generator finishes or is closed.
        return GEN_CLOSED
    if frame.f_lasti == -1:
        # No bytecode has executed yet.
        return GEN_CREATED
    return GEN_SUSPENDED
+
+
def getgeneratorlocals(generator):
    """
    Get the mapping of generator local variables to their current values.

    A dict is returned, with the keys the local variable names and values the
    bound values."""

    if not isgenerator(generator):
        raise TypeError("{!r} is not a Python generator".format(generator))

    frame = getattr(generator, "gi_frame", None)
    if frame is not None:
        # Use the frame fetched above rather than re-reading gi_frame:
        # consistent with getcoroutinelocals() and avoids a second
        # attribute access that could observe a different frame.
        return frame.f_locals
    else:
        return {}
+
+
+# ------------------------------------------------ coroutine introspection
+
CORO_CREATED = 'CORO_CREATED'
CORO_RUNNING = 'CORO_RUNNING'
CORO_SUSPENDED = 'CORO_SUSPENDED'
CORO_CLOSED = 'CORO_CLOSED'

def getcoroutinestate(coroutine):
    """Get current state of a coroutine object.

    Possible states are:
      CORO_CREATED: Waiting to start execution.
      CORO_RUNNING: Currently being executed by the interpreter.
      CORO_SUSPENDED: Currently suspended at an await expression.
      CORO_CLOSED: Execution has completed.
    """
    if coroutine.cr_running:
        return CORO_RUNNING
    frame = coroutine.cr_frame
    if frame is None:
        # The frame is released once the coroutine finishes or is closed.
        return CORO_CLOSED
    if frame.f_lasti == -1:
        # No bytecode has executed yet.
        return CORO_CREATED
    return CORO_SUSPENDED
+
+
def getcoroutinelocals(coroutine):
    """
    Get the mapping of coroutine local variables to their current values.

    A dict is returned, with the keys the local variable names and values the
    bound values."""
    frame = getattr(coroutine, "cr_frame", None)
    if frame is None:
        # Finished/closed coroutines (and objects with no cr_frame at all)
        # have no locals to report.
        return {}
    return frame.f_locals
+
+
+###############################################################################
+### Function Signature Object (PEP 362)
+###############################################################################
+
+
# Concrete C-level callable types, obtained from known exemplars:
_WrapperDescriptor = type(type.__call__)    # unbound slot wrapper
_MethodWrapper = type(all.__call__)         # bound slot wrapper
_ClassMethodWrapper = type(int.__dict__['from_bytes'])  # classmethod descriptor

# Callable types that are implemented in C and therefore never carry
# user-defined Python-level signature metadata.
_NonUserDefinedCallables = (_WrapperDescriptor,
                            _MethodWrapper,
                            _ClassMethodWrapper,
                            types.BuiltinFunctionType)
+
+
def _signature_get_user_defined_method(cls, method_name):
    """Private helper. Checks if ``cls`` has an attribute
    named ``method_name`` and returns it only if it is a
    pure python function.
    """
    meth = getattr(cls, method_name, None)
    if meth is None or isinstance(meth, _NonUserDefinedCallables):
        # Once '__signature__' is supported on C-level callables this
        # filtering will no longer be necessary.
        return None
    return meth
+
+
def _signature_get_partial(wrapped_sig, partial, extra_args=()):
    """Private helper to calculate how 'wrapped_sig' signature will
    look like after applying a 'functools.partial' object (or alike)
    on it.

    'extra_args' are prepended to the partial's stored positional
    arguments (used e.g. for partialmethod's implicit self).
    """

    old_params = wrapped_sig.parameters
    new_params = OrderedDict(old_params.items())

    partial_args = partial.args or ()
    partial_keywords = partial.keywords or {}

    if extra_args:
        partial_args = extra_args + partial_args

    try:
        # Bind the stored arguments to validate them against the wrapped
        # signature and learn which parameters they consume.
        ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords)
    except TypeError as ex:
        msg = 'partial object {!r} has incorrect arguments'.format(partial)
        raise ValueError(msg) from ex


    transform_to_kwonly = False
    for param_name, param in old_params.items():
        try:
            arg_value = ba.arguments[param_name]
        except KeyError:
            pass
        else:
            if param.kind is _POSITIONAL_ONLY:
                # If positional-only parameter is bound by partial,
                # it effectively disappears from the signature
                new_params.pop(param_name)
                continue

            if param.kind is _POSITIONAL_OR_KEYWORD:
                if param_name in partial_keywords:
                    # This means that this parameter, and all parameters
                    # after it should be keyword-only (and var-positional
                    # should be removed). Here's why. Consider the following
                    # function:
                    #     foo(a, b, *args, c):
                    #         pass
                    #
                    # "partial(foo, a='spam')" will have the following
                    # signature: "(*, a='spam', b, c)". Because attempting
                    # to call that partial with "(10, 20)" arguments will
                    # raise a TypeError, saying that "a" argument received
                    # multiple values.
                    transform_to_kwonly = True
                    # Set the new default value
                    new_params[param_name] = param.replace(default=arg_value)
                else:
                    # was passed as a positional argument
                    new_params.pop(param.name)
                    continue

            if param.kind is _KEYWORD_ONLY:
                # Set the new default value
                new_params[param_name] = param.replace(default=arg_value)

        if transform_to_kwonly:
            assert param.kind is not _POSITIONAL_ONLY

            if param.kind is _POSITIONAL_OR_KEYWORD:
                new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY)
                new_params[param_name] = new_param
                new_params.move_to_end(param_name)
            elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD):
                new_params.move_to_end(param_name)
            elif param.kind is _VAR_POSITIONAL:
                # *args cannot follow keyword-only parameters; drop it.
                new_params.pop(param.name)

    return wrapped_sig.replace(parameters=new_params.values())
+
+
def _signature_bound_method(sig):
    """Private helper: convert the signature of an unbound function
    into the signature of the corresponding bound method, i.e. drop
    the implicit first argument when appropriate.
    """
    params = tuple(sig.parameters.values())

    if not params:
        raise ValueError('invalid method signature')

    first_kind = params[0].kind
    if first_kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
        # A method must accept at least one positional argument.
        raise ValueError('invalid method signature')

    if first_kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY):
        # Drop the implicit first parameter:
        # '(p1, p2[, ...])' -> '(p2[, ...])'
        params = params[1:]
    elif first_kind is not _VAR_POSITIONAL:
        # Unless a new parameter kind is invented we never get here.
        raise ValueError('invalid argument type')
    # A var-positional first parameter ('*args') is left untouched:
    # '(*args[, ...])' -> '(*args[, ...])'

    return sig.replace(parameters=params)
+
+
def _signature_is_builtin(obj):
    """Private helper to test if `obj` is a callable that might
    support Argument Clinic's __text_signature__ protocol.
    """
    if isbuiltin(obj) or ismethoddescriptor(obj):
        return True
    if isinstance(obj, _NonUserDefinedCallables):
        return True
    # Can't test 'isinstance(obj, type)' here: that would also be
    # true for regular Python classes.
    return obj in (type, object)
+
+
def _signature_is_functionlike(obj):
    """Private helper to test if `obj` is a duck type of FunctionType.
    A good example of such objects are functions compiled with
    Cython, which have all attributes that a pure Python function
    would have, but have their code statically compiled.
    """
    if not callable(obj) or isclass(obj):
        # Function-like objects are callables, and never classes.
        return False

    name = getattr(obj, '__name__', None)
    code = getattr(obj, '__code__', None)
    # The _void sentinel (not None) distinguishes a *missing* attribute
    # from one explicitly set to None.
    defaults = getattr(obj, '__defaults__', _void)
    kwdefaults = getattr(obj, '__kwdefaults__', _void)
    annotations = getattr(obj, '__annotations__', None)

    if not isinstance(code, types.CodeType):
        return False
    if not isinstance(name, str):
        return False
    if defaults is not None and not isinstance(defaults, tuple):
        return False
    if kwdefaults is not None and not isinstance(kwdefaults, dict):
        return False
    return annotations is None or isinstance(annotations, dict)
+
+
def _signature_get_bound_param(spec):
    """Private helper to extract the first parameter name from a
    builtin method's __text_signature__, which has the form
    '($param1, ...)'.  The first argument is assumed to carry
    neither a default value nor an annotation.
    """
    assert spec.startswith('($')

    # The name ends at the first comma, or at the closing paren when
    # there is only a single parameter.
    end = spec.find(',')
    if end == -1:
        end = spec.find(')')

    # Sanity checks: no annotation (':') or default ('=') may occur
    # before the end of the first parameter.
    colon = spec.find(':')
    assert colon == -1 or colon > end

    eq = spec.find('=')
    assert eq == -1 or eq > end

    return spec[2:end]
+
+
def _signature_strip_non_python_syntax(signature):
    """
    Private helper function. Takes a signature in Argument Clinic's
    extended signature format.

    Returns a tuple of three things:
      * that signature re-rendered in standard Python syntax
        (the '$' self marker and '/' positional-only marker removed),
      * the index of the "self" parameter (generally 0), or None if
        the function does not have a "self" parameter, and
      * the index of the last "positional only" parameter,
        or None if the signature has no positional-only parameters.
    """

    if not signature:
        return signature, None, None

    self_parameter = None
    last_positional_only = None

    # Feed the text through the tokenizer; tokenize() wants a
    # bytes-producing readline callable.
    lines = [l.encode('ascii') for l in signature.split('\n') if l]
    generator = iter(lines).__next__
    token_stream = tokenize.tokenize(generator)

    # Commas are emitted lazily ('delayed_comma') so that the comma
    # preceding a stripped '/' marker can be dropped.
    delayed_comma = False
    skip_next_comma = False
    text = []
    add = text.append

    current_parameter = 0
    OP = token.OP
    ERRORTOKEN = token.ERRORTOKEN

    # token stream always starts with ENCODING token, skip it
    t = next(token_stream)
    assert t.type == tokenize.ENCODING

    for t in token_stream:
        type, string = t.type, t.string

        if type == OP:
            if string == ',':
                if skip_next_comma:
                    # This comma followed a '/' marker; swallow it.
                    skip_next_comma = False
                else:
                    assert not delayed_comma
                    delayed_comma = True
                    current_parameter += 1
                continue

            if string == '/':
                # Positional-only marker: remember the boundary and
                # drop both the marker and the comma that follows it.
                assert not skip_next_comma
                assert last_positional_only is None
                skip_next_comma = True
                last_positional_only = current_parameter - 1
                continue

        if (type == ERRORTOKEN) and (string == '$'):
            # '$' marks the implicit self/module parameter; the
            # tokenizer reports it as an error token.
            assert self_parameter is None
            self_parameter = current_parameter
            continue

        if delayed_comma:
            delayed_comma = False
            if not ((type == OP) and (string == ')')):
                add(', ')
        add(string)
        if (string == ','):
            add(' ')
    clean_signature = ''.join(text)
    return clean_signature, self_parameter, last_positional_only
+
+
def _signature_fromstr(cls, obj, s, skip_bound_arg=True):
    """Private helper to parse content of '__text_signature__'
    and return a Signature based on it.

    'cls' is the Signature (sub)class to instantiate; 'obj' is the
    callable the text signature belongs to (used for error messages
    and for resolving symbolic default values against its module).
    """
    # Lazy import ast because it's relatively heavy and
    # it's not used for other than this function.
    import ast

    Parameter = cls._parameter_cls

    clean_signature, self_parameter, last_positional_only = \
        _signature_strip_non_python_syntax(s)

    # Wrap the cleaned signature into a dummy def so ast can parse it.
    program = "def foo" + clean_signature + ": pass"

    try:
        module = ast.parse(program)
    except SyntaxError:
        module = None

    if not isinstance(module, ast.Module):
        raise ValueError("{!r} builtin has invalid signature".format(obj))

    f = module.body[0]

    parameters = []
    empty = Parameter.empty

    # Symbolic defaults (e.g. 'sys.maxsize') are resolved first against
    # the object's own module namespace, then against sys.modules.
    module = None
    module_dict = {}
    module_name = getattr(obj, '__module__', None)
    if module_name:
        module = sys.modules.get(module_name, None)
        if module:
            module_dict = module.__dict__
    sys_module_dict = sys.modules.copy()

    def parse_name(node):
        # Extract a plain parameter name; annotations are rejected.
        assert isinstance(node, ast.arg)
        if node.annotation is not None:
            raise ValueError("Annotations are not currently supported")
        return node.arg

    def wrap_value(s):
        # Evaluate a dotted name / identifier and re-wrap the result as
        # an ast.Constant so ast.literal_eval can consume it later.
        try:
            value = eval(s, module_dict)
        except NameError:
            try:
                value = eval(s, sys_module_dict)
            except NameError:
                raise ValueError

        if isinstance(value, (str, int, float, bytes, bool, type(None))):
            return ast.Constant(value)
        raise ValueError

    class RewriteSymbolics(ast.NodeTransformer):
        # Rewrites symbolic default expressions into constants.
        def visit_Attribute(self, node):
            # Collapse a dotted attribute chain (a.b.c) into a string
            # and evaluate it via wrap_value().
            a = []
            n = node
            while isinstance(n, ast.Attribute):
                a.append(n.attr)
                n = n.value
            if not isinstance(n, ast.Name):
                raise ValueError
            a.append(n.id)
            value = ".".join(reversed(a))
            return wrap_value(value)

        def visit_Name(self, node):
            if not isinstance(node.ctx, ast.Load):
                raise ValueError()
            return wrap_value(node.id)

        def visit_BinOp(self, node):
            # Support constant folding of a couple simple binary operations
            # commonly used to define default values in text signatures
            left = self.visit(node.left)
            right = self.visit(node.right)
            if not isinstance(left, ast.Constant) or not isinstance(right, ast.Constant):
                raise ValueError
            if isinstance(node.op, ast.Add):
                return ast.Constant(left.value + right.value)
            elif isinstance(node.op, ast.Sub):
                return ast.Constant(left.value - right.value)
            elif isinstance(node.op, ast.BitOr):
                return ast.Constant(left.value | right.value)
            raise ValueError

    def p(name_node, default_node, default=empty):
        # Append one Parameter; reads the current 'kind' via closure.
        name = parse_name(name_node)
        if default_node and default_node is not _empty:
            try:
                default_node = RewriteSymbolics().visit(default_node)
                default = ast.literal_eval(default_node)
            except ValueError:
                raise ValueError("{!r} builtin has invalid signature".format(obj)) from None
        parameters.append(Parameter(name, kind, default=default, annotation=empty))

    # non-keyword-only parameters
    # Walk args and defaults right-to-left so the defaults (which align
    # with the tail of the argument list) pair up correctly, then
    # reverse back into declaration order.
    args = reversed(f.args.args)
    defaults = reversed(f.args.defaults)
    iter = itertools.zip_longest(args, defaults, fillvalue=None)
    if last_positional_only is not None:
        kind = Parameter.POSITIONAL_ONLY
    else:
        kind = Parameter.POSITIONAL_OR_KEYWORD
    for i, (name, default) in enumerate(reversed(list(iter))):
        p(name, default)
        if i == last_positional_only:
            kind = Parameter.POSITIONAL_OR_KEYWORD

    # *args
    if f.args.vararg:
        kind = Parameter.VAR_POSITIONAL
        p(f.args.vararg, empty)

    # keyword-only arguments
    kind = Parameter.KEYWORD_ONLY
    for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults):
        p(name, default)

    # **kwargs
    if f.args.kwarg:
        kind = Parameter.VAR_KEYWORD
        p(f.args.kwarg, empty)

    if self_parameter is not None:
        # Possibly strip the bound argument:
        #    - We *always* strip first bound argument if
        #      it is a module.
        #    - We don't strip first bound argument if
        #      skip_bound_arg is False.
        assert parameters
        _self = getattr(obj, '__self__', None)
        self_isbound = _self is not None
        self_ismodule = ismodule(_self)
        if self_isbound and (self_ismodule or skip_bound_arg):
            parameters.pop(0)
        else:
            # for builtins, self parameter is always positional-only!
            p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY)
            parameters[0] = p

    return cls(parameters, return_annotation=cls.empty)
+
+
def _signature_from_builtin(cls, func, skip_bound_arg=True):
    """Private helper: build a signature for a builtin callable by
    parsing its Argument Clinic '__text_signature__' attribute.
    """
    if not _signature_is_builtin(func):
        msg = "{!r} is not a Python builtin function".format(func)
        raise TypeError(msg)

    text_sig = getattr(func, "__text_signature__", None)
    if not text_sig:
        msg = "no signature found for builtin {!r}".format(func)
        raise ValueError(msg)

    return _signature_fromstr(cls, func, text_sig, skip_bound_arg)
+
+
def _signature_from_function(cls, func, skip_bound_arg=True,
                             globals=None, locals=None, eval_str=False):
    """Private helper: constructs Signature for the given python function.

    'globals'/'locals'/'eval_str' are forwarded to get_annotations() to
    control how stringized annotations are resolved.
    """

    is_duck_function = False
    if not isfunction(func):
        if _signature_is_functionlike(func):
            is_duck_function = True
        else:
            # If it's not a pure Python function, and not a duck type
            # of pure function:
            raise TypeError('{!r} is not a Python function'.format(func))

    # An explicit text signature (Argument Clinic) wins over
    # introspecting the code object.
    s = getattr(func, "__text_signature__", None)
    if s:
        return _signature_fromstr(cls, func, s, skip_bound_arg)

    Parameter = cls._parameter_cls

    # Parameter information, extracted from the code object: the first
    # co_argcount names in co_varnames are the positional parameters,
    # followed by co_kwonlyargcount keyword-only names.
    func_code = func.__code__
    pos_count = func_code.co_argcount
    arg_names = func_code.co_varnames
    posonly_count = func_code.co_posonlyargcount
    positional = arg_names[:pos_count]
    keyword_only_count = func_code.co_kwonlyargcount
    keyword_only = arg_names[pos_count:pos_count + keyword_only_count]
    annotations = get_annotations(func, globals=globals, locals=locals, eval_str=eval_str)
    defaults = func.__defaults__
    kwdefaults = func.__kwdefaults__

    if defaults:
        pos_default_count = len(defaults)
    else:
        pos_default_count = 0

    parameters = []

    # __defaults__ aligns with the *tail* of the positional parameters.
    non_default_count = pos_count - pos_default_count
    posonly_left = posonly_count

    # Non-keyword-only parameters w/o defaults.
    for name in positional[:non_default_count]:
        kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD
        annotation = annotations.get(name, _empty)
        parameters.append(Parameter(name, annotation=annotation,
                                    kind=kind))
        if posonly_left:
            posonly_left -= 1

    # ... w/ defaults.
    for offset, name in enumerate(positional[non_default_count:]):
        kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD
        annotation = annotations.get(name, _empty)
        parameters.append(Parameter(name, annotation=annotation,
                                    kind=kind,
                                    default=defaults[offset]))
        if posonly_left:
            posonly_left -= 1

    # *args
    if func_code.co_flags & CO_VARARGS:
        name = arg_names[pos_count + keyword_only_count]
        annotation = annotations.get(name, _empty)
        parameters.append(Parameter(name, annotation=annotation,
                                    kind=_VAR_POSITIONAL))

    # Keyword-only parameters.
    for name in keyword_only:
        default = _empty
        if kwdefaults is not None:
            default = kwdefaults.get(name, _empty)

        annotation = annotations.get(name, _empty)
        parameters.append(Parameter(name, annotation=annotation,
                                    kind=_KEYWORD_ONLY,
                                    default=default))
    # **kwargs
    # The '**kwargs' name sits after the positional and keyword-only
    # names in co_varnames, and after '*args' when that is present too.
    if func_code.co_flags & CO_VARKEYWORDS:
        index = pos_count + keyword_only_count
        if func_code.co_flags & CO_VARARGS:
            index += 1

        name = arg_names[index]
        annotation = annotations.get(name, _empty)
        parameters.append(Parameter(name, annotation=annotation,
                                    kind=_VAR_KEYWORD))

    # Is 'func' is a pure Python function - don't validate the
    # parameters list (for correct order and defaults), it should be OK.
    return cls(parameters,
               return_annotation=annotations.get('return', _empty),
               __validate_parameters__=is_duck_function)
+
+
def _signature_from_callable(obj, *,
                             follow_wrapper_chains=True,
                             skip_bound_arg=True,
                             globals=None,
                             locals=None,
                             eval_str=False,
                             sigcls):

    """Private helper function to get signature for arbitrary
    callable objects.

    Dispatches, in order: bound methods -> wrapper chains ->
    an explicit '__signature__' attribute -> partialmethods ->
    plain/duck-typed functions -> builtins -> functools.partial ->
    classes (metaclass __call__, __new__/__init__, text signatures) ->
    arbitrary objects with a user-defined __call__.
    """

    # Recursive re-entry point carrying the same options.
    _get_signature_of = functools.partial(_signature_from_callable,
                                follow_wrapper_chains=follow_wrapper_chains,
                                skip_bound_arg=skip_bound_arg,
                                globals=globals,
                                locals=locals,
                                sigcls=sigcls,
                                eval_str=eval_str)

    if not callable(obj):
        raise TypeError('{!r} is not a callable object'.format(obj))

    if isinstance(obj, types.MethodType):
        # In this case we skip the first parameter of the underlying
        # function (usually `self` or `cls`).
        sig = _get_signature_of(obj.__func__)

        if skip_bound_arg:
            return _signature_bound_method(sig)
        else:
            return sig

    # Was this function wrapped by a decorator?
    if follow_wrapper_chains:
        # Unwrap until we find an explicit signature or a MethodType (which will be
        # handled explicitly below).
        obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__")
                                or isinstance(f, types.MethodType)))
        if isinstance(obj, types.MethodType):
            # If the unwrapped object is a *method*, we might want to
            # skip its first parameter (self).
            # See test_signature_wrapped_bound_method for details.
            return _get_signature_of(obj)

    try:
        sig = obj.__signature__
    except AttributeError:
        pass
    else:
        if sig is not None:
            if not isinstance(sig, Signature):
                raise TypeError(
                    'unexpected object {!r} in __signature__ '
                    'attribute'.format(sig))
            return sig

    try:
        partialmethod = obj._partialmethod
    except AttributeError:
        pass
    else:
        if isinstance(partialmethod, functools.partialmethod):
            # Unbound partialmethod (see functools.partialmethod)
            # This means, that we need to calculate the signature
            # as if it's a regular partial object, but taking into
            # account that the first positional argument
            # (usually `self`, or `cls`) will not be passed
            # automatically (as for boundmethods)

            wrapped_sig = _get_signature_of(partialmethod.func)

            # The (None,) placeholder stands in for the implicit
            # first argument that partialmethod will supply.
            sig = _signature_get_partial(wrapped_sig, partialmethod, (None,))
            first_wrapped_param = tuple(wrapped_sig.parameters.values())[0]
            if first_wrapped_param.kind is Parameter.VAR_POSITIONAL:
                # First argument of the wrapped callable is `*args`, as in
                # `partialmethod(lambda *args)`.
                return sig
            else:
                sig_params = tuple(sig.parameters.values())
                assert (not sig_params or
                        first_wrapped_param is not sig_params[0])
                # Re-attach the implicit first parameter that the
                # placeholder consumed above.
                new_params = (first_wrapped_param,) + sig_params
                return sig.replace(parameters=new_params)

    if isfunction(obj) or _signature_is_functionlike(obj):
        # If it's a pure Python function, or an object that is duck type
        # of a Python function (Cython functions, for instance), then:
        return _signature_from_function(sigcls, obj,
                                        skip_bound_arg=skip_bound_arg,
                                        globals=globals, locals=locals, eval_str=eval_str)

    if _signature_is_builtin(obj):
        return _signature_from_builtin(sigcls, obj,
                                       skip_bound_arg=skip_bound_arg)

    if isinstance(obj, functools.partial):
        wrapped_sig = _get_signature_of(obj.func)
        return _signature_get_partial(wrapped_sig, obj)

    sig = None
    if isinstance(obj, type):
        # obj is a class or a metaclass

        # First, let's see if it has an overloaded __call__ defined
        # in its metaclass
        call = _signature_get_user_defined_method(type(obj), '__call__')
        if call is not None:
            sig = _get_signature_of(call)
        else:
            factory_method = None
            new = _signature_get_user_defined_method(obj, '__new__')
            init = _signature_get_user_defined_method(obj, '__init__')
            # Now we check if the 'obj' class has an own '__new__' method
            if '__new__' in obj.__dict__:
                factory_method = new
            # or an own '__init__' method
            elif '__init__' in obj.__dict__:
                factory_method = init
            # If not, we take inherited '__new__' or '__init__', if present
            elif new is not None:
                factory_method = new
            elif init is not None:
                factory_method = init

            if factory_method is not None:
                sig = _get_signature_of(factory_method)

        if sig is None:
            # At this point we know, that `obj` is a class, with no user-
            # defined '__init__', '__new__', or class-level '__call__'

            for base in obj.__mro__[:-1]:
                # Since '__text_signature__' is implemented as a
                # descriptor that extracts text signature from the
                # class docstring, if 'obj' is derived from a builtin
                # class, its own '__text_signature__' may be 'None'.
                # Therefore, we go through the MRO (except the last
                # class in there, which is 'object') to find the first
                # class with non-empty text signature.
                try:
                    text_sig = base.__text_signature__
                except AttributeError:
                    pass
                else:
                    if text_sig:
                        # If 'base' class has a __text_signature__ attribute:
                        # return a signature based on it
                        return _signature_fromstr(sigcls, base, text_sig)

            # No '__text_signature__' was found for the 'obj' class.
            # Last option is to check if its '__init__' is
            # object.__init__ or type.__init__.
            if type not in obj.__mro__:
                # We have a class (not metaclass), but no user-defined
                # __init__ or __new__ for it
                if (obj.__init__ is object.__init__ and
                    obj.__new__ is object.__new__):
                    # Return a signature of 'object' builtin.
                    return sigcls.from_callable(object)
                else:
                    raise ValueError(
                        'no signature found for builtin type {!r}'.format(obj))

    elif not isinstance(obj, _NonUserDefinedCallables):
        # An object with __call__
        # We also check that the 'obj' is not an instance of
        # _WrapperDescriptor or _MethodWrapper to avoid
        # infinite recursion (and even potential segfault)
        call = _signature_get_user_defined_method(type(obj), '__call__')
        if call is not None:
            try:
                sig = _get_signature_of(call)
            except ValueError as ex:
                msg = 'no signature found for {!r}'.format(obj)
                raise ValueError(msg) from ex

    if sig is not None:
        # For classes and objects we skip the first parameter of their
        # __call__, __new__, or __init__ methods
        if skip_bound_arg:
            return _signature_bound_method(sig)
        else:
            return sig

    if isinstance(obj, types.BuiltinFunctionType):
        # Raise a nicer error message for builtins
        msg = 'no signature found for builtin function {!r}'.format(obj)
        raise ValueError(msg)

    raise ValueError('callable {!r} is not supported by signature'.format(obj))
+
+
class _void:
    """A private marker - used in Parameter & Signature.

    Distinct from None so that None stays usable as a real argument
    value (e.g. in Parameter.replace(), where _void is the default).
    """
+
+
class _empty:
    """Marker object for Signature.empty and Parameter.empty.

    Represents the absence of a default value or annotation.
    """
+
+
class _ParameterKind(enum.IntEnum):
    """Kinds of parameters, numbered in the order they may appear in a
    signature.  An IntEnum so kinds can be compared/ordered, which the
    Signature constructor uses to validate parameter ordering.
    """
    POSITIONAL_ONLY = 0
    POSITIONAL_OR_KEYWORD = 1
    VAR_POSITIONAL = 2
    KEYWORD_ONLY = 3
    VAR_KEYWORD = 4

    def __str__(self):
        # Plain member name, e.g. 'POSITIONAL_ONLY'.
        return self._name_

    @property
    def description(self):
        # Human-readable form, e.g. 'positional-only' (see
        # _PARAM_NAME_MAPPING below); used in error messages.
        return _PARAM_NAME_MAPPING[self]
+
# Module-level aliases for the parameter kinds, used throughout the
# signature helpers to avoid repeated attribute lookups.
_POSITIONAL_ONLY = _ParameterKind.POSITIONAL_ONLY
_POSITIONAL_OR_KEYWORD = _ParameterKind.POSITIONAL_OR_KEYWORD
_VAR_POSITIONAL = _ParameterKind.VAR_POSITIONAL
_KEYWORD_ONLY = _ParameterKind.KEYWORD_ONLY
_VAR_KEYWORD = _ParameterKind.VAR_KEYWORD

# Human-readable descriptions, exposed via _ParameterKind.description.
_PARAM_NAME_MAPPING = {
    _POSITIONAL_ONLY: 'positional-only',
    _POSITIONAL_OR_KEYWORD: 'positional or keyword',
    _VAR_POSITIONAL: 'variadic positional',
    _KEYWORD_ONLY: 'keyword-only',
    _VAR_KEYWORD: 'variadic keyword'
}
+
+
class Parameter:
    """Represents a parameter in a function signature.

    Has the following public attributes:

    * name : str
        The name of the parameter as a string.
    * default : object
        The default value for the parameter if specified. If the
        parameter has no default value, this attribute is set to
        `Parameter.empty`.
    * annotation
        The annotation for the parameter if specified. If the
        parameter has no annotation, this attribute is set to
        `Parameter.empty`.
    * kind : str
        Describes how argument values are bound to the parameter.
        Possible values: `Parameter.POSITIONAL_ONLY`,
        `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`,
        `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`.

    Parameter objects are immutable (use .replace() to derive modified
    copies), hashable, and support pickling via __reduce__/__setstate__.
    """

    __slots__ = ('_name', '_kind', '_default', '_annotation')

    # Re-export the kind constants on the class for public use.
    POSITIONAL_ONLY = _POSITIONAL_ONLY
    POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD
    VAR_POSITIONAL = _VAR_POSITIONAL
    KEYWORD_ONLY = _KEYWORD_ONLY
    VAR_KEYWORD = _VAR_KEYWORD

    empty = _empty

    def __init__(self, name, kind, *, default=_empty, annotation=_empty):
        """Initialize a Parameter.

        Raises ValueError for an invalid kind, a missing/invalid name,
        or a default on a var-positional/var-keyword parameter;
        TypeError when 'name' is not a str.
        """
        try:
            self._kind = _ParameterKind(kind)
        except ValueError:
            raise ValueError(f'value {kind!r} is not a valid Parameter.kind')
        if default is not _empty:
            if self._kind in (_VAR_POSITIONAL, _VAR_KEYWORD):
                # *args / **kwargs can never carry a default value.
                msg = '{} parameters cannot have default values'
                msg = msg.format(self._kind.description)
                raise ValueError(msg)
        self._default = default
        self._annotation = annotation

        if name is _empty:
            raise ValueError('name is a required attribute for Parameter')

        if not isinstance(name, str):
            msg = 'name must be a str, not a {}'.format(type(name).__name__)
            raise TypeError(msg)

        if name[0] == '.' and name[1:].isdigit():
            # These are implicit arguments generated by comprehensions. In
            # order to provide a friendlier interface to users, we recast
            # their name as "implicitN" and treat them as positional-only.
            # See issue 19611.
            if self._kind != _POSITIONAL_OR_KEYWORD:
                msg = (
                    'implicit arguments must be passed as '
                    'positional or keyword arguments, not {}'
                )
                msg = msg.format(self._kind.description)
                raise ValueError(msg)
            self._kind = _POSITIONAL_ONLY
            name = 'implicit{}'.format(name[1:])

        if not name.isidentifier():
            raise ValueError('{!r} is not a valid parameter name'.format(name))

        self._name = name

    def __reduce__(self):
        # Pickle support: rebuild via the constructor (name, kind) and
        # restore default/annotation through __setstate__ (they may not
        # round-trip through constructor keywords, e.g. _empty).
        return (type(self),
                (self._name, self._kind),
                {'_default': self._default,
                 '_annotation': self._annotation})

    def __setstate__(self, state):
        # Counterpart of __reduce__'s state dict.
        self._default = state['_default']
        self._annotation = state['_annotation']

    @property
    def name(self):
        return self._name

    @property
    def default(self):
        return self._default

    @property
    def annotation(self):
        return self._annotation

    @property
    def kind(self):
        return self._kind

    def replace(self, *, name=_void, kind=_void,
                annotation=_void, default=_void):
        """Creates a customized copy of the Parameter.

        _void (not None) is the "not supplied" sentinel so callers can
        explicitly pass None as a new default/annotation.
        """

        if name is _void:
            name = self._name

        if kind is _void:
            kind = self._kind

        if annotation is _void:
            annotation = self._annotation

        if default is _void:
            default = self._default

        return type(self)(name, kind, default=default, annotation=annotation)

    def __str__(self):
        # Renders the parameter as it would appear in a signature,
        # e.g. 'x: int = 1', '*args', '**kwargs'.
        kind = self.kind
        formatted = self._name

        # Add annotation and default value
        if self._annotation is not _empty:
            formatted = '{}: {}'.format(formatted,
                                        formatannotation(self._annotation))

        if self._default is not _empty:
            if self._annotation is not _empty:
                # PEP 8: spaces around '=' when an annotation is present.
                formatted = '{} = {}'.format(formatted, repr(self._default))
            else:
                formatted = '{}={}'.format(formatted, repr(self._default))

        if kind == _VAR_POSITIONAL:
            formatted = '*' + formatted
        elif kind == _VAR_KEYWORD:
            formatted = '**' + formatted

        return formatted

    def __repr__(self):
        return '<{} "{}">'.format(self.__class__.__name__, self)

    def __hash__(self):
        # Consistent with __eq__: hashes the same four attributes.
        return hash((self.name, self.kind, self.annotation, self.default))

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, Parameter):
            return NotImplemented
        return (self._name == other._name and
                self._kind == other._kind and
                self._default == other._default and
                self._annotation == other._annotation)
+
+
class BoundArguments:
    """Result of `Signature.bind` call.  Holds the mapping of arguments
    to the function's parameters.

    Has the following public attributes:

    * arguments : dict
        An ordered mutable mapping of parameters' names to arguments' values.
        Does not contain arguments' default values.
    * signature : Signature
        The Signature object that created this instance.
    * args : tuple
        Tuple of positional arguments values.
    * kwargs : dict
        Dict of keyword arguments values.
    """

    # '__weakref__' is included so instances can be weakly referenced
    # despite using __slots__.
    __slots__ = ('arguments', '_signature', '__weakref__')

    def __init__(self, signature, arguments):
        self.arguments = arguments
        self._signature = signature

    @property
    def signature(self):
        return self._signature

    @property
    def args(self):
        """Arguments that can be passed positionally, as a tuple."""
        args = []
        for param_name, param in self._signature.parameters.items():
            if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                # From here on, everything must be passed by keyword.
                break

            try:
                arg = self.arguments[param_name]
            except KeyError:
                # We're done here. Other arguments
                # will be mapped in 'BoundArguments.kwargs'
                break
            else:
                if param.kind == _VAR_POSITIONAL:
                    # *args
                    args.extend(arg)
                else:
                    # plain argument
                    args.append(arg)

        return tuple(args)

    @property
    def kwargs(self):
        """Arguments that must be passed by keyword, as a dict.

        Starts where 'args' stops: at the first keyword-only /
        var-keyword parameter, or at the first unbound parameter.
        """
        kwargs = {}
        kwargs_started = False
        for param_name, param in self._signature.parameters.items():
            if not kwargs_started:
                if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                    kwargs_started = True
                else:
                    if param_name not in self.arguments:
                        # A gap in the positional arguments forces all
                        # following bound arguments to be keywords.
                        kwargs_started = True
                        continue

            if not kwargs_started:
                continue

            try:
                arg = self.arguments[param_name]
            except KeyError:
                pass
            else:
                if param.kind == _VAR_KEYWORD:
                    # **kwargs
                    kwargs.update(arg)
                else:
                    # plain keyword argument
                    kwargs[param_name] = arg

        return kwargs

    def apply_defaults(self):
        """Set default values for missing arguments.

        For variable-positional arguments (*args) the default is an
        empty tuple.

        For variable-keyword arguments (**kwargs) the default is an
        empty dict.
        """
        arguments = self.arguments
        new_arguments = []
        # Rebuild in signature order so defaults slot into place.
        for name, param in self._signature.parameters.items():
            try:
                new_arguments.append((name, arguments[name]))
            except KeyError:
                if param.default is not _empty:
                    val = param.default
                elif param.kind is _VAR_POSITIONAL:
                    val = ()
                elif param.kind is _VAR_KEYWORD:
                    val = {}
                else:
                    # This BoundArguments was likely produced by
                    # Signature.bind_partial().
                    continue
                new_arguments.append((name, val))
        self.arguments = dict(new_arguments)

    def __eq__(self, other):
        if self is other:
            return True
        if not isinstance(other, BoundArguments):
            return NotImplemented
        return (self.signature == other.signature and
                self.arguments == other.arguments)

    def __setstate__(self, state):
        # Pickle support (needed because of __slots__).
        self._signature = state['_signature']
        self.arguments = state['arguments']

    def __getstate__(self):
        return {'_signature': self._signature, 'arguments': self.arguments}

    def __repr__(self):
        args = []
        for arg, value in self.arguments.items():
            args.append('{}={!r}'.format(arg, value))
        return '<{} ({})>'.format(self.__class__.__name__, ', '.join(args))
+
+
+class Signature:
+ """A Signature object represents the overall signature of a function.
+ It stores a Parameter object for each parameter accepted by the
+ function, as well as information specific to the function itself.
+
+ A Signature object has the following public attributes and methods:
+
+ * parameters : OrderedDict
+ An ordered mapping of parameters' names to the corresponding
+ Parameter objects (keyword-only arguments are in the same order
+ as listed in `code.co_varnames`).
+ * return_annotation : object
+ The annotation for the return type of the function if specified.
+ If the function has no annotation for its return type, this
+ attribute is set to `Signature.empty`.
+ * bind(*args, **kwargs) -> BoundArguments
+ Creates a mapping from positional and keyword arguments to
+ parameters.
+ * bind_partial(*args, **kwargs) -> BoundArguments
+ Creates a partial mapping from positional and keyword arguments
+ to parameters (simulating 'functools.partial' behavior.)
+ """
+
+ __slots__ = ('_return_annotation', '_parameters')
+
+ _parameter_cls = Parameter
+ _bound_arguments_cls = BoundArguments
+
+ empty = _empty
+
    def __init__(self, parameters=None, *, return_annotation=_empty,
                 __validate_parameters__=True):
        """Constructs Signature from the given list of Parameter
        objects and 'return_annotation'.  All arguments are optional.

        With __validate_parameters__ (the default), verifies that
        parameter kinds appear in legal order, that no non-default
        parameter follows a defaulted one of the same kind group, and
        that names are unique; raises ValueError otherwise.
        """

        if parameters is None:
            params = OrderedDict()
        else:
            if __validate_parameters__:
                params = OrderedDict()
                # Kinds must be non-decreasing (IntEnum ordering):
                # positional-only <= positional-or-keyword <= *args
                # <= keyword-only <= **kwargs.
                top_kind = _POSITIONAL_ONLY
                kind_defaults = False

                for param in parameters:
                    kind = param.kind
                    name = param.name

                    if kind < top_kind:
                        msg = (
                            'wrong parameter order: {} parameter before {} '
                            'parameter'
                        )
                        msg = msg.format(top_kind.description,
                                         kind.description)
                        raise ValueError(msg)
                    elif kind > top_kind:
                        # Entering a new kind group resets the
                        # "previous parameter had a default" flag.
                        kind_defaults = False
                        top_kind = kind

                    if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD):
                        if param.default is _empty:
                            if kind_defaults:
                                # No default for this parameter, but the
                                # previous parameter of the same kind had
                                # a default
                                msg = 'non-default argument follows default ' \
                                      'argument'
                                raise ValueError(msg)
                        else:
                            # There is a default for this parameter.
                            kind_defaults = True

                    if name in params:
                        msg = 'duplicate parameter name: {!r}'.format(name)
                        raise ValueError(msg)

                    params[name] = param
            else:
                params = OrderedDict((param.name, param) for param in parameters)

        # Expose parameters through a read-only mapping proxy.
        self._parameters = types.MappingProxyType(params)
        self._return_annotation = return_annotation
+
+ @classmethod
+ def from_function(cls, func):
+ """Constructs Signature for the given python function.
+
+ Deprecated since Python 3.5, use `Signature.from_callable()`.
+ """
+
+ warnings.warn("inspect.Signature.from_function() is deprecated since "
+ "Python 3.5, use Signature.from_callable()",
+ DeprecationWarning, stacklevel=2)
+ return _signature_from_function(cls, func)
+
+ @classmethod
+ def from_builtin(cls, func):
+ """Constructs Signature for the given builtin function.
+
+ Deprecated since Python 3.5, use `Signature.from_callable()`.
+ """
+
+ warnings.warn("inspect.Signature.from_builtin() is deprecated since "
+ "Python 3.5, use Signature.from_callable()",
+ DeprecationWarning, stacklevel=2)
+ return _signature_from_builtin(cls, func)
+
+ @classmethod
+ def from_callable(cls, obj, *,
+ follow_wrapped=True, globals=None, locals=None, eval_str=False):
+ """Constructs Signature for the given callable object."""
+ return _signature_from_callable(obj, sigcls=cls,
+ follow_wrapper_chains=follow_wrapped,
+ globals=globals, locals=locals, eval_str=eval_str)
+
    @property
    def parameters(self):
        """Ordered, read-only mapping of parameter names to Parameter objects."""
        return self._parameters
+
    @property
    def return_annotation(self):
        """The return annotation, or Signature.empty if none was given."""
        return self._return_annotation
+
+ def replace(self, *, parameters=_void, return_annotation=_void):
+ """Creates a customized copy of the Signature.
+ Pass 'parameters' and/or 'return_annotation' arguments
+ to override them in the new copy.
+ """
+
+ if parameters is _void:
+ parameters = self.parameters.values()
+
+ if return_annotation is _void:
+ return_annotation = self._return_annotation
+
+ return type(self)(parameters,
+ return_annotation=return_annotation)
+
+ def _hash_basis(self):
+ params = tuple(param for param in self.parameters.values()
+ if param.kind != _KEYWORD_ONLY)
+
+ kwo_params = {param.name: param for param in self.parameters.values()
+ if param.kind == _KEYWORD_ONLY}
+
+ return params, kwo_params, self.return_annotation
+
+ def __hash__(self):
+ params, kwo_params, return_annotation = self._hash_basis()
+ kwo_params = frozenset(kwo_params.values())
+ return hash((params, kwo_params, return_annotation))
+
    def __eq__(self, other):
        # Identity fast path.
        if self is other:
            return True
        if not isinstance(other, Signature):
            # Let the other operand attempt its own comparison.
            return NotImplemented
        # Compare the same basis used by __hash__, so eq/hash stay consistent.
        return self._hash_basis() == other._hash_basis()
+
    def _bind(self, args, kwargs, *, partial=False):
        """Private method. Don't use directly.

        Two-phase binding: first consume positional `args` against the
        parameters in order, then consume `kwargs` against whatever
        parameters remain.  With partial=True (bind_partial), missing
        required arguments are tolerated.
        """

        arguments = {}

        parameters = iter(self.parameters.values())
        # Holds an already-fetched parameter that still needs keyword
        # processing when the positional phase stops early.
        parameters_ex = ()
        arg_vals = iter(args)

        while True:
            # Let's iterate through the positional arguments and corresponding
            # parameters
            try:
                arg_val = next(arg_vals)
            except StopIteration:
                # No more positional arguments
                try:
                    param = next(parameters)
                except StopIteration:
                    # No more parameters. That's it. Just need to check that
                    # we have no `kwargs` after this while loop
                    break
                else:
                    if param.kind == _VAR_POSITIONAL:
                        # That's OK, just empty *args. Let's start parsing
                        # kwargs
                        break
                    elif param.name in kwargs:
                        if param.kind == _POSITIONAL_ONLY:
                            msg = '{arg!r} parameter is positional only, ' \
                                  'but was passed as a keyword'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
                        parameters_ex = (param,)
                        break
                    elif (param.kind == _VAR_KEYWORD or
                          param.default is not _empty):
                        # That's fine too - we have a default value for this
                        # parameter. So, lets start parsing `kwargs`, starting
                        # with the current parameter
                        parameters_ex = (param,)
                        break
                    else:
                        # No default, not VAR_KEYWORD, not VAR_POSITIONAL,
                        # not in `kwargs`
                        if partial:
                            parameters_ex = (param,)
                            break
                        else:
                            msg = 'missing a required argument: {arg!r}'
                            msg = msg.format(arg=param.name)
                            raise TypeError(msg) from None
            else:
                # We have a positional argument to process
                try:
                    param = next(parameters)
                except StopIteration:
                    raise TypeError('too many positional arguments') from None
                else:
                    if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY):
                        # Looks like we have no parameter for this positional
                        # argument
                        raise TypeError(
                            'too many positional arguments') from None

                    if param.kind == _VAR_POSITIONAL:
                        # We have an '*args'-like argument, let's fill it with
                        # all positional arguments we have left and move on to
                        # the next phase
                        values = [arg_val]
                        values.extend(arg_vals)
                        arguments[param.name] = tuple(values)
                        break

                    if param.name in kwargs and param.kind != _POSITIONAL_ONLY:
                        raise TypeError(
                            'multiple values for argument {arg!r}'.format(
                                arg=param.name)) from None

                    arguments[param.name] = arg_val

        # Now, we iterate through the remaining parameters to process
        # keyword arguments
        kwargs_param = None
        for param in itertools.chain(parameters_ex, parameters):
            if param.kind == _VAR_KEYWORD:
                # Memorize that we have a '**kwargs'-like parameter
                kwargs_param = param
                continue

            if param.kind == _VAR_POSITIONAL:
                # Named arguments don't refer to '*args'-like parameters.
                # We only arrive here if the positional arguments ended
                # before reaching the last parameter before *args.
                continue

            param_name = param.name
            try:
                arg_val = kwargs.pop(param_name)
            except KeyError:
                # We have no value for this parameter. It's fine though,
                # if it has a default value, or it is an '*args'-like
                # parameter, left alone by the processing of positional
                # arguments.
                if (not partial and param.kind != _VAR_POSITIONAL and
                        param.default is _empty):
                    raise TypeError('missing a required argument: {arg!r}'. \
                                    format(arg=param_name)) from None

            else:
                if param.kind == _POSITIONAL_ONLY:
                    # This should never happen in case of a properly built
                    # Signature object (but let's have this check here
                    # to ensure correct behaviour just in case)
                    raise TypeError('{arg!r} parameter is positional only, '
                                    'but was passed as a keyword'. \
                                    format(arg=param.name))

                arguments[param_name] = arg_val

        if kwargs:
            if kwargs_param is not None:
                # Process our '**kwargs'-like parameter
                arguments[kwargs_param.name] = kwargs
            else:
                raise TypeError(
                    'got an unexpected keyword argument {arg!r}'.format(
                        arg=next(iter(kwargs))))

        return self._bound_arguments_cls(self, arguments)
+
    def bind(self, /, *args, **kwargs):
        """Get a BoundArguments object, that maps the passed `args`
        and `kwargs` to the function's signature.  Raises `TypeError`
        if the passed arguments can not be bound.
        """
        # Positional-only self (the '/') keeps 'self' usable as a keyword
        # argument name in the bound callable.
        return self._bind(args, kwargs)
+
    def bind_partial(self, /, *args, **kwargs):
        """Get a BoundArguments object, that partially maps the
        passed `args` and `kwargs` to the function's signature.
        Raises `TypeError` if the passed arguments can not be bound.
        """
        # partial=True lets required arguments go unbound (functools.partial
        # style); other binding errors still raise TypeError.
        return self._bind(args, kwargs, partial=True)
+
    def __reduce__(self):
        # Pickle as (class, (parameters,), state); the parameters travel
        # through __init__ while __setstate__ restores the annotation.
        return (type(self),
                (tuple(self._parameters.values()),),
                {'_return_annotation': self._return_annotation})
+
    def __setstate__(self, state):
        # Counterpart of __reduce__: only the return annotation lives in
        # the explicit state dict.
        self._return_annotation = state['_return_annotation']
+
+ def __repr__(self):
+ return '<{} {}>'.format(self.__class__.__name__, self)
+
    def __str__(self):
        """Render the signature as '(params) -> annotation', inserting the
        '/' and '*' markers for positional-only and keyword-only groups."""
        result = []
        render_pos_only_separator = False
        render_kw_only_separator = True
        for param in self.parameters.values():
            formatted = str(param)

            kind = param.kind

            if kind == _POSITIONAL_ONLY:
                render_pos_only_separator = True
            elif render_pos_only_separator:
                # It's not a positional-only parameter, and the flag
                # is set to 'True' (there were pos-only params before.)
                result.append('/')
                render_pos_only_separator = False

            if kind == _VAR_POSITIONAL:
                # OK, we have an '*args'-like parameter, so we won't need
                # a '*' to separate keyword-only arguments
                render_kw_only_separator = False
            elif kind == _KEYWORD_ONLY and render_kw_only_separator:
                # We have a keyword-only parameter to render and we haven't
                # rendered an '*args'-like parameter before, so add a '*'
                # separator to the parameters list ("foo(arg1, *, arg2)" case)
                result.append('*')
                # This condition should be only triggered once, so
                # reset the flag
                render_kw_only_separator = False

            result.append(formatted)

        if render_pos_only_separator:
            # There were only positional-only parameters, hence the
            # flag was not reset to 'False'
            result.append('/')

        rendered = '({})'.format(', '.join(result))

        if self.return_annotation is not _empty:
            anno = formatannotation(self.return_annotation)
            rendered += ' -> {}'.format(anno)

        return rendered
+
+
def signature(obj, *, follow_wrapped=True, globals=None, locals=None, eval_str=False):
    """Get a signature object for the passed callable."""
    # Thin module-level convenience wrapper over the classmethod constructor.
    return Signature.from_callable(
        obj,
        follow_wrapped=follow_wrapped,
        globals=globals,
        locals=locals,
        eval_str=eval_str,
    )
+
+
def _main():
    """ Logic for inspecting an object given at command line """
    # Local imports keep argparse/importlib out of normal `import inspect`.
    import argparse
    import importlib

    parser = argparse.ArgumentParser()
    parser.add_argument(
        'object',
        help="The object to be analysed. "
             "It supports the 'module:qualname' syntax")
    parser.add_argument(
        '-d', '--details', action='store_true',
        help='Display info about the module rather than its source code')

    args = parser.parse_args()

    target = args.object
    mod_name, has_attrs, attrs = target.partition(":")
    try:
        obj = module = importlib.import_module(mod_name)
    except Exception as exc:
        # Exit code 2: the module itself could not be imported.
        msg = "Failed to import {} ({}: {})".format(mod_name,
                                                    type(exc).__name__,
                                                    exc)
        print(msg, file=sys.stderr)
        sys.exit(2)

    if has_attrs:
        # Walk the dotted attribute path after ':' down to the target object.
        parts = attrs.split(".")
        obj = module
        for part in parts:
            obj = getattr(obj, part)

    # Exit code 1: builtin modules have no retrievable source.
    if module.__name__ in sys.builtin_module_names:
        print("Can't get info for builtin modules.", file=sys.stderr)
        sys.exit(1)

    if args.details:
        print('Target: {}'.format(target))
        print('Origin: {}'.format(getsourcefile(module)))
        print('Cached: {}'.format(module.__cached__))
        if obj is module:
            print('Loader: {}'.format(repr(module.__loader__)))
            if hasattr(module, '__path__'):
                print('Submodule search path: {}'.format(module.__path__))
        else:
            # Line number lookup is best-effort only.
            try:
                __, lineno = findsource(obj)
            except Exception:
                pass
            else:
                print('Line: {}'.format(lineno))

        print('\n')
    else:
        print(getsource(obj))
+
+
# Script entry point: `python -m inspect module:qualname`.
if __name__ == "__main__":
    _main()
diff --git a/llava/lib/python3.10/lzma.py b/llava/lib/python3.10/lzma.py
new file mode 100644
index 0000000000000000000000000000000000000000..800f52198fbb794077fe43425df83db44e13960d
--- /dev/null
+++ b/llava/lib/python3.10/lzma.py
@@ -0,0 +1,356 @@
+"""Interface to the liblzma compression library.
+
+This module provides a class for reading and writing compressed files,
+classes for incremental (de)compression, and convenience functions for
+one-shot (de)compression.
+
+These classes and functions support both the XZ and legacy LZMA
+container formats, as well as raw compressed data streams.
+"""
+
+__all__ = [
+ "CHECK_NONE", "CHECK_CRC32", "CHECK_CRC64", "CHECK_SHA256",
+ "CHECK_ID_MAX", "CHECK_UNKNOWN",
+ "FILTER_LZMA1", "FILTER_LZMA2", "FILTER_DELTA", "FILTER_X86", "FILTER_IA64",
+ "FILTER_ARM", "FILTER_ARMTHUMB", "FILTER_POWERPC", "FILTER_SPARC",
+ "FORMAT_AUTO", "FORMAT_XZ", "FORMAT_ALONE", "FORMAT_RAW",
+ "MF_HC3", "MF_HC4", "MF_BT2", "MF_BT3", "MF_BT4",
+ "MODE_FAST", "MODE_NORMAL", "PRESET_DEFAULT", "PRESET_EXTREME",
+
+ "LZMACompressor", "LZMADecompressor", "LZMAFile", "LZMAError",
+ "open", "compress", "decompress", "is_check_supported",
+]
+
+import builtins
+import io
+import os
+from _lzma import *
+from _lzma import _encode_filter_properties, _decode_filter_properties
+import _compression
+
+
# Internal stream states tracked by LZMAFile._mode.
_MODE_CLOSED = 0
_MODE_READ = 1
# Value 2 no longer used
_MODE_WRITE = 3
+
+
class LZMAFile(_compression.BaseStream):

    """A file object providing transparent LZMA (de)compression.

    An LZMAFile can act as a wrapper for an existing file object, or
    refer directly to a named file on disk.

    Note that LZMAFile provides a *binary* file interface - data read
    is returned as bytes, and data to be written must be given as bytes.
    """

    def __init__(self, filename=None, mode="r", *,
                 format=None, check=-1, preset=None, filters=None):
        """Open an LZMA-compressed file in binary mode.

        filename can be either an actual file name (given as a str,
        bytes, or PathLike object), in which case the named file is
        opened, or it can be an existing file object to read from or
        write to.

        mode can be "r" for reading (default), "w" for (over)writing,
        "x" for creating exclusively, or "a" for appending. These can
        equivalently be given as "rb", "wb", "xb" and "ab" respectively.

        format specifies the container format to use for the file.
        If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the
        default is FORMAT_XZ.

        check specifies the integrity check to use. This argument can
        only be used when opening a file for writing. For FORMAT_XZ,
        the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not
        support integrity checks - for these formats, check must be
        omitted, or be CHECK_NONE.

        When opening a file for reading, the *preset* argument is not
        meaningful, and should be omitted. The *filters* argument should
        also be omitted, except when format is FORMAT_RAW (in which case
        it is required).

        When opening a file for writing, the settings used by the
        compressor can be specified either as a preset compression
        level (with the *preset* argument), or in detail as a custom
        filter chain (with the *filters* argument). For FORMAT_XZ and
        FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset
        level. For FORMAT_RAW, the caller must always specify a filter
        chain; the raw compressor does not support preset compression
        levels.

        preset (if provided) should be an integer in the range 0-9,
        optionally OR-ed with the constant PRESET_EXTREME.

        filters (if provided) should be a sequence of dicts. Each dict
        should have an entry for "id" indicating ID of the filter, plus
        additional entries for options to the filter.
        """
        # Start in the CLOSED state so that a failure below leaves the
        # object in a consistent, closeable condition.
        self._fp = None
        self._closefp = False
        self._mode = _MODE_CLOSED

        if mode in ("r", "rb"):
            if check != -1:
                raise ValueError("Cannot specify an integrity check "
                                 "when opening a file for reading")
            if preset is not None:
                raise ValueError("Cannot specify a preset compression "
                                 "level when opening a file for reading")
            if format is None:
                format = FORMAT_AUTO
            mode_code = _MODE_READ
        elif mode in ("w", "wb", "a", "ab", "x", "xb"):
            if format is None:
                format = FORMAT_XZ
            mode_code = _MODE_WRITE
            self._compressor = LZMACompressor(format=format, check=check,
                                              preset=preset, filters=filters)
            # Tracks the count of *uncompressed* bytes written (see tell()).
            self._pos = 0
        else:
            raise ValueError("Invalid mode: {!r}".format(mode))

        if isinstance(filename, (str, bytes, os.PathLike)):
            if "b" not in mode:
                mode += "b"
            self._fp = builtins.open(filename, mode)
            # We opened the file, so we are responsible for closing it.
            self._closefp = True
            self._mode = mode_code
        elif hasattr(filename, "read") or hasattr(filename, "write"):
            # Caller-supplied file object; caller retains ownership.
            self._fp = filename
            self._mode = mode_code
        else:
            raise TypeError("filename must be a str, bytes, file or PathLike object")

        if self._mode == _MODE_READ:
            raw = _compression.DecompressReader(self._fp, LZMADecompressor,
                trailing_error=LZMAError, format=format, filters=filters)
            self._buffer = io.BufferedReader(raw)

    def close(self):
        """Flush and close the file.

        May be called more than once without error. Once the file is
        closed, any other operation on it will raise a ValueError.
        """
        if self._mode == _MODE_CLOSED:
            return
        # Nested try/finally guarantees the underlying fp is closed (when
        # owned) and the state is reset even if flushing fails.
        try:
            if self._mode == _MODE_READ:
                self._buffer.close()
                self._buffer = None
            elif self._mode == _MODE_WRITE:
                self._fp.write(self._compressor.flush())
                self._compressor = None
        finally:
            try:
                if self._closefp:
                    self._fp.close()
            finally:
                self._fp = None
                self._closefp = False
                self._mode = _MODE_CLOSED

    @property
    def closed(self):
        """True if this file is closed."""
        return self._mode == _MODE_CLOSED

    def fileno(self):
        """Return the file descriptor for the underlying file."""
        self._check_not_closed()
        return self._fp.fileno()

    def seekable(self):
        """Return whether the file supports seeking."""
        return self.readable() and self._buffer.seekable()

    def readable(self):
        """Return whether the file was opened for reading."""
        self._check_not_closed()
        return self._mode == _MODE_READ

    def writable(self):
        """Return whether the file was opened for writing."""
        self._check_not_closed()
        return self._mode == _MODE_WRITE

    def peek(self, size=-1):
        """Return buffered data without advancing the file position.

        Always returns at least one byte of data, unless at EOF.
        The exact number of bytes returned is unspecified.
        """
        self._check_can_read()
        # Relies on the undocumented fact that BufferedReader.peek() always
        # returns at least one byte (except at EOF)
        return self._buffer.peek(size)

    def read(self, size=-1):
        """Read up to size uncompressed bytes from the file.

        If size is negative or omitted, read until EOF is reached.
        Returns b"" if the file is already at EOF.
        """
        self._check_can_read()
        return self._buffer.read(size)

    def read1(self, size=-1):
        """Read up to size uncompressed bytes, while trying to avoid
        making multiple reads from the underlying stream. Reads up to a
        buffer's worth of data if size is negative.

        Returns b"" if the file is at EOF.
        """
        self._check_can_read()
        if size < 0:
            size = io.DEFAULT_BUFFER_SIZE
        return self._buffer.read1(size)

    def readline(self, size=-1):
        """Read a line of uncompressed bytes from the file.

        The terminating newline (if present) is retained. If size is
        non-negative, no more than size bytes will be read (in which
        case the line may be incomplete). Returns b'' if already at EOF.
        """
        self._check_can_read()
        return self._buffer.readline(size)

    def write(self, data):
        """Write a bytes object to the file.

        Returns the number of uncompressed bytes written, which is
        always the length of data in bytes. Note that due to buffering,
        the file on disk may not reflect the data written until close()
        is called.
        """
        self._check_can_write()
        if isinstance(data, (bytes, bytearray)):
            length = len(data)
        else:
            # accept any data that supports the buffer protocol
            data = memoryview(data)
            length = data.nbytes

        compressed = self._compressor.compress(data)
        self._fp.write(compressed)
        self._pos += length
        return length

    def seek(self, offset, whence=io.SEEK_SET):
        """Change the file position.

        The new position is specified by offset, relative to the
        position indicated by whence. Possible values for whence are:

            0: start of stream (default): offset must not be negative
            1: current stream position
            2: end of stream; offset must not be positive

        Returns the new file position.

        Note that seeking is emulated, so depending on the parameters,
        this operation may be extremely slow.
        """
        self._check_can_seek()
        return self._buffer.seek(offset, whence)

    def tell(self):
        """Return the current file position."""
        self._check_not_closed()
        if self._mode == _MODE_READ:
            return self._buffer.tell()
        # Write mode: position is the running uncompressed byte count.
        return self._pos
+
+
+def open(filename, mode="rb", *,
+ format=None, check=-1, preset=None, filters=None,
+ encoding=None, errors=None, newline=None):
+ """Open an LZMA-compressed file in binary or text mode.
+
+ filename can be either an actual file name (given as a str, bytes,
+ or PathLike object), in which case the named file is opened, or it
+ can be an existing file object to read from or write to.
+
+ The mode argument can be "r", "rb" (default), "w", "wb", "x", "xb",
+ "a", or "ab" for binary mode, or "rt", "wt", "xt", or "at" for text
+ mode.
+
+ The format, check, preset and filters arguments specify the
+ compression settings, as for LZMACompressor, LZMADecompressor and
+ LZMAFile.
+
+ For binary mode, this function is equivalent to the LZMAFile
+ constructor: LZMAFile(filename, mode, ...). In this case, the
+ encoding, errors and newline arguments must not be provided.
+
+ For text mode, an LZMAFile object is created, and wrapped in an
+ io.TextIOWrapper instance with the specified encoding, error
+ handling behavior, and line ending(s).
+
+ """
+ if "t" in mode:
+ if "b" in mode:
+ raise ValueError("Invalid mode: %r" % (mode,))
+ else:
+ if encoding is not None:
+ raise ValueError("Argument 'encoding' not supported in binary mode")
+ if errors is not None:
+ raise ValueError("Argument 'errors' not supported in binary mode")
+ if newline is not None:
+ raise ValueError("Argument 'newline' not supported in binary mode")
+
+ lz_mode = mode.replace("t", "")
+ binary_file = LZMAFile(filename, lz_mode, format=format, check=check,
+ preset=preset, filters=filters)
+
+ if "t" in mode:
+ encoding = io.text_encoding(encoding)
+ return io.TextIOWrapper(binary_file, encoding, errors, newline)
+ else:
+ return binary_file
+
+
def compress(data, format=FORMAT_XZ, check=-1, preset=None, filters=None):
    """Compress a block of data.

    Refer to LZMACompressor's docstring for a description of the
    optional arguments *format*, *check*, *preset* and *filters*.

    For incremental compression, use an LZMACompressor instead.
    """
    # One-shot helper: feed everything to a fresh compressor, then append
    # flush() output, which finalizes and terminates the stream.
    compressor = LZMACompressor(format, check, preset, filters)
    return compressor.compress(data) + compressor.flush()
+
+
def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None):
    """Decompress a block of data.

    Refer to LZMADecompressor's docstring for a description of the
    optional arguments *format*, *check* and *filters*.

    For incremental decompression, use an LZMADecompressor instead.
    """
    chunks = []
    # The input may contain several concatenated streams; decode them one
    # at a time, each with a fresh decompressor.
    while True:
        decompressor = LZMADecompressor(format, memlimit, filters)
        try:
            chunk = decompressor.decompress(data)
        except LZMAError:
            if not chunks:
                raise  # Error on the first iteration; bail out.
            # Leftover data is not a valid LZMA/XZ stream; ignore it.
            break
        chunks.append(chunk)
        if not decompressor.eof:
            raise LZMAError("Compressed data ended before the "
                            "end-of-stream marker was reached")
        data = decompressor.unused_data
        if not data:
            break
    return b"".join(chunks)
diff --git a/llava/lib/python3.10/pprint.py b/llava/lib/python3.10/pprint.py
new file mode 100644
index 0000000000000000000000000000000000000000..d91421f0a6bf60a5a79b0c46050941d58637f180
--- /dev/null
+++ b/llava/lib/python3.10/pprint.py
@@ -0,0 +1,670 @@
+# Author: Fred L. Drake, Jr.
+# fdrake@acm.org
+#
+# This is a simple little module I wrote to make life easier. I didn't
+# see anything quite like it in the library, though I may have overlooked
+# something. I wrote this when I was trying to read some heavily nested
+# tuples with fairly non-descriptive content. This is modeled very much
+# after Lisp/Scheme - style pretty-printing of lists. If you find it
+# useful, thank small children who sleep at night.
+
+"""Support to pretty-print lists, tuples, & dictionaries recursively.
+
+Very simple, but useful, especially in debugging data structures.
+
+Classes
+-------
+
+PrettyPrinter()
+ Handle pretty-printing operations onto a stream using a configured
+ set of formatting parameters.
+
+Functions
+---------
+
+pformat()
+ Format a Python object into a pretty-printed representation.
+
+pprint()
+ Pretty-print a Python object to a stream [default is sys.stdout].
+
+saferepr()
+ Generate a 'standard' repr()-like value, but protect against recursive
+ data structures.
+
+"""
+
+import collections as _collections
+import dataclasses as _dataclasses
+import re
+import sys as _sys
+import types as _types
+from io import StringIO as _StringIO
+
+__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr",
+ "PrettyPrinter", "pp"]
+
+
def pprint(object, stream=None, indent=1, width=80, depth=None, *,
           compact=False, sort_dicts=True, underscore_numbers=False):
    """Pretty-print a Python object to a stream [default is sys.stdout]."""
    # Build a one-shot printer configured from the arguments and hand the
    # object straight to it.
    PrettyPrinter(stream=stream, indent=indent, width=width, depth=depth,
                  compact=compact, sort_dicts=sort_dicts,
                  underscore_numbers=underscore_numbers).pprint(object)
+
def pformat(object, indent=1, width=80, depth=None, *,
            compact=False, sort_dicts=True, underscore_numbers=False):
    """Format a Python object into a pretty-printed representation."""
    printer = PrettyPrinter(indent=indent, width=width, depth=depth,
                            compact=compact, sort_dicts=sort_dicts,
                            underscore_numbers=underscore_numbers)
    return printer.pformat(object)
+
def pp(object, *args, sort_dicts=False, **kwargs):
    """Pretty-print a Python object"""
    # Same as pprint(), but dict keys keep insertion order by default.
    pprint(object, *args, sort_dicts=sort_dicts, **kwargs)
+
def saferepr(object):
    """Version of repr() which can handle recursive data structures."""
    # _safe_repr returns (repr_string, readable, recursive); keep the string.
    return PrettyPrinter()._safe_repr(object, {}, None, 0)[0]
+
def isreadable(object):
    """Determine if saferepr(object) is readable by eval()."""
    # Index 1 of the (repr, readable, recursive) triple.
    return PrettyPrinter()._safe_repr(object, {}, None, 0)[1]
+
def isrecursive(object):
    """Determine if object requires a recursive representation."""
    # Index 2 of the (repr, readable, recursive) triple.
    return PrettyPrinter()._safe_repr(object, {}, None, 0)[2]
+
class _safe_key:
    """Helper function for key functions when sorting unorderable objects.

    The wrapped-object will fallback to a Py2.x style comparison for
    unorderable types (sorting first comparing the type name and then by
    the obj ids).  Does not work recursively, so dict.items() must have
    _safe_key applied to both the key and the value.

    """

    __slots__ = ['obj']

    def __init__(self, obj):
        self.obj = obj

    def __lt__(self, other):
        try:
            return self.obj < other.obj
        except TypeError:
            # Unorderable pair: fall back to a deterministic ordering by
            # type name, breaking ties with the objects' ids.
            self_rank = (str(type(self.obj)), id(self.obj))
            other_rank = (str(type(other.obj)), id(other.obj))
            return self_rank < other_rank
+
+def _safe_tuple(t):
+ "Helper function for comparing 2-tuples"
+ return _safe_key(t[0]), _safe_key(t[1])
+
+class PrettyPrinter:
    def __init__(self, indent=1, width=80, depth=None, stream=None, *,
                 compact=False, sort_dicts=True, underscore_numbers=False):
        """Handle pretty printing operations onto a stream using a set of
        configured parameters.

        indent
            Number of spaces to indent for each level of nesting.

        width
            Attempted maximum number of columns in the output.

        depth
            The maximum depth to print out nested structures.

        stream
            The desired output stream.  If omitted (or false), the standard
            output stream available at construction will be used.

        compact
            If true, several items will be combined in one line.

        sort_dicts
            If true, dict keys are sorted.

        underscore_numbers
            If true, stored for use by the repr helpers when formatting
            numbers (consumer is outside this chunk — confirm in _safe_repr).

        """
        # Validate eagerly so misconfiguration fails at construction time.
        indent = int(indent)
        width = int(width)
        if indent < 0:
            raise ValueError('indent must be >= 0')
        if depth is not None and depth <= 0:
            raise ValueError('depth must be > 0')
        if not width:
            raise ValueError('width must be != 0')
        self._depth = depth
        self._indent_per_level = indent
        self._width = width
        if stream is not None:
            self._stream = stream
        else:
            # Bind sys.stdout at construction, not at print time.
            self._stream = _sys.stdout
        self._compact = bool(compact)
        self._sort_dicts = sort_dicts
        self._underscore_numbers = underscore_numbers
+
+ def pprint(self, object):
+ self._format(object, self._stream, 0, 0, {}, 0)
+ self._stream.write("\n")
+
+ def pformat(self, object):
+ sio = _StringIO()
+ self._format(object, sio, 0, 0, {}, 0)
+ return sio.getvalue()
+
    def isrecursive(self, object):
        # Probe-only pass; the formatted string (index 0) is discarded.
        return self.format(object, {}, 0, 0)[2]
+
    def isreadable(self, object):
        # Readable for eval() only if nothing in it required a
        # recursion placeholder.
        s, readable, recursive = self.format(object, {}, 0, 0)
        return readable and not recursive
+
    def _format(self, object, stream, indent, allowance, context, level):
        # `context` maps id(obj) -> 1 for every object currently being
        # rendered on this path; hitting it again means a cycle.
        objid = id(object)
        if objid in context:
            stream.write(_recursion(object))
            self._recursive = True
            self._readable = False
            return
        rep = self._repr(object, context, level)
        max_width = self._width - indent - allowance
        # Only break onto multiple lines when the one-line repr overflows.
        if len(rep) > max_width:
            p = self._dispatch.get(type(object).__repr__, None)
            # Dispatch keys on the type's __repr__ so subclasses with a
            # custom __repr__ fall through to the plain repr below.
            if p is not None:
                context[objid] = 1
                p(self, object, stream, indent, allowance, context, level + 1)
                del context[objid]
                return
            elif (_dataclasses.is_dataclass(object) and
                  not isinstance(object, type) and
                  object.__dataclass_params__.repr and
                  # Check dataclass has generated repr method.
                  hasattr(object.__repr__, "__wrapped__") and
                  "__create_fn__" in object.__repr__.__wrapped__.__qualname__):
                context[objid] = 1
                self._pprint_dataclass(object, stream, indent, allowance, context, level + 1)
                del context[objid]
                return
        stream.write(rep)
+
    def _pprint_dataclass(self, object, stream, indent, allowance, context, level):
        cls_name = object.__class__.__name__
        # Continuation lines align after "ClassName(".
        indent += len(cls_name) + 1
        # Only fields with repr=True participate, matching dataclass repr.
        items = [(f.name, getattr(object, f.name)) for f in _dataclasses.fields(object) if f.repr]
        stream.write(cls_name + '(')
        self._format_namespace_items(items, stream, indent, allowance, context, level)
        stream.write(')')
+
    # Registry mapping a type's __repr__ function to the specialized
    # pretty-print method for that type (consulted by _format).
    _dispatch = {}
+
    def _pprint_dict(self, object, stream, indent, allowance, context, level):
        write = stream.write
        write('{')
        if self._indent_per_level > 1:
            write((self._indent_per_level - 1) * ' ')
        length = len(object)
        if length:
            if self._sort_dicts:
                # _safe_tuple makes mixed-type keys sortable.
                items = sorted(object.items(), key=_safe_tuple)
            else:
                items = object.items()
            self._format_dict_items(items, stream, indent, allowance + 1,
                                    context, level)
        write('}')

    _dispatch[dict.__repr__] = _pprint_dict
+
    def _pprint_ordered_dict(self, object, stream, indent, allowance, context, level):
        if not len(object):
            stream.write(repr(object))
            return
        cls = object.__class__
        # Rendered as OrderedDict([(k, v), ...]) — a list of pairs, so
        # insertion order is preserved verbatim.
        stream.write(cls.__name__ + '(')
        self._format(list(object.items()), stream,
                     indent + len(cls.__name__) + 1, allowance + 1,
                     context, level)
        stream.write(')')

    _dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict
+
+ def _pprint_list(self, object, stream, indent, allowance, context, level):
+ stream.write('[')
+ self._format_items(object, stream, indent, allowance + 1,
+ context, level)
+ stream.write(']')
+
+ _dispatch[list.__repr__] = _pprint_list
+
+ def _pprint_tuple(self, object, stream, indent, allowance, context, level):
+ stream.write('(')
+ endchar = ',)' if len(object) == 1 else ')'
+ self._format_items(object, stream, indent, allowance + len(endchar),
+ context, level)
+ stream.write(endchar)
+
+ _dispatch[tuple.__repr__] = _pprint_tuple
+
    def _pprint_set(self, object, stream, indent, allowance, context, level):
        if not len(object):
            stream.write(repr(object))
            return
        typ = object.__class__
        if typ is set:
            stream.write('{')
            endchar = '}'
        else:
            # frozenset and subclasses render as ClassName({...}).
            stream.write(typ.__name__ + '({')
            endchar = '})'
            indent += len(typ.__name__) + 1
        # Sort for deterministic output; _safe_key handles mixed types.
        object = sorted(object, key=_safe_key)
        self._format_items(object, stream, indent, allowance + len(endchar),
                           context, level)
        stream.write(endchar)

    _dispatch[set.__repr__] = _pprint_set
    _dispatch[frozenset.__repr__] = _pprint_set
+
    def _pprint_str(self, object, stream, indent, allowance, context, level):
        # Long strings are split on existing newlines, then on whitespace
        # runs, and emitted as adjacent literals (implicitly concatenated),
        # parenthesized when at the top level.
        write = stream.write
        if not len(object):
            write(repr(object))
            return
        chunks = []
        lines = object.splitlines(True)
        if level == 1:
            indent += 1
            allowance += 1
        max_width1 = max_width = self._width - indent
        for i, line in enumerate(lines):
            rep = repr(line)
            if i == len(lines) - 1:
                # The final chunk must also leave room for the allowance.
                max_width1 -= allowance
            if len(rep) <= max_width1:
                chunks.append(rep)
            else:
                # A list of alternating (non-space, space) strings
                parts = re.findall(r'\S*\s*', line)
                assert parts
                assert not parts[-1]
                parts.pop()  # drop empty last part
                max_width2 = max_width
                current = ''
                for j, part in enumerate(parts):
                    candidate = current + part
                    if j == len(parts) - 1 and i == len(lines) - 1:
                        max_width2 -= allowance
                    if len(repr(candidate)) > max_width2:
                        if current:
                            chunks.append(repr(current))
                        current = part
                    else:
                        current = candidate
                if current:
                    chunks.append(repr(current))
        if len(chunks) == 1:
            write(rep)
            return
        if level == 1:
            write('(')
        for i, rep in enumerate(chunks):
            if i > 0:
                write('\n' + ' '*indent)
            write(rep)
        if level == 1:
            write(')')

    _dispatch[str.__repr__] = _pprint_str
+
    def _pprint_bytes(self, object, stream, indent, allowance, context, level):
        write = stream.write
        # Short bytes never benefit from wrapping.
        if len(object) <= 4:
            write(repr(object))
            return
        # Top-level bytes are parenthesized so adjacent literals concatenate.
        parens = level == 1
        if parens:
            indent += 1
            allowance += 1
            write('(')
        delim = ''
        for rep in _wrap_bytes_repr(object, self._width - indent, allowance):
            write(delim)
            write(rep)
            if not delim:
                # Switch to newline-plus-indent after the first chunk.
                delim = '\n' + ' '*indent
        if parens:
            write(')')

    _dispatch[bytes.__repr__] = _pprint_bytes
+
    def _pprint_bytearray(self, object, stream, indent, allowance, context, level):
        write = stream.write
        write('bytearray(')
        # Reuse the bytes renderer; indent 10 = len('bytearray(').
        self._pprint_bytes(bytes(object), stream, indent + 10,
                           allowance + 1, context, level + 1)
        write(')')

    _dispatch[bytearray.__repr__] = _pprint_bytearray
+
    def _pprint_mappingproxy(self, object, stream, indent, allowance, context, level):
        stream.write('mappingproxy(')
        # Format the underlying mapping; indent 13 = len('mappingproxy(').
        self._format(object.copy(), stream, indent + 13, allowance + 1,
                     context, level)
        stream.write(')')

    _dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy
+
    def _pprint_simplenamespace(self, object, stream, indent, allowance, context, level):
        if type(object) is _types.SimpleNamespace:
            # The SimpleNamespace repr is "namespace" instead of the class
            # name, so we do the same here. For subclasses; use the class name.
            cls_name = 'namespace'
        else:
            cls_name = object.__class__.__name__
        indent += len(cls_name) + 1
        items = object.__dict__.items()
        stream.write(cls_name + '(')
        self._format_namespace_items(items, stream, indent, allowance, context, level)
        stream.write(')')

    _dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace
+
    def _format_dict_items(self, items, stream, indent, allowance, context,
                           level):
        # Emit 'key: value' pairs, one per line, comma-separated.
        write = stream.write
        indent += self._indent_per_level
        delimnl = ',\n' + ' ' * indent
        last_index = len(items) - 1
        for i, (key, ent) in enumerate(items):
            last = i == last_index
            rep = self._repr(key, context, level)
            write(rep)
            write(': ')
            # Only the final value must respect the caller's allowance;
            # intermediate values just need room for a comma.
            self._format(ent, stream, indent + len(rep) + 2,
                         allowance if last else 1,
                         context, level)
            if not last:
                write(delimnl)
+
    def _format_namespace_items(self, items, stream, indent, allowance, context, level):
        """Write ``name=value`` entries for a namespace-style object."""
        write = stream.write
        delimnl = ',\n' + ' ' * indent
        last_index = len(items) - 1
        for i, (key, ent) in enumerate(items):
            last = i == last_index
            write(key)
            write('=')
            if id(ent) in context:
                # Special-case representation of recursion to match standard
                # recursive dataclass repr.
                write("...")
            else:
                self._format(ent, stream, indent + len(key) + 1,
                             allowance if last else 1,
                             context, level)
            if not last:
                write(delimnl)
+
    def _format_items(self, items, stream, indent, allowance, context, level):
        """Write a comma-separated sequence of items, wrapping as needed.

        In compact mode several short reprs are packed onto each line;
        otherwise each item is formatted on its own line.
        """
        write = stream.write
        indent += self._indent_per_level
        if self._indent_per_level > 1:
            write((self._indent_per_level - 1) * ' ')
        delimnl = ',\n' + ' ' * indent
        delim = ''
        # width is the room left on the current line; max_width is the room
        # available on a fresh line.
        width = max_width = self._width - indent + 1
        it = iter(items)
        try:
            next_ent = next(it)
        except StopIteration:
            # Empty sequence: nothing to write.
            return
        last = False
        while not last:
            ent = next_ent
            try:
                next_ent = next(it)
            except StopIteration:
                # One-item lookahead lets the final item reserve *allowance*
                # columns for the caller's closing punctuation.
                last = True
                max_width -= allowance
                width -= allowance
            if self._compact:
                rep = self._repr(ent, context, level)
                w = len(rep) + 2
                if width < w:
                    # Does not fit here: start a fresh line for this item.
                    width = max_width
                    if delim:
                        delim = delimnl
                if width >= w:
                    # Fits inline (", " + rep); stay on the current line.
                    width -= w
                    write(delim)
                    delim = ', '
                    write(rep)
                    continue
            # Non-compact mode, or a single rep too wide even for a fresh
            # line: recurse so nested structures can wrap internally.
            write(delim)
            delim = delimnl
            self._format(ent, stream, indent,
                         allowance if last else 1,
                         context, level)
+
+ def _repr(self, object, context, level):
+ repr, readable, recursive = self.format(object, context.copy(),
+ self._depth, level)
+ if not readable:
+ self._readable = False
+ if recursive:
+ self._recursive = True
+ return repr
+
    def format(self, object, context, maxlevels, level):
        """Format object for a specific context, returning a string
        and flags indicating whether the representation is 'readable'
        and whether the object represents a recursive construct.
        """
        # Overridable hook; the default implementation delegates to the
        # "safe repr" machinery below.
        return self._safe_repr(object, context, maxlevels, level)
+
    def _pprint_default_dict(self, object, stream, indent, allowance, context, level):
        """Pretty-print a collections.defaultdict, including its factory."""
        if not len(object):
            # Empty: the builtin repr is already minimal.
            stream.write(repr(object))
            return
        rdf = self._repr(object.default_factory, context, level)
        cls = object.__class__
        indent += len(cls.__name__) + 1
        # Factory goes on the first line; the dict body starts on the next,
        # aligned under it.
        stream.write('%s(%s,\n%s' % (cls.__name__, rdf, ' ' * indent))
        self._pprint_dict(object, stream, indent, allowance + 1, context, level)
        stream.write(')')

    _dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict
+
    def _pprint_counter(self, object, stream, indent, allowance, context, level):
        """Pretty-print a collections.Counter, most common entries first."""
        if not len(object):
            stream.write(repr(object))
            return
        cls = object.__class__
        stream.write(cls.__name__ + '({')
        if self._indent_per_level > 1:
            stream.write((self._indent_per_level - 1) * ' ')
        # Counter's own repr orders by count; most_common() matches that.
        items = object.most_common()
        self._format_dict_items(items, stream,
                                indent + len(cls.__name__) + 1, allowance + 2,
                                context, level)
        stream.write('})')

    _dispatch[_collections.Counter.__repr__] = _pprint_counter
+
    def _pprint_chain_map(self, object, stream, indent, allowance, context, level):
        """Pretty-print a collections.ChainMap, one child map per line."""
        if not len(object.maps):
            stream.write(repr(object))
            return
        cls = object.__class__
        stream.write(cls.__name__ + '(')
        indent += len(cls.__name__) + 1
        for i, m in enumerate(object.maps):
            if i == len(object.maps) - 1:
                # Final map also closes the constructor call.
                self._format(m, stream, indent, allowance + 1, context, level)
                stream.write(')')
            else:
                self._format(m, stream, indent, 1, context, level)
                stream.write(',\n' + ' ' * indent)

    _dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map
+
    def _pprint_deque(self, object, stream, indent, allowance, context, level):
        """Pretty-print a collections.deque, including maxlen when set."""
        if not len(object):
            stream.write(repr(object))
            return
        cls = object.__class__
        stream.write(cls.__name__ + '(')
        indent += len(cls.__name__) + 1
        stream.write('[')
        if object.maxlen is None:
            self._format_items(object, stream, indent, allowance + 2,
                               context, level)
            stream.write('])')
        else:
            # Reserve two columns for "]," so maxlen can follow on its own
            # aligned line.
            self._format_items(object, stream, indent, 2,
                               context, level)
            rml = self._repr(object.maxlen, context, level)
            stream.write('],\n%smaxlen=%s)' % (' ' * indent, rml))

    _dispatch[_collections.deque.__repr__] = _pprint_deque
+
    def _pprint_user_dict(self, object, stream, indent, allowance, context, level):
        # Format the wrapped real dict; level - 1 so the UserDict wrapper
        # itself does not consume a nesting level.
        self._format(object.data, stream, indent, allowance, context, level - 1)

    _dispatch[_collections.UserDict.__repr__] = _pprint_user_dict
+
    def _pprint_user_list(self, object, stream, indent, allowance, context, level):
        # Format the wrapped real list; level - 1 so the UserList wrapper
        # itself does not consume a nesting level.
        self._format(object.data, stream, indent, allowance, context, level - 1)

    _dispatch[_collections.UserList.__repr__] = _pprint_user_list
+
    def _pprint_user_string(self, object, stream, indent, allowance, context, level):
        # Format the wrapped real string; level - 1 so the UserString wrapper
        # itself does not consume a nesting level.
        self._format(object.data, stream, indent, allowance, context, level - 1)

    _dispatch[_collections.UserString.__repr__] = _pprint_user_string
+
    def _safe_repr(self, object, context, maxlevels, level):
        # Return triple (repr_string, isreadable, isrecursive).
        # *context* maps id() of containers currently being formatted, so
        # self-references are detected; *maxlevels* bounds recursion depth.
        typ = type(object)
        if typ in _builtin_scalars:
            return repr(object), True, False

        r = getattr(typ, "__repr__", None)

        # ints (and subclasses that did not override __repr__).
        if issubclass(typ, int) and r is int.__repr__:
            if self._underscore_numbers:
                return f"{object:_d}", True, False
            else:
                return repr(object), True, False

        if issubclass(typ, dict) and r is dict.__repr__:
            if not object:
                return "{}", True, False
            objid = id(object)
            if maxlevels and level >= maxlevels:
                # Depth budget exhausted: summarize instead of recursing.
                return "{...}", False, objid in context
            if objid in context:
                # Already being formatted further up the stack -> recursive.
                return _recursion(object), False, True
            context[objid] = 1
            readable = True
            recursive = False
            components = []
            append = components.append
            level += 1
            if self._sort_dicts:
                items = sorted(object.items(), key=_safe_tuple)
            else:
                items = object.items()
            for k, v in items:
                krepr, kreadable, krecur = self.format(
                    k, context, maxlevels, level)
                vrepr, vreadable, vrecur = self.format(
                    v, context, maxlevels, level)
                append("%s: %s" % (krepr, vrepr))
                readable = readable and kreadable and vreadable
                if krecur or vrecur:
                    recursive = True
            del context[objid]
            return "{%s}" % ", ".join(components), readable, recursive

        if (issubclass(typ, list) and r is list.__repr__) or \
           (issubclass(typ, tuple) and r is tuple.__repr__):
            if issubclass(typ, list):
                if not object:
                    return "[]", True, False
                format = "[%s]"
            elif len(object) == 1:
                # Single-element tuples need the trailing comma.
                format = "(%s,)"
            else:
                if not object:
                    return "()", True, False
                format = "(%s)"
            objid = id(object)
            if maxlevels and level >= maxlevels:
                return format % "...", False, objid in context
            if objid in context:
                return _recursion(object), False, True
            context[objid] = 1
            readable = True
            recursive = False
            components = []
            append = components.append
            level += 1
            for o in object:
                orepr, oreadable, orecur = self.format(
                    o, context, maxlevels, level)
                append(orepr)
                if not oreadable:
                    readable = False
                if orecur:
                    recursive = True
            del context[objid]
            return format % ", ".join(components), readable, recursive

        rep = repr(object)
        # A repr like '<Foo object at 0x...>' cannot round-trip via eval().
        return rep, (rep and not rep.startswith('<')), False
+
# Types whose repr() is a single self-contained, readable literal; they are
# emitted directly, never wrapped or recursed into.
_builtin_scalars = frozenset({str, bytes, bytearray, float, complex,
                              bool, type(None)})
+
+def _recursion(object):
+ return (""
+ % (type(object).__name__, id(object)))
+
+
def _perfcheck(object=None):
    """Crude benchmark comparing raw _safe_repr() against full pformat()."""
    import time
    if object is None:
        object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
    p = PrettyPrinter()
    t1 = time.perf_counter()
    # _safe_repr() is a method taking (object, context, maxlevels, level);
    # the stray trailing True (left over from the old module-level
    # function's sort_dicts parameter) made this call raise TypeError.
    p._safe_repr(object, {}, None, 0)
    t2 = time.perf_counter()
    p.pformat(object)
    t3 = time.perf_counter()
    print("_safe_repr:", t2 - t1)
    print("pformat:", t3 - t2)
+
+def _wrap_bytes_repr(object, width, allowance):
+ current = b''
+ last = len(object) // 4 * 4
+ for i in range(0, len(object), 4):
+ part = object[i: i+4]
+ candidate = current + part
+ if i == last:
+ width -= allowance
+ if len(repr(candidate)) > width:
+ if current:
+ yield repr(current)
+ current = part
+ else:
+ current = candidate
+ if current:
+ yield repr(current)
+
# Run the micro-benchmark when this module is executed as a script.
if __name__ == "__main__":
    _perfcheck()
diff --git a/llava/lib/python3.10/profile.py b/llava/lib/python3.10/profile.py
new file mode 100644
index 0000000000000000000000000000000000000000..90c4e4c9ff583e43e164179c0c6fd37e22434e76
--- /dev/null
+++ b/llava/lib/python3.10/profile.py
@@ -0,0 +1,611 @@
+#! /usr/bin/env python3
+#
+# Class for profiling python code. rev 1.0 6/2/94
+#
+# Written by James Roskind
+# Based on prior profile module by Sjoerd Mullender...
+# which was hacked somewhat by: Guido van Rossum
+
+"""Class for profiling Python code."""
+
+# Copyright Disney Enterprises, Inc. All Rights Reserved.
+# Licensed to PSF under a Contributor Agreement
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
+# either express or implied. See the License for the specific language
+# governing permissions and limitations under the License.
+
+
+import io
+import sys
+import time
+import marshal
+
+__all__ = ["run", "runctx", "Profile"]
+
+# Sample timer for use with
+#i_count = 0
+#def integer_timer():
+# global i_count
+# i_count = i_count + 1
+# return i_count
+#itimes = integer_timer # replace with C coded timer returning integers
+
+class _Utils:
+ """Support class for utility functions which are shared by
+ profile.py and cProfile.py modules.
+ Not supposed to be used directly.
+ """
+
+ def __init__(self, profiler):
+ self.profiler = profiler
+
+ def run(self, statement, filename, sort):
+ prof = self.profiler()
+ try:
+ prof.run(statement)
+ except SystemExit:
+ pass
+ finally:
+ self._show(prof, filename, sort)
+
+ def runctx(self, statement, globals, locals, filename, sort):
+ prof = self.profiler()
+ try:
+ prof.runctx(statement, globals, locals)
+ except SystemExit:
+ pass
+ finally:
+ self._show(prof, filename, sort)
+
+ def _show(self, prof, filename, sort):
+ if filename is not None:
+ prof.dump_stats(filename)
+ else:
+ prof.print_stats(sort)
+
+
+#**************************************************************************
+# The following are the static member functions for the profiler class
+# Note that an instance of Profile() is *not* needed to call them.
+#**************************************************************************
+
def run(statement, filename=None, sort=-1):
    """Run statement under profiler optionally saving results in filename

    This function takes a single argument that can be passed to the
    "exec" statement, and an optional file name. In all cases this
    routine attempts to "exec" its first argument and gather profiling
    statistics from the execution. If no file name is present, then this
    function automatically prints a simple profiling report, sorted by the
    standard name string (file/line/function-name) that is presented in
    each line.
    """
    helper = _Utils(Profile)
    return helper.run(statement, filename, sort)
+
def runctx(statement, globals, locals, filename=None, sort=-1):
    """Run statement under profiler, supplying your own globals and locals,
    optionally saving results in filename.

    statement and filename have the same semantics as profile.run
    """
    helper = _Utils(Profile)
    return helper.runctx(statement, globals, locals, filename, sort)
+
+
class Profile:
    """Profiler class.

    self.cur is always a tuple. Each such tuple corresponds to a stack
    frame that is currently active (self.cur[-2]). The following are the
    definitions of its members. We use this external "parallel stack" to
    avoid contaminating the program that we are profiling. (old profiler
    used to write into the frames local dictionary!!) Derived classes
    can change the definition of some entries, as long as they leave
    [-2:] intact (frame and previous tuple). In case an internal error is
    detected, the -3 element is used as the function name.

    [ 0] = Time that needs to be charged to the parent frame's function.
           It is used so that a function call will not have to access the
           timing data for the parent frame.
    [ 1] = Total time spent in this frame's function, excluding time in
           subfunctions (this latter is tallied in cur[2]).
    [ 2] = Total time spent in subfunctions, excluding time executing the
           frame's function (this latter is tallied in cur[1]).
    [-3] = Name of the function that corresponds to this frame.
    [-2] = Actual frame that we correspond to (used to sync exception handling).
    [-1] = Our parent 6-tuple (corresponds to frame.f_back).

    Timing data for each function is stored as a 5-tuple in the dictionary
    self.timings[]. The index is always the name stored in self.cur[-3].
    The following are the definitions of the members:

    [0] = The number of times this function was called, not counting direct
          or indirect recursion,
    [1] = Number of times this function appears on the stack, minus one
    [2] = Total time spent internal to this function
    [3] = Cumulative time that this function was present on the stack. In
          non-recursive functions, this is the total execution time from start
          to finish of each invocation of a function, including time spent in
          all subfunctions.
    [4] = A dictionary indicating for each function name, the number of times
          it was called by us.
    """

    bias = 0  # calibration constant

    def __init__(self, timer=None, bias=None):
        """Set up timing state and pick a dispatcher matching the timer's
        return type (scalar, 2-tuple, or arbitrary sequence of numbers)."""
        self.timings = {}
        self.cur = None
        self.cmd = ""
        self.c_func_name = ""

        if bias is None:
            bias = self.bias
        self.bias = bias  # Materialize in local dict for lookup speed.

        if not timer:
            # Default timer: process time as a single float -> scalar path.
            self.timer = self.get_time = time.process_time
            self.dispatcher = self.trace_dispatch_i
        else:
            self.timer = timer
            t = self.timer()  # test out timer function
            try:
                length = len(t)
            except TypeError:
                # Timer returns a scalar.
                self.get_time = timer
                self.dispatcher = self.trace_dispatch_i
            else:
                if length == 2:
                    # Timer returns a (user, system) pair.
                    self.dispatcher = self.trace_dispatch
                else:
                    # Timer returns a sequence of numbers to be summed.
                    self.dispatcher = self.trace_dispatch_l
                # This get_time() implementation needs to be defined
                # here to capture the passed-in timer in the parameter
                # list (for performance).  Note that we can't assume
                # the timer() result contains two values in all
                # cases.
                def get_time_timer(timer=timer, sum=sum):
                    return sum(timer())
                self.get_time = get_time_timer
        self.t = self.get_time()
        self.simulate_call('profiler')

    # Heavily optimized dispatch routine for time.process_time() timer

    def trace_dispatch(self, frame, event, arg):
        timer = self.timer
        t = timer()
        t = t[0] + t[1] - self.t - self.bias

        if event == "c_call":
            self.c_func_name = arg.__name__

        if self.dispatch[event](self, frame, t):
            t = timer()
            self.t = t[0] + t[1]
        else:
            r = timer()
            self.t = r[0] + r[1] - t  # put back unrecorded delta

    # Dispatch routine for best timer program (return = scalar, fastest if
    # an integer but float works too -- and time.process_time() relies on that).

    def trace_dispatch_i(self, frame, event, arg):
        timer = self.timer
        t = timer() - self.t - self.bias

        if event == "c_call":
            self.c_func_name = arg.__name__

        if self.dispatch[event](self, frame, t):
            self.t = timer()
        else:
            self.t = timer() - t  # put back unrecorded delta

    # Dispatch routine for macintosh (timer returns time in ticks of
    # 1/60th second)

    def trace_dispatch_mac(self, frame, event, arg):
        timer = self.timer
        t = timer()/60.0 - self.t - self.bias

        if event == "c_call":
            self.c_func_name = arg.__name__

        if self.dispatch[event](self, frame, t):
            self.t = timer()/60.0
        else:
            self.t = timer()/60.0 - t  # put back unrecorded delta

    # SLOW generic dispatch routine for timer returning lists of numbers

    def trace_dispatch_l(self, frame, event, arg):
        get_time = self.get_time
        t = get_time() - self.t - self.bias

        if event == "c_call":
            self.c_func_name = arg.__name__

        if self.dispatch[event](self, frame, t):
            self.t = get_time()
        else:
            self.t = get_time() - t  # put back unrecorded delta

    # In the event handlers, the first 3 elements of self.cur are unpacked
    # into vrbls w/ 3-letter names.  The last two characters are meant to be
    # mnemonic:
    #     _pt  self.cur[0]  "parent time"   time to be charged to parent frame
    #     _it  self.cur[1]  "internal time" time spent directly in the function
    #     _et  self.cur[2]  "external time" time spent in subfunctions

    def trace_dispatch_exception(self, frame, t):
        rpt, rit, ret, rfn, rframe, rcur = self.cur
        if (rframe is not frame) and rcur:
            # Exception propagated out of rframe: treat it as a return.
            return self.trace_dispatch_return(rframe, t)
        self.cur = rpt, rit+t, ret, rfn, rframe, rcur
        return 1


    def trace_dispatch_call(self, frame, t):
        if self.cur and frame.f_back is not self.cur[-2]:
            # Our parallel stack is out of sync with the real one (e.g. an
            # exception unwound frames without 'return' events); catch up.
            rpt, rit, ret, rfn, rframe, rcur = self.cur
            if not isinstance(rframe, Profile.fake_frame):
                assert rframe.f_back is frame.f_back, ("Bad call", rfn,
                                                       rframe, rframe.f_back,
                                                       frame, frame.f_back)
                self.trace_dispatch_return(rframe, 0)
                assert (self.cur is None or \
                        frame.f_back is self.cur[-2]), ("Bad call",
                                                        self.cur[-3])
        fcode = frame.f_code
        fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name)
        self.cur = (t, 0, 0, fn, frame, self.cur)
        timings = self.timings
        if fn in timings:
            cc, ns, tt, ct, callers = timings[fn]
            timings[fn] = cc, ns + 1, tt, ct, callers
        else:
            timings[fn] = 0, 0, 0, 0, {}
        return 1

    def trace_dispatch_c_call (self, frame, t):
        # Built-in/extension functions have no code object; synthesize a
        # name keyed on the function name captured by the dispatcher.
        fn = ("", 0, self.c_func_name)
        self.cur = (t, 0, 0, fn, frame, self.cur)
        timings = self.timings
        if fn in timings:
            cc, ns, tt, ct, callers = timings[fn]
            timings[fn] = cc, ns+1, tt, ct, callers
        else:
            timings[fn] = 0, 0, 0, 0, {}
        return 1

    def trace_dispatch_return(self, frame, t):
        if frame is not self.cur[-2]:
            assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3])
            self.trace_dispatch_return(self.cur[-2], 0)

        # Prefix "r" means part of the Returning or exiting frame.
        # Prefix "p" means part of the Previous or Parent or older frame.

        rpt, rit, ret, rfn, frame, rcur = self.cur
        rit = rit + t
        frame_total = rit + ret

        ppt, pit, pet, pfn, pframe, pcur = rcur
        self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur

        timings = self.timings
        cc, ns, tt, ct, callers = timings[rfn]
        if not ns:
            # This is the only occurrence of the function on the stack.
            # Else this is a (directly or indirectly) recursive call, and
            # its cumulative time will get updated when the topmost call to
            # it returns.
            ct = ct + frame_total
            cc = cc + 1

        if pfn in callers:
            callers[pfn] = callers[pfn] + 1  # hack: gather more
            # stats such as the amount of time added to ct courtesy
            # of this specific call, and the contribution to cc
            # courtesy of this call.
        else:
            callers[pfn] = 1

        timings[rfn] = cc, ns - 1, tt + rit, ct, callers

        return 1


    # Maps sys.setprofile() event names to the handler methods above.
    dispatch = {
        "call": trace_dispatch_call,
        "exception": trace_dispatch_exception,
        "return": trace_dispatch_return,
        "c_call": trace_dispatch_c_call,
        "c_exception": trace_dispatch_return,  # the C function returned
        "c_return": trace_dispatch_return,
        }


    # The next few functions play with self.cmd. By carefully preloading
    # our parallel stack, we can force the profiled result to include
    # an arbitrary string as the name of the calling function.
    # We use self.cmd as that string, and the resulting stats look
    # very nice :-).

    def set_cmd(self, cmd):
        """Record *cmd* as the top-level name shown in the stats."""
        if self.cur[-1]: return  # already set
        self.cmd = cmd
        self.simulate_call(cmd)

    class fake_code:
        # Minimal stand-in for a code object, enough for the fn keys above.
        def __init__(self, filename, line, name):
            self.co_filename = filename
            self.co_line = line
            self.co_name = name
            self.co_firstlineno = 0

        def __repr__(self):
            return repr((self.co_filename, self.co_line, self.co_name))

    class fake_frame:
        # Minimal stand-in for a frame object (code + caller link).
        def __init__(self, code, prior):
            self.f_code = code
            self.f_back = prior

    def simulate_call(self, name):
        """Push a synthetic frame named *name* onto the parallel stack."""
        code = self.fake_code('profile', 0, name)
        if self.cur:
            pframe = self.cur[-2]
        else:
            pframe = None
        frame = self.fake_frame(code, pframe)
        self.dispatch['call'](self, frame, 0)

    # collect stats from pending stack, including getting final
    # timings for self.cmd frame.

    def simulate_cmd_complete(self):
        get_time = self.get_time
        t = get_time() - self.t
        while self.cur[-1]:
            # We *can* cause assertion errors here if
            # dispatch_trace_return checks for a frame match!
            self.dispatch['return'](self, self.cur[-2], t)
            t = 0
        self.t = get_time() - t


    def print_stats(self, sort=-1):
        """Print the collected statistics to stdout, sorted by *sort*."""
        import pstats
        pstats.Stats(self).strip_dirs().sort_stats(sort). \
              print_stats()

    def dump_stats(self, file):
        """Write the collected statistics to *file* in marshal format."""
        with open(file, 'wb') as f:
            self.create_stats()
            marshal.dump(self.stats, f)

    def create_stats(self):
        """Finalize timing of pending frames and build self.stats."""
        self.simulate_cmd_complete()
        self.snapshot_stats()

    def snapshot_stats(self):
        """Convert self.timings into the pstats-compatible self.stats."""
        self.stats = {}
        for func, (cc, ns, tt, ct, callers) in self.timings.items():
            callers = callers.copy()
            nc = 0
            for callcnt in callers.values():
                nc += callcnt
            self.stats[func] = cc, nc, tt, ct, callers


    # The following two methods can be called by clients to use
    # a profiler to profile a statement, given as a string.

    def run(self, cmd):
        """Profile *cmd* executed in the __main__ namespace."""
        import __main__
        dict = __main__.__dict__
        return self.runctx(cmd, dict, dict)

    def runctx(self, cmd, globals, locals):
        """Profile *cmd* executed with the given namespaces."""
        self.set_cmd(cmd)
        sys.setprofile(self.dispatcher)
        try:
            exec(cmd, globals, locals)
        finally:
            # Always detach the profiler, even if cmd raised.
            sys.setprofile(None)
        return self

    # This method is more useful to profile a single function call.
    def runcall(self, func, /, *args, **kw):
        self.set_cmd(repr(func))
        sys.setprofile(self.dispatcher)
        try:
            return func(*args, **kw)
        finally:
            sys.setprofile(None)


    #******************************************************************
    # The following calculates the overhead for using a profiler.  The
    # problem is that it takes a fair amount of time for the profiler
    # to stop the stopwatch (from the time it receives an event).
    # Similarly, there is a delay from the time that the profiler
    # re-starts the stopwatch before the user's code really gets to
    # continue.  The following code tries to measure the difference on
    # a per-event basis.
    #
    # Note that this difference is only significant if there are a lot of
    # events, and relatively little user code per event.  For example,
    # code with small functions will typically benefit from having the
    # profiler calibrated for the current platform.  This *could* be
    # done on the fly during init() time, but it is not worth the
    # effort.  Also note that if too large a value specified, then
    # execution time on some functions will actually appear as a
    # negative number.  It is *normal* for some functions (with very
    # low call counts) to have such negative stats, even if the
    # calibration figure is "correct."
    #
    # One alternative to profile-time calibration adjustments (i.e.,
    # adding in the magic little delta during each event) is to track
    # more carefully the number of events (and cumulatively, the number
    # of events during sub functions) that are seen.  If this were
    # done, then the arithmetic could be done after the fact (i.e., at
    # display time).  Currently, we track only call/return events.
    # These values can be deduced by examining the callees and callers
    # vectors for each functions.  Hence we *can* almost correct the
    # internal time figure at print time (note that we currently don't
    # track exception event processing counts).  Unfortunately, there
    # is currently no similar information for cumulative sub-function
    # time.  It would not be hard to "get all this info" at profiler
    # time.  Specifically, we would have to extend the tuples to keep
    # counts of this in each frame, and then extend the defs of timing
    # tuples to include the significant two figures. I'm a bit fearful
    # that this additional feature will slow the heavily optimized
    # event/time ratio (i.e., the profiler would run slower, fur a very
    # low "value added" feature.)
    #**************************************************************

    def calibrate(self, m, verbose=0):
        """Estimate per-event stopwatch overhead (see comment block above)."""
        if self.__class__ is not Profile:
            raise TypeError("Subclasses must override .calibrate().")

        saved_bias = self.bias
        self.bias = 0
        try:
            return self._calibrate_inner(m, verbose)
        finally:
            self.bias = saved_bias

    def _calibrate_inner(self, m, verbose):
        get_time = self.get_time

        # Set up a test case to be run with and without profiling.  Include
        # lots of calls, because we're trying to quantify stopwatch overhead.
        # Do not raise any exceptions, though, because we want to know
        # exactly how many profile events are generated (one call event, +
        # one return event, per Python-level call).

        def f1(n):
            for i in range(n):
                x = 1

        def f(m, f1=f1):
            for i in range(m):
                f1(100)

        f(m)    # warm up the cache

        # elapsed_noprofile <- time f(m) takes without profiling.
        t0 = get_time()
        f(m)
        t1 = get_time()
        elapsed_noprofile = t1 - t0
        if verbose:
            print("elapsed time without profiling =", elapsed_noprofile)

        # elapsed_profile <- time f(m) takes with profiling.  The difference
        # is profiling overhead, only some of which the profiler subtracts
        # out on its own.
        p = Profile()
        t0 = get_time()
        p.runctx('f(m)', globals(), locals())
        t1 = get_time()
        elapsed_profile = t1 - t0
        if verbose:
            print("elapsed time with profiling =", elapsed_profile)

        # reported_time <- "CPU seconds" the profiler charged to f and f1.
        total_calls = 0.0
        reported_time = 0.0
        for (filename, line, funcname), (cc, ns, tt, ct, callers) in \
                p.timings.items():
            if funcname in ("f", "f1"):
                total_calls += cc
                reported_time += tt

        if verbose:
            print("'CPU seconds' profiler reported =", reported_time)
            print("total # calls =", total_calls)
        if total_calls != m + 1:
            raise ValueError("internal error: total calls = %d" % total_calls)

        # reported_time - elapsed_noprofile = overhead the profiler wasn't
        # able to measure.  Divide by twice the number of calls (since there
        # are two profiler events per call in this test) to get the hidden
        # overhead per event.
        mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls
        if verbose:
            print("mean stopwatch overhead per profile event =", mean)
        return mean
+
+#****************************************************************************
+
def main():
    """Command-line entry point: profile a script or module from sys.argv.

    Returns the OptionParser (used by tests); exits with status 2 when no
    arguments are given.
    """
    import os
    from optparse import OptionParser

    usage = "profile.py [-o output_file_path] [-s sort] [-m module | scriptfile] [arg] ..."
    parser = OptionParser(usage=usage)
    parser.allow_interspersed_args = False
    parser.add_option('-o', '--outfile', dest="outfile",
        # Bug fix: the "<outfile>" placeholder had been stripped from this
        # help string (lost like an HTML tag); restored.
        help="Save stats to <outfile>", default=None)
    parser.add_option('-m', dest="module", action="store_true",
        help="Profile a library module.", default=False)
    parser.add_option('-s', '--sort', dest="sort",
        help="Sort order when printing to stdout, based on pstats.Stats class",
        default=-1)

    if not sys.argv[1:]:
        parser.print_usage()
        sys.exit(2)

    (options, args) = parser.parse_args()
    # Make the profiled program see only its own arguments.
    sys.argv[:] = args

    # The script that we're profiling may chdir, so capture the absolute path
    # to the output file at startup.
    if options.outfile is not None:
        options.outfile = os.path.abspath(options.outfile)

    if len(args) > 0:
        if options.module:
            # -m: run the named module as __main__ via runpy.
            import runpy
            code = "run_module(modname, run_name='__main__')"
            globs = {
                'run_module': runpy.run_module,
                'modname': args[0]
            }
        else:
            # Script path: compile it and emulate direct execution.
            progname = args[0]
            sys.path.insert(0, os.path.dirname(progname))
            with io.open_code(progname) as fp:
                code = compile(fp.read(), progname, 'exec')
            globs = {
                '__file__': progname,
                '__name__': '__main__',
                '__package__': None,
                '__cached__': None,
            }
        try:
            runctx(code, globs, None, options.outfile, options.sort)
        except BrokenPipeError as exc:
            # Prevent "Exception ignored" during interpreter shutdown.
            sys.stdout = None
            sys.exit(exc.errno)
    else:
        parser.print_usage()
    return parser
+
# When invoked as main program, invoke the profiler on a script
# (or a module with -m); see main() above for the CLI.
if __name__ == '__main__':
    main()
diff --git a/llava/lib/python3.10/pty.py b/llava/lib/python3.10/pty.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d8ce40df541c1872b52ae6ea069e0e975bd8a6b
--- /dev/null
+++ b/llava/lib/python3.10/pty.py
@@ -0,0 +1,187 @@
+"""Pseudo terminal utilities."""
+
+# Bugs: No signal handling. Doesn't set slave termios and window size.
+# Only tested on Linux, FreeBSD, and macOS.
+# See: W. Richard Stevens. 1992. Advanced Programming in the
+# UNIX Environment. Chapter 19.
+# Author: Steen Lumholt -- with additions by Guido.
+
+from select import select
+import os
+import sys
+import tty
+
+# names imported directly for test mocking purposes
+from os import close, waitpid
+from tty import setraw, tcgetattr, tcsetattr
+
+__all__ = ["openpty", "fork", "spawn"]
+
# Conventional POSIX file descriptor numbers for the standard streams.
STDIN_FILENO = 0
STDOUT_FILENO = 1
STDERR_FILENO = 2

# os.fork()/os.forkpty() return 0 to the child process.
CHILD = 0
+
def openpty():
    """openpty() -> (master_fd, slave_fd)
    Open a pty master/slave pair, using os.openpty() if possible."""

    # Prefer the OS-level implementation; fall back to the legacy
    # BSD-style /dev/ptyXY search only if it is unavailable or fails.
    try:
        return os.openpty()
    except (AttributeError, OSError):
        pass
    master_fd, slave_name = _open_terminal()
    return master_fd, slave_open(slave_name)
+
def master_open():
    """master_open() -> (master_fd, slave_name)
    Open a pty master and return the fd, and the filename of the slave end.
    Deprecated, use openpty() instead."""

    try:
        master_fd, slave_fd = os.openpty()
    except (AttributeError, OSError):
        # Fall through to the legacy /dev/ptyXY search below.
        pass
    else:
        # os.openpty() gives us both ends, but this API only returns the
        # master fd plus the slave's device name.
        slave_name = os.ttyname(slave_fd)
        os.close(slave_fd)
        return master_fd, slave_name

    return _open_terminal()
+
+def _open_terminal():
+ """Open pty master and return (master_fd, tty_name)."""
+ for x in 'pqrstuvwxyzPQRST':
+ for y in '0123456789abcdef':
+ pty_name = '/dev/pty' + x + y
+ try:
+ fd = os.open(pty_name, os.O_RDWR)
+ except OSError:
+ continue
+ return (fd, '/dev/tty' + x + y)
+ raise OSError('out of pty devices')
+
def slave_open(tty_name):
    """slave_open(tty_name) -> slave_fd
    Open the pty slave and acquire the controlling terminal, returning
    opened filedescriptor.
    Deprecated, use openpty() instead."""

    result = os.open(tty_name, os.O_RDWR)
    try:
        # On STREAMS-based systems (e.g. Solaris) the "ptem" and "ldterm"
        # modules must be pushed to get full terminal semantics.
        from fcntl import ioctl, I_PUSH
    except ImportError:
        # Platform has no I_PUSH: nothing more to do.
        return result
    try:
        ioctl(result, I_PUSH, "ptem")
        ioctl(result, I_PUSH, "ldterm")
    except OSError:
        # Best effort: not every platform supports/needs these modules.
        pass
    return result
+
def fork():
    """fork() -> (pid, master_fd)
    Fork and make the child a session leader with a controlling terminal."""

    try:
        pid, fd = os.forkpty()
    except (AttributeError, OSError):
        # os.forkpty() unavailable or failed: emulate it manually below.
        pass
    else:
        if pid == CHILD:
            try:
                os.setsid()
            except OSError:
                # os.forkpty() already set us session leader
                pass
        return pid, fd

    master_fd, slave_fd = openpty()
    pid = os.fork()
    if pid == CHILD:
        # Establish a new session.
        os.setsid()
        os.close(master_fd)

        # Slave becomes stdin/stdout/stderr of child.
        os.dup2(slave_fd, STDIN_FILENO)
        os.dup2(slave_fd, STDOUT_FILENO)
        os.dup2(slave_fd, STDERR_FILENO)
        if slave_fd > STDERR_FILENO:
            os.close(slave_fd)

        # Explicitly open the tty to make it become a controlling tty.
        tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR)
        os.close(tmp_fd)
    else:
        # Parent keeps only the master end.
        os.close(slave_fd)

    # Parent and child process.
    return pid, master_fd
+
+def _writen(fd, data):
+ """Write all the data to a descriptor."""
+ while data:
+ n = os.write(fd, data)
+ data = data[n:]
+
def _read(fd):
    """Default read function."""
    # Read at most 1024 bytes per call; callers treat b"" as EOF.
    return os.read(fd, 1024)
+
def _copy(master_fd, master_read=_read, stdin_read=_read):
    """Parent copy loop.
    Copies
            pty master -> standard output   (master_read)
            standard input -> pty master    (stdin_read)"""
    fds = [master_fd, STDIN_FILENO]
    # Loop ends by returning on master EOF (our stdin closing only stops
    # the input direction).
    while fds:
        rfds, _wfds, _xfds = select(fds, [], [])

        if master_fd in rfds:
            # Some OSes signal EOF by returning an empty byte string,
            # some throw OSErrors.
            try:
                data = master_read(master_fd)
            except OSError:
                data = b""
            if not data:  # Reached EOF.
                return    # Assume the child process has exited and is
                          # unreachable, so we clean up.
            else:
                os.write(STDOUT_FILENO, data)

        if STDIN_FILENO in rfds:
            data = stdin_read(STDIN_FILENO)
            if not data:
                # Our stdin closed: stop forwarding input but keep
                # draining the child's output.
                fds.remove(STDIN_FILENO)
            else:
                _writen(master_fd, data)
+
def spawn(argv, master_read=_read, stdin_read=_read):
    """Create a spawned process.

    Runs *argv* on a new pty, relaying its I/O to/from our terminal,
    and returns the child's waitpid() status."""
    # Accept a bare command string as a one-element argv.
    # NOTE(review): type(argv) == type('') is the dated spelling of
    # isinstance(argv, str); left unchanged to preserve exact semantics
    # (str subclasses are intentionally/accidentally not wrapped here).
    if type(argv) == type(''):
        argv = (argv,)
    sys.audit('pty.spawn', argv)

    pid, master_fd = fork()
    if pid == CHILD:
        # In the child: replace this process with the target program.
        os.execlp(argv[0], *argv)

    # In the parent: switch our terminal to raw mode so keystrokes pass
    # straight through to the child, remembering how to restore it.
    try:
        mode = tcgetattr(STDIN_FILENO)
        setraw(STDIN_FILENO)
        restore = True
    except tty.error:    # This is the same as termios.error
        # Not attached to a terminal (e.g. redirected stdin): skip raw mode.
        restore = False

    try:
        _copy(master_fd, master_read, stdin_read)
    finally:
        if restore:
            tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode)

    close(master_fd)
    return waitpid(pid, 0)[1]
diff --git a/llava/lib/python3.10/runpy.py b/llava/lib/python3.10/runpy.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7d3d8caad1611ed52f1be8d517ad2ac906f04db
--- /dev/null
+++ b/llava/lib/python3.10/runpy.py
@@ -0,0 +1,321 @@
+"""runpy.py - locating and running Python code using the module namespace
+
+Provides support for locating and running Python scripts using the Python
+module namespace instead of the native filesystem.
+
+This allows Python code to play nicely with non-filesystem based PEP 302
+importers when locating support scripts as well as when importing modules.
+"""
+# Written by Nick Coghlan
+# to implement PEP 338 (Executing Modules as Scripts)
+
+
+import sys
+import importlib.machinery # importlib first so we can test #15386 via -m
+import importlib.util
+import io
+import types
+import os
+
+__all__ = [
+ "run_module", "run_path",
+]
+
+class _TempModule(object):
+ """Temporarily replace a module in sys.modules with an empty namespace"""
+ def __init__(self, mod_name):
+ self.mod_name = mod_name
+ self.module = types.ModuleType(mod_name)
+ self._saved_module = []
+
+ def __enter__(self):
+ mod_name = self.mod_name
+ try:
+ self._saved_module.append(sys.modules[mod_name])
+ except KeyError:
+ pass
+ sys.modules[mod_name] = self.module
+ return self
+
+ def __exit__(self, *args):
+ if self._saved_module:
+ sys.modules[self.mod_name] = self._saved_module[0]
+ else:
+ del sys.modules[self.mod_name]
+ self._saved_module = []
+
+class _ModifiedArgv0(object):
+ def __init__(self, value):
+ self.value = value
+ self._saved_value = self._sentinel = object()
+
+ def __enter__(self):
+ if self._saved_value is not self._sentinel:
+ raise RuntimeError("Already preserving saved value")
+ self._saved_value = sys.argv[0]
+ sys.argv[0] = self.value
+
+ def __exit__(self, *args):
+ self.value = self._sentinel
+ sys.argv[0] = self._saved_value
+
+# TODO: Replace these helpers with importlib._bootstrap_external functions.
+def _run_code(code, run_globals, init_globals=None,
+ mod_name=None, mod_spec=None,
+ pkg_name=None, script_name=None):
+ """Helper to run code in nominated namespace"""
+ if init_globals is not None:
+ run_globals.update(init_globals)
+ if mod_spec is None:
+ loader = None
+ fname = script_name
+ cached = None
+ else:
+ loader = mod_spec.loader
+ fname = mod_spec.origin
+ cached = mod_spec.cached
+ if pkg_name is None:
+ pkg_name = mod_spec.parent
+ run_globals.update(__name__ = mod_name,
+ __file__ = fname,
+ __cached__ = cached,
+ __doc__ = None,
+ __loader__ = loader,
+ __package__ = pkg_name,
+ __spec__ = mod_spec)
+ exec(code, run_globals)
+ return run_globals
+
def _run_module_code(code, init_globals=None,
                     mod_name=None, mod_spec=None,
                     pkg_name=None, script_name=None):
    """Helper to run code in new namespace with sys modified"""
    fname = script_name if mod_spec is None else mod_spec.origin
    # Execute with a fresh temporary module installed in sys.modules and
    # with sys.argv[0] pointing at the script; both are restored on exit.
    with _TempModule(mod_name) as temp_module, _ModifiedArgv0(fname):
        mod_globals = temp_module.module.__dict__
        _run_code(code, mod_globals, init_globals,
                  mod_name, mod_spec, pkg_name, script_name)
    # Copy the globals of the temporary module, as they
    # may be cleared when the temporary module goes away
    return mod_globals.copy()
+
# Helper to get the full name, spec and code for a module
def _get_module_details(mod_name, error=ImportError):
    """Resolve *mod_name* and return a (mod_name, spec, code) triple.

    Every failure mode is reported by raising *error* (ImportError by
    default).  A package name is resolved to its ``<pkg>.__main__``
    submodule via a recursive call.
    """
    if mod_name.startswith("."):
        raise error("Relative module names not supported")
    pkg_name, _, _ = mod_name.rpartition(".")
    if pkg_name:
        # Try importing the parent to avoid catching initialization errors
        try:
            __import__(pkg_name)
        except ImportError as e:
            # If the parent or higher ancestor package is missing, let the
            # error be raised by find_spec() below and then be caught. But do
            # not allow other errors to be caught.
            if e.name is None or (e.name != pkg_name and
                    not pkg_name.startswith(e.name + ".")):
                raise
        # Warn if the module has already been imported under its normal name
        existing = sys.modules.get(mod_name)
        if existing is not None and not hasattr(existing, "__path__"):
            from warnings import warn
            msg = "{mod_name!r} found in sys.modules after import of " \
                "package {pkg_name!r}, but prior to execution of " \
                "{mod_name!r}; this may result in unpredictable " \
                "behaviour".format(mod_name=mod_name, pkg_name=pkg_name)
            warn(RuntimeWarning(msg))

    try:
        spec = importlib.util.find_spec(mod_name)
    except (ImportError, AttributeError, TypeError, ValueError) as ex:
        # This hack fixes an impedance mismatch between pkgutil and
        # importlib, where the latter raises other errors for cases where
        # pkgutil previously raised ImportError
        msg = "Error while finding module specification for {!r} ({}: {})"
        if mod_name.endswith(".py"):
            msg += (f". Try using '{mod_name[:-3]}' instead of "
                    f"'{mod_name}' as the module name.")
        raise error(msg.format(mod_name, type(ex).__name__, ex)) from ex
    if spec is None:
        raise error("No module named %s" % mod_name)
    if spec.submodule_search_locations is not None:
        # A package: execute its __main__ submodule instead.
        if mod_name == "__main__" or mod_name.endswith(".__main__"):
            raise error("Cannot use package as __main__ module")
        try:
            pkg_main_name = mod_name + ".__main__"
            return _get_module_details(pkg_main_name, error)
        except error as e:
            if mod_name not in sys.modules:
                raise  # No module loaded; being a package is irrelevant
            raise error(("%s; %r is a package and cannot " +
                         "be directly executed") %(e, mod_name))
    loader = spec.loader
    if loader is None:
        raise error("%r is a namespace package and cannot be executed"
                    % mod_name)
    try:
        code = loader.get_code(mod_name)
    except ImportError as e:
        raise error(format(e)) from e
    if code is None:
        raise error("No code object available for %s" % mod_name)
    return mod_name, spec, code
+
# Internal: raised (and caught) by _run_module_as_main() so that expected
# failures are printed as a plain message via sys.exit(), not a traceback.
class _Error(Exception):
    """Error that _run_module_as_main() should report without a traceback"""
+
+# XXX ncoghlan: Should this be documented and made public?
+# (Current thoughts: don't repeat the mistake that lead to its
+# creation when run_module() no longer met the needs of
+# mainmodule.c, but couldn't be changed because it was public)
def _run_module_as_main(mod_name, alter_argv=True):
    """Runs the designated module in the __main__ namespace

    Note that the executed module will have full access to the
    __main__ namespace. If this is not desirable, the run_module()
    function should be used to run the module code in a fresh namespace.

    At the very least, these variables in __main__ will be overwritten:
        __name__
        __file__
        __cached__
        __loader__
        __package__
    """
    try:
        if alter_argv or mod_name != "__main__": # i.e. -m switch
            mod_name, mod_spec, code = _get_module_details(mod_name, _Error)
        else:          # i.e. directory or zipfile execution
            mod_name, mod_spec, code = _get_main_module_details(_Error)
    except _Error as exc:
        # Expected failures are reported as a plain message, mirroring
        # what the interpreter prints for a bad -m target.
        msg = "%s: %s" % (sys.executable, exc)
        sys.exit(msg)
    main_globals = sys.modules["__main__"].__dict__
    if alter_argv:
        sys.argv[0] = mod_spec.origin
    return _run_code(code, main_globals, None,
                     "__main__", mod_spec)
+
def run_module(mod_name, init_globals=None,
               run_name=None, alter_sys=False):
    """Execute a module's code without importing it.

    mod_name -- an absolute module name or package name.

    Optional arguments:
    init_globals -- dictionary used to pre-populate the module's
    globals dictionary before the code is executed.

    run_name -- if not None, this will be used for setting __name__;
    otherwise, __name__ will be set to mod_name + '.__main__' if the
    named module is a package and to just mod_name otherwise.

    alter_sys -- if True, sys.argv[0] is updated with the value of
    __file__ and sys.modules[__name__] is updated with a temporary
    module object for the module being executed. Both are
    restored to their original values before the function returns.

    Returns the resulting module globals dictionary.
    """
    mod_name, mod_spec, code = _get_module_details(mod_name)
    if run_name is None:
        run_name = mod_name
    if alter_sys:
        return _run_module_code(code, init_globals, run_name, mod_spec)
    else:
        # Leave the sys module alone
        return _run_code(code, {}, init_globals, run_name, mod_spec)
+
def _get_main_module_details(error=ImportError):
    """Locate and return module details for a __main__ found on sys.path.

    Helper that gives a nicer error message when attempting to execute a
    zipfile or directory by invoking __main__.py.  The standard __main__
    module is temporarily removed from sys.modules so that its preexisting
    __loader__ entry does not interfere with the lookup; it is restored
    unconditionally before returning.
    """
    main_name = "__main__"
    saved_main = sys.modules[main_name]
    del sys.modules[main_name]
    try:
        return _get_module_details(main_name)
    except ImportError as exc:
        if main_name in str(exc):
            raise error("can't find %r module in %r" %
                        (main_name, sys.path[0])) from exc
        raise
    finally:
        sys.modules[main_name] = saved_main
+
+
+def _get_code_from_file(run_name, fname):
+ # Check for a compiled file first
+ from pkgutil import read_code
+ decoded_path = os.path.abspath(os.fsdecode(fname))
+ with io.open_code(decoded_path) as f:
+ code = read_code(f)
+ if code is None:
+ # That didn't work, so try it as normal source code
+ with io.open_code(decoded_path) as f:
+ code = compile(f.read(), fname, 'exec')
+ return code, fname
+
def run_path(path_name, init_globals=None, run_name=None):
    """Execute code located at the specified filesystem location.

    path_name -- filesystem location of a Python script, zipfile,
    or directory containing a top level __main__.py script.

    Optional arguments:
    init_globals -- dictionary used to pre-populate the module's
    globals dictionary before the code is executed.

    run_name -- if not None, this will be used to set __name__;
    otherwise, '<run_path>' will be used for __name__.

    Returns the resulting module globals dictionary.
    """
    if run_name is None:
        # Bug fix: the default was the empty string (the '<run_path>'
        # literal had been stripped), which registered sys.modules['']
        # and gave the executed code an empty __name__.
        run_name = "<run_path>"
    pkg_name = run_name.rpartition(".")[0]
    from pkgutil import get_importer
    importer = get_importer(path_name)
    # Trying to avoid importing imp so as to not consume the deprecation warning.
    is_NullImporter = False
    if type(importer).__module__ == 'imp':
        if type(importer).__name__ == 'NullImporter':
            is_NullImporter = True
    if isinstance(importer, type(None)) or is_NullImporter:
        # Not a valid sys.path entry, so run the code directly
        # execfile() doesn't help as we want to allow compiled files
        code, fname = _get_code_from_file(run_name, path_name)
        return _run_module_code(code, init_globals, run_name,
                                pkg_name=pkg_name, script_name=fname)
    else:
        # Finder is defined for path, so add it to
        # the start of sys.path
        sys.path.insert(0, path_name)
        try:
            # Here's where things are a little different from the run_module
            # case. There, we only had to replace the module in sys while the
            # code was running and doing so was somewhat optional. Here, we
            # have no choice and we have to remove it even while we read the
            # code. If we don't do this, a __loader__ attribute in the
            # existing __main__ module may prevent location of the new module.
            mod_name, mod_spec, code = _get_main_module_details()
            with _TempModule(run_name) as temp_module, \
                 _ModifiedArgv0(path_name):
                mod_globals = temp_module.module.__dict__
                return _run_code(code, mod_globals, init_globals,
                                 run_name, mod_spec, pkg_name).copy()
        finally:
            try:
                sys.path.remove(path_name)
            except ValueError:
                pass
+
+
+if __name__ == "__main__":
+ # Run the module specified as the next command line argument
+ if len(sys.argv) < 2:
+ print("No module specified for execution", file=sys.stderr)
+ else:
+ del sys.argv[0] # Make the requested module sys.argv[0]
+ _run_module_as_main(sys.argv[0])
diff --git a/llava/lib/python3.10/sndhdr.py b/llava/lib/python3.10/sndhdr.py
new file mode 100644
index 0000000000000000000000000000000000000000..96595c6974468213e0a93414af95f4981bb609c5
--- /dev/null
+++ b/llava/lib/python3.10/sndhdr.py
@@ -0,0 +1,257 @@
+"""Routines to help recognizing sound files.
+
+Function whathdr() recognizes various types of sound file headers.
+It understands almost all headers that SOX can decode.
+
+The return tuple contains the following items, in this order:
+- file type (as SOX understands it)
+- sampling rate (0 if unknown or hard to decode)
+- number of channels (0 if unknown or hard to decode)
+- number of frames in the file (-1 if unknown or hard to decode)
+- number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW
+
+If the file doesn't have a recognizable type, it returns None.
+If the file can't be opened, OSError is raised.
+
+To compute the total time, divide the number of frames by the
+sampling rate (a frame contains a sample for each channel).
+
+Function what() calls whathdr(). (It used to also use some
+heuristics for raw data, but this doesn't work very well.)
+
+Finally, the function test() is a simple main program that calls
+what() for all files mentioned on the argument list. For directory
+arguments it calls what() for all files in that directory. Default
+argument is "." (testing all files in the current directory). The
+option -r tells it to recurse down directories found inside
+explicitly given directories.
+"""
+
+# The file structure is top-down except that the test program and its
+# subroutine come last.
+
+__all__ = ['what', 'whathdr']
+
+from collections import namedtuple
+
# Result type returned by what()/whathdr(); per-field documentation is
# attached to the namedtuple properties just below.
SndHeaders = namedtuple('SndHeaders',
                        'filetype framerate nchannels nframes sampwidth')

SndHeaders.filetype.__doc__ = ("""The value for type indicates the data type
and will be one of the strings 'aifc', 'aiff', 'au','hcom',
'sndr', 'sndt', 'voc', 'wav', '8svx', 'sb', 'ub', or 'ul'.""")
SndHeaders.framerate.__doc__ = ("""The sampling_rate will be either the actual
value or 0 if unknown or difficult to decode.""")
SndHeaders.nchannels.__doc__ = ("""The number of channels or 0 if it cannot be
determined or if the value is difficult to decode.""")
SndHeaders.nframes.__doc__ = ("""The value for frames will be either the number
of frames or -1.""")
SndHeaders.sampwidth.__doc__ = ("""Either the sample size in bits or
'A' for A-LAW or 'U' for u-LAW.""")
+
def what(filename):
    """Guess the type of a sound file.

    Thin wrapper kept for API symmetry with imghdr.what(); all real work
    happens in whathdr().
    """
    return whathdr(filename)
+
+
def whathdr(filename):
    """Recognize sound headers.

    Reads the first 512 bytes of *filename* and offers them (plus the
    open file object, for recognizers that re-parse from the start) to
    each registered test function.  Returns a SndHeaders namedtuple for
    the first match, or None when nothing matches.
    """
    with open(filename, 'rb') as f:
        h = f.read(512)
        for tf in tests:
            res = tf(h, f)
            if res:
                return SndHeaders(*res)
        return None
+
+
+#-----------------------------------#
+# Subroutines per sound header type #
+#-----------------------------------#
+
+tests = []
+
def test_aifc(h, f):
    """Recognize AIFF ('FORM'/'AIFF') and AIFF-C ('FORM'/'AIFC') files."""
    import aifc
    if not h.startswith(b'FORM'):
        return None
    if h[8:12] == b'AIFC':
        fmt = 'aifc'
    elif h[8:12] == b'AIFF':
        fmt = 'aiff'
    else:
        return None
    # Let the aifc module parse the details from the start of the file.
    f.seek(0)
    try:
        a = aifc.open(f, 'r')
    except (EOFError, aifc.Error):
        return None
    return (fmt, a.getframerate(), a.getnchannels(),
            a.getnframes(), 8 * a.getsampwidth())

tests.append(test_aifc)
+
+
def test_au(h, f):
    """Recognize Sun/NeXT audio (.au / .snd) headers."""
    if h.startswith(b'.snd'):
        # Standard big-endian magic.
        func = get_long_be
    elif h[:4] in (b'\0ds.', b'dns.'):
        # Byte-swapped (little-endian) variants of the magic.
        func = get_long_le
    else:
        return None
    filetype = 'au'
    hdr_size = func(h[4:8])
    data_size = func(h[8:12])
    encoding = func(h[12:16])
    rate = func(h[16:20])
    nchannels = func(h[20:24])
    sample_size = 1 # default
    if encoding == 1:
        sample_bits = 'U'   # u-law (see module docstring)
    elif encoding == 2:
        sample_bits = 8
    elif encoding == 3:
        sample_bits = 16
        sample_size = 2
    else:
        sample_bits = '?'
    frame_size = sample_size * nchannels
    # NOTE: true division -- the frame count may be a float here.
    if frame_size:
        nframe = data_size / frame_size
    else:
        nframe = -1
    return filetype, rate, nchannels, nframe, sample_bits

tests.append(test_au)
+
+
def test_hcom(h, f):
    """Recognize Macintosh HCOM files ('FSSD' at 65, 'HCOM' at 128)."""
    if h[65:69] != b'FSSD' or h[128:132] != b'HCOM':
        return None
    divisor = get_long_be(h[144:148])
    if divisor:
        # NOTE: true division -- the rate may be a float here.
        rate = 22050 / divisor
    else:
        rate = 0
    return 'hcom', rate, 1, -1, 8

tests.append(test_hcom)
+
+
def test_voc(h, f):
    """Recognize Creative Labs .voc files."""
    if not h.startswith(b'Creative Voice File\032'):
        return None
    sbseek = get_short_le(h[20:22])    # offset of the first data block
    rate = 0
    # Only trust the rate byte when the first block is within the header
    # we read and is a sound-data block (type 1).
    if 0 <= sbseek < 500 and h[sbseek] == 1:
        ratecode = 256 - h[sbseek+4]
        if ratecode:
            rate = int(1000000.0 / ratecode)
    return 'voc', rate, 1, -1, 8

tests.append(test_voc)
+
+
def test_wav(h, f):
    """Recognize RIFF/WAVE files and delegate detail parsing to wave."""
    import wave
    # Layout: 'RIFF' <size> 'WAVE' 'fmt ' ...
    if not h.startswith(b'RIFF') or h[8:12] != b'WAVE' or h[12:16] != b'fmt ':
        return None
    f.seek(0)
    try:
        w = wave.open(f, 'r')
    except (EOFError, wave.Error):
        return None
    return ('wav', w.getframerate(), w.getnchannels(),
            w.getnframes(), 8*w.getsampwidth())

tests.append(test_wav)
+
+
def test_8svx(h, f):
    """Recognize Amiga IFF/8SVX files ('FORM' magic, '8SVX' form type)."""
    if not h.startswith(b'FORM') or h[8:12] != b'8SVX':
        return None
    # Should decode it to get #channels -- assume always 1
    return '8svx', 0, 1, 0, 8

tests.append(test_8svx)
+
+
def test_sndt(h, f):
    """Recognize SNDT files ('SOUND' magic); returns None on no match."""
    if h.startswith(b'SOUND'):
        nsamples = get_long_le(h[8:12])
        rate = get_short_le(h[20:22])
        return 'sndt', rate, 1, nsamples, 8

tests.append(test_sndt)
+
+
def test_sndr(h, f):
    """Recognize SNDR files: two NUL bytes followed by a sample rate."""
    if h.startswith(b'\0\0'):
        rate = get_short_le(h[2:4])
        # The magic is weak, so only accept rates in a plausible range.
        if 4000 <= rate <= 25000:
            return 'sndr', rate, 1, -1, 8

tests.append(test_sndr)
+
+
+#-------------------------------------------#
+# Subroutines to extract numbers from bytes #
+#-------------------------------------------#
+
def get_long_be(b):
    """Decode bytes 0..3 of *b* as a 32-bit big-endian unsigned integer."""
    hi = (b[0] << 8) | b[1]
    lo = (b[2] << 8) | b[3]
    return (hi << 16) | lo
+
def get_long_le(b):
    """Decode bytes 0..3 of *b* as a 32-bit little-endian unsigned integer."""
    lo = (b[1] << 8) | b[0]
    hi = (b[3] << 8) | b[2]
    return (hi << 16) | lo
+
def get_short_be(b):
    """Decode bytes 0..1 of *b* as a 16-bit big-endian unsigned integer."""
    high, low = b[0], b[1]
    return (high << 8) | low
+
def get_short_le(b):
    """Decode bytes 0..1 of *b* as a 16-bit little-endian unsigned integer."""
    low, high = b[0], b[1]
    return (high << 8) | low
+
+
+#--------------------#
+# Small test program #
+#--------------------#
+
def test():
    """Command-line driver: report the sound type of each argument.

    '-r' as the first argument enables recursion into subdirectories;
    with no arguments the current directory is scanned.
    """
    import sys
    recursive = 0
    if sys.argv[1:] and sys.argv[1] == '-r':
        del sys.argv[1:2]
        recursive = 1
    try:
        if sys.argv[1:]:
            testall(sys.argv[1:], recursive, 1)
        else:
            # Default: scan the current directory.
            testall(['.'], recursive, 1)
    except KeyboardInterrupt:
        sys.stderr.write('\n[Interrupted]\n')
        sys.exit(1)
+
def testall(list, recursive, toplevel):
    """Run what() on every name in *list*, reporting one result per line.

    Directories are entered when *recursive* is true or at the top level;
    otherwise they are reported and skipped.
    """
    import sys
    import os
    for filename in list:
        if os.path.isdir(filename):
            print(filename + '/:', end=' ')
            if recursive or toplevel:
                print('recursing down:')
                import glob
                # glob.escape keeps wildcard characters in the directory
                # name from being expanded.
                names = glob.glob(os.path.join(glob.escape(filename), '*'))
                testall(names, recursive, 0)
            else:
                print('*** directory (use -r) ***')
        else:
            print(filename + ':', end=' ')
            sys.stdout.flush()
            try:
                print(what(filename))
            except OSError:
                print('*** not found ***')
+
+if __name__ == '__main__':
+ test()
diff --git a/llava/lib/python3.10/socket.py b/llava/lib/python3.10/socket.py
new file mode 100644
index 0000000000000000000000000000000000000000..ecaf73cf307cfa65aeca8d0f9e516205814b54d2
--- /dev/null
+++ b/llava/lib/python3.10/socket.py
@@ -0,0 +1,972 @@
+# Wrapper module for _socket, providing some additional facilities
+# implemented in Python.
+
+"""\
+This module provides socket operations and some related functions.
+On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
+On other systems, it only supports IP. Functions specific for a
+socket are available as methods of the socket object.
+
+Functions:
+
+socket() -- create a new socket object
+socketpair() -- create a pair of new socket objects [*]
+fromfd() -- create a socket object from an open file descriptor [*]
+send_fds() -- Send file descriptor to the socket.
recv_fds() -- Receive file descriptors from the socket.
+fromshare() -- create a socket object from data received from socket.share() [*]
+gethostname() -- return the current hostname
+gethostbyname() -- map a hostname to its IP number
+gethostbyaddr() -- map an IP number or hostname to DNS info
+getservbyname() -- map a service name and a protocol name to a port number
+getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
+ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
+htons(), htonl() -- convert 16, 32 bit int from host to network byte order
+inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
+inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
+socket.getdefaulttimeout() -- get the default timeout value
+socket.setdefaulttimeout() -- set the default timeout value
+create_connection() -- connects to an address, with an optional timeout and
+ optional source address.
+
+ [*] not available on all platforms!
+
+Special objects:
+
+SocketType -- type object for socket objects
+error -- exception raised for I/O errors
+has_ipv6 -- boolean value indicating if IPv6 is supported
+
+IntEnum constants:
+
+AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
+SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
+
+Integer constants:
+
+Many other constants may be defined; these may be used in calls to
+the setsockopt() and getsockopt() methods.
+"""
+
+import _socket
+from _socket import *
+
+import os, sys, io, selectors
+from enum import IntEnum, IntFlag
+
+try:
+ import errno
+except ImportError:
+ errno = None
+EBADF = getattr(errno, 'EBADF', 9)
+EAGAIN = getattr(errno, 'EAGAIN', 11)
+EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11)
+
+__all__ = ["fromfd", "getfqdn", "create_connection", "create_server",
+ "has_dualstack_ipv6", "AddressFamily", "SocketKind"]
+__all__.extend(os._get_exports_list(_socket))
+
+# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for
+# nicer string representations.
+# Note that _socket only knows about the integer values. The public interface
+# in this module understands the enums and translates them back from integers
+# where needed (e.g. .family property of a socket object).
+
+IntEnum._convert_(
+ 'AddressFamily',
+ __name__,
+ lambda C: C.isupper() and C.startswith('AF_'))
+
+IntEnum._convert_(
+ 'SocketKind',
+ __name__,
+ lambda C: C.isupper() and C.startswith('SOCK_'))
+
+IntFlag._convert_(
+ 'MsgFlag',
+ __name__,
+ lambda C: C.isupper() and C.startswith('MSG_'))
+
+IntFlag._convert_(
+ 'AddressInfo',
+ __name__,
+ lambda C: C.isupper() and C.startswith('AI_'))
+
+_LOCALHOST = '127.0.0.1'
+_LOCALHOST_V6 = '::1'
+
+
+def _intenum_converter(value, enum_klass):
+ """Convert a numeric family value to an IntEnum member.
+
+ If it's not a known member, return the numeric value itself.
+ """
+ try:
+ return enum_klass(value)
+ except ValueError:
+ return value
+
+
+# WSA error codes
+if sys.platform.lower().startswith("win"):
+ errorTab = {}
+ errorTab[6] = "Specified event object handle is invalid."
+ errorTab[8] = "Insufficient memory available."
+ errorTab[87] = "One or more parameters are invalid."
+ errorTab[995] = "Overlapped operation aborted."
+ errorTab[996] = "Overlapped I/O event object not in signaled state."
+ errorTab[997] = "Overlapped operation will complete later."
+ errorTab[10004] = "The operation was interrupted."
+ errorTab[10009] = "A bad file handle was passed."
+ errorTab[10013] = "Permission denied."
+ errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
+ errorTab[10022] = "An invalid operation was attempted."
+ errorTab[10024] = "Too many open files."
+ errorTab[10035] = "The socket operation would block"
+ errorTab[10036] = "A blocking operation is already in progress."
+ errorTab[10037] = "Operation already in progress."
+ errorTab[10038] = "Socket operation on nonsocket."
+ errorTab[10039] = "Destination address required."
+ errorTab[10040] = "Message too long."
+ errorTab[10041] = "Protocol wrong type for socket."
+ errorTab[10042] = "Bad protocol option."
+ errorTab[10043] = "Protocol not supported."
+ errorTab[10044] = "Socket type not supported."
+ errorTab[10045] = "Operation not supported."
+ errorTab[10046] = "Protocol family not supported."
+ errorTab[10047] = "Address family not supported by protocol family."
+ errorTab[10048] = "The network address is in use."
+ errorTab[10049] = "Cannot assign requested address."
+ errorTab[10050] = "Network is down."
+ errorTab[10051] = "Network is unreachable."
+ errorTab[10052] = "Network dropped connection on reset."
+ errorTab[10053] = "Software caused connection abort."
+ errorTab[10054] = "The connection has been reset."
+ errorTab[10055] = "No buffer space available."
+ errorTab[10056] = "Socket is already connected."
+ errorTab[10057] = "Socket is not connected."
+ errorTab[10058] = "The network has been shut down."
+ errorTab[10059] = "Too many references."
+ errorTab[10060] = "The operation timed out."
+ errorTab[10061] = "Connection refused."
+ errorTab[10062] = "Cannot translate name."
+ errorTab[10063] = "The name is too long."
+ errorTab[10064] = "The host is down."
+ errorTab[10065] = "The host is unreachable."
+ errorTab[10066] = "Directory not empty."
+ errorTab[10067] = "Too many processes."
+ errorTab[10068] = "User quota exceeded."
+ errorTab[10069] = "Disk quota exceeded."
+ errorTab[10070] = "Stale file handle reference."
+ errorTab[10071] = "Item is remote."
+ errorTab[10091] = "Network subsystem is unavailable."
+ errorTab[10092] = "Winsock.dll version out of range."
+ errorTab[10093] = "Successful WSAStartup not yet performed."
+ errorTab[10101] = "Graceful shutdown in progress."
+ errorTab[10102] = "No more results from WSALookupServiceNext."
+ errorTab[10103] = "Call has been canceled."
+ errorTab[10104] = "Procedure call table is invalid."
+ errorTab[10105] = "Service provider is invalid."
+ errorTab[10106] = "Service provider failed to initialize."
+ errorTab[10107] = "System call failure."
+ errorTab[10108] = "Service not found."
+ errorTab[10109] = "Class type not found."
+ errorTab[10110] = "No more results from WSALookupServiceNext."
+ errorTab[10111] = "Call was canceled."
+ errorTab[10112] = "Database query was refused."
+ errorTab[11001] = "Host not found."
+ errorTab[11002] = "Nonauthoritative host not found."
+ errorTab[11003] = "This is a nonrecoverable error."
+ errorTab[11004] = "Valid name, no data record requested type."
+ errorTab[11005] = "QoS receivers."
+ errorTab[11006] = "QoS senders."
+ errorTab[11007] = "No QoS senders."
+ errorTab[11008] = "QoS no receivers."
+ errorTab[11009] = "QoS request confirmed."
+ errorTab[11010] = "QoS admission error."
+ errorTab[11011] = "QoS policy failure."
+ errorTab[11012] = "QoS bad style."
+ errorTab[11013] = "QoS bad object."
+ errorTab[11014] = "QoS traffic control error."
+ errorTab[11015] = "QoS generic error."
+ errorTab[11016] = "QoS service type error."
+ errorTab[11017] = "QoS flowspec error."
+ errorTab[11018] = "Invalid QoS provider buffer."
+ errorTab[11019] = "Invalid QoS filter style."
+ errorTab[11020] = "Invalid QoS filter style."
+ errorTab[11021] = "Incorrect QoS filter count."
+ errorTab[11022] = "Invalid QoS object length."
+ errorTab[11023] = "Incorrect QoS flow count."
+ errorTab[11024] = "Unrecognized QoS object."
+ errorTab[11025] = "Invalid QoS policy object."
+ errorTab[11026] = "Invalid QoS flow descriptor."
+ errorTab[11027] = "Invalid QoS provider-specific flowspec."
+ errorTab[11028] = "Invalid QoS provider-specific filterspec."
+ errorTab[11029] = "Invalid QoS shape discard mode object."
+ errorTab[11030] = "Invalid QoS shaping rate object."
+ errorTab[11031] = "Reserved policy QoS element type."
+ __all__.append("errorTab")
+
+
# Internal control-flow exception: raised by _sendfile_use_sendfile() to
# signal that the caller should fall back on the plain send() path.
class _GiveupOnSendfile(Exception): pass
+
+
+class socket(_socket.socket):
+
+ """A subclass of _socket.socket adding the makefile() method."""
+
+ __slots__ = ["__weakref__", "_io_refs", "_closed"]
+
    def __init__(self, family=-1, type=-1, proto=-1, fileno=None):
        # For user code address family and type values are IntEnum members, but
        # for the underlying _socket.socket they're just integers. The
        # constructor of _socket.socket converts the given argument to an
        # integer automatically.
        if fileno is None:
            # Apply the documented defaults (an IPv4 stream socket) only
            # when not wrapping an existing file descriptor.
            if family == -1:
                family = AF_INET
            if type == -1:
                type = SOCK_STREAM
            if proto == -1:
                proto = 0
        _socket.socket.__init__(self, family, type, proto, fileno)
        # _io_refs counts streams handed out by makefile() (incremented
        # there); _closed records whether close() has been requested.
        self._io_refs = 0
        self._closed = False
+
    def __enter__(self):
        # Context-manager support: "with socket(...) as s:".
        return self
+
    def __exit__(self, *args):
        # Close on context exit unless the socket was already closed.
        if not self._closed:
            self.close()
+
    def __repr__(self):
        """Wrap __repr__() to reveal the real class name and socket
        address(es).
        """
        closed = getattr(self, '_closed', False)
        s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \
            % (self.__class__.__module__,
               self.__class__.__qualname__,
               " [closed]" if closed else "",
               self.fileno(),
               self.family,
               self.type,
               self.proto)
        if not closed:
            # Addresses are best-effort: getsockname()/getpeername() fail
            # on unbound/unconnected sockets, in which case they are
            # simply omitted from the repr.
            try:
                laddr = self.getsockname()
                if laddr:
                    s += ", laddr=%s" % str(laddr)
            except error:
                pass
            try:
                raddr = self.getpeername()
                if raddr:
                    s += ", raddr=%s" % str(raddr)
            except error:
                pass
        s += '>'
        return s
+
    def __getstate__(self):
        # Sockets wrap OS-level descriptors and cannot meaningfully be
        # pickled, so refuse explicitly with a clear message.
        raise TypeError(f"cannot pickle {self.__class__.__name__!r} object")
+
    def dup(self):
        """dup() -> socket object

        Duplicate the socket. Return a new socket object connected to the same
        system resource. The new socket is non-inheritable.
        """
        fd = dup(self.fileno())
        sock = self.__class__(self.family, self.type, self.proto, fileno=fd)
        # The timeout lives on the Python object, not the descriptor, so
        # it must be carried over explicitly.
        sock.settimeout(self.gettimeout())
        return sock
+
    def accept(self):
        """accept() -> (socket object, address info)

        Wait for an incoming connection. Return a new socket
        representing the connection, and the address of the client.
        For IP sockets, the address info is a pair (hostaddr, port).
        """
        fd, addr = self._accept()
        sock = socket(self.family, self.type, self.proto, fileno=fd)
        # Issue #7995: if no default timeout is set and the listening
        # socket had a (non-zero) timeout, force the new socket in blocking
        # mode to override platform-specific socket flags inheritance.
        if getdefaulttimeout() is None and self.gettimeout():
            sock.setblocking(True)
        return sock, addr
+
    def makefile(self, mode="r", buffering=None, *,
                 encoding=None, errors=None, newline=None):
        """makefile(...) -> an I/O stream connected to the socket

        The arguments are as for io.open() after the filename, except the only
        supported mode values are 'r' (default), 'w' and 'b'.
        """
        # XXX refactor to share code?
        if not set(mode) <= {"r", "w", "b"}:
            raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,))
        writing = "w" in mode
        # A mode without 'w' (including plain 'b') reads by default.
        reading = "r" in mode or not writing
        assert reading or writing
        binary = "b" in mode
        rawmode = ""
        if reading:
            rawmode += "r"
        if writing:
            rawmode += "w"
        raw = SocketIO(self, rawmode)
        # One more I/O stream now references this socket.
        self._io_refs += 1
        if buffering is None:
            buffering = -1
        if buffering < 0:
            buffering = io.DEFAULT_BUFFER_SIZE
        if buffering == 0:
            # Unbuffered access is only meaningful for binary streams.
            if not binary:
                raise ValueError("unbuffered streams must be binary")
            return raw
        # Wrap the raw stream in the appropriate buffered layer.
        if reading and writing:
            buffer = io.BufferedRWPair(raw, raw, buffering)
        elif reading:
            buffer = io.BufferedReader(raw, buffering)
        else:
            assert writing
            buffer = io.BufferedWriter(raw, buffering)
        if binary:
            return buffer
        # Text mode: add a decoding/encoding layer on top.
        encoding = io.text_encoding(encoding)
        text = io.TextIOWrapper(buffer, encoding, errors, newline)
        text.mode = mode
        return text
+
    if hasattr(os, 'sendfile'):

        def _sendfile_use_sendfile(self, file, offset=0, count=None):
            # Fast path: hand the descriptor pair to the kernel via
            # os.sendfile(); raises _GiveupOnSendfile when that cannot
            # work so the caller can fall back on plain send().
            self._check_sendfile_params(file, offset, count)
            sockno = self.fileno()
            try:
                fileno = file.fileno()
            except (AttributeError, io.UnsupportedOperation) as err:
                raise _GiveupOnSendfile(err)  # not a regular file
            try:
                fsize = os.fstat(fileno).st_size
            except OSError as err:
                raise _GiveupOnSendfile(err)  # not a regular file
            if not fsize:
                return 0  # empty file
            # Truncate to 1GiB to avoid OverflowError, see bpo-38319.
            blocksize = min(count or fsize, 2 ** 30)
            timeout = self.gettimeout()
            if timeout == 0:
                raise ValueError("non-blocking sockets are not supported")
            # poll/select have the advantage of not requiring any
            # extra file descriptor, contrarily to epoll/kqueue
            # (also, they require a single syscall).
            if hasattr(selectors, 'PollSelector'):
                selector = selectors.PollSelector()
            else:
                selector = selectors.SelectSelector()
            selector.register(sockno, selectors.EVENT_WRITE)

            total_sent = 0
            # localize variable access to minimize overhead
            selector_select = selector.select
            os_sendfile = os.sendfile
            try:
                while True:
                    if timeout and not selector_select(timeout):
                        raise TimeoutError('timed out')
                    if count:
                        blocksize = count - total_sent
                        if blocksize <= 0:
                            break
                    try:
                        sent = os_sendfile(sockno, fileno, offset, blocksize)
                    except BlockingIOError:
                        if not timeout:
                            # Block until the socket is ready to send some
                            # data; avoids hogging CPU resources.
                            selector_select()
                        continue
                    except OSError as err:
                        if total_sent == 0:
                            # We can get here for different reasons, the main
                            # one being 'file' is not a regular mmap(2)-like
                            # file, in which case we'll fall back on using
                            # plain send().
                            raise _GiveupOnSendfile(err)
                        raise err from None
                    else:
                        if sent == 0:
                            break  # EOF
                        offset += sent
                        total_sent += sent
                return total_sent
            finally:
                # Leave the file position just past the data actually sent.
                if total_sent > 0 and hasattr(file, 'seek'):
                    file.seek(offset)
    else:
        def _sendfile_use_sendfile(self, file, offset=0, count=None):
            # This platform has no os.sendfile(); signal the caller to use
            # the _sendfile_use_send() fallback instead.
            raise _GiveupOnSendfile(
                "os.sendfile() not available on this platform")
+
def _sendfile_use_send(self, file, offset=0, count=None):
    # Fallback sendfile() path: read *file* in chunks and push each chunk
    # through plain send().  Used when os.sendfile() is unavailable or
    # gave up on this file.
    self._check_sendfile_params(file, offset, count)
    if self.gettimeout() == 0:
        raise ValueError("non-blocking sockets are not supported")
    if offset:
        file.seek(offset)
    blocksize = min(count, 8192) if count else 8192
    total_sent = 0
    # localize variable access to minimize overhead
    file_read = file.read
    sock_send = self.send
    try:
        while True:
            if count:
                blocksize = min(count - total_sent, blocksize)
                if blocksize <= 0:
                    break
            data = memoryview(file_read(blocksize))
            if not data:
                break  # EOF
            # send() may accept only part of the chunk; keep sending the
            # remaining view until the whole chunk is gone.
            while True:
                try:
                    sent = sock_send(data)
                except BlockingIOError:
                    continue
                else:
                    total_sent += sent
                    if sent < len(data):
                        data = data[sent:]
                    else:
                        break
        return total_sent
    finally:
        # Leave the file positioned just past the bytes actually sent.
        if total_sent > 0 and hasattr(file, 'seek'):
            file.seek(offset + total_sent)
+
+ def _check_sendfile_params(self, file, offset, count):
+ if 'b' not in getattr(file, 'mode', 'b'):
+ raise ValueError("file should be opened in binary mode")
+ if not self.type & SOCK_STREAM:
+ raise ValueError("only SOCK_STREAM type sockets are supported")
+ if count is not None:
+ if not isinstance(count, int):
+ raise TypeError(
+ "count must be a positive integer (got {!r})".format(count))
+ if count <= 0:
+ raise ValueError(
+ "count must be a positive integer (got {!r})".format(count))
+
def sendfile(self, file, offset=0, count=None):
    """sendfile(file[, offset[, count]]) -> sent

    Send a file until EOF is reached by using high-performance
    os.sendfile() and return the total number of bytes which
    were sent.
    *file* must be a regular file object opened in binary mode.
    If os.sendfile() is not available (e.g. Windows) or file is
    not a regular file socket.send() will be used instead.
    *offset* tells from where to start reading the file.
    If specified, *count* is the total number of bytes to transmit
    as opposed to sending the file until EOF is reached.
    File position is updated on return or also in case of error in
    which case file.tell() can be used to figure out the number of
    bytes which were sent.
    The socket must be of SOCK_STREAM type.
    Non-blocking sockets are not supported.
    """
    # Try the zero-copy os.sendfile() path first; fall back to chunked
    # send() when the file or platform is unsuitable.
    try:
        return self._sendfile_use_sendfile(file, offset, count)
    except _GiveupOnSendfile:
        return self._sendfile_use_send(file, offset, count)
+
+ def _decref_socketios(self):
+ if self._io_refs > 0:
+ self._io_refs -= 1
+ if self._closed:
+ self.close()
+
def _real_close(self, _ss=_socket.socket):
    # This function should not reference any globals. See issue #808164.
    # The base class is captured as a default argument so the method still
    # works during interpreter shutdown, when module globals may already
    # have been cleared.
    _ss.close(self)
+
def close(self):
    # This function should not reference any globals (see issue #808164):
    # it may run during interpreter shutdown when the module dict is
    # already cleared.  Mark the socket closed; tear it down immediately
    # unless makefile() file objects still hold references.
    self._closed = True
    if not self._io_refs > 0:
        self._real_close()
+
def detach(self):
    """detach() -> file descriptor

    Hand the underlying file descriptor over to the caller without
    closing it.  The socket object is marked closed and cannot be used
    afterwards, but the descriptor stays open and is returned for reuse.
    """
    # Flag ourselves closed first so later method calls fail cleanly.
    self._closed = True
    return super().detach()
+
@property
def family(self):
    """Read-only access to the address family for this socket,
    converted to an AddressFamily member where possible.
    """
    raw_family = super().family
    return _intenum_converter(raw_family, AddressFamily)
+
@property
def type(self):
    """Read-only access to the socket type, converted to a SocketKind
    member where possible.
    """
    raw_kind = super().type
    return _intenum_converter(raw_kind, SocketKind)
+
# On Windows, inheritance is a property of the underlying HANDLE rather
# than of a POSIX file descriptor, so a different pair of os helpers is
# selected at class-definition time.
if os.name == 'nt':
    def get_inheritable(self):
        return os.get_handle_inheritable(self.fileno())
    def set_inheritable(self, inheritable):
        os.set_handle_inheritable(self.fileno(), inheritable)
else:
    def get_inheritable(self):
        return os.get_inheritable(self.fileno())
    def set_inheritable(self, inheritable):
        os.set_inheritable(self.fileno(), inheritable)
# Shared docstrings for whichever pair was defined above.
get_inheritable.__doc__ = "Get the inheritable flag of the socket"
set_inheritable.__doc__ = "Set the inheritable flag of the socket"
+
def fromfd(fd, family, type, proto=0):
    """ fromfd(fd, family, type[, proto]) -> socket object

    Create a socket object from a duplicate of the given file
    descriptor.  The remaining arguments are the same as for socket();
    the caller's descriptor is left open and untouched.
    """
    duplicated = dup(fd)
    return socket(family, type, proto, duplicated)
+
# send_fds() only exists where the underlying socket type has sendmsg()
# (i.e. where SCM_RIGHTS ancillary data can be sent).
if hasattr(_socket.socket, "sendmsg"):
    import array

    def send_fds(sock, buffers, fds, flags=0, address=None):
        """ send_fds(sock, buffers, fds[, flags[, address]]) -> integer

        Send the list of file descriptors fds over an AF_UNIX socket.
        """
        # The descriptors travel as SCM_RIGHTS ancillary data, packed
        # into a native C int array.
        return sock.sendmsg(buffers, [(_socket.SOL_SOCKET,
            _socket.SCM_RIGHTS, array.array("i", fds))])
    __all__.append("send_fds")
+
# recv_fds() only exists where the underlying socket type has recvmsg().
if hasattr(_socket.socket, "recvmsg"):
    import array

    def recv_fds(sock, bufsize, maxfds, flags=0):
        """ recv_fds(sock, bufsize, maxfds[, flags]) -> (data, list of file
        descriptors, msg_flags, address)

        Receive up to maxfds file descriptors returning the message
        data and a list containing the descriptors.
        """
        # Array of ints
        fds = array.array("i")
        msg, ancdata, flags, addr = sock.recvmsg(bufsize,
            _socket.CMSG_LEN(maxfds * fds.itemsize))
        for cmsg_level, cmsg_type, cmsg_data in ancdata:
            if (cmsg_level == _socket.SOL_SOCKET and cmsg_type == _socket.SCM_RIGHTS):
                # Drop any truncated trailing descriptor bytes before
                # reinterpreting the ancillary payload as C ints.
                fds.frombytes(cmsg_data[:
                        len(cmsg_data) - (len(cmsg_data) % fds.itemsize)])

        return msg, list(fds), flags, addr
    __all__.append("recv_fds")
+
# fromshare() only exists on platforms whose sockets implement share()
# (Windows): it rebuilds a socket from the serialized share() blob.
if hasattr(_socket.socket, "share"):
    def fromshare(info):
        """ fromshare(info) -> socket object

        Create a socket object from the bytes object returned by
        socket.share(pid).
        """
        # family/type/proto are encoded inside *info*; the zeros are
        # placeholders the constructor ignores in this mode.
        return socket(0, 0, 0, info)
    __all__.append("fromshare")
+
# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain.
# This is used if _socket doesn't natively provide socketpair. It's
# always defined so that it can be patched in for testing purposes.
def _fallback_socketpair(family=AF_INET, type=SOCK_STREAM, proto=0):
    """Emulate socketpair() with a loopback listen/connect/accept dance.

    Returns a pair of connected stream sockets (server side, client side).
    """
    if family == AF_INET:
        host = _LOCALHOST
    elif family == AF_INET6:
        host = _LOCALHOST_V6
    else:
        raise ValueError("Only AF_INET and AF_INET6 socket address families "
                         "are supported")
    if type != SOCK_STREAM:
        raise ValueError("Only SOCK_STREAM socket type is supported")
    if proto != 0:
        raise ValueError("Only protocol zero is supported")

    # We create a connected TCP socket. Note the trick with
    # setblocking(False) that prevents us from having to create a thread.
    lsock = socket(family, type, proto)
    try:
        lsock.bind((host, 0))
        lsock.listen()
        # On IPv6, ignore flow_info and scope_id
        addr, port = lsock.getsockname()[:2]
        csock = socket(family, type, proto)
        try:
            # Non-blocking connect: it will complete once accept() runs.
            csock.setblocking(False)
            try:
                csock.connect((addr, port))
            except (BlockingIOError, InterruptedError):
                pass
            csock.setblocking(True)
            ssock, _ = lsock.accept()
        except:
            csock.close()
            raise
    finally:
        # The listening socket is only needed to produce the pair.
        lsock.close()

    # Authenticating avoids using a connection from something else
    # able to connect to {host}:{port} instead of us.
    # We expect only AF_INET and AF_INET6 families.
    try:
        if (
            ssock.getsockname() != csock.getpeername()
            or csock.getsockname() != ssock.getpeername()
        ):
            raise ConnectionError("Unexpected peer connection")
    except:
        # getsockname() and getpeername() can fail
        # if either socket isn't connected.
        ssock.close()
        csock.close()
        raise

    return (ssock, csock)
+
# Prefer the platform's native socketpair(); otherwise fall back to the
# loopback emulation above (and export it, since it is then the public API).
if hasattr(_socket, "socketpair"):
    def socketpair(family=None, type=SOCK_STREAM, proto=0):
        # Wrap the raw _socket pair in this module's socket class so the
        # returned objects carry the high-level API (makefile(), etc.).
        if family is None:
            try:
                family = AF_UNIX
            except NameError:
                # AF_UNIX is missing on this platform; use IPv4 instead.
                family = AF_INET
        a, b = _socket.socketpair(family, type, proto)
        a = socket(family, type, proto, a.detach())
        b = socket(family, type, proto, b.detach())
        return a, b

else:
    socketpair = _fallback_socketpair
    __all__.append("socketpair")
+
# One docstring shared by both the native wrapper and the fallback.
socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object)
Create a pair of socket objects from the sockets returned by the platform
socketpair() function.
The arguments are the same as for socket() except the default family is AF_UNIX
if defined on the platform; otherwise, the default is AF_INET.
"""

# Errno values that merely mean "would block" on a non-blocking socket;
# SocketIO maps these to a None return instead of raising.
_blocking_errnos = { EAGAIN, EWOULDBLOCK }
+
class SocketIO(io.RawIOBase):

    """Raw I/O implementation for stream sockets.

    This class supports the makefile() method on sockets.  It provides
    the raw I/O interface on top of a socket object.
    """

    # One might wonder why not let FileIO do the job instead.  There are two
    # main reasons why FileIO is not adapted:
    # - it wouldn't work under Windows (where you can't use read() and
    #   write() on a socket handle)
    # - it wouldn't work with socket timeouts (FileIO would ignore the
    #   timeout and consider the socket non-blocking)

    # XXX More docs

    def __init__(self, sock, mode):
        if mode not in ("r", "w", "rw", "rb", "wb", "rwb"):
            raise ValueError("invalid mode: %r" % mode)
        io.RawIOBase.__init__(self)
        self._sock = sock
        if "b" not in mode:
            mode += "b"
        self._mode = mode
        self._reading = "r" in mode
        self._writing = "w" in mode
        # Set once a read times out; later reads then fail fast because
        # the stream position is no longer well defined.
        self._timeout_occurred = False

    def readinto(self, b):
        """Read up to len(b) bytes into the writable buffer *b* and return
        the number of bytes read.  If the socket is non-blocking and no bytes
        are available, None is returned.

        If *b* is non-empty, a 0 return value indicates that the connection
        was shutdown at the other end.
        """
        self._checkClosed()
        self._checkReadable()
        if self._timeout_occurred:
            raise OSError("cannot read from timed out object")
        while True:
            try:
                return self._sock.recv_into(b)
            except timeout:
                self._timeout_occurred = True
                raise
            except error as e:
                # "Would block" on a non-blocking socket maps to None.
                if e.errno in _blocking_errnos:
                    return None
                raise

    def write(self, b):
        """Write the given bytes or bytearray object *b* to the socket
        and return the number of bytes written.  This can be less than
        len(b) if not all data could be written.  If the socket is
        non-blocking and no bytes could be written None is returned.
        """
        self._checkClosed()
        self._checkWritable()
        try:
            return self._sock.send(b)
        except error as e:
            # XXX what about EINTR?
            if e.errno in _blocking_errnos:
                return None
            raise

    def readable(self):
        """True if the SocketIO is open for reading.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._reading

    def writable(self):
        """True if the SocketIO is open for writing.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return self._writing

    def seekable(self):
        """True if the SocketIO is open for seeking.
        """
        if self.closed:
            raise ValueError("I/O operation on closed socket.")
        return super().seekable()

    def fileno(self):
        """Return the file descriptor of the underlying socket.
        """
        self._checkClosed()
        return self._sock.fileno()

    @property
    def name(self):
        # File-object-style name: the fd, or -1 once closed.
        if not self.closed:
            return self.fileno()
        else:
            return -1

    @property
    def mode(self):
        # The normalized binary mode string ("rb", "wb" or "rwb").
        return self._mode

    def close(self):
        """Close the SocketIO object.  This doesn't close the underlying
        socket, except if all references to it have disappeared.
        """
        if self.closed:
            return
        io.RawIOBase.close(self)
        # Tell the owning socket one makefile() reference is gone; it
        # closes itself once its own close() was requested too.
        self._sock._decref_socketios()
        self._sock = None
+
+
def getfqdn(name=''):
    """Get fully qualified domain name from name.

    An empty argument is interpreted as meaning the local host.

    First the hostname returned by gethostbyaddr() is checked, then
    possibly existing aliases.  In case no FQDN is available and `name`
    was given, it is returned unchanged.  If `name` was empty, '0.0.0.0'
    or '::', the hostname from gethostname() is returned.
    """
    target = name.strip()
    if not target or target in ('0.0.0.0', '::'):
        target = gethostname()
    try:
        hostname, aliases, _ipaddrs = gethostbyaddr(target)
    except error:
        # Resolution failed; hand back what we started with.
        return target
    # Prefer the first name that looks fully qualified (contains a dot).
    for candidate in [hostname] + aliases:
        if '.' in candidate:
            return candidate
    return hostname
+
+
_GLOBAL_DEFAULT_TIMEOUT = object()  # sentinel: "use getdefaulttimeout()"

def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
                      source_address=None):
    """Connect to *address* and return the socket object.

    Convenience function.  Connect to *address* (a 2-tuple ``(host,
    port)``) and return the socket object.  Passing the optional
    *timeout* parameter will set the timeout on the socket instance
    before attempting to connect.  If no *timeout* is supplied, the
    global default timeout setting returned by :func:`getdefaulttimeout`
    is used.  If *source_address* is set it must be a tuple of (host, port)
    for the socket to bind as a source address before making the connection.
    A host of '' or port 0 tells the OS to use the default.
    """

    host, port = address
    err = None
    # Try every address getaddrinfo() resolves (IPv4 and IPv6); return
    # the first socket that connects, remembering the last failure.
    for res in getaddrinfo(host, port, 0, SOCK_STREAM):
        af, socktype, proto, canonname, sa = res
        sock = None
        try:
            sock = socket(af, socktype, proto)
            if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
                sock.settimeout(timeout)
            if source_address:
                sock.bind(source_address)
            sock.connect(sa)
            # Break explicitly a reference cycle
            err = None
            return sock

        except error as _:
            err = _
            if sock is not None:
                sock.close()

    # All candidates failed: re-raise the last error (or complain if the
    # resolver produced nothing at all).
    if err is not None:
        try:
            raise err
        finally:
            # Break explicitly a reference cycle
            err = None
    else:
        raise error("getaddrinfo returns an empty list")
+
+
def has_dualstack_ipv6():
    """Return True if the platform supports creating a SOCK_STREAM socket
    which can handle both AF_INET and AF_INET6 (IPv4 / IPv6) connections.
    """
    required = ('IPPROTO_IPV6', 'IPV6_V6ONLY')
    if not has_ipv6 or not all(hasattr(_socket, name) for name in required):
        return False
    try:
        # Probe with a throwaway socket: only a dual-stack capable
        # platform accepts turning IPV6_V6ONLY off.
        with socket(AF_INET6, SOCK_STREAM) as probe:
            probe.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0)
    except error:
        return False
    return True
+
+
def create_server(address, *, family=AF_INET, backlog=None, reuse_port=False,
                  dualstack_ipv6=False):
    """Create a SOCK_STREAM socket bound to *address* (a 2-tuple
    (host, port)) and return it ready to accept connections.

    *family* should be either AF_INET or AF_INET6.
    *backlog* is the queue size passed to socket.listen(); None means
    use the listen() default.
    *reuse_port* dictates whether to use the SO_REUSEPORT socket option.
    *dualstack_ipv6*: if true and the platform supports it, create an
    AF_INET6 socket able to accept both IPv4 and IPv6 connections.
    When false it will explicitly disable this option on platforms that
    enable it by default (e.g. Linux).

    >>> with create_server(('', 8000)) as server:
    ...     while True:
    ...         conn, addr = server.accept()
    ...         # handle new connection
    """
    # Validate option combinations before any descriptor is created.
    if reuse_port and not hasattr(_socket, "SO_REUSEPORT"):
        raise ValueError("SO_REUSEPORT not supported on this platform")
    if dualstack_ipv6:
        if not has_dualstack_ipv6():
            raise ValueError("dualstack_ipv6 not supported on this platform")
        if family != AF_INET6:
            raise ValueError("dualstack_ipv6 requires AF_INET6 family")

    srv = socket(family, SOCK_STREAM)
    try:
        # Note about Windows.  We don't set SO_REUSEADDR because:
        # 1) It's unnecessary: bind() will succeed even in case of a
        #    previous closed socket on the same address and still in
        #    TIME_WAIT state.
        # 2) If set, another socket is free to bind() on the same
        #    address, effectively preventing this one from accepting
        #    connections.  Also, it may set the process in a state where
        #    it'll no longer respond to any signals or graceful kills.
        # See: https://learn.microsoft.com/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse
        want_reuseaddr = (os.name not in ('nt', 'cygwin')
                          and hasattr(_socket, 'SO_REUSEADDR'))
        if want_reuseaddr:
            try:
                srv.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
            except error:
                # Fail later on bind(), for platforms which may not
                # support this option.
                pass
        if reuse_port:
            srv.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)
        if has_ipv6 and family == AF_INET6:
            if dualstack_ipv6:
                srv.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0)
            elif (hasattr(_socket, "IPV6_V6ONLY")
                  and hasattr(_socket, "IPPROTO_IPV6")):
                srv.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1)
        try:
            srv.bind(address)
        except error as err:
            # Make the error self-describing: include the target address.
            msg = '%s (while attempting to bind on address %r)' % \
                  (err.strerror, address)
            raise error(err.errno, msg) from None
        if backlog is None:
            srv.listen()
        else:
            srv.listen(backlog)
        return srv
    except error:
        srv.close()
        raise
+
+
def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0):
    """Resolve host and port into list of address info entries.

    Translate the host/port argument into a sequence of 5-tuples that contain
    all the necessary arguments for creating a socket connected to that service.
    host is a domain name, a string representation of an IPv4/v6 address or
    None.  port is a string service name such as 'http', a numeric port number
    or None.  By passing None as the value of host and port, you can pass NULL
    to the underlying C API.

    The family, type and proto arguments can be optionally specified in order
    to narrow the list of addresses returned.  Passing zero as a value for each
    of these arguments selects the full range of results.
    """
    # This wraps _socket.getaddrinfo() purely to translate the numeric
    # family and socket-type values into their enum equivalents.
    return [(_intenum_converter(af, AddressFamily),
             _intenum_converter(kind, SocketKind),
             proto_num, canonname, sockaddr)
            for af, kind, proto_num, canonname, sockaddr
            in _socket.getaddrinfo(host, port, family, type, proto, flags)]
diff --git a/llava/lib/python3.10/sre_compile.py b/llava/lib/python3.10/sre_compile.py
new file mode 100644
index 0000000000000000000000000000000000000000..aed752d11d2e5e947a8557d48f5fb9b6854f4338
--- /dev/null
+++ b/llava/lib/python3.10/sre_compile.py
@@ -0,0 +1,808 @@
+#
+# Secret Labs' Regular Expression Engine
+#
+# convert template to internal format
+#
+# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved.
+#
+# See the sre.py file for information on usage and redistribution.
+#
+
+"""Internal support module for sre"""
+
+import _sre
+import sre_parse
+from sre_constants import *
+
# The C accelerator (_sre) and this compiler must agree on the bytecode
# format version.
assert _sre.MAGIC == MAGIC, "SRE module mismatch"

# Opcode groups that share handling in _compile().
_LITERAL_CODES = {LITERAL, NOT_LITERAL}
_REPEATING_CODES = {REPEAT, MIN_REPEAT, MAX_REPEAT}
_SUCCESS_CODES = {SUCCESS, FAILURE}
_ASSERT_CODES = {ASSERT, ASSERT_NOT}
# Operators that match exactly one character (candidates for REPEAT_ONE).
_UNIT_CODES = _LITERAL_CODES | {ANY, IN}
+
# Sets of lowercase characters which have the same uppercase.
# Used for full Unicode case-insensitive matching: characters in the same
# tuple must match each other even though lower()/upper() alone would not
# map between them.
_equivalences = (
    # LATIN SMALL LETTER I, LATIN SMALL LETTER DOTLESS I
    (0x69, 0x131), # iı
    # LATIN SMALL LETTER S, LATIN SMALL LETTER LONG S
    (0x73, 0x17f), # sſ
    # MICRO SIGN, GREEK SMALL LETTER MU
    (0xb5, 0x3bc), # µμ
    # COMBINING GREEK YPOGEGRAMMENI, GREEK SMALL LETTER IOTA, GREEK PROSGEGRAMMENI
    (0x345, 0x3b9, 0x1fbe), # \u0345ιι
    # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA
    (0x390, 0x1fd3), # ΐΐ
    # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA
    (0x3b0, 0x1fe3), # ΰΰ
    # GREEK SMALL LETTER BETA, GREEK BETA SYMBOL
    (0x3b2, 0x3d0), # βϐ
    # GREEK SMALL LETTER EPSILON, GREEK LUNATE EPSILON SYMBOL
    (0x3b5, 0x3f5), # εϵ
    # GREEK SMALL LETTER THETA, GREEK THETA SYMBOL
    (0x3b8, 0x3d1), # θϑ
    # GREEK SMALL LETTER KAPPA, GREEK KAPPA SYMBOL
    (0x3ba, 0x3f0), # κϰ
    # GREEK SMALL LETTER PI, GREEK PI SYMBOL
    (0x3c0, 0x3d6), # πϖ
    # GREEK SMALL LETTER RHO, GREEK RHO SYMBOL
    (0x3c1, 0x3f1), # ρϱ
    # GREEK SMALL LETTER FINAL SIGMA, GREEK SMALL LETTER SIGMA
    (0x3c2, 0x3c3), # ςσ
    # GREEK SMALL LETTER PHI, GREEK PHI SYMBOL
    (0x3c6, 0x3d5), # φϕ
    # CYRILLIC SMALL LETTER VE, CYRILLIC SMALL LETTER ROUNDED VE
    (0x432, 0x1c80), # вᲀ
    # CYRILLIC SMALL LETTER DE, CYRILLIC SMALL LETTER LONG-LEGGED DE
    (0x434, 0x1c81), # дᲁ
    # CYRILLIC SMALL LETTER O, CYRILLIC SMALL LETTER NARROW O
    (0x43e, 0x1c82), # оᲂ
    # CYRILLIC SMALL LETTER ES, CYRILLIC SMALL LETTER WIDE ES
    (0x441, 0x1c83), # сᲃ
    # CYRILLIC SMALL LETTER TE, CYRILLIC SMALL LETTER TALL TE, CYRILLIC SMALL LETTER THREE-LEGGED TE
    (0x442, 0x1c84, 0x1c85), # тᲄᲅ
    # CYRILLIC SMALL LETTER HARD SIGN, CYRILLIC SMALL LETTER TALL HARD SIGN
    (0x44a, 0x1c86), # ъᲆ
    # CYRILLIC SMALL LETTER YAT, CYRILLIC SMALL LETTER TALL YAT
    (0x463, 0x1c87), # ѣᲇ
    # CYRILLIC SMALL LETTER UNBLENDED UK, CYRILLIC SMALL LETTER MONOGRAPH UK
    (0x1c88, 0xa64b), # ᲈꙋ
    # LATIN SMALL LETTER S WITH DOT ABOVE, LATIN SMALL LETTER LONG S WITH DOT ABOVE
    (0x1e61, 0x1e9b), # ṡẛ
    # LATIN SMALL LIGATURE LONG S T, LATIN SMALL LIGATURE ST
    (0xfb05, 0xfb06), # ſtst
)

# Maps the lowercase code to lowercase codes which have the same uppercase.
_ignorecase_fixes = {i: tuple(j for j in t if i != j)
                     for t in _equivalences for i in t}
+
+def _combine_flags(flags, add_flags, del_flags,
+ TYPE_FLAGS=sre_parse.TYPE_FLAGS):
+ if add_flags & TYPE_FLAGS:
+ flags &= ~TYPE_FLAGS
+ return (flags | add_flags) & ~del_flags
+
def _compile(code, pattern, flags):
    # internal: compile a (sub)pattern
    # Appends SRE bytecode for *pattern* (a sequence of (op, av) pairs)
    # onto the *code* list.  Variable-length constructs reserve a "skip"
    # slot with emit(0) and patch in the real offset afterwards.
    emit = code.append
    _len = len
    LITERAL_CODES = _LITERAL_CODES
    REPEATING_CODES = _REPEATING_CODES
    SUCCESS_CODES = _SUCCESS_CODES
    ASSERT_CODES = _ASSERT_CODES
    # Case-folding helpers, selected once per call from the flags.
    iscased = None
    tolower = None
    fixes = None
    if flags & SRE_FLAG_IGNORECASE and not flags & SRE_FLAG_LOCALE:
        if flags & SRE_FLAG_UNICODE:
            iscased = _sre.unicode_iscased
            tolower = _sre.unicode_tolower
            fixes = _ignorecase_fixes
        else:
            iscased = _sre.ascii_iscased
            tolower = _sre.ascii_tolower
    for op, av in pattern:
        if op in LITERAL_CODES:
            if not flags & SRE_FLAG_IGNORECASE:
                emit(op)
                emit(av)
            elif flags & SRE_FLAG_LOCALE:
                emit(OP_LOCALE_IGNORE[op])
                emit(av)
            elif not iscased(av):
                emit(op)
                emit(av)
            else:
                lo = tolower(av)
                if not fixes:  # ascii
                    emit(OP_IGNORE[op])
                    emit(lo)
                elif lo not in fixes:
                    emit(OP_UNICODE_IGNORE[op])
                    emit(lo)
                else:
                    # The literal has Unicode case equivalents: compile it
                    # as a small case-insensitive character set instead.
                    emit(IN_UNI_IGNORE)
                    skip = _len(code); emit(0)
                    if op is NOT_LITERAL:
                        emit(NEGATE)
                    for k in (lo,) + fixes[lo]:
                        emit(LITERAL)
                        emit(k)
                    emit(FAILURE)
                    code[skip] = _len(code) - skip
        elif op is IN:
            charset, hascased = _optimize_charset(av, iscased, tolower, fixes)
            if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE:
                emit(IN_LOC_IGNORE)
            elif not hascased:
                emit(IN)
            elif not fixes:  # ascii
                emit(IN_IGNORE)
            else:
                emit(IN_UNI_IGNORE)
            skip = _len(code); emit(0)
            _compile_charset(charset, flags, code)
            code[skip] = _len(code) - skip
        elif op is ANY:
            if flags & SRE_FLAG_DOTALL:
                emit(ANY_ALL)
            else:
                emit(ANY)
        elif op in REPEATING_CODES:
            if flags & SRE_FLAG_TEMPLATE:
                raise error("internal: unsupported template operator %r" % (op,))
            if _simple(av[2]):
                # Single-character body: the engine's cheaper REPEAT_ONE /
                # MIN_REPEAT_ONE forms apply.
                if op is MAX_REPEAT:
                    emit(REPEAT_ONE)
                else:
                    emit(MIN_REPEAT_ONE)
                skip = _len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                emit(SUCCESS)
                code[skip] = _len(code) - skip
            else:
                emit(REPEAT)
                skip = _len(code); emit(0)
                emit(av[0])
                emit(av[1])
                _compile(code, av[2], flags)
                code[skip] = _len(code) - skip
                if op is MAX_REPEAT:
                    emit(MAX_UNTIL)
                else:
                    emit(MIN_UNTIL)
        elif op is SUBPATTERN:
            group, add_flags, del_flags, p = av
            if group:
                emit(MARK)
                emit((group-1)*2)
            # _compile_info(code, p, _combine_flags(flags, add_flags, del_flags))
            _compile(code, p, _combine_flags(flags, add_flags, del_flags))
            if group:
                emit(MARK)
                emit((group-1)*2+1)
        elif op in SUCCESS_CODES:
            emit(op)
        elif op in ASSERT_CODES:
            emit(op)
            skip = _len(code); emit(0)
            if av[0] >= 0:
                emit(0)  # look ahead
            else:
                lo, hi = av[1].getwidth()
                if lo != hi:
                    raise error("look-behind requires fixed-width pattern")
                emit(lo)  # look behind
            _compile(code, av[1], flags)
            emit(SUCCESS)
            code[skip] = _len(code) - skip
        elif op is CALL:
            emit(op)
            skip = _len(code); emit(0)
            _compile(code, av, flags)
            emit(SUCCESS)
            code[skip] = _len(code) - skip
        elif op is AT:
            emit(op)
            # Anchors are remapped to their multiline/locale/unicode
            # variants according to the active flags.
            if flags & SRE_FLAG_MULTILINE:
                av = AT_MULTILINE.get(av, av)
            if flags & SRE_FLAG_LOCALE:
                av = AT_LOCALE.get(av, av)
            elif flags & SRE_FLAG_UNICODE:
                av = AT_UNICODE.get(av, av)
            emit(av)
        elif op is BRANCH:
            emit(op)
            tail = []
            tailappend = tail.append
            for av in av[1]:
                skip = _len(code); emit(0)
                # _compile_info(code, av, flags)
                _compile(code, av, flags)
                emit(JUMP)
                tailappend(_len(code)); emit(0)
                code[skip] = _len(code) - skip
            emit(FAILURE)  # end of branch
            # Patch every alternative's JUMP to land past the branch.
            for tail in tail:
                code[tail] = _len(code) - tail
        elif op is CATEGORY:
            emit(op)
            if flags & SRE_FLAG_LOCALE:
                av = CH_LOCALE[av]
            elif flags & SRE_FLAG_UNICODE:
                av = CH_UNICODE[av]
            emit(av)
        elif op is GROUPREF:
            if not flags & SRE_FLAG_IGNORECASE:
                emit(op)
            elif flags & SRE_FLAG_LOCALE:
                emit(GROUPREF_LOC_IGNORE)
            elif not fixes:  # ascii
                emit(GROUPREF_IGNORE)
            else:
                emit(GROUPREF_UNI_IGNORE)
            emit(av-1)
        elif op is GROUPREF_EXISTS:
            emit(op)
            emit(av[0]-1)
            skipyes = _len(code); emit(0)
            _compile(code, av[1], flags)
            if av[2]:
                emit(JUMP)
                skipno = _len(code); emit(0)
                code[skipyes] = _len(code) - skipyes + 1
                _compile(code, av[2], flags)
                code[skipno] = _len(code) - skipno
            else:
                code[skipyes] = _len(code) - skipyes + 1
        else:
            raise error("internal: unsupported operand type %r" % (op,))
+
def _compile_charset(charset, flags, code):
    # internal: compile charset subprogram
    # Emits one opcode (plus arguments) per item of the already-optimized
    # *charset* list, terminated with FAILURE.
    emit = code.append
    for op, av in charset:
        emit(op)
        if op is NEGATE:
            pass
        elif op is LITERAL:
            emit(av)
        elif op is RANGE or op is RANGE_UNI_IGNORE:
            emit(av[0])
            emit(av[1])
        elif op is CHARSET:
            code.extend(av)
        elif op is BIGCHARSET:
            code.extend(av)
        elif op is CATEGORY:
            # Remap the category to its locale/unicode variant per flags.
            if flags & SRE_FLAG_LOCALE:
                emit(CH_LOCALE[av])
            elif flags & SRE_FLAG_UNICODE:
                emit(CH_UNICODE[av])
            else:
                emit(av)
        else:
            raise error("internal: unsupported set operator %r" % (op,))
    emit(FAILURE)
+
def _optimize_charset(charset, iscased=None, fixup=None, fixes=None):
    # internal: optimize character set
    # Returns (optimized_charset, hascased).  The character codes are
    # first painted into a bytearray *charmap*; writes beyond its current
    # size raise IndexError, which is the trigger for growing it (UCS1 ->
    # BMP) or for routing the item to the uncompressible *tail*.
    out = []
    tail = []
    charmap = bytearray(256)
    hascased = False
    for op, av in charset:
        while True:
            try:
                if op is LITERAL:
                    if fixup:
                        lo = fixup(av)
                        charmap[lo] = 1
                        if fixes and lo in fixes:
                            for k in fixes[lo]:
                                charmap[k] = 1
                        if not hascased and iscased(av):
                            hascased = True
                    else:
                        charmap[av] = 1
                elif op is RANGE:
                    r = range(av[0], av[1]+1)
                    if fixup:
                        if fixes:
                            for i in map(fixup, r):
                                charmap[i] = 1
                                if i in fixes:
                                    for k in fixes[i]:
                                        charmap[k] = 1
                        else:
                            for i in map(fixup, r):
                                charmap[i] = 1
                        if not hascased:
                            hascased = any(map(iscased, r))
                    else:
                        for i in r:
                            charmap[i] = 1
                elif op is NEGATE:
                    out.append((op, av))
                else:
                    tail.append((op, av))
            except IndexError:
                if len(charmap) == 256:
                    # character set contains non-UCS1 character codes
                    charmap += b'\0' * 0xff00
                    continue
                # Character set contains non-BMP character codes.
                # For range, all BMP characters in the range are already
                # processed.
                if fixup:
                    hascased = True
                    # For now, IN_UNI_IGNORE+LITERAL and
                    # IN_UNI_IGNORE+RANGE_UNI_IGNORE work for all non-BMP
                    # characters, because two characters (at least one of
                    # which is not in the BMP) match case-insensitively
                    # if and only if:
                    # 1) c1.lower() == c2.lower()
                    # 2) c1.lower() == c2 or c1.lower().upper() == c2
                    # Also, both c.lower() and c.lower().upper() are single
                    # characters for every non-BMP character.
                    if op is RANGE:
                        op = RANGE_UNI_IGNORE
                tail.append((op, av))
            break

    # compress character map
    # Find up to two contiguous runs of set bits; more than two runs
    # means literal/range form would not be compact, so fall through to
    # the bitmap representations below.
    runs = []
    q = 0
    while True:
        p = charmap.find(1, q)
        if p < 0:
            break
        if len(runs) >= 2:
            runs = None
            break
        q = charmap.find(0, p)
        if q < 0:
            runs.append((p, len(charmap)))
            break
        runs.append((p, q))
    if runs is not None:
        # use literal/range
        for p, q in runs:
            if q - p == 1:
                out.append((LITERAL, p))
            else:
                out.append((RANGE, (p, q - 1)))
        out += tail
        # if the case was changed or new representation is more compact
        if hascased or len(out) < len(charset):
            return out, hascased
        # else original character set is good enough
        return charset, hascased

    # use bitmap
    if len(charmap) == 256:
        data = _mk_bitmap(charmap)
        out.append((CHARSET, data))
        out += tail
        return out, hascased

    # To represent a big charset, first a bitmap of all characters in the
    # set is constructed. Then, this bitmap is sliced into chunks of 256
    # characters, duplicate chunks are eliminated, and each chunk is
    # given a number. In the compiled expression, the charset is
    # represented by a 32-bit word sequence, consisting of one word for
    # the number of different chunks, a sequence of 256 bytes (64 words)
    # of chunk numbers indexed by their original chunk position, and a
    # sequence of 256-bit chunks (8 words each).

    # Compression is normally good: in a typical charset, large ranges of
    # Unicode will be either completely excluded (e.g. if only cyrillic
    # letters are to be matched), or completely included (e.g. if large
    # subranges of Kanji match). These ranges will be represented by
    # chunks of all one-bits or all zero-bits.

    # Matching can be also done efficiently: the more significant byte of
    # the Unicode character is an index into the chunk number, and the
    # less significant byte is a bit index in the chunk (just like the
    # CHARSET matching).

    charmap = bytes(charmap) # should be hashable
    comps = {}
    mapping = bytearray(256)
    block = 0
    data = bytearray()
    for i in range(0, 65536, 256):
        chunk = charmap[i: i + 256]
        if chunk in comps:
            mapping[i // 256] = comps[chunk]
        else:
            mapping[i // 256] = comps[chunk] = block
            block += 1
            data += chunk
    data = _mk_bitmap(data)
    data[0:0] = [block] + _bytes_to_codes(mapping)
    out.append((BIGCHARSET, data))
    out += tail
    return out, hascased
+
+_CODEBITS = _sre.CODESIZE * 8
+MAXCODE = (1 << _CODEBITS) - 1
+_BITS_TRANS = b'0' + b'1' * 255
+def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int):
+ s = bits.translate(_BITS_TRANS)[::-1]
+ return [_int(s[i - _CODEBITS: i], 2)
+ for i in range(len(s), 0, -_CODEBITS)]
+
+def _bytes_to_codes(b):
+ # Convert block indices to word array
+ a = memoryview(b).cast('I')
+ assert a.itemsize == _sre.CODESIZE
+ assert len(a) * a.itemsize == len(b)
+ return a.tolist()
+
def _simple(p):
    # check if this subpattern is a "simple" (single-character) operator,
    # i.e. eligible for the engine's REPEAT_ONE fast path.
    if len(p) != 1:
        return False
    op, av = p[0]
    if op is not SUBPATTERN:
        return op in _UNIT_CODES
    # An unnamed group is simple iff its inner pattern is.
    return av[0] is None and _simple(av[-1])
+
+def _generate_overlap_table(prefix):
+ """
+ Generate an overlap table for the following prefix.
+ An overlap table is a table of the same size as the prefix which
+ informs about the potential self-overlap for each index in the prefix:
+ - if overlap[i] == 0, prefix[i:] can't overlap prefix[0:...]
+ - if overlap[i] == k with 0 < k <= i, prefix[i-k+1:i+1] overlaps with
+ prefix[0:k]
+ """
+ table = [0] * len(prefix)
+ for i in range(1, len(prefix)):
+ idx = table[i - 1]
+ while prefix[i] != prefix[idx]:
+ if idx == 0:
+ table[i] = 0
+ break
+ idx = table[idx - 1]
+ else:
+ table[i] = idx + 1
+ return table
+
+def _get_iscased(flags):
+ if not flags & SRE_FLAG_IGNORECASE:
+ return None
+ elif flags & SRE_FLAG_UNICODE:
+ return _sre.unicode_iscased
+ else:
+ return _sre.ascii_iscased
+
def _get_literal_prefix(pattern, flags):
    # look for literal prefix
    # Returns (prefix, prefix_skip, got_all): the longest case-sensitive
    # literal character prefix of *pattern*, the prefix offset of the
    # first capturing group (or None), and whether the whole pattern was
    # consumed.
    prefix = []
    prefixappend = prefix.append
    prefix_skip = None
    iscased = _get_iscased(flags)
    for op, av in pattern.data:
        if op is LITERAL:
            # A cased literal under IGNORECASE ends the usable prefix.
            if iscased and iscased(av):
                break
            prefixappend(av)
        elif op is SUBPATTERN:
            group, add_flags, del_flags, p = av
            flags1 = _combine_flags(flags, add_flags, del_flags)
            if flags1 & SRE_FLAG_IGNORECASE and flags1 & SRE_FLAG_LOCALE:
                break
            # Recurse into the subpattern with its combined flags.
            prefix1, prefix_skip1, got_all = _get_literal_prefix(p, flags1)
            if prefix_skip is None:
                if group is not None:
                    prefix_skip = len(prefix)
                elif prefix_skip1 is not None:
                    prefix_skip = len(prefix) + prefix_skip1
            prefix.extend(prefix1)
            if not got_all:
                break
        else:
            break
    else:
        return prefix, prefix_skip, True
    return prefix, prefix_skip, False
+
def _get_charset_prefix(pattern, flags):
    """Return a charset that any match must start with, or None.

    The charset is a list of (op, av) items suitable for _compile_charset.
    Used by _compile_info when no literal prefix exists.
    """
    # Unwrap leading subpatterns; note that `pattern` and `flags` are
    # rebound as we descend.
    while True:
        if not pattern.data:
            return None
        op, av = pattern.data[0]
        if op is not SUBPATTERN:
            break
        group, add_flags, del_flags, pattern = av
        flags = _combine_flags(flags, add_flags, del_flags)
        if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE:
            return None

    iscased = _get_iscased(flags)
    if op is LITERAL:
        if iscased and iscased(av):
            return None
        return [(op, av)]
    elif op is BRANCH:
        # A branch of single literals forms a charset of alternatives.
        charset = []
        charsetappend = charset.append
        for p in av[1]:
            if not p:
                return None
            op, av = p[0]
            if op is LITERAL and not (iscased and iscased(av)):
                charsetappend((op, av))
            else:
                return None
        return charset
    elif op is IN:
        charset = av
        if iscased:
            # Reject the charset if any member is case-sensitive under
            # the current flags.
            for op, av in charset:
                if op is LITERAL:
                    if iscased(av):
                        return None
                elif op is RANGE:
                    if av[1] > 0xffff:
                        return None
                    if any(map(iscased, range(av[0], av[1]+1))):
                        return None
        return charset
    return None
+
def _compile_info(code, pattern, flags):
    # internal: compile an info block. in the current version,
    # this contains min/max pattern width, and an optional literal
    # prefix or a character map
    lo, hi = pattern.getwidth()
    if hi > MAXCODE:
        hi = MAXCODE
    if lo == 0:
        # Pattern can match the empty string: emit a minimal info block.
        code.extend([INFO, 4, 0, lo, hi])
        return
    # look for a literal prefix
    prefix = []
    prefix_skip = 0
    charset = [] # not used
    if not (flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE):
        # look for literal prefix
        prefix, prefix_skip, got_all = _get_literal_prefix(pattern, flags)
        # if no prefix, look for charset prefix
        if not prefix:
            charset = _get_charset_prefix(pattern, flags)
##     if prefix:
##         print("*** PREFIX", prefix, prefix_skip)
##     if charset:
##         print("*** CHARSET", charset)
    # add an info block
    emit = code.append
    emit(INFO)
    # Remember where the skip word goes; it is patched at the end.
    skip = len(code); emit(0)
    # literal flag
    mask = 0
    if prefix:
        mask = SRE_INFO_PREFIX
        # NOTE: got_all is always bound here -- a non-empty prefix implies
        # the literal-prefix branch above ran.
        if prefix_skip is None and got_all:
            mask = mask | SRE_INFO_LITERAL
    elif charset:
        mask = mask | SRE_INFO_CHARSET
    emit(mask)
    # pattern length
    if lo < MAXCODE:
        emit(lo)
    else:
        emit(MAXCODE)
        prefix = prefix[:MAXCODE]
    emit(min(hi, MAXCODE))
    # add literal prefix
    if prefix:
        emit(len(prefix)) # length
        if prefix_skip is None:
            prefix_skip = len(prefix)
        emit(prefix_skip) # skip
        code.extend(prefix)
        # generate overlap table
        code.extend(_generate_overlap_table(prefix))
    elif charset:
        charset, hascased = _optimize_charset(charset)
        assert not hascased
        _compile_charset(charset, flags, code)
    # Patch the skip word so the engine can jump over this info block.
    code[skip] = len(code) - skip
+
def isstring(obj):
    """Return True when obj can serve as a pattern source (text or bytes)."""
    return isinstance(obj, str) or isinstance(obj, bytes)
+
def _code(p, flags):
    """Compile parsed pattern p into a flat list of sre code words."""
    combined = p.state.flags | flags
    code = []

    # The info block (widths, prefix/charset hints) always comes first.
    _compile_info(code, p, combined)

    # Then the pattern body itself, terminated by SUCCESS.
    _compile(code, p.data, combined)
    code.append(SUCCESS)

    return code
+
+def _hex_code(code):
+ return '[%s]' % ', '.join('%#0*x' % (_sre.CODESIZE*2+2, x) for x in code)
+
def dis(code):
    """Print a human-readable disassembly of compiled sre code.

    Debug aid used when a pattern is compiled with SRE_FLAG_DEBUG.  Walks
    the flat code-word list, printing one opcode per line with jump targets
    labelled, recursing into nested spans (charsets, branches, repeats,
    assertions, the info block).
    """
    import sys

    labels = set()          # offsets that are jump targets, printed with ':'
    level = 0               # current nesting depth, controls indentation
    offset_width = len(str(len(code) - 1))

    def dis_(start, end):
        # Disassemble the half-open span code[start:end] at one deeper level.
        def print_(*args, to=None):
            if to is not None:
                labels.add(to)
                args += ('(to %d)' % (to,),)
            print('%*d%s ' % (offset_width, start, ':' if start in labels else '.'),
                  end=' '*(level-1))
            print(*args)

        def print_2(*args):
            # Continuation line: indented, no offset column.
            print(end=' '*(offset_width + 2*level))
            print(*args)

        nonlocal level
        level += 1
        i = start
        while i < end:
            start = i
            op = code[i]
            i += 1
            op = OPCODES[op]
            if op in (SUCCESS, FAILURE, ANY, ANY_ALL,
                      MAX_UNTIL, MIN_UNTIL, NEGATE):
                print_(op)
            elif op in (LITERAL, NOT_LITERAL,
                        LITERAL_IGNORE, NOT_LITERAL_IGNORE,
                        LITERAL_UNI_IGNORE, NOT_LITERAL_UNI_IGNORE,
                        LITERAL_LOC_IGNORE, NOT_LITERAL_LOC_IGNORE):
                arg = code[i]
                i += 1
                print_(op, '%#02x (%r)' % (arg, chr(arg)))
            elif op is AT:
                arg = code[i]
                i += 1
                arg = str(ATCODES[arg])
                assert arg[:3] == 'AT_'
                print_(op, arg[3:])
            elif op is CATEGORY:
                arg = code[i]
                i += 1
                arg = str(CHCODES[arg])
                assert arg[:9] == 'CATEGORY_'
                print_(op, arg[9:])
            elif op in (IN, IN_IGNORE, IN_UNI_IGNORE, IN_LOC_IGNORE):
                skip = code[i]
                print_(op, skip, to=i+skip)
                dis_(i+1, i+skip)
                i += skip
            elif op in (RANGE, RANGE_UNI_IGNORE):
                lo, hi = code[i: i+2]
                i += 2
                print_(op, '%#02x %#02x (%r-%r)' % (lo, hi, chr(lo), chr(hi)))
            elif op is CHARSET:
                print_(op, _hex_code(code[i: i + 256//_CODEBITS]))
                i += 256//_CODEBITS
            elif op is BIGCHARSET:
                # arg = number of 256-char blocks; a 256-byte mapping table
                # precedes the per-block bitmaps.
                arg = code[i]
                i += 1
                mapping = list(b''.join(x.to_bytes(_sre.CODESIZE, sys.byteorder)
                                        for x in code[i: i + 256//_sre.CODESIZE]))
                print_(op, arg, mapping)
                i += 256//_sre.CODESIZE
                level += 1
                for j in range(arg):
                    print_2(_hex_code(code[i: i + 256//_CODEBITS]))
                    i += 256//_CODEBITS
                level -= 1
            elif op in (MARK, GROUPREF, GROUPREF_IGNORE, GROUPREF_UNI_IGNORE,
                        GROUPREF_LOC_IGNORE):
                arg = code[i]
                i += 1
                print_(op, arg)
            elif op is JUMP:
                skip = code[i]
                print_(op, skip, to=i+skip)
                i += 1
            elif op is BRANCH:
                # Alternatives are chained: each skip word leads to the next
                # branch arm, a zero skip terminates the chain.
                skip = code[i]
                print_(op, skip, to=i+skip)
                while skip:
                    dis_(i+1, i+skip)
                    i += skip
                    start = i
                    skip = code[i]
                    if skip:
                        print_('branch', skip, to=i+skip)
                    else:
                        print_(FAILURE)
                i += 1
            elif op in (REPEAT, REPEAT_ONE, MIN_REPEAT_ONE):
                skip, min, max = code[i: i+3]
                if max == MAXREPEAT:
                    max = 'MAXREPEAT'
                print_(op, skip, min, max, to=i+skip)
                dis_(i+3, i+skip)
                i += skip
            elif op is GROUPREF_EXISTS:
                arg, skip = code[i: i+2]
                print_(op, arg, skip, to=i+skip)
                i += 2
            elif op in (ASSERT, ASSERT_NOT):
                skip, arg = code[i: i+2]
                print_(op, skip, arg, to=i+skip)
                dis_(i+2, i+skip)
                i += skip
            elif op is INFO:
                skip, flags, min, max = code[i: i+4]
                if max == MAXREPEAT:
                    max = 'MAXREPEAT'
                print_(op, skip, bin(flags), min, max, to=i+skip)
                start = i+4
                if flags & SRE_INFO_PREFIX:
                    prefix_len, prefix_skip = code[i+4: i+6]
                    print_2('  prefix_skip', prefix_skip)
                    start = i + 6
                    prefix = code[start: start+prefix_len]
                    print_2('  prefix',
                            '[%s]' % ', '.join('%#02x' % x for x in prefix),
                            '(%r)' % ''.join(map(chr, prefix)))
                    start += prefix_len
                    print_2('  overlap', code[start: start+prefix_len])
                    start += prefix_len
                if flags & SRE_INFO_CHARSET:
                    level += 1
                    print_2('in')
                    dis_(start, i+skip)
                    level -= 1
                i += skip
            else:
                raise ValueError(op)

        level -= 1

    dis_(0, len(code))
+
+
def compile(p, flags=0):
    """Convert a pattern (source string/bytes or parsed tree) into an
    _sre pattern object."""
    if isstring(p):
        pattern = p
        p = sre_parse.parse(p, flags)
    else:
        pattern = None          # already parsed; original source unavailable

    code = _code(p, flags)

    if flags & SRE_FLAG_DEBUG:
        print()
        dis(code)

    # Build both directions of the name <-> group-number mapping.
    groupindex = p.state.groupdict
    indexgroup = [None] * p.state.groups
    for name, number in groupindex.items():
        indexgroup[number] = name

    return _sre.compile(
        pattern, flags | p.state.flags, code,
        p.state.groups-1,
        groupindex, tuple(indexgroup)
    )
diff --git a/llava/lib/python3.10/struct.py b/llava/lib/python3.10/struct.py
new file mode 100644
index 0000000000000000000000000000000000000000..d6bba58863649898a3f075cdda51cade9dc07f06
--- /dev/null
+++ b/llava/lib/python3.10/struct.py
@@ -0,0 +1,15 @@
# Public interface of the struct module; the actual implementation lives
# in the C accelerator module _struct.
__all__ = [
    # Functions
    'calcsize', 'pack', 'pack_into', 'unpack', 'unpack_from',
    'iter_unpack',

    # Classes
    'Struct',

    # Exceptions
    'error'
    ]

from _struct import *
from _struct import _clearcache
from _struct import __doc__
diff --git a/llava/lib/python3.10/symtable.py b/llava/lib/python3.10/symtable.py
new file mode 100644
index 0000000000000000000000000000000000000000..e11e5fffc4e1be49defe1c54803d65e398ba56bb
--- /dev/null
+++ b/llava/lib/python3.10/symtable.py
@@ -0,0 +1,322 @@
+"""Interface to the compiler's internal symbol tables"""
+
+import _symtable
+from _symtable import (USE, DEF_GLOBAL, DEF_NONLOCAL, DEF_LOCAL, DEF_PARAM,
+ DEF_IMPORT, DEF_BOUND, DEF_ANNOT, SCOPE_OFF, SCOPE_MASK, FREE,
+ LOCAL, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT, CELL)
+
+import weakref
+
+__all__ = ["symtable", "SymbolTable", "Class", "Function", "Symbol"]
+
def symtable(code, filename, compile_type):
    """ Return the toplevel *SymbolTable* for the source code.

    *filename* is the name of the file with the code
    and *compile_type* is the *compile()* mode argument.
    """
    raw_top = _symtable.symtable(code, filename, compile_type)
    # Wrap the raw C-level table in the appropriate Python class.
    return _newSymbolTable(raw_top, filename)
+
class SymbolTableFactory:
    """Memoizing factory that wraps raw _symtable tables in the matching
    SymbolTable subclass, reusing wrappers while they stay alive."""

    def __init__(self):
        # Weak values: a wrapper is dropped once no caller references it.
        self.__memo = weakref.WeakValueDictionary()

    def new(self, table, filename):
        kind = table.type
        if kind == _symtable.TYPE_FUNCTION:
            return Function(table, filename)
        elif kind == _symtable.TYPE_CLASS:
            return Class(table, filename)
        else:
            return SymbolTable(table, filename)

    def __call__(self, table, filename):
        key = (table, filename)
        try:
            return self.__memo[key]
        except KeyError:
            obj = self.new(table, filename)
            self.__memo[key] = obj
            return obj
+
+_newSymbolTable = SymbolTableFactory()
+
+
class SymbolTable:
    """Wrapper around a raw _symtable table with convenience accessors."""

    def __init__(self, raw_table, filename):
        self._table = raw_table      # raw table object from _symtable
        self._filename = filename    # source file the table was built from
        self._symbols = {}           # name -> Symbol cache, filled by lookup()

    def __repr__(self):
        if self.__class__ == SymbolTable:
            kind = ""
        else:
            # Subclasses (Function, Class) show their kind in the repr.
            kind = "%s " % self.__class__.__name__

        if self._table.name == "top":
            return "<{0}SymbolTable for module {1}>".format(kind, self._filename)
        else:
            return "<{0}SymbolTable for {1} in {2}>".format(kind,
                                                            self._table.name,
                                                            self._filename)

    def get_type(self):
        """Return the type of the symbol table.

        The values retuned are 'class', 'module' and
        'function'.
        """
        if self._table.type == _symtable.TYPE_MODULE:
            return "module"
        if self._table.type == _symtable.TYPE_FUNCTION:
            return "function"
        if self._table.type == _symtable.TYPE_CLASS:
            return "class"
        # Falls through (returning None) only if the raw table reports an
        # unknown type; the assert documents the expected range.
        assert self._table.type in (1, 2, 3), \
               "unexpected type: {0}".format(self._table.type)

    def get_id(self):
        """Return an identifier for the table.
        """
        return self._table.id

    def get_name(self):
        """Return the table's name.

        This corresponds to the name of the class, function
        or 'top' if the table is for a class, function or
        global respectively.
        """
        return self._table.name

    def get_lineno(self):
        """Return the number of the first line in the
        block for the table.
        """
        return self._table.lineno

    def is_optimized(self):
        """Return *True* if the locals in the table
        are optimizable.
        """
        return bool(self._table.type == _symtable.TYPE_FUNCTION)

    def is_nested(self):
        """Return *True* if the block is a nested class
        or function."""
        return bool(self._table.nested)

    def has_children(self):
        """Return *True* if the block has nested namespaces.
        """
        return bool(self._table.children)

    def get_identifiers(self):
        """Return a view object containing the names of symbols in the table.
        """
        return self._table.symbols.keys()

    def lookup(self, name):
        """Lookup a *name* in the table.

        Returns a *Symbol* instance.
        """
        sym = self._symbols.get(name)
        if sym is None:
            # Build the Symbol lazily and cache it for later lookups.
            flags = self._table.symbols[name]
            namespaces = self.__check_children(name)
            module_scope = (self._table.name == "top")
            sym = self._symbols[name] = Symbol(name, flags, namespaces,
                                               module_scope=module_scope)
        return sym

    def get_symbols(self):
        """Return a list of *Symbol* instances for
        names in the table.
        """
        return [self.lookup(ident) for ident in self.get_identifiers()]

    def __check_children(self, name):
        # Child tables whose block is bound to this name (e.g. a nested
        # function or class definition).
        return [_newSymbolTable(st, self._filename)
                for st in self._table.children
                if st.name == name]

    def get_children(self):
        """Return a list of the nested symbol tables.
        """
        return [_newSymbolTable(st, self._filename)
                for st in self._table.children]
+
+
class Function(SymbolTable):
    """Symbol table wrapper for a function block."""

    # Default values for instance variables (lazily computed caches)
    __params = None
    __locals = None
    __frees = None
    __globals = None
    __nonlocals = None

    def __idents_matching(self, test_func):
        # Identifiers whose raw flag word satisfies test_func.
        return tuple(ident for ident in self.get_identifiers()
                     if test_func(self._table.symbols[ident]))

    def get_parameters(self):
        """Return a tuple of parameters to the function.
        """
        if self.__params is None:
            self.__params = self.__idents_matching(lambda x:x & DEF_PARAM)
        return self.__params

    def get_locals(self):
        """Return a tuple of locals in the function.
        """
        if self.__locals is None:
            locs = (LOCAL, CELL)
            test = lambda x: ((x >> SCOPE_OFF) & SCOPE_MASK) in locs
            self.__locals = self.__idents_matching(test)
        return self.__locals

    def get_globals(self):
        """Return a tuple of globals in the function.
        """
        if self.__globals is None:
            glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
            test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob
            self.__globals = self.__idents_matching(test)
        return self.__globals

    def get_nonlocals(self):
        """Return a tuple of nonlocals in the function.
        """
        if self.__nonlocals is None:
            self.__nonlocals = self.__idents_matching(lambda x:x & DEF_NONLOCAL)
        return self.__nonlocals

    def get_frees(self):
        """Return a tuple of free variables in the function.
        """
        if self.__frees is None:
            is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE
            self.__frees = self.__idents_matching(is_free)
        return self.__frees
+
+
class Class(SymbolTable):
    """Symbol table wrapper for a class block."""

    __methods = None        # lazily computed cache

    def get_methods(self):
        """Return a tuple of methods declared in the class.
        """
        if self.__methods is None:
            # dict.fromkeys keeps first-seen order while removing duplicates.
            child_names = (child.name for child in self._table.children)
            self.__methods = tuple(dict.fromkeys(child_names))
        return self.__methods
+
+
class Symbol:
    """One name in a symbol table, with accessors for its scope and flags."""

    def __init__(self, name, flags, namespaces=None, *, module_scope=False):
        self.__name = name
        self.__flags = flags
        self.__scope = (flags >> SCOPE_OFF) & SCOPE_MASK  # like PyST_GetScope()
        self.__namespaces = namespaces or ()
        self.__module_scope = module_scope

    def __repr__(self):
        # The format text had been mangled down to an empty string, which
        # made every Symbol repr as ''; restore the conventional
        # "<symbol 'name'>" form.
        return "<symbol {0!r}>".format(self.__name)

    def get_name(self):
        """Return a name of a symbol.
        """
        return self.__name

    def is_referenced(self):
        """Return *True* if the symbol is used in
        its block.
        """
        return bool(self.__flags & _symtable.USE)

    def is_parameter(self):
        """Return *True* if the symbol is a parameter.
        """
        return bool(self.__flags & DEF_PARAM)

    def is_global(self):
        """Return *True* if the symbol is global.
        """
        return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT)
                    or (self.__module_scope and self.__flags & DEF_BOUND))

    def is_nonlocal(self):
        """Return *True* if the symbol is nonlocal."""
        return bool(self.__flags & DEF_NONLOCAL)

    def is_declared_global(self):
        """Return *True* if the symbol is declared global
        with a global statement."""
        return bool(self.__scope == GLOBAL_EXPLICIT)

    def is_local(self):
        """Return *True* if the symbol is local.
        """
        return bool(self.__scope in (LOCAL, CELL)
                    or (self.__module_scope and self.__flags & DEF_BOUND))

    def is_annotated(self):
        """Return *True* if the symbol is annotated.
        """
        return bool(self.__flags & DEF_ANNOT)

    def is_free(self):
        """Return *True* if a referenced symbol is
        not assigned to.
        """
        return bool(self.__scope == FREE)

    def is_imported(self):
        """Return *True* if the symbol is created from
        an import statement.
        """
        return bool(self.__flags & DEF_IMPORT)

    def is_assigned(self):
        """Return *True* if a symbol is assigned to."""
        return bool(self.__flags & DEF_LOCAL)

    def is_namespace(self):
        """Returns *True* if name binding introduces new namespace.

        If the name is used as the target of a function or class
        statement, this will be true.

        Note that a single name can be bound to multiple objects.  If
        is_namespace() is true, the name may also be bound to other
        objects, like an int or list, that does not introduce a new
        namespace.
        """
        return bool(self.__namespaces)

    def get_namespaces(self):
        """Return a list of namespaces bound to this name"""
        return self.__namespaces

    def get_namespace(self):
        """Return the single namespace bound to this name.

        Raises ValueError if the name is bound to multiple namespaces.
        """
        if len(self.__namespaces) != 1:
            raise ValueError("name is bound to multiple namespaces")
        return self.__namespaces[0]
+
if __name__ == "__main__":
    # Demo: build the symbol table of this very file and dump, for each
    # top-level identifier, whether it is local and whether it binds a
    # nested namespace.
    import os, sys
    with open(sys.argv[0]) as f:
        src = f.read()
    mod = symtable(src, os.path.split(sys.argv[0])[1], "exec")
    for ident in mod.get_identifiers():
        info = mod.lookup(ident)
        print(info, info.is_local(), info.is_namespace())
diff --git a/llava/lib/python3.10/turtle.py b/llava/lib/python3.10/turtle.py
new file mode 100644
index 0000000000000000000000000000000000000000..d287c15543528a8c24609ea1ef7b479ffc82d35f
--- /dev/null
+++ b/llava/lib/python3.10/turtle.py
@@ -0,0 +1,4141 @@
+#
+# turtle.py: a Tkinter based turtle graphics module for Python
+# Version 1.1b - 4. 5. 2009
+#
+# Copyright (C) 2006 - 2010 Gregor Lingl
+# email: glingl@aon.at
+#
+# This software is provided 'as-is', without any express or implied
+# warranty. In no event will the authors be held liable for any damages
+# arising from the use of this software.
+#
+# Permission is granted to anyone to use this software for any purpose,
+# including commercial applications, and to alter it and redistribute it
+# freely, subject to the following restrictions:
+#
+# 1. The origin of this software must not be misrepresented; you must not
+# claim that you wrote the original software. If you use this software
+# in a product, an acknowledgment in the product documentation would be
+# appreciated but is not required.
+# 2. Altered source versions must be plainly marked as such, and must not be
+# misrepresented as being the original software.
+# 3. This notice may not be removed or altered from any source distribution.
+
+
+"""
+Turtle graphics is a popular way for introducing programming to
+kids. It was part of the original Logo programming language developed
+by Wally Feurzig and Seymour Papert in 1966.
+
+Imagine a robotic turtle starting at (0, 0) in the x-y plane. After an ``import turtle``, give it
+the command turtle.forward(15), and it moves (on-screen!) 15 pixels in
+the direction it is facing, drawing a line as it moves. Give it the
+command turtle.right(25), and it rotates in-place 25 degrees clockwise.
+
+By combining together these and similar commands, intricate shapes and
+pictures can easily be drawn.
+
+----- turtle.py
+
+This module is an extended reimplementation of turtle.py from the
+Python standard distribution up to Python 2.5. (See: https://www.python.org)
+
+It tries to keep the merits of turtle.py and to be (nearly) 100%
+compatible with it. This means in the first place to enable the
+learning programmer to use all the commands, classes and methods
+interactively when using the module from within IDLE run with
+the -n switch.
+
+Roughly it has the following features added:
+
+- Better animation of the turtle movements, especially of turning the
+ turtle. So the turtles can more easily be used as a visual feedback
+ instrument by the (beginning) programmer.
+
+- Different turtle shapes, gif-images as turtle shapes, user defined
+ and user controllable turtle shapes, among them compound
+ (multicolored) shapes. Turtle shapes can be stretched and tilted, which
+ makes turtles very versatile geometrical objects.
+
+- Fine control over turtle movement and screen updates via delay(),
+ and enhanced tracer() and speed() methods.
+
+- Aliases for the most commonly used commands, like fd for forward etc.,
+ following the early Logo traditions. This reduces the boring work of
+ typing long sequences of commands, which often occur in a natural way
+ when kids try to program fancy pictures on their first encounter with
+ turtle graphics.
+
+- Turtles now have an undo()-method with configurable undo-buffer.
+
+- Some simple commands/methods for creating event driven programs
+ (mouse-, key-, timer-events). Especially useful for programming games.
+
+- A scrollable Canvas class. The default scrollable Canvas can be
+ extended interactively as needed while playing around with the turtle(s).
+
+- A TurtleScreen class with methods controlling background color or
+ background image, window and canvas size and other properties of the
+ TurtleScreen.
+
+- There is a method, setworldcoordinates(), to install a user defined
+ coordinate-system for the TurtleScreen.
+
+- The implementation uses a 2-vector class named Vec2D, derived from tuple.
+ This class is public, so it can be imported by the application programmer,
+ which makes certain types of computations very natural and compact.
+
+- Appearance of the TurtleScreen and the Turtles at startup/import can be
+ configured by means of a turtle.cfg configuration file.
+ The default configuration mimics the appearance of the old turtle module.
+
+- If configured appropriately the module reads in docstrings from a docstring
+ dictionary in some different language, supplied separately and replaces
+ the English ones by those read in. There is a utility function
+ write_docstringdict() to write a dictionary with the original (English)
+ docstrings to disc, so it can serve as a template for translations.
+
+Behind the scenes there are some features included with possible
+extensions in mind. These will be commented and documented elsewhere.
+
+"""
+
+_ver = "turtle 1.1b- - for Python 3.1 - 4. 5. 2009"
+
+# print(_ver)
+
+import tkinter as TK
+import types
+import math
+import time
+import inspect
+import sys
+
+from os.path import isfile, split, join
+from copy import deepcopy
+from tkinter import simpledialog
+
# Names re-exported at module level, grouped by what they belong to; the
# three groups below are concatenated into __all__.
_tg_classes = ['ScrolledCanvas', 'TurtleScreen', 'Screen',
               'RawTurtle', 'Turtle', 'RawPen', 'Pen', 'Shape', 'Vec2D']
_tg_screen_functions = ['addshape', 'bgcolor', 'bgpic', 'bye',
        'clearscreen', 'colormode', 'delay', 'exitonclick', 'getcanvas',
        'getshapes', 'listen', 'mainloop', 'mode', 'numinput',
        'onkey', 'onkeypress', 'onkeyrelease', 'onscreenclick', 'ontimer',
        'register_shape', 'resetscreen', 'screensize', 'setup',
        'setworldcoordinates', 'textinput', 'title', 'tracer', 'turtles', 'update',
        'window_height', 'window_width']
_tg_turtle_functions = ['back', 'backward', 'begin_fill', 'begin_poly', 'bk',
        'circle', 'clear', 'clearstamp', 'clearstamps', 'clone', 'color',
        'degrees', 'distance', 'dot', 'down', 'end_fill', 'end_poly', 'fd',
        'fillcolor', 'filling', 'forward', 'get_poly', 'getpen', 'getscreen', 'get_shapepoly',
        'getturtle', 'goto', 'heading', 'hideturtle', 'home', 'ht', 'isdown',
        'isvisible', 'left', 'lt', 'onclick', 'ondrag', 'onrelease', 'pd',
        'pen', 'pencolor', 'pendown', 'pensize', 'penup', 'pos', 'position',
        'pu', 'radians', 'right', 'reset', 'resizemode', 'rt',
        'seth', 'setheading', 'setpos', 'setposition', 'settiltangle',
        'setundobuffer', 'setx', 'sety', 'shape', 'shapesize', 'shapetransform', 'shearfactor', 'showturtle',
        'speed', 'st', 'stamp', 'tilt', 'tiltangle', 'towards',
        'turtlesize', 'undo', 'undobufferentries', 'up', 'width',
        'write', 'xcor', 'ycor']
_tg_utilities = ['write_docstringdict', 'done']

__all__ = (_tg_classes + _tg_screen_functions + _tg_turtle_functions +
           _tg_utilities + ['Terminator']) # + _math_functions)

# Short Logo-style aliases among the names above (fd for forward, etc.).
_alias_list = ['addshape', 'backward', 'bk', 'fd', 'ht', 'lt', 'pd', 'pos',
               'pu', 'rt', 'seth', 'setpos', 'setposition', 'st',
               'turtlesize', 'up', 'width']
+
# Built-in default configuration; entries may be overridden by a
# turtle.cfg file read at import time (see readconfig below).
_CFG = {"width" : 0.5,               # Screen
        "height" : 0.75,
        "canvwidth" : 400,
        "canvheight": 300,
        "leftright": None,
        "topbottom": None,
        "mode": "standard",          # TurtleScreen
        "colormode": 1.0,
        "delay": 10,
        "undobuffersize": 1000,      # RawTurtle
        "shape": "classic",
        "pencolor" : "black",
        "fillcolor" : "black",
        "resizemode" : "noresize",
        "visible" : True,
        "language": "english",       # docstrings
        "exampleturtle": "turtle",
        "examplescreen": "screen",
        "title": "Python Turtle Graphics",
        "using_IDLE": False
       }
+
def config_dict(filename):
    """Convert content of config-file into dictionary.

    Each meaningful line must have the form ``key = value``; blank lines
    and '#' comments are ignored, malformed lines are reported and skipped.
    Values spelled True/False/None/''/"" become the corresponding Python
    objects, numeric-looking values become int or float, anything else is
    kept as the raw string.
    """
    with open(filename, "r") as f:
        cfglines = f.readlines()
    cfgdict = {}
    # Explicit mapping of the accepted literal spellings; this replaces the
    # previous eval() call on (whitelisted) config text with a direct,
    # eval-free lookup.
    literals = {"True": True, "False": False, "None": None, "''": "", '""': ""}
    for line in cfglines:
        line = line.strip()
        if not line or line.startswith("#"):
            continue
        try:
            key, value = line.split("=")
        except ValueError:
            print("Bad line in config-file %s:\n%s" % (filename,line))
            continue
        key = key.strip()
        value = value.strip()
        if value in literals:
            value = literals[value]
        else:
            try:
                if "." in value:
                    value = float(value)
                else:
                    value = int(value)
            except ValueError:
                pass # value need not be converted
        cfgdict[key] = value
    return cfgdict
+
def readconfig(cfgdict):
    """Read config-files, change configuration-dict accordingly.

    If there is a turtle.cfg file in the current working directory,
    read it from there. If this contains an importconfig-value,
    say 'myway', construct filename turtle_mayway.cfg else use
    turtle.cfg and read it from the import-directory, where
    turtle.py is located.
    Update configuration dictionary first according to config-file,
    in the import directory, then according to config-file in the
    current working directory.
    If no config-file is found, the default configuration is used.
    """
    default_cfg = "turtle.cfg"
    cfgdict1 = {}    # settings from the cwd config file
    cfgdict2 = {}    # settings from the import-directory config file
    if isfile(default_cfg):
        cfgdict1 = config_dict(default_cfg)
    if "importconfig" in cfgdict1:
        default_cfg = "turtle_%s.cfg" % cfgdict1["importconfig"]
    try:
        head, tail = split(__file__)
        cfg_file2 = join(head, default_cfg)
    except Exception:
        # __file__ may be unavailable (e.g. frozen interpreter).
        cfg_file2 = ""
    if isfile(cfg_file2):
        cfgdict2 = config_dict(cfg_file2)
    # Apply import-directory settings first so cwd settings win.
    _CFG.update(cfgdict2)
    _CFG.update(cfgdict1)
+
# Read the configuration once at import time; on any failure the built-in
# defaults in _CFG remain in effect.
try:
    readconfig(_CFG)
except Exception:
    print ("No configfile read, reason unknown")
+
+
class Vec2D(tuple):
    """A 2 dimensional vector class, used as a helper class
    for implementing turtle graphics.
    May be useful for turtle graphics programs also.
    Derived from tuple, so a vector is a tuple!

    Provides (for a, b vectors, k number):
       a+b vector addition
       a-b vector subtraction
       a*b inner product
       k*a and a*k multiplication with scalar
       |a| absolute value of a
       a.rotate(angle) rotation
    """
    def __new__(cls, x, y):
        return tuple.__new__(cls, (x, y))

    def __add__(self, other):
        ax, ay = self
        return Vec2D(ax + other[0], ay + other[1])

    def __mul__(self, other):
        if isinstance(other, Vec2D):
            # vector * vector is the inner (dot) product
            return self[0] * other[0] + self[1] * other[1]
        return Vec2D(self[0] * other, self[1] * other)

    def __rmul__(self, other):
        if isinstance(other, (int, float)):
            return Vec2D(self[0] * other, self[1] * other)
        return NotImplemented

    def __sub__(self, other):
        ax, ay = self
        return Vec2D(ax - other[0], ay - other[1])

    def __neg__(self):
        return Vec2D(-self[0], -self[1])

    def __abs__(self):
        return math.hypot(*self)

    def rotate(self, angle):
        """rotate self counterclockwise by angle
        """
        rad = math.radians(angle)
        c, s = math.cos(rad), math.sin(rad)
        x, y = self
        # standard 2D rotation matrix applied to (x, y)
        return Vec2D(x * c - y * s, y * c + x * s)

    def __getnewargs__(self):
        # support copying/pickling through __new__(x, y)
        return (self[0], self[1])

    def __repr__(self):
        return "(%.2f,%.2f)" % self
+
+
+##############################################################################
+### From here up to line : Tkinter - Interface for turtle.py ###
+### May be replaced by an interface to some different graphics toolkit ###
+##############################################################################
+
+## helper functions for Scrolled Canvas, to forward Canvas-methods
+## to ScrolledCanvas class
+
def __methodDict(cls, _dict):
    """helper function for Scrolled Canvas: collect all plain functions
    defined on cls and its bases into _dict (bases first, so subclasses
    override)."""
    for base in reversed(cls.__bases__):
        __methodDict(base, _dict)
    for name, attr in cls.__dict__.items():
        if isinstance(attr, types.FunctionType):
            _dict[name] = attr
+
def __methods(cls):
    """helper function for Scrolled Canvas: return the names of all
    methods of cls (including inherited ones)."""
    collected = {}
    __methodDict(cls, collected)
    return collected.keys()
+
# Source template for a generated forwarding method: the method simply
# delegates to the same-named method of self.<attribute>.
__stringBody = (
    'def %(method)s(self, *args, **kw): return ' +
    'self.%(attribute)s.%(method)s(*args, **kw)')
+
def __forwardmethods(fromClass, toClass, toPart, exclude = ()):
    ### MANY CHANGES ###
    # Attach to fromClass a forwarding method for every public method of
    # toClass that fromClass does not already define; each generated method
    # delegates to the attribute of self named by toPart (e.g. '_canvas').
    _dict_1 = {}
    __methodDict(toClass, _dict_1)
    _dict = {}
    mfc = __methods(fromClass)
    for ex in _dict_1.keys():
        # Skip private/dunder names, explicit exclusions, and methods
        # fromClass already has.
        if ex[:1] == '_' or ex[-1:] == '_' or ex in exclude or ex in mfc:
            pass
        else:
            _dict[ex] = _dict_1[ex]

    for method, func in _dict.items():
        d = {'method': method, 'func': func}
        if isinstance(toPart, str):
            # Compile the forwarding method from the template into d.
            execString = \
                __stringBody % {'method' : method, 'attribute' : toPart}
        exec(execString, d)
        setattr(fromClass, method, d[method])   ### NEWU!
+
+
class ScrolledCanvas(TK.Frame):
    """Modeled after the scrolled canvas class from Grayons's Tkinter book.

    Used as the default canvas, which pops up automatically when
    using turtle graphics functions or the Turtle class.
    """
    def __init__(self, master, width=500, height=350,
                                          canvwidth=600, canvheight=500):
        """Create the frame, the inner canvas and both scrollbars."""
        TK.Frame.__init__(self, master, width=width, height=height)
        self._rootwindow = self.winfo_toplevel()
        self.width, self.height = width, height
        self.canvwidth, self.canvheight = canvwidth, canvheight
        self.bg = "white"
        self._canvas = TK.Canvas(master, width=width, height=height,
                                 bg=self.bg, relief=TK.SUNKEN, borderwidth=2)
        self.hscroll = TK.Scrollbar(master, command=self._canvas.xview,
                                    orient=TK.HORIZONTAL)
        self.vscroll = TK.Scrollbar(master, command=self._canvas.yview)
        self._canvas.configure(xscrollcommand=self.hscroll.set,
                               yscrollcommand=self.vscroll.set)
        self.rowconfigure(0, weight=1, minsize=0)
        self.columnconfigure(0, weight=1, minsize=0)
        self._canvas.grid(padx=1, in_ = self, pady=1, row=0,
                column=0, rowspan=1, columnspan=1, sticky='news')
        self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
                column=1, rowspan=1, columnspan=1, sticky='news')
        self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
                column=0, rowspan=1, columnspan=1, sticky='news')
        self.reset()
        # The event-sequence string had been mangled to '' (which binds
        # nothing useful); window-resize events arrive as <Configure>.
        self._rootwindow.bind('<Configure>', self.onResize)

    def reset(self, canvwidth=None, canvheight=None, bg = None):
        """Adjust canvas and scrollbars according to given canvas size."""
        if canvwidth:
            self.canvwidth = canvwidth
        if canvheight:
            self.canvheight = canvheight
        if bg:
            self.bg = bg
        self._canvas.config(bg=bg,
                        scrollregion=(-self.canvwidth//2, -self.canvheight//2,
                                       self.canvwidth//2, self.canvheight//2))
        # Center the view, biased by a 30-pixel margin.
        self._canvas.xview_moveto(0.5*(self.canvwidth - self.width + 30) /
                                                               self.canvwidth)
        self._canvas.yview_moveto(0.5*(self.canvheight- self.height + 30) /
                                                              self.canvheight)
        self.adjustScrolls()


    def adjustScrolls(self):
        """ Adjust scrollbars according to window- and canvas-size.
        """
        cwidth = self._canvas.winfo_width()
        cheight = self._canvas.winfo_height()
        self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)
        self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)
        # Show scrollbars only when the canvas is larger than the window.
        if cwidth < self.canvwidth or cheight < self.canvheight:
            self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
                              column=0, rowspan=1, columnspan=1, sticky='news')
            self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
                              column=1, rowspan=1, columnspan=1, sticky='news')
        else:
            self.hscroll.grid_forget()
            self.vscroll.grid_forget()

    def onResize(self, event):
        """self-explanatory"""
        self.adjustScrolls()

    def bbox(self, *args):
        """ 'forward' method, which canvas itself has inherited...
        """
        return self._canvas.bbox(*args)

    def cget(self, *args, **kwargs):
        """ 'forward' method, which canvas itself has inherited...
        """
        return self._canvas.cget(*args, **kwargs)

    def config(self, *args, **kwargs):
        """ 'forward' method, which canvas itself has inherited...
        """
        self._canvas.config(*args, **kwargs)

    def bind(self, *args, **kwargs):
        """ 'forward' method, which canvas itself has inherited...
        """
        self._canvas.bind(*args, **kwargs)

    def unbind(self, *args, **kwargs):
        """ 'forward' method, which canvas itself has inherited...
        """
        self._canvas.unbind(*args, **kwargs)

    def focus_force(self):
        """ 'forward' method, which canvas itself has inherited...
        """
        self._canvas.focus_force()
+
+__forwardmethods(ScrolledCanvas, TK.Canvas, '_canvas')
+
+
class _Root(TK.Tk):
    """Root (toplevel) window for a Tkinter-based turtle Screen."""

    def __init__(self):
        TK.Tk.__init__(self)

    def setupcanvas(self, width, height, cwidth, cheight):
        """Create the hosted ScrolledCanvas and pack it to fill the window."""
        self._canvas = ScrolledCanvas(self, width, height, cwidth, cheight)
        self._canvas.pack(expand=1, fill="both")

    def _getcanvas(self):
        """Return the hosted ScrolledCanvas."""
        return self._canvas

    def set_geometry(self, width, height, startx, starty):
        """Resize the window to width x height at offset (startx, starty)."""
        spec = "%dx%d%+d%+d" % (width, height, startx, starty)
        self.geometry(spec)

    def ondestroy(self, destroy):
        """Install destroy as handler for the window-manager close button."""
        self.wm_protocol("WM_DELETE_WINDOW", destroy)

    def win_width(self):
        """Return the width of the screen in pixels."""
        return self.winfo_screenwidth()

    def win_height(self):
        """Return the height of the screen in pixels."""
        return self.winfo_screenheight()
+
+Canvas = TK.Canvas
+
+
+class TurtleScreenBase(object):
+ """Provide the basic graphics functionality.
+ Interface between Tkinter and turtle.py.
+
+ To port turtle.py to some different graphics toolkit
+ a corresponding TurtleScreenBase class has to be implemented.
+ """
+
+ def _blankimage(self):
+ """return a blank image object
+ """
+ img = TK.PhotoImage(width=1, height=1, master=self.cv)
+ img.blank()
+ return img
+
+ def _image(self, filename):
+ """return an image object containing the
+ imagedata from a gif-file named filename.
+ """
+ return TK.PhotoImage(file=filename, master=self.cv)
+
+ def __init__(self, cv):
+ self.cv = cv
+ if isinstance(cv, ScrolledCanvas):
+ w = self.cv.canvwidth
+ h = self.cv.canvheight
+ else: # expected: ordinary TK.Canvas
+ w = int(self.cv.cget("width"))
+ h = int(self.cv.cget("height"))
+ self.cv.config(scrollregion = (-w//2, -h//2, w//2, h//2 ))
+ self.canvwidth = w
+ self.canvheight = h
+ self.xscale = self.yscale = 1.0
+
+ def _createpoly(self):
+ """Create an invisible polygon item on canvas self.cv)
+ """
+ return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill="", outline="")
+
+ def _drawpoly(self, polyitem, coordlist, fill=None,
+ outline=None, width=None, top=False):
+ """Configure polygonitem polyitem according to provided
+ arguments:
+ coordlist is sequence of coordinates
+ fill is filling color
+ outline is outline color
+ top is a boolean value, which specifies if polyitem
+ will be put on top of the canvas' displaylist so it
+ will not be covered by other items.
+ """
+ cl = []
+ for x, y in coordlist:
+ cl.append(x * self.xscale)
+ cl.append(-y * self.yscale)
+ self.cv.coords(polyitem, *cl)
+ if fill is not None:
+ self.cv.itemconfigure(polyitem, fill=fill)
+ if outline is not None:
+ self.cv.itemconfigure(polyitem, outline=outline)
+ if width is not None:
+ self.cv.itemconfigure(polyitem, width=width)
+ if top:
+ self.cv.tag_raise(polyitem)
+
+ def _createline(self):
+ """Create an invisible line item on canvas self.cv)
+ """
+ return self.cv.create_line(0, 0, 0, 0, fill="", width=2,
+ capstyle = TK.ROUND)
+
+ def _drawline(self, lineitem, coordlist=None,
+ fill=None, width=None, top=False):
+ """Configure lineitem according to provided arguments:
+ coordlist is sequence of coordinates
+ fill is drawing color
+ width is width of drawn line.
+ top is a boolean value, which specifies if polyitem
+ will be put on top of the canvas' displaylist so it
+ will not be covered by other items.
+ """
+ if coordlist is not None:
+ cl = []
+ for x, y in coordlist:
+ cl.append(x * self.xscale)
+ cl.append(-y * self.yscale)
+ self.cv.coords(lineitem, *cl)
+ if fill is not None:
+ self.cv.itemconfigure(lineitem, fill=fill)
+ if width is not None:
+ self.cv.itemconfigure(lineitem, width=width)
+ if top:
+ self.cv.tag_raise(lineitem)
+
+ def _delete(self, item):
+ """Delete graphics item from canvas.
+ If item is"all" delete all graphics items.
+ """
+ self.cv.delete(item)
+
+ def _update(self):
+ """Redraw graphics items on canvas
+ """
+ self.cv.update()
+
+ def _delay(self, delay):
+ """Delay subsequent canvas actions for delay ms."""
+ self.cv.after(delay)
+
+ def _iscolorstring(self, color):
+ """Check if the string color is a legal Tkinter color string.
+ """
+ try:
+ rgb = self.cv.winfo_rgb(color)
+ ok = True
+ except TK.TclError:
+ ok = False
+ return ok
+
+ def _bgcolor(self, color=None):
+ """Set canvas' backgroundcolor if color is not None,
+ else return backgroundcolor."""
+ if color is not None:
+ self.cv.config(bg = color)
+ self._update()
+ else:
+ return self.cv.cget("bg")
+
+ def _write(self, pos, txt, align, font, pencolor):
+ """Write txt at pos in canvas with specified font
+ and color.
+ Return text item and x-coord of right bottom corner
+ of text's bounding box."""
+ x, y = pos
+ x = x * self.xscale
+ y = y * self.yscale
+ anchor = {"left":"sw", "center":"s", "right":"se" }
+ item = self.cv.create_text(x-1, -y, text = txt, anchor = anchor[align],
+ fill = pencolor, font = font)
+ x0, y0, x1, y1 = self.cv.bbox(item)
+ return item, x1-1
+
+## def _dot(self, pos, size, color):
+## """may be implemented for some other graphics toolkit"""
+
+ def _onclick(self, item, fun, num=1, add=None):
+ """Bind fun to mouse-click event on turtle.
+ fun must be a function with two arguments, the coordinates
+ of the clicked point on the canvas.
+ num, the number of the mouse-button defaults to 1
+ """
+ if fun is None:
+ self.cv.tag_unbind(item, "" % num)
+ else:
+ def eventfun(event):
+ x, y = (self.cv.canvasx(event.x)/self.xscale,
+ -self.cv.canvasy(event.y)/self.yscale)
+ fun(x, y)
+ self.cv.tag_bind(item, "" % num, eventfun, add)
+
+ def _onrelease(self, item, fun, num=1, add=None):
+ """Bind fun to mouse-button-release event on turtle.
+ fun must be a function with two arguments, the coordinates
+ of the point on the canvas where mouse button is released.
+ num, the number of the mouse-button defaults to 1
+
+ If a turtle is clicked, first _onclick-event will be performed,
+ then _onscreensclick-event.
+ """
+ if fun is None:
+ self.cv.tag_unbind(item, "