diff --git a/.gitattributes b/.gitattributes
index 30ec0e597b7b0e6ab959acc8936e6bc6553ce9a8..b47c6ee015f24bf484a6be27d8acc7601fe81ff2 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -418,3 +418,7 @@ llava/lib/libform.a filter=lfs diff=lfs merge=lfs -text
 llava/lib/liblsan.so filter=lfs diff=lfs merge=lfs -text
 llava/lib/libgcc_s.so.1 filter=lfs diff=lfs merge=lfs -text
 llava/lib/libtinfow.so filter=lfs diff=lfs merge=lfs -text
+llava/lib/libz.so.1 filter=lfs diff=lfs merge=lfs -text
+llava/lib/libatomic.so.1.2.0 filter=lfs diff=lfs merge=lfs -text
+llava/lib/libncurses.a filter=lfs diff=lfs merge=lfs -text
+llava/lib/libtinfow.so.6.4 filter=lfs diff=lfs merge=lfs -text
diff --git a/llava/lib/libatomic.so.1.2.0 b/llava/lib/libatomic.so.1.2.0
new file mode 100644
index 0000000000000000000000000000000000000000..0ac3eb4db4afc304563eb9424ebd8f5d8e2a38e0
--- /dev/null
+++ b/llava/lib/libatomic.so.1.2.0
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2f1a92c18f01c13c9a89908fb86a7309ae5b89a882db9914114957bc4b6fed92
+size 143648
diff --git a/llava/lib/libncurses.a b/llava/lib/libncurses.a
new file mode 100644
index 0000000000000000000000000000000000000000..4c536d2474d7625be31799c8d17f2fbfe1609436
--- /dev/null
+++ b/llava/lib/libncurses.a
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:873bc902fcba042e3e980451cae21218095aa52298dcce1a9ac7e415beecb1f0
+size 544910
diff --git a/llava/lib/libtinfow.so.6.4 b/llava/lib/libtinfow.so.6.4
new file mode 100644
index 0000000000000000000000000000000000000000..a645f87a3ef9feaf2f53a1911825fe742fb52521
--- /dev/null
+++ b/llava/lib/libtinfow.so.6.4
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5679c9d7cc0ec2d6b08c6058198667efe71f657e89dcc0bd7adcf5d6cbc80c5
+size 287080
diff --git a/llava/lib/libz.so.1 b/llava/lib/libz.so.1
new file mode 100644
index 0000000000000000000000000000000000000000..64cd6a309bad00dc38040a577191f6681e18bf89
--- /dev/null
+++ b/llava/lib/libz.so.1
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b0e682a9dc7fd4895a6783288f851b793dc89633f28714027974fa4d66f3914
+size 124744
diff --git a/llava/lib/python3.10/_compat_pickle.py b/llava/lib/python3.10/_compat_pickle.py
new file mode 100644
index 0000000000000000000000000000000000000000..f68496ae639f5f880ae5f4ca0a220a29d2e354be
--- /dev/null
+++ b/llava/lib/python3.10/_compat_pickle.py
@@ -0,0 +1,251 @@
+# This module is used to map the old Python 2 names to the new names used in
+# Python 3 for the pickle module. This is needed to make pickle streams
+# generated with Python 2 loadable by Python 3.
+
+# This is a copy of lib2to3.fixes.fix_imports.MAPPING. We cannot import
+# lib2to3 and use the mapping defined there, because lib2to3 uses pickle.
+# Thus, this could cause the module to be imported recursively.
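+
+# Illustrative sketch (added commentary, not part of the upstream module):
+# pickle's Unpickler consults these tables in its find_class() step when
+# fix_imports is true (the default) and the pickle protocol is <= 2, so a
+# stream written by Python 2 that names __builtin__.unicode resolves to
+# builtins.str:
+#
+#   import pickle
+#   data = b'\x80\x02c__builtin__\nunicode\nq\x00.'  # GLOBAL __builtin__ unicode
+#   assert pickle.loads(data) is str  # remapped via NAME_MAPPING below
+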
+IMPORT_MAPPING = { + '__builtin__' : 'builtins', + 'copy_reg': 'copyreg', + 'Queue': 'queue', + 'SocketServer': 'socketserver', + 'ConfigParser': 'configparser', + 'repr': 'reprlib', + 'tkFileDialog': 'tkinter.filedialog', + 'tkSimpleDialog': 'tkinter.simpledialog', + 'tkColorChooser': 'tkinter.colorchooser', + 'tkCommonDialog': 'tkinter.commondialog', + 'Dialog': 'tkinter.dialog', + 'Tkdnd': 'tkinter.dnd', + 'tkFont': 'tkinter.font', + 'tkMessageBox': 'tkinter.messagebox', + 'ScrolledText': 'tkinter.scrolledtext', + 'Tkconstants': 'tkinter.constants', + 'Tix': 'tkinter.tix', + 'ttk': 'tkinter.ttk', + 'Tkinter': 'tkinter', + 'markupbase': '_markupbase', + '_winreg': 'winreg', + 'thread': '_thread', + 'dummy_thread': '_dummy_thread', + 'dbhash': 'dbm.bsd', + 'dumbdbm': 'dbm.dumb', + 'dbm': 'dbm.ndbm', + 'gdbm': 'dbm.gnu', + 'xmlrpclib': 'xmlrpc.client', + 'SimpleXMLRPCServer': 'xmlrpc.server', + 'httplib': 'http.client', + 'htmlentitydefs' : 'html.entities', + 'HTMLParser' : 'html.parser', + 'Cookie': 'http.cookies', + 'cookielib': 'http.cookiejar', + 'BaseHTTPServer': 'http.server', + 'test.test_support': 'test.support', + 'commands': 'subprocess', + 'urlparse' : 'urllib.parse', + 'robotparser' : 'urllib.robotparser', + 'urllib2': 'urllib.request', + 'anydbm': 'dbm', + '_abcoll' : 'collections.abc', +} + + +# This contains rename rules that are easy to handle. We ignore the more +# complex stuff (e.g. mapping the names in the urllib and types modules). +# These rules should be run before import names are fixed. +NAME_MAPPING = { + ('__builtin__', 'xrange'): ('builtins', 'range'), + ('__builtin__', 'reduce'): ('functools', 'reduce'), + ('__builtin__', 'intern'): ('sys', 'intern'), + ('__builtin__', 'unichr'): ('builtins', 'chr'), + ('__builtin__', 'unicode'): ('builtins', 'str'), + ('__builtin__', 'long'): ('builtins', 'int'), + ('itertools', 'izip'): ('builtins', 'zip'), + ('itertools', 'imap'): ('builtins', 'map'), + ('itertools', 'ifilter'): ('builtins', 'filter'), + ('itertools', 'ifilterfalse'): ('itertools', 'filterfalse'), + ('itertools', 'izip_longest'): ('itertools', 'zip_longest'), + ('UserDict', 'IterableUserDict'): ('collections', 'UserDict'), + ('UserList', 'UserList'): ('collections', 'UserList'), + ('UserString', 'UserString'): ('collections', 'UserString'), + ('whichdb', 'whichdb'): ('dbm', 'whichdb'), + ('_socket', 'fromfd'): ('socket', 'fromfd'), + ('_multiprocessing', 'Connection'): ('multiprocessing.connection', 'Connection'), + ('multiprocessing.process', 'Process'): ('multiprocessing.context', 'Process'), + ('multiprocessing.forking', 'Popen'): ('multiprocessing.popen_fork', 'Popen'), + ('urllib', 'ContentTooShortError'): ('urllib.error', 'ContentTooShortError'), + ('urllib', 'getproxies'): ('urllib.request', 'getproxies'), + ('urllib', 'pathname2url'): ('urllib.request', 'pathname2url'), + ('urllib', 'quote_plus'): ('urllib.parse', 'quote_plus'), + ('urllib', 'quote'): ('urllib.parse', 'quote'), + ('urllib', 'unquote_plus'): ('urllib.parse', 'unquote_plus'), + ('urllib', 'unquote'): ('urllib.parse', 'unquote'), + ('urllib', 'url2pathname'): ('urllib.request', 'url2pathname'), + ('urllib', 'urlcleanup'): ('urllib.request', 'urlcleanup'), + ('urllib', 'urlencode'): ('urllib.parse', 'urlencode'), + ('urllib', 'urlopen'): ('urllib.request', 'urlopen'), + ('urllib', 'urlretrieve'): ('urllib.request', 'urlretrieve'), + ('urllib2', 'HTTPError'): ('urllib.error', 'HTTPError'), + ('urllib2', 'URLError'): ('urllib.error', 'URLError'), +} + +PYTHON2_EXCEPTIONS = ( + 
"ArithmeticError", + "AssertionError", + "AttributeError", + "BaseException", + "BufferError", + "BytesWarning", + "DeprecationWarning", + "EOFError", + "EnvironmentError", + "Exception", + "FloatingPointError", + "FutureWarning", + "GeneratorExit", + "IOError", + "ImportError", + "ImportWarning", + "IndentationError", + "IndexError", + "KeyError", + "KeyboardInterrupt", + "LookupError", + "MemoryError", + "NameError", + "NotImplementedError", + "OSError", + "OverflowError", + "PendingDeprecationWarning", + "ReferenceError", + "RuntimeError", + "RuntimeWarning", + # StandardError is gone in Python 3, so we map it to Exception + "StopIteration", + "SyntaxError", + "SyntaxWarning", + "SystemError", + "SystemExit", + "TabError", + "TypeError", + "UnboundLocalError", + "UnicodeDecodeError", + "UnicodeEncodeError", + "UnicodeError", + "UnicodeTranslateError", + "UnicodeWarning", + "UserWarning", + "ValueError", + "Warning", + "ZeroDivisionError", +) + +try: + WindowsError +except NameError: + pass +else: + PYTHON2_EXCEPTIONS += ("WindowsError",) + +for excname in PYTHON2_EXCEPTIONS: + NAME_MAPPING[("exceptions", excname)] = ("builtins", excname) + +MULTIPROCESSING_EXCEPTIONS = ( + 'AuthenticationError', + 'BufferTooShort', + 'ProcessError', + 'TimeoutError', +) + +for excname in MULTIPROCESSING_EXCEPTIONS: + NAME_MAPPING[("multiprocessing", excname)] = ("multiprocessing.context", excname) + +# Same, but for 3.x to 2.x +REVERSE_IMPORT_MAPPING = dict((v, k) for (k, v) in IMPORT_MAPPING.items()) +assert len(REVERSE_IMPORT_MAPPING) == len(IMPORT_MAPPING) +REVERSE_NAME_MAPPING = dict((v, k) for (k, v) in NAME_MAPPING.items()) +assert len(REVERSE_NAME_MAPPING) == len(NAME_MAPPING) + +# Non-mutual mappings. + +IMPORT_MAPPING.update({ + 'cPickle': 'pickle', + '_elementtree': 'xml.etree.ElementTree', + 'FileDialog': 'tkinter.filedialog', + 'SimpleDialog': 'tkinter.simpledialog', + 'DocXMLRPCServer': 'xmlrpc.server', + 'SimpleHTTPServer': 'http.server', + 'CGIHTTPServer': 'http.server', + # For compatibility with broken pickles saved in old Python 3 versions + 'UserDict': 'collections', + 'UserList': 'collections', + 'UserString': 'collections', + 'whichdb': 'dbm', + 'StringIO': 'io', + 'cStringIO': 'io', +}) + +REVERSE_IMPORT_MAPPING.update({ + '_bz2': 'bz2', + '_dbm': 'dbm', + '_functools': 'functools', + '_gdbm': 'gdbm', + '_pickle': 'pickle', +}) + +NAME_MAPPING.update({ + ('__builtin__', 'basestring'): ('builtins', 'str'), + ('exceptions', 'StandardError'): ('builtins', 'Exception'), + ('UserDict', 'UserDict'): ('collections', 'UserDict'), + ('socket', '_socketobject'): ('socket', 'SocketType'), +}) + +REVERSE_NAME_MAPPING.update({ + ('_functools', 'reduce'): ('__builtin__', 'reduce'), + ('tkinter.filedialog', 'FileDialog'): ('FileDialog', 'FileDialog'), + ('tkinter.filedialog', 'LoadFileDialog'): ('FileDialog', 'LoadFileDialog'), + ('tkinter.filedialog', 'SaveFileDialog'): ('FileDialog', 'SaveFileDialog'), + ('tkinter.simpledialog', 'SimpleDialog'): ('SimpleDialog', 'SimpleDialog'), + ('xmlrpc.server', 'ServerHTMLDoc'): ('DocXMLRPCServer', 'ServerHTMLDoc'), + ('xmlrpc.server', 'XMLRPCDocGenerator'): + ('DocXMLRPCServer', 'XMLRPCDocGenerator'), + ('xmlrpc.server', 'DocXMLRPCRequestHandler'): + ('DocXMLRPCServer', 'DocXMLRPCRequestHandler'), + ('xmlrpc.server', 'DocXMLRPCServer'): + ('DocXMLRPCServer', 'DocXMLRPCServer'), + ('xmlrpc.server', 'DocCGIXMLRPCRequestHandler'): + ('DocXMLRPCServer', 'DocCGIXMLRPCRequestHandler'), + ('http.server', 'SimpleHTTPRequestHandler'): + ('SimpleHTTPServer', 
'SimpleHTTPRequestHandler'), + ('http.server', 'CGIHTTPRequestHandler'): + ('CGIHTTPServer', 'CGIHTTPRequestHandler'), + ('_socket', 'socket'): ('socket', '_socketobject'), +}) + +PYTHON3_OSERROR_EXCEPTIONS = ( + 'BrokenPipeError', + 'ChildProcessError', + 'ConnectionAbortedError', + 'ConnectionError', + 'ConnectionRefusedError', + 'ConnectionResetError', + 'FileExistsError', + 'FileNotFoundError', + 'InterruptedError', + 'IsADirectoryError', + 'NotADirectoryError', + 'PermissionError', + 'ProcessLookupError', + 'TimeoutError', +) + +for excname in PYTHON3_OSERROR_EXCEPTIONS: + REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'OSError') + +PYTHON3_IMPORTERROR_EXCEPTIONS = ( + 'ModuleNotFoundError', +) + +for excname in PYTHON3_IMPORTERROR_EXCEPTIONS: + REVERSE_NAME_MAPPING[('builtins', excname)] = ('exceptions', 'ImportError') diff --git a/llava/lib/python3.10/_compression.py b/llava/lib/python3.10/_compression.py new file mode 100644 index 0000000000000000000000000000000000000000..e8b70aa0a3e6806c0f2b60ffaf9944291abcf4c4 --- /dev/null +++ b/llava/lib/python3.10/_compression.py @@ -0,0 +1,162 @@ +"""Internal classes used by the gzip, lzma and bz2 modules""" + +import io +import sys + +BUFFER_SIZE = io.DEFAULT_BUFFER_SIZE # Compressed data read chunk size + + +class BaseStream(io.BufferedIOBase): + """Mode-checking helper functions.""" + + def _check_not_closed(self): + if self.closed: + raise ValueError("I/O operation on closed file") + + def _check_can_read(self): + if not self.readable(): + raise io.UnsupportedOperation("File not open for reading") + + def _check_can_write(self): + if not self.writable(): + raise io.UnsupportedOperation("File not open for writing") + + def _check_can_seek(self): + if not self.readable(): + raise io.UnsupportedOperation("Seeking is only supported " + "on files open for reading") + if not self.seekable(): + raise io.UnsupportedOperation("The underlying file object " + "does not support seeking") + + +class DecompressReader(io.RawIOBase): + """Adapts the decompressor API to a RawIOBase reader API""" + + def readable(self): + return True + + def __init__(self, fp, decomp_factory, trailing_error=(), **decomp_args): + self._fp = fp + self._eof = False + self._pos = 0 # Current offset in decompressed stream + + # Set to size of decompressed stream once it is known, for SEEK_END + self._size = -1 + + # Save the decompressor factory and arguments. + # If the file contains multiple compressed streams, each + # stream will need a separate decompressor object. A new decompressor + # object is also needed when implementing a backwards seek(). + self._decomp_factory = decomp_factory + self._decomp_args = decomp_args + self._decompressor = self._decomp_factory(**self._decomp_args) + + # Exception class to catch from decompressor signifying invalid + # trailing data to ignore + self._trailing_error = trailing_error + + def close(self): + self._decompressor = None + return super().close() + + def seekable(self): + return self._fp.seekable() + + def readinto(self, b): + with memoryview(b) as view, view.cast("B") as byte_view: + data = self.read(len(byte_view)) + byte_view[:len(data)] = data + return len(data) + + def read(self, size=-1): + if size < 0: + return self.readall() + + if not size or self._eof: + return b"" + data = None # Default if EOF is encountered + # Depending on the input data, our call to the decompressor may not + # return any data. In this case, try again after reading another block. 
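+        # Added outline (commentary, not upstream code) of the loop below:
+        #   1. If the current decompressor has hit EOF, the file may simply
+        #      contain another concatenated compressed stream (gzip, bz2 and
+        #      xz all allow multi-stream files), so a fresh decompressor is
+        #      started on the leftover bytes; bytes that do not parse as a
+        #      new stream are treated as trailing garbage via
+        #      self._trailing_error and ignored.
+        #   2. Otherwise, more raw bytes are fed in whenever the decompressor
+        #      reports needs_input; running out of raw bytes at that point
+        #      means the stream was truncated, hence the EOFError.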
+ while True: + if self._decompressor.eof: + rawblock = (self._decompressor.unused_data or + self._fp.read(BUFFER_SIZE)) + if not rawblock: + break + # Continue to next stream. + self._decompressor = self._decomp_factory( + **self._decomp_args) + try: + data = self._decompressor.decompress(rawblock, size) + except self._trailing_error: + # Trailing data isn't a valid compressed stream; ignore it. + break + else: + if self._decompressor.needs_input: + rawblock = self._fp.read(BUFFER_SIZE) + if not rawblock: + raise EOFError("Compressed file ended before the " + "end-of-stream marker was reached") + else: + rawblock = b"" + data = self._decompressor.decompress(rawblock, size) + if data: + break + if not data: + self._eof = True + self._size = self._pos + return b"" + self._pos += len(data) + return data + + def readall(self): + chunks = [] + # sys.maxsize means the max length of output buffer is unlimited, + # so that the whole input buffer can be decompressed within one + # .decompress() call. + while data := self.read(sys.maxsize): + chunks.append(data) + + return b"".join(chunks) + + # Rewind the file to the beginning of the data stream. + def _rewind(self): + self._fp.seek(0) + self._eof = False + self._pos = 0 + self._decompressor = self._decomp_factory(**self._decomp_args) + + def seek(self, offset, whence=io.SEEK_SET): + # Recalculate offset as an absolute file position. + if whence == io.SEEK_SET: + pass + elif whence == io.SEEK_CUR: + offset = self._pos + offset + elif whence == io.SEEK_END: + # Seeking relative to EOF - we need to know the file's size. + if self._size < 0: + while self.read(io.DEFAULT_BUFFER_SIZE): + pass + offset = self._size + offset + else: + raise ValueError("Invalid value for whence: {}".format(whence)) + + # Make it so that offset is the number of bytes to skip forward. + if offset < self._pos: + self._rewind() + else: + offset -= self._pos + + # Read and discard data until we reach the desired position. + while offset > 0: + data = self.read(min(io.DEFAULT_BUFFER_SIZE, offset)) + if not data: + break + offset -= len(data) + + return self._pos + + def tell(self): + """Return the current file position.""" + return self._pos diff --git a/llava/lib/python3.10/_py_abc.py b/llava/lib/python3.10/_py_abc.py new file mode 100644 index 0000000000000000000000000000000000000000..c870ae9048b4f131a71beb1b5827ba13f0eda2f3 --- /dev/null +++ b/llava/lib/python3.10/_py_abc.py @@ -0,0 +1,147 @@ +from _weakrefset import WeakSet + + +def get_cache_token(): + """Returns the current ABC cache token. + + The token is an opaque object (supporting equality testing) identifying the + current version of the ABC cache for virtual subclasses. The token changes + with every call to ``register()`` on any ABC. + """ + return ABCMeta._abc_invalidation_counter + + +class ABCMeta(type): + """Metaclass for defining Abstract Base Classes (ABCs). + + Use this metaclass to create an ABC. An ABC can be subclassed + directly, and then acts as a mix-in class. You can also register + unrelated concrete classes (even built-in classes) and unrelated + ABCs as 'virtual subclasses' -- these and their descendants will + be considered subclasses of the registering ABC by the built-in + issubclass() function, but the registering ABC won't show up in + their MRO (Method Resolution Order) nor will method + implementations defined by the registering ABC be callable (not + even via super()). + """ + + # A global counter that is incremented each time a class is + # registered as a virtual subclass of anything. 
It forces the + # negative cache to be cleared before its next use. + # Note: this counter is private. Use `abc.get_cache_token()` for + # external code. + _abc_invalidation_counter = 0 + + def __new__(mcls, name, bases, namespace, /, **kwargs): + cls = super().__new__(mcls, name, bases, namespace, **kwargs) + # Compute set of abstract method names + abstracts = {name + for name, value in namespace.items() + if getattr(value, "__isabstractmethod__", False)} + for base in bases: + for name in getattr(base, "__abstractmethods__", set()): + value = getattr(cls, name, None) + if getattr(value, "__isabstractmethod__", False): + abstracts.add(name) + cls.__abstractmethods__ = frozenset(abstracts) + # Set up inheritance registry + cls._abc_registry = WeakSet() + cls._abc_cache = WeakSet() + cls._abc_negative_cache = WeakSet() + cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter + return cls + + def register(cls, subclass): + """Register a virtual subclass of an ABC. + + Returns the subclass, to allow usage as a class decorator. + """ + if not isinstance(subclass, type): + raise TypeError("Can only register classes") + if issubclass(subclass, cls): + return subclass # Already a subclass + # Subtle: test for cycles *after* testing for "already a subclass"; + # this means we allow X.register(X) and interpret it as a no-op. + if issubclass(cls, subclass): + # This would create a cycle, which is bad for the algorithm below + raise RuntimeError("Refusing to create an inheritance cycle") + cls._abc_registry.add(subclass) + ABCMeta._abc_invalidation_counter += 1 # Invalidate negative cache + return subclass + + def _dump_registry(cls, file=None): + """Debug helper to print the ABC registry.""" + print(f"Class: {cls.__module__}.{cls.__qualname__}", file=file) + print(f"Inv. counter: {get_cache_token()}", file=file) + for name in cls.__dict__: + if name.startswith("_abc_"): + value = getattr(cls, name) + if isinstance(value, WeakSet): + value = set(value) + print(f"{name}: {value!r}", file=file) + + def _abc_registry_clear(cls): + """Clear the registry (for debugging or testing).""" + cls._abc_registry.clear() + + def _abc_caches_clear(cls): + """Clear the caches (for debugging or testing).""" + cls._abc_cache.clear() + cls._abc_negative_cache.clear() + + def __instancecheck__(cls, instance): + """Override for isinstance(instance, cls).""" + # Inline the cache checking + subclass = instance.__class__ + if subclass in cls._abc_cache: + return True + subtype = type(instance) + if subtype is subclass: + if (cls._abc_negative_cache_version == + ABCMeta._abc_invalidation_counter and + subclass in cls._abc_negative_cache): + return False + # Fall back to the subclass check. 
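+            # Added note (commentary, not upstream code): the common case --
+            # type(instance) is instance.__class__ -- is fully handled in
+            # this branch with a single check; the any() fallback below only
+            # matters for objects whose __class__ differs from their type()
+            # (proxies, mocks), where both views have to be tested.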
+ return cls.__subclasscheck__(subclass) + return any(cls.__subclasscheck__(c) for c in (subclass, subtype)) + + def __subclasscheck__(cls, subclass): + """Override for issubclass(subclass, cls).""" + if not isinstance(subclass, type): + raise TypeError('issubclass() arg 1 must be a class') + # Check cache + if subclass in cls._abc_cache: + return True + # Check negative cache; may have to invalidate + if cls._abc_negative_cache_version < ABCMeta._abc_invalidation_counter: + # Invalidate the negative cache + cls._abc_negative_cache = WeakSet() + cls._abc_negative_cache_version = ABCMeta._abc_invalidation_counter + elif subclass in cls._abc_negative_cache: + return False + # Check the subclass hook + ok = cls.__subclasshook__(subclass) + if ok is not NotImplemented: + assert isinstance(ok, bool) + if ok: + cls._abc_cache.add(subclass) + else: + cls._abc_negative_cache.add(subclass) + return ok + # Check if it's a direct subclass + if cls in getattr(subclass, '__mro__', ()): + cls._abc_cache.add(subclass) + return True + # Check if it's a subclass of a registered class (recursive) + for rcls in cls._abc_registry: + if issubclass(subclass, rcls): + cls._abc_cache.add(subclass) + return True + # Check if it's a subclass of a subclass (recursive) + for scls in cls.__subclasses__(): + if issubclass(subclass, scls): + cls._abc_cache.add(subclass) + return True + # No dice; update negative cache + cls._abc_negative_cache.add(subclass) + return False diff --git a/llava/lib/python3.10/_sitebuiltins.py b/llava/lib/python3.10/_sitebuiltins.py new file mode 100644 index 0000000000000000000000000000000000000000..c66269a571967f89aa678e0becb30baf8b1c69b7 --- /dev/null +++ b/llava/lib/python3.10/_sitebuiltins.py @@ -0,0 +1,103 @@ +""" +The objects used by the site module to add custom builtins. +""" + +# Those objects are almost immortal and they keep a reference to their module +# globals. Defining them in the site module would keep too many references +# alive. +# Note this means this module should also avoid keep things alive in its +# globals. + +import sys + +class Quitter(object): + def __init__(self, name, eof): + self.name = name + self.eof = eof + def __repr__(self): + return 'Use %s() or %s to exit' % (self.name, self.eof) + def __call__(self, code=None): + # Shells like IDLE catch the SystemExit, but listen when their + # stdin wrapper is closed. 
+ try: + sys.stdin.close() + except: + pass + raise SystemExit(code) + + +class _Printer(object): + """interactive prompt objects for printing the license text, a list of + contributors and the copyright notice.""" + + MAXLINES = 23 + + def __init__(self, name, data, files=(), dirs=()): + import os + self.__name = name + self.__data = data + self.__lines = None + self.__filenames = [os.path.join(dir, filename) + for dir in dirs + for filename in files] + + def __setup(self): + if self.__lines: + return + data = None + for filename in self.__filenames: + try: + with open(filename, encoding='utf-8') as fp: + data = fp.read() + break + except OSError: + pass + if not data: + data = self.__data + self.__lines = data.split('\n') + self.__linecnt = len(self.__lines) + + def __repr__(self): + self.__setup() + if len(self.__lines) <= self.MAXLINES: + return "\n".join(self.__lines) + else: + return "Type %s() to see the full %s text" % ((self.__name,)*2) + + def __call__(self): + self.__setup() + prompt = 'Hit Return for more, or q (and Return) to quit: ' + lineno = 0 + while 1: + try: + for i in range(lineno, lineno + self.MAXLINES): + print(self.__lines[i]) + except IndexError: + break + else: + lineno += self.MAXLINES + key = None + while key is None: + key = input(prompt) + if key not in ('', 'q'): + key = None + if key == 'q': + break + + +class _Helper(object): + """Define the builtin 'help'. + + This is a wrapper around pydoc.help that provides a helpful message + when 'help' is typed at the Python interactive prompt. + + Calling help() at the Python prompt starts an interactive help session. + Calling help(thing) prints help for the python object 'thing'. + """ + + def __repr__(self): + return "Type help() for interactive help, " \ + "or help(object) for help about object." 
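+    # Usage sketch (an assumption drawn from site.py, not something this
+    # file defines): the site module instantiates these classes at
+    # interpreter startup, roughly:
+    #
+    #   builtins.help = _Helper()
+    #   builtins.quit = Quitter('quit', eof)  # eof is e.g. 'Ctrl-D (i.e. EOF)'
+    #   builtins.license = _Printer('license', '<fallback text>', files, dirs)
+    #
+    # which is how `help`, `quit`, `exit` and `license` appear in the REPL.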
+ def __call__(self, *args, **kwds): + import pydoc + return pydoc.help(*args, **kwds) diff --git a/llava/lib/python3.10/_sysconfigdata__linux_x86_64-linux-gnu.py.orig b/llava/lib/python3.10/_sysconfigdata__linux_x86_64-linux-gnu.py.orig new file mode 100644 index 0000000000000000000000000000000000000000..99b46cbcff6647b9af4528443a0eb7ede99ea9de --- /dev/null +++ b/llava/lib/python3.10/_sysconfigdata__linux_x86_64-linux-gnu.py.orig @@ -0,0 +1,986 @@ +# system configuration generated and used by the sysconfig module +build_time_vars = {'ABIFLAGS': '', + 'AC_APPLE_UNIVERSAL_BUILD': 0, + 'AIX_BUILDDATE': 0, + 'AIX_GENUINE_CPLUSPLUS': 0, + 'ALIGNOF_LONG': 8, + 'ALIGNOF_SIZE_T': 8, + 'ALT_SOABI': 0, + 'ANDROID_API_LEVEL': 0, + 'AR': 'x86_64-conda-linux-gnu-ar', + 'ARFLAGS': 'rcs', + 'BASECFLAGS': '-Wno-unused-result -Wsign-compare', + 'BASECPPFLAGS': '-IObjects -IInclude -IPython', + 'BASEMODLIBS': '', + 'BINDIR': '/root/envs/llava/bin', + 'BINLIBDEST': '/root/envs/llava/lib/python3.10', + 'BLDLIBRARY': 'libpython3.10.a', + 'BLDSHARED': 'x86_64-conda-linux-gnu-gcc -pthread -shared -Wl,-O2 ' + '-Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now ' + '-Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/llava/lib ' + '-Wl,-rpath-link,/root/envs/llava/lib ' + '-L/root/envs/llava/lib ' + '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/llava/lib ' + '-Wl,-rpath-link,/root/envs/llava/lib ' + '-L/root/envs/llava/lib', + 'BUILDEXE': '', + 'BUILDPYTHON': 'python', + 'BUILD_GNU_TYPE': 'x86_64-conda-linux-gnu', + 'BYTESTR_DEPS': '\\', + 'CC': 'x86_64-conda-linux-gnu-gcc -pthread', + 'CCSHARED': '-fPIC', + 'CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv -O2 -Wall ' + '-march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe ' + '-isystem ' + '/root/envs/llava/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/llava=/usr/local/src/conda-prefix ' + ' ' + '-march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe ' + '-isystem ' + '/root/envs/llava/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/llava=/usr/local/src/conda-prefix ' + ' ', + 'CFLAGSFORSHARED': '', + 'CFLAGS_ALIASING': '', + 'CONFIGFILES': 'configure configure.ac acconfig.h pyconfig.h.in ' + 'Makefile.pre.in', + 'CONFIGURE_CFLAGS': '-march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 ' + '-ffunction-sections -pipe -isystem ' + '/root/envs/llava/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/llava=/usr/local/src/conda-prefix ' + ' ' + ' ', + 'CONFIGURE_CFLAGS_NODIST': '-fno-semantic-interposition ' + ' ' + ' -g -std=c99 -Wextra ' + '-Wno-unused-result -Wno-unused-parameter ' + '-Wno-missing-field-initializers ' + '-Werror=implicit-function-declaration ' + '-fvisibility=hidden', + 'CONFIGURE_CPPFLAGS': '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/llava/include ' + '-I/root/envs/llava/include', + 'CONFIGURE_LDFLAGS': '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/llava/lib ' 
+ '-Wl,-rpath-link,/root/envs/llava/lib ' + '-L/root/envs/llava/lib', + 'CONFIGURE_LDFLAGS_NODIST': '-fno-semantic-interposition ' + ' ' + ' -g', + 'CONFIG_ARGS': "'--prefix=/root/envs/llava' " + "'--build=x86_64-conda-linux-gnu' " + "'--host=x86_64-conda-linux-gnu' '--enable-ipv6' " + "'--with-ensurepip=no' " + "'--with-tzpath=/root/envs/llava/share/zoneinfo' " + "'--with-computed-gotos' '--with-system-ffi' " + "'--enable-loadable-sqlite-extensions' " + "'--with-tcltk-includes=-I/root/envs/llava/include' " + "'--with-tcltk-libs=-L/root/envs/llava/lib " + "-ltcl8.6 -ltk8.6' '--with-platlibdir=lib' '--with-lto' " + "'--enable-optimizations' " + "'-oldincludedir=/croot/python-split_1733933809325/_build_env/x86_64-conda-linux-gnu/sysroot/usr/include' " + "'--disable-shared' 'PROFILE_TASK=-m test --pgo' " + "'build_alias=x86_64-conda-linux-gnu' " + "'host_alias=x86_64-conda-linux-gnu' 'MACHDEP=linux' " + "'CC=x86_64-conda-linux-gnu-gcc' 'CFLAGS=-march=nocona " + '-mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections ' + '-pipe -isystem ' + '/root/envs/llava/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/llava=/usr/local/src/conda-prefix ' + ' ' + "' 'LDFLAGS=-Wl,-O2 -Wl,--sort-common -Wl,--as-needed " + '-Wl,-z,relro -Wl,-z,now -Wl,--disable-new-dtags ' + '-Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/llava/lib ' + '-Wl,-rpath-link,/root/envs/llava/lib ' + "-L/root/envs/llava/lib' " + "'CPPFLAGS=-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem " + '/root/envs/llava/include ' + "-I/root/envs/llava/include' " + "'CPP=/croot/python-split_1733933809325/_build_env/bin/x86_64-conda-linux-gnu-cpp' " + "'PKG_CONFIG_PATH=/root/envs/llava/lib/pkgconfig'", + 'CONFINCLUDEDIR': '/root/envs/llava/include', + 'CONFINCLUDEPY': '/root/envs/llava/include/python3.10', + 'COREPYTHONPATH': '', + 'COVERAGE_INFO': '/croot/python-split_1733933809325/work/build-static/coverage.info', + 'COVERAGE_REPORT': '/croot/python-split_1733933809325/work/build-static/lcov-report', + 'COVERAGE_REPORT_OPTIONS': '--no-branch-coverage --title "CPython lcov ' + 'report"', + 'CPPFLAGS': '-IObjects -IInclude -IPython -I. 
' + '-I/croot/python-split_1733933809325/work/Include -DNDEBUG ' + '-D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/llava/include ' + '-I/root/envs/llava/include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/llava/include ' + '-I/root/envs/llava/include', + 'CXX': 'x86_64-conda-linux-gnu-c++ -pthread', + 'DESTDIRS': '/root/envs/llava ' + '/root/envs/llava/lib ' + '/root/envs/llava/lib/python3.10 ' + '/root/envs/llava/lib/python3.10/lib-dynload', + 'DESTLIB': '/root/envs/llava/lib/python3.10', + 'DESTPATH': '', + 'DESTSHARED': '/root/envs/llava/lib/python3.10/lib-dynload', + 'DFLAGS': '', + 'DIRMODE': 755, + 'DIST': 'README.rst ChangeLog configure configure.ac acconfig.h pyconfig.h.in ' + 'Makefile.pre.in Include Lib Misc Ext-dummy', + 'DISTDIRS': 'Include Lib Misc Ext-dummy', + 'DISTFILES': 'README.rst ChangeLog configure configure.ac acconfig.h ' + 'pyconfig.h.in Makefile.pre.in', + 'DLINCLDIR': '.', + 'DLLLIBRARY': '', + 'DOUBLE_IS_ARM_MIXED_ENDIAN_IEEE754': 0, + 'DOUBLE_IS_BIG_ENDIAN_IEEE754': 0, + 'DOUBLE_IS_LITTLE_ENDIAN_IEEE754': 1, + 'DTRACE': '', + 'DTRACE_DEPS': '\\', + 'DTRACE_HEADERS': '', + 'DTRACE_OBJS': '', + 'DYNLOADFILE': 'dynload_shlib.o', + 'ENABLE_IPV6': 1, + 'ENSUREPIP': 'no', + 'EXE': '', + 'EXEMODE': 755, + 'EXPERIMENTAL_ISOLATED_SUBINTERPRETERS': 0, + 'EXPORTSFROM': '', + 'EXPORTSYMS': '', + 'EXTRATESTOPTS': '', + 'EXT_SUFFIX': '.cpython-310-x86_64-linux-gnu.so', + 'FILEMODE': 644, + 'FLOAT_WORDS_BIGENDIAN': 0, + 'FLOCK_NEEDS_LIBBSD': 0, + 'GETPGRP_HAVE_ARG': 0, + 'GITBRANCH': '', + 'GITTAG': '', + 'GITVERSION': '', + 'GNULD': 'yes', + 'HAVE_ACCEPT4': 1, + 'HAVE_ACOSH': 1, + 'HAVE_ADDRINFO': 1, + 'HAVE_ALARM': 1, + 'HAVE_ALIGNED_REQUIRED': 0, + 'HAVE_ALLOCA_H': 1, + 'HAVE_ALTZONE': 0, + 'HAVE_ASINH': 1, + 'HAVE_ASM_TYPES_H': 1, + 'HAVE_ATANH': 1, + 'HAVE_BIND_TEXTDOMAIN_CODESET': 1, + 'HAVE_BLUETOOTH_BLUETOOTH_H': 0, + 'HAVE_BLUETOOTH_H': 0, + 'HAVE_BROKEN_MBSTOWCS': 0, + 'HAVE_BROKEN_NICE': 0, + 'HAVE_BROKEN_PIPE_BUF': 0, + 'HAVE_BROKEN_POLL': 0, + 'HAVE_BROKEN_POSIX_SEMAPHORES': 0, + 'HAVE_BROKEN_PTHREAD_SIGMASK': 0, + 'HAVE_BROKEN_SEM_GETVALUE': 0, + 'HAVE_BROKEN_UNSETENV': 0, + 'HAVE_BUILTIN_ATOMIC': 1, + 'HAVE_CHFLAGS': 0, + 'HAVE_CHOWN': 1, + 'HAVE_CHROOT': 1, + 'HAVE_CLOCK': 1, + 'HAVE_CLOCK_GETRES': 1, + 'HAVE_CLOCK_GETTIME': 1, + 'HAVE_CLOCK_SETTIME': 1, + 'HAVE_CLOSE_RANGE': 0, + 'HAVE_COMPUTED_GOTOS': 1, + 'HAVE_CONFSTR': 1, + 'HAVE_CONIO_H': 0, + 'HAVE_COPYSIGN': 1, + 'HAVE_COPY_FILE_RANGE': 0, + 'HAVE_CRYPT_H': 1, + 'HAVE_CRYPT_R': 1, + 'HAVE_CTERMID': 1, + 'HAVE_CTERMID_R': 0, + 'HAVE_CURSES_FILTER': 1, + 'HAVE_CURSES_H': 1, + 'HAVE_CURSES_HAS_KEY': 1, + 'HAVE_CURSES_IMMEDOK': 1, + 'HAVE_CURSES_IS_PAD': 1, + 'HAVE_CURSES_IS_TERM_RESIZED': 1, + 'HAVE_CURSES_RESIZETERM': 1, + 'HAVE_CURSES_RESIZE_TERM': 1, + 'HAVE_CURSES_SYNCOK': 1, + 'HAVE_CURSES_TYPEAHEAD': 1, + 'HAVE_CURSES_USE_ENV': 1, + 'HAVE_CURSES_WCHGAT': 1, + 'HAVE_DECL_ISFINITE': 1, + 'HAVE_DECL_ISINF': 1, + 'HAVE_DECL_ISNAN': 1, + 'HAVE_DECL_RTLD_DEEPBIND': 1, + 'HAVE_DECL_RTLD_GLOBAL': 1, + 'HAVE_DECL_RTLD_LAZY': 1, + 'HAVE_DECL_RTLD_LOCAL': 1, + 'HAVE_DECL_RTLD_MEMBER': 0, + 'HAVE_DECL_RTLD_NODELETE': 1, + 'HAVE_DECL_RTLD_NOLOAD': 1, + 'HAVE_DECL_RTLD_NOW': 1, + 'HAVE_DECL_TZNAME': 0, + 'HAVE_DEVICE_MACROS': 1, + 'HAVE_DEV_PTC': 0, + 'HAVE_DEV_PTMX': 1, + 'HAVE_DIRECT_H': 0, + 'HAVE_DIRENT_D_TYPE': 1, + 'HAVE_DIRENT_H': 1, + 'HAVE_DIRFD': 1, + 'HAVE_DLFCN_H': 1, + 'HAVE_DLOPEN': 1, + 'HAVE_DUP2': 1, + 'HAVE_DUP3': 1, + 'HAVE_DYLD_SHARED_CACHE_CONTAINS_PATH': 0, + 
'HAVE_DYNAMIC_LOADING': 1, + 'HAVE_ENDIAN_H': 1, + 'HAVE_EPOLL': 1, + 'HAVE_EPOLL_CREATE1': 1, + 'HAVE_ERF': 1, + 'HAVE_ERFC': 1, + 'HAVE_ERRNO_H': 1, + 'HAVE_EVENTFD': 1, + 'HAVE_EXECV': 1, + 'HAVE_EXPLICIT_BZERO': 0, + 'HAVE_EXPLICIT_MEMSET': 0, + 'HAVE_EXPM1': 1, + 'HAVE_FACCESSAT': 1, + 'HAVE_FCHDIR': 1, + 'HAVE_FCHMOD': 1, + 'HAVE_FCHMODAT': 1, + 'HAVE_FCHOWN': 1, + 'HAVE_FCHOWNAT': 1, + 'HAVE_FCNTL_H': 1, + 'HAVE_FDATASYNC': 1, + 'HAVE_FDOPENDIR': 1, + 'HAVE_FDWALK': 0, + 'HAVE_FEXECVE': 1, + 'HAVE_FINITE': 1, + 'HAVE_FLOCK': 1, + 'HAVE_FORK': 1, + 'HAVE_FORKPTY': 1, + 'HAVE_FPATHCONF': 1, + 'HAVE_FSEEK64': 0, + 'HAVE_FSEEKO': 1, + 'HAVE_FSTATAT': 1, + 'HAVE_FSTATVFS': 1, + 'HAVE_FSYNC': 1, + 'HAVE_FTELL64': 0, + 'HAVE_FTELLO': 1, + 'HAVE_FTIME': 1, + 'HAVE_FTRUNCATE': 1, + 'HAVE_FUTIMENS': 1, + 'HAVE_FUTIMES': 1, + 'HAVE_FUTIMESAT': 1, + 'HAVE_GAI_STRERROR': 1, + 'HAVE_GAMMA': 1, + 'HAVE_GCC_ASM_FOR_MC68881': 0, + 'HAVE_GCC_ASM_FOR_X64': 1, + 'HAVE_GCC_ASM_FOR_X87': 1, + 'HAVE_GCC_UINT128_T': 1, + 'HAVE_GETADDRINFO': 1, + 'HAVE_GETC_UNLOCKED': 1, + 'HAVE_GETENTROPY': 0, + 'HAVE_GETGRGID_R': 1, + 'HAVE_GETGRNAM_R': 1, + 'HAVE_GETGROUPLIST': 1, + 'HAVE_GETGROUPS': 1, + 'HAVE_GETHOSTBYNAME': 0, + 'HAVE_GETHOSTBYNAME_R': 1, + 'HAVE_GETHOSTBYNAME_R_3_ARG': 0, + 'HAVE_GETHOSTBYNAME_R_5_ARG': 0, + 'HAVE_GETHOSTBYNAME_R_6_ARG': 1, + 'HAVE_GETITIMER': 1, + 'HAVE_GETLOADAVG': 1, + 'HAVE_GETLOGIN': 1, + 'HAVE_GETNAMEINFO': 1, + 'HAVE_GETPAGESIZE': 1, + 'HAVE_GETPEERNAME': 1, + 'HAVE_GETPGID': 1, + 'HAVE_GETPGRP': 1, + 'HAVE_GETPID': 1, + 'HAVE_GETPRIORITY': 1, + 'HAVE_GETPWENT': 1, + 'HAVE_GETPWNAM_R': 1, + 'HAVE_GETPWUID_R': 1, + 'HAVE_GETRANDOM': 0, + 'HAVE_GETRANDOM_SYSCALL': 1, + 'HAVE_GETRESGID': 1, + 'HAVE_GETRESUID': 1, + 'HAVE_GETSID': 1, + 'HAVE_GETSPENT': 1, + 'HAVE_GETSPNAM': 1, + 'HAVE_GETWD': 1, + 'HAVE_GLIBC_MEMMOVE_BUG': 0, + 'HAVE_GRP_H': 1, + 'HAVE_HSTRERROR': 1, + 'HAVE_HTOLE64': 1, + 'HAVE_HYPOT': 1, + 'HAVE_IEEEFP_H': 0, + 'HAVE_IF_NAMEINDEX': 1, + 'HAVE_INET_ATON': 1, + 'HAVE_INET_PTON': 1, + 'HAVE_INITGROUPS': 1, + 'HAVE_INTTYPES_H': 1, + 'HAVE_IO_H': 0, + 'HAVE_IPA_PURE_CONST_BUG': 0, + 'HAVE_KILL': 1, + 'HAVE_KILLPG': 1, + 'HAVE_KQUEUE': 0, + 'HAVE_LANGINFO_H': 1, + 'HAVE_LARGEFILE_SUPPORT': 0, + 'HAVE_LCHFLAGS': 0, + 'HAVE_LCHMOD': 0, + 'HAVE_LCHOWN': 1, + 'HAVE_LGAMMA': 1, + 'HAVE_LIBDL': 1, + 'HAVE_LIBDLD': 0, + 'HAVE_LIBIEEE': 0, + 'HAVE_LIBINTL_H': 1, + 'HAVE_LIBREADLINE': 1, + 'HAVE_LIBRESOLV': 0, + 'HAVE_LIBSENDFILE': 0, + 'HAVE_LIBUTIL_H': 0, + 'HAVE_LIBUUID': 1, + 'HAVE_LINK': 1, + 'HAVE_LINKAT': 1, + 'HAVE_LINUX_AUXVEC_H': 1, + 'HAVE_LINUX_CAN_BCM_H': 1, + 'HAVE_LINUX_CAN_H': 1, + 'HAVE_LINUX_CAN_J1939_H': 0, + 'HAVE_LINUX_CAN_RAW_FD_FRAMES': 1, + 'HAVE_LINUX_CAN_RAW_H': 1, + 'HAVE_LINUX_CAN_RAW_JOIN_FILTERS': 1, + 'HAVE_LINUX_MEMFD_H': 1, + 'HAVE_LINUX_NETLINK_H': 1, + 'HAVE_LINUX_QRTR_H': 0, + 'HAVE_LINUX_RANDOM_H': 1, + 'HAVE_LINUX_TIPC_H': 1, + 'HAVE_LINUX_VM_SOCKETS_H': 1, + 'HAVE_LINUX_WAIT_H': 1, + 'HAVE_LOCKF': 1, + 'HAVE_LOG1P': 1, + 'HAVE_LOG2': 1, + 'HAVE_LONG_DOUBLE': 1, + 'HAVE_LSTAT': 1, + 'HAVE_LUTIMES': 1, + 'HAVE_MADVISE': 1, + 'HAVE_MAKEDEV': 1, + 'HAVE_MBRTOWC': 1, + 'HAVE_MEMFD_CREATE': 0, + 'HAVE_MEMORY_H': 1, + 'HAVE_MEMRCHR': 1, + 'HAVE_MKDIRAT': 1, + 'HAVE_MKFIFO': 1, + 'HAVE_MKFIFOAT': 1, + 'HAVE_MKNOD': 1, + 'HAVE_MKNODAT': 1, + 'HAVE_MKTIME': 1, + 'HAVE_MMAP': 1, + 'HAVE_MREMAP': 1, + 'HAVE_NCURSES_H': 1, + 'HAVE_NDIR_H': 0, + 'HAVE_NETPACKET_PACKET_H': 1, + 'HAVE_NET_IF_H': 1, + 'HAVE_NICE': 1, + 
'HAVE_NON_UNICODE_WCHAR_T_REPRESENTATION': 0, + 'HAVE_OPENAT': 1, + 'HAVE_OPENPTY': 1, + 'HAVE_PATHCONF': 1, + 'HAVE_PAUSE': 1, + 'HAVE_PIPE2': 1, + 'HAVE_PLOCK': 0, + 'HAVE_POLL': 1, + 'HAVE_POLL_H': 1, + 'HAVE_POSIX_FADVISE': 1, + 'HAVE_POSIX_FALLOCATE': 1, + 'HAVE_POSIX_SPAWN': 1, + 'HAVE_POSIX_SPAWNP': 1, + 'HAVE_PREAD': 1, + 'HAVE_PREADV': 1, + 'HAVE_PREADV2': 0, + 'HAVE_PRLIMIT': 1, + 'HAVE_PROCESS_H': 0, + 'HAVE_PROTOTYPES': 1, + 'HAVE_PTHREAD_CONDATTR_SETCLOCK': 1, + 'HAVE_PTHREAD_DESTRUCTOR': 0, + 'HAVE_PTHREAD_GETCPUCLOCKID': 1, + 'HAVE_PTHREAD_H': 1, + 'HAVE_PTHREAD_INIT': 0, + 'HAVE_PTHREAD_KILL': 1, + 'HAVE_PTHREAD_SIGMASK': 1, + 'HAVE_PTY_H': 1, + 'HAVE_PWRITE': 1, + 'HAVE_PWRITEV': 1, + 'HAVE_PWRITEV2': 0, + 'HAVE_READLINK': 1, + 'HAVE_READLINKAT': 1, + 'HAVE_READV': 1, + 'HAVE_REALPATH': 1, + 'HAVE_RENAMEAT': 1, + 'HAVE_RL_APPEND_HISTORY': 1, + 'HAVE_RL_CATCH_SIGNAL': 1, + 'HAVE_RL_COMPLETION_APPEND_CHARACTER': 1, + 'HAVE_RL_COMPLETION_DISPLAY_MATCHES_HOOK': 1, + 'HAVE_RL_COMPLETION_MATCHES': 1, + 'HAVE_RL_COMPLETION_SUPPRESS_APPEND': 1, + 'HAVE_RL_PRE_INPUT_HOOK': 1, + 'HAVE_RL_RESIZE_TERMINAL': 1, + 'HAVE_ROUND': 1, + 'HAVE_RTPSPAWN': 0, + 'HAVE_SCHED_GET_PRIORITY_MAX': 1, + 'HAVE_SCHED_H': 1, + 'HAVE_SCHED_RR_GET_INTERVAL': 1, + 'HAVE_SCHED_SETAFFINITY': 1, + 'HAVE_SCHED_SETPARAM': 1, + 'HAVE_SCHED_SETSCHEDULER': 1, + 'HAVE_SEM_CLOCKWAIT': 0, + 'HAVE_SEM_GETVALUE': 1, + 'HAVE_SEM_OPEN': 1, + 'HAVE_SEM_TIMEDWAIT': 1, + 'HAVE_SEM_UNLINK': 1, + 'HAVE_SENDFILE': 1, + 'HAVE_SETEGID': 1, + 'HAVE_SETEUID': 1, + 'HAVE_SETGID': 1, + 'HAVE_SETGROUPS': 1, + 'HAVE_SETHOSTNAME': 1, + 'HAVE_SETITIMER': 1, + 'HAVE_SETLOCALE': 1, + 'HAVE_SETPGID': 1, + 'HAVE_SETPGRP': 1, + 'HAVE_SETPRIORITY': 1, + 'HAVE_SETREGID': 1, + 'HAVE_SETRESGID': 1, + 'HAVE_SETRESUID': 1, + 'HAVE_SETREUID': 1, + 'HAVE_SETSID': 1, + 'HAVE_SETUID': 1, + 'HAVE_SETVBUF': 1, + 'HAVE_SHADOW_H': 1, + 'HAVE_SHM_OPEN': 1, + 'HAVE_SHM_UNLINK': 1, + 'HAVE_SIGACTION': 1, + 'HAVE_SIGALTSTACK': 1, + 'HAVE_SIGFILLSET': 1, + 'HAVE_SIGINFO_T_SI_BAND': 1, + 'HAVE_SIGINTERRUPT': 1, + 'HAVE_SIGNAL_H': 1, + 'HAVE_SIGPENDING': 1, + 'HAVE_SIGRELSE': 1, + 'HAVE_SIGTIMEDWAIT': 1, + 'HAVE_SIGWAIT': 1, + 'HAVE_SIGWAITINFO': 1, + 'HAVE_SNPRINTF': 1, + 'HAVE_SOCKADDR_ALG': 1, + 'HAVE_SOCKADDR_SA_LEN': 0, + 'HAVE_SOCKADDR_STORAGE': 1, + 'HAVE_SOCKETPAIR': 1, + 'HAVE_SPAWN_H': 1, + 'HAVE_SPLICE': 1, + 'HAVE_SSIZE_T': 1, + 'HAVE_STATVFS': 1, + 'HAVE_STAT_TV_NSEC': 1, + 'HAVE_STAT_TV_NSEC2': 0, + 'HAVE_STDARG_PROTOTYPES': 1, + 'HAVE_STDINT_H': 1, + 'HAVE_STDLIB_H': 1, + 'HAVE_STD_ATOMIC': 1, + 'HAVE_STRFTIME': 1, + 'HAVE_STRINGS_H': 1, + 'HAVE_STRING_H': 1, + 'HAVE_STRLCPY': 0, + 'HAVE_STROPTS_H': 0, + 'HAVE_STRSIGNAL': 1, + 'HAVE_STRUCT_PASSWD_PW_GECOS': 1, + 'HAVE_STRUCT_PASSWD_PW_PASSWD': 1, + 'HAVE_STRUCT_STAT_ST_BIRTHTIME': 0, + 'HAVE_STRUCT_STAT_ST_BLKSIZE': 1, + 'HAVE_STRUCT_STAT_ST_BLOCKS': 1, + 'HAVE_STRUCT_STAT_ST_FLAGS': 0, + 'HAVE_STRUCT_STAT_ST_GEN': 0, + 'HAVE_STRUCT_STAT_ST_RDEV': 1, + 'HAVE_STRUCT_TM_TM_ZONE': 1, + 'HAVE_SYMLINK': 1, + 'HAVE_SYMLINKAT': 1, + 'HAVE_SYNC': 1, + 'HAVE_SYSCONF': 1, + 'HAVE_SYSEXITS_H': 1, + 'HAVE_SYS_AUDIOIO_H': 0, + 'HAVE_SYS_AUXV_H': 1, + 'HAVE_SYS_BSDTTY_H': 0, + 'HAVE_SYS_DEVPOLL_H': 0, + 'HAVE_SYS_DIR_H': 0, + 'HAVE_SYS_ENDIAN_H': 0, + 'HAVE_SYS_EPOLL_H': 1, + 'HAVE_SYS_EVENTFD_H': 1, + 'HAVE_SYS_EVENT_H': 0, + 'HAVE_SYS_FILE_H': 1, + 'HAVE_SYS_IOCTL_H': 1, + 'HAVE_SYS_KERN_CONTROL_H': 0, + 'HAVE_SYS_LOADAVG_H': 0, + 'HAVE_SYS_LOCK_H': 0, + 'HAVE_SYS_MEMFD_H': 0, + 'HAVE_SYS_MKDEV_H': 0, + 
'HAVE_SYS_MMAN_H': 1, + 'HAVE_SYS_MODEM_H': 0, + 'HAVE_SYS_NDIR_H': 0, + 'HAVE_SYS_PARAM_H': 1, + 'HAVE_SYS_POLL_H': 1, + 'HAVE_SYS_RANDOM_H': 0, + 'HAVE_SYS_RESOURCE_H': 1, + 'HAVE_SYS_SELECT_H': 1, + 'HAVE_SYS_SENDFILE_H': 1, + 'HAVE_SYS_SOCKET_H': 1, + 'HAVE_SYS_STATVFS_H': 1, + 'HAVE_SYS_STAT_H': 1, + 'HAVE_SYS_SYSCALL_H': 1, + 'HAVE_SYS_SYSMACROS_H': 1, + 'HAVE_SYS_SYS_DOMAIN_H': 0, + 'HAVE_SYS_TERMIO_H': 0, + 'HAVE_SYS_TIMES_H': 1, + 'HAVE_SYS_TIME_H': 1, + 'HAVE_SYS_TYPES_H': 1, + 'HAVE_SYS_UIO_H': 1, + 'HAVE_SYS_UN_H': 1, + 'HAVE_SYS_UTSNAME_H': 1, + 'HAVE_SYS_WAIT_H': 1, + 'HAVE_SYS_XATTR_H': 1, + 'HAVE_TCGETPGRP': 1, + 'HAVE_TCSETPGRP': 1, + 'HAVE_TEMPNAM': 1, + 'HAVE_TERMIOS_H': 1, + 'HAVE_TERM_H': 1, + 'HAVE_TGAMMA': 1, + 'HAVE_TIMEGM': 1, + 'HAVE_TIMES': 1, + 'HAVE_TMPFILE': 1, + 'HAVE_TMPNAM': 1, + 'HAVE_TMPNAM_R': 1, + 'HAVE_TM_ZONE': 1, + 'HAVE_TRUNCATE': 1, + 'HAVE_TZNAME': 0, + 'HAVE_UCS4_TCL': 0, + 'HAVE_UNAME': 1, + 'HAVE_UNISTD_H': 1, + 'HAVE_UNLINKAT': 1, + 'HAVE_USABLE_WCHAR_T': 0, + 'HAVE_UTIL_H': 0, + 'HAVE_UTIMENSAT': 1, + 'HAVE_UTIMES': 1, + 'HAVE_UTIME_H': 1, + 'HAVE_UUID_CREATE': 0, + 'HAVE_UUID_ENC_BE': 0, + 'HAVE_UUID_GENERATE_TIME_SAFE': 1, + 'HAVE_UUID_H': 1, + 'HAVE_UUID_UUID_H': 1, + 'HAVE_VFORK': 1, + 'HAVE_WAIT3': 1, + 'HAVE_WAIT4': 1, + 'HAVE_WAITID': 1, + 'HAVE_WAITPID': 1, + 'HAVE_WCHAR_H': 1, + 'HAVE_WCSCOLL': 1, + 'HAVE_WCSFTIME': 1, + 'HAVE_WCSXFRM': 1, + 'HAVE_WMEMCMP': 1, + 'HAVE_WORKING_TZSET': 1, + 'HAVE_WRITEV': 1, + 'HAVE_ZLIB_COPY': 1, + 'HAVE__GETPTY': 0, + 'HOST_GNU_TYPE': 'x86_64-conda-linux-gnu', + 'INCLDIRSTOMAKE': '/root/envs/llava/include ' + '/root/envs/llava/include ' + '/root/envs/llava/include/python3.10 ' + '/root/envs/llava/include/python3.10', + 'INCLUDEDIR': '/root/envs/llava/include', + 'INCLUDEPY': '/root/envs/llava/include/python3.10', + 'INSTALL': '/usr/bin/install -c', + 'INSTALL_DATA': '/usr/bin/install -c -m 644', + 'INSTALL_PROGRAM': '/usr/bin/install -c', + 'INSTALL_SCRIPT': '/usr/bin/install -c', + 'INSTALL_SHARED': '/usr/bin/install -c -m 755', + 'INSTSONAME': 'libpython3.10.a', + 'IO_H': 'Modules/_io/_iomodule.h', + 'IO_OBJS': '\\', + 'LDCXXSHARED': 'x86_64-conda-linux-gnu-c++ -pthread -shared', + 'LDFLAGS': '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now ' + '-Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/llava/lib ' + '-Wl,-rpath-link,/root/envs/llava/lib ' + '-L/root/envs/llava/lib ' + '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now ' + '-Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/llava/lib ' + '-Wl,-rpath-link,/root/envs/llava/lib ' + '-L/root/envs/llava/lib', + 'LDLIBRARY': 'libpython3.10.a', + 'LDLIBRARYDIR': '', + 'LDSHARED': 'x86_64-conda-linux-gnu-gcc -pthread -shared -Wl,-O2 ' + '-Wl,--sort-common -Wl,--as-needed -Wl,-z,relro -Wl,-z,now ' + '-Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/llava/lib ' + '-Wl,-rpath-link,/root/envs/llava/lib ' + '-L/root/envs/llava/lib ' + '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/llava/lib ' + '-Wl,-rpath-link,/root/envs/llava/lib ' + '-L/root/envs/llava/lib', + 'LDVERSION': '3.10', + 'LIBC': '', + 'LIBDEST': '/root/envs/llava/lib/python3.10', + 'LIBDIR': '/root/envs/llava/lib', + 'LIBFFI_INCLUDEDIR': '/root/envs/llava/include', + 'LIBM': '-lm', + 'LIBOBJDIR': 'Python/', + 'LIBOBJS': '', + 'LIBPC': '/root/envs/llava/lib/pkgconfig', + 'LIBPL': 
'/root/envs/llava/lib/python3.10/config-3.10-x86_64-linux-gnu', + 'LIBPYTHON': '', + 'LIBRARY': 'libpython3.10.a', + 'LIBRARY_DEPS': 'libpython3.10.a', + 'LIBRARY_OBJS': '\\', + 'LIBRARY_OBJS_OMIT_FROZEN': '\\', + 'LIBS': '-lcrypt -lpthread -ldl -lutil -lm', + 'LIBSUBDIRS': 'asyncio \\', + 'LINKCC': 'x86_64-conda-linux-gnu-gcc -pthread', + 'LINKFORSHARED': '-Xlinker -export-dynamic', + 'LIPO_32BIT_FLAGS': '', + 'LIPO_INTEL64_FLAGS': '', + 'LLVM_PROF_ERR': 'no', + 'LLVM_PROF_FILE': '', + 'LLVM_PROF_MERGER': 'true', + 'LN': 'ln', + 'LOCALMODLIBS': '', + 'MACHDEP': 'linux', + 'MACHDEP_OBJS': '', + 'MACHDESTLIB': '/root/envs/llava/lib/python3.10', + 'MACOSX_DEPLOYMENT_TARGET': '', + 'MAINCC': 'x86_64-conda-linux-gnu-gcc -pthread', + 'MAJOR_IN_MKDEV': 0, + 'MAJOR_IN_SYSMACROS': 0, + 'MAKESETUP': '/croot/python-split_1733933809325/work/Modules/makesetup', + 'MANDIR': '/root/envs/llava/share/man', + 'MKDIR_P': '/usr/bin/mkdir -p', + 'MODBUILT_NAMES': 'posix errno pwd _sre _codecs _weakref _functools ' + '_operator _collections _abc itertools atexit _signal ' + '_stat time _thread _locale _io faulthandler ' + '_tracemalloc _symtable xxsubtype', + 'MODDISABLED_NAMES': '', + 'MODLIBS': '', + 'MODOBJS': 'Modules/posixmodule.o Modules/errnomodule.o ' + 'Modules/pwdmodule.o Modules/_sre.o Modules/_codecsmodule.o ' + 'Modules/_weakref.o Modules/_functoolsmodule.o ' + 'Modules/_operator.o Modules/_collectionsmodule.o ' + 'Modules/_abc.o Modules/itertoolsmodule.o ' + 'Modules/atexitmodule.o Modules/signalmodule.o Modules/_stat.o ' + 'Modules/timemodule.o Modules/_threadmodule.o ' + 'Modules/_localemodule.o Modules/_iomodule.o Modules/iobase.o ' + 'Modules/fileio.o Modules/bytesio.o Modules/bufferedio.o ' + 'Modules/textio.o Modules/stringio.o Modules/faulthandler.o ' + 'Modules/_tracemalloc.o Modules/symtablemodule.o ' + 'Modules/xxsubtype.o', + 'MODULE_OBJS': '\\', + 'MULTIARCH': 'x86_64-linux-gnu', + 'MULTIARCH_CPPFLAGS': '-DMULTIARCH=\\"x86_64-linux-gnu\\"', + 'MVWDELCH_IS_EXPRESSION': 1, + 'NO_AS_NEEDED': '-Wl,--no-as-needed', + 'OBJECT_OBJS': '\\', + 'OPENSSL_INCLUDES': '-I/root/envs/llava/include', + 'OPENSSL_LDFLAGS': '-L/root/envs/llava/lib', + 'OPENSSL_LIBS': '-lssl -lcrypto', + 'OPENSSL_RPATH': '', + 'OPT': '-DNDEBUG -fwrapv -O2 -Wall', + 'OTHER_LIBTOOL_OPT': '', + 'PACKAGE_BUGREPORT': 0, + 'PACKAGE_NAME': 0, + 'PACKAGE_STRING': 0, + 'PACKAGE_TARNAME': 0, + 'PACKAGE_URL': 0, + 'PACKAGE_VERSION': 0, + 'PARSER_HEADERS': '\\', + 'PARSER_OBJS': '\\ \\ Parser/myreadline.o Parser/tokenizer.o', + 'PEGEN_HEADERS': '\\', + 'PEGEN_OBJS': '\\', + 'PGO_PROF_GEN_FLAG': '-fprofile-generate', + 'PGO_PROF_USE_FLAG': ' ', + 'PLATLIBDIR': 'lib', + 'POBJS': '\\', + 'POSIX_SEMAPHORES_NOT_ENABLED': 0, + 'PROFILE_TASK': '-m test --pgo', + 'PTHREAD_KEY_T_IS_COMPATIBLE_WITH_INT': 1, + 'PTHREAD_SYSTEM_SCHED_SUPPORTED': 1, + 'PURIFY': '', + 'PY3LIBRARY': '', + 'PYLONG_BITS_IN_DIGIT': 0, + 'PYTHON': 'python', + 'PYTHONFRAMEWORK': '', + 'PYTHONFRAMEWORKDIR': 'no-framework', + 'PYTHONFRAMEWORKINSTALLDIR': '', + 'PYTHONFRAMEWORKPREFIX': '', + 'PYTHONPATH': '', + 'PYTHON_FOR_BUILD': './python -E', + 'PYTHON_FOR_REGEN': '', + 'PYTHON_HEADERS': '\\', + 'PYTHON_OBJS': '\\', + 'PY_BUILD_ENVIRON': '', + 'PY_BUILTIN_HASHLIB_HASHES': '"md5,sha1,sha256,sha512,sha3,blake2"', + 'PY_BUILTIN_MODULE_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG ' + '-fwrapv -O2 -Wall -march=nocona -mtune=haswell ' + '-ftree-vectorize -fPIC -fstack-protector-strong ' + '-fno-plt -O2 -ffunction-sections -pipe -isystem ' + '/root/envs/llava/include ' 
+ '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/llava=/usr/local/src/conda-prefix ' + ' ' + ' -march=nocona ' + '-mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 ' + '-ffunction-sections -pipe -isystem ' + '/root/envs/llava/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/llava=/usr/local/src/conda-prefix ' + ' ' + ' ' + '-fno-semantic-interposition ' + ' ' + ' -g -std=c99 -Wextra ' + '-Wno-unused-result -Wno-unused-parameter ' + '-Wno-missing-field-initializers ' + '-Werror=implicit-function-declaration ' + '-fvisibility=hidden ' + ' ' + '-I/croot/python-split_1733933809325/work/Include/internal ' + '-IObjects -IInclude -IPython -I. ' + '-I/croot/python-split_1733933809325/work/Include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/llava/include ' + '-I/root/envs/llava/include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/llava/include ' + '-I/root/envs/llava/include ' + '-DPy_BUILD_CORE_BUILTIN', + 'PY_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv -O2 -Wall ' + '-march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe ' + '-isystem ' + '/root/envs/llava/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/llava=/usr/local/src/conda-prefix ' + ' ' + ' -march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections -pipe ' + '-isystem ' + '/root/envs/llava/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/llava=/usr/local/src/conda-prefix ' + ' ' + '', + 'PY_CFLAGS_NODIST': '-fno-semantic-interposition ' + ' -g -std=c99 ' + '-Wextra -Wno-unused-result -Wno-unused-parameter ' + '-Wno-missing-field-initializers ' + '-Werror=implicit-function-declaration ' + '-fvisibility=hidden ' + '-I/croot/python-split_1733933809325/work/Include/internal', + 'PY_COERCE_C_LOCALE': 1, + 'PY_CORE_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv -O2 ' + '-Wall -march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections ' + '-pipe -isystem ' + '/root/envs/llava/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/llava=/usr/local/src/conda-prefix ' + ' ' + ' -march=nocona -mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 -ffunction-sections ' + '-pipe -isystem ' + '/root/envs/llava/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/llava=/usr/local/src/conda-prefix ' + ' ' + ' -fno-semantic-interposition ' + ' ' + '-g -std=c99 -Wextra -Wno-unused-result ' + '-Wno-unused-parameter -Wno-missing-field-initializers ' + '-Werror=implicit-function-declaration -fvisibility=hidden ' + ' ' + '-I/croot/python-split_1733933809325/work/Include/internal ' + '-IObjects -IInclude -IPython -I. 
' + '-I/croot/python-split_1733933809325/work/Include -DNDEBUG ' + '-D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/llava/include ' + '-I/root/envs/llava/include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/llava/include ' + '-I/root/envs/llava/include ' + '-DPy_BUILD_CORE', + 'PY_CORE_LDFLAGS': '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/llava/lib ' + '-Wl,-rpath-link,/root/envs/llava/lib ' + '-L/root/envs/llava/lib ' + '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/llava/lib ' + '-Wl,-rpath-link,/root/envs/llava/lib ' + '-L/root/envs/llava/lib ' + '-fno-semantic-interposition ' + ' -g', + 'PY_CPPFLAGS': '-IObjects -IInclude -IPython -I. ' + '-I/croot/python-split_1733933809325/work/Include -DNDEBUG ' + '-D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/llava/include ' + '-I/root/envs/llava/include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/llava/include ' + '-I/root/envs/llava/include', + 'PY_ENABLE_SHARED': 0, + 'PY_FORMAT_SIZE_T': '"z"', + 'PY_LDFLAGS': '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/llava/lib ' + '-Wl,-rpath-link,/root/envs/llava/lib ' + '-L/root/envs/llava/lib ' + '-Wl,-O2 -Wl,--sort-common -Wl,--as-needed -Wl,-z,relro ' + '-Wl,-z,now -Wl,--disable-new-dtags -Wl,--gc-sections ' + '-Wl,-rpath,/root/envs/llava/lib ' + '-Wl,-rpath-link,/root/envs/llava/lib ' + '-L/root/envs/llava/lib', + 'PY_LDFLAGS_NODIST': '-fno-semantic-interposition ' + ' -g', + 'PY_SSL_DEFAULT_CIPHERS': 1, + 'PY_SSL_DEFAULT_CIPHER_STRING': 0, + 'PY_STDMODULE_CFLAGS': '-Wno-unused-result -Wsign-compare -DNDEBUG -fwrapv ' + '-O2 -Wall -march=nocona -mtune=haswell ' + '-ftree-vectorize -fPIC -fstack-protector-strong ' + '-fno-plt -O2 -ffunction-sections -pipe -isystem ' + '/root/envs/llava/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/llava=/usr/local/src/conda-prefix ' + ' ' + ' -march=nocona ' + '-mtune=haswell -ftree-vectorize -fPIC ' + '-fstack-protector-strong -fno-plt -O2 ' + '-ffunction-sections -pipe -isystem ' + '/root/envs/llava/include ' + '-fdebug-prefix-map=/croot/python-split_1733933809325/work=/usr/local/src/conda/python-3.10.16 ' + '-fdebug-prefix-map=/root/envs/llava=/usr/local/src/conda-prefix ' + ' ' + ' ' + '-fno-semantic-interposition ' + ' -g -std=c99 ' + '-Wextra -Wno-unused-result -Wno-unused-parameter ' + '-Wno-missing-field-initializers ' + '-Werror=implicit-function-declaration ' + '-fvisibility=hidden ' + ' ' + '-I/croot/python-split_1733933809325/work/Include/internal ' + '-IObjects -IInclude -IPython -I. 
' + '-I/croot/python-split_1733933809325/work/Include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/llava/include ' + '-I/root/envs/llava/include ' + '-DNDEBUG -D_FORTIFY_SOURCE=2 -O2 -isystem ' + '/root/envs/llava/include ' + '-I/root/envs/llava/include', + 'Py_DEBUG': 0, + 'Py_ENABLE_SHARED': 0, + 'Py_HASH_ALGORITHM': 0, + 'Py_TRACE_REFS': 0, + 'QUICKTESTOPTS': '-x test_subprocess test_io test_lib2to3 \\', + 'READELF': 'x86_64-conda-linux-gnu-readelf', + 'RESSRCDIR': 'Mac/Resources/framework', + 'RETSIGTYPE': 'void', + 'RUNSHARED': '', + 'SCRIPTDIR': '/root/envs/llava/lib', + 'SETPGRP_HAVE_ARG': 0, + 'SHELL': '/bin/sh', + 'SHLIBS': '-lcrypt -lpthread -ldl -lutil -lm', + 'SHLIB_SUFFIX': '.so', + 'SHM_NEEDS_LIBRT': 1, + 'SIGNED_RIGHT_SHIFT_ZERO_FILLS': 0, + 'SITEPATH': '', + 'SIZEOF_DOUBLE': 8, + 'SIZEOF_FLOAT': 4, + 'SIZEOF_FPOS_T': 16, + 'SIZEOF_INT': 4, + 'SIZEOF_LONG': 8, + 'SIZEOF_LONG_DOUBLE': 16, + 'SIZEOF_LONG_LONG': 8, + 'SIZEOF_OFF_T': 8, + 'SIZEOF_PID_T': 4, + 'SIZEOF_PTHREAD_KEY_T': 4, + 'SIZEOF_PTHREAD_T': 8, + 'SIZEOF_SHORT': 2, + 'SIZEOF_SIZE_T': 8, + 'SIZEOF_TIME_T': 8, + 'SIZEOF_UINTPTR_T': 8, + 'SIZEOF_VOID_P': 8, + 'SIZEOF_WCHAR_T': 4, + 'SIZEOF__BOOL': 1, + 'SOABI': 'cpython-310-x86_64-linux-gnu', + 'SRCDIRS': 'Parser Objects Python Modules Modules/_io Programs', + 'SRC_GDB_HOOKS': '/croot/python-split_1733933809325/work/Tools/gdb/libpython.py', + 'STATIC_LIBPYTHON': 1, + 'STDC_HEADERS': 1, + 'STRICT_SYSV_CURSES': "/* Don't use ncurses extensions */", + 'STRIPFLAG': '-s', + 'SUBDIRS': '', + 'SUBDIRSTOO': 'Include Lib Misc', + 'SYSLIBS': '-lm', + 'SYS_SELECT_WITH_SYS_TIME': 1, + 'TCLTK_INCLUDES': '-I/root/envs/llava/include', + 'TCLTK_LIBS': '-L/root/envs/llava/lib ' + '-ltcl8.6 -ltk8.6', + 'TESTOPTS': '', + 'TESTPATH': '', + 'TESTPYTHON': './python', + 'TESTPYTHONOPTS': '', + 'TESTRUNNER': './python ' + '/croot/python-split_1733933809325/work/Tools/scripts/run_tests.py', + 'TESTSUBDIRS': 'ctypes/test \\', + 'TESTTIMEOUT': 1200, + 'TEST_MODULES': 'yes', + 'THREAD_STACK_SIZE': 0, + 'TIMEMODULE_LIB': 0, + 'TIME_WITH_SYS_TIME': 1, + 'TM_IN_SYS_TIME': 0, + 'TZPATH': '/root/envs/llava/share/zoneinfo', + 'UNICODE_DEPS': '\\', + 'UNIVERSALSDK': '', + 'UPDATE_FILE': '/croot/python-split_1733933809325/work/Tools/scripts/update_file.py', + 'USE_COMPUTED_GOTOS': 1, + 'VERSION': '3.10', + 'VPATH': '/croot/python-split_1733933809325/work', + 'WHEEL_PKG_DIR': '', + 'WINDOW_HAS_FLAGS': 1, + 'WITH_DECIMAL_CONTEXTVAR': 1, + 'WITH_DOC_STRINGS': 1, + 'WITH_DTRACE': 0, + 'WITH_DYLD': 0, + 'WITH_EDITLINE': 0, + 'WITH_LIBINTL': 0, + 'WITH_NEXT_FRAMEWORK': 0, + 'WITH_PYMALLOC': 1, + 'WITH_VALGRIND': 0, + 'X87_DOUBLE_ROUNDING': 0, + 'XMLLIBSUBDIRS': 'xml xml/dom xml/etree xml/parsers xml/sax', + 'abs_builddir': '/croot/python-split_1733933809325/work/build-static', + 'abs_srcdir': '/croot/python-split_1733933809325/work', + 'datarootdir': '/root/envs/llava/share', + 'exec_prefix': '/root/envs/llava', + 'prefix': '/root/envs/llava', + 'srcdir': '/croot/python-split_1733933809325/work'} diff --git a/llava/lib/python3.10/_threading_local.py b/llava/lib/python3.10/_threading_local.py new file mode 100644 index 0000000000000000000000000000000000000000..b006d76c4e23df7dbf09bc7e668b9eb87e4044af --- /dev/null +++ b/llava/lib/python3.10/_threading_local.py @@ -0,0 +1,242 @@ +"""Thread-local objects. + +(Note that this module provides a Python version of the threading.local + class. Depending on the version of Python you're using, there may be a + faster one available. 
You should always import the `local` class from + `threading`.) + +Thread-local objects support the management of thread-local data. +If you have data that you want to be local to a thread, simply create +a thread-local object and use its attributes: + + >>> mydata = local() + >>> mydata.number = 42 + >>> mydata.number + 42 + +You can also access the local-object's dictionary: + + >>> mydata.__dict__ + {'number': 42} + >>> mydata.__dict__.setdefault('widgets', []) + [] + >>> mydata.widgets + [] + +What's important about thread-local objects is that their data are +local to a thread. If we access the data in a different thread: + + >>> log = [] + >>> def f(): + ... items = sorted(mydata.__dict__.items()) + ... log.append(items) + ... mydata.number = 11 + ... log.append(mydata.number) + + >>> import threading + >>> thread = threading.Thread(target=f) + >>> thread.start() + >>> thread.join() + >>> log + [[], 11] + +we get different data. Furthermore, changes made in the other thread +don't affect data seen in this thread: + + >>> mydata.number + 42 + +Of course, values you get from a local object, including a __dict__ +attribute, are for whatever thread was current at the time the +attribute was read. For that reason, you generally don't want to save +these values across threads, as they apply only to the thread they +came from. + +You can create custom local objects by subclassing the local class: + + >>> class MyLocal(local): + ... number = 2 + ... def __init__(self, /, **kw): + ... self.__dict__.update(kw) + ... def squared(self): + ... return self.number ** 2 + +This can be useful to support default values, methods and +initialization. Note that if you define an __init__ method, it will be +called each time the local object is used in a separate thread. This +is necessary to initialize each thread's dictionary. + +Now if we create a local object: + + >>> mydata = MyLocal(color='red') + +Now we have a default number: + + >>> mydata.number + 2 + +an initial color: + + >>> mydata.color + 'red' + >>> del mydata.color + +And a method that operates on the data: + + >>> mydata.squared() + 4 + +As before, we can access the data in a separate thread: + + >>> log = [] + >>> thread = threading.Thread(target=f) + >>> thread.start() + >>> thread.join() + >>> log + [[('color', 'red')], 11] + +without affecting this thread's data: + + >>> mydata.number + 2 + >>> mydata.color + Traceback (most recent call last): + ... + AttributeError: 'MyLocal' object has no attribute 'color' + +Note that subclasses can define slots, but they are not thread +local. They are shared across threads: + + >>> class MyLocal(local): + ... __slots__ = 'number' + + >>> mydata = MyLocal() + >>> mydata.number = 42 + >>> mydata.color = 'red' + +So, the separate thread: + + >>> thread = threading.Thread(target=f) + >>> thread.start() + >>> thread.join() + +affects what we see: + + >>> mydata.number + 11 + +>>> del mydata +""" + +from weakref import ref +from contextlib import contextmanager + +__all__ = ["local"] + +# We need to use objects from the threading module, but the threading +# module may also want to use our `local` class, if support for locals +# isn't compiled in to the `thread` module. This creates potential problems +# with circular imports. For that reason, we don't import `threading` +# until the bottom of this file (a hack sufficient to worm around the +# potential problems). 
Note that all platforms on CPython do have support +# for locals in the `thread` module, and there is no circular import problem +# then, so problems introduced by fiddling the order of imports here won't +# manifest. + +class _localimpl: + """A class managing thread-local dicts""" + __slots__ = 'key', 'dicts', 'localargs', 'locallock', '__weakref__' + + def __init__(self): + # The key used in the Thread objects' attribute dicts. + # We keep it a string for speed but make it unlikely to clash with + # a "real" attribute. + self.key = '_threading_local._localimpl.' + str(id(self)) + # { id(Thread) -> (ref(Thread), thread-local dict) } + self.dicts = {} + + def get_dict(self): + """Return the dict for the current thread. Raises KeyError if none + defined.""" + thread = current_thread() + return self.dicts[id(thread)][1] + + def create_dict(self): + """Create a new dict for the current thread, and return it.""" + localdict = {} + key = self.key + thread = current_thread() + idt = id(thread) + def local_deleted(_, key=key): + # When the localimpl is deleted, remove the thread attribute. + thread = wrthread() + if thread is not None: + del thread.__dict__[key] + def thread_deleted(_, idt=idt): + # When the thread is deleted, remove the local dict. + # Note that this is suboptimal if the thread object gets + # caught in a reference loop. We would like to be called + # as soon as the OS-level thread ends instead. + local = wrlocal() + if local is not None: + dct = local.dicts.pop(idt) + wrlocal = ref(self, local_deleted) + wrthread = ref(thread, thread_deleted) + thread.__dict__[key] = wrlocal + self.dicts[idt] = wrthread, localdict + return localdict + + +@contextmanager +def _patch(self): + impl = object.__getattribute__(self, '_local__impl') + try: + dct = impl.get_dict() + except KeyError: + dct = impl.create_dict() + args, kw = impl.localargs + self.__init__(*args, **kw) + with impl.locallock: + object.__setattr__(self, '__dict__', dct) + yield + + +class local: + __slots__ = '_local__impl', '__dict__' + + def __new__(cls, /, *args, **kw): + if (args or kw) and (cls.__init__ is object.__init__): + raise TypeError("Initialization arguments are not supported") + self = object.__new__(cls) + impl = _localimpl() + impl.localargs = (args, kw) + impl.locallock = RLock() + object.__setattr__(self, '_local__impl', impl) + # We need to create the thread dict in anticipation of + # __init__ being called, to make sure we don't call it + # again ourselves. + impl.create_dict() + return self + + def __getattribute__(self, name): + with _patch(self): + return object.__getattribute__(self, name) + + def __setattr__(self, name, value): + if name == '__dict__': + raise AttributeError( + "%r object attribute '__dict__' is read-only" + % self.__class__.__name__) + with _patch(self): + return object.__setattr__(self, name, value) + + def __delattr__(self, name): + if name == '__dict__': + raise AttributeError( + "%r object attribute '__dict__' is read-only" + % self.__class__.__name__) + with _patch(self): + return object.__delattr__(self, name) + + +from threading import current_thread, RLock diff --git a/llava/lib/python3.10/cgi.py b/llava/lib/python3.10/cgi.py new file mode 100644 index 0000000000000000000000000000000000000000..6cb8cf28bd66457ef05a8cc19bb53b8f2afbb780 --- /dev/null +++ b/llava/lib/python3.10/cgi.py @@ -0,0 +1,1004 @@ +#! /usr/local/bin/python + +# NOTE: the above "/usr/local/bin/python" is NOT a mistake. It is +# intentionally NOT "/usr/bin/env python". On many systems +# (e.g. 
Solaris), /usr/local/bin is not in $PATH as passed to CGI +# scripts, and /usr/local/bin is the default directory where Python is +# installed, so /usr/bin/env would be unable to find python. Granted, +# binary installations by Linux vendors often install Python in +# /usr/bin. So let those vendors patch cgi.py to match their choice +# of installation. + +"""Support module for CGI (Common Gateway Interface) scripts. + +This module defines a number of utilities for use by CGI scripts +written in Python. +""" + +# History +# ------- +# +# Michael McLay started this module. Steve Majewski changed the +# interface to SvFormContentDict and FormContentDict. The multipart +# parsing was inspired by code submitted by Andreas Paepcke. Guido van +# Rossum rewrote, reformatted and documented the module and is currently +# responsible for its maintenance. +# + +__version__ = "2.6" + + +# Imports +# ======= + +from io import StringIO, BytesIO, TextIOWrapper +from collections.abc import Mapping +import sys +import os +import urllib.parse +from email.parser import FeedParser +from email.message import Message +import html +import locale +import tempfile +import warnings + +__all__ = ["MiniFieldStorage", "FieldStorage", "parse", "parse_multipart", + "parse_header", "test", "print_exception", "print_environ", + "print_form", "print_directory", "print_arguments", + "print_environ_usage"] + +# Logging support +# =============== + +logfile = "" # Filename to log to, if not empty +logfp = None # File object to log to, if not None + +def initlog(*allargs): + """Write a log message, if there is a log file. + + Even though this function is called initlog(), you should always + use log(); log is a variable that is set either to initlog + (initially), to dolog (once the log file has been opened), or to + nolog (when logging is disabled). + + The first argument is a format string; the remaining arguments (if + any) are arguments to the % operator, so e.g. + log("%s: %s", "a", "b") + will write "a: b" to the log file, followed by a newline. + + If the global logfp is not None, it should be a file object to + which log data is written. + + If the global logfp is None, the global logfile may be a string + giving a filename to open, in append mode. This file should be + world writable!!! If the file can't be opened, logging is + silently disabled (since there is no safe place where we could + send an error message). + + """ + global log, logfile, logfp + warnings.warn("cgi.log() is deprecated as of 3.10. Use logging instead", + DeprecationWarning, stacklevel=2) + if logfile and not logfp: + try: + logfp = open(logfile, "a", encoding="locale") + except OSError: + pass + if not logfp: + log = nolog + else: + log = dolog + log(*allargs) + +def dolog(fmt, *args): + """Write a log message to the log file. 
See initlog() for docs.""" + logfp.write(fmt%args + "\n") + +def nolog(*allargs): + """Dummy function, assigned to log when logging is disabled.""" + pass + +def closelog(): + """Close the log file.""" + global log, logfile, logfp + logfile = '' + if logfp: + logfp.close() + logfp = None + log = initlog + +log = initlog # The current logging function + + +# Parsing functions +# ================= + +# Maximum input we will accept when REQUEST_METHOD is POST +# 0 ==> unlimited input +maxlen = 0 + +def parse(fp=None, environ=os.environ, keep_blank_values=0, + strict_parsing=0, separator='&'): + """Parse a query in the environment or from a file (default stdin) + + Arguments, all optional: + + fp : file pointer; default: sys.stdin.buffer + + environ : environment dictionary; default: os.environ + + keep_blank_values: flag indicating whether blank values in + percent-encoded forms should be treated as blank strings. + A true value indicates that blanks should be retained as + blank strings. The default false value indicates that + blank values are to be ignored and treated as if they were + not included. + + strict_parsing: flag indicating what to do with parsing errors. + If false (the default), errors are silently ignored. + If true, errors raise a ValueError exception. + + separator: str. The symbol to use for separating the query arguments. + Defaults to &. + """ + if fp is None: + fp = sys.stdin + + # field keys and values (except for files) are returned as strings + # an encoding is required to decode the bytes read from self.fp + if hasattr(fp,'encoding'): + encoding = fp.encoding + else: + encoding = 'latin-1' + + # fp.read() must return bytes + if isinstance(fp, TextIOWrapper): + fp = fp.buffer + + if not 'REQUEST_METHOD' in environ: + environ['REQUEST_METHOD'] = 'GET' # For testing stand-alone + if environ['REQUEST_METHOD'] == 'POST': + ctype, pdict = parse_header(environ['CONTENT_TYPE']) + if ctype == 'multipart/form-data': + return parse_multipart(fp, pdict, separator=separator) + elif ctype == 'application/x-www-form-urlencoded': + clength = int(environ['CONTENT_LENGTH']) + if maxlen and clength > maxlen: + raise ValueError('Maximum content length exceeded') + qs = fp.read(clength).decode(encoding) + else: + qs = '' # Unknown content-type + if 'QUERY_STRING' in environ: + if qs: qs = qs + '&' + qs = qs + environ['QUERY_STRING'] + elif sys.argv[1:]: + if qs: qs = qs + '&' + qs = qs + sys.argv[1] + environ['QUERY_STRING'] = qs # XXX Shouldn't, really + elif 'QUERY_STRING' in environ: + qs = environ['QUERY_STRING'] + else: + if sys.argv[1:]: + qs = sys.argv[1] + else: + qs = "" + environ['QUERY_STRING'] = qs # XXX Shouldn't, really + return urllib.parse.parse_qs(qs, keep_blank_values, strict_parsing, + encoding=encoding, separator=separator) + + +def parse_multipart(fp, pdict, encoding="utf-8", errors="replace", separator='&'): + """Parse multipart input. + + Arguments: + fp : input file + pdict: dictionary containing other parameters of content-type header + encoding, errors: request encoding and error handler, passed to + FieldStorage + + Returns a dictionary just like parse_qs(): keys are the field names, each + value is a list of values for that field. For non-file fields, the value + is a list of strings. + """ + # RFC 2046, Section 5.1 : The "multipart" boundary delimiters are always + # represented as 7bit US-ASCII. 
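+ # [Editor's note; illustrative, not upstream code] pdict at this point typically looks like {'boundary': b'----formboundary123'}; the boundary arrives as bytes, hence the .decode('ascii') below.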
+ boundary = pdict['boundary'].decode('ascii') + ctype = "multipart/form-data; boundary={}".format(boundary) + headers = Message() + headers.set_type(ctype) + try: + headers['Content-Length'] = pdict['CONTENT-LENGTH'] + except KeyError: + pass + fs = FieldStorage(fp, headers=headers, encoding=encoding, errors=errors, + environ={'REQUEST_METHOD': 'POST'}, separator=separator) + return {k: fs.getlist(k) for k in fs} + +def _parseparam(s): + while s[:1] == ';': + s = s[1:] + end = s.find(';') + while end > 0 and (s.count('"', 0, end) - s.count('\\"', 0, end)) % 2: + end = s.find(';', end + 1) + if end < 0: + end = len(s) + f = s[:end] + yield f.strip() + s = s[end:] + +def parse_header(line): + """Parse a Content-type like header. + + Return the main content-type and a dictionary of options. + + """ + parts = _parseparam(';' + line) + key = parts.__next__() + pdict = {} + for p in parts: + i = p.find('=') + if i >= 0: + name = p[:i].strip().lower() + value = p[i+1:].strip() + if len(value) >= 2 and value[0] == value[-1] == '"': + value = value[1:-1] + value = value.replace('\\\\', '\\').replace('\\"', '"') + pdict[name] = value + return key, pdict + + +# Classes for field storage +# ========================= + +class MiniFieldStorage: + + """Like FieldStorage, for use when no file uploads are possible.""" + + # Dummy attributes + filename = None + list = None + type = None + file = None + type_options = {} + disposition = None + disposition_options = {} + headers = {} + + def __init__(self, name, value): + """Constructor from field name and value.""" + self.name = name + self.value = value + # self.file = StringIO(value) + + def __repr__(self): + """Return printable representation.""" + return "MiniFieldStorage(%r, %r)" % (self.name, self.value) + + +class FieldStorage: + + """Store a sequence of fields, reading multipart/form-data. + + This class provides naming, typing, files stored on disk, and + more. At the top level, it is accessible like a dictionary, whose + keys are the field names. (Note: None can occur as a field name.) + The items are either a Python list (if there's multiple values) or + another FieldStorage or MiniFieldStorage object. If it's a single + object, it has the following attributes: + + name: the field name, if specified; otherwise None + + filename: the filename, if specified; otherwise None; this is the + client side filename, *not* the file name on which it is + stored (that's a temporary file you don't deal with) + + value: the value as a *string*; for file uploads, this + transparently reads the file every time you request the value + and returns *bytes* + + file: the file(-like) object from which you can read the data *as + bytes* ; None if the data is stored a simple string + + type: the content-type, or None if not specified + + type_options: dictionary of options specified on the content-type + line + + disposition: content-disposition, or None if not specified + + disposition_options: dictionary of corresponding options + + headers: a dictionary(-like) object (sometimes email.message.Message or a + subclass thereof) containing *all* headers + + The class is subclassable, mostly for the purpose of overriding + the make_file() method, which is called internally to come up with + a file open for reading and writing. This makes it possible to + override the default choice of storing all files in a temporary + directory and unlinking them as soon as they have been opened. 
+ + """ + def __init__(self, fp=None, headers=None, outerboundary=b'', + environ=os.environ, keep_blank_values=0, strict_parsing=0, + limit=None, encoding='utf-8', errors='replace', + max_num_fields=None, separator='&'): + """Constructor. Read multipart/* until last part. + + Arguments, all optional: + + fp : file pointer; default: sys.stdin.buffer + (not used when the request method is GET) + Can be : + 1. a TextIOWrapper object + 2. an object whose read() and readline() methods return bytes + + headers : header dictionary-like object; default: + taken from environ as per CGI spec + + outerboundary : terminating multipart boundary + (for internal use only) + + environ : environment dictionary; default: os.environ + + keep_blank_values: flag indicating whether blank values in + percent-encoded forms should be treated as blank strings. + A true value indicates that blanks should be retained as + blank strings. The default false value indicates that + blank values are to be ignored and treated as if they were + not included. + + strict_parsing: flag indicating what to do with parsing errors. + If false (the default), errors are silently ignored. + If true, errors raise a ValueError exception. + + limit : used internally to read parts of multipart/form-data forms, + to exit from the reading loop when reached. It is the difference + between the form content-length and the number of bytes already + read + + encoding, errors : the encoding and error handler used to decode the + binary stream to strings. Must be the same as the charset defined + for the page sending the form (content-type : meta http-equiv or + header) + + max_num_fields: int. If set, then __init__ throws a ValueError + if there are more than n fields read by parse_qsl(). + + """ + method = 'GET' + self.keep_blank_values = keep_blank_values + self.strict_parsing = strict_parsing + self.max_num_fields = max_num_fields + self.separator = separator + if 'REQUEST_METHOD' in environ: + method = environ['REQUEST_METHOD'].upper() + self.qs_on_post = None + if method == 'GET' or method == 'HEAD': + if 'QUERY_STRING' in environ: + qs = environ['QUERY_STRING'] + elif sys.argv[1:]: + qs = sys.argv[1] + else: + qs = "" + qs = qs.encode(locale.getpreferredencoding(), 'surrogateescape') + fp = BytesIO(qs) + if headers is None: + headers = {'content-type': + "application/x-www-form-urlencoded"} + if headers is None: + headers = {} + if method == 'POST': + # Set default content-type for POST to what's traditional + headers['content-type'] = "application/x-www-form-urlencoded" + if 'CONTENT_TYPE' in environ: + headers['content-type'] = environ['CONTENT_TYPE'] + if 'QUERY_STRING' in environ: + self.qs_on_post = environ['QUERY_STRING'] + if 'CONTENT_LENGTH' in environ: + headers['content-length'] = environ['CONTENT_LENGTH'] + else: + if not (isinstance(headers, (Mapping, Message))): + raise TypeError("headers must be mapping or an instance of " + "email.message.Message") + self.headers = headers + if fp is None: + self.fp = sys.stdin.buffer + # self.fp.read() must return bytes + elif isinstance(fp, TextIOWrapper): + self.fp = fp.buffer + else: + if not (hasattr(fp, 'read') and hasattr(fp, 'readline')): + raise TypeError("fp must be file pointer") + self.fp = fp + + self.encoding = encoding + self.errors = errors + + if not isinstance(outerboundary, bytes): + raise TypeError('outerboundary must be bytes, not %s' + % type(outerboundary).__name__) + self.outerboundary = outerboundary + + self.bytes_read = 0 + self.limit = limit + + # Process 
content-disposition header + cdisp, pdict = "", {} + if 'content-disposition' in self.headers: + cdisp, pdict = parse_header(self.headers['content-disposition']) + self.disposition = cdisp + self.disposition_options = pdict + self.name = None + if 'name' in pdict: + self.name = pdict['name'] + self.filename = None + if 'filename' in pdict: + self.filename = pdict['filename'] + self._binary_file = self.filename is not None + + # Process content-type header + # + # Honor any existing content-type header. But if there is no + # content-type header, use some sensible defaults. Assume + # outerboundary is "" at the outer level, but something non-false + # inside a multi-part. The default for an inner part is text/plain, + # but for an outer part it should be urlencoded. This should catch + # bogus clients which erroneously forget to include a content-type + # header. + # + # See below for what we do if there does exist a content-type header, + # but it happens to be something we don't understand. + if 'content-type' in self.headers: + ctype, pdict = parse_header(self.headers['content-type']) + elif self.outerboundary or method != 'POST': + ctype, pdict = "text/plain", {} + else: + ctype, pdict = 'application/x-www-form-urlencoded', {} + self.type = ctype + self.type_options = pdict + if 'boundary' in pdict: + self.innerboundary = pdict['boundary'].encode(self.encoding, + self.errors) + else: + self.innerboundary = b"" + + clen = -1 + if 'content-length' in self.headers: + try: + clen = int(self.headers['content-length']) + except ValueError: + pass + if maxlen and clen > maxlen: + raise ValueError('Maximum content length exceeded') + self.length = clen + if self.limit is None and clen >= 0: + self.limit = clen + + self.list = self.file = None + self.done = 0 + if ctype == 'application/x-www-form-urlencoded': + self.read_urlencoded() + elif ctype[:10] == 'multipart/': + self.read_multi(environ, keep_blank_values, strict_parsing) + else: + self.read_single() + + def __del__(self): + try: + self.file.close() + except AttributeError: + pass + + def __enter__(self): + return self + + def __exit__(self, *args): + self.file.close() + + def __repr__(self): + """Return a printable representation.""" + return "FieldStorage(%r, %r, %r)" % ( + self.name, self.filename, self.value) + + def __iter__(self): + return iter(self.keys()) + + def __getattr__(self, name): + if name != 'value': + raise AttributeError(name) + if self.file: + self.file.seek(0) + value = self.file.read() + self.file.seek(0) + elif self.list is not None: + value = self.list + else: + value = None + return value + + def __getitem__(self, key): + """Dictionary style indexing.""" + if self.list is None: + raise TypeError("not indexable") + found = [] + for item in self.list: + if item.name == key: found.append(item) + if not found: + raise KeyError(key) + if len(found) == 1: + return found[0] + else: + return found + + def getvalue(self, key, default=None): + """Dictionary style get() method, including 'value' lookup.""" + if key in self: + value = self[key] + if isinstance(value, list): + return [x.value for x in value] + else: + return value.value + else: + return default + + def getfirst(self, key, default=None): + """ Return the first value received.""" + if key in self: + value = self[key] + if isinstance(value, list): + return value[0].value + else: + return value.value + else: + return default + + def getlist(self, key): + """ Return list of received values.""" + if key in self: + value = self[key] + if isinstance(value, list): + 
return [x.value for x in value] + else: + return [value.value] + else: + return [] + + def keys(self): + """Dictionary style keys() method.""" + if self.list is None: + raise TypeError("not indexable") + return list(set(item.name for item in self.list)) + + def __contains__(self, key): + """Dictionary style __contains__ method.""" + if self.list is None: + raise TypeError("not indexable") + return any(item.name == key for item in self.list) + + def __len__(self): + """Dictionary style len(x) support.""" + return len(self.keys()) + + def __bool__(self): + if self.list is None: + raise TypeError("Cannot be converted to bool.") + return bool(self.list) + + def read_urlencoded(self): + """Internal: read data in query string format.""" + qs = self.fp.read(self.length) + if not isinstance(qs, bytes): + raise ValueError("%s should return bytes, got %s" \ + % (self.fp, type(qs).__name__)) + qs = qs.decode(self.encoding, self.errors) + if self.qs_on_post: + qs += '&' + self.qs_on_post + query = urllib.parse.parse_qsl( + qs, self.keep_blank_values, self.strict_parsing, + encoding=self.encoding, errors=self.errors, + max_num_fields=self.max_num_fields, separator=self.separator) + self.list = [MiniFieldStorage(key, value) for key, value in query] + self.skip_lines() + + FieldStorageClass = None + + def read_multi(self, environ, keep_blank_values, strict_parsing): + """Internal: read a part that is itself multipart.""" + ib = self.innerboundary + if not valid_boundary(ib): + raise ValueError('Invalid boundary in multipart form: %r' % (ib,)) + self.list = [] + if self.qs_on_post: + query = urllib.parse.parse_qsl( + self.qs_on_post, self.keep_blank_values, self.strict_parsing, + encoding=self.encoding, errors=self.errors, + max_num_fields=self.max_num_fields, separator=self.separator) + self.list.extend(MiniFieldStorage(key, value) for key, value in query) + + klass = self.FieldStorageClass or self.__class__ + first_line = self.fp.readline() # bytes + if not isinstance(first_line, bytes): + raise ValueError("%s should return bytes, got %s" \ + % (self.fp, type(first_line).__name__)) + self.bytes_read += len(first_line) + + # Ensure that we consume the file until we've hit our inner boundary + while (first_line.strip() != (b"--" + self.innerboundary) and + first_line): + first_line = self.fp.readline() + self.bytes_read += len(first_line) + + # Propagate max_num_fields into the sub class appropriately + max_num_fields = self.max_num_fields + if max_num_fields is not None: + max_num_fields -= len(self.list) + + while True: + parser = FeedParser() + hdr_text = b"" + while True: + data = self.fp.readline() + hdr_text += data + if not data.strip(): + break + if not hdr_text: + break + # parser takes strings, not bytes + self.bytes_read += len(hdr_text) + parser.feed(hdr_text.decode(self.encoding, self.errors)) + headers = parser.close() + + # Some clients add Content-Length for part headers, ignore them + if 'content-length' in headers: + del headers['content-length'] + + limit = None if self.limit is None \ + else self.limit - self.bytes_read + part = klass(self.fp, headers, ib, environ, keep_blank_values, + strict_parsing, limit, + self.encoding, self.errors, max_num_fields, self.separator) + + if max_num_fields is not None: + max_num_fields -= 1 + if part.list: + max_num_fields -= len(part.list) + if max_num_fields < 0: + raise ValueError('Max number of fields exceeded') + + self.bytes_read += part.bytes_read + self.list.append(part) + if part.done or self.bytes_read >= self.length > 0: + break + 
self.skip_lines() + + def read_single(self): + """Internal: read an atomic part.""" + if self.length >= 0: + self.read_binary() + self.skip_lines() + else: + self.read_lines() + self.file.seek(0) + + bufsize = 8*1024 # I/O buffering size for copy to file + + def read_binary(self): + """Internal: read binary data.""" + self.file = self.make_file() + todo = self.length + if todo >= 0: + while todo > 0: + data = self.fp.read(min(todo, self.bufsize)) # bytes + if not isinstance(data, bytes): + raise ValueError("%s should return bytes, got %s" + % (self.fp, type(data).__name__)) + self.bytes_read += len(data) + if not data: + self.done = -1 + break + self.file.write(data) + todo = todo - len(data) + + def read_lines(self): + """Internal: read lines until EOF or outerboundary.""" + if self._binary_file: + self.file = self.__file = BytesIO() # store data as bytes for files + else: + self.file = self.__file = StringIO() # as strings for other fields + if self.outerboundary: + self.read_lines_to_outerboundary() + else: + self.read_lines_to_eof() + + def __write(self, line): + """line is always bytes, not string""" + if self.__file is not None: + if self.__file.tell() + len(line) > 1000: + self.file = self.make_file() + data = self.__file.getvalue() + self.file.write(data) + self.__file = None + if self._binary_file: + # keep bytes + self.file.write(line) + else: + # decode to string + self.file.write(line.decode(self.encoding, self.errors)) + + def read_lines_to_eof(self): + """Internal: read lines until EOF.""" + while 1: + line = self.fp.readline(1<<16) # bytes + self.bytes_read += len(line) + if not line: + self.done = -1 + break + self.__write(line) + + def read_lines_to_outerboundary(self): + """Internal: read lines until outerboundary. + Data is read as bytes: boundaries and line ends must be converted + to bytes for comparisons. 
+ """ + next_boundary = b"--" + self.outerboundary + last_boundary = next_boundary + b"--" + delim = b"" + last_line_lfend = True + _read = 0 + while 1: + + if self.limit is not None and 0 <= self.limit <= _read: + break + line = self.fp.readline(1<<16) # bytes + self.bytes_read += len(line) + _read += len(line) + if not line: + self.done = -1 + break + if delim == b"\r": + line = delim + line + delim = b"" + if line.startswith(b"--") and last_line_lfend: + strippedline = line.rstrip() + if strippedline == next_boundary: + break + if strippedline == last_boundary: + self.done = 1 + break + odelim = delim + if line.endswith(b"\r\n"): + delim = b"\r\n" + line = line[:-2] + last_line_lfend = True + elif line.endswith(b"\n"): + delim = b"\n" + line = line[:-1] + last_line_lfend = True + elif line.endswith(b"\r"): + # We may interrupt \r\n sequences if they span the 2**16 + # byte boundary + delim = b"\r" + line = line[:-1] + last_line_lfend = False + else: + delim = b"" + last_line_lfend = False + self.__write(odelim + line) + + def skip_lines(self): + """Internal: skip lines until outer boundary if defined.""" + if not self.outerboundary or self.done: + return + next_boundary = b"--" + self.outerboundary + last_boundary = next_boundary + b"--" + last_line_lfend = True + while True: + line = self.fp.readline(1<<16) + self.bytes_read += len(line) + if not line: + self.done = -1 + break + if line.endswith(b"--") and last_line_lfend: + strippedline = line.strip() + if strippedline == next_boundary: + break + if strippedline == last_boundary: + self.done = 1 + break + last_line_lfend = line.endswith(b'\n') + + def make_file(self): + """Overridable: return a readable & writable file. + + The file will be used as follows: + - data is written to it + - seek(0) + - data is read from it + + The file is opened in binary mode for files, in text mode + for other fields + + This version opens a temporary file for reading and writing, + and immediately deletes (unlinks) it. The trick (on Unix!) is + that the file can still be used, but it can't be opened by + another process, and it will automatically be deleted when it + is closed or when the current process terminates. + + If you want a more permanent file, you derive a class which + overrides this method. If you want a visible temporary file + that is nevertheless automatically deleted when the script + terminates, try defining a __del__ method in a derived class + which unlinks the temporary files you have created. + + """ + if self._binary_file: + return tempfile.TemporaryFile("wb+") + else: + return tempfile.TemporaryFile("w+", + encoding=self.encoding, newline = '\n') + + +# Test/debug code +# =============== + +def test(environ=os.environ): + """Robust test CGI script, usable as main program. + + Write minimal HTTP headers and dump all information provided to + the script in HTML form. + + """ + print("Content-type: text/html") + print() + sys.stderr = sys.stdout + try: + form = FieldStorage() # Replace with other classes to test those + print_directory() + print_arguments() + print_form(form) + print_environ(environ) + print_environ_usage() + def f(): + exec("testing print_exception() -- italics?") + def g(f=f): + f() + print("

<H3>What follows is a test, not an actual exception:</H3>") + g() + except: + print_exception() + + print("<H1>Second try with a small maxlen...</H1>") + + global maxlen + maxlen = 50 + try: + form = FieldStorage() # Replace with other classes to test those + print_directory() + print_arguments() + print_form(form) + print_environ(environ) + except: + print_exception() +
+def print_exception(type=None, value=None, tb=None, limit=None): + if type is None: + type, value, tb = sys.exc_info() + import traceback + print() + print("<H3>Traceback (most recent call last):</H3>") + list = traceback.format_tb(tb, limit) + \ + traceback.format_exception_only(type, value) + print("<PRE>%s<B>%s</B></PRE>" % ( + html.escape("".join(list[:-1])), + html.escape(list[-1]), + )) + del tb +

+def print_environ(environ=os.environ): + """Dump the shell environment as HTML.""" + keys = sorted(environ.keys()) + print() + print("<H3>Shell Environment:</H3>") + print("<DL>") + for key in keys: + print("<DT>", html.escape(key), "<DD>", html.escape(environ[key])) + print("</DL>") + print() +

+def print_form(form): + """Dump the contents of a form as HTML.""" + keys = sorted(form.keys()) + print() + print("<H3>Form Contents:</H3>") + if not keys: + print("<P>No form fields.") + print("<DL>") + for key in keys: + print("<DT>" + html.escape(key) + ":", end=' ') + value = form[key] + print("<i>" + html.escape(repr(type(value))) + "</i>") + print("<DD>" + html.escape(repr(value))) + print("</DL>") + print() +

+def print_directory(): + """Dump the current directory as HTML.""" + print() + print("<H3>Current Working Directory:</H3>") + try: + pwd = os.getcwd() + except OSError as msg: + print("OSError:", html.escape(str(msg))) + else: + print(html.escape(pwd)) + print() + +def print_arguments(): + print() + print("<H3>Command Line Arguments:</H3>") + print() + print(sys.argv) + print() + +def print_environ_usage(): + """Dump a list of environment variables used by CGI as HTML.""" + print(""" +<H3>These environment variables could have been set:</H3>

+ +In addition, HTTP headers sent by the server may be passed in the +environment as well. Here are some common variable names: + +""") + + +# Utilities +# ========= + +def valid_boundary(s): + import re + if isinstance(s, bytes): + _vb_pattern = b"^[ -~]{0,200}[!-~]$" + else: + _vb_pattern = "^[ -~]{0,200}[!-~]$" + return re.match(_vb_pattern, s) + +# Invoke mainline +# =============== + +# Call test() when this file is run as a script (not imported as a module) +if __name__ == '__main__': + test() diff --git a/llava/lib/python3.10/cmd.py b/llava/lib/python3.10/cmd.py new file mode 100644 index 0000000000000000000000000000000000000000..859e91096d8f57d906c00023ef1a1c0e663178d6 --- /dev/null +++ b/llava/lib/python3.10/cmd.py @@ -0,0 +1,401 @@ +"""A generic class to build line-oriented command interpreters. + +Interpreters constructed with this class obey the following conventions: + +1. End of file on input is processed as the command 'EOF'. +2. A command is parsed out of each line by collecting the prefix composed + of characters in the identchars member. +3. A command `foo' is dispatched to a method 'do_foo()'; the do_ method + is passed a single argument consisting of the remainder of the line. +4. Typing an empty line repeats the last command. (Actually, it calls the + method `emptyline', which may be overridden in a subclass.) +5. There is a predefined `help' method. Given an argument `topic', it + calls the command `help_topic'. With no arguments, it lists all topics + with defined help_ functions, broken into up to three topics; documented + commands, miscellaneous help topics, and undocumented commands. +6. The command '?' is a synonym for `help'. The command '!' is a synonym + for `shell', if a do_shell method exists. +7. If completion is enabled, completing commands will be done automatically, + and completing of commands args is done by calling complete_foo() with + arguments text, line, begidx, endidx. text is string we are matching + against, all returned matches must begin with it. line is the current + input line (lstripped), begidx and endidx are the beginning and end + indexes of the text being matched, which could be used to provide + different completion depending upon which position the argument is in. + +The `default' method may be overridden to intercept commands for which there +is no do_ method. + +The `completedefault' method may be overridden to intercept completions for +commands that have no complete_ method. + +The data member `self.ruler' sets the character used to draw separator lines +in the help messages. If empty, no ruler line is drawn. It defaults to "=". + +If the value of `self.intro' is nonempty when the cmdloop method is called, +it is printed out on interpreter startup. This value may be overridden +via an optional argument to the cmdloop() method. + +The data members `self.doc_header', `self.misc_header', and +`self.undoc_header' set the headers used for the help function's +listings of documented functions, miscellaneous topics, and undocumented +functions respectively. +""" + +import string, sys + +__all__ = ["Cmd"] + +PROMPT = '(Cmd) ' +IDENTCHARS = string.ascii_letters + string.digits + '_' + +class Cmd: + """A simple framework for writing line-oriented command interpreters. + + These are often useful for test harnesses, administrative tools, and + prototypes that will later be wrapped in a more sophisticated interface. + + A Cmd instance or subclass instance is a line-oriented interpreter + framework. 
There is no good reason to instantiate Cmd itself; rather, + it's useful as a superclass of an interpreter class you define yourself + in order to inherit Cmd's methods and encapsulate action methods. + + """ + prompt = PROMPT + identchars = IDENTCHARS + ruler = '=' + lastcmd = '' + intro = None + doc_leader = "" + doc_header = "Documented commands (type help <topic>):" + misc_header = "Miscellaneous help topics:" + undoc_header = "Undocumented commands:" + nohelp = "*** No help on %s" + use_rawinput = 1 + + def __init__(self, completekey='tab', stdin=None, stdout=None): + """Instantiate a line-oriented interpreter framework. + + The optional argument 'completekey' is the readline name of a + completion key; it defaults to the Tab key. If completekey is + not None and the readline module is available, command completion + is done automatically. The optional arguments stdin and stdout + specify alternate input and output file objects; if not specified, + sys.stdin and sys.stdout are used. + + """ + if stdin is not None: + self.stdin = stdin + else: + self.stdin = sys.stdin + if stdout is not None: + self.stdout = stdout + else: + self.stdout = sys.stdout + self.cmdqueue = [] + self.completekey = completekey + + def cmdloop(self, intro=None): + """Repeatedly issue a prompt, accept input, parse an initial prefix + off the received input, and dispatch to action methods, passing them + the remainder of the line as argument. + + """ + + self.preloop() + if self.use_rawinput and self.completekey: + try: + import readline + self.old_completer = readline.get_completer() + readline.set_completer(self.complete) + readline.parse_and_bind(self.completekey+": complete") + except ImportError: + pass + try: + if intro is not None: + self.intro = intro + if self.intro: + self.stdout.write(str(self.intro)+"\n") + stop = None + while not stop: + if self.cmdqueue: + line = self.cmdqueue.pop(0) + else: + if self.use_rawinput: + try: + line = input(self.prompt) + except EOFError: + line = 'EOF' + else: + self.stdout.write(self.prompt) + self.stdout.flush() + line = self.stdin.readline() + if not len(line): + line = 'EOF' + else: + line = line.rstrip('\r\n') + line = self.precmd(line) + stop = self.onecmd(line) + stop = self.postcmd(stop, line) + self.postloop() + finally: + if self.use_rawinput and self.completekey: + try: + import readline + readline.set_completer(self.old_completer) + except ImportError: + pass + + + def precmd(self, line): + """Hook method executed just before the command line is + interpreted, but after the input prompt is generated and issued. + + """ + return line + + def postcmd(self, stop, line): + """Hook method executed just after a command dispatch is finished.""" + return stop + + def preloop(self): + """Hook method executed once when the cmdloop() method is called.""" + pass + + def postloop(self): + """Hook method executed once when the cmdloop() method is about to + return. + + """ + pass + + def parseline(self, line): + """Parse the line into a command name and a string containing + the arguments. Returns a tuple containing (command, args, line). + 'command' and 'args' may be None if the line couldn't be parsed.
+ """ + line = line.strip() + if not line: + return None, None, line + elif line[0] == '?': + line = 'help ' + line[1:] + elif line[0] == '!': + if hasattr(self, 'do_shell'): + line = 'shell ' + line[1:] + else: + return None, None, line + i, n = 0, len(line) + while i < n and line[i] in self.identchars: i = i+1 + cmd, arg = line[:i], line[i:].strip() + return cmd, arg, line + + def onecmd(self, line): + """Interpret the argument as though it had been typed in response + to the prompt. + + This may be overridden, but should not normally need to be; + see the precmd() and postcmd() methods for useful execution hooks. + The return value is a flag indicating whether interpretation of + commands by the interpreter should stop. + + """ + cmd, arg, line = self.parseline(line) + if not line: + return self.emptyline() + if cmd is None: + return self.default(line) + self.lastcmd = line + if line == 'EOF' : + self.lastcmd = '' + if cmd == '': + return self.default(line) + else: + try: + func = getattr(self, 'do_' + cmd) + except AttributeError: + return self.default(line) + return func(arg) + + def emptyline(self): + """Called when an empty line is entered in response to the prompt. + + If this method is not overridden, it repeats the last nonempty + command entered. + + """ + if self.lastcmd: + return self.onecmd(self.lastcmd) + + def default(self, line): + """Called on an input line when the command prefix is not recognized. + + If this method is not overridden, it prints an error message and + returns. + + """ + self.stdout.write('*** Unknown syntax: %s\n'%line) + + def completedefault(self, *ignored): + """Method called to complete an input line when no command-specific + complete_*() method is available. + + By default, it returns an empty list. + + """ + return [] + + def completenames(self, text, *ignored): + dotext = 'do_'+text + return [a[3:] for a in self.get_names() if a.startswith(dotext)] + + def complete(self, text, state): + """Return the next possible completion for 'text'. + + If a command has not been entered, then complete against command list. + Otherwise try to call complete_ to get list of completions. + """ + if state == 0: + import readline + origline = readline.get_line_buffer() + line = origline.lstrip() + stripped = len(origline) - len(line) + begidx = readline.get_begidx() - stripped + endidx = readline.get_endidx() - stripped + if begidx>0: + cmd, args, foo = self.parseline(line) + if cmd == '': + compfunc = self.completedefault + else: + try: + compfunc = getattr(self, 'complete_' + cmd) + except AttributeError: + compfunc = self.completedefault + else: + compfunc = self.completenames + self.completion_matches = compfunc(text, line, begidx, endidx) + try: + return self.completion_matches[state] + except IndexError: + return None + + def get_names(self): + # This method used to pull in base class attributes + # at a time dir() didn't do it yet. + return dir(self.__class__) + + def complete_help(self, *args): + commands = set(self.completenames(*args)) + topics = set(a[5:] for a in self.get_names() + if a.startswith('help_' + args[0])) + return list(commands | topics) + + def do_help(self, arg): + 'List available commands with "help" or detailed help with "help cmd".' 
+ if arg: + # XXX check arg syntax + try: + func = getattr(self, 'help_' + arg) + except AttributeError: + try: + doc=getattr(self, 'do_' + arg).__doc__ + if doc: + self.stdout.write("%s\n"%str(doc)) + return + except AttributeError: + pass + self.stdout.write("%s\n"%str(self.nohelp % (arg,))) + return + func() + else: + names = self.get_names() + cmds_doc = [] + cmds_undoc = [] + help = {} + for name in names: + if name[:5] == 'help_': + help[name[5:]]=1 + names.sort() + # There can be duplicates if routines overridden + prevname = '' + for name in names: + if name[:3] == 'do_': + if name == prevname: + continue + prevname = name + cmd=name[3:] + if cmd in help: + cmds_doc.append(cmd) + del help[cmd] + elif getattr(self, name).__doc__: + cmds_doc.append(cmd) + else: + cmds_undoc.append(cmd) + self.stdout.write("%s\n"%str(self.doc_leader)) + self.print_topics(self.doc_header, cmds_doc, 15,80) + self.print_topics(self.misc_header, list(help.keys()),15,80) + self.print_topics(self.undoc_header, cmds_undoc, 15,80) + + def print_topics(self, header, cmds, cmdlen, maxcol): + if cmds: + self.stdout.write("%s\n"%str(header)) + if self.ruler: + self.stdout.write("%s\n"%str(self.ruler * len(header))) + self.columnize(cmds, maxcol-1) + self.stdout.write("\n") + + def columnize(self, list, displaywidth=80): + """Display a list of strings as a compact set of columns. + + Each column is only as wide as necessary. + Columns are separated by two spaces (one was not legible enough). + """ + if not list: + self.stdout.write("\n") + return + + nonstrings = [i for i in range(len(list)) + if not isinstance(list[i], str)] + if nonstrings: + raise TypeError("list[i] not a string for i in %s" + % ", ".join(map(str, nonstrings))) + size = len(list) + if size == 1: + self.stdout.write('%s\n'%str(list[0])) + return + # Try every row count from 1 upwards + for nrows in range(1, len(list)): + ncols = (size+nrows-1) // nrows + colwidths = [] + totwidth = -2 + for col in range(ncols): + colwidth = 0 + for row in range(nrows): + i = row + nrows*col + if i >= size: + break + x = list[i] + colwidth = max(colwidth, len(x)) + colwidths.append(colwidth) + totwidth += colwidth + 2 + if totwidth > displaywidth: + break + if totwidth <= displaywidth: + break + else: + nrows = len(list) + ncols = 1 + colwidths = [0] + for row in range(nrows): + texts = [] + for col in range(ncols): + i = row + nrows*col + if i >= size: + x = "" + else: + x = list[i] + texts.append(x) + while texts and not texts[-1]: + del texts[-1] + for col in range(len(texts)): + texts[col] = texts[col].ljust(colwidths[col]) + self.stdout.write("%s\n"%str(" ".join(texts))) diff --git a/llava/lib/python3.10/enum.py b/llava/lib/python3.10/enum.py new file mode 100644 index 0000000000000000000000000000000000000000..f5657a6eba29c19e139bff5089e01d5f1eb362d5 --- /dev/null +++ b/llava/lib/python3.10/enum.py @@ -0,0 +1,1053 @@ +import sys +from types import MappingProxyType, DynamicClassAttribute + + +__all__ = [ + 'EnumMeta', + 'Enum', 'IntEnum', 'Flag', 'IntFlag', + 'auto', 'unique', + ] + + +def _is_descriptor(obj): + """ + Returns True if obj is a descriptor, False otherwise. + """ + return ( + hasattr(obj, '__get__') or + hasattr(obj, '__set__') or + hasattr(obj, '__delete__') + ) + +def _is_dunder(name): + """ + Returns True if a __dunder__ name, False otherwise. + """ + return ( + len(name) > 4 and + name[:2] == name[-2:] == '__' and + name[2] != '_' and + name[-3] != '_' + ) + +def _is_sunder(name): + """ + Returns True if a _sunder_ name, False otherwise. 
+ """ + return ( + len(name) > 2 and + name[0] == name[-1] == '_' and + name[1:2] != '_' and + name[-2:-1] != '_' + ) + +def _is_private(cls_name, name): + # do not use `re` as `re` imports `enum` + pattern = '_%s__' % (cls_name, ) + pat_len = len(pattern) + if ( + len(name) > pat_len + and name.startswith(pattern) + and name[pat_len:pat_len+1] != ['_'] + and (name[-1] != '_' or name[-2] != '_') + ): + return True + else: + return False + +def _make_class_unpicklable(cls): + """ + Make the given class un-picklable. + """ + def _break_on_call_reduce(self, proto): + raise TypeError('%r cannot be pickled' % self) + cls.__reduce_ex__ = _break_on_call_reduce + cls.__module__ = '' + +_auto_null = object() +class auto: + """ + Instances are replaced with an appropriate value in Enum class suites. + """ + value = _auto_null + + +class _EnumDict(dict): + """ + Track enum member order and ensure member names are not reused. + + EnumMeta will use the names found in self._member_names as the + enumeration member names. + """ + def __init__(self): + super().__init__() + self._member_names = [] + self._last_values = [] + self._ignore = [] + self._auto_called = False + + def __setitem__(self, key, value): + """ + Changes anything not dundered or not a descriptor. + + If an enum member name is used twice, an error is raised; duplicate + values are not checked for. + + Single underscore (sunder) names are reserved. + """ + if _is_private(self._cls_name, key): + import warnings + warnings.warn( + "private variables, such as %r, will be normal attributes in 3.11" + % (key, ), + DeprecationWarning, + stacklevel=2, + ) + if _is_sunder(key): + if key not in ( + '_order_', '_create_pseudo_member_', + '_generate_next_value_', '_missing_', '_ignore_', + ): + raise ValueError('_names_ are reserved for future Enum use') + if key == '_generate_next_value_': + # check if members already defined as auto() + if self._auto_called: + raise TypeError("_generate_next_value_ must be defined before members") + setattr(self, '_generate_next_value', value) + elif key == '_ignore_': + if isinstance(value, str): + value = value.replace(',',' ').split() + else: + value = list(value) + self._ignore = value + already = set(value) & set(self._member_names) + if already: + raise ValueError( + '_ignore_ cannot specify already set names: %r' + % (already, ) + ) + elif _is_dunder(key): + if key == '__order__': + key = '_order_' + elif key in self._member_names: + # descriptor overwriting an enum? + raise TypeError('Attempted to reuse key: %r' % key) + elif key in self._ignore: + pass + elif not _is_descriptor(value): + if key in self: + # enum overwriting a descriptor? + raise TypeError('%r already defined as: %r' % (key, self[key])) + if isinstance(value, auto): + if value.value == _auto_null: + value.value = self._generate_next_value( + key, + 1, + len(self._member_names), + self._last_values[:], + ) + self._auto_called = True + value = value.value + self._member_names.append(key) + self._last_values.append(value) + super().__setitem__(key, value) + + +# Dummy value for Enum as EnumMeta explicitly checks for it, but of course +# until EnumMeta finishes running the first time the Enum class doesn't exist. 
+# This is also why there are checks in EnumMeta like `if Enum is not None` +Enum = None + +class EnumMeta(type): + """ + Metaclass for Enum + """ + @classmethod + def __prepare__(metacls, cls, bases, **kwds): + # check that previous enum members do not exist + metacls._check_for_existing_members(cls, bases) + # create the namespace dict + enum_dict = _EnumDict() + enum_dict._cls_name = cls + # inherit previous flags and _generate_next_value_ function + member_type, first_enum = metacls._get_mixins_(cls, bases) + if first_enum is not None: + enum_dict['_generate_next_value_'] = getattr( + first_enum, '_generate_next_value_', None, + ) + return enum_dict + + def __new__(metacls, cls, bases, classdict, **kwds): + # an Enum class is final once enumeration items have been defined; it + # cannot be mixed with other types (int, float, etc.) if it has an + # inherited __new__ unless a new __new__ is defined (or the resulting + # class will fail). + # + # remove any keys listed in _ignore_ + classdict.setdefault('_ignore_', []).append('_ignore_') + ignore = classdict['_ignore_'] + for key in ignore: + classdict.pop(key, None) + member_type, first_enum = metacls._get_mixins_(cls, bases) + __new__, save_new, use_args = metacls._find_new_( + classdict, member_type, first_enum, + ) + + # save enum items into separate mapping so they don't get baked into + # the new class + enum_members = {k: classdict[k] for k in classdict._member_names} + for name in classdict._member_names: + del classdict[name] + + # adjust the sunders + _order_ = classdict.pop('_order_', None) + + # check for illegal enum names (any others?) + invalid_names = set(enum_members) & {'mro', ''} + if invalid_names: + raise ValueError('Invalid enum member name: {0}'.format( + ','.join(invalid_names))) + + # create a default docstring if one has not been provided + if '__doc__' not in classdict: + classdict['__doc__'] = 'An enumeration.' + + enum_class = super().__new__(metacls, cls, bases, classdict, **kwds) + enum_class._member_names_ = [] # names in definition order + enum_class._member_map_ = {} # name->value map + enum_class._member_type_ = member_type + + # save DynamicClassAttribute attributes from super classes so we know + # if we can take the shortcut of storing members in the class dict + dynamic_attributes = { + k for c in enum_class.mro() + for k, v in c.__dict__.items() + if isinstance(v, DynamicClassAttribute) + } + + # Reverse value->name map for hashable values. + enum_class._value2member_map_ = {} + + # If a custom type is mixed into the Enum, and it does not know how + # to pickle itself, pickle.dumps will succeed but pickle.loads will + # fail. Rather than have the error show up later and possibly far + # from the source, sabotage the pickle protocol for this class so + # that pickle.dumps also fails. + # + # However, if the new class implements its own __reduce_ex__, do not + # sabotage -- it's on them to make sure it works correctly. We use + # __reduce_ex__ instead of any of the others as it is preferred by + # pickle over __reduce__, and it handles all pickle protocols. 
+ if '__reduce_ex__' not in classdict: + if member_type is not object: + methods = ('__getnewargs_ex__', '__getnewargs__', + '__reduce_ex__', '__reduce__') + if not any(m in member_type.__dict__ for m in methods): + if '__new__' in classdict: + # too late, sabotage + _make_class_unpicklable(enum_class) + else: + # final attempt to verify that pickling would work: + # travel mro until __new__ is found, checking for + # __reduce__ and friends along the way -- if any of them + # are found before/when __new__ is found, pickling should + # work + sabotage = None + for chain in bases: + for base in chain.__mro__: + if base is object: + continue + elif any(m in base.__dict__ for m in methods): + # found one, we're good + sabotage = False + break + elif '__new__' in base.__dict__: + # not good + sabotage = True + break + if sabotage is not None: + break + if sabotage: + _make_class_unpicklable(enum_class) + # instantiate them, checking for duplicates as we go + # we instantiate first instead of checking for duplicates first in case + # a custom __new__ is doing something funky with the values -- such as + # auto-numbering ;) + for member_name in classdict._member_names: + value = enum_members[member_name] + if not isinstance(value, tuple): + args = (value, ) + else: + args = value + if member_type is tuple: # special case for tuple enums + args = (args, ) # wrap it one more time + if not use_args: + enum_member = __new__(enum_class) + if not hasattr(enum_member, '_value_'): + enum_member._value_ = value + else: + enum_member = __new__(enum_class, *args) + if not hasattr(enum_member, '_value_'): + if member_type is object: + enum_member._value_ = value + else: + enum_member._value_ = member_type(*args) + value = enum_member._value_ + enum_member._name_ = member_name + enum_member.__objclass__ = enum_class + enum_member.__init__(*args) + # If another member with the same value was already defined, the + # new member becomes an alias to the existing one. + for name, canonical_member in enum_class._member_map_.items(): + if canonical_member._value_ == enum_member._value_: + enum_member = canonical_member + break + else: + # Aliases don't appear in member names (only in __members__). + enum_class._member_names_.append(member_name) + # performance boost for any member that would not shadow + # a DynamicClassAttribute + if member_name not in dynamic_attributes: + setattr(enum_class, member_name, enum_member) + # now add to _member_map_ + enum_class._member_map_[member_name] = enum_member + try: + # This may fail if value is not hashable. We can't add the value + # to the map, and by-value lookups for this value will be + # linear. 
+ enum_class._value2member_map_[value] = enum_member + except TypeError: + pass + + # double check that repr and friends are not the mixin's or various + # things break (such as pickle) + # however, if the method is defined in the Enum itself, don't replace + # it + for name in ('__repr__', '__str__', '__format__', '__reduce_ex__'): + if name in classdict: + continue + class_method = getattr(enum_class, name) + obj_method = getattr(member_type, name, None) + enum_method = getattr(first_enum, name, None) + if obj_method is not None and obj_method is class_method: + setattr(enum_class, name, enum_method) + + # replace any other __new__ with our own (as long as Enum is not None, + # anyway) -- again, this is to support pickle + if Enum is not None: + # if the user defined their own __new__, save it before it gets + # clobbered in case they subclass later + if save_new: + enum_class.__new_member__ = __new__ + enum_class.__new__ = Enum.__new__ + + # py3 support for definition order (helps keep py2/py3 code in sync) + if _order_ is not None: + if isinstance(_order_, str): + _order_ = _order_.replace(',', ' ').split() + if _order_ != enum_class._member_names_: + raise TypeError('member order does not match _order_') + + return enum_class + + def __bool__(self): + """ + classes/types should always be True. + """ + return True + + def __call__(cls, value, names=None, *, module=None, qualname=None, type=None, start=1): + """ + Either returns an existing member, or creates a new enum class. + + This method is used both when an enum class is given a value to match + to an enumeration member (i.e. Color(3)) and for the functional API + (i.e. Color = Enum('Color', names='RED GREEN BLUE')). + + When used for the functional API: + + `value` will be the name of the new class. + + `names` should be either a string of white-space/comma delimited names + (values will start at `start`), or an iterator/mapping of name, value pairs. + + `module` should be set to the module this class is being created in; + if it is not set, an attempt to find that module will be made, but if + it fails the class will not be picklable. + + `qualname` should be set to the actual location this class can be found + at in its module; by default it is set to the global scope. If this is + not correct, unpickling will fail in some circumstances. + + `type`, if set, will be mixed in as the first base class. + """ + if names is None: # simple value lookup + return cls.__new__(cls, value) + # otherwise, functional API: we're creating a new Enum type + return cls._create_( + value, + names, + module=module, + qualname=qualname, + type=type, + start=start, + ) + + def __contains__(cls, obj): + if not isinstance(obj, Enum): + import warnings + warnings.warn( + "in 3.12 __contains__ will no longer raise TypeError, but will return True if\n" + "obj is a member or a member's value", + DeprecationWarning, + stacklevel=2, + ) + raise TypeError( + "unsupported operand type(s) for 'in': '%s' and '%s'" % ( + type(obj).__qualname__, cls.__class__.__qualname__)) + return isinstance(obj, cls) and obj._name_ in cls._member_map_ + + def __delattr__(cls, attr): + # nicer error message when someone tries to delete an attribute + # (see issue19025). + if attr in cls._member_map_: + raise AttributeError("%s: cannot delete Enum member." 
% cls.__name__)
+        super().__delattr__(attr)
+
+    def __dir__(self):
+        return (
+                ['__class__', '__doc__', '__members__', '__module__']
+                + self._member_names_
+                )
+
+    def __getattr__(cls, name):
+        """
+        Return the enum member matching `name`
+
+        We use __getattr__ instead of descriptors or inserting into the enum
+        class' __dict__ in order to support `name` and `value` being both
+        properties for enum members (which live in the class' __dict__) and
+        enum members themselves.
+        """
+        if _is_dunder(name):
+            raise AttributeError(name)
+        try:
+            return cls._member_map_[name]
+        except KeyError:
+            raise AttributeError(name) from None
+
+    def __getitem__(cls, name):
+        return cls._member_map_[name]
+
+    def __iter__(cls):
+        """
+        Returns members in definition order.
+        """
+        return (cls._member_map_[name] for name in cls._member_names_)
+
+    def __len__(cls):
+        return len(cls._member_names_)
+
+    @property
+    def __members__(cls):
+        """
+        Returns a mapping of member name->value.
+
+        This mapping lists all enum members, including aliases.  Note that this
+        is a read-only view of the internal mapping.
+        """
+        return MappingProxyType(cls._member_map_)
+
+    def __repr__(cls):
+        return "<enum %r>" % cls.__name__
+
+    def __reversed__(cls):
+        """
+        Returns members in reverse definition order.
+        """
+        return (cls._member_map_[name] for name in reversed(cls._member_names_))
+
+    def __setattr__(cls, name, value):
+        """
+        Block attempts to reassign Enum members.
+
+        A simple assignment to the class namespace only changes one of the
+        several possible ways to get an Enum member from the Enum class,
+        resulting in an inconsistent Enumeration.
+        """
+        member_map = cls.__dict__.get('_member_map_', {})
+        if name in member_map:
+            raise AttributeError('Cannot reassign members.')
+        super().__setattr__(name, value)
+
+    def _create_(cls, class_name, names, *, module=None, qualname=None, type=None, start=1):
+        """
+        Convenience method to create a new Enum class.
+
+        `names` can be:
+
+        * A string containing member names, separated either with spaces or
+          commas.  Values are incremented by 1 from `start`.
+        * An iterable of member names.  Values are incremented by 1 from `start`.
+        * An iterable of (member name, value) pairs.
+        * A mapping of member name -> value pairs.
+        """
+        metacls = cls.__class__
+        bases = (cls, ) if type is None else (type, cls)
+        _, first_enum = cls._get_mixins_(cls, bases)
+        classdict = metacls.__prepare__(class_name, bases)
+
+        # special processing needed for names?
+        if isinstance(names, str):
+            names = names.replace(',', ' ').split()
+        if isinstance(names, (tuple, list)) and names and isinstance(names[0], str):
+            original_names, names = names, []
+            last_values = []
+            for count, name in enumerate(original_names):
+                value = first_enum._generate_next_value_(name, start, count, last_values[:])
+                last_values.append(value)
+                names.append((name, value))
+
+        # Here, names is either an iterable of (name, value) or a mapping.
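+        # Hedged illustration of the functional API this method implements
+        # (the class and member names are assumed for the example):
+        #
+        #     Animal = Enum('Animal', 'ANT BEE CAT')            # values 1, 2, 3
+        #     Animal = Enum('Animal', [('ANT', 1), ('BEE', 2)])
+        #     Animal = Enum('Animal', {'ANT': 1, 'BEE': 2})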
+ for item in names: + if isinstance(item, str): + member_name, member_value = item, names[item] + else: + member_name, member_value = item + classdict[member_name] = member_value + enum_class = metacls.__new__(metacls, class_name, bases, classdict) + + # TODO: replace the frame hack if a blessed way to know the calling + # module is ever developed + if module is None: + try: + module = sys._getframe(2).f_globals['__name__'] + except (AttributeError, ValueError, KeyError): + pass + if module is None: + _make_class_unpicklable(enum_class) + else: + enum_class.__module__ = module + if qualname is not None: + enum_class.__qualname__ = qualname + + return enum_class + + def _convert_(cls, name, module, filter, source=None): + """ + Create a new Enum subclass that replaces a collection of global constants + """ + # convert all constants from source (or module) that pass filter() to + # a new Enum called name, and export the enum and its members back to + # module; + # also, replace the __reduce_ex__ method so unpickling works in + # previous Python versions + module_globals = vars(sys.modules[module]) + if source: + source = vars(source) + else: + source = module_globals + # _value2member_map_ is populated in the same order every time + # for a consistent reverse mapping of number to name when there + # are multiple names for the same number. + members = [ + (name, value) + for name, value in source.items() + if filter(name)] + try: + # sort by value + members.sort(key=lambda t: (t[1], t[0])) + except TypeError: + # unless some values aren't comparable, in which case sort by name + members.sort(key=lambda t: t[0]) + cls = cls(name, members, module=module) + cls.__reduce_ex__ = _reduce_ex_by_name + module_globals.update(cls.__members__) + module_globals[name] = cls + return cls + + @staticmethod + def _check_for_existing_members(class_name, bases): + for chain in bases: + for base in chain.__mro__: + if issubclass(base, Enum) and base._member_names_: + raise TypeError( + "%s: cannot extend enumeration %r" + % (class_name, base.__name__) + ) + + @staticmethod + def _get_mixins_(class_name, bases): + """ + Returns the type for creating enum members, and the first inherited + enum class. + + bases: the tuple of bases that was given to __new__ + """ + if not bases: + return object, Enum + + def _find_data_type(bases): + data_types = set() + for chain in bases: + candidate = None + for base in chain.__mro__: + if base is object: + continue + elif issubclass(base, Enum): + if base._member_type_ is not object: + data_types.add(base._member_type_) + break + elif '__new__' in base.__dict__: + if issubclass(base, Enum): + continue + data_types.add(candidate or base) + break + else: + candidate = candidate or base + if len(data_types) > 1: + raise TypeError('%r: too many data types: %r' % (class_name, data_types)) + elif data_types: + return data_types.pop() + else: + return None + + # ensure final parent class is an Enum derivative, find any concrete + # data type, and check that Enum has no members + first_enum = bases[-1] + if not issubclass(first_enum, Enum): + raise TypeError("new enumerations should be created as " + "`EnumName([mixin_type, ...] [data_type,] enum_type)`") + member_type = _find_data_type(bases) or object + if first_enum._member_names_: + raise TypeError("Cannot extend enumerations") + return member_type, first_enum + + @staticmethod + def _find_new_(classdict, member_type, first_enum): + """ + Returns the __new__ to be used for creating the enum members. 
+ + classdict: the class dictionary given to __new__ + member_type: the data type whose __new__ will be used by default + first_enum: enumeration to check for an overriding __new__ + """ + # now find the correct __new__, checking to see of one was defined + # by the user; also check earlier enum classes in case a __new__ was + # saved as __new_member__ + __new__ = classdict.get('__new__', None) + + # should __new__ be saved as __new_member__ later? + save_new = __new__ is not None + + if __new__ is None: + # check all possibles for __new_member__ before falling back to + # __new__ + for method in ('__new_member__', '__new__'): + for possible in (member_type, first_enum): + target = getattr(possible, method, None) + if target not in { + None, + None.__new__, + object.__new__, + Enum.__new__, + }: + __new__ = target + break + if __new__ is not None: + break + else: + __new__ = object.__new__ + + # if a non-object.__new__ is used then whatever value/tuple was + # assigned to the enum member name will be passed to __new__ and to the + # new enum member's __init__ + if __new__ is object.__new__: + use_args = False + else: + use_args = True + return __new__, save_new, use_args + + +class Enum(metaclass=EnumMeta): + """ + Generic enumeration. + + Derive from this class to define new enumerations. + """ + def __new__(cls, value): + # all enum instances are actually created during class construction + # without calling this method; this method is called by the metaclass' + # __call__ (i.e. Color(3) ), and by pickle + if type(value) is cls: + # For lookups like Color(Color.RED) + return value + # by-value search for a matching enum member + # see if it's in the reverse mapping (for hashable values) + try: + return cls._value2member_map_[value] + except KeyError: + # Not found, no need to do long O(n) search + pass + except TypeError: + # not there, now do long search -- O(n) behavior + for member in cls._member_map_.values(): + if member._value_ == value: + return member + # still not found -- try _missing_ hook + try: + exc = None + result = cls._missing_(value) + except Exception as e: + exc = e + result = None + try: + if isinstance(result, cls): + return result + else: + ve_exc = ValueError("%r is not a valid %s" % (value, cls.__qualname__)) + if result is None and exc is None: + raise ve_exc + elif exc is None: + exc = TypeError( + 'error in %s._missing_: returned %r instead of None or a valid member' + % (cls.__name__, result) + ) + if not isinstance(exc, ValueError): + exc.__context__ = ve_exc + raise exc + finally: + # ensure all variables that could hold an exception are destroyed + exc = None + ve_exc = None + + def _generate_next_value_(name, start, count, last_values): + """ + Generate the next value when not given. 
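+
+        For illustration (assuming int-compatible values), the default
+        progression is 1, 2, 3, ...:
+
+            class Color(Enum):
+                RED = auto()    # 1
+                GREEN = auto()  # 2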
+ + name: the name of the member + start: the initial start value or None + count: the number of existing members + last_value: the last value assigned or None + """ + for last_value in reversed(last_values): + try: + return last_value + 1 + except TypeError: + pass + else: + return start + + @classmethod + def _missing_(cls, value): + return None + + def __repr__(self): + return "<%s.%s: %r>" % ( + self.__class__.__name__, self._name_, self._value_) + + def __str__(self): + return "%s.%s" % (self.__class__.__name__, self._name_) + + def __dir__(self): + """ + Returns all members and all public methods + """ + added_behavior = [ + m + for cls in self.__class__.mro() + for m in cls.__dict__ + if m[0] != '_' and m not in self._member_map_ + ] + [m for m in self.__dict__ if m[0] != '_'] + return (['__class__', '__doc__', '__module__'] + added_behavior) + + def __format__(self, format_spec): + """ + Returns format using actual value type unless __str__ has been overridden. + """ + # mixed-in Enums should use the mixed-in type's __format__, otherwise + # we can get strange results with the Enum name showing up instead of + # the value + + # pure Enum branch, or branch with __str__ explicitly overridden + str_overridden = type(self).__str__ not in (Enum.__str__, Flag.__str__) + if self._member_type_ is object or str_overridden: + cls = str + val = str(self) + # mix-in branch + else: + cls = self._member_type_ + val = self._value_ + return cls.__format__(val, format_spec) + + def __hash__(self): + return hash(self._name_) + + def __reduce_ex__(self, proto): + return self.__class__, (self._value_, ) + + # DynamicClassAttribute is used to provide access to the `name` and + # `value` properties of enum members while keeping some measure of + # protection from modification, while still allowing for an enumeration + # to have members named `name` and `value`. This works because enumeration + # members are not set directly on the enum class -- __getattr__ is + # used to look them up. + + @DynamicClassAttribute + def name(self): + """The name of the Enum member.""" + return self._name_ + + @DynamicClassAttribute + def value(self): + """The value of the Enum member.""" + return self._value_ + + +class IntEnum(int, Enum): + """Enum where members are also (and must be) ints""" + + +def _reduce_ex_by_name(self, proto): + return self.name + +class Flag(Enum): + """ + Support for flags + """ + + def _generate_next_value_(name, start, count, last_values): + """ + Generate the next value when not given. + + name: the name of the member + start: the initial start value or None + count: the number of existing members + last_value: the last value assigned or None + """ + if not count: + return start if start is not None else 1 + for last_value in reversed(last_values): + try: + high_bit = _high_bit(last_value) + break + except Exception: + raise TypeError('Invalid Flag value: %r' % last_value) from None + return 2 ** (high_bit+1) + + @classmethod + def _missing_(cls, value): + """ + Returns member (possibly creating it) if one can be found for value. + """ + original_value = value + if value < 0: + value = ~value + possible_member = cls._create_pseudo_member_(value) + if original_value < 0: + possible_member = ~possible_member + return possible_member + + @classmethod + def _create_pseudo_member_(cls, value): + """ + Create a composite member iff value contains only members. 
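+
+        Illustrative sketch (the Perm class is assumed for the example):
+
+            class Perm(Flag):
+                R = 4
+                W = 2
+                X = 1
+
+            Perm(6)    # composite R|W pseudo-member; its _name_ is None
+            Perm(16)   # ValueError: 16 has a bit no member accounts for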
+ """ + pseudo_member = cls._value2member_map_.get(value, None) + if pseudo_member is None: + # verify all bits are accounted for + _, extra_flags = _decompose(cls, value) + if extra_flags: + raise ValueError("%r is not a valid %s" % (value, cls.__qualname__)) + # construct a singleton enum pseudo-member + pseudo_member = object.__new__(cls) + pseudo_member._name_ = None + pseudo_member._value_ = value + # use setdefault in case another thread already created a composite + # with this value + pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) + return pseudo_member + + def __contains__(self, other): + """ + Returns True if self has at least the same flags set as other. + """ + if not isinstance(other, self.__class__): + raise TypeError( + "unsupported operand type(s) for 'in': '%s' and '%s'" % ( + type(other).__qualname__, self.__class__.__qualname__)) + return other._value_ & self._value_ == other._value_ + + def __repr__(self): + cls = self.__class__ + if self._name_ is not None: + return '<%s.%s: %r>' % (cls.__name__, self._name_, self._value_) + members, uncovered = _decompose(cls, self._value_) + return '<%s.%s: %r>' % ( + cls.__name__, + '|'.join([str(m._name_ or m._value_) for m in members]), + self._value_, + ) + + def __str__(self): + cls = self.__class__ + if self._name_ is not None: + return '%s.%s' % (cls.__name__, self._name_) + members, uncovered = _decompose(cls, self._value_) + if len(members) == 1 and members[0]._name_ is None: + return '%s.%r' % (cls.__name__, members[0]._value_) + else: + return '%s.%s' % ( + cls.__name__, + '|'.join([str(m._name_ or m._value_) for m in members]), + ) + + def __bool__(self): + return bool(self._value_) + + def __or__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.__class__(self._value_ | other._value_) + + def __and__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.__class__(self._value_ & other._value_) + + def __xor__(self, other): + if not isinstance(other, self.__class__): + return NotImplemented + return self.__class__(self._value_ ^ other._value_) + + def __invert__(self): + members, uncovered = _decompose(self.__class__, self._value_) + inverted = self.__class__(0) + for m in self.__class__: + if m not in members and not (m._value_ & self._value_): + inverted = inverted | m + return self.__class__(inverted) + + +class IntFlag(int, Flag): + """ + Support for integer-based Flags + """ + + @classmethod + def _missing_(cls, value): + """ + Returns member (possibly creating it) if one can be found for value. + """ + if not isinstance(value, int): + raise ValueError("%r is not a valid %s" % (value, cls.__qualname__)) + new_member = cls._create_pseudo_member_(value) + return new_member + + @classmethod + def _create_pseudo_member_(cls, value): + """ + Create a composite member iff value contains only members. 
+ """ + pseudo_member = cls._value2member_map_.get(value, None) + if pseudo_member is None: + need_to_create = [value] + # get unaccounted for bits + _, extra_flags = _decompose(cls, value) + # timer = 10 + while extra_flags: + # timer -= 1 + bit = _high_bit(extra_flags) + flag_value = 2 ** bit + if (flag_value not in cls._value2member_map_ and + flag_value not in need_to_create + ): + need_to_create.append(flag_value) + if extra_flags == -flag_value: + extra_flags = 0 + else: + extra_flags ^= flag_value + for value in reversed(need_to_create): + # construct singleton pseudo-members + pseudo_member = int.__new__(cls, value) + pseudo_member._name_ = None + pseudo_member._value_ = value + # use setdefault in case another thread already created a composite + # with this value + pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) + return pseudo_member + + def __or__(self, other): + if not isinstance(other, (self.__class__, int)): + return NotImplemented + result = self.__class__(self._value_ | self.__class__(other)._value_) + return result + + def __and__(self, other): + if not isinstance(other, (self.__class__, int)): + return NotImplemented + return self.__class__(self._value_ & self.__class__(other)._value_) + + def __xor__(self, other): + if not isinstance(other, (self.__class__, int)): + return NotImplemented + return self.__class__(self._value_ ^ self.__class__(other)._value_) + + __ror__ = __or__ + __rand__ = __and__ + __rxor__ = __xor__ + + def __invert__(self): + result = self.__class__(~self._value_) + return result + + +def _high_bit(value): + """ + returns index of highest bit, or -1 if value is zero or negative + """ + return value.bit_length() - 1 + +def unique(enumeration): + """ + Class decorator for enumerations ensuring unique member values. + """ + duplicates = [] + for name, member in enumeration.__members__.items(): + if name != member.name: + duplicates.append((name, member.name)) + if duplicates: + alias_details = ', '.join( + ["%s -> %s" % (alias, name) for (alias, name) in duplicates]) + raise ValueError('duplicate values found in %r: %s' % + (enumeration, alias_details)) + return enumeration + +def _decompose(flag, value): + """ + Extract all members from the value. + """ + # _decompose is only called if the value is not named + not_covered = value + negative = value < 0 + members = [] + for member in flag: + member_value = member.value + if member_value and member_value & value == member_value: + members.append(member) + not_covered &= ~member_value + if not negative: + tmp = not_covered + while tmp: + flag_value = 2 ** _high_bit(tmp) + if flag_value in flag._value2member_map_: + members.append(flag._value2member_map_[flag_value]) + not_covered &= ~flag_value + tmp &= ~flag_value + if not members and value in flag._value2member_map_: + members.append(flag._value2member_map_[value]) + members.sort(key=lambda m: m._value_, reverse=True) + if len(members) > 1 and members[0].value == value: + # we have the breakdown, don't need the value member itself + members.pop(0) + return members, not_covered diff --git a/llava/lib/python3.10/fileinput.py b/llava/lib/python3.10/fileinput.py new file mode 100644 index 0000000000000000000000000000000000000000..3bd19906dcf5d276b9259d5e1147cde270f372de --- /dev/null +++ b/llava/lib/python3.10/fileinput.py @@ -0,0 +1,462 @@ +"""Helper class to quickly write a loop over all standard input files. 
+
+Typical use is:
+
+    import fileinput
+    for line in fileinput.input(encoding="utf-8"):
+        process(line)
+
+This iterates over the lines of all files listed in sys.argv[1:],
+defaulting to sys.stdin if the list is empty.  If a filename is '-' it
+is also replaced by sys.stdin and the optional arguments mode and
+openhook are ignored.  To specify an alternative list of filenames,
+pass it as the argument to input().  A single file name is also allowed.
+
+Functions filename(), lineno() return the filename and cumulative line
+number of the line that has just been read; filelineno() returns its
+line number in the current file; isfirstline() returns true iff the
+line just read is the first line of its file; isstdin() returns true
+iff the line was read from sys.stdin.  Function nextfile() closes the
+current file so that the next iteration will read the first line from
+the next file (if any); lines not read from the file will not count
+towards the cumulative line count; the filename is not changed until
+after the first line of the next file has been read.  Function close()
+closes the sequence.
+
+Before any lines have been read, filename() returns None and both line
+numbers are zero; nextfile() has no effect.  After all lines have been
+read, filename() and the line number functions return the values
+pertaining to the last line read; nextfile() has no effect.
+
+All files are opened in text mode by default; you can override this by
+setting the mode parameter to input() or FileInput.__init__().
+If an I/O error occurs during opening or reading a file, the OSError
+exception is raised.
+
+If sys.stdin is used more than once, the second and further use will
+return no lines, except perhaps for interactive use, or if it has been
+explicitly reset (e.g. using sys.stdin.seek(0)).
+
+Empty files are opened and immediately closed; the only time their
+presence in the list of filenames is noticeable at all is when the
+last file opened is empty.
+
+It is possible that the last line of a file doesn't end in a newline
+character; otherwise lines are returned including the trailing
+newline.
+
+Class FileInput is the implementation; its methods filename(),
+lineno(), filelineno(), isfirstline(), isstdin(), nextfile() and close()
+correspond to the functions in the module.  In addition it has a
+readline() method which returns the next input line, and a
+__getitem__() method which implements the sequence behavior.  The
+sequence must be accessed in strictly sequential order; sequence
+access and readline() cannot be mixed.
+
+Optional in-place filtering: if the keyword argument inplace=1 is
+passed to input() or to the FileInput constructor, the file is moved
+to a backup file and standard output is directed to the input file.
+This makes it possible to write a filter that rewrites its input file
+in place.  If the keyword argument backup=".<some extension>" is also
+given, it specifies the extension for the backup file, and the backup
+file remains around; by default, the extension is ".bak" and it is
+deleted when the output file is closed.  In-place filtering is
+disabled when standard input is read.  XXX The current implementation
+does not work for MS-DOS 8+3 filesystems.
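+
+Example of in-place filtering (an illustrative sketch; "data.txt" is a
+hypothetical file):
+
+    import fileinput
+    for line in fileinput.input(["data.txt"], inplace=True):
+        print(line.rstrip("\n").upper())  # stdout goes into data.txt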
+""" + +import io +import sys, os +from types import GenericAlias + +__all__ = ["input", "close", "nextfile", "filename", "lineno", "filelineno", + "fileno", "isfirstline", "isstdin", "FileInput", "hook_compressed", + "hook_encoded"] + +_state = None + +def input(files=None, inplace=False, backup="", *, mode="r", openhook=None, + encoding=None, errors=None): + """Return an instance of the FileInput class, which can be iterated. + + The parameters are passed to the constructor of the FileInput class. + The returned instance, in addition to being an iterator, + keeps global state for the functions of this module,. + """ + global _state + if _state and _state._file: + raise RuntimeError("input() already active") + _state = FileInput(files, inplace, backup, mode=mode, openhook=openhook, + encoding=encoding, errors=errors) + return _state + +def close(): + """Close the sequence.""" + global _state + state = _state + _state = None + if state: + state.close() + +def nextfile(): + """ + Close the current file so that the next iteration will read the first + line from the next file (if any); lines not read from the file will + not count towards the cumulative line count. The filename is not + changed until after the first line of the next file has been read. + Before the first line has been read, this function has no effect; + it cannot be used to skip the first file. After the last line of the + last file has been read, this function has no effect. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.nextfile() + +def filename(): + """ + Return the name of the file currently being read. + Before the first line has been read, returns None. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.filename() + +def lineno(): + """ + Return the cumulative line number of the line that has just been read. + Before the first line has been read, returns 0. After the last line + of the last file has been read, returns the line number of that line. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.lineno() + +def filelineno(): + """ + Return the line number in the current file. Before the first line + has been read, returns 0. After the last line of the last file has + been read, returns the line number of that line within the file. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.filelineno() + +def fileno(): + """ + Return the file number of the current file. When no file is currently + opened, returns -1. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.fileno() + +def isfirstline(): + """ + Returns true the line just read is the first line of its file, + otherwise returns false. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.isfirstline() + +def isstdin(): + """ + Returns true if the last line was read from sys.stdin, + otherwise returns false. + """ + if not _state: + raise RuntimeError("no active input()") + return _state.isstdin() + +class FileInput: + """FileInput([files[, inplace[, backup]]], *, mode=None, openhook=None) + + Class FileInput is the implementation of the module; its methods + filename(), lineno(), fileline(), isfirstline(), isstdin(), fileno(), + nextfile() and close() correspond to the functions of the same name + in the module. + In addition it has a readline() method which returns the next + input line, and a __getitem__() method which implements the + sequence behavior. 
The sequence must be accessed in strictly + sequential order; random access and readline() cannot be mixed. + """ + + def __init__(self, files=None, inplace=False, backup="", *, + mode="r", openhook=None, encoding=None, errors=None): + if isinstance(files, str): + files = (files,) + elif isinstance(files, os.PathLike): + files = (os.fspath(files), ) + else: + if files is None: + files = sys.argv[1:] + if not files: + files = ('-',) + else: + files = tuple(files) + self._files = files + self._inplace = inplace + self._backup = backup + self._savestdout = None + self._output = None + self._filename = None + self._startlineno = 0 + self._filelineno = 0 + self._file = None + self._isstdin = False + self._backupfilename = None + self._encoding = encoding + self._errors = errors + + # We can not use io.text_encoding() here because old openhook doesn't + # take encoding parameter. + if (sys.flags.warn_default_encoding and + "b" not in mode and encoding is None and openhook is None): + import warnings + warnings.warn("'encoding' argument not specified.", + EncodingWarning, 2) + + # restrict mode argument to reading modes + if mode not in ('r', 'rU', 'U', 'rb'): + raise ValueError("FileInput opening mode must be one of " + "'r', 'rU', 'U' and 'rb'") + if 'U' in mode: + import warnings + warnings.warn("'U' mode is deprecated", + DeprecationWarning, 2) + self._mode = mode + self._write_mode = mode.replace('r', 'w') if 'U' not in mode else 'w' + if openhook: + if inplace: + raise ValueError("FileInput cannot use an opening hook in inplace mode") + if not callable(openhook): + raise ValueError("FileInput openhook must be callable") + self._openhook = openhook + + def __del__(self): + self.close() + + def close(self): + try: + self.nextfile() + finally: + self._files = () + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def __iter__(self): + return self + + def __next__(self): + while True: + line = self._readline() + if line: + self._filelineno += 1 + return line + if not self._file: + raise StopIteration + self.nextfile() + # repeat with next file + + def __getitem__(self, i): + import warnings + warnings.warn( + "Support for indexing FileInput objects is deprecated. 
" + "Use iterator protocol instead.", + DeprecationWarning, + stacklevel=2 + ) + if i != self.lineno(): + raise RuntimeError("accessing lines out of order") + try: + return self.__next__() + except StopIteration: + raise IndexError("end of input reached") + + def nextfile(self): + savestdout = self._savestdout + self._savestdout = None + if savestdout: + sys.stdout = savestdout + + output = self._output + self._output = None + try: + if output: + output.close() + finally: + file = self._file + self._file = None + try: + del self._readline # restore FileInput._readline + except AttributeError: + pass + try: + if file and not self._isstdin: + file.close() + finally: + backupfilename = self._backupfilename + self._backupfilename = None + if backupfilename and not self._backup: + try: os.unlink(backupfilename) + except OSError: pass + + self._isstdin = False + + def readline(self): + while True: + line = self._readline() + if line: + self._filelineno += 1 + return line + if not self._file: + return line + self.nextfile() + # repeat with next file + + def _readline(self): + if not self._files: + if 'b' in self._mode: + return b'' + else: + return '' + self._filename = self._files[0] + self._files = self._files[1:] + self._startlineno = self.lineno() + self._filelineno = 0 + self._file = None + self._isstdin = False + self._backupfilename = 0 + + # EncodingWarning is emitted in __init__() already + if "b" not in self._mode: + encoding = self._encoding or "locale" + else: + encoding = None + + if self._filename == '-': + self._filename = '' + if 'b' in self._mode: + self._file = getattr(sys.stdin, 'buffer', sys.stdin) + else: + self._file = sys.stdin + self._isstdin = True + else: + if self._inplace: + self._backupfilename = ( + os.fspath(self._filename) + (self._backup or ".bak")) + try: + os.unlink(self._backupfilename) + except OSError: + pass + # The next few lines may raise OSError + os.rename(self._filename, self._backupfilename) + self._file = open(self._backupfilename, self._mode, + encoding=encoding, errors=self._errors) + try: + perm = os.fstat(self._file.fileno()).st_mode + except OSError: + self._output = open(self._filename, self._write_mode, + encoding=encoding, errors=self._errors) + else: + mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC + if hasattr(os, 'O_BINARY'): + mode |= os.O_BINARY + + fd = os.open(self._filename, mode, perm) + self._output = os.fdopen(fd, self._write_mode, + encoding=encoding, errors=self._errors) + try: + os.chmod(self._filename, perm) + except OSError: + pass + self._savestdout = sys.stdout + sys.stdout = self._output + else: + # This may raise OSError + if self._openhook: + # Custom hooks made previous to Python 3.10 didn't have + # encoding argument + if self._encoding is None: + self._file = self._openhook(self._filename, self._mode) + else: + self._file = self._openhook( + self._filename, self._mode, encoding=self._encoding, errors=self._errors) + else: + self._file = open(self._filename, self._mode, encoding=encoding, errors=self._errors) + self._readline = self._file.readline # hide FileInput._readline + return self._readline() + + def filename(self): + return self._filename + + def lineno(self): + return self._startlineno + self._filelineno + + def filelineno(self): + return self._filelineno + + def fileno(self): + if self._file: + try: + return self._file.fileno() + except ValueError: + return -1 + else: + return -1 + + def isfirstline(self): + return self._filelineno == 1 + + def isstdin(self): + return self._isstdin + + __class_getitem__ = 
classmethod(GenericAlias) + + +def hook_compressed(filename, mode, *, encoding=None, errors=None): + if encoding is None and "b" not in mode: # EncodingWarning is emitted in FileInput() already. + encoding = "locale" + ext = os.path.splitext(filename)[1] + if ext == '.gz': + import gzip + stream = gzip.open(filename, mode) + elif ext == '.bz2': + import bz2 + stream = bz2.BZ2File(filename, mode) + else: + return open(filename, mode, encoding=encoding, errors=errors) + + # gzip and bz2 are binary mode by default. + if "b" not in mode: + stream = io.TextIOWrapper(stream, encoding=encoding, errors=errors) + return stream + + +def hook_encoded(encoding, errors=None): + def openhook(filename, mode): + return open(filename, mode, encoding=encoding, errors=errors) + return openhook + + +def _test(): + import getopt + inplace = False + backup = False + opts, args = getopt.getopt(sys.argv[1:], "ib:") + for o, a in opts: + if o == '-i': inplace = True + if o == '-b': backup = a + for line in input(args, inplace=inplace, backup=backup): + if line[-1:] == '\n': line = line[:-1] + if line[-1:] == '\r': line = line[:-1] + print("%d: %s[%d]%s %s" % (lineno(), filename(), filelineno(), + isfirstline() and "*" or "", line)) + print("%d: %s[%d]" % (lineno(), filename(), filelineno())) + +if __name__ == '__main__': + _test() diff --git a/llava/lib/python3.10/heapq.py b/llava/lib/python3.10/heapq.py new file mode 100644 index 0000000000000000000000000000000000000000..fabefd87f8bf8c804e8eb3155c1aacbf05dd02bd --- /dev/null +++ b/llava/lib/python3.10/heapq.py @@ -0,0 +1,601 @@ +"""Heap queue algorithm (a.k.a. priority queue). + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +Usage: + +heap = [] # creates an empty heap +heappush(heap, item) # pushes a new item on the heap +item = heappop(heap) # pops the smallest item from the heap +item = heap[0] # smallest item on the heap without popping it +heapify(x) # transforms list into a heap, in-place, in linear time +item = heapreplace(heap, item) # pops and returns smallest item, and adds + # new item; the heap size is unchanged + +Our API differs from textbook heap algorithms as follows: + +- We use 0-based indexing. This makes the relationship between the + index for a node and the indexes for its children slightly less + obvious, but is more suitable since Python uses 0-based indexing. + +- Our heappop() method returns the smallest item, not the largest. + +These two make it possible to view the heap as a regular Python list +without surprises: heap[0] is the smallest item, and heap.sort() +maintains the heap invariant! +""" + +# Original code by Kevin O'Connor, augmented by Tim Peters and Raymond Hettinger + +__about__ = """Heap queues + +[explanation by François Pinard] + +Heaps are arrays for which a[k] <= a[2*k+1] and a[k] <= a[2*k+2] for +all k, counting elements from 0. For the sake of comparison, +non-existing elements are considered to be infinite. The interesting +property of a heap is that a[0] is always its smallest element. + +The strange invariant above is meant to be an efficient memory +representation for a tournament. The numbers below are `k', not a[k]: + + 0 + + 1 2 + + 3 4 5 6 + + 7 8 9 10 11 12 13 14 + + 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 + + +In the tree above, each cell `k' is topping `2*k+1' and `2*k+2'. 
In +a usual binary tournament we see in sports, each cell is the winner +over the two cells it tops, and we can trace the winner down the tree +to see all opponents s/he had. However, in many computer applications +of such tournaments, we do not need to trace the history of a winner. +To be more memory efficient, when a winner is promoted, we try to +replace it by something else at a lower level, and the rule becomes +that a cell and the two cells it tops contain three different items, +but the top cell "wins" over the two topped cells. + +If this heap invariant is protected at all time, index 0 is clearly +the overall winner. The simplest algorithmic way to remove it and +find the "next" winner is to move some loser (let's say cell 30 in the +diagram above) into the 0 position, and then percolate this new 0 down +the tree, exchanging values, until the invariant is re-established. +This is clearly logarithmic on the total number of items in the tree. +By iterating over all items, you get an O(n ln n) sort. + +A nice feature of this sort is that you can efficiently insert new +items while the sort is going on, provided that the inserted items are +not "better" than the last 0'th element you extracted. This is +especially useful in simulation contexts, where the tree holds all +incoming events, and the "win" condition means the smallest scheduled +time. When an event schedule other events for execution, they are +scheduled into the future, so they can easily go into the heap. So, a +heap is a good structure for implementing schedulers (this is what I +used for my MIDI sequencer :-). + +Various structures for implementing schedulers have been extensively +studied, and heaps are good for this, as they are reasonably speedy, +the speed is almost constant, and the worst case is not much different +than the average case. However, there are other representations which +are more efficient overall, yet the worst cases might be terrible. + +Heaps are also very useful in big disk sorts. You most probably all +know that a big sort implies producing "runs" (which are pre-sorted +sequences, which size is usually related to the amount of CPU memory), +followed by a merging passes for these runs, which merging is often +very cleverly organised[1]. It is very important that the initial +sort produces the longest runs possible. Tournaments are a good way +to that. If, using all the memory available to hold a tournament, you +replace and percolate items that happen to fit the current run, you'll +produce runs which are twice the size of the memory for random input, +and much better for input fuzzily ordered. + +Moreover, if you output the 0'th item on disk and get an input which +may not fit in the current tournament (because the value "wins" over +the last output value), it cannot fit in the heap, so the size of the +heap decreases. The freed memory could be cleverly reused immediately +for progressively building a second heap, which grows at exactly the +same rate the first heap is melting. When the first heap completely +vanishes, you switch heaps and start a new run. Clever and quite +effective! + +In a word, heaps are useful memory structures to know. I use them in +a few applications, and I think it is good to keep a `heap' module +around. :-) + +-------------------- +[1] The disk balancing algorithms which are current, nowadays, are +more annoying than clever, and this is a consequence of the seeking +capabilities of the disks. 
On devices which cannot seek, like big +tape drives, the story was quite different, and one had to be very +clever to ensure (far in advance) that each tape movement will be the +most effective possible (that is, will best participate at +"progressing" the merge). Some tapes were even able to read +backwards, and this was also used to avoid the rewinding time. +Believe me, real good tape sorts were quite spectacular to watch! +From all times, sorting has always been a Great Art! :-) +""" + +__all__ = ['heappush', 'heappop', 'heapify', 'heapreplace', 'merge', + 'nlargest', 'nsmallest', 'heappushpop'] + +def heappush(heap, item): + """Push item onto heap, maintaining the heap invariant.""" + heap.append(item) + _siftdown(heap, 0, len(heap)-1) + +def heappop(heap): + """Pop the smallest item off the heap, maintaining the heap invariant.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup(heap, 0) + return returnitem + return lastelt + +def heapreplace(heap, item): + """Pop and return the current smallest value, and add the new item. + + This is more efficient than heappop() followed by heappush(), and can be + more appropriate when using a fixed-size heap. Note that the value + returned may be larger than item! That constrains reasonable uses of + this routine unless written as part of a conditional replacement: + + if item > heap[0]: + item = heapreplace(heap, item) + """ + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup(heap, 0) + return returnitem + +def heappushpop(heap, item): + """Fast version of a heappush followed by a heappop.""" + if heap and heap[0] < item: + item, heap[0] = heap[0], item + _siftup(heap, 0) + return item + +def heapify(x): + """Transform list into a heap, in-place, in O(len(x)) time.""" + n = len(x) + # Transform bottom-up. The largest index there's any point to looking at + # is the largest with a child index in-range, so must have 2*i + 1 < n, + # or i < (n-1)/2. If n is even = 2*j, this is (2*j-1)/2 = j-1/2 so + # j-1 is the largest, which is n//2 - 1. If n is odd = 2*j+1, this is + # (2*j+1-1)/2 = j so j-1 is the largest, and that's again n//2-1. + for i in reversed(range(n//2)): + _siftup(x, i) + +def _heappop_max(heap): + """Maxheap version of a heappop.""" + lastelt = heap.pop() # raises appropriate IndexError if heap is empty + if heap: + returnitem = heap[0] + heap[0] = lastelt + _siftup_max(heap, 0) + return returnitem + return lastelt + +def _heapreplace_max(heap, item): + """Maxheap version of a heappop followed by a heappush.""" + returnitem = heap[0] # raises appropriate IndexError if heap is empty + heap[0] = item + _siftup_max(heap, 0) + return returnitem + +def _heapify_max(x): + """Transform list into a maxheap, in-place, in O(len(x)) time.""" + n = len(x) + for i in reversed(range(n//2)): + _siftup_max(x, i) + +# 'heap' is a heap at all indices >= startpos, except possibly for pos. pos +# is the index of a leaf with a possibly out-of-order value. Restore the +# heap invariant. +def _siftdown(heap, startpos, pos): + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if newitem < parent: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +# The child indices of heap index pos are already heaps, and we want to make +# a heap at index pos too. 
We do this by bubbling the smaller child of +# pos up (and so on with that child's children, etc) until hitting a leaf, +# then using _siftdown to move the oddball originally at index pos into place. +# +# We *could* break out of the loop as soon as we find a pos where newitem <= +# both its children, but turns out that's not a good idea, and despite that +# many books write the algorithm that way. During a heap pop, the last array +# element is sifted in, and that tends to be large, so that comparing it +# against values starting from the root usually doesn't pay (= usually doesn't +# get us out of the loop early). See Knuth, Volume 3, where this is +# explained and quantified in an exercise. +# +# Cutting the # of comparisons is important, since these routines have no +# way to extract "the priority" from an array element, so that intelligence +# is likely to be hiding in custom comparison methods, or in array elements +# storing (priority, record) tuples. Comparisons are thus potentially +# expensive. +# +# On random arrays of length 1000, making this change cut the number of +# comparisons made by heapify() a little, and those made by exhaustive +# heappop() a lot, in accord with theory. Here are typical results from 3 +# runs (3 just to demonstrate how small the variance is): +# +# Compares needed by heapify Compares needed by 1000 heappops +# -------------------------- -------------------------------- +# 1837 cut to 1663 14996 cut to 8680 +# 1855 cut to 1659 14966 cut to 8678 +# 1847 cut to 1660 15024 cut to 8703 +# +# Building the heap by using heappush() 1000 times instead required +# 2198, 2148, and 2219 compares: heapify() is more efficient, when +# you can use it. +# +# The total compares needed by list.sort() on the same lists were 8627, +# 8627, and 8632 (this should be compared to the sum of heapify() and +# heappop() compares): list.sort() is (unsurprisingly!) more efficient +# for sorting. + +def _siftup(heap, pos): + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the smaller child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of smaller child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[childpos] < heap[rightpos]: + childpos = rightpos + # Move the smaller child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown(heap, startpos, pos) + +def _siftdown_max(heap, startpos, pos): + 'Maxheap variant of _siftdown' + newitem = heap[pos] + # Follow the path to the root, moving parents down until finding a place + # newitem fits. + while pos > startpos: + parentpos = (pos - 1) >> 1 + parent = heap[parentpos] + if parent < newitem: + heap[pos] = parent + pos = parentpos + continue + break + heap[pos] = newitem + +def _siftup_max(heap, pos): + 'Maxheap variant of _siftup' + endpos = len(heap) + startpos = pos + newitem = heap[pos] + # Bubble up the larger child until hitting a leaf. + childpos = 2*pos + 1 # leftmost child position + while childpos < endpos: + # Set childpos to index of larger child. + rightpos = childpos + 1 + if rightpos < endpos and not heap[rightpos] < heap[childpos]: + childpos = rightpos + # Move the larger child up. + heap[pos] = heap[childpos] + pos = childpos + childpos = 2*pos + 1 + # The leaf at pos is empty now. 
Put newitem there, and bubble it up + # to its final resting place (by sifting its parents down). + heap[pos] = newitem + _siftdown_max(heap, startpos, pos) + +def merge(*iterables, key=None, reverse=False): + '''Merge multiple sorted inputs into a single sorted output. + + Similar to sorted(itertools.chain(*iterables)) but returns a generator, + does not pull the data into memory all at once, and assumes that each of + the input streams is already sorted (smallest to largest). + + >>> list(merge([1,3,5,7], [0,2,4,8], [5,10,15,20], [], [25])) + [0, 1, 2, 3, 4, 5, 5, 7, 8, 10, 15, 20, 25] + + If *key* is not None, applies a key function to each element to determine + its sort order. + + >>> list(merge(['dog', 'horse'], ['cat', 'fish', 'kangaroo'], key=len)) + ['dog', 'cat', 'fish', 'horse', 'kangaroo'] + + ''' + + h = [] + h_append = h.append + + if reverse: + _heapify = _heapify_max + _heappop = _heappop_max + _heapreplace = _heapreplace_max + direction = -1 + else: + _heapify = heapify + _heappop = heappop + _heapreplace = heapreplace + direction = 1 + + if key is None: + for order, it in enumerate(map(iter, iterables)): + try: + next = it.__next__ + h_append([next(), order * direction, next]) + except StopIteration: + pass + _heapify(h) + while len(h) > 1: + try: + while True: + value, order, next = s = h[0] + yield value + s[0] = next() # raises StopIteration when exhausted + _heapreplace(h, s) # restore heap condition + except StopIteration: + _heappop(h) # remove empty iterator + if h: + # fast case when only a single iterator remains + value, order, next = h[0] + yield value + yield from next.__self__ + return + + for order, it in enumerate(map(iter, iterables)): + try: + next = it.__next__ + value = next() + h_append([key(value), order * direction, value, next]) + except StopIteration: + pass + _heapify(h) + while len(h) > 1: + try: + while True: + key_value, order, value, next = s = h[0] + yield value + value = next() + s[0] = key(value) + s[2] = value + _heapreplace(h, s) + except StopIteration: + _heappop(h) + if h: + key_value, order, value, next = h[0] + yield value + yield from next.__self__ + + +# Algorithm notes for nlargest() and nsmallest() +# ============================================== +# +# Make a single pass over the data while keeping the k most extreme values +# in a heap. Memory consumption is limited to keeping k values in a list. +# +# Measured performance for random inputs: +# +# number of comparisons +# n inputs k-extreme values (average of 5 trials) % more than min() +# ------------- ---------------- --------------------- ----------------- +# 1,000 100 3,317 231.7% +# 10,000 100 14,046 40.5% +# 100,000 100 105,749 5.7% +# 1,000,000 100 1,007,751 0.8% +# 10,000,000 100 10,009,401 0.1% +# +# Theoretical number of comparisons for k smallest of n random inputs: +# +# Step Comparisons Action +# ---- -------------------------- --------------------------- +# 1 1.66 * k heapify the first k-inputs +# 2 n - k compare remaining elements to top of heap +# 3 k * (1 + lg2(k)) * ln(n/k) replace the topmost value on the heap +# 4 k * lg2(k) - (k/2) final sort of the k most extreme values +# +# Combining and simplifying for a rough estimate gives: +# +# comparisons = n + k * (log(k, 2) * log(n/k) + log(k, 2) + log(n/k)) +# +# Computing the number of comparisons for step 3: +# ----------------------------------------------- +# * For the i-th new value from the iterable, the probability of being in the +# k most extreme values is k/i. 
For example, the probability of the 101st +# value seen being in the 100 most extreme values is 100/101. +# * If the value is a new extreme value, the cost of inserting it into the +# heap is 1 + log(k, 2). +# * The probability times the cost gives: +# (k/i) * (1 + log(k, 2)) +# * Summing across the remaining n-k elements gives: +# sum((k/i) * (1 + log(k, 2)) for i in range(k+1, n+1)) +# * This reduces to: +# (H(n) - H(k)) * k * (1 + log(k, 2)) +# * Where H(n) is the n-th harmonic number estimated by: +# gamma = 0.5772156649 +# H(n) = log(n, e) + gamma + 1 / (2 * n) +# http://en.wikipedia.org/wiki/Harmonic_series_(mathematics)#Rate_of_divergence +# * Substituting the H(n) formula: +# comparisons = k * (1 + log(k, 2)) * (log(n/k, e) + (1/n - 1/k) / 2) +# +# Worst-case for step 3: +# ---------------------- +# In the worst case, the input data is reversed sorted so that every new element +# must be inserted in the heap: +# +# comparisons = 1.66 * k + log(k, 2) * (n - k) +# +# Alternative Algorithms +# ---------------------- +# Other algorithms were not used because they: +# 1) Took much more auxiliary memory, +# 2) Made multiple passes over the data. +# 3) Made more comparisons in common cases (small k, large n, semi-random input). +# See the more detailed comparison of approach at: +# http://code.activestate.com/recipes/577573-compare-algorithms-for-heapqsmallest + +def nsmallest(n, iterable, key=None): + """Find the n smallest elements in a dataset. + + Equivalent to: sorted(iterable, key=key)[:n] + """ + + # Short-cut for n==1 is to use min() + if n == 1: + it = iter(iterable) + sentinel = object() + result = min(it, default=sentinel, key=key) + return [] if result is sentinel else [result] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key)[:n] + + # When key is none, use simpler decoration + if key is None: + it = iter(iterable) + # put the range(n) first so that zip() doesn't + # consume one too many elements from the iterator + result = [(elem, i) for i, elem in zip(range(n), it)] + if not result: + return result + _heapify_max(result) + top = result[0][0] + order = n + _heapreplace = _heapreplace_max + for elem in it: + if elem < top: + _heapreplace(result, (elem, order)) + top, _order = result[0] + order += 1 + result.sort() + return [elem for (elem, order) in result] + + # General case, slowest method + it = iter(iterable) + result = [(key(elem), i, elem) for i, elem in zip(range(n), it)] + if not result: + return result + _heapify_max(result) + top = result[0][0] + order = n + _heapreplace = _heapreplace_max + for elem in it: + k = key(elem) + if k < top: + _heapreplace(result, (k, order, elem)) + top, _order, _elem = result[0] + order += 1 + result.sort() + return [elem for (k, order, elem) in result] + +def nlargest(n, iterable, key=None): + """Find the n largest elements in a dataset. 
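+
+    For example (illustrative doctest):
+
+        >>> nlargest(3, [4, 1, 7, 3, 8])
+        [8, 7, 4]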
+ + Equivalent to: sorted(iterable, key=key, reverse=True)[:n] + """ + + # Short-cut for n==1 is to use max() + if n == 1: + it = iter(iterable) + sentinel = object() + result = max(it, default=sentinel, key=key) + return [] if result is sentinel else [result] + + # When n>=size, it's faster to use sorted() + try: + size = len(iterable) + except (TypeError, AttributeError): + pass + else: + if n >= size: + return sorted(iterable, key=key, reverse=True)[:n] + + # When key is none, use simpler decoration + if key is None: + it = iter(iterable) + result = [(elem, i) for i, elem in zip(range(0, -n, -1), it)] + if not result: + return result + heapify(result) + top = result[0][0] + order = -n + _heapreplace = heapreplace + for elem in it: + if top < elem: + _heapreplace(result, (elem, order)) + top, _order = result[0] + order -= 1 + result.sort(reverse=True) + return [elem for (elem, order) in result] + + # General case, slowest method + it = iter(iterable) + result = [(key(elem), i, elem) for i, elem in zip(range(0, -n, -1), it)] + if not result: + return result + heapify(result) + top = result[0][0] + order = -n + _heapreplace = heapreplace + for elem in it: + k = key(elem) + if top < k: + _heapreplace(result, (k, order, elem)) + top, _order, _elem = result[0] + order -= 1 + result.sort(reverse=True) + return [elem for (k, order, elem) in result] + +# If available, use C implementation +try: + from _heapq import * +except ImportError: + pass +try: + from _heapq import _heapreplace_max +except ImportError: + pass +try: + from _heapq import _heapify_max +except ImportError: + pass +try: + from _heapq import _heappop_max +except ImportError: + pass + + +if __name__ == "__main__": + + import doctest # pragma: no cover + print(doctest.testmod()) # pragma: no cover diff --git a/llava/lib/python3.10/imp.py b/llava/lib/python3.10/imp.py new file mode 100644 index 0000000000000000000000000000000000000000..e02aaef344c6148b43ef3954689126319504dea9 --- /dev/null +++ b/llava/lib/python3.10/imp.py @@ -0,0 +1,346 @@ +"""This module provides the components needed to build your own __import__ +function. Undocumented functions are obsolete. + +In most cases it is preferred you consider using the importlib module's +functionality over this module. + +""" +# (Probably) need to stay in _imp +from _imp import (lock_held, acquire_lock, release_lock, + get_frozen_object, is_frozen_package, + init_frozen, is_builtin, is_frozen, + _fix_co_filename) +try: + from _imp import create_dynamic +except ImportError: + # Platform doesn't support dynamic loading. + create_dynamic = None + +from importlib._bootstrap import _ERR_MSG, _exec, _load, _builtin_from_name +from importlib._bootstrap_external import SourcelessFileLoader + +from importlib import machinery +from importlib import util +import importlib +import os +import sys +import tokenize +import types +import warnings + +warnings.warn("the imp module is deprecated in favour of importlib and slated " + "for removal in Python 3.12; " + "see the module's documentation for alternative uses", + DeprecationWarning, stacklevel=2) + +# DEPRECATED +SEARCH_ERROR = 0 +PY_SOURCE = 1 +PY_COMPILED = 2 +C_EXTENSION = 3 +PY_RESOURCE = 4 +PKG_DIRECTORY = 5 +C_BUILTIN = 6 +PY_FROZEN = 7 +PY_CODERESOURCE = 8 +IMP_HOOK = 9 + + +def new_module(name): + """**DEPRECATED** + + Create a new module. + + The module is not entered into sys.modules. + + """ + return types.ModuleType(name) + + +def get_magic(): + """**DEPRECATED** + + Return the magic number for .pyc files. 
+ """ + return util.MAGIC_NUMBER + + +def get_tag(): + """Return the magic tag for .pyc files.""" + return sys.implementation.cache_tag + + +def cache_from_source(path, debug_override=None): + """**DEPRECATED** + + Given the path to a .py file, return the path to its .pyc file. + + The .py file does not need to exist; this simply returns the path to the + .pyc file calculated as if the .py file were imported. + + If debug_override is not None, then it must be a boolean and is used in + place of sys.flags.optimize. + + If sys.implementation.cache_tag is None then NotImplementedError is raised. + + """ + with warnings.catch_warnings(): + warnings.simplefilter('ignore') + return util.cache_from_source(path, debug_override) + + +def source_from_cache(path): + """**DEPRECATED** + + Given the path to a .pyc. file, return the path to its .py file. + + The .pyc file does not need to exist; this simply returns the path to + the .py file calculated to correspond to the .pyc file. If path does + not conform to PEP 3147 format, ValueError will be raised. If + sys.implementation.cache_tag is None then NotImplementedError is raised. + + """ + return util.source_from_cache(path) + + +def get_suffixes(): + """**DEPRECATED**""" + extensions = [(s, 'rb', C_EXTENSION) for s in machinery.EXTENSION_SUFFIXES] + source = [(s, 'r', PY_SOURCE) for s in machinery.SOURCE_SUFFIXES] + bytecode = [(s, 'rb', PY_COMPILED) for s in machinery.BYTECODE_SUFFIXES] + + return extensions + source + bytecode + + +class NullImporter: + + """**DEPRECATED** + + Null import object. + + """ + + def __init__(self, path): + if path == '': + raise ImportError('empty pathname', path='') + elif os.path.isdir(path): + raise ImportError('existing directory', path=path) + + def find_module(self, fullname): + """Always returns None.""" + return None + + +class _HackedGetData: + + """Compatibility support for 'file' arguments of various load_*() + functions.""" + + def __init__(self, fullname, path, file=None): + super().__init__(fullname, path) + self.file = file + + def get_data(self, path): + """Gross hack to contort loader to deal w/ load_*()'s bad API.""" + if self.file and path == self.path: + # The contract of get_data() requires us to return bytes. Reopen the + # file in binary mode if needed. + if not self.file.closed: + file = self.file + if 'b' not in file.mode: + file.close() + if self.file.closed: + self.file = file = open(self.path, 'rb') + + with file: + return file.read() + else: + return super().get_data(path) + + +class _LoadSourceCompatibility(_HackedGetData, machinery.SourceFileLoader): + + """Compatibility support for implementing load_source().""" + + +def load_source(name, pathname, file=None): + loader = _LoadSourceCompatibility(name, pathname, file) + spec = util.spec_from_file_location(name, pathname, loader=loader) + if name in sys.modules: + module = _exec(spec, sys.modules[name]) + else: + module = _load(spec) + # To allow reloading to potentially work, use a non-hacked loader which + # won't rely on a now-closed file object. 
+ module.__loader__ = machinery.SourceFileLoader(name, pathname) + module.__spec__.loader = module.__loader__ + return module + + +class _LoadCompiledCompatibility(_HackedGetData, SourcelessFileLoader): + + """Compatibility support for implementing load_compiled().""" + + +def load_compiled(name, pathname, file=None): + """**DEPRECATED**""" + loader = _LoadCompiledCompatibility(name, pathname, file) + spec = util.spec_from_file_location(name, pathname, loader=loader) + if name in sys.modules: + module = _exec(spec, sys.modules[name]) + else: + module = _load(spec) + # To allow reloading to potentially work, use a non-hacked loader which + # won't rely on a now-closed file object. + module.__loader__ = SourcelessFileLoader(name, pathname) + module.__spec__.loader = module.__loader__ + return module + + +def load_package(name, path): + """**DEPRECATED**""" + if os.path.isdir(path): + extensions = (machinery.SOURCE_SUFFIXES[:] + + machinery.BYTECODE_SUFFIXES[:]) + for extension in extensions: + init_path = os.path.join(path, '__init__' + extension) + if os.path.exists(init_path): + path = init_path + break + else: + raise ValueError('{!r} is not a package'.format(path)) + spec = util.spec_from_file_location(name, path, + submodule_search_locations=[]) + if name in sys.modules: + return _exec(spec, sys.modules[name]) + else: + return _load(spec) + + +def load_module(name, file, filename, details): + """**DEPRECATED** + + Load a module, given information returned by find_module(). + + The module name must include the full package name, if any. + + """ + suffix, mode, type_ = details + if mode and (not mode.startswith(('r', 'U')) or '+' in mode): + raise ValueError('invalid file open mode {!r}'.format(mode)) + elif file is None and type_ in {PY_SOURCE, PY_COMPILED}: + msg = 'file object required for import (type code {})'.format(type_) + raise ValueError(msg) + elif type_ == PY_SOURCE: + return load_source(name, filename, file) + elif type_ == PY_COMPILED: + return load_compiled(name, filename, file) + elif type_ == C_EXTENSION and load_dynamic is not None: + if file is None: + with open(filename, 'rb') as opened_file: + return load_dynamic(name, filename, opened_file) + else: + return load_dynamic(name, filename, file) + elif type_ == PKG_DIRECTORY: + return load_package(name, filename) + elif type_ == C_BUILTIN: + return init_builtin(name) + elif type_ == PY_FROZEN: + return init_frozen(name) + else: + msg = "Don't know how to import {} (type code {})".format(name, type_) + raise ImportError(msg, name=name) + + +def find_module(name, path=None): + """**DEPRECATED** + + Search for a module. + + If path is omitted or None, search for a built-in, frozen or special + module and continue search in sys.path. The module name cannot + contain '.'; to search for a submodule of a package, pass the + submodule name and the package's __path__. 
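+
+    A hedged usage sketch of this deprecated find/load pair (the modern
+    replacement is importlib.util.find_spec):
+
+        >>> import imp
+        >>> file, pathname, description = imp.find_module('heapq')
+        >>> module = imp.load_module('heapq', file, pathname, description)
+        >>> if file: file.close()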
+ + """ + if not isinstance(name, str): + raise TypeError("'name' must be a str, not {}".format(type(name))) + elif not isinstance(path, (type(None), list)): + # Backwards-compatibility + raise RuntimeError("'path' must be None or a list, " + "not {}".format(type(path))) + + if path is None: + if is_builtin(name): + return None, None, ('', '', C_BUILTIN) + elif is_frozen(name): + return None, None, ('', '', PY_FROZEN) + else: + path = sys.path + + for entry in path: + package_directory = os.path.join(entry, name) + for suffix in ['.py', machinery.BYTECODE_SUFFIXES[0]]: + package_file_name = '__init__' + suffix + file_path = os.path.join(package_directory, package_file_name) + if os.path.isfile(file_path): + return None, package_directory, ('', '', PKG_DIRECTORY) + for suffix, mode, type_ in get_suffixes(): + file_name = name + suffix + file_path = os.path.join(entry, file_name) + if os.path.isfile(file_path): + break + else: + continue + break # Break out of outer loop when breaking out of inner loop. + else: + raise ImportError(_ERR_MSG.format(name), name=name) + + encoding = None + if 'b' not in mode: + with open(file_path, 'rb') as file: + encoding = tokenize.detect_encoding(file.readline)[0] + file = open(file_path, mode, encoding=encoding) + return file, file_path, (suffix, mode, type_) + + +def reload(module): + """**DEPRECATED** + + Reload the module and return it. + + The module must have been successfully imported before. + + """ + return importlib.reload(module) + + +def init_builtin(name): + """**DEPRECATED** + + Load and return a built-in module by name, or None is such module doesn't + exist + """ + try: + return _builtin_from_name(name) + except ImportError: + return None + + +if create_dynamic: + def load_dynamic(name, path, file=None): + """**DEPRECATED** + + Load an extension module. + """ + import importlib.machinery + loader = importlib.machinery.ExtensionFileLoader(name, path) + + # Issue #24748: Skip the sys.modules check in _load_module_shim; + # always load new extension + spec = importlib.machinery.ModuleSpec( + name=name, loader=loader, origin=path) + return _load(spec) + +else: + load_dynamic = None diff --git a/llava/lib/python3.10/inspect.py b/llava/lib/python3.10/inspect.py new file mode 100644 index 0000000000000000000000000000000000000000..2999a6019e0f60774dc4d2a1b422794cc84485a2 --- /dev/null +++ b/llava/lib/python3.10/inspect.py @@ -0,0 +1,3317 @@ +"""Get useful information from live Python objects. + +This module encapsulates the interface provided by the internal special +attributes (co_*, im_*, tb_*, etc.) in a friendlier fashion. +It also provides some help for examining source code and class layout. 
+
+Here are some of the useful functions provided by this module:
+
+    ismodule(), isclass(), ismethod(), isfunction(), isgeneratorfunction(),
+        isgenerator(), istraceback(), isframe(), iscode(), isbuiltin(),
+        isroutine() - check object types
+    getmembers() - get members of an object that satisfy a given condition
+
+    getfile(), getsourcefile(), getsource() - find an object's source code
+    getdoc(), getcomments() - get documentation on an object
+    getmodule() - determine the module that an object came from
+    getclasstree() - arrange classes so as to represent their hierarchy
+
+    getargvalues(), getcallargs() - get info about function arguments
+    getfullargspec() - same, with support for Python 3 features
+    formatargvalues() - format an argument spec
+    getouterframes(), getinnerframes() - get info about frames
+    currentframe() - get the current stack frame
+    stack(), trace() - get info about frames on the stack or in a traceback
+
+    signature() - get a Signature object for the callable
+
+    get_annotations() - safely compute an object's annotations
+"""
+
+# This module is in the public domain. No warranties.
+
+__author__ = ('Ka-Ping Yee <ping@lfw.org>',
+              'Yury Selivanov <yselivanov@sprymix.com>')
+
+import abc
+import ast
+import dis
+import collections.abc
+import enum
+import importlib.machinery
+import itertools
+import linecache
+import os
+import re
+import sys
+import tokenize
+import token
+import types
+import warnings
+import functools
+import builtins
+from operator import attrgetter
+from collections import namedtuple, OrderedDict
+
+# Create constants for the compiler flags in Include/code.h
+# We try to get them from dis to avoid duplication
+mod_dict = globals()
+for k, v in dis.COMPILER_FLAG_NAMES.items():
+    mod_dict["CO_" + v] = k
+
+# See Include/object.h
+TPFLAGS_IS_ABSTRACT = 1 << 20
+
+
+def get_annotations(obj, *, globals=None, locals=None, eval_str=False):
+    """Compute the annotations dict for an object.
+
+    obj may be a callable, class, or module.
+    Passing in an object of any other type raises TypeError.
+
+    Returns a dict. get_annotations() returns a new dict every time
+    it's called; calling it twice on the same object will return two
+    different but equivalent dicts.
+
+    This function handles several details for you:
+
+      * If eval_str is true, values of type str will
+        be un-stringized using eval(). This is intended
+        for use with stringized annotations
+        ("from __future__ import annotations").
+      * If obj doesn't have an annotations dict, returns an
+        empty dict. (Functions and methods always have an
+        annotations dict; classes, modules, and other types of
+        callables may not.)
+      * Ignores inherited annotations on classes. If a class
+        doesn't have its own annotations dict, returns an empty dict.
+      * All accesses to object members and dict values are done
+        using getattr() and dict.get() for safety.
+      * Always, always, always returns a freshly-created dict.
+
+    eval_str controls whether or not values of type str are replaced
+    with the result of calling eval() on those values:
+
+      * If eval_str is true, eval() is called on values of type str.
+      * If eval_str is false (the default), values of type str are unchanged.
+
+    globals and locals are passed in to eval(); see the documentation
+    for eval() for more information. If either globals or locals is
+    None, this function may replace that value with a context-specific
+    default, contingent on type(obj):
+
+      * If obj is a module, globals defaults to obj.__dict__.
+ * If obj is a class, globals defaults to + sys.modules[obj.__module__].__dict__ and locals + defaults to the obj class namespace. + * If obj is a callable, globals defaults to obj.__globals__, + although if obj is a wrapped function (using + functools.update_wrapper()) it is first unwrapped. + """ + if isinstance(obj, type): + # class + obj_dict = getattr(obj, '__dict__', None) + if obj_dict and hasattr(obj_dict, 'get'): + ann = obj_dict.get('__annotations__', None) + if isinstance(ann, types.GetSetDescriptorType): + ann = None + else: + ann = None + + obj_globals = None + module_name = getattr(obj, '__module__', None) + if module_name: + module = sys.modules.get(module_name, None) + if module: + obj_globals = getattr(module, '__dict__', None) + obj_locals = dict(vars(obj)) + unwrap = obj + elif isinstance(obj, types.ModuleType): + # module + ann = getattr(obj, '__annotations__', None) + obj_globals = getattr(obj, '__dict__') + obj_locals = None + unwrap = None + elif callable(obj): + # this includes types.Function, types.BuiltinFunctionType, + # types.BuiltinMethodType, functools.partial, functools.singledispatch, + # "class funclike" from Lib/test/test_inspect... on and on it goes. + ann = getattr(obj, '__annotations__', None) + obj_globals = getattr(obj, '__globals__', None) + obj_locals = None + unwrap = obj + else: + raise TypeError(f"{obj!r} is not a module, class, or callable.") + + if ann is None: + return {} + + if not isinstance(ann, dict): + raise ValueError(f"{obj!r}.__annotations__ is neither a dict nor None") + + if not ann: + return {} + + if not eval_str: + return dict(ann) + + if unwrap is not None: + while True: + if hasattr(unwrap, '__wrapped__'): + unwrap = unwrap.__wrapped__ + continue + if isinstance(unwrap, functools.partial): + unwrap = unwrap.func + continue + break + if hasattr(unwrap, "__globals__"): + obj_globals = unwrap.__globals__ + + if globals is None: + globals = obj_globals + if locals is None: + locals = obj_locals + + return_value = {key: + value if not isinstance(value, str) else eval(value, globals, locals) + for key, value in ann.items() } + return return_value + + +# ----------------------------------------------------------- type-checking +def ismodule(object): + """Return true if the object is a module. + + Module objects provide these attributes: + __cached__ pathname to byte compiled file + __doc__ documentation string + __file__ filename (missing for built-in modules)""" + return isinstance(object, types.ModuleType) + +def isclass(object): + """Return true if the object is a class. + + Class objects provide these attributes: + __doc__ documentation string + __module__ name of module in which this class was defined""" + return isinstance(object, type) + +def ismethod(object): + """Return true if the object is an instance method. + + Instance method objects provide these attributes: + __doc__ documentation string + __name__ name with which this method was defined + __func__ function object containing implementation of method + __self__ instance to which this method is bound""" + return isinstance(object, types.MethodType) + +def ismethoddescriptor(object): + """Return true if the object is a method descriptor. + + But not if ismethod() or isclass() or isfunction() are true. + + This is new in Python 2.2, and, for example, is true of int.__add__. + An object passing this test has a __get__ attribute but not a __set__ + attribute, but beyond that the set of attributes varies. __name__ is + usually sensible, and __doc__ often is. 
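+
+    For example:
+
+        >>> import inspect
+        >>> inspect.ismethoddescriptor(int.__add__)
+        True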
+ + Methods implemented via descriptors that also pass one of the other + tests return false from the ismethoddescriptor() test, simply because + the other tests promise more -- you can, e.g., count on having the + __func__ attribute (etc) when an object passes ismethod().""" + if isclass(object) or ismethod(object) or isfunction(object): + # mutual exclusion + return False + tp = type(object) + return hasattr(tp, "__get__") and not hasattr(tp, "__set__") + +def isdatadescriptor(object): + """Return true if the object is a data descriptor. + + Data descriptors have a __set__ or a __delete__ attribute. Examples are + properties (defined in Python) and getsets and members (defined in C). + Typically, data descriptors will also have __name__ and __doc__ attributes + (properties, getsets, and members have both of these attributes), but this + is not guaranteed.""" + if isclass(object) or ismethod(object) or isfunction(object): + # mutual exclusion + return False + tp = type(object) + return hasattr(tp, "__set__") or hasattr(tp, "__delete__") + +if hasattr(types, 'MemberDescriptorType'): + # CPython and equivalent + def ismemberdescriptor(object): + """Return true if the object is a member descriptor. + + Member descriptors are specialized descriptors defined in extension + modules.""" + return isinstance(object, types.MemberDescriptorType) +else: + # Other implementations + def ismemberdescriptor(object): + """Return true if the object is a member descriptor. + + Member descriptors are specialized descriptors defined in extension + modules.""" + return False + +if hasattr(types, 'GetSetDescriptorType'): + # CPython and equivalent + def isgetsetdescriptor(object): + """Return true if the object is a getset descriptor. + + getset descriptors are specialized descriptors defined in extension + modules.""" + return isinstance(object, types.GetSetDescriptorType) +else: + # Other implementations + def isgetsetdescriptor(object): + """Return true if the object is a getset descriptor. + + getset descriptors are specialized descriptors defined in extension + modules.""" + return False + +def isfunction(object): + """Return true if the object is a user-defined function. + + Function objects provide these attributes: + __doc__ documentation string + __name__ name with which this function was defined + __code__ code object containing compiled function bytecode + __defaults__ tuple of any default values for arguments + __globals__ global namespace in which this function was defined + __annotations__ dict of parameter annotations + __kwdefaults__ dict of keyword only parameters with defaults""" + return isinstance(object, types.FunctionType) + +def _has_code_flag(f, flag): + """Return true if ``f`` is a function (or a method or functools.partial + wrapper wrapping a function) whose code object has the given ``flag`` + set in its flags.""" + while ismethod(f): + f = f.__func__ + f = functools._unwrap_partial(f) + if not (isfunction(f) or _signature_is_functionlike(f)): + return False + return bool(f.__code__.co_flags & flag) + +def isgeneratorfunction(obj): + """Return true if the object is a user-defined generator function. + + Generator function objects provide the same attributes as functions. + See help(isfunction) for a list of attributes.""" + return _has_code_flag(obj, CO_GENERATOR) + +def iscoroutinefunction(obj): + """Return true if the object is a coroutine function. + + Coroutine functions are defined with "async def" syntax. 
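+
+    For example:
+
+        >>> import inspect
+        >>> async def f(): pass
+        >>> inspect.iscoroutinefunction(f)
+        True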
+ """ + return _has_code_flag(obj, CO_COROUTINE) + +def isasyncgenfunction(obj): + """Return true if the object is an asynchronous generator function. + + Asynchronous generator functions are defined with "async def" + syntax and have "yield" expressions in their body. + """ + return _has_code_flag(obj, CO_ASYNC_GENERATOR) + +def isasyncgen(object): + """Return true if the object is an asynchronous generator.""" + return isinstance(object, types.AsyncGeneratorType) + +def isgenerator(object): + """Return true if the object is a generator. + + Generator objects provide these attributes: + __iter__ defined to support iteration over container + close raises a new GeneratorExit exception inside the + generator to terminate the iteration + gi_code code object + gi_frame frame object or possibly None once the generator has + been exhausted + gi_running set to 1 when generator is executing, 0 otherwise + next return the next item from the container + send resumes the generator and "sends" a value that becomes + the result of the current yield-expression + throw used to raise an exception inside the generator""" + return isinstance(object, types.GeneratorType) + +def iscoroutine(object): + """Return true if the object is a coroutine.""" + return isinstance(object, types.CoroutineType) + +def isawaitable(object): + """Return true if object can be passed to an ``await`` expression.""" + return (isinstance(object, types.CoroutineType) or + isinstance(object, types.GeneratorType) and + bool(object.gi_code.co_flags & CO_ITERABLE_COROUTINE) or + isinstance(object, collections.abc.Awaitable)) + +def istraceback(object): + """Return true if the object is a traceback. + + Traceback objects provide these attributes: + tb_frame frame object at this level + tb_lasti index of last attempted instruction in bytecode + tb_lineno current line number in Python source code + tb_next next inner traceback object (called by this level)""" + return isinstance(object, types.TracebackType) + +def isframe(object): + """Return true if the object is a frame object. + + Frame objects provide these attributes: + f_back next outer frame object (this frame's caller) + f_builtins built-in namespace seen by this frame + f_code code object being executed in this frame + f_globals global namespace seen by this frame + f_lasti index of last attempted instruction in bytecode + f_lineno current line number in Python source code + f_locals local namespace seen by this frame + f_trace tracing function for this frame, or None""" + return isinstance(object, types.FrameType) + +def iscode(object): + """Return true if the object is a code object. 
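+
+    For example:
+
+        >>> import inspect
+        >>> inspect.iscode((lambda: None).__code__)
+        True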
+ + Code objects provide these attributes: + co_argcount number of arguments (not including *, ** args + or keyword only arguments) + co_code string of raw compiled bytecode + co_cellvars tuple of names of cell variables + co_consts tuple of constants used in the bytecode + co_filename name of file in which this code object was created + co_firstlineno number of first line in Python source code + co_flags bitmap: 1=optimized | 2=newlocals | 4=*arg | 8=**arg + | 16=nested | 32=generator | 64=nofree | 128=coroutine + | 256=iterable_coroutine | 512=async_generator + co_freevars tuple of names of free variables + co_posonlyargcount number of positional only arguments + co_kwonlyargcount number of keyword only arguments (not including ** arg) + co_lnotab encoded mapping of line numbers to bytecode indices + co_name name with which this code object was defined + co_names tuple of names other than arguments and function locals + co_nlocals number of local variables + co_stacksize virtual machine stack space required + co_varnames tuple of names of arguments and local variables""" + return isinstance(object, types.CodeType) + +def isbuiltin(object): + """Return true if the object is a built-in function or method. + + Built-in functions and methods provide these attributes: + __doc__ documentation string + __name__ original name of this function or method + __self__ instance to which a method is bound, or None""" + return isinstance(object, types.BuiltinFunctionType) + +def isroutine(object): + """Return true if the object is any kind of function or method.""" + return (isbuiltin(object) + or isfunction(object) + or ismethod(object) + or ismethoddescriptor(object)) + +def isabstract(object): + """Return true if the object is an abstract base class (ABC).""" + if not isinstance(object, type): + return False + if object.__flags__ & TPFLAGS_IS_ABSTRACT: + return True + if not issubclass(type(object), abc.ABCMeta): + return False + if hasattr(object, '__abstractmethods__'): + # It looks like ABCMeta.__new__ has finished running; + # TPFLAGS_IS_ABSTRACT should have been accurate. + return False + # It looks like ABCMeta.__new__ has not finished running yet; we're + # probably in __init_subclass__. We'll look for abstractmethods manually. + for name, value in object.__dict__.items(): + if getattr(value, "__isabstractmethod__", False): + return True + for base in object.__bases__: + for name in getattr(base, "__abstractmethods__", ()): + value = getattr(object, name, None) + if getattr(value, "__isabstractmethod__", False): + return True + return False + +def getmembers(object, predicate=None): + """Return all members of an object as (name, value) pairs sorted by name. + Optionally, only return members that satisfy a given predicate.""" + if isclass(object): + mro = (object,) + getmro(object) + else: + mro = () + results = [] + processed = set() + names = dir(object) + # :dd any DynamicClassAttributes to the list of names if object is a class; + # this may result in duplicate entries if, for example, a virtual + # attribute with the same name as a DynamicClassAttribute exists + try: + for base in object.__bases__: + for k, v in base.__dict__.items(): + if isinstance(v, types.DynamicClassAttribute): + names.append(k) + except AttributeError: + pass + for key in names: + # First try to get the value via getattr. Some descriptors don't + # like calling their __get__ (see bug #1785), so fall back to + # looking in the __dict__. 
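+        # (Names can appear twice when a DynamicClassAttribute shadows a
+        # regular attribute; the `key in processed` check below forces the
+        # second occurrence to be resolved through the MRO instead.)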
+ try: + value = getattr(object, key) + # handle the duplicate key + if key in processed: + raise AttributeError + except AttributeError: + for base in mro: + if key in base.__dict__: + value = base.__dict__[key] + break + else: + # could be a (currently) missing slot member, or a buggy + # __dir__; discard and move on + continue + if not predicate or predicate(value): + results.append((key, value)) + processed.add(key) + results.sort(key=lambda pair: pair[0]) + return results + +Attribute = namedtuple('Attribute', 'name kind defining_class object') + +def classify_class_attrs(cls): + """Return list of attribute-descriptor tuples. + + For each name in dir(cls), the return list contains a 4-tuple + with these elements: + + 0. The name (a string). + + 1. The kind of attribute this is, one of these strings: + 'class method' created via classmethod() + 'static method' created via staticmethod() + 'property' created via property() + 'method' any other flavor of method or descriptor + 'data' not a method + + 2. The class which defined this attribute (a class). + + 3. The object as obtained by calling getattr; if this fails, or if the + resulting object does not live anywhere in the class' mro (including + metaclasses) then the object is looked up in the defining class's + dict (found by walking the mro). + + If one of the items in dir(cls) is stored in the metaclass it will now + be discovered and not have None be listed as the class in which it was + defined. Any items whose home class cannot be discovered are skipped. + """ + + mro = getmro(cls) + metamro = getmro(type(cls)) # for attributes stored in the metaclass + metamro = tuple(cls for cls in metamro if cls not in (type, object)) + class_bases = (cls,) + mro + all_bases = class_bases + metamro + names = dir(cls) + # :dd any DynamicClassAttributes to the list of names; + # this may result in duplicate entries if, for example, a virtual + # attribute with the same name as a DynamicClassAttribute exists. + for base in mro: + for k, v in base.__dict__.items(): + if isinstance(v, types.DynamicClassAttribute) and v.fget is not None: + names.append(k) + result = [] + processed = set() + + for name in names: + # Get the object associated with the name, and where it was defined. + # Normal objects will be looked up with both getattr and directly in + # its class' dict (in case getattr fails [bug #1785], and also to look + # for a docstring). + # For DynamicClassAttributes on the second pass we only look in the + # class's dict. + # + # Getting an obj from the __dict__ sometimes reveals more than + # using getattr. Static and class methods are dramatic examples. 
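+        # (For instance, getattr(cls, name) on a staticmethod yields the
+        # bare function, while cls.__dict__[name] is the staticmethod
+        # object itself, which is what the classification below keys on.)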
+ homecls = None + get_obj = None + dict_obj = None + if name not in processed: + try: + if name == '__dict__': + raise Exception("__dict__ is special, don't want the proxy") + get_obj = getattr(cls, name) + except Exception as exc: + pass + else: + homecls = getattr(get_obj, "__objclass__", homecls) + if homecls not in class_bases: + # if the resulting object does not live somewhere in the + # mro, drop it and search the mro manually + homecls = None + last_cls = None + # first look in the classes + for srch_cls in class_bases: + srch_obj = getattr(srch_cls, name, None) + if srch_obj is get_obj: + last_cls = srch_cls + # then check the metaclasses + for srch_cls in metamro: + try: + srch_obj = srch_cls.__getattr__(cls, name) + except AttributeError: + continue + if srch_obj is get_obj: + last_cls = srch_cls + if last_cls is not None: + homecls = last_cls + for base in all_bases: + if name in base.__dict__: + dict_obj = base.__dict__[name] + if homecls not in metamro: + homecls = base + break + if homecls is None: + # unable to locate the attribute anywhere, most likely due to + # buggy custom __dir__; discard and move on + continue + obj = get_obj if get_obj is not None else dict_obj + # Classify the object or its descriptor. + if isinstance(dict_obj, (staticmethod, types.BuiltinMethodType)): + kind = "static method" + obj = dict_obj + elif isinstance(dict_obj, (classmethod, types.ClassMethodDescriptorType)): + kind = "class method" + obj = dict_obj + elif isinstance(dict_obj, property): + kind = "property" + obj = dict_obj + elif isroutine(obj): + kind = "method" + else: + kind = "data" + result.append(Attribute(name, kind, homecls, obj)) + processed.add(name) + return result + +# ----------------------------------------------------------- class helpers + +def getmro(cls): + "Return tuple of base classes (including cls) in method resolution order." + return cls.__mro__ + +# -------------------------------------------------------- function helpers + +def unwrap(func, *, stop=None): + """Get the object wrapped by *func*. + + Follows the chain of :attr:`__wrapped__` attributes returning the last + object in the chain. + + *stop* is an optional callback accepting an object in the wrapper chain + as its sole argument that allows the unwrapping to be terminated early if + the callback returns a true value. If the callback never returns a true + value, the last object in the chain is returned as usual. For example, + :func:`signature` uses this to stop unwrapping if any object in the + chain has a ``__signature__`` attribute defined. + + :exc:`ValueError` is raised if a cycle is encountered. + + """ + if stop is None: + def _is_wrapper(f): + return hasattr(f, '__wrapped__') + else: + def _is_wrapper(f): + return hasattr(f, '__wrapped__') and not stop(f) + f = func # remember the original func for error reporting + # Memoise by id to tolerate non-hashable objects, but store objects to + # ensure they aren't destroyed, which would allow their IDs to be reused. 
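+    # (If a wrapper in the chain were garbage collected, a later object
+    # could be allocated at the same address and its id() would collide;
+    # the strong references kept in memo rule that out.)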
+ memo = {id(f): f} + recursion_limit = sys.getrecursionlimit() + while _is_wrapper(func): + func = func.__wrapped__ + id_func = id(func) + if (id_func in memo) or (len(memo) >= recursion_limit): + raise ValueError('wrapper loop when unwrapping {!r}'.format(f)) + memo[id_func] = func + return func + +# -------------------------------------------------- source code extraction +def indentsize(line): + """Return the indent size, in spaces, at the start of a line of text.""" + expline = line.expandtabs() + return len(expline) - len(expline.lstrip()) + +def _findclass(func): + cls = sys.modules.get(func.__module__) + if cls is None: + return None + for name in func.__qualname__.split('.')[:-1]: + cls = getattr(cls, name) + if not isclass(cls): + return None + return cls + +def _finddoc(obj): + if isclass(obj): + for base in obj.__mro__: + if base is not object: + try: + doc = base.__doc__ + except AttributeError: + continue + if doc is not None: + return doc + return None + + if ismethod(obj): + name = obj.__func__.__name__ + self = obj.__self__ + if (isclass(self) and + getattr(getattr(self, name, None), '__func__') is obj.__func__): + # classmethod + cls = self + else: + cls = self.__class__ + elif isfunction(obj): + name = obj.__name__ + cls = _findclass(obj) + if cls is None or getattr(cls, name) is not obj: + return None + elif isbuiltin(obj): + name = obj.__name__ + self = obj.__self__ + if (isclass(self) and + self.__qualname__ + '.' + name == obj.__qualname__): + # classmethod + cls = self + else: + cls = self.__class__ + # Should be tested before isdatadescriptor(). + elif isinstance(obj, property): + func = obj.fget + name = func.__name__ + cls = _findclass(func) + if cls is None or getattr(cls, name) is not obj: + return None + elif ismethoddescriptor(obj) or isdatadescriptor(obj): + name = obj.__name__ + cls = obj.__objclass__ + if getattr(cls, name) is not obj: + return None + if ismemberdescriptor(obj): + slots = getattr(cls, '__slots__', None) + if isinstance(slots, dict) and name in slots: + return slots[name] + else: + return None + for base in cls.__mro__: + try: + doc = getattr(base, name).__doc__ + except AttributeError: + continue + if doc is not None: + return doc + return None + +def getdoc(object): + """Get the documentation string for an object. + + All tabs are expanded to spaces. To clean up docstrings that are + indented to line up with blocks of code, any whitespace than can be + uniformly removed from the second line onwards is removed.""" + try: + doc = object.__doc__ + except AttributeError: + return None + if doc is None: + try: + doc = _finddoc(object) + except (AttributeError, TypeError): + return None + if not isinstance(doc, str): + return None + return cleandoc(doc) + +def cleandoc(doc): + """Clean up indentation from docstrings. + + Any whitespace that can be uniformly removed from the second line + onwards is removed.""" + try: + lines = doc.expandtabs().split('\n') + except UnicodeError: + return None + else: + # Find minimum indentation of any non-blank lines after first line. + margin = sys.maxsize + for line in lines[1:]: + content = len(line.lstrip()) + if content: + indent = len(line) - content + margin = min(margin, indent) + # Remove indentation. + if lines: + lines[0] = lines[0].lstrip() + if margin < sys.maxsize: + for i in range(1, len(lines)): lines[i] = lines[i][margin:] + # Remove any trailing or leading blank lines. 
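+        # (e.g. cleandoc("  Title\n\n      body\n  ") == "Title\n\nbody")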
+ while lines and not lines[-1]: + lines.pop() + while lines and not lines[0]: + lines.pop(0) + return '\n'.join(lines) + +def getfile(object): + """Work out which source or compiled file an object was defined in.""" + if ismodule(object): + if getattr(object, '__file__', None): + return object.__file__ + raise TypeError('{!r} is a built-in module'.format(object)) + if isclass(object): + if hasattr(object, '__module__'): + module = sys.modules.get(object.__module__) + if getattr(module, '__file__', None): + return module.__file__ + if object.__module__ == '__main__': + raise OSError('source code not available') + raise TypeError('{!r} is a built-in class'.format(object)) + if ismethod(object): + object = object.__func__ + if isfunction(object): + object = object.__code__ + if istraceback(object): + object = object.tb_frame + if isframe(object): + object = object.f_code + if iscode(object): + return object.co_filename + raise TypeError('module, class, method, function, traceback, frame, or ' + 'code object was expected, got {}'.format( + type(object).__name__)) + +def getmodulename(path): + """Return the module name for a given file, or None.""" + fname = os.path.basename(path) + # Check for paths that look like an actual module file + suffixes = [(-len(suffix), suffix) + for suffix in importlib.machinery.all_suffixes()] + suffixes.sort() # try longest suffixes first, in case they overlap + for neglen, suffix in suffixes: + if fname.endswith(suffix): + return fname[:neglen] + return None + +def getsourcefile(object): + """Return the filename that can be used to locate an object's source. + Return None if no way can be identified to get the source. + """ + filename = getfile(object) + all_bytecode_suffixes = importlib.machinery.DEBUG_BYTECODE_SUFFIXES[:] + all_bytecode_suffixes += importlib.machinery.OPTIMIZED_BYTECODE_SUFFIXES[:] + if any(filename.endswith(s) for s in all_bytecode_suffixes): + filename = (os.path.splitext(filename)[0] + + importlib.machinery.SOURCE_SUFFIXES[0]) + elif any(filename.endswith(s) for s in + importlib.machinery.EXTENSION_SUFFIXES): + return None + if os.path.exists(filename): + return filename + # only return a non-existent filename if the module has a PEP 302 loader + module = getmodule(object, filename) + if getattr(module, '__loader__', None) is not None: + return filename + elif getattr(getattr(module, "__spec__", None), "loader", None) is not None: + return filename + # or it is in the linecache + elif filename in linecache.cache: + return filename + +def getabsfile(object, _filename=None): + """Return an absolute path to the source or compiled file for an object. 
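+
+    For example, getabsfile(os.path) typically yields something like
+    '/usr/lib/python3.10/posixpath.py'; the exact path depends on the
+    installation.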
+
+    The idea is for each object to have a unique origin, so this routine
+    normalizes the result as much as possible."""
+    if _filename is None:
+        _filename = getsourcefile(object) or getfile(object)
+    return os.path.normcase(os.path.abspath(_filename))
+
+modulesbyfile = {}
+_filesbymodname = {}
+
+def getmodule(object, _filename=None):
+    """Return the module an object was defined in, or None if not found."""
+    if ismodule(object):
+        return object
+    if hasattr(object, '__module__'):
+        return sys.modules.get(object.__module__)
+    # Try the filename to modulename cache
+    if _filename is not None and _filename in modulesbyfile:
+        return sys.modules.get(modulesbyfile[_filename])
+    # Try the cache again with the absolute file name
+    try:
+        file = getabsfile(object, _filename)
+    except (TypeError, FileNotFoundError):
+        return None
+    if file in modulesbyfile:
+        return sys.modules.get(modulesbyfile[file])
+    # Update the filename to module name cache and check yet again
+    # Copy sys.modules in order to cope with changes while iterating
+    for modname, module in sys.modules.copy().items():
+        if ismodule(module) and hasattr(module, '__file__'):
+            f = module.__file__
+            if f == _filesbymodname.get(modname, None):
+                # Have already mapped this module, so skip it
+                continue
+            _filesbymodname[modname] = f
+            f = getabsfile(module)
+            # Always map to the name the module knows itself by
+            modulesbyfile[f] = modulesbyfile[
+                os.path.realpath(f)] = module.__name__
+    if file in modulesbyfile:
+        return sys.modules.get(modulesbyfile[file])
+    # Check the main module
+    main = sys.modules['__main__']
+    if not hasattr(object, '__name__'):
+        return None
+    if hasattr(main, object.__name__):
+        mainobject = getattr(main, object.__name__)
+        if mainobject is object:
+            return main
+    # Check builtins
+    builtin = sys.modules['builtins']
+    if hasattr(builtin, object.__name__):
+        builtinobject = getattr(builtin, object.__name__)
+        if builtinobject is object:
+            return builtin
+
+
+class ClassFoundException(Exception):
+    pass
+
+
+class _ClassFinder(ast.NodeVisitor):
+
+    def __init__(self, qualname):
+        self.stack = []
+        self.qualname = qualname
+
+    def visit_FunctionDef(self, node):
+        self.stack.append(node.name)
+        self.stack.append('<locals>')
+        self.generic_visit(node)
+        self.stack.pop()
+        self.stack.pop()
+
+    visit_AsyncFunctionDef = visit_FunctionDef
+
+    def visit_ClassDef(self, node):
+        self.stack.append(node.name)
+        if self.qualname == '.'.join(self.stack):
+            # Return the decorator for the class if present
+            if node.decorator_list:
+                line_number = node.decorator_list[0].lineno
+            else:
+                line_number = node.lineno
+
+            # decrement by one since line numbers are one-based but list
+            # indexing is zero-based
+            line_number -= 1
+            raise ClassFoundException(line_number)
+        self.generic_visit(node)
+        self.stack.pop()
+
+
+def findsource(object):
+    """Return the entire source file and starting line number for an object.
+
+    The argument may be a module, class, method, function, traceback, frame,
+    or code object. The source code is returned as a list of all the lines
+    in the file and the line number indexes a line in that list. An OSError
+    is raised if the source code cannot be retrieved."""
+
+    file = getsourcefile(object)
+    if file:
+        # Invalidate cache if needed.
+        linecache.checkcache(file)
+    else:
+        file = getfile(object)
+        # Allow filenames in form of "<something>" to pass through.
+        # `doctest` monkeypatches `linecache` module to enable
+        # inspection, so let `linecache.getlines` be called.
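+        # (Such pseudo-filenames look like "<doctest ...>" or "<stdin>";
+        # linecache may hold their source even though no file exists on
+        # disk.)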
+        if not (file.startswith('<') and file.endswith('>')):
+            raise OSError('source code not available')
+
+    module = getmodule(object, file)
+    if module:
+        lines = linecache.getlines(file, module.__dict__)
+    else:
+        lines = linecache.getlines(file)
+    if not lines:
+        raise OSError('could not get source code')
+
+    if ismodule(object):
+        return lines, 0
+
+    if isclass(object):
+        qualname = object.__qualname__
+        source = ''.join(lines)
+        tree = ast.parse(source)
+        class_finder = _ClassFinder(qualname)
+        try:
+            class_finder.visit(tree)
+        except ClassFoundException as e:
+            line_number = e.args[0]
+            return lines, line_number
+        else:
+            raise OSError('could not find class definition')
+
+    if ismethod(object):
+        object = object.__func__
+    if isfunction(object):
+        object = object.__code__
+    if istraceback(object):
+        object = object.tb_frame
+    if isframe(object):
+        object = object.f_code
+    if iscode(object):
+        if not hasattr(object, 'co_firstlineno'):
+            raise OSError('could not find function definition')
+        lnum = object.co_firstlineno - 1
+        pat = re.compile(r'^(\s*def\s)|(\s*async\s+def\s)|(.*(?<!\w)lambda(:|\s))|^(\s*@)')
+        while lnum > 0:
+            try:
+                line = lines[lnum]
+            except IndexError:
+                raise OSError('lineno is out of bounds')
+            if pat.match(line):
+                break
+            lnum = lnum - 1
+        return lines, lnum
+    raise OSError('could not find code object')
+
+def getcomments(object):
+    """Get lines of comments immediately preceding an object's source code.
+
+    Returns None when source can't be found.
+    """
+    try:
+        lines, lnum = findsource(object)
+    except (OSError, TypeError):
+        return None
+
+    if ismodule(object):
+        # Look for a comment block at the top of the file.
+        start = 0
+        if lines and lines[0][:2] == '#!': start = 1
+        while start < len(lines) and lines[start].strip() in ('', '#'):
+            start = start + 1
+        if start < len(lines) and lines[start][:1] == '#':
+            comments = []
+            end = start
+            while end < len(lines) and lines[end][:1] == '#':
+                comments.append(lines[end].expandtabs())
+                end = end + 1
+            return ''.join(comments)
+
+    # Look for a preceding block of comments at the same indentation.
+ elif lnum > 0: + indent = indentsize(lines[lnum]) + end = lnum - 1 + if end >= 0 and lines[end].lstrip()[:1] == '#' and \ + indentsize(lines[end]) == indent: + comments = [lines[end].expandtabs().lstrip()] + if end > 0: + end = end - 1 + comment = lines[end].expandtabs().lstrip() + while comment[:1] == '#' and indentsize(lines[end]) == indent: + comments[:0] = [comment] + end = end - 1 + if end < 0: break + comment = lines[end].expandtabs().lstrip() + while comments and comments[0].strip() == '#': + comments[:1] = [] + while comments and comments[-1].strip() == '#': + comments[-1:] = [] + return ''.join(comments) + +class EndOfBlock(Exception): pass + +class BlockFinder: + """Provide a tokeneater() method to detect the end of a code block.""" + def __init__(self): + self.indent = 0 + self.islambda = False + self.started = False + self.passline = False + self.indecorator = False + self.last = 1 + self.body_col0 = None + + def tokeneater(self, type, token, srowcol, erowcol, line): + if not self.started and not self.indecorator: + # skip any decorators + if token == "@": + self.indecorator = True + # look for the first "def", "class" or "lambda" + elif token in ("def", "class", "lambda"): + if token == "lambda": + self.islambda = True + self.started = True + self.passline = True # skip to the end of the line + elif type == tokenize.NEWLINE: + self.passline = False # stop skipping when a NEWLINE is seen + self.last = srowcol[0] + if self.islambda: # lambdas always end at the first NEWLINE + raise EndOfBlock + # hitting a NEWLINE when in a decorator without args + # ends the decorator + if self.indecorator: + self.indecorator = False + elif self.passline: + pass + elif type == tokenize.INDENT: + if self.body_col0 is None and self.started: + self.body_col0 = erowcol[1] + self.indent = self.indent + 1 + self.passline = True + elif type == tokenize.DEDENT: + self.indent = self.indent - 1 + # the end of matching indent/dedent pairs end a block + # (note that this only works for "def"/"class" blocks, + # not e.g. for "if: else:" or "try: finally:" blocks) + if self.indent <= 0: + raise EndOfBlock + elif type == tokenize.COMMENT: + if self.body_col0 is not None and srowcol[1] >= self.body_col0: + # Include comments if indented at least as much as the block + self.last = srowcol[0] + elif self.indent == 0 and type not in (tokenize.COMMENT, tokenize.NL): + # any other token on the same indentation level end the previous + # block as well, except the pseudo-tokens COMMENT and NL. + raise EndOfBlock + +def getblock(lines): + """Extract the block of code at the top of the given list of lines.""" + blockfinder = BlockFinder() + try: + tokens = tokenize.generate_tokens(iter(lines).__next__) + for _token in tokens: + blockfinder.tokeneater(*_token) + except (EndOfBlock, IndentationError): + pass + return lines[:blockfinder.last] + +def getsourcelines(object): + """Return a list of source lines and starting line number for an object. + + The argument may be a module, class, method, function, traceback, frame, + or code object. The source code is returned as a list of the lines + corresponding to the object and the line number indicates where in the + original source file the first line of code was found. 
An OSError is
+    raised if the source code cannot be retrieved."""
+    object = unwrap(object)
+    lines, lnum = findsource(object)
+
+    if istraceback(object):
+        object = object.tb_frame
+
+    # for module or frame that corresponds to module, return all source lines
+    if (ismodule(object) or
+          (isframe(object) and object.f_code.co_name == "<module>")):
+        return lines, 0
+    else:
+        return getblock(lines[lnum:]), lnum + 1
+
+def getsource(object):
+    """Return the text of the source code for an object.
+
+    The argument may be a module, class, method, function, traceback, frame,
+    or code object. The source code is returned as a single string. An
+    OSError is raised if the source code cannot be retrieved."""
+    lines, lnum = getsourcelines(object)
+    return ''.join(lines)
+
+# --------------------------------------------------- class tree extraction
+def walktree(classes, children, parent):
+    """Recursive helper function for getclasstree()."""
+    results = []
+    classes.sort(key=attrgetter('__module__', '__name__'))
+    for c in classes:
+        results.append((c, c.__bases__))
+        if c in children:
+            results.append(walktree(children[c], children, c))
+    return results
+
+def getclasstree(classes, unique=False):
+    """Arrange the given list of classes into a hierarchy of nested lists.
+
+    Where a nested list appears, it contains classes derived from the class
+    whose entry immediately precedes the list. Each entry is a 2-tuple
+    containing a class and a tuple of its base classes. If the 'unique'
+    argument is true, exactly one entry appears in the returned structure
+    for each class in the given list. Otherwise, classes using multiple
+    inheritance and their descendants will appear multiple times."""
+    children = {}
+    roots = []
+    for c in classes:
+        if c.__bases__:
+            for parent in c.__bases__:
+                if parent not in children:
+                    children[parent] = []
+                if c not in children[parent]:
+                    children[parent].append(c)
+                if unique and parent in classes: break
+        elif c not in roots:
+            roots.append(c)
+    for parent in children:
+        if parent not in classes:
+            roots.append(parent)
+    return walktree(roots, children, None)
+
+# ------------------------------------------------ argument list extraction
+Arguments = namedtuple('Arguments', 'args, varargs, varkw')
+
+def getargs(co):
+    """Get information about the arguments accepted by a code object.
+
+    Three things are returned: (args, varargs, varkw), where
+    'args' is the list of argument names. Keyword-only arguments are
+    appended. 'varargs' and 'varkw' are the names of the * and **
+    arguments or None."""
+    if not iscode(co):
+        raise TypeError('{!r} is not a code object'.format(co))
+
+    names = co.co_varnames
+    nargs = co.co_argcount
+    nkwargs = co.co_kwonlyargcount
+    args = list(names[:nargs])
+    kwonlyargs = list(names[nargs:nargs+nkwargs])
+    step = 0
+
+    nargs += nkwargs
+    varargs = None
+    if co.co_flags & CO_VARARGS:
+        varargs = co.co_varnames[nargs]
+        nargs = nargs + 1
+    varkw = None
+    if co.co_flags & CO_VARKEYWORDS:
+        varkw = co.co_varnames[nargs]
+    return Arguments(args + kwonlyargs, varargs, varkw)
+
+ArgSpec = namedtuple('ArgSpec', 'args varargs keywords defaults')
+
+def getargspec(func):
+    """Get the names and default values of a function's parameters.
+
+    A tuple of four things is returned: (args, varargs, keywords, defaults).
+    'args' is a list of the argument names, including keyword-only argument names.
+    'varargs' and 'keywords' are the names of the * and ** parameters or None.
+    'defaults' is an n-tuple of the default values of the last n parameters.
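+
+    For example, via the non-deprecated getfullargspec():
+
+        >>> import inspect
+        >>> inspect.getfullargspec(lambda a, b=1, *c, d, **e: None).args
+        ['a', 'b']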
+ + This function is deprecated, as it does not support annotations or + keyword-only parameters and will raise ValueError if either is present + on the supplied callable. + + For a more structured introspection API, use inspect.signature() instead. + + Alternatively, use getfullargspec() for an API with a similar namedtuple + based interface, but full support for annotations and keyword-only + parameters. + + Deprecated since Python 3.5, use `inspect.getfullargspec()`. + """ + warnings.warn("inspect.getargspec() is deprecated since Python 3.0, " + "use inspect.signature() or inspect.getfullargspec()", + DeprecationWarning, stacklevel=2) + args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = \ + getfullargspec(func) + if kwonlyargs or ann: + raise ValueError("Function has keyword-only parameters or annotations" + ", use inspect.signature() API which can support them") + return ArgSpec(args, varargs, varkw, defaults) + +FullArgSpec = namedtuple('FullArgSpec', + 'args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations') + +def getfullargspec(func): + """Get the names and default values of a callable object's parameters. + + A tuple of seven things is returned: + (args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, annotations). + 'args' is a list of the parameter names. + 'varargs' and 'varkw' are the names of the * and ** parameters or None. + 'defaults' is an n-tuple of the default values of the last n parameters. + 'kwonlyargs' is a list of keyword-only parameter names. + 'kwonlydefaults' is a dictionary mapping names from kwonlyargs to defaults. + 'annotations' is a dictionary mapping parameter names to annotations. + + Notable differences from inspect.signature(): + - the "self" parameter is always reported, even for bound methods + - wrapper chains defined by __wrapped__ *not* unwrapped automatically + """ + try: + # Re: `skip_bound_arg=False` + # + # There is a notable difference in behaviour between getfullargspec + # and Signature: the former always returns 'self' parameter for bound + # methods, whereas the Signature always shows the actual calling + # signature of the passed object. + # + # To simulate this behaviour, we "unbind" bound methods, to trick + # inspect.signature to always return their first parameter ("self", + # usually) + + # Re: `follow_wrapper_chains=False` + # + # getfullargspec() historically ignored __wrapped__ attributes, + # so we ensure that remains the case in 3.3+ + + sig = _signature_from_callable(func, + follow_wrapper_chains=False, + skip_bound_arg=False, + sigcls=Signature, + eval_str=False) + except Exception as ex: + # Most of the times 'signature' will raise ValueError. + # But, it can also raise AttributeError, and, maybe something + # else. So to be fully backwards compatible, we catch all + # possible exceptions here, and reraise a TypeError. 
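+        # (A typical trigger is a C-implemented callable that exposes no
+        # __text_signature__, for which signature() raises ValueError.)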
+ raise TypeError('unsupported callable') from ex + + args = [] + varargs = None + varkw = None + posonlyargs = [] + kwonlyargs = [] + annotations = {} + defaults = () + kwdefaults = {} + + if sig.return_annotation is not sig.empty: + annotations['return'] = sig.return_annotation + + for param in sig.parameters.values(): + kind = param.kind + name = param.name + + if kind is _POSITIONAL_ONLY: + posonlyargs.append(name) + if param.default is not param.empty: + defaults += (param.default,) + elif kind is _POSITIONAL_OR_KEYWORD: + args.append(name) + if param.default is not param.empty: + defaults += (param.default,) + elif kind is _VAR_POSITIONAL: + varargs = name + elif kind is _KEYWORD_ONLY: + kwonlyargs.append(name) + if param.default is not param.empty: + kwdefaults[name] = param.default + elif kind is _VAR_KEYWORD: + varkw = name + + if param.annotation is not param.empty: + annotations[name] = param.annotation + + if not kwdefaults: + # compatibility with 'func.__kwdefaults__' + kwdefaults = None + + if not defaults: + # compatibility with 'func.__defaults__' + defaults = None + + return FullArgSpec(posonlyargs + args, varargs, varkw, defaults, + kwonlyargs, kwdefaults, annotations) + + +ArgInfo = namedtuple('ArgInfo', 'args varargs keywords locals') + +def getargvalues(frame): + """Get information about arguments passed into a particular frame. + + A tuple of four things is returned: (args, varargs, varkw, locals). + 'args' is a list of the argument names. + 'varargs' and 'varkw' are the names of the * and ** arguments or None. + 'locals' is the locals dictionary of the given frame.""" + args, varargs, varkw = getargs(frame.f_code) + return ArgInfo(args, varargs, varkw, frame.f_locals) + +def formatannotation(annotation, base_module=None): + if getattr(annotation, '__module__', None) == 'typing': + def repl(match): + text = match.group() + return text.removeprefix('typing.') + return re.sub(r'[\w\.]+', repl, repr(annotation)) + if isinstance(annotation, types.GenericAlias): + return str(annotation) + if isinstance(annotation, type): + if annotation.__module__ in ('builtins', base_module): + return annotation.__qualname__ + return annotation.__module__+'.'+annotation.__qualname__ + return repr(annotation) + +def formatannotationrelativeto(object): + module = getattr(object, '__module__', None) + def _formatannotation(annotation): + return formatannotation(annotation, module) + return _formatannotation + +def formatargspec(args, varargs=None, varkw=None, defaults=None, + kwonlyargs=(), kwonlydefaults={}, annotations={}, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value), + formatreturns=lambda text: ' -> ' + text, + formatannotation=formatannotation): + """Format an argument spec from the values returned by getfullargspec. + + The first seven arguments are (args, varargs, varkw, defaults, + kwonlyargs, kwonlydefaults, annotations). The other five arguments + are the corresponding optional formatting functions that are called to + turn names and values into strings. The last argument is an optional + function to format the sequence of arguments. + + Deprecated since Python 3.5: use the `signature` function and `Signature` + objects. + """ + + from warnings import warn + + warn("`formatargspec` is deprecated since Python 3.5. 
Use `signature` and " + "the `Signature` object directly", + DeprecationWarning, + stacklevel=2) + + def formatargandannotation(arg): + result = formatarg(arg) + if arg in annotations: + result += ': ' + formatannotation(annotations[arg]) + return result + specs = [] + if defaults: + firstdefault = len(args) - len(defaults) + for i, arg in enumerate(args): + spec = formatargandannotation(arg) + if defaults and i >= firstdefault: + spec = spec + formatvalue(defaults[i - firstdefault]) + specs.append(spec) + if varargs is not None: + specs.append(formatvarargs(formatargandannotation(varargs))) + else: + if kwonlyargs: + specs.append('*') + if kwonlyargs: + for kwonlyarg in kwonlyargs: + spec = formatargandannotation(kwonlyarg) + if kwonlydefaults and kwonlyarg in kwonlydefaults: + spec += formatvalue(kwonlydefaults[kwonlyarg]) + specs.append(spec) + if varkw is not None: + specs.append(formatvarkw(formatargandannotation(varkw))) + result = '(' + ', '.join(specs) + ')' + if 'return' in annotations: + result += formatreturns(formatannotation(annotations['return'])) + return result + +def formatargvalues(args, varargs, varkw, locals, + formatarg=str, + formatvarargs=lambda name: '*' + name, + formatvarkw=lambda name: '**' + name, + formatvalue=lambda value: '=' + repr(value)): + """Format an argument spec from the 4 values returned by getargvalues. + + The first four arguments are (args, varargs, varkw, locals). The + next four arguments are the corresponding optional formatting functions + that are called to turn names and values into strings. The ninth + argument is an optional function to format the sequence of arguments.""" + def convert(name, locals=locals, + formatarg=formatarg, formatvalue=formatvalue): + return formatarg(name) + formatvalue(locals[name]) + specs = [] + for i in range(len(args)): + specs.append(convert(args[i])) + if varargs: + specs.append(formatvarargs(varargs) + formatvalue(locals[varargs])) + if varkw: + specs.append(formatvarkw(varkw) + formatvalue(locals[varkw])) + return '(' + ', '.join(specs) + ')' + +def _missing_arguments(f_name, argnames, pos, values): + names = [repr(name) for name in argnames if name not in values] + missing = len(names) + if missing == 1: + s = names[0] + elif missing == 2: + s = "{} and {}".format(*names) + else: + tail = ", {} and {}".format(*names[-2:]) + del names[-2:] + s = ", ".join(names) + tail + raise TypeError("%s() missing %i required %s argument%s: %s" % + (f_name, missing, + "positional" if pos else "keyword-only", + "" if missing == 1 else "s", s)) + +def _too_many(f_name, args, kwonly, varargs, defcount, given, values): + atleast = len(args) - defcount + kwonly_given = len([arg for arg in kwonly if arg in values]) + if varargs: + plural = atleast != 1 + sig = "at least %d" % (atleast,) + elif defcount: + plural = True + sig = "from %d to %d" % (atleast, len(args)) + else: + plural = len(args) != 1 + sig = str(len(args)) + kwonly_sig = "" + if kwonly_given: + msg = " positional argument%s (and %d keyword-only argument%s)" + kwonly_sig = (msg % ("s" if given != 1 else "", kwonly_given, + "s" if kwonly_given != 1 else "")) + raise TypeError("%s() takes %s positional argument%s but %d%s %s given" % + (f_name, sig, "s" if plural else "", given, kwonly_sig, + "was" if given == 1 and not kwonly_given else "were")) + +def getcallargs(func, /, *positional, **named): + """Get the mapping of arguments to values. 
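+
+    For example:
+
+        >>> import inspect
+        >>> def f(a, b=2, *args, **kw): pass
+        >>> sorted(inspect.getcallargs(f, 1, c=3).items())
+        [('a', 1), ('args', ()), ('b', 2), ('kw', {'c': 3})]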
+ + A dict is returned, with keys the function argument names (including the + names of the * and ** arguments, if any), and values the respective bound + values from 'positional' and 'named'.""" + spec = getfullargspec(func) + args, varargs, varkw, defaults, kwonlyargs, kwonlydefaults, ann = spec + f_name = func.__name__ + arg2value = {} + + + if ismethod(func) and func.__self__ is not None: + # implicit 'self' (or 'cls' for classmethods) argument + positional = (func.__self__,) + positional + num_pos = len(positional) + num_args = len(args) + num_defaults = len(defaults) if defaults else 0 + + n = min(num_pos, num_args) + for i in range(n): + arg2value[args[i]] = positional[i] + if varargs: + arg2value[varargs] = tuple(positional[n:]) + possible_kwargs = set(args + kwonlyargs) + if varkw: + arg2value[varkw] = {} + for kw, value in named.items(): + if kw not in possible_kwargs: + if not varkw: + raise TypeError("%s() got an unexpected keyword argument %r" % + (f_name, kw)) + arg2value[varkw][kw] = value + continue + if kw in arg2value: + raise TypeError("%s() got multiple values for argument %r" % + (f_name, kw)) + arg2value[kw] = value + if num_pos > num_args and not varargs: + _too_many(f_name, args, kwonlyargs, varargs, num_defaults, + num_pos, arg2value) + if num_pos < num_args: + req = args[:num_args - num_defaults] + for arg in req: + if arg not in arg2value: + _missing_arguments(f_name, req, True, arg2value) + for i, arg in enumerate(args[num_args - num_defaults:]): + if arg not in arg2value: + arg2value[arg] = defaults[i] + missing = 0 + for kwarg in kwonlyargs: + if kwarg not in arg2value: + if kwonlydefaults and kwarg in kwonlydefaults: + arg2value[kwarg] = kwonlydefaults[kwarg] + else: + missing += 1 + if missing: + _missing_arguments(f_name, kwonlyargs, False, arg2value) + return arg2value + +ClosureVars = namedtuple('ClosureVars', 'nonlocals globals builtins unbound') + +def getclosurevars(func): + """ + Get the mapping of free variables to their current values. + + Returns a named tuple of dicts mapping the current nonlocal, global + and builtin references as seen by the body of the function. A final + set of unbound names that could not be resolved is also provided. + """ + + if ismethod(func): + func = func.__func__ + + if not isfunction(func): + raise TypeError("{!r} is not a Python function".format(func)) + + code = func.__code__ + # Nonlocal references are named in co_freevars and resolved + # by looking them up in __closure__ by positional index + if func.__closure__ is None: + nonlocal_vars = {} + else: + nonlocal_vars = { + var : cell.cell_contents + for var, cell in zip(code.co_freevars, func.__closure__) + } + + # Global and builtin references are named in co_names and resolved + # by looking them up in __globals__ or __builtins__ + global_ns = func.__globals__ + builtin_ns = global_ns.get("__builtins__", builtins.__dict__) + if ismodule(builtin_ns): + builtin_ns = builtin_ns.__dict__ + global_vars = {} + builtin_vars = {} + unbound_names = set() + for name in code.co_names: + if name in ("None", "True", "False"): + # Because these used to be builtins instead of keywords, they + # may still show up as name references. We ignore them. 
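+            # (Defensive: current compilers emit these as constants, not
+            # names, so this mostly matters for unusual code objects.)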
+ continue + try: + global_vars[name] = global_ns[name] + except KeyError: + try: + builtin_vars[name] = builtin_ns[name] + except KeyError: + unbound_names.add(name) + + return ClosureVars(nonlocal_vars, global_vars, + builtin_vars, unbound_names) + +# -------------------------------------------------- stack frame extraction + +Traceback = namedtuple('Traceback', 'filename lineno function code_context index') + +def getframeinfo(frame, context=1): + """Get information about a frame or traceback object. + + A tuple of five things is returned: the filename, the line number of + the current line, the function name, a list of lines of context from + the source code, and the index of the current line within that list. + The optional second argument specifies the number of lines of context + to return, which are centered around the current line.""" + if istraceback(frame): + lineno = frame.tb_lineno + frame = frame.tb_frame + else: + lineno = frame.f_lineno + if not isframe(frame): + raise TypeError('{!r} is not a frame or traceback object'.format(frame)) + + filename = getsourcefile(frame) or getfile(frame) + if context > 0: + start = lineno - 1 - context//2 + try: + lines, lnum = findsource(frame) + except OSError: + lines = index = None + else: + start = max(0, min(start, len(lines) - context)) + lines = lines[start:start+context] + index = lineno - 1 - start + else: + lines = index = None + + return Traceback(filename, lineno, frame.f_code.co_name, lines, index) + +def getlineno(frame): + """Get the line number from a frame object, allowing for optimization.""" + # FrameType.f_lineno is now a descriptor that grovels co_lnotab + return frame.f_lineno + +FrameInfo = namedtuple('FrameInfo', ('frame',) + Traceback._fields) + +def getouterframes(frame, context=1): + """Get a list of records for a frame and all higher (calling) frames. + + Each record contains a frame object, filename, line number, function + name, a list of lines of context, and index within the context.""" + framelist = [] + while frame: + frameinfo = (frame,) + getframeinfo(frame, context) + framelist.append(FrameInfo(*frameinfo)) + frame = frame.f_back + return framelist + +def getinnerframes(tb, context=1): + """Get a list of records for a traceback's frame and all lower frames. 
+ + Each record contains a frame object, filename, line number, function + name, a list of lines of context, and index within the context.""" + framelist = [] + while tb: + frameinfo = (tb.tb_frame,) + getframeinfo(tb, context) + framelist.append(FrameInfo(*frameinfo)) + tb = tb.tb_next + return framelist + +def currentframe(): + """Return the frame of the caller or None if this is not possible.""" + return sys._getframe(1) if hasattr(sys, "_getframe") else None + +def stack(context=1): + """Return a list of records for the stack above the caller's frame.""" + return getouterframes(sys._getframe(1), context) + +def trace(context=1): + """Return a list of records for the stack below the current exception.""" + return getinnerframes(sys.exc_info()[2], context) + + +# ------------------------------------------------ static version of getattr + +_sentinel = object() + +def _static_getmro(klass): + return type.__dict__['__mro__'].__get__(klass) + +def _check_instance(obj, attr): + instance_dict = {} + try: + instance_dict = object.__getattribute__(obj, "__dict__") + except AttributeError: + pass + return dict.get(instance_dict, attr, _sentinel) + + +def _check_class(klass, attr): + for entry in _static_getmro(klass): + if _shadowed_dict(type(entry)) is _sentinel: + try: + return entry.__dict__[attr] + except KeyError: + pass + return _sentinel + +def _is_type(obj): + try: + _static_getmro(obj) + except TypeError: + return False + return True + +def _shadowed_dict(klass): + dict_attr = type.__dict__["__dict__"] + for entry in _static_getmro(klass): + try: + class_dict = dict_attr.__get__(entry)["__dict__"] + except KeyError: + pass + else: + if not (type(class_dict) is types.GetSetDescriptorType and + class_dict.__name__ == "__dict__" and + class_dict.__objclass__ is entry): + return class_dict + return _sentinel + +def getattr_static(obj, attr, default=_sentinel): + """Retrieve attributes without triggering dynamic lookup via the + descriptor protocol, __getattr__ or __getattribute__. + + Note: this function may not be able to retrieve all attributes + that getattr can fetch (like dynamically created attributes) + and may find attributes that getattr can't (like descriptors + that raise AttributeError). It can also return descriptor objects + instead of instance members in some cases. See the + documentation for details. 
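+
+    Example (illustrative):
+
+        >>> class C:
+        ...     attr = 1
+        >>> getattr_static(C(), 'attr')
+        1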
+ """ + instance_result = _sentinel + if not _is_type(obj): + klass = type(obj) + dict_attr = _shadowed_dict(klass) + if (dict_attr is _sentinel or + type(dict_attr) is types.MemberDescriptorType): + instance_result = _check_instance(obj, attr) + else: + klass = obj + + klass_result = _check_class(klass, attr) + + if instance_result is not _sentinel and klass_result is not _sentinel: + if (_check_class(type(klass_result), '__get__') is not _sentinel and + _check_class(type(klass_result), '__set__') is not _sentinel): + return klass_result + + if instance_result is not _sentinel: + return instance_result + if klass_result is not _sentinel: + return klass_result + + if obj is klass: + # for types we check the metaclass too + for entry in _static_getmro(type(klass)): + if _shadowed_dict(type(entry)) is _sentinel: + try: + return entry.__dict__[attr] + except KeyError: + pass + if default is not _sentinel: + return default + raise AttributeError(attr) + + +# ------------------------------------------------ generator introspection + +GEN_CREATED = 'GEN_CREATED' +GEN_RUNNING = 'GEN_RUNNING' +GEN_SUSPENDED = 'GEN_SUSPENDED' +GEN_CLOSED = 'GEN_CLOSED' + +def getgeneratorstate(generator): + """Get current state of a generator-iterator. + + Possible states are: + GEN_CREATED: Waiting to start execution. + GEN_RUNNING: Currently being executed by the interpreter. + GEN_SUSPENDED: Currently suspended at a yield expression. + GEN_CLOSED: Execution has completed. + """ + if generator.gi_running: + return GEN_RUNNING + if generator.gi_frame is None: + return GEN_CLOSED + if generator.gi_frame.f_lasti == -1: + return GEN_CREATED + return GEN_SUSPENDED + + +def getgeneratorlocals(generator): + """ + Get the mapping of generator local variables to their current values. + + A dict is returned, with the keys the local variable names and values the + bound values.""" + + if not isgenerator(generator): + raise TypeError("{!r} is not a Python generator".format(generator)) + + frame = getattr(generator, "gi_frame", None) + if frame is not None: + return generator.gi_frame.f_locals + else: + return {} + + +# ------------------------------------------------ coroutine introspection + +CORO_CREATED = 'CORO_CREATED' +CORO_RUNNING = 'CORO_RUNNING' +CORO_SUSPENDED = 'CORO_SUSPENDED' +CORO_CLOSED = 'CORO_CLOSED' + +def getcoroutinestate(coroutine): + """Get current state of a coroutine object. + + Possible states are: + CORO_CREATED: Waiting to start execution. + CORO_RUNNING: Currently being executed by the interpreter. + CORO_SUSPENDED: Currently suspended at an await expression. + CORO_CLOSED: Execution has completed. + """ + if coroutine.cr_running: + return CORO_RUNNING + if coroutine.cr_frame is None: + return CORO_CLOSED + if coroutine.cr_frame.f_lasti == -1: + return CORO_CREATED + return CORO_SUSPENDED + + +def getcoroutinelocals(coroutine): + """ + Get the mapping of coroutine local variables to their current values. 
+ + A dict is returned, with the keys the local variable names and values the + bound values.""" + frame = getattr(coroutine, "cr_frame", None) + if frame is not None: + return frame.f_locals + else: + return {} + + +############################################################################### +### Function Signature Object (PEP 362) +############################################################################### + + +_WrapperDescriptor = type(type.__call__) +_MethodWrapper = type(all.__call__) +_ClassMethodWrapper = type(int.__dict__['from_bytes']) + +_NonUserDefinedCallables = (_WrapperDescriptor, + _MethodWrapper, + _ClassMethodWrapper, + types.BuiltinFunctionType) + + +def _signature_get_user_defined_method(cls, method_name): + """Private helper. Checks if ``cls`` has an attribute + named ``method_name`` and returns it only if it is a + pure python function. + """ + try: + meth = getattr(cls, method_name) + except AttributeError: + return + else: + if not isinstance(meth, _NonUserDefinedCallables): + # Once '__signature__' will be added to 'C'-level + # callables, this check won't be necessary + return meth + + +def _signature_get_partial(wrapped_sig, partial, extra_args=()): + """Private helper to calculate how 'wrapped_sig' signature will + look like after applying a 'functools.partial' object (or alike) + on it. + """ + + old_params = wrapped_sig.parameters + new_params = OrderedDict(old_params.items()) + + partial_args = partial.args or () + partial_keywords = partial.keywords or {} + + if extra_args: + partial_args = extra_args + partial_args + + try: + ba = wrapped_sig.bind_partial(*partial_args, **partial_keywords) + except TypeError as ex: + msg = 'partial object {!r} has incorrect arguments'.format(partial) + raise ValueError(msg) from ex + + + transform_to_kwonly = False + for param_name, param in old_params.items(): + try: + arg_value = ba.arguments[param_name] + except KeyError: + pass + else: + if param.kind is _POSITIONAL_ONLY: + # If positional-only parameter is bound by partial, + # it effectively disappears from the signature + new_params.pop(param_name) + continue + + if param.kind is _POSITIONAL_OR_KEYWORD: + if param_name in partial_keywords: + # This means that this parameter, and all parameters + # after it should be keyword-only (and var-positional + # should be removed). Here's why. Consider the following + # function: + # foo(a, b, *args, c): + # pass + # + # "partial(foo, a='spam')" will have the following + # signature: "(*, a='spam', b, c)". Because attempting + # to call that partial with "(10, 20)" arguments will + # raise a TypeError, saying that "a" argument received + # multiple values. 
+ transform_to_kwonly = True + # Set the new default value + new_params[param_name] = param.replace(default=arg_value) + else: + # was passed as a positional argument + new_params.pop(param.name) + continue + + if param.kind is _KEYWORD_ONLY: + # Set the new default value + new_params[param_name] = param.replace(default=arg_value) + + if transform_to_kwonly: + assert param.kind is not _POSITIONAL_ONLY + + if param.kind is _POSITIONAL_OR_KEYWORD: + new_param = new_params[param_name].replace(kind=_KEYWORD_ONLY) + new_params[param_name] = new_param + new_params.move_to_end(param_name) + elif param.kind in (_KEYWORD_ONLY, _VAR_KEYWORD): + new_params.move_to_end(param_name) + elif param.kind is _VAR_POSITIONAL: + new_params.pop(param.name) + + return wrapped_sig.replace(parameters=new_params.values()) + + +def _signature_bound_method(sig): + """Private helper to transform signatures for unbound + functions to bound methods. + """ + + params = tuple(sig.parameters.values()) + + if not params or params[0].kind in (_VAR_KEYWORD, _KEYWORD_ONLY): + raise ValueError('invalid method signature') + + kind = params[0].kind + if kind in (_POSITIONAL_OR_KEYWORD, _POSITIONAL_ONLY): + # Drop first parameter: + # '(p1, p2[, ...])' -> '(p2[, ...])' + params = params[1:] + else: + if kind is not _VAR_POSITIONAL: + # Unless we add a new parameter type we never + # get here + raise ValueError('invalid argument type') + # It's a var-positional parameter. + # Do nothing. '(*args[, ...])' -> '(*args[, ...])' + + return sig.replace(parameters=params) + + +def _signature_is_builtin(obj): + """Private helper to test if `obj` is a callable that might + support Argument Clinic's __text_signature__ protocol. + """ + return (isbuiltin(obj) or + ismethoddescriptor(obj) or + isinstance(obj, _NonUserDefinedCallables) or + # Can't test 'isinstance(type)' here, as it would + # also be True for regular python classes + obj in (type, object)) + + +def _signature_is_functionlike(obj): + """Private helper to test if `obj` is a duck type of FunctionType. + A good example of such objects are functions compiled with + Cython, which have all attributes that a pure Python function + would have, but have their code statically compiled. + """ + + if not callable(obj) or isclass(obj): + # All function-like objects are obviously callables, + # and not classes. + return False + + name = getattr(obj, '__name__', None) + code = getattr(obj, '__code__', None) + defaults = getattr(obj, '__defaults__', _void) # Important to use _void ... + kwdefaults = getattr(obj, '__kwdefaults__', _void) # ... and not None here + annotations = getattr(obj, '__annotations__', None) + + return (isinstance(code, types.CodeType) and + isinstance(name, str) and + (defaults is None or isinstance(defaults, tuple)) and + (kwdefaults is None or isinstance(kwdefaults, dict)) and + (isinstance(annotations, (dict)) or annotations is None) ) + + +def _signature_get_bound_param(spec): + """ Private helper to get first parameter name from a + __text_signature__ of a builtin method, which should + be in the following format: '($param1, ...)'. + Assumptions are that the first argument won't have + a default value or an annotation. + """ + + assert spec.startswith('($') + + pos = spec.find(',') + if pos == -1: + pos = spec.find(')') + + cpos = spec.find(':') + assert cpos == -1 or cpos > pos + + cpos = spec.find('=') + assert cpos == -1 or cpos > pos + + return spec[2:pos] + + +def _signature_strip_non_python_syntax(signature): + """ + Private helper function. 
Takes a signature in Argument Clinic's + extended signature format. + + Returns a tuple of three things: + * that signature re-rendered in standard Python syntax, + * the index of the "self" parameter (generally 0), or None if + the function does not have a "self" parameter, and + * the index of the last "positional only" parameter, + or None if the signature has no positional-only parameters. + """ + + if not signature: + return signature, None, None + + self_parameter = None + last_positional_only = None + + lines = [l.encode('ascii') for l in signature.split('\n') if l] + generator = iter(lines).__next__ + token_stream = tokenize.tokenize(generator) + + delayed_comma = False + skip_next_comma = False + text = [] + add = text.append + + current_parameter = 0 + OP = token.OP + ERRORTOKEN = token.ERRORTOKEN + + # token stream always starts with ENCODING token, skip it + t = next(token_stream) + assert t.type == tokenize.ENCODING + + for t in token_stream: + type, string = t.type, t.string + + if type == OP: + if string == ',': + if skip_next_comma: + skip_next_comma = False + else: + assert not delayed_comma + delayed_comma = True + current_parameter += 1 + continue + + if string == '/': + assert not skip_next_comma + assert last_positional_only is None + skip_next_comma = True + last_positional_only = current_parameter - 1 + continue + + if (type == ERRORTOKEN) and (string == '$'): + assert self_parameter is None + self_parameter = current_parameter + continue + + if delayed_comma: + delayed_comma = False + if not ((type == OP) and (string == ')')): + add(', ') + add(string) + if (string == ','): + add(' ') + clean_signature = ''.join(text) + return clean_signature, self_parameter, last_positional_only + + +def _signature_fromstr(cls, obj, s, skip_bound_arg=True): + """Private helper to parse content of '__text_signature__' + and return a Signature based on it. + """ + # Lazy import ast because it's relatively heavy and + # it's not used for other than this function. 
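+    # For reference, a '__text_signature__' looks like '($self, value, /)':
+    # the leading '$' marks the implicit bound argument and '/' terminates
+    # the positional-only parameters (see the helpers above).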
+ import ast + + Parameter = cls._parameter_cls + + clean_signature, self_parameter, last_positional_only = \ + _signature_strip_non_python_syntax(s) + + program = "def foo" + clean_signature + ": pass" + + try: + module = ast.parse(program) + except SyntaxError: + module = None + + if not isinstance(module, ast.Module): + raise ValueError("{!r} builtin has invalid signature".format(obj)) + + f = module.body[0] + + parameters = [] + empty = Parameter.empty + + module = None + module_dict = {} + module_name = getattr(obj, '__module__', None) + if module_name: + module = sys.modules.get(module_name, None) + if module: + module_dict = module.__dict__ + sys_module_dict = sys.modules.copy() + + def parse_name(node): + assert isinstance(node, ast.arg) + if node.annotation is not None: + raise ValueError("Annotations are not currently supported") + return node.arg + + def wrap_value(s): + try: + value = eval(s, module_dict) + except NameError: + try: + value = eval(s, sys_module_dict) + except NameError: + raise ValueError + + if isinstance(value, (str, int, float, bytes, bool, type(None))): + return ast.Constant(value) + raise ValueError + + class RewriteSymbolics(ast.NodeTransformer): + def visit_Attribute(self, node): + a = [] + n = node + while isinstance(n, ast.Attribute): + a.append(n.attr) + n = n.value + if not isinstance(n, ast.Name): + raise ValueError + a.append(n.id) + value = ".".join(reversed(a)) + return wrap_value(value) + + def visit_Name(self, node): + if not isinstance(node.ctx, ast.Load): + raise ValueError() + return wrap_value(node.id) + + def visit_BinOp(self, node): + # Support constant folding of a couple simple binary operations + # commonly used to define default values in text signatures + left = self.visit(node.left) + right = self.visit(node.right) + if not isinstance(left, ast.Constant) or not isinstance(right, ast.Constant): + raise ValueError + if isinstance(node.op, ast.Add): + return ast.Constant(left.value + right.value) + elif isinstance(node.op, ast.Sub): + return ast.Constant(left.value - right.value) + elif isinstance(node.op, ast.BitOr): + return ast.Constant(left.value | right.value) + raise ValueError + + def p(name_node, default_node, default=empty): + name = parse_name(name_node) + if default_node and default_node is not _empty: + try: + default_node = RewriteSymbolics().visit(default_node) + default = ast.literal_eval(default_node) + except ValueError: + raise ValueError("{!r} builtin has invalid signature".format(obj)) from None + parameters.append(Parameter(name, kind, default=default, annotation=empty)) + + # non-keyword-only parameters + args = reversed(f.args.args) + defaults = reversed(f.args.defaults) + iter = itertools.zip_longest(args, defaults, fillvalue=None) + if last_positional_only is not None: + kind = Parameter.POSITIONAL_ONLY + else: + kind = Parameter.POSITIONAL_OR_KEYWORD + for i, (name, default) in enumerate(reversed(list(iter))): + p(name, default) + if i == last_positional_only: + kind = Parameter.POSITIONAL_OR_KEYWORD + + # *args + if f.args.vararg: + kind = Parameter.VAR_POSITIONAL + p(f.args.vararg, empty) + + # keyword-only arguments + kind = Parameter.KEYWORD_ONLY + for name, default in zip(f.args.kwonlyargs, f.args.kw_defaults): + p(name, default) + + # **kwargs + if f.args.kwarg: + kind = Parameter.VAR_KEYWORD + p(f.args.kwarg, empty) + + if self_parameter is not None: + # Possibly strip the bound argument: + # - We *always* strip first bound argument if + # it is a module. 
+ # - We don't strip first bound argument if + # skip_bound_arg is False. + assert parameters + _self = getattr(obj, '__self__', None) + self_isbound = _self is not None + self_ismodule = ismodule(_self) + if self_isbound and (self_ismodule or skip_bound_arg): + parameters.pop(0) + else: + # for builtins, self parameter is always positional-only! + p = parameters[0].replace(kind=Parameter.POSITIONAL_ONLY) + parameters[0] = p + + return cls(parameters, return_annotation=cls.empty) + + +def _signature_from_builtin(cls, func, skip_bound_arg=True): + """Private helper function to get signature for + builtin callables. + """ + + if not _signature_is_builtin(func): + raise TypeError("{!r} is not a Python builtin " + "function".format(func)) + + s = getattr(func, "__text_signature__", None) + if not s: + raise ValueError("no signature found for builtin {!r}".format(func)) + + return _signature_fromstr(cls, func, s, skip_bound_arg) + + +def _signature_from_function(cls, func, skip_bound_arg=True, + globals=None, locals=None, eval_str=False): + """Private helper: constructs Signature for the given python function.""" + + is_duck_function = False + if not isfunction(func): + if _signature_is_functionlike(func): + is_duck_function = True + else: + # If it's not a pure Python function, and not a duck type + # of pure function: + raise TypeError('{!r} is not a Python function'.format(func)) + + s = getattr(func, "__text_signature__", None) + if s: + return _signature_fromstr(cls, func, s, skip_bound_arg) + + Parameter = cls._parameter_cls + + # Parameter information. + func_code = func.__code__ + pos_count = func_code.co_argcount + arg_names = func_code.co_varnames + posonly_count = func_code.co_posonlyargcount + positional = arg_names[:pos_count] + keyword_only_count = func_code.co_kwonlyargcount + keyword_only = arg_names[pos_count:pos_count + keyword_only_count] + annotations = get_annotations(func, globals=globals, locals=locals, eval_str=eval_str) + defaults = func.__defaults__ + kwdefaults = func.__kwdefaults__ + + if defaults: + pos_default_count = len(defaults) + else: + pos_default_count = 0 + + parameters = [] + + non_default_count = pos_count - pos_default_count + posonly_left = posonly_count + + # Non-keyword-only parameters w/o defaults. + for name in positional[:non_default_count]: + kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=kind)) + if posonly_left: + posonly_left -= 1 + + # ... w/ defaults. + for offset, name in enumerate(positional[non_default_count:]): + kind = _POSITIONAL_ONLY if posonly_left else _POSITIONAL_OR_KEYWORD + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=kind, + default=defaults[offset])) + if posonly_left: + posonly_left -= 1 + + # *args + if func_code.co_flags & CO_VARARGS: + name = arg_names[pos_count + keyword_only_count] + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_VAR_POSITIONAL)) + + # Keyword-only parameters. 
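+    # (Defaults for keyword-only parameters live in func.__kwdefaults__,
+    # keyed by name rather than by position.)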
+ for name in keyword_only: + default = _empty + if kwdefaults is not None: + default = kwdefaults.get(name, _empty) + + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_KEYWORD_ONLY, + default=default)) + # **kwargs + if func_code.co_flags & CO_VARKEYWORDS: + index = pos_count + keyword_only_count + if func_code.co_flags & CO_VARARGS: + index += 1 + + name = arg_names[index] + annotation = annotations.get(name, _empty) + parameters.append(Parameter(name, annotation=annotation, + kind=_VAR_KEYWORD)) + + # Is 'func' is a pure Python function - don't validate the + # parameters list (for correct order and defaults), it should be OK. + return cls(parameters, + return_annotation=annotations.get('return', _empty), + __validate_parameters__=is_duck_function) + + +def _signature_from_callable(obj, *, + follow_wrapper_chains=True, + skip_bound_arg=True, + globals=None, + locals=None, + eval_str=False, + sigcls): + + """Private helper function to get signature for arbitrary + callable objects. + """ + + _get_signature_of = functools.partial(_signature_from_callable, + follow_wrapper_chains=follow_wrapper_chains, + skip_bound_arg=skip_bound_arg, + globals=globals, + locals=locals, + sigcls=sigcls, + eval_str=eval_str) + + if not callable(obj): + raise TypeError('{!r} is not a callable object'.format(obj)) + + if isinstance(obj, types.MethodType): + # In this case we skip the first parameter of the underlying + # function (usually `self` or `cls`). + sig = _get_signature_of(obj.__func__) + + if skip_bound_arg: + return _signature_bound_method(sig) + else: + return sig + + # Was this function wrapped by a decorator? + if follow_wrapper_chains: + # Unwrap until we find an explicit signature or a MethodType (which will be + # handled explicitly below). + obj = unwrap(obj, stop=(lambda f: hasattr(f, "__signature__") + or isinstance(f, types.MethodType))) + if isinstance(obj, types.MethodType): + # If the unwrapped object is a *method*, we might want to + # skip its first parameter (self). + # See test_signature_wrapped_bound_method for details. + return _get_signature_of(obj) + + try: + sig = obj.__signature__ + except AttributeError: + pass + else: + if sig is not None: + if not isinstance(sig, Signature): + raise TypeError( + 'unexpected object {!r} in __signature__ ' + 'attribute'.format(sig)) + return sig + + try: + partialmethod = obj._partialmethod + except AttributeError: + pass + else: + if isinstance(partialmethod, functools.partialmethod): + # Unbound partialmethod (see functools.partialmethod) + # This means, that we need to calculate the signature + # as if it's a regular partial object, but taking into + # account that the first positional argument + # (usually `self`, or `cls`) will not be passed + # automatically (as for boundmethods) + + wrapped_sig = _get_signature_of(partialmethod.func) + + sig = _signature_get_partial(wrapped_sig, partialmethod, (None,)) + first_wrapped_param = tuple(wrapped_sig.parameters.values())[0] + if first_wrapped_param.kind is Parameter.VAR_POSITIONAL: + # First argument of the wrapped callable is `*args`, as in + # `partialmethod(lambda *args)`. 
+ return sig + else: + sig_params = tuple(sig.parameters.values()) + assert (not sig_params or + first_wrapped_param is not sig_params[0]) + new_params = (first_wrapped_param,) + sig_params + return sig.replace(parameters=new_params) + + if isfunction(obj) or _signature_is_functionlike(obj): + # If it's a pure Python function, or an object that is duck type + # of a Python function (Cython functions, for instance), then: + return _signature_from_function(sigcls, obj, + skip_bound_arg=skip_bound_arg, + globals=globals, locals=locals, eval_str=eval_str) + + if _signature_is_builtin(obj): + return _signature_from_builtin(sigcls, obj, + skip_bound_arg=skip_bound_arg) + + if isinstance(obj, functools.partial): + wrapped_sig = _get_signature_of(obj.func) + return _signature_get_partial(wrapped_sig, obj) + + sig = None + if isinstance(obj, type): + # obj is a class or a metaclass + + # First, let's see if it has an overloaded __call__ defined + # in its metaclass + call = _signature_get_user_defined_method(type(obj), '__call__') + if call is not None: + sig = _get_signature_of(call) + else: + factory_method = None + new = _signature_get_user_defined_method(obj, '__new__') + init = _signature_get_user_defined_method(obj, '__init__') + # Now we check if the 'obj' class has an own '__new__' method + if '__new__' in obj.__dict__: + factory_method = new + # or an own '__init__' method + elif '__init__' in obj.__dict__: + factory_method = init + # If not, we take inherited '__new__' or '__init__', if present + elif new is not None: + factory_method = new + elif init is not None: + factory_method = init + + if factory_method is not None: + sig = _get_signature_of(factory_method) + + if sig is None: + # At this point we know, that `obj` is a class, with no user- + # defined '__init__', '__new__', or class-level '__call__' + + for base in obj.__mro__[:-1]: + # Since '__text_signature__' is implemented as a + # descriptor that extracts text signature from the + # class docstring, if 'obj' is derived from a builtin + # class, its own '__text_signature__' may be 'None'. + # Therefore, we go through the MRO (except the last + # class in there, which is 'object') to find the first + # class with non-empty text signature. + try: + text_sig = base.__text_signature__ + except AttributeError: + pass + else: + if text_sig: + # If 'base' class has a __text_signature__ attribute: + # return a signature based on it + return _signature_fromstr(sigcls, base, text_sig) + + # No '__text_signature__' was found for the 'obj' class. + # Last option is to check if its '__init__' is + # object.__init__ or type.__init__. + if type not in obj.__mro__: + # We have a class (not metaclass), but no user-defined + # __init__ or __new__ for it + if (obj.__init__ is object.__init__ and + obj.__new__ is object.__new__): + # Return a signature of 'object' builtin. 
+ return sigcls.from_callable(object) + else: + raise ValueError( + 'no signature found for builtin type {!r}'.format(obj)) + + elif not isinstance(obj, _NonUserDefinedCallables): + # An object with __call__ + # We also check that the 'obj' is not an instance of + # _WrapperDescriptor or _MethodWrapper to avoid + # infinite recursion (and even potential segfault) + call = _signature_get_user_defined_method(type(obj), '__call__') + if call is not None: + try: + sig = _get_signature_of(call) + except ValueError as ex: + msg = 'no signature found for {!r}'.format(obj) + raise ValueError(msg) from ex + + if sig is not None: + # For classes and objects we skip the first parameter of their + # __call__, __new__, or __init__ methods + if skip_bound_arg: + return _signature_bound_method(sig) + else: + return sig + + if isinstance(obj, types.BuiltinFunctionType): + # Raise a nicer error message for builtins + msg = 'no signature found for builtin function {!r}'.format(obj) + raise ValueError(msg) + + raise ValueError('callable {!r} is not supported by signature'.format(obj)) + + +class _void: + """A private marker - used in Parameter & Signature.""" + + +class _empty: + """Marker object for Signature.empty and Parameter.empty.""" + + +class _ParameterKind(enum.IntEnum): + POSITIONAL_ONLY = 0 + POSITIONAL_OR_KEYWORD = 1 + VAR_POSITIONAL = 2 + KEYWORD_ONLY = 3 + VAR_KEYWORD = 4 + + def __str__(self): + return self._name_ + + @property + def description(self): + return _PARAM_NAME_MAPPING[self] + +_POSITIONAL_ONLY = _ParameterKind.POSITIONAL_ONLY +_POSITIONAL_OR_KEYWORD = _ParameterKind.POSITIONAL_OR_KEYWORD +_VAR_POSITIONAL = _ParameterKind.VAR_POSITIONAL +_KEYWORD_ONLY = _ParameterKind.KEYWORD_ONLY +_VAR_KEYWORD = _ParameterKind.VAR_KEYWORD + +_PARAM_NAME_MAPPING = { + _POSITIONAL_ONLY: 'positional-only', + _POSITIONAL_OR_KEYWORD: 'positional or keyword', + _VAR_POSITIONAL: 'variadic positional', + _KEYWORD_ONLY: 'keyword-only', + _VAR_KEYWORD: 'variadic keyword' +} + + +class Parameter: + """Represents a parameter in a function signature. + + Has the following public attributes: + + * name : str + The name of the parameter as a string. + * default : object + The default value for the parameter if specified. If the + parameter has no default value, this attribute is set to + `Parameter.empty`. + * annotation + The annotation for the parameter if specified. If the + parameter has no annotation, this attribute is set to + `Parameter.empty`. + * kind : str + Describes how argument values are bound to the parameter. + Possible values: `Parameter.POSITIONAL_ONLY`, + `Parameter.POSITIONAL_OR_KEYWORD`, `Parameter.VAR_POSITIONAL`, + `Parameter.KEYWORD_ONLY`, `Parameter.VAR_KEYWORD`. 
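+
+    Example (illustrative):
+
+        >>> str(Parameter('x', Parameter.POSITIONAL_OR_KEYWORD, default=0))
+        'x=0'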
+ """ + + __slots__ = ('_name', '_kind', '_default', '_annotation') + + POSITIONAL_ONLY = _POSITIONAL_ONLY + POSITIONAL_OR_KEYWORD = _POSITIONAL_OR_KEYWORD + VAR_POSITIONAL = _VAR_POSITIONAL + KEYWORD_ONLY = _KEYWORD_ONLY + VAR_KEYWORD = _VAR_KEYWORD + + empty = _empty + + def __init__(self, name, kind, *, default=_empty, annotation=_empty): + try: + self._kind = _ParameterKind(kind) + except ValueError: + raise ValueError(f'value {kind!r} is not a valid Parameter.kind') + if default is not _empty: + if self._kind in (_VAR_POSITIONAL, _VAR_KEYWORD): + msg = '{} parameters cannot have default values' + msg = msg.format(self._kind.description) + raise ValueError(msg) + self._default = default + self._annotation = annotation + + if name is _empty: + raise ValueError('name is a required attribute for Parameter') + + if not isinstance(name, str): + msg = 'name must be a str, not a {}'.format(type(name).__name__) + raise TypeError(msg) + + if name[0] == '.' and name[1:].isdigit(): + # These are implicit arguments generated by comprehensions. In + # order to provide a friendlier interface to users, we recast + # their name as "implicitN" and treat them as positional-only. + # See issue 19611. + if self._kind != _POSITIONAL_OR_KEYWORD: + msg = ( + 'implicit arguments must be passed as ' + 'positional or keyword arguments, not {}' + ) + msg = msg.format(self._kind.description) + raise ValueError(msg) + self._kind = _POSITIONAL_ONLY + name = 'implicit{}'.format(name[1:]) + + if not name.isidentifier(): + raise ValueError('{!r} is not a valid parameter name'.format(name)) + + self._name = name + + def __reduce__(self): + return (type(self), + (self._name, self._kind), + {'_default': self._default, + '_annotation': self._annotation}) + + def __setstate__(self, state): + self._default = state['_default'] + self._annotation = state['_annotation'] + + @property + def name(self): + return self._name + + @property + def default(self): + return self._default + + @property + def annotation(self): + return self._annotation + + @property + def kind(self): + return self._kind + + def replace(self, *, name=_void, kind=_void, + annotation=_void, default=_void): + """Creates a customized copy of the Parameter.""" + + if name is _void: + name = self._name + + if kind is _void: + kind = self._kind + + if annotation is _void: + annotation = self._annotation + + if default is _void: + default = self._default + + return type(self)(name, kind, default=default, annotation=annotation) + + def __str__(self): + kind = self.kind + formatted = self._name + + # Add annotation and default value + if self._annotation is not _empty: + formatted = '{}: {}'.format(formatted, + formatannotation(self._annotation)) + + if self._default is not _empty: + if self._annotation is not _empty: + formatted = '{} = {}'.format(formatted, repr(self._default)) + else: + formatted = '{}={}'.format(formatted, repr(self._default)) + + if kind == _VAR_POSITIONAL: + formatted = '*' + formatted + elif kind == _VAR_KEYWORD: + formatted = '**' + formatted + + return formatted + + def __repr__(self): + return '<{} "{}">'.format(self.__class__.__name__, self) + + def __hash__(self): + return hash((self.name, self.kind, self.annotation, self.default)) + + def __eq__(self, other): + if self is other: + return True + if not isinstance(other, Parameter): + return NotImplemented + return (self._name == other._name and + self._kind == other._kind and + self._default == other._default and + self._annotation == other._annotation) + + +class BoundArguments: + 
"""Result of `Signature.bind` call. Holds the mapping of arguments + to the function's parameters. + + Has the following public attributes: + + * arguments : dict + An ordered mutable mapping of parameters' names to arguments' values. + Does not contain arguments' default values. + * signature : Signature + The Signature object that created this instance. + * args : tuple + Tuple of positional arguments values. + * kwargs : dict + Dict of keyword arguments values. + """ + + __slots__ = ('arguments', '_signature', '__weakref__') + + def __init__(self, signature, arguments): + self.arguments = arguments + self._signature = signature + + @property + def signature(self): + return self._signature + + @property + def args(self): + args = [] + for param_name, param in self._signature.parameters.items(): + if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): + break + + try: + arg = self.arguments[param_name] + except KeyError: + # We're done here. Other arguments + # will be mapped in 'BoundArguments.kwargs' + break + else: + if param.kind == _VAR_POSITIONAL: + # *args + args.extend(arg) + else: + # plain argument + args.append(arg) + + return tuple(args) + + @property + def kwargs(self): + kwargs = {} + kwargs_started = False + for param_name, param in self._signature.parameters.items(): + if not kwargs_started: + if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): + kwargs_started = True + else: + if param_name not in self.arguments: + kwargs_started = True + continue + + if not kwargs_started: + continue + + try: + arg = self.arguments[param_name] + except KeyError: + pass + else: + if param.kind == _VAR_KEYWORD: + # **kwargs + kwargs.update(arg) + else: + # plain keyword argument + kwargs[param_name] = arg + + return kwargs + + def apply_defaults(self): + """Set default values for missing arguments. + + For variable-positional arguments (*args) the default is an + empty tuple. + + For variable-keyword arguments (**kwargs) the default is an + empty dict. + """ + arguments = self.arguments + new_arguments = [] + for name, param in self._signature.parameters.items(): + try: + new_arguments.append((name, arguments[name])) + except KeyError: + if param.default is not _empty: + val = param.default + elif param.kind is _VAR_POSITIONAL: + val = () + elif param.kind is _VAR_KEYWORD: + val = {} + else: + # This BoundArguments was likely produced by + # Signature.bind_partial(). + continue + new_arguments.append((name, val)) + self.arguments = dict(new_arguments) + + def __eq__(self, other): + if self is other: + return True + if not isinstance(other, BoundArguments): + return NotImplemented + return (self.signature == other.signature and + self.arguments == other.arguments) + + def __setstate__(self, state): + self._signature = state['_signature'] + self.arguments = state['arguments'] + + def __getstate__(self): + return {'_signature': self._signature, 'arguments': self.arguments} + + def __repr__(self): + args = [] + for arg, value in self.arguments.items(): + args.append('{}={!r}'.format(arg, value)) + return '<{} ({})>'.format(self.__class__.__name__, ', '.join(args)) + + +class Signature: + """A Signature object represents the overall signature of a function. + It stores a Parameter object for each parameter accepted by the + function, as well as information specific to the function itself. 
+ + A Signature object has the following public attributes and methods: + + * parameters : OrderedDict + An ordered mapping of parameters' names to the corresponding + Parameter objects (keyword-only arguments are in the same order + as listed in `code.co_varnames`). + * return_annotation : object + The annotation for the return type of the function if specified. + If the function has no annotation for its return type, this + attribute is set to `Signature.empty`. + * bind(*args, **kwargs) -> BoundArguments + Creates a mapping from positional and keyword arguments to + parameters. + * bind_partial(*args, **kwargs) -> BoundArguments + Creates a partial mapping from positional and keyword arguments + to parameters (simulating 'functools.partial' behavior.) + """ + + __slots__ = ('_return_annotation', '_parameters') + + _parameter_cls = Parameter + _bound_arguments_cls = BoundArguments + + empty = _empty + + def __init__(self, parameters=None, *, return_annotation=_empty, + __validate_parameters__=True): + """Constructs Signature from the given list of Parameter + objects and 'return_annotation'. All arguments are optional. + """ + + if parameters is None: + params = OrderedDict() + else: + if __validate_parameters__: + params = OrderedDict() + top_kind = _POSITIONAL_ONLY + kind_defaults = False + + for param in parameters: + kind = param.kind + name = param.name + + if kind < top_kind: + msg = ( + 'wrong parameter order: {} parameter before {} ' + 'parameter' + ) + msg = msg.format(top_kind.description, + kind.description) + raise ValueError(msg) + elif kind > top_kind: + kind_defaults = False + top_kind = kind + + if kind in (_POSITIONAL_ONLY, _POSITIONAL_OR_KEYWORD): + if param.default is _empty: + if kind_defaults: + # No default for this parameter, but the + # previous parameter of the same kind had + # a default + msg = 'non-default argument follows default ' \ + 'argument' + raise ValueError(msg) + else: + # There is a default for this parameter. + kind_defaults = True + + if name in params: + msg = 'duplicate parameter name: {!r}'.format(name) + raise ValueError(msg) + + params[name] = param + else: + params = OrderedDict((param.name, param) for param in parameters) + + self._parameters = types.MappingProxyType(params) + self._return_annotation = return_annotation + + @classmethod + def from_function(cls, func): + """Constructs Signature for the given python function. + + Deprecated since Python 3.5, use `Signature.from_callable()`. + """ + + warnings.warn("inspect.Signature.from_function() is deprecated since " + "Python 3.5, use Signature.from_callable()", + DeprecationWarning, stacklevel=2) + return _signature_from_function(cls, func) + + @classmethod + def from_builtin(cls, func): + """Constructs Signature for the given builtin function. + + Deprecated since Python 3.5, use `Signature.from_callable()`. 
+ """ + + warnings.warn("inspect.Signature.from_builtin() is deprecated since " + "Python 3.5, use Signature.from_callable()", + DeprecationWarning, stacklevel=2) + return _signature_from_builtin(cls, func) + + @classmethod + def from_callable(cls, obj, *, + follow_wrapped=True, globals=None, locals=None, eval_str=False): + """Constructs Signature for the given callable object.""" + return _signature_from_callable(obj, sigcls=cls, + follow_wrapper_chains=follow_wrapped, + globals=globals, locals=locals, eval_str=eval_str) + + @property + def parameters(self): + return self._parameters + + @property + def return_annotation(self): + return self._return_annotation + + def replace(self, *, parameters=_void, return_annotation=_void): + """Creates a customized copy of the Signature. + Pass 'parameters' and/or 'return_annotation' arguments + to override them in the new copy. + """ + + if parameters is _void: + parameters = self.parameters.values() + + if return_annotation is _void: + return_annotation = self._return_annotation + + return type(self)(parameters, + return_annotation=return_annotation) + + def _hash_basis(self): + params = tuple(param for param in self.parameters.values() + if param.kind != _KEYWORD_ONLY) + + kwo_params = {param.name: param for param in self.parameters.values() + if param.kind == _KEYWORD_ONLY} + + return params, kwo_params, self.return_annotation + + def __hash__(self): + params, kwo_params, return_annotation = self._hash_basis() + kwo_params = frozenset(kwo_params.values()) + return hash((params, kwo_params, return_annotation)) + + def __eq__(self, other): + if self is other: + return True + if not isinstance(other, Signature): + return NotImplemented + return self._hash_basis() == other._hash_basis() + + def _bind(self, args, kwargs, *, partial=False): + """Private method. Don't use directly.""" + + arguments = {} + + parameters = iter(self.parameters.values()) + parameters_ex = () + arg_vals = iter(args) + + while True: + # Let's iterate through the positional arguments and corresponding + # parameters + try: + arg_val = next(arg_vals) + except StopIteration: + # No more positional arguments + try: + param = next(parameters) + except StopIteration: + # No more parameters. That's it. Just need to check that + # we have no `kwargs` after this while loop + break + else: + if param.kind == _VAR_POSITIONAL: + # That's OK, just empty *args. Let's start parsing + # kwargs + break + elif param.name in kwargs: + if param.kind == _POSITIONAL_ONLY: + msg = '{arg!r} parameter is positional only, ' \ + 'but was passed as a keyword' + msg = msg.format(arg=param.name) + raise TypeError(msg) from None + parameters_ex = (param,) + break + elif (param.kind == _VAR_KEYWORD or + param.default is not _empty): + # That's fine too - we have a default value for this + # parameter. 
So, lets start parsing `kwargs`, starting + # with the current parameter + parameters_ex = (param,) + break + else: + # No default, not VAR_KEYWORD, not VAR_POSITIONAL, + # not in `kwargs` + if partial: + parameters_ex = (param,) + break + else: + msg = 'missing a required argument: {arg!r}' + msg = msg.format(arg=param.name) + raise TypeError(msg) from None + else: + # We have a positional argument to process + try: + param = next(parameters) + except StopIteration: + raise TypeError('too many positional arguments') from None + else: + if param.kind in (_VAR_KEYWORD, _KEYWORD_ONLY): + # Looks like we have no parameter for this positional + # argument + raise TypeError( + 'too many positional arguments') from None + + if param.kind == _VAR_POSITIONAL: + # We have an '*args'-like argument, let's fill it with + # all positional arguments we have left and move on to + # the next phase + values = [arg_val] + values.extend(arg_vals) + arguments[param.name] = tuple(values) + break + + if param.name in kwargs and param.kind != _POSITIONAL_ONLY: + raise TypeError( + 'multiple values for argument {arg!r}'.format( + arg=param.name)) from None + + arguments[param.name] = arg_val + + # Now, we iterate through the remaining parameters to process + # keyword arguments + kwargs_param = None + for param in itertools.chain(parameters_ex, parameters): + if param.kind == _VAR_KEYWORD: + # Memorize that we have a '**kwargs'-like parameter + kwargs_param = param + continue + + if param.kind == _VAR_POSITIONAL: + # Named arguments don't refer to '*args'-like parameters. + # We only arrive here if the positional arguments ended + # before reaching the last parameter before *args. + continue + + param_name = param.name + try: + arg_val = kwargs.pop(param_name) + except KeyError: + # We have no value for this parameter. It's fine though, + # if it has a default value, or it is an '*args'-like + # parameter, left alone by the processing of positional + # arguments. + if (not partial and param.kind != _VAR_POSITIONAL and + param.default is _empty): + raise TypeError('missing a required argument: {arg!r}'. \ + format(arg=param_name)) from None + + else: + if param.kind == _POSITIONAL_ONLY: + # This should never happen in case of a properly built + # Signature object (but let's have this check here + # to ensure correct behaviour just in case) + raise TypeError('{arg!r} parameter is positional only, ' + 'but was passed as a keyword'. \ + format(arg=param.name)) + + arguments[param_name] = arg_val + + if kwargs: + if kwargs_param is not None: + # Process our '**kwargs'-like parameter + arguments[kwargs_param.name] = kwargs + else: + raise TypeError( + 'got an unexpected keyword argument {arg!r}'.format( + arg=next(iter(kwargs)))) + + return self._bound_arguments_cls(self, arguments) + + def bind(self, /, *args, **kwargs): + """Get a BoundArguments object, that maps the passed `args` + and `kwargs` to the function's signature. Raises `TypeError` + if the passed arguments can not be bound. + """ + return self._bind(args, kwargs) + + def bind_partial(self, /, *args, **kwargs): + """Get a BoundArguments object, that partially maps the + passed `args` and `kwargs` to the function's signature. + Raises `TypeError` if the passed arguments can not be bound. 
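+
+        Example (illustrative):
+
+            >>> def f(a, b=1): pass
+            >>> signature(f).bind_partial(2).arguments
+            {'a': 2}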
+ """ + return self._bind(args, kwargs, partial=True) + + def __reduce__(self): + return (type(self), + (tuple(self._parameters.values()),), + {'_return_annotation': self._return_annotation}) + + def __setstate__(self, state): + self._return_annotation = state['_return_annotation'] + + def __repr__(self): + return '<{} {}>'.format(self.__class__.__name__, self) + + def __str__(self): + result = [] + render_pos_only_separator = False + render_kw_only_separator = True + for param in self.parameters.values(): + formatted = str(param) + + kind = param.kind + + if kind == _POSITIONAL_ONLY: + render_pos_only_separator = True + elif render_pos_only_separator: + # It's not a positional-only parameter, and the flag + # is set to 'True' (there were pos-only params before.) + result.append('/') + render_pos_only_separator = False + + if kind == _VAR_POSITIONAL: + # OK, we have an '*args'-like parameter, so we won't need + # a '*' to separate keyword-only arguments + render_kw_only_separator = False + elif kind == _KEYWORD_ONLY and render_kw_only_separator: + # We have a keyword-only parameter to render and we haven't + # rendered an '*args'-like parameter before, so add a '*' + # separator to the parameters list ("foo(arg1, *, arg2)" case) + result.append('*') + # This condition should be only triggered once, so + # reset the flag + render_kw_only_separator = False + + result.append(formatted) + + if render_pos_only_separator: + # There were only positional-only parameters, hence the + # flag was not reset to 'False' + result.append('/') + + rendered = '({})'.format(', '.join(result)) + + if self.return_annotation is not _empty: + anno = formatannotation(self.return_annotation) + rendered += ' -> {}'.format(anno) + + return rendered + + +def signature(obj, *, follow_wrapped=True, globals=None, locals=None, eval_str=False): + """Get a signature object for the passed callable.""" + return Signature.from_callable(obj, follow_wrapped=follow_wrapped, + globals=globals, locals=locals, eval_str=eval_str) + + +def _main(): + """ Logic for inspecting an object given at command line """ + import argparse + import importlib + + parser = argparse.ArgumentParser() + parser.add_argument( + 'object', + help="The object to be analysed. 
" + "It supports the 'module:qualname' syntax") + parser.add_argument( + '-d', '--details', action='store_true', + help='Display info about the module rather than its source code') + + args = parser.parse_args() + + target = args.object + mod_name, has_attrs, attrs = target.partition(":") + try: + obj = module = importlib.import_module(mod_name) + except Exception as exc: + msg = "Failed to import {} ({}: {})".format(mod_name, + type(exc).__name__, + exc) + print(msg, file=sys.stderr) + sys.exit(2) + + if has_attrs: + parts = attrs.split(".") + obj = module + for part in parts: + obj = getattr(obj, part) + + if module.__name__ in sys.builtin_module_names: + print("Can't get info for builtin modules.", file=sys.stderr) + sys.exit(1) + + if args.details: + print('Target: {}'.format(target)) + print('Origin: {}'.format(getsourcefile(module))) + print('Cached: {}'.format(module.__cached__)) + if obj is module: + print('Loader: {}'.format(repr(module.__loader__))) + if hasattr(module, '__path__'): + print('Submodule search path: {}'.format(module.__path__)) + else: + try: + __, lineno = findsource(obj) + except Exception: + pass + else: + print('Line: {}'.format(lineno)) + + print('\n') + else: + print(getsource(obj)) + + +if __name__ == "__main__": + _main() diff --git a/llava/lib/python3.10/lzma.py b/llava/lib/python3.10/lzma.py new file mode 100644 index 0000000000000000000000000000000000000000..800f52198fbb794077fe43425df83db44e13960d --- /dev/null +++ b/llava/lib/python3.10/lzma.py @@ -0,0 +1,356 @@ +"""Interface to the liblzma compression library. + +This module provides a class for reading and writing compressed files, +classes for incremental (de)compression, and convenience functions for +one-shot (de)compression. + +These classes and functions support both the XZ and legacy LZMA +container formats, as well as raw compressed data streams. +""" + +__all__ = [ + "CHECK_NONE", "CHECK_CRC32", "CHECK_CRC64", "CHECK_SHA256", + "CHECK_ID_MAX", "CHECK_UNKNOWN", + "FILTER_LZMA1", "FILTER_LZMA2", "FILTER_DELTA", "FILTER_X86", "FILTER_IA64", + "FILTER_ARM", "FILTER_ARMTHUMB", "FILTER_POWERPC", "FILTER_SPARC", + "FORMAT_AUTO", "FORMAT_XZ", "FORMAT_ALONE", "FORMAT_RAW", + "MF_HC3", "MF_HC4", "MF_BT2", "MF_BT3", "MF_BT4", + "MODE_FAST", "MODE_NORMAL", "PRESET_DEFAULT", "PRESET_EXTREME", + + "LZMACompressor", "LZMADecompressor", "LZMAFile", "LZMAError", + "open", "compress", "decompress", "is_check_supported", +] + +import builtins +import io +import os +from _lzma import * +from _lzma import _encode_filter_properties, _decode_filter_properties +import _compression + + +_MODE_CLOSED = 0 +_MODE_READ = 1 +# Value 2 no longer used +_MODE_WRITE = 3 + + +class LZMAFile(_compression.BaseStream): + + """A file object providing transparent LZMA (de)compression. + + An LZMAFile can act as a wrapper for an existing file object, or + refer directly to a named file on disk. + + Note that LZMAFile provides a *binary* file interface - data read + is returned as bytes, and data to be written must be given as bytes. + """ + + def __init__(self, filename=None, mode="r", *, + format=None, check=-1, preset=None, filters=None): + """Open an LZMA-compressed file in binary mode. + + filename can be either an actual file name (given as a str, + bytes, or PathLike object), in which case the named file is + opened, or it can be an existing file object to read from or + write to. + + mode can be "r" for reading (default), "w" for (over)writing, + "x" for creating exclusively, or "a" for appending. 
These can + equivalently be given as "rb", "wb", "xb" and "ab" respectively. + + format specifies the container format to use for the file. + If mode is "r", this defaults to FORMAT_AUTO. Otherwise, the + default is FORMAT_XZ. + + check specifies the integrity check to use. This argument can + only be used when opening a file for writing. For FORMAT_XZ, + the default is CHECK_CRC64. FORMAT_ALONE and FORMAT_RAW do not + support integrity checks - for these formats, check must be + omitted, or be CHECK_NONE. + + When opening a file for reading, the *preset* argument is not + meaningful, and should be omitted. The *filters* argument should + also be omitted, except when format is FORMAT_RAW (in which case + it is required). + + When opening a file for writing, the settings used by the + compressor can be specified either as a preset compression + level (with the *preset* argument), or in detail as a custom + filter chain (with the *filters* argument). For FORMAT_XZ and + FORMAT_ALONE, the default is to use the PRESET_DEFAULT preset + level. For FORMAT_RAW, the caller must always specify a filter + chain; the raw compressor does not support preset compression + levels. + + preset (if provided) should be an integer in the range 0-9, + optionally OR-ed with the constant PRESET_EXTREME. + + filters (if provided) should be a sequence of dicts. Each dict + should have an entry for "id" indicating ID of the filter, plus + additional entries for options to the filter. + """ + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + + if mode in ("r", "rb"): + if check != -1: + raise ValueError("Cannot specify an integrity check " + "when opening a file for reading") + if preset is not None: + raise ValueError("Cannot specify a preset compression " + "level when opening a file for reading") + if format is None: + format = FORMAT_AUTO + mode_code = _MODE_READ + elif mode in ("w", "wb", "a", "ab", "x", "xb"): + if format is None: + format = FORMAT_XZ + mode_code = _MODE_WRITE + self._compressor = LZMACompressor(format=format, check=check, + preset=preset, filters=filters) + self._pos = 0 + else: + raise ValueError("Invalid mode: {!r}".format(mode)) + + if isinstance(filename, (str, bytes, os.PathLike)): + if "b" not in mode: + mode += "b" + self._fp = builtins.open(filename, mode) + self._closefp = True + self._mode = mode_code + elif hasattr(filename, "read") or hasattr(filename, "write"): + self._fp = filename + self._mode = mode_code + else: + raise TypeError("filename must be a str, bytes, file or PathLike object") + + if self._mode == _MODE_READ: + raw = _compression.DecompressReader(self._fp, LZMADecompressor, + trailing_error=LZMAError, format=format, filters=filters) + self._buffer = io.BufferedReader(raw) + + def close(self): + """Flush and close the file. + + May be called more than once without error. Once the file is + closed, any other operation on it will raise a ValueError. 
+ """ + if self._mode == _MODE_CLOSED: + return + try: + if self._mode == _MODE_READ: + self._buffer.close() + self._buffer = None + elif self._mode == _MODE_WRITE: + self._fp.write(self._compressor.flush()) + self._compressor = None + finally: + try: + if self._closefp: + self._fp.close() + finally: + self._fp = None + self._closefp = False + self._mode = _MODE_CLOSED + + @property + def closed(self): + """True if this file is closed.""" + return self._mode == _MODE_CLOSED + + def fileno(self): + """Return the file descriptor for the underlying file.""" + self._check_not_closed() + return self._fp.fileno() + + def seekable(self): + """Return whether the file supports seeking.""" + return self.readable() and self._buffer.seekable() + + def readable(self): + """Return whether the file was opened for reading.""" + self._check_not_closed() + return self._mode == _MODE_READ + + def writable(self): + """Return whether the file was opened for writing.""" + self._check_not_closed() + return self._mode == _MODE_WRITE + + def peek(self, size=-1): + """Return buffered data without advancing the file position. + + Always returns at least one byte of data, unless at EOF. + The exact number of bytes returned is unspecified. + """ + self._check_can_read() + # Relies on the undocumented fact that BufferedReader.peek() always + # returns at least one byte (except at EOF) + return self._buffer.peek(size) + + def read(self, size=-1): + """Read up to size uncompressed bytes from the file. + + If size is negative or omitted, read until EOF is reached. + Returns b"" if the file is already at EOF. + """ + self._check_can_read() + return self._buffer.read(size) + + def read1(self, size=-1): + """Read up to size uncompressed bytes, while trying to avoid + making multiple reads from the underlying stream. Reads up to a + buffer's worth of data if size is negative. + + Returns b"" if the file is at EOF. + """ + self._check_can_read() + if size < 0: + size = io.DEFAULT_BUFFER_SIZE + return self._buffer.read1(size) + + def readline(self, size=-1): + """Read a line of uncompressed bytes from the file. + + The terminating newline (if present) is retained. If size is + non-negative, no more than size bytes will be read (in which + case the line may be incomplete). Returns b'' if already at EOF. + """ + self._check_can_read() + return self._buffer.readline(size) + + def write(self, data): + """Write a bytes object to the file. + + Returns the number of uncompressed bytes written, which is + always the length of data in bytes. Note that due to buffering, + the file on disk may not reflect the data written until close() + is called. + """ + self._check_can_write() + if isinstance(data, (bytes, bytearray)): + length = len(data) + else: + # accept any data that supports the buffer protocol + data = memoryview(data) + length = data.nbytes + + compressed = self._compressor.compress(data) + self._fp.write(compressed) + self._pos += length + return length + + def seek(self, offset, whence=io.SEEK_SET): + """Change the file position. + + The new position is specified by offset, relative to the + position indicated by whence. Possible values for whence are: + + 0: start of stream (default): offset must not be negative + 1: current stream position + 2: end of stream; offset must not be positive + + Returns the new file position. + + Note that seeking is emulated, so depending on the parameters, + this operation may be extremely slow. 
+ """ + self._check_can_seek() + return self._buffer.seek(offset, whence) + + def tell(self): + """Return the current file position.""" + self._check_not_closed() + if self._mode == _MODE_READ: + return self._buffer.tell() + return self._pos + + +def open(filename, mode="rb", *, + format=None, check=-1, preset=None, filters=None, + encoding=None, errors=None, newline=None): + """Open an LZMA-compressed file in binary or text mode. + + filename can be either an actual file name (given as a str, bytes, + or PathLike object), in which case the named file is opened, or it + can be an existing file object to read from or write to. + + The mode argument can be "r", "rb" (default), "w", "wb", "x", "xb", + "a", or "ab" for binary mode, or "rt", "wt", "xt", or "at" for text + mode. + + The format, check, preset and filters arguments specify the + compression settings, as for LZMACompressor, LZMADecompressor and + LZMAFile. + + For binary mode, this function is equivalent to the LZMAFile + constructor: LZMAFile(filename, mode, ...). In this case, the + encoding, errors and newline arguments must not be provided. + + For text mode, an LZMAFile object is created, and wrapped in an + io.TextIOWrapper instance with the specified encoding, error + handling behavior, and line ending(s). + + """ + if "t" in mode: + if "b" in mode: + raise ValueError("Invalid mode: %r" % (mode,)) + else: + if encoding is not None: + raise ValueError("Argument 'encoding' not supported in binary mode") + if errors is not None: + raise ValueError("Argument 'errors' not supported in binary mode") + if newline is not None: + raise ValueError("Argument 'newline' not supported in binary mode") + + lz_mode = mode.replace("t", "") + binary_file = LZMAFile(filename, lz_mode, format=format, check=check, + preset=preset, filters=filters) + + if "t" in mode: + encoding = io.text_encoding(encoding) + return io.TextIOWrapper(binary_file, encoding, errors, newline) + else: + return binary_file + + +def compress(data, format=FORMAT_XZ, check=-1, preset=None, filters=None): + """Compress a block of data. + + Refer to LZMACompressor's docstring for a description of the + optional arguments *format*, *check*, *preset* and *filters*. + + For incremental compression, use an LZMACompressor instead. + """ + comp = LZMACompressor(format, check, preset, filters) + return comp.compress(data) + comp.flush() + + +def decompress(data, format=FORMAT_AUTO, memlimit=None, filters=None): + """Decompress a block of data. + + Refer to LZMADecompressor's docstring for a description of the + optional arguments *format*, *check* and *filters*. + + For incremental decompression, use an LZMADecompressor instead. + """ + results = [] + while True: + decomp = LZMADecompressor(format, memlimit, filters) + try: + res = decomp.decompress(data) + except LZMAError: + if results: + break # Leftover data is not a valid LZMA/XZ stream; ignore it. + else: + raise # Error on the first iteration; bail out. + results.append(res) + if not decomp.eof: + raise LZMAError("Compressed data ended before the " + "end-of-stream marker was reached") + data = decomp.unused_data + if not data: + break + return b"".join(results) diff --git a/llava/lib/python3.10/pprint.py b/llava/lib/python3.10/pprint.py new file mode 100644 index 0000000000000000000000000000000000000000..d91421f0a6bf60a5a79b0c46050941d58637f180 --- /dev/null +++ b/llava/lib/python3.10/pprint.py @@ -0,0 +1,670 @@ +# Author: Fred L. Drake, Jr. 
+# fdrake@acm.org +# +# This is a simple little module I wrote to make life easier. I didn't +# see anything quite like it in the library, though I may have overlooked +# something. I wrote this when I was trying to read some heavily nested +# tuples with fairly non-descriptive content. This is modeled very much +# after Lisp/Scheme - style pretty-printing of lists. If you find it +# useful, thank small children who sleep at night. + +"""Support to pretty-print lists, tuples, & dictionaries recursively. + +Very simple, but useful, especially in debugging data structures. + +Classes +------- + +PrettyPrinter() + Handle pretty-printing operations onto a stream using a configured + set of formatting parameters. + +Functions +--------- + +pformat() + Format a Python object into a pretty-printed representation. + +pprint() + Pretty-print a Python object to a stream [default is sys.stdout]. + +saferepr() + Generate a 'standard' repr()-like value, but protect against recursive + data structures. + +""" + +import collections as _collections +import dataclasses as _dataclasses +import re +import sys as _sys +import types as _types +from io import StringIO as _StringIO + +__all__ = ["pprint","pformat","isreadable","isrecursive","saferepr", + "PrettyPrinter", "pp"] + + +def pprint(object, stream=None, indent=1, width=80, depth=None, *, + compact=False, sort_dicts=True, underscore_numbers=False): + """Pretty-print a Python object to a stream [default is sys.stdout].""" + printer = PrettyPrinter( + stream=stream, indent=indent, width=width, depth=depth, + compact=compact, sort_dicts=sort_dicts, + underscore_numbers=underscore_numbers) + printer.pprint(object) + +def pformat(object, indent=1, width=80, depth=None, *, + compact=False, sort_dicts=True, underscore_numbers=False): + """Format a Python object into a pretty-printed representation.""" + return PrettyPrinter(indent=indent, width=width, depth=depth, + compact=compact, sort_dicts=sort_dicts, + underscore_numbers=underscore_numbers).pformat(object) + +def pp(object, *args, sort_dicts=False, **kwargs): + """Pretty-print a Python object""" + pprint(object, *args, sort_dicts=sort_dicts, **kwargs) + +def saferepr(object): + """Version of repr() which can handle recursive data structures.""" + return PrettyPrinter()._safe_repr(object, {}, None, 0)[0] + +def isreadable(object): + """Determine if saferepr(object) is readable by eval().""" + return PrettyPrinter()._safe_repr(object, {}, None, 0)[1] + +def isrecursive(object): + """Determine if object requires a recursive representation.""" + return PrettyPrinter()._safe_repr(object, {}, None, 0)[2] + +class _safe_key: + """Helper function for key functions when sorting unorderable objects. + + The wrapped-object will fallback to a Py2.x style comparison for + unorderable types (sorting first comparing the type name and then by + the obj ids). Does not work recursively, so dict.items() must have + _safe_key applied to both the key and the value. 
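+
+    Illustrative example: mixed, unorderable keys sort without raising;
+    e.g. sorted([2, 'a', (1,)], key=_safe_key) falls back to comparing
+    (type name, id) pairs instead of raising TypeError.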
+ + """ + + __slots__ = ['obj'] + + def __init__(self, obj): + self.obj = obj + + def __lt__(self, other): + try: + return self.obj < other.obj + except TypeError: + return ((str(type(self.obj)), id(self.obj)) < \ + (str(type(other.obj)), id(other.obj))) + +def _safe_tuple(t): + "Helper function for comparing 2-tuples" + return _safe_key(t[0]), _safe_key(t[1]) + +class PrettyPrinter: + def __init__(self, indent=1, width=80, depth=None, stream=None, *, + compact=False, sort_dicts=True, underscore_numbers=False): + """Handle pretty printing operations onto a stream using a set of + configured parameters. + + indent + Number of spaces to indent for each level of nesting. + + width + Attempted maximum number of columns in the output. + + depth + The maximum depth to print out nested structures. + + stream + The desired output stream. If omitted (or false), the standard + output stream available at construction will be used. + + compact + If true, several items will be combined in one line. + + sort_dicts + If true, dict keys are sorted. + + """ + indent = int(indent) + width = int(width) + if indent < 0: + raise ValueError('indent must be >= 0') + if depth is not None and depth <= 0: + raise ValueError('depth must be > 0') + if not width: + raise ValueError('width must be != 0') + self._depth = depth + self._indent_per_level = indent + self._width = width + if stream is not None: + self._stream = stream + else: + self._stream = _sys.stdout + self._compact = bool(compact) + self._sort_dicts = sort_dicts + self._underscore_numbers = underscore_numbers + + def pprint(self, object): + self._format(object, self._stream, 0, 0, {}, 0) + self._stream.write("\n") + + def pformat(self, object): + sio = _StringIO() + self._format(object, sio, 0, 0, {}, 0) + return sio.getvalue() + + def isrecursive(self, object): + return self.format(object, {}, 0, 0)[2] + + def isreadable(self, object): + s, readable, recursive = self.format(object, {}, 0, 0) + return readable and not recursive + + def _format(self, object, stream, indent, allowance, context, level): + objid = id(object) + if objid in context: + stream.write(_recursion(object)) + self._recursive = True + self._readable = False + return + rep = self._repr(object, context, level) + max_width = self._width - indent - allowance + if len(rep) > max_width: + p = self._dispatch.get(type(object).__repr__, None) + if p is not None: + context[objid] = 1 + p(self, object, stream, indent, allowance, context, level + 1) + del context[objid] + return + elif (_dataclasses.is_dataclass(object) and + not isinstance(object, type) and + object.__dataclass_params__.repr and + # Check dataclass has generated repr method. 
+ hasattr(object.__repr__, "__wrapped__") and + "__create_fn__" in object.__repr__.__wrapped__.__qualname__): + context[objid] = 1 + self._pprint_dataclass(object, stream, indent, allowance, context, level + 1) + del context[objid] + return + stream.write(rep) + + def _pprint_dataclass(self, object, stream, indent, allowance, context, level): + cls_name = object.__class__.__name__ + indent += len(cls_name) + 1 + items = [(f.name, getattr(object, f.name)) for f in _dataclasses.fields(object) if f.repr] + stream.write(cls_name + '(') + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(')') + + _dispatch = {} + + def _pprint_dict(self, object, stream, indent, allowance, context, level): + write = stream.write + write('{') + if self._indent_per_level > 1: + write((self._indent_per_level - 1) * ' ') + length = len(object) + if length: + if self._sort_dicts: + items = sorted(object.items(), key=_safe_tuple) + else: + items = object.items() + self._format_dict_items(items, stream, indent, allowance + 1, + context, level) + write('}') + + _dispatch[dict.__repr__] = _pprint_dict + + def _pprint_ordered_dict(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + '(') + self._format(list(object.items()), stream, + indent + len(cls.__name__) + 1, allowance + 1, + context, level) + stream.write(')') + + _dispatch[_collections.OrderedDict.__repr__] = _pprint_ordered_dict + + def _pprint_list(self, object, stream, indent, allowance, context, level): + stream.write('[') + self._format_items(object, stream, indent, allowance + 1, + context, level) + stream.write(']') + + _dispatch[list.__repr__] = _pprint_list + + def _pprint_tuple(self, object, stream, indent, allowance, context, level): + stream.write('(') + endchar = ',)' if len(object) == 1 else ')' + self._format_items(object, stream, indent, allowance + len(endchar), + context, level) + stream.write(endchar) + + _dispatch[tuple.__repr__] = _pprint_tuple + + def _pprint_set(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + typ = object.__class__ + if typ is set: + stream.write('{') + endchar = '}' + else: + stream.write(typ.__name__ + '({') + endchar = '})' + indent += len(typ.__name__) + 1 + object = sorted(object, key=_safe_key) + self._format_items(object, stream, indent, allowance + len(endchar), + context, level) + stream.write(endchar) + + _dispatch[set.__repr__] = _pprint_set + _dispatch[frozenset.__repr__] = _pprint_set + + def _pprint_str(self, object, stream, indent, allowance, context, level): + write = stream.write + if not len(object): + write(repr(object)) + return + chunks = [] + lines = object.splitlines(True) + if level == 1: + indent += 1 + allowance += 1 + max_width1 = max_width = self._width - indent + for i, line in enumerate(lines): + rep = repr(line) + if i == len(lines) - 1: + max_width1 -= allowance + if len(rep) <= max_width1: + chunks.append(rep) + else: + # A list of alternating (non-space, space) strings + parts = re.findall(r'\S*\s*', line) + assert parts + assert not parts[-1] + parts.pop() # drop empty last part + max_width2 = max_width + current = '' + for j, part in enumerate(parts): + candidate = current + part + if j == len(parts) - 1 and i == len(lines) - 1: + max_width2 -= allowance + if len(repr(candidate)) > max_width2: + if current: + chunks.append(repr(current)) + current = part + 
else: + current = candidate + if current: + chunks.append(repr(current)) + if len(chunks) == 1: + write(rep) + return + if level == 1: + write('(') + for i, rep in enumerate(chunks): + if i > 0: + write('\n' + ' '*indent) + write(rep) + if level == 1: + write(')') + + _dispatch[str.__repr__] = _pprint_str + + def _pprint_bytes(self, object, stream, indent, allowance, context, level): + write = stream.write + if len(object) <= 4: + write(repr(object)) + return + parens = level == 1 + if parens: + indent += 1 + allowance += 1 + write('(') + delim = '' + for rep in _wrap_bytes_repr(object, self._width - indent, allowance): + write(delim) + write(rep) + if not delim: + delim = '\n' + ' '*indent + if parens: + write(')') + + _dispatch[bytes.__repr__] = _pprint_bytes + + def _pprint_bytearray(self, object, stream, indent, allowance, context, level): + write = stream.write + write('bytearray(') + self._pprint_bytes(bytes(object), stream, indent + 10, + allowance + 1, context, level + 1) + write(')') + + _dispatch[bytearray.__repr__] = _pprint_bytearray + + def _pprint_mappingproxy(self, object, stream, indent, allowance, context, level): + stream.write('mappingproxy(') + self._format(object.copy(), stream, indent + 13, allowance + 1, + context, level) + stream.write(')') + + _dispatch[_types.MappingProxyType.__repr__] = _pprint_mappingproxy + + def _pprint_simplenamespace(self, object, stream, indent, allowance, context, level): + if type(object) is _types.SimpleNamespace: + # The SimpleNamespace repr is "namespace" instead of the class + # name, so we do the same here. For subclasses; use the class name. + cls_name = 'namespace' + else: + cls_name = object.__class__.__name__ + indent += len(cls_name) + 1 + items = object.__dict__.items() + stream.write(cls_name + '(') + self._format_namespace_items(items, stream, indent, allowance, context, level) + stream.write(')') + + _dispatch[_types.SimpleNamespace.__repr__] = _pprint_simplenamespace + + def _format_dict_items(self, items, stream, indent, allowance, context, + level): + write = stream.write + indent += self._indent_per_level + delimnl = ',\n' + ' ' * indent + last_index = len(items) - 1 + for i, (key, ent) in enumerate(items): + last = i == last_index + rep = self._repr(key, context, level) + write(rep) + write(': ') + self._format(ent, stream, indent + len(rep) + 2, + allowance if last else 1, + context, level) + if not last: + write(delimnl) + + def _format_namespace_items(self, items, stream, indent, allowance, context, level): + write = stream.write + delimnl = ',\n' + ' ' * indent + last_index = len(items) - 1 + for i, (key, ent) in enumerate(items): + last = i == last_index + write(key) + write('=') + if id(ent) in context: + # Special-case representation of recursion to match standard + # recursive dataclass repr. 
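+                # (Illustrative: a dataclass instance whose field refers
+                # back to itself prints as C(attr=...), matching the
+                # recursive_repr()-guarded repr that @dataclass generates.)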
+ write("...") + else: + self._format(ent, stream, indent + len(key) + 1, + allowance if last else 1, + context, level) + if not last: + write(delimnl) + + def _format_items(self, items, stream, indent, allowance, context, level): + write = stream.write + indent += self._indent_per_level + if self._indent_per_level > 1: + write((self._indent_per_level - 1) * ' ') + delimnl = ',\n' + ' ' * indent + delim = '' + width = max_width = self._width - indent + 1 + it = iter(items) + try: + next_ent = next(it) + except StopIteration: + return + last = False + while not last: + ent = next_ent + try: + next_ent = next(it) + except StopIteration: + last = True + max_width -= allowance + width -= allowance + if self._compact: + rep = self._repr(ent, context, level) + w = len(rep) + 2 + if width < w: + width = max_width + if delim: + delim = delimnl + if width >= w: + width -= w + write(delim) + delim = ', ' + write(rep) + continue + write(delim) + delim = delimnl + self._format(ent, stream, indent, + allowance if last else 1, + context, level) + + def _repr(self, object, context, level): + repr, readable, recursive = self.format(object, context.copy(), + self._depth, level) + if not readable: + self._readable = False + if recursive: + self._recursive = True + return repr + + def format(self, object, context, maxlevels, level): + """Format object for a specific context, returning a string + and flags indicating whether the representation is 'readable' + and whether the object represents a recursive construct. + """ + return self._safe_repr(object, context, maxlevels, level) + + def _pprint_default_dict(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + rdf = self._repr(object.default_factory, context, level) + cls = object.__class__ + indent += len(cls.__name__) + 1 + stream.write('%s(%s,\n%s' % (cls.__name__, rdf, ' ' * indent)) + self._pprint_dict(object, stream, indent, allowance + 1, context, level) + stream.write(')') + + _dispatch[_collections.defaultdict.__repr__] = _pprint_default_dict + + def _pprint_counter(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + '({') + if self._indent_per_level > 1: + stream.write((self._indent_per_level - 1) * ' ') + items = object.most_common() + self._format_dict_items(items, stream, + indent + len(cls.__name__) + 1, allowance + 2, + context, level) + stream.write('})') + + _dispatch[_collections.Counter.__repr__] = _pprint_counter + + def _pprint_chain_map(self, object, stream, indent, allowance, context, level): + if not len(object.maps): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + '(') + indent += len(cls.__name__) + 1 + for i, m in enumerate(object.maps): + if i == len(object.maps) - 1: + self._format(m, stream, indent, allowance + 1, context, level) + stream.write(')') + else: + self._format(m, stream, indent, 1, context, level) + stream.write(',\n' + ' ' * indent) + + _dispatch[_collections.ChainMap.__repr__] = _pprint_chain_map + + def _pprint_deque(self, object, stream, indent, allowance, context, level): + if not len(object): + stream.write(repr(object)) + return + cls = object.__class__ + stream.write(cls.__name__ + '(') + indent += len(cls.__name__) + 1 + stream.write('[') + if object.maxlen is None: + self._format_items(object, stream, indent, allowance + 2, + context, level) + stream.write('])') + else: + 
            self._format_items(object, stream, indent, 2,
+                               context, level)
+            rml = self._repr(object.maxlen, context, level)
+            stream.write('],\n%smaxlen=%s)' % (' ' * indent, rml))
+
+    _dispatch[_collections.deque.__repr__] = _pprint_deque
+
+    def _pprint_user_dict(self, object, stream, indent, allowance, context, level):
+        self._format(object.data, stream, indent, allowance, context, level - 1)
+
+    _dispatch[_collections.UserDict.__repr__] = _pprint_user_dict
+
+    def _pprint_user_list(self, object, stream, indent, allowance, context, level):
+        self._format(object.data, stream, indent, allowance, context, level - 1)
+
+    _dispatch[_collections.UserList.__repr__] = _pprint_user_list
+
+    def _pprint_user_string(self, object, stream, indent, allowance, context, level):
+        self._format(object.data, stream, indent, allowance, context, level - 1)
+
+    _dispatch[_collections.UserString.__repr__] = _pprint_user_string
+
+    def _safe_repr(self, object, context, maxlevels, level):
+        # Return triple (repr_string, isreadable, isrecursive).
+        typ = type(object)
+        if typ in _builtin_scalars:
+            return repr(object), True, False
+
+        r = getattr(typ, "__repr__", None)
+
+        if issubclass(typ, int) and r is int.__repr__:
+            if self._underscore_numbers:
+                return f"{object:_d}", True, False
+            else:
+                return repr(object), True, False
+
+        if issubclass(typ, dict) and r is dict.__repr__:
+            if not object:
+                return "{}", True, False
+            objid = id(object)
+            if maxlevels and level >= maxlevels:
+                return "{...}", False, objid in context
+            if objid in context:
+                return _recursion(object), False, True
+            context[objid] = 1
+            readable = True
+            recursive = False
+            components = []
+            append = components.append
+            level += 1
+            if self._sort_dicts:
+                items = sorted(object.items(), key=_safe_tuple)
+            else:
+                items = object.items()
+            for k, v in items:
+                krepr, kreadable, krecur = self.format(
+                    k, context, maxlevels, level)
+                vrepr, vreadable, vrecur = self.format(
+                    v, context, maxlevels, level)
+                append("%s: %s" % (krepr, vrepr))
+                readable = readable and kreadable and vreadable
+                if krecur or vrecur:
+                    recursive = True
+            del context[objid]
+            return "{%s}" % ", ".join(components), readable, recursive
+
+        if (issubclass(typ, list) and r is list.__repr__) or \
+           (issubclass(typ, tuple) and r is tuple.__repr__):
+            if issubclass(typ, list):
+                if not object:
+                    return "[]", True, False
+                format = "[%s]"
+            elif len(object) == 1:
+                format = "(%s,)"
+            else:
+                if not object:
+                    return "()", True, False
+                format = "(%s)"
+            objid = id(object)
+            if maxlevels and level >= maxlevels:
+                return format % "...", False, objid in context
+            if objid in context:
+                return _recursion(object), False, True
+            context[objid] = 1
+            readable = True
+            recursive = False
+            components = []
+            append = components.append
+            level += 1
+            for o in object:
+                orepr, oreadable, orecur = self.format(
+                    o, context, maxlevels, level)
+                append(orepr)
+                if not oreadable:
+                    readable = False
+                if orecur:
+                    recursive = True
+            del context[objid]
+            return format % ", ".join(components), readable, recursive
+
+        rep = repr(object)
+        return rep, (rep and not rep.startswith('<')), False
+
+_builtin_scalars = frozenset({str, bytes, bytearray, float, complex,
+                              bool, type(None)})
+
+def _recursion(object):
+    return ("<Recursion on %s with id=%s>"
+            % (type(object).__name__, id(object)))
+
+
+def _perfcheck(object=None):
+    import time
+    if object is None:
+        object = [("string", (1, 2), [3, 4], {5: 6, 7: 8})] * 100000
+    p = PrettyPrinter()
+    t1 = time.perf_counter()
+    p._safe_repr(object, {}, None, 0)
+    t2 = time.perf_counter()
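+    # Second pass: time full pretty-printing (layout included) for comparison.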
+ p.pformat(object) + t3 = time.perf_counter() + print("_safe_repr:", t2 - t1) + print("pformat:", t3 - t2) + +def _wrap_bytes_repr(object, width, allowance): + current = b'' + last = len(object) // 4 * 4 + for i in range(0, len(object), 4): + part = object[i: i+4] + candidate = current + part + if i == last: + width -= allowance + if len(repr(candidate)) > width: + if current: + yield repr(current) + current = part + else: + current = candidate + if current: + yield repr(current) + +if __name__ == "__main__": + _perfcheck() diff --git a/llava/lib/python3.10/profile.py b/llava/lib/python3.10/profile.py new file mode 100644 index 0000000000000000000000000000000000000000..90c4e4c9ff583e43e164179c0c6fd37e22434e76 --- /dev/null +++ b/llava/lib/python3.10/profile.py @@ -0,0 +1,611 @@ +#! /usr/bin/env python3 +# +# Class for profiling python code. rev 1.0 6/2/94 +# +# Written by James Roskind +# Based on prior profile module by Sjoerd Mullender... +# which was hacked somewhat by: Guido van Rossum + +"""Class for profiling Python code.""" + +# Copyright Disney Enterprises, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific language +# governing permissions and limitations under the License. + + +import io +import sys +import time +import marshal + +__all__ = ["run", "runctx", "Profile"] + +# Sample timer for use with +#i_count = 0 +#def integer_timer(): +# global i_count +# i_count = i_count + 1 +# return i_count +#itimes = integer_timer # replace with C coded timer returning integers + +class _Utils: + """Support class for utility functions which are shared by + profile.py and cProfile.py modules. + Not supposed to be used directly. + """ + + def __init__(self, profiler): + self.profiler = profiler + + def run(self, statement, filename, sort): + prof = self.profiler() + try: + prof.run(statement) + except SystemExit: + pass + finally: + self._show(prof, filename, sort) + + def runctx(self, statement, globals, locals, filename, sort): + prof = self.profiler() + try: + prof.runctx(statement, globals, locals) + except SystemExit: + pass + finally: + self._show(prof, filename, sort) + + def _show(self, prof, filename, sort): + if filename is not None: + prof.dump_stats(filename) + else: + prof.print_stats(sort) + + +#************************************************************************** +# The following are the static member functions for the profiler class +# Note that an instance of Profile() is *not* needed to call them. +#************************************************************************** + +def run(statement, filename=None, sort=-1): + """Run statement under profiler optionally saving results in filename + + This function takes a single argument that can be passed to the + "exec" statement, and an optional file name. In all cases this + routine attempts to "exec" its first argument and gather profiling + statistics from the execution. 
If no file name is present, then this + function automatically prints a simple profiling report, sorted by the + standard name string (file/line/function-name) that is presented in + each line. + """ + return _Utils(Profile).run(statement, filename, sort) + +def runctx(statement, globals, locals, filename=None, sort=-1): + """Run statement under profiler, supplying your own globals and locals, + optionally saving results in filename. + + statement and filename have the same semantics as profile.run + """ + return _Utils(Profile).runctx(statement, globals, locals, filename, sort) + + +class Profile: + """Profiler class. + + self.cur is always a tuple. Each such tuple corresponds to a stack + frame that is currently active (self.cur[-2]). The following are the + definitions of its members. We use this external "parallel stack" to + avoid contaminating the program that we are profiling. (old profiler + used to write into the frames local dictionary!!) Derived classes + can change the definition of some entries, as long as they leave + [-2:] intact (frame and previous tuple). In case an internal error is + detected, the -3 element is used as the function name. + + [ 0] = Time that needs to be charged to the parent frame's function. + It is used so that a function call will not have to access the + timing data for the parent frame. + [ 1] = Total time spent in this frame's function, excluding time in + subfunctions (this latter is tallied in cur[2]). + [ 2] = Total time spent in subfunctions, excluding time executing the + frame's function (this latter is tallied in cur[1]). + [-3] = Name of the function that corresponds to this frame. + [-2] = Actual frame that we correspond to (used to sync exception handling). + [-1] = Our parent 6-tuple (corresponds to frame.f_back). + + Timing data for each function is stored as a 5-tuple in the dictionary + self.timings[]. The index is always the name stored in self.cur[-3]. + The following are the definitions of the members: + + [0] = The number of times this function was called, not counting direct + or indirect recursion, + [1] = Number of times this function appears on the stack, minus one + [2] = Total time spent internal to this function + [3] = Cumulative time that this function was present on the stack. In + non-recursive functions, this is the total execution time from start + to finish of each invocation of a function, including time spent in + all subfunctions. + [4] = A dictionary indicating for each function name, the number of times + it was called by us. + """ + + bias = 0 # calibration constant + + def __init__(self, timer=None, bias=None): + self.timings = {} + self.cur = None + self.cmd = "" + self.c_func_name = "" + + if bias is None: + bias = self.bias + self.bias = bias # Materialize in local dict for lookup speed. + + if not timer: + self.timer = self.get_time = time.process_time + self.dispatcher = self.trace_dispatch_i + else: + self.timer = timer + t = self.timer() # test out timer function + try: + length = len(t) + except TypeError: + self.get_time = timer + self.dispatcher = self.trace_dispatch_i + else: + if length == 2: + self.dispatcher = self.trace_dispatch + else: + self.dispatcher = self.trace_dispatch_l + # This get_time() implementation needs to be defined + # here to capture the passed-in timer in the parameter + # list (for performance). Note that we can't assume + # the timer() result contains two values in all + # cases. 
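+                # (Illustrative assumption: a caller passing e.g.
+                # timer=lambda: os.times()[:2] ends up here, and
+                # get_time then sums user and system CPU time.)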
+ def get_time_timer(timer=timer, sum=sum): + return sum(timer()) + self.get_time = get_time_timer + self.t = self.get_time() + self.simulate_call('profiler') + + # Heavily optimized dispatch routine for time.process_time() timer + + def trace_dispatch(self, frame, event, arg): + timer = self.timer + t = timer() + t = t[0] + t[1] - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame,t): + t = timer() + self.t = t[0] + t[1] + else: + r = timer() + self.t = r[0] + r[1] - t # put back unrecorded delta + + # Dispatch routine for best timer program (return = scalar, fastest if + # an integer but float works too -- and time.process_time() relies on that). + + def trace_dispatch_i(self, frame, event, arg): + timer = self.timer + t = timer() - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame, t): + self.t = timer() + else: + self.t = timer() - t # put back unrecorded delta + + # Dispatch routine for macintosh (timer returns time in ticks of + # 1/60th second) + + def trace_dispatch_mac(self, frame, event, arg): + timer = self.timer + t = timer()/60.0 - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame, t): + self.t = timer()/60.0 + else: + self.t = timer()/60.0 - t # put back unrecorded delta + + # SLOW generic dispatch routine for timer returning lists of numbers + + def trace_dispatch_l(self, frame, event, arg): + get_time = self.get_time + t = get_time() - self.t - self.bias + + if event == "c_call": + self.c_func_name = arg.__name__ + + if self.dispatch[event](self, frame, t): + self.t = get_time() + else: + self.t = get_time() - t # put back unrecorded delta + + # In the event handlers, the first 3 elements of self.cur are unpacked + # into vrbls w/ 3-letter names. 
The last two characters are meant to be + # mnemonic: + # _pt self.cur[0] "parent time" time to be charged to parent frame + # _it self.cur[1] "internal time" time spent directly in the function + # _et self.cur[2] "external time" time spent in subfunctions + + def trace_dispatch_exception(self, frame, t): + rpt, rit, ret, rfn, rframe, rcur = self.cur + if (rframe is not frame) and rcur: + return self.trace_dispatch_return(rframe, t) + self.cur = rpt, rit+t, ret, rfn, rframe, rcur + return 1 + + + def trace_dispatch_call(self, frame, t): + if self.cur and frame.f_back is not self.cur[-2]: + rpt, rit, ret, rfn, rframe, rcur = self.cur + if not isinstance(rframe, Profile.fake_frame): + assert rframe.f_back is frame.f_back, ("Bad call", rfn, + rframe, rframe.f_back, + frame, frame.f_back) + self.trace_dispatch_return(rframe, 0) + assert (self.cur is None or \ + frame.f_back is self.cur[-2]), ("Bad call", + self.cur[-3]) + fcode = frame.f_code + fn = (fcode.co_filename, fcode.co_firstlineno, fcode.co_name) + self.cur = (t, 0, 0, fn, frame, self.cur) + timings = self.timings + if fn in timings: + cc, ns, tt, ct, callers = timings[fn] + timings[fn] = cc, ns + 1, tt, ct, callers + else: + timings[fn] = 0, 0, 0, 0, {} + return 1 + + def trace_dispatch_c_call (self, frame, t): + fn = ("", 0, self.c_func_name) + self.cur = (t, 0, 0, fn, frame, self.cur) + timings = self.timings + if fn in timings: + cc, ns, tt, ct, callers = timings[fn] + timings[fn] = cc, ns+1, tt, ct, callers + else: + timings[fn] = 0, 0, 0, 0, {} + return 1 + + def trace_dispatch_return(self, frame, t): + if frame is not self.cur[-2]: + assert frame is self.cur[-2].f_back, ("Bad return", self.cur[-3]) + self.trace_dispatch_return(self.cur[-2], 0) + + # Prefix "r" means part of the Returning or exiting frame. + # Prefix "p" means part of the Previous or Parent or older frame. + + rpt, rit, ret, rfn, frame, rcur = self.cur + rit = rit + t + frame_total = rit + ret + + ppt, pit, pet, pfn, pframe, pcur = rcur + self.cur = ppt, pit + rpt, pet + frame_total, pfn, pframe, pcur + + timings = self.timings + cc, ns, tt, ct, callers = timings[rfn] + if not ns: + # This is the only occurrence of the function on the stack. + # Else this is a (directly or indirectly) recursive call, and + # its cumulative time will get updated when the topmost call to + # it returns. + ct = ct + frame_total + cc = cc + 1 + + if pfn in callers: + callers[pfn] = callers[pfn] + 1 # hack: gather more + # stats such as the amount of time added to ct courtesy + # of this specific call, and the contribution to cc + # courtesy of this call. + else: + callers[pfn] = 1 + + timings[rfn] = cc, ns - 1, tt + rit, ct, callers + + return 1 + + + dispatch = { + "call": trace_dispatch_call, + "exception": trace_dispatch_exception, + "return": trace_dispatch_return, + "c_call": trace_dispatch_c_call, + "c_exception": trace_dispatch_return, # the C function returned + "c_return": trace_dispatch_return, + } + + + # The next few functions play with self.cmd. By carefully preloading + # our parallel stack, we can force the profiled result to include + # an arbitrary string as the name of the calling function. + # We use self.cmd as that string, and the resulting stats look + # very nice :-). 
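+    #
+    # A minimal usage sketch (illustrative, with a hypothetical workload):
+    #
+    #     import profile, pstats
+    #     p = profile.Profile()
+    #     p.run('sum(range(10**6))')
+    #     pstats.Stats(p).sort_stats('cumulative').print_stats()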
+ + def set_cmd(self, cmd): + if self.cur[-1]: return # already set + self.cmd = cmd + self.simulate_call(cmd) + + class fake_code: + def __init__(self, filename, line, name): + self.co_filename = filename + self.co_line = line + self.co_name = name + self.co_firstlineno = 0 + + def __repr__(self): + return repr((self.co_filename, self.co_line, self.co_name)) + + class fake_frame: + def __init__(self, code, prior): + self.f_code = code + self.f_back = prior + + def simulate_call(self, name): + code = self.fake_code('profile', 0, name) + if self.cur: + pframe = self.cur[-2] + else: + pframe = None + frame = self.fake_frame(code, pframe) + self.dispatch['call'](self, frame, 0) + + # collect stats from pending stack, including getting final + # timings for self.cmd frame. + + def simulate_cmd_complete(self): + get_time = self.get_time + t = get_time() - self.t + while self.cur[-1]: + # We *can* cause assertion errors here if + # dispatch_trace_return checks for a frame match! + self.dispatch['return'](self, self.cur[-2], t) + t = 0 + self.t = get_time() - t + + + def print_stats(self, sort=-1): + import pstats + pstats.Stats(self).strip_dirs().sort_stats(sort). \ + print_stats() + + def dump_stats(self, file): + with open(file, 'wb') as f: + self.create_stats() + marshal.dump(self.stats, f) + + def create_stats(self): + self.simulate_cmd_complete() + self.snapshot_stats() + + def snapshot_stats(self): + self.stats = {} + for func, (cc, ns, tt, ct, callers) in self.timings.items(): + callers = callers.copy() + nc = 0 + for callcnt in callers.values(): + nc += callcnt + self.stats[func] = cc, nc, tt, ct, callers + + + # The following two methods can be called by clients to use + # a profiler to profile a statement, given as a string. + + def run(self, cmd): + import __main__ + dict = __main__.__dict__ + return self.runctx(cmd, dict, dict) + + def runctx(self, cmd, globals, locals): + self.set_cmd(cmd) + sys.setprofile(self.dispatcher) + try: + exec(cmd, globals, locals) + finally: + sys.setprofile(None) + return self + + # This method is more useful to profile a single function call. + def runcall(self, func, /, *args, **kw): + self.set_cmd(repr(func)) + sys.setprofile(self.dispatcher) + try: + return func(*args, **kw) + finally: + sys.setprofile(None) + + + #****************************************************************** + # The following calculates the overhead for using a profiler. The + # problem is that it takes a fair amount of time for the profiler + # to stop the stopwatch (from the time it receives an event). + # Similarly, there is a delay from the time that the profiler + # re-starts the stopwatch before the user's code really gets to + # continue. The following code tries to measure the difference on + # a per-event basis. + # + # Note that this difference is only significant if there are a lot of + # events, and relatively little user code per event. For example, + # code with small functions will typically benefit from having the + # profiler calibrated for the current platform. This *could* be + # done on the fly during init() time, but it is not worth the + # effort. Also note that if too large a value specified, then + # execution time on some functions will actually appear as a + # negative number. It is *normal* for some functions (with very + # low call counts) to have such negative stats, even if the + # calibration figure is "correct." 
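+    #
+    # Typical use of the calibration below (an illustrative sketch):
+    #
+    #     bias = Profile().calibrate(100000)
+    #     p = Profile(bias=bias)   # per-event overhead is then subtracted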
+    #
+    # One alternative to profile-time calibration adjustments (i.e.,
+    # adding in the magic little delta during each event) is to track
+    # more carefully the number of events (and cumulatively, the number
+    # of events during sub functions) that are seen.  If this were
+    # done, then the arithmetic could be done after the fact (i.e., at
+    # display time).  Currently, we track only call/return events.
+    # These values can be deduced by examining the callees and callers
+    # vectors for each function.  Hence we *can* almost correct the
+    # internal time figure at print time (note that we currently don't
+    # track exception event processing counts).  Unfortunately, there
+    # is currently no similar information for cumulative sub-function
+    # time.  It would not be hard to "get all this info" at profiler
+    # time.  Specifically, we would have to extend the tuples to keep
+    # counts of this in each frame, and then extend the defs of timing
+    # tuples to include the significant two figures.  I'm a bit fearful
+    # that this additional feature will slow the heavily optimized
+    # event/time ratio (i.e., the profiler would run slower, for a very
+    # low "value added" feature.)
+    #**************************************************************
+
+    def calibrate(self, m, verbose=0):
+        if self.__class__ is not Profile:
+            raise TypeError("Subclasses must override .calibrate().")
+
+        saved_bias = self.bias
+        self.bias = 0
+        try:
+            return self._calibrate_inner(m, verbose)
+        finally:
+            self.bias = saved_bias
+
+    def _calibrate_inner(self, m, verbose):
+        get_time = self.get_time
+
+        # Set up a test case to be run with and without profiling.  Include
+        # lots of calls, because we're trying to quantify stopwatch overhead.
+        # Do not raise any exceptions, though, because we want to know
+        # exactly how many profile events are generated (one call event,
+        # one return event, per Python-level call).
+
+        def f1(n):
+            for i in range(n):
+                x = 1
+
+        def f(m, f1=f1):
+            for i in range(m):
+                f1(100)
+
+        f(m)    # warm up the cache
+
+        # elapsed_noprofile <- time f(m) takes without profiling.
+        t0 = get_time()
+        f(m)
+        t1 = get_time()
+        elapsed_noprofile = t1 - t0
+        if verbose:
+            print("elapsed time without profiling =", elapsed_noprofile)
+
+        # elapsed_profile <- time f(m) takes with profiling.  The difference
+        # is profiling overhead, only some of which the profiler subtracts
+        # out on its own.
+        p = Profile()
+        t0 = get_time()
+        p.runctx('f(m)', globals(), locals())
+        t1 = get_time()
+        elapsed_profile = t1 - t0
+        if verbose:
+            print("elapsed time with profiling =", elapsed_profile)
+
+        # reported_time <- "CPU seconds" the profiler charged to f and f1.
+        total_calls = 0.0
+        reported_time = 0.0
+        for (filename, line, funcname), (cc, ns, tt, ct, callers) in \
+                p.timings.items():
+            if funcname in ("f", "f1"):
+                total_calls += cc
+                reported_time += tt
+
+        if verbose:
+            print("'CPU seconds' profiler reported =", reported_time)
+            print("total # calls =", total_calls)
+        if total_calls != m + 1:
+            raise ValueError("internal error: total calls = %d" % total_calls)
+
+        # reported_time - elapsed_noprofile = overhead the profiler wasn't
+        # able to measure.  Divide by twice the number of calls (since there
+        # are two profiler events per call in this test) to get the hidden
+        # overhead per event.
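+        # (Illustrative arithmetic: with m = 100000 and a difference of
+        # 2.0 seconds, mean = 2.0 / 2.0 / 100001, i.e. roughly 1e-05
+        # seconds of hidden overhead per event.)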
+        mean = (reported_time - elapsed_noprofile) / 2.0 / total_calls
+        if verbose:
+            print("mean stopwatch overhead per profile event =", mean)
+        return mean

#****************************************************************************

+def main():
+    import os
+    from optparse import OptionParser
+
+    usage = "profile.py [-o output_file_path] [-s sort] [-m module | scriptfile] [arg] ..."
+    parser = OptionParser(usage=usage)
+    parser.allow_interspersed_args = False
+    parser.add_option('-o', '--outfile', dest="outfile",
+        help="Save stats to <outfile>", default=None)
+    parser.add_option('-m', dest="module", action="store_true",
+        help="Profile a library module.", default=False)
+    parser.add_option('-s', '--sort', dest="sort",
+        help="Sort order when printing to stdout, based on pstats.Stats class",
+        default=-1)
+
+    if not sys.argv[1:]:
+        parser.print_usage()
+        sys.exit(2)
+
+    (options, args) = parser.parse_args()
+    sys.argv[:] = args
+
+    # The script that we're profiling may chdir, so capture the absolute path
+    # to the output file at startup.
+    if options.outfile is not None:
+        options.outfile = os.path.abspath(options.outfile)
+
+    if len(args) > 0:
+        if options.module:
+            import runpy
+            code = "run_module(modname, run_name='__main__')"
+            globs = {
+                'run_module': runpy.run_module,
+                'modname': args[0]
+            }
+        else:
+            progname = args[0]
+            sys.path.insert(0, os.path.dirname(progname))
+            with io.open_code(progname) as fp:
+                code = compile(fp.read(), progname, 'exec')
+            globs = {
+                '__file__': progname,
+                '__name__': '__main__',
+                '__package__': None,
+                '__cached__': None,
+            }
+        try:
+            runctx(code, globs, None, options.outfile, options.sort)
+        except BrokenPipeError as exc:
+            # Prevent "Exception ignored" during interpreter shutdown.
+            sys.stdout = None
+            sys.exit(exc.errno)
+    else:
+        parser.print_usage()
+    return parser

# When invoked as main program, invoke the profiler on a script
+if __name__ == '__main__':
+    main()
diff --git a/llava/lib/python3.10/pty.py b/llava/lib/python3.10/pty.py
new file mode 100644
index 0000000000000000000000000000000000000000..8d8ce40df541c1872b52ae6ea069e0e975bd8a6b
--- /dev/null
+++ b/llava/lib/python3.10/pty.py
@@ -0,0 +1,187 @@
+"""Pseudo terminal utilities."""
+
+# Bugs: No signal handling.  Doesn't set slave termios and window size.
+#       Only tested on Linux, FreeBSD, and macOS.
+# See:  W. Richard Stevens. 1992.  Advanced Programming in the
+#       UNIX Environment.  Chapter 19.
+# Author: Steen Lumholt -- with additions by Guido.
+
+from select import select
+import os
+import sys
+import tty
+
+# names imported directly for test mocking purposes
+from os import close, waitpid
+from tty import setraw, tcgetattr, tcsetattr
+
+__all__ = ["openpty", "fork", "spawn"]
+
+STDIN_FILENO = 0
+STDOUT_FILENO = 1
+STDERR_FILENO = 2
+
+CHILD = 0
+
+def openpty():
+    """openpty() -> (master_fd, slave_fd)
+    Open a pty master/slave pair, using os.openpty() if possible."""
+
+    try:
+        return os.openpty()
+    except (AttributeError, OSError):
+        pass
+    master_fd, slave_name = _open_terminal()
+    slave_fd = slave_open(slave_name)
+    return master_fd, slave_fd
+
+def master_open():
+    """master_open() -> (master_fd, slave_name)
+    Open a pty master and return the fd, and the filename of the slave end.
+ Deprecated, use openpty() instead.""" + + try: + master_fd, slave_fd = os.openpty() + except (AttributeError, OSError): + pass + else: + slave_name = os.ttyname(slave_fd) + os.close(slave_fd) + return master_fd, slave_name + + return _open_terminal() + +def _open_terminal(): + """Open pty master and return (master_fd, tty_name).""" + for x in 'pqrstuvwxyzPQRST': + for y in '0123456789abcdef': + pty_name = '/dev/pty' + x + y + try: + fd = os.open(pty_name, os.O_RDWR) + except OSError: + continue + return (fd, '/dev/tty' + x + y) + raise OSError('out of pty devices') + +def slave_open(tty_name): + """slave_open(tty_name) -> slave_fd + Open the pty slave and acquire the controlling terminal, returning + opened filedescriptor. + Deprecated, use openpty() instead.""" + + result = os.open(tty_name, os.O_RDWR) + try: + from fcntl import ioctl, I_PUSH + except ImportError: + return result + try: + ioctl(result, I_PUSH, "ptem") + ioctl(result, I_PUSH, "ldterm") + except OSError: + pass + return result + +def fork(): + """fork() -> (pid, master_fd) + Fork and make the child a session leader with a controlling terminal.""" + + try: + pid, fd = os.forkpty() + except (AttributeError, OSError): + pass + else: + if pid == CHILD: + try: + os.setsid() + except OSError: + # os.forkpty() already set us session leader + pass + return pid, fd + + master_fd, slave_fd = openpty() + pid = os.fork() + if pid == CHILD: + # Establish a new session. + os.setsid() + os.close(master_fd) + + # Slave becomes stdin/stdout/stderr of child. + os.dup2(slave_fd, STDIN_FILENO) + os.dup2(slave_fd, STDOUT_FILENO) + os.dup2(slave_fd, STDERR_FILENO) + if slave_fd > STDERR_FILENO: + os.close(slave_fd) + + # Explicitly open the tty to make it become a controlling tty. + tmp_fd = os.open(os.ttyname(STDOUT_FILENO), os.O_RDWR) + os.close(tmp_fd) + else: + os.close(slave_fd) + + # Parent and child process. + return pid, master_fd + +def _writen(fd, data): + """Write all the data to a descriptor.""" + while data: + n = os.write(fd, data) + data = data[n:] + +def _read(fd): + """Default read function.""" + return os.read(fd, 1024) + +def _copy(master_fd, master_read=_read, stdin_read=_read): + """Parent copy loop. + Copies + pty master -> standard output (master_read) + standard input -> pty master (stdin_read)""" + fds = [master_fd, STDIN_FILENO] + while fds: + rfds, _wfds, _xfds = select(fds, [], []) + + if master_fd in rfds: + # Some OSes signal EOF by returning an empty byte string, + # some throw OSErrors. + try: + data = master_read(master_fd) + except OSError: + data = b"" + if not data: # Reached EOF. + return # Assume the child process has exited and is + # unreachable, so we clean up. 
+ else: + os.write(STDOUT_FILENO, data) + + if STDIN_FILENO in rfds: + data = stdin_read(STDIN_FILENO) + if not data: + fds.remove(STDIN_FILENO) + else: + _writen(master_fd, data) + +def spawn(argv, master_read=_read, stdin_read=_read): + """Create a spawned process.""" + if type(argv) == type(''): + argv = (argv,) + sys.audit('pty.spawn', argv) + + pid, master_fd = fork() + if pid == CHILD: + os.execlp(argv[0], *argv) + + try: + mode = tcgetattr(STDIN_FILENO) + setraw(STDIN_FILENO) + restore = True + except tty.error: # This is the same as termios.error + restore = False + + try: + _copy(master_fd, master_read, stdin_read) + finally: + if restore: + tcsetattr(STDIN_FILENO, tty.TCSAFLUSH, mode) + + close(master_fd) + return waitpid(pid, 0)[1] diff --git a/llava/lib/python3.10/runpy.py b/llava/lib/python3.10/runpy.py new file mode 100644 index 0000000000000000000000000000000000000000..c7d3d8caad1611ed52f1be8d517ad2ac906f04db --- /dev/null +++ b/llava/lib/python3.10/runpy.py @@ -0,0 +1,321 @@ +"""runpy.py - locating and running Python code using the module namespace + +Provides support for locating and running Python scripts using the Python +module namespace instead of the native filesystem. + +This allows Python code to play nicely with non-filesystem based PEP 302 +importers when locating support scripts as well as when importing modules. +""" +# Written by Nick Coghlan +# to implement PEP 338 (Executing Modules as Scripts) + + +import sys +import importlib.machinery # importlib first so we can test #15386 via -m +import importlib.util +import io +import types +import os + +__all__ = [ + "run_module", "run_path", +] + +class _TempModule(object): + """Temporarily replace a module in sys.modules with an empty namespace""" + def __init__(self, mod_name): + self.mod_name = mod_name + self.module = types.ModuleType(mod_name) + self._saved_module = [] + + def __enter__(self): + mod_name = self.mod_name + try: + self._saved_module.append(sys.modules[mod_name]) + except KeyError: + pass + sys.modules[mod_name] = self.module + return self + + def __exit__(self, *args): + if self._saved_module: + sys.modules[self.mod_name] = self._saved_module[0] + else: + del sys.modules[self.mod_name] + self._saved_module = [] + +class _ModifiedArgv0(object): + def __init__(self, value): + self.value = value + self._saved_value = self._sentinel = object() + + def __enter__(self): + if self._saved_value is not self._sentinel: + raise RuntimeError("Already preserving saved value") + self._saved_value = sys.argv[0] + sys.argv[0] = self.value + + def __exit__(self, *args): + self.value = self._sentinel + sys.argv[0] = self._saved_value + +# TODO: Replace these helpers with importlib._bootstrap_external functions. 
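+
+# A minimal usage sketch (illustrative; "mypkg.tool" is a hypothetical
+# module name):
+#
+#     import runpy
+#     globs = runpy.run_module("mypkg.tool", run_name="__main__")
+#
+# This executes the module's code in a fresh namespace, much like
+# "python -m mypkg.tool", and returns the resulting globals dictionary.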
+def _run_code(code, run_globals, init_globals=None, + mod_name=None, mod_spec=None, + pkg_name=None, script_name=None): + """Helper to run code in nominated namespace""" + if init_globals is not None: + run_globals.update(init_globals) + if mod_spec is None: + loader = None + fname = script_name + cached = None + else: + loader = mod_spec.loader + fname = mod_spec.origin + cached = mod_spec.cached + if pkg_name is None: + pkg_name = mod_spec.parent + run_globals.update(__name__ = mod_name, + __file__ = fname, + __cached__ = cached, + __doc__ = None, + __loader__ = loader, + __package__ = pkg_name, + __spec__ = mod_spec) + exec(code, run_globals) + return run_globals + +def _run_module_code(code, init_globals=None, + mod_name=None, mod_spec=None, + pkg_name=None, script_name=None): + """Helper to run code in new namespace with sys modified""" + fname = script_name if mod_spec is None else mod_spec.origin + with _TempModule(mod_name) as temp_module, _ModifiedArgv0(fname): + mod_globals = temp_module.module.__dict__ + _run_code(code, mod_globals, init_globals, + mod_name, mod_spec, pkg_name, script_name) + # Copy the globals of the temporary module, as they + # may be cleared when the temporary module goes away + return mod_globals.copy() + +# Helper to get the full name, spec and code for a module +def _get_module_details(mod_name, error=ImportError): + if mod_name.startswith("."): + raise error("Relative module names not supported") + pkg_name, _, _ = mod_name.rpartition(".") + if pkg_name: + # Try importing the parent to avoid catching initialization errors + try: + __import__(pkg_name) + except ImportError as e: + # If the parent or higher ancestor package is missing, let the + # error be raised by find_spec() below and then be caught. But do + # not allow other errors to be caught. + if e.name is None or (e.name != pkg_name and + not pkg_name.startswith(e.name + ".")): + raise + # Warn if the module has already been imported under its normal name + existing = sys.modules.get(mod_name) + if existing is not None and not hasattr(existing, "__path__"): + from warnings import warn + msg = "{mod_name!r} found in sys.modules after import of " \ + "package {pkg_name!r}, but prior to execution of " \ + "{mod_name!r}; this may result in unpredictable " \ + "behaviour".format(mod_name=mod_name, pkg_name=pkg_name) + warn(RuntimeWarning(msg)) + + try: + spec = importlib.util.find_spec(mod_name) + except (ImportError, AttributeError, TypeError, ValueError) as ex: + # This hack fixes an impedance mismatch between pkgutil and + # importlib, where the latter raises other errors for cases where + # pkgutil previously raised ImportError + msg = "Error while finding module specification for {!r} ({}: {})" + if mod_name.endswith(".py"): + msg += (f". 
Try using '{mod_name[:-3]}' instead of " + f"'{mod_name}' as the module name.") + raise error(msg.format(mod_name, type(ex).__name__, ex)) from ex + if spec is None: + raise error("No module named %s" % mod_name) + if spec.submodule_search_locations is not None: + if mod_name == "__main__" or mod_name.endswith(".__main__"): + raise error("Cannot use package as __main__ module") + try: + pkg_main_name = mod_name + ".__main__" + return _get_module_details(pkg_main_name, error) + except error as e: + if mod_name not in sys.modules: + raise # No module loaded; being a package is irrelevant + raise error(("%s; %r is a package and cannot " + + "be directly executed") %(e, mod_name)) + loader = spec.loader + if loader is None: + raise error("%r is a namespace package and cannot be executed" + % mod_name) + try: + code = loader.get_code(mod_name) + except ImportError as e: + raise error(format(e)) from e + if code is None: + raise error("No code object available for %s" % mod_name) + return mod_name, spec, code + +class _Error(Exception): + """Error that _run_module_as_main() should report without a traceback""" + +# XXX ncoghlan: Should this be documented and made public? +# (Current thoughts: don't repeat the mistake that lead to its +# creation when run_module() no longer met the needs of +# mainmodule.c, but couldn't be changed because it was public) +def _run_module_as_main(mod_name, alter_argv=True): + """Runs the designated module in the __main__ namespace + + Note that the executed module will have full access to the + __main__ namespace. If this is not desirable, the run_module() + function should be used to run the module code in a fresh namespace. + + At the very least, these variables in __main__ will be overwritten: + __name__ + __file__ + __cached__ + __loader__ + __package__ + """ + try: + if alter_argv or mod_name != "__main__": # i.e. -m switch + mod_name, mod_spec, code = _get_module_details(mod_name, _Error) + else: # i.e. directory or zipfile execution + mod_name, mod_spec, code = _get_main_module_details(_Error) + except _Error as exc: + msg = "%s: %s" % (sys.executable, exc) + sys.exit(msg) + main_globals = sys.modules["__main__"].__dict__ + if alter_argv: + sys.argv[0] = mod_spec.origin + return _run_code(code, main_globals, None, + "__main__", mod_spec) + +def run_module(mod_name, init_globals=None, + run_name=None, alter_sys=False): + """Execute a module's code without importing it. + + mod_name -- an absolute module name or package name. + + Optional arguments: + init_globals -- dictionary used to pre-populate the module’s + globals dictionary before the code is executed. + + run_name -- if not None, this will be used for setting __name__; + otherwise, __name__ will be set to mod_name + '__main__' if the + named module is a package and to just mod_name otherwise. + + alter_sys -- if True, sys.argv[0] is updated with the value of + __file__ and sys.modules[__name__] is updated with a temporary + module object for the module being executed. Both are + restored to their original values before the function returns. + + Returns the resulting module globals dictionary. 
+    """
+    mod_name, mod_spec, code = _get_module_details(mod_name)
+    if run_name is None:
+        run_name = mod_name
+    if alter_sys:
+        return _run_module_code(code, init_globals, run_name, mod_spec)
+    else:
+        # Leave the sys module alone
+        return _run_code(code, {}, init_globals, run_name, mod_spec)
+
+def _get_main_module_details(error=ImportError):
+    # Helper that gives a nicer error message when attempting to
+    # execute a zipfile or directory by invoking __main__.py
+    # Also moves the standard __main__ out of the way so that the
+    # preexisting __loader__ entry doesn't cause issues
+    main_name = "__main__"
+    saved_main = sys.modules[main_name]
+    del sys.modules[main_name]
+    try:
+        return _get_module_details(main_name)
+    except ImportError as exc:
+        if main_name in str(exc):
+            raise error("can't find %r module in %r" %
+                        (main_name, sys.path[0])) from exc
+        raise
+    finally:
+        sys.modules[main_name] = saved_main
+
+
+def _get_code_from_file(run_name, fname):
+    # Check for a compiled file first
+    from pkgutil import read_code
+    decoded_path = os.path.abspath(os.fsdecode(fname))
+    with io.open_code(decoded_path) as f:
+        code = read_code(f)
+    if code is None:
+        # That didn't work, so try it as normal source code
+        with io.open_code(decoded_path) as f:
+            code = compile(f.read(), fname, 'exec')
+    return code, fname
+
+def run_path(path_name, init_globals=None, run_name=None):
+    """Execute code located at the specified filesystem location.
+
+    path_name -- filesystem location of a Python script, zipfile,
+    or directory containing a top level __main__.py script.
+
+    Optional arguments:
+    init_globals -- dictionary used to pre-populate the module's
+    globals dictionary before the code is executed.
+
+    run_name -- if not None, this will be used to set __name__;
+    otherwise, '<run_path>' will be used for __name__.
+
+    Returns the resulting module globals dictionary.
+    """
+    if run_name is None:
+        run_name = "<run_path>"
+    pkg_name = run_name.rpartition(".")[0]
+    from pkgutil import get_importer
+    importer = get_importer(path_name)
+    # Trying to avoid importing imp so as to not consume the deprecation warning.
+    is_NullImporter = False
+    if type(importer).__module__ == 'imp':
+        if type(importer).__name__ == 'NullImporter':
+            is_NullImporter = True
+    if isinstance(importer, type(None)) or is_NullImporter:
+        # Not a valid sys.path entry, so run the code directly
+        # execfile() doesn't help as we want to allow compiled files
+        code, fname = _get_code_from_file(run_name, path_name)
+        return _run_module_code(code, init_globals, run_name,
+                                pkg_name=pkg_name, script_name=fname)
+    else:
+        # Finder is defined for path, so add it to
+        # the start of sys.path
+        sys.path.insert(0, path_name)
+        try:
+            # Here's where things are a little different from the run_module
+            # case. There, we only had to replace the module in sys while the
+            # code was running and doing so was somewhat optional. Here, we
+            # have no choice and we have to remove it even while we read the
+            # code. If we don't do this, a __loader__ attribute in the
+            # existing __main__ module may prevent location of the new module.
+ mod_name, mod_spec, code = _get_main_module_details() + with _TempModule(run_name) as temp_module, \ + _ModifiedArgv0(path_name): + mod_globals = temp_module.module.__dict__ + return _run_code(code, mod_globals, init_globals, + run_name, mod_spec, pkg_name).copy() + finally: + try: + sys.path.remove(path_name) + except ValueError: + pass + + +if __name__ == "__main__": + # Run the module specified as the next command line argument + if len(sys.argv) < 2: + print("No module specified for execution", file=sys.stderr) + else: + del sys.argv[0] # Make the requested module sys.argv[0] + _run_module_as_main(sys.argv[0]) diff --git a/llava/lib/python3.10/sndhdr.py b/llava/lib/python3.10/sndhdr.py new file mode 100644 index 0000000000000000000000000000000000000000..96595c6974468213e0a93414af95f4981bb609c5 --- /dev/null +++ b/llava/lib/python3.10/sndhdr.py @@ -0,0 +1,257 @@ +"""Routines to help recognizing sound files. + +Function whathdr() recognizes various types of sound file headers. +It understands almost all headers that SOX can decode. + +The return tuple contains the following items, in this order: +- file type (as SOX understands it) +- sampling rate (0 if unknown or hard to decode) +- number of channels (0 if unknown or hard to decode) +- number of frames in the file (-1 if unknown or hard to decode) +- number of bits/sample, or 'U' for U-LAW, or 'A' for A-LAW + +If the file doesn't have a recognizable type, it returns None. +If the file can't be opened, OSError is raised. + +To compute the total time, divide the number of frames by the +sampling rate (a frame contains a sample for each channel). + +Function what() calls whathdr(). (It used to also use some +heuristics for raw data, but this doesn't work very well.) + +Finally, the function test() is a simple main program that calls +what() for all files mentioned on the argument list. For directory +arguments it calls what() for all files in that directory. Default +argument is "." (testing all files in the current directory). The +option -r tells it to recurse down directories found inside +explicitly given directories. +""" + +# The file structure is top-down except that the test program and its +# subroutine come last. 
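Given the header description above, a hedged usage sketch (the filename is an example; what() returns the SndHeaders namedtuple defined below, or None):

    import sndhdr

    hdr = sndhdr.what('example.wav')   # example path
    if hdr is not None and hdr.framerate:
        # total time = frames / sampling rate, as the docstring above notes
        print(hdr.filetype, hdr.nframes / hdr.framerate, 'seconds')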
+ +__all__ = ['what', 'whathdr'] + +from collections import namedtuple + +SndHeaders = namedtuple('SndHeaders', + 'filetype framerate nchannels nframes sampwidth') + +SndHeaders.filetype.__doc__ = ("""The value for type indicates the data type +and will be one of the strings 'aifc', 'aiff', 'au','hcom', +'sndr', 'sndt', 'voc', 'wav', '8svx', 'sb', 'ub', or 'ul'.""") +SndHeaders.framerate.__doc__ = ("""The sampling_rate will be either the actual +value or 0 if unknown or difficult to decode.""") +SndHeaders.nchannels.__doc__ = ("""The number of channels or 0 if it cannot be +determined or if the value is difficult to decode.""") +SndHeaders.nframes.__doc__ = ("""The value for frames will be either the number +of frames or -1.""") +SndHeaders.sampwidth.__doc__ = ("""Either the sample size in bits or +'A' for A-LAW or 'U' for u-LAW.""") + +def what(filename): + """Guess the type of a sound file.""" + res = whathdr(filename) + return res + + +def whathdr(filename): + """Recognize sound headers.""" + with open(filename, 'rb') as f: + h = f.read(512) + for tf in tests: + res = tf(h, f) + if res: + return SndHeaders(*res) + return None + + +#-----------------------------------# +# Subroutines per sound header type # +#-----------------------------------# + +tests = [] + +def test_aifc(h, f): + import aifc + if not h.startswith(b'FORM'): + return None + if h[8:12] == b'AIFC': + fmt = 'aifc' + elif h[8:12] == b'AIFF': + fmt = 'aiff' + else: + return None + f.seek(0) + try: + a = aifc.open(f, 'r') + except (EOFError, aifc.Error): + return None + return (fmt, a.getframerate(), a.getnchannels(), + a.getnframes(), 8 * a.getsampwidth()) + +tests.append(test_aifc) + + +def test_au(h, f): + if h.startswith(b'.snd'): + func = get_long_be + elif h[:4] in (b'\0ds.', b'dns.'): + func = get_long_le + else: + return None + filetype = 'au' + hdr_size = func(h[4:8]) + data_size = func(h[8:12]) + encoding = func(h[12:16]) + rate = func(h[16:20]) + nchannels = func(h[20:24]) + sample_size = 1 # default + if encoding == 1: + sample_bits = 'U' + elif encoding == 2: + sample_bits = 8 + elif encoding == 3: + sample_bits = 16 + sample_size = 2 + else: + sample_bits = '?' 
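The five 32-bit fields read by test_au() above follow the Sun/NeXT .au layout; as a standalone cross-check, the same parse can be written with struct against a synthetic header (a sketch, not used by the module itself; test_au() continues below with the frame-size computation):

    import struct

    # magic '.snd', then header size, data size, encoding, rate, channels (big-endian)
    h = b'.snd' + struct.pack('>5I', 24, 0, 1, 8000, 1)
    hdr_size, data_size, encoding, rate, nchannels = struct.unpack('>5I', h[4:24])
    assert (encoding, rate, nchannels) == (1, 8000, 1)   # encoding 1 is U-LAW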
+ frame_size = sample_size * nchannels + if frame_size: + nframe = data_size / frame_size + else: + nframe = -1 + return filetype, rate, nchannels, nframe, sample_bits + +tests.append(test_au) + + +def test_hcom(h, f): + if h[65:69] != b'FSSD' or h[128:132] != b'HCOM': + return None + divisor = get_long_be(h[144:148]) + if divisor: + rate = 22050 / divisor + else: + rate = 0 + return 'hcom', rate, 1, -1, 8 + +tests.append(test_hcom) + + +def test_voc(h, f): + if not h.startswith(b'Creative Voice File\032'): + return None + sbseek = get_short_le(h[20:22]) + rate = 0 + if 0 <= sbseek < 500 and h[sbseek] == 1: + ratecode = 256 - h[sbseek+4] + if ratecode: + rate = int(1000000.0 / ratecode) + return 'voc', rate, 1, -1, 8 + +tests.append(test_voc) + + +def test_wav(h, f): + import wave + # 'RIFF' 'WAVE' 'fmt ' + if not h.startswith(b'RIFF') or h[8:12] != b'WAVE' or h[12:16] != b'fmt ': + return None + f.seek(0) + try: + w = wave.open(f, 'r') + except (EOFError, wave.Error): + return None + return ('wav', w.getframerate(), w.getnchannels(), + w.getnframes(), 8*w.getsampwidth()) + +tests.append(test_wav) + + +def test_8svx(h, f): + if not h.startswith(b'FORM') or h[8:12] != b'8SVX': + return None + # Should decode it to get #channels -- assume always 1 + return '8svx', 0, 1, 0, 8 + +tests.append(test_8svx) + + +def test_sndt(h, f): + if h.startswith(b'SOUND'): + nsamples = get_long_le(h[8:12]) + rate = get_short_le(h[20:22]) + return 'sndt', rate, 1, nsamples, 8 + +tests.append(test_sndt) + + +def test_sndr(h, f): + if h.startswith(b'\0\0'): + rate = get_short_le(h[2:4]) + if 4000 <= rate <= 25000: + return 'sndr', rate, 1, -1, 8 + +tests.append(test_sndr) + + +#-------------------------------------------# +# Subroutines to extract numbers from bytes # +#-------------------------------------------# + +def get_long_be(b): + return (b[0] << 24) | (b[1] << 16) | (b[2] << 8) | b[3] + +def get_long_le(b): + return (b[3] << 24) | (b[2] << 16) | (b[1] << 8) | b[0] + +def get_short_be(b): + return (b[0] << 8) | b[1] + +def get_short_le(b): + return (b[1] << 8) | b[0] + + +#--------------------# +# Small test program # +#--------------------# + +def test(): + import sys + recursive = 0 + if sys.argv[1:] and sys.argv[1] == '-r': + del sys.argv[1:2] + recursive = 1 + try: + if sys.argv[1:]: + testall(sys.argv[1:], recursive, 1) + else: + testall(['.'], recursive, 1) + except KeyboardInterrupt: + sys.stderr.write('\n[Interrupted]\n') + sys.exit(1) + +def testall(list, recursive, toplevel): + import sys + import os + for filename in list: + if os.path.isdir(filename): + print(filename + '/:', end=' ') + if recursive or toplevel: + print('recursing down:') + import glob + names = glob.glob(os.path.join(glob.escape(filename), '*')) + testall(names, recursive, 0) + else: + print('*** directory (use -r) ***') + else: + print(filename + ':', end=' ') + sys.stdout.flush() + try: + print(what(filename)) + except OSError: + print('*** not found ***') + +if __name__ == '__main__': + test() diff --git a/llava/lib/python3.10/socket.py b/llava/lib/python3.10/socket.py new file mode 100644 index 0000000000000000000000000000000000000000..ecaf73cf307cfa65aeca8d0f9e516205814b54d2 --- /dev/null +++ b/llava/lib/python3.10/socket.py @@ -0,0 +1,972 @@ +# Wrapper module for _socket, providing some additional facilities +# implemented in Python. + +"""\ +This module provides socket operations and some related functions. +On Unix, it supports IP (Internet Protocol) and Unix domain sockets. +On other systems, it only supports IP. 
Functions specific to a + socket are available as methods of the socket object. + +Functions: + +socket() -- create a new socket object +socketpair() -- create a pair of new socket objects [*] +fromfd() -- create a socket object from an open file descriptor [*] +send_fds() -- Send file descriptor to the socket. +recv_fds() -- Receive file descriptors from the socket. +fromshare() -- create a socket object from data received from socket.share() [*] +gethostname() -- return the current hostname +gethostbyname() -- map a hostname to its IP number +gethostbyaddr() -- map an IP number or hostname to DNS info +getservbyname() -- map a service name and a protocol name to a port number +getprotobyname() -- map a protocol name (e.g. 'tcp') to a number +ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order +htons(), htonl() -- convert 16, 32 bit int from host to network byte order +inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format +inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89) +socket.getdefaulttimeout() -- get the default timeout value +socket.setdefaulttimeout() -- set the default timeout value +create_connection() -- connects to an address, with an optional timeout and + optional source address. + + [*] not available on all platforms! + +Special objects: + +SocketType -- type object for socket objects +error -- exception raised for I/O errors +has_ipv6 -- boolean value indicating if IPv6 is supported + +IntEnum constants: + +AF_INET, AF_UNIX -- socket domains (first argument to socket() call) +SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument) + +Integer constants: + +Many other constants may be defined; these may be used in calls to +the setsockopt() and getsockopt() methods. +""" + +import _socket +from _socket import * + +import os, sys, io, selectors +from enum import IntEnum, IntFlag + +try: + import errno +except ImportError: + errno = None +EBADF = getattr(errno, 'EBADF', 9) +EAGAIN = getattr(errno, 'EAGAIN', 11) +EWOULDBLOCK = getattr(errno, 'EWOULDBLOCK', 11) + +__all__ = ["fromfd", "getfqdn", "create_connection", "create_server", + "has_dualstack_ipv6", "AddressFamily", "SocketKind"] +__all__.extend(os._get_exports_list(_socket)) + +# Set up the socket.AF_* socket.SOCK_* constants as members of IntEnums for +# nicer string representations. +# Note that _socket only knows about the integer values. The public interface +# in this module understands the enums and translates them back from integers +# where needed (e.g. .family property of a socket object). + +IntEnum._convert_( + 'AddressFamily', + __name__, + lambda C: C.isupper() and C.startswith('AF_')) + +IntEnum._convert_( + 'SocketKind', + __name__, + lambda C: C.isupper() and C.startswith('SOCK_')) + +IntFlag._convert_( + 'MsgFlag', + __name__, + lambda C: C.isupper() and C.startswith('MSG_')) + +IntFlag._convert_( + 'AddressInfo', + __name__, + lambda C: C.isupper() and C.startswith('AI_')) + +_LOCALHOST = '127.0.0.1' +_LOCALHOST_V6 = '::1' + + +def _intenum_converter(value, enum_klass): + """Convert a numeric family value to an IntEnum member. + + If it's not a known member, return the numeric value itself. + """ + try: + return enum_klass(value) + except ValueError: + return value + + +# WSA error codes +if sys.platform.lower().startswith("win"): + errorTab = {} + errorTab[6] = "Specified event object handle is invalid." + errorTab[8] = "Insufficient memory available." + errorTab[87] = "One or more parameters are invalid."
+ errorTab[995] = "Overlapped operation aborted." + errorTab[996] = "Overlapped I/O event object not in signaled state." + errorTab[997] = "Overlapped operation will complete later." + errorTab[10004] = "The operation was interrupted." + errorTab[10009] = "A bad file handle was passed." + errorTab[10013] = "Permission denied." + errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT + errorTab[10022] = "An invalid operation was attempted." + errorTab[10024] = "Too many open files." + errorTab[10035] = "The socket operation would block" + errorTab[10036] = "A blocking operation is already in progress." + errorTab[10037] = "Operation already in progress." + errorTab[10038] = "Socket operation on nonsocket." + errorTab[10039] = "Destination address required." + errorTab[10040] = "Message too long." + errorTab[10041] = "Protocol wrong type for socket." + errorTab[10042] = "Bad protocol option." + errorTab[10043] = "Protocol not supported." + errorTab[10044] = "Socket type not supported." + errorTab[10045] = "Operation not supported." + errorTab[10046] = "Protocol family not supported." + errorTab[10047] = "Address family not supported by protocol family." + errorTab[10048] = "The network address is in use." + errorTab[10049] = "Cannot assign requested address." + errorTab[10050] = "Network is down." + errorTab[10051] = "Network is unreachable." + errorTab[10052] = "Network dropped connection on reset." + errorTab[10053] = "Software caused connection abort." + errorTab[10054] = "The connection has been reset." + errorTab[10055] = "No buffer space available." + errorTab[10056] = "Socket is already connected." + errorTab[10057] = "Socket is not connected." + errorTab[10058] = "The network has been shut down." + errorTab[10059] = "Too many references." + errorTab[10060] = "The operation timed out." + errorTab[10061] = "Connection refused." + errorTab[10062] = "Cannot translate name." + errorTab[10063] = "The name is too long." + errorTab[10064] = "The host is down." + errorTab[10065] = "The host is unreachable." + errorTab[10066] = "Directory not empty." + errorTab[10067] = "Too many processes." + errorTab[10068] = "User quota exceeded." + errorTab[10069] = "Disk quota exceeded." + errorTab[10070] = "Stale file handle reference." + errorTab[10071] = "Item is remote." + errorTab[10091] = "Network subsystem is unavailable." + errorTab[10092] = "Winsock.dll version out of range." + errorTab[10093] = "Successful WSAStartup not yet performed." + errorTab[10101] = "Graceful shutdown in progress." + errorTab[10102] = "No more results from WSALookupServiceNext." + errorTab[10103] = "Call has been canceled." + errorTab[10104] = "Procedure call table is invalid." + errorTab[10105] = "Service provider is invalid." + errorTab[10106] = "Service provider failed to initialize." + errorTab[10107] = "System call failure." + errorTab[10108] = "Service not found." + errorTab[10109] = "Class type not found." + errorTab[10110] = "No more results from WSALookupServiceNext." + errorTab[10111] = "Call was canceled." + errorTab[10112] = "Database query was refused." + errorTab[11001] = "Host not found." + errorTab[11002] = "Nonauthoritative host not found." + errorTab[11003] = "This is a nonrecoverable error." + errorTab[11004] = "Valid name, no data record requested type." + errorTab[11005] = "QoS receivers." + errorTab[11006] = "QoS senders." + errorTab[11007] = "No QoS senders." + errorTab[11008] = "QoS no receivers." + errorTab[11009] = "QoS request confirmed." + errorTab[11010] = "QoS admission error." 
+ errorTab[11011] = "QoS policy failure." + errorTab[11012] = "QoS bad style." + errorTab[11013] = "QoS bad object." + errorTab[11014] = "QoS traffic control error." + errorTab[11015] = "QoS generic error." + errorTab[11016] = "QoS service type error." + errorTab[11017] = "QoS flowspec error." + errorTab[11018] = "Invalid QoS provider buffer." + errorTab[11019] = "Invalid QoS filter style." + errorTab[11020] = "Invalid QoS filter type." + errorTab[11021] = "Incorrect QoS filter count." + errorTab[11022] = "Invalid QoS object length." + errorTab[11023] = "Incorrect QoS flow count." + errorTab[11024] = "Unrecognized QoS object." + errorTab[11025] = "Invalid QoS policy object." + errorTab[11026] = "Invalid QoS flow descriptor." + errorTab[11027] = "Invalid QoS provider-specific flowspec." + errorTab[11028] = "Invalid QoS provider-specific filterspec." + errorTab[11029] = "Invalid QoS shape discard mode object." + errorTab[11030] = "Invalid QoS shaping rate object." + errorTab[11031] = "Reserved policy QoS element type." + __all__.append("errorTab") + + +class _GiveupOnSendfile(Exception): pass + + +class socket(_socket.socket): + + """A subclass of _socket.socket adding the makefile() method.""" + + __slots__ = ["__weakref__", "_io_refs", "_closed"] + + def __init__(self, family=-1, type=-1, proto=-1, fileno=None): + # For user code address family and type values are IntEnum members, but + # for the underlying _socket.socket they're just integers. The + # constructor of _socket.socket converts the given argument to an + # integer automatically. + if fileno is None: + if family == -1: + family = AF_INET + if type == -1: + type = SOCK_STREAM + if proto == -1: + proto = 0 + _socket.socket.__init__(self, family, type, proto, fileno) + self._io_refs = 0 + self._closed = False + + def __enter__(self): + return self + + def __exit__(self, *args): + if not self._closed: + self.close() + + def __repr__(self): + """Wrap __repr__() to reveal the real class name and socket + address(es). + """ + closed = getattr(self, '_closed', False) + s = "<%s.%s%s fd=%i, family=%s, type=%s, proto=%i" \ + % (self.__class__.__module__, + self.__class__.__qualname__, + " [closed]" if closed else "", + self.fileno(), + self.family, + self.type, + self.proto) + if not closed: + try: + laddr = self.getsockname() + if laddr: + s += ", laddr=%s" % str(laddr) + except error: + pass + try: + raddr = self.getpeername() + if raddr: + s += ", raddr=%s" % str(raddr) + except error: + pass + s += '>' + return s + + def __getstate__(self): + raise TypeError(f"cannot pickle {self.__class__.__name__!r} object") + + def dup(self): + """dup() -> socket object + + Duplicate the socket. Return a new socket object connected to the same + system resource. The new socket is non-inheritable. + """ + fd = dup(self.fileno()) + sock = self.__class__(self.family, self.type, self.proto, fileno=fd) + sock.settimeout(self.gettimeout()) + return sock + + def accept(self): + """accept() -> (socket object, address info) + + Wait for an incoming connection. Return a new socket + representing the connection, and the address of the client. + For IP sockets, the address info is a pair (hostaddr, port). + """ + fd, addr = self._accept() + sock = socket(self.family, self.type, self.proto, fileno=fd) + # Issue #7995: if no default timeout is set and the listening + # socket had a (non-zero) timeout, force the new socket in blocking + # mode to override platform-specific socket flags inheritance.
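The Issue #7995 comment above describes observable behavior: when no global default timeout is set, the socket returned by accept() is blocking even if the listener had a timeout. A self-contained sketch (accept()'s body continues below):

    import socket

    lsock = socket.socket()
    lsock.bind(('127.0.0.1', 0))   # port 0: the OS assigns a free port
    lsock.listen()
    lsock.settimeout(5.0)          # the listener has a timeout...
    csock = socket.create_connection(lsock.getsockname())
    conn, _ = lsock.accept()
    assert conn.gettimeout() is None   # ...but the accepted socket blocks
    for s in (conn, csock, lsock):
        s.close()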
+ if getdefaulttimeout() is None and self.gettimeout(): + sock.setblocking(True) + return sock, addr + + def makefile(self, mode="r", buffering=None, *, + encoding=None, errors=None, newline=None): + """makefile(...) -> an I/O stream connected to the socket + + The arguments are as for io.open() after the filename, except the only + supported mode values are 'r' (default), 'w' and 'b'. + """ + # XXX refactor to share code? + if not set(mode) <= {"r", "w", "b"}: + raise ValueError("invalid mode %r (only r, w, b allowed)" % (mode,)) + writing = "w" in mode + reading = "r" in mode or not writing + assert reading or writing + binary = "b" in mode + rawmode = "" + if reading: + rawmode += "r" + if writing: + rawmode += "w" + raw = SocketIO(self, rawmode) + self._io_refs += 1 + if buffering is None: + buffering = -1 + if buffering < 0: + buffering = io.DEFAULT_BUFFER_SIZE + if buffering == 0: + if not binary: + raise ValueError("unbuffered streams must be binary") + return raw + if reading and writing: + buffer = io.BufferedRWPair(raw, raw, buffering) + elif reading: + buffer = io.BufferedReader(raw, buffering) + else: + assert writing + buffer = io.BufferedWriter(raw, buffering) + if binary: + return buffer + encoding = io.text_encoding(encoding) + text = io.TextIOWrapper(buffer, encoding, errors, newline) + text.mode = mode + return text + + if hasattr(os, 'sendfile'): + + def _sendfile_use_sendfile(self, file, offset=0, count=None): + self._check_sendfile_params(file, offset, count) + sockno = self.fileno() + try: + fileno = file.fileno() + except (AttributeError, io.UnsupportedOperation) as err: + raise _GiveupOnSendfile(err) # not a regular file + try: + fsize = os.fstat(fileno).st_size + except OSError as err: + raise _GiveupOnSendfile(err) # not a regular file + if not fsize: + return 0 # empty file + # Truncate to 1GiB to avoid OverflowError, see bpo-38319. + blocksize = min(count or fsize, 2 ** 30) + timeout = self.gettimeout() + if timeout == 0: + raise ValueError("non-blocking sockets are not supported") + # poll/select have the advantage of not requiring any + # extra file descriptor, contrarily to epoll/kqueue + # (also, they require a single syscall). + if hasattr(selectors, 'PollSelector'): + selector = selectors.PollSelector() + else: + selector = selectors.SelectSelector() + selector.register(sockno, selectors.EVENT_WRITE) + + total_sent = 0 + # localize variable access to minimize overhead + selector_select = selector.select + os_sendfile = os.sendfile + try: + while True: + if timeout and not selector_select(timeout): + raise TimeoutError('timed out') + if count: + blocksize = count - total_sent + if blocksize <= 0: + break + try: + sent = os_sendfile(sockno, fileno, offset, blocksize) + except BlockingIOError: + if not timeout: + # Block until the socket is ready to send some + # data; avoids hogging CPU resources. + selector_select() + continue + except OSError as err: + if total_sent == 0: + # We can get here for different reasons, the main + # one being 'file' is not a regular mmap(2)-like + # file, in which case we'll fall back on using + # plain send(). 
+ raise _GiveupOnSendfile(err) + raise err from None + else: + if sent == 0: + break # EOF + offset += sent + total_sent += sent + return total_sent + finally: + if total_sent > 0 and hasattr(file, 'seek'): + file.seek(offset) + else: + def _sendfile_use_sendfile(self, file, offset=0, count=None): + raise _GiveupOnSendfile( + "os.sendfile() not available on this platform") + + def _sendfile_use_send(self, file, offset=0, count=None): + self._check_sendfile_params(file, offset, count) + if self.gettimeout() == 0: + raise ValueError("non-blocking sockets are not supported") + if offset: + file.seek(offset) + blocksize = min(count, 8192) if count else 8192 + total_sent = 0 + # localize variable access to minimize overhead + file_read = file.read + sock_send = self.send + try: + while True: + if count: + blocksize = min(count - total_sent, blocksize) + if blocksize <= 0: + break + data = memoryview(file_read(blocksize)) + if not data: + break # EOF + while True: + try: + sent = sock_send(data) + except BlockingIOError: + continue + else: + total_sent += sent + if sent < len(data): + data = data[sent:] + else: + break + return total_sent + finally: + if total_sent > 0 and hasattr(file, 'seek'): + file.seek(offset + total_sent) + + def _check_sendfile_params(self, file, offset, count): + if 'b' not in getattr(file, 'mode', 'b'): + raise ValueError("file should be opened in binary mode") + if not self.type & SOCK_STREAM: + raise ValueError("only SOCK_STREAM type sockets are supported") + if count is not None: + if not isinstance(count, int): + raise TypeError( + "count must be a positive integer (got {!r})".format(count)) + if count <= 0: + raise ValueError( + "count must be a positive integer (got {!r})".format(count)) + + def sendfile(self, file, offset=0, count=None): + """sendfile(file[, offset[, count]]) -> sent + + Send a file until EOF is reached by using high-performance + os.sendfile() and return the total number of bytes which + were sent. + *file* must be a regular file object opened in binary mode. + If os.sendfile() is not available (e.g. Windows) or file is + not a regular file socket.send() will be used instead. + *offset* tells from where to start reading the file. + If specified, *count* is the total number of bytes to transmit + as opposed to sending the file until EOF is reached. + File position is updated on return or also in case of error in + which case file.tell() can be used to figure out the number of + bytes which were sent. + The socket must be of SOCK_STREAM type. + Non-blocking sockets are not supported. + """ + try: + return self._sendfile_use_sendfile(file, offset, count) + except _GiveupOnSendfile: + return self._sendfile_use_send(file, offset, count) + + def _decref_socketios(self): + if self._io_refs > 0: + self._io_refs -= 1 + if self._closed: + self.close() + + def _real_close(self, _ss=_socket.socket): + # This function should not reference any globals. See issue #808164. + _ss.close(self) + + def close(self): + # This function should not reference any globals. See issue #808164. + self._closed = True + if self._io_refs <= 0: + self._real_close() + + def detach(self): + """detach() -> file descriptor + + Close the socket object without closing the underlying file descriptor. + The object cannot be used after this call, but the file descriptor + can be reused for other purposes. The file descriptor is returned. + """ + self._closed = True + return super().detach() + + @property + def family(self): + """Read-only access to the address family for this socket. 
+ """ + return _intenum_converter(super().family, AddressFamily) + + @property + def type(self): + """Read-only access to the socket type. + """ + return _intenum_converter(super().type, SocketKind) + + if os.name == 'nt': + def get_inheritable(self): + return os.get_handle_inheritable(self.fileno()) + def set_inheritable(self, inheritable): + os.set_handle_inheritable(self.fileno(), inheritable) + else: + def get_inheritable(self): + return os.get_inheritable(self.fileno()) + def set_inheritable(self, inheritable): + os.set_inheritable(self.fileno(), inheritable) + get_inheritable.__doc__ = "Get the inheritable flag of the socket" + set_inheritable.__doc__ = "Set the inheritable flag of the socket" + +def fromfd(fd, family, type, proto=0): + """ fromfd(fd, family, type[, proto]) -> socket object + + Create a socket object from a duplicate of the given file + descriptor. The remaining arguments are the same as for socket(). + """ + nfd = dup(fd) + return socket(family, type, proto, nfd) + +if hasattr(_socket.socket, "sendmsg"): + import array + + def send_fds(sock, buffers, fds, flags=0, address=None): + """ send_fds(sock, buffers, fds[, flags[, address]]) -> integer + + Send the list of file descriptors fds over an AF_UNIX socket. + """ + return sock.sendmsg(buffers, [(_socket.SOL_SOCKET, + _socket.SCM_RIGHTS, array.array("i", fds))]) + __all__.append("send_fds") + +if hasattr(_socket.socket, "recvmsg"): + import array + + def recv_fds(sock, bufsize, maxfds, flags=0): + """ recv_fds(sock, bufsize, maxfds[, flags]) -> (data, list of file + descriptors, msg_flags, address) + + Receive up to maxfds file descriptors returning the message + data and a list containing the descriptors. + """ + # Array of ints + fds = array.array("i") + msg, ancdata, flags, addr = sock.recvmsg(bufsize, + _socket.CMSG_LEN(maxfds * fds.itemsize)) + for cmsg_level, cmsg_type, cmsg_data in ancdata: + if (cmsg_level == _socket.SOL_SOCKET and cmsg_type == _socket.SCM_RIGHTS): + fds.frombytes(cmsg_data[: + len(cmsg_data) - (len(cmsg_data) % fds.itemsize)]) + + return msg, list(fds), flags, addr + __all__.append("recv_fds") + +if hasattr(_socket.socket, "share"): + def fromshare(info): + """ fromshare(info) -> socket object + + Create a socket object from the bytes object returned by + socket.share(pid). + """ + return socket(0, 0, 0, info) + __all__.append("fromshare") + +# Origin: https://gist.github.com/4325783, by Geert Jansen. Public domain. +# This is used if _socket doesn't natively provide socketpair. It's +# always defined so that it can be patched in for testing purposes. +def _fallback_socketpair(family=AF_INET, type=SOCK_STREAM, proto=0): + if family == AF_INET: + host = _LOCALHOST + elif family == AF_INET6: + host = _LOCALHOST_V6 + else: + raise ValueError("Only AF_INET and AF_INET6 socket address families " + "are supported") + if type != SOCK_STREAM: + raise ValueError("Only SOCK_STREAM socket type is supported") + if proto != 0: + raise ValueError("Only protocol zero is supported") + + # We create a connected TCP socket. Note the trick with + # setblocking(False) that prevents us from having to create a thread. 
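The code for that trick follows; for context, the public socketpair() API (native where available, otherwise this fallback) behaves like a connected pipe. A minimal sketch:

    import socket

    a, b = socket.socketpair()   # AF_UNIX where defined, else the TCP fallback
    a.sendall(b'ping')
    assert b.recv(4) == b'ping'
    a.close()
    b.close()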
+ lsock = socket(family, type, proto) + try: + lsock.bind((host, 0)) + lsock.listen() + # On IPv6, ignore flow_info and scope_id + addr, port = lsock.getsockname()[:2] + csock = socket(family, type, proto) + try: + csock.setblocking(False) + try: + csock.connect((addr, port)) + except (BlockingIOError, InterruptedError): + pass + csock.setblocking(True) + ssock, _ = lsock.accept() + except: + csock.close() + raise + finally: + lsock.close() + + # Authenticating avoids using a connection from something else + # able to connect to {host}:{port} instead of us. + # We expect only AF_INET and AF_INET6 families. + try: + if ( + ssock.getsockname() != csock.getpeername() + or csock.getsockname() != ssock.getpeername() + ): + raise ConnectionError("Unexpected peer connection") + except: + # getsockname() and getpeername() can fail + # if either socket isn't connected. + ssock.close() + csock.close() + raise + + return (ssock, csock) + +if hasattr(_socket, "socketpair"): + def socketpair(family=None, type=SOCK_STREAM, proto=0): + if family is None: + try: + family = AF_UNIX + except NameError: + family = AF_INET + a, b = _socket.socketpair(family, type, proto) + a = socket(family, type, proto, a.detach()) + b = socket(family, type, proto, b.detach()) + return a, b + +else: + socketpair = _fallback_socketpair + __all__.append("socketpair") + +socketpair.__doc__ = """socketpair([family[, type[, proto]]]) -> (socket object, socket object) +Create a pair of socket objects from the sockets returned by the platform +socketpair() function. +The arguments are the same as for socket() except the default family is AF_UNIX +if defined on the platform; otherwise, the default is AF_INET. +""" + +_blocking_errnos = { EAGAIN, EWOULDBLOCK } + +class SocketIO(io.RawIOBase): + + """Raw I/O implementation for stream sockets. + + This class supports the makefile() method on sockets. It provides + the raw I/O interface on top of a socket object. + """ + + # One might wonder why not let FileIO do the job instead. There are two + # main reasons why FileIO is not adapted: + # - it wouldn't work under Windows (where you can't use read() and + # write() on a socket handle) + # - it wouldn't work with socket timeouts (FileIO would ignore the + # timeout and consider the socket non-blocking) + + # XXX More docs + + def __init__(self, sock, mode): + if mode not in ("r", "w", "rw", "rb", "wb", "rwb"): + raise ValueError("invalid mode: %r" % mode) + io.RawIOBase.__init__(self) + self._sock = sock + if "b" not in mode: + mode += "b" + self._mode = mode + self._reading = "r" in mode + self._writing = "w" in mode + self._timeout_occurred = False + + def readinto(self, b): + """Read up to len(b) bytes into the writable buffer *b* and return + the number of bytes read. If the socket is non-blocking and no bytes + are available, None is returned. + + If *b* is non-empty, a 0 return value indicates that the connection + was shut down at the other end. + """ + self._checkClosed() + self._checkReadable() + if self._timeout_occurred: + raise OSError("cannot read from timed out object") + while True: + try: + return self._sock.recv_into(b) + except timeout: + self._timeout_occurred = True + raise + except error as e: + if e.errno in _blocking_errnos: + return None + raise + + def write(self, b): + """Write the given bytes or bytearray object *b* to the socket + and return the number of bytes written. This can be less than + len(b) if not all data could be written.
If the socket is + non-blocking and no bytes could be written None is returned. + """ + self._checkClosed() + self._checkWritable() + try: + return self._sock.send(b) + except error as e: + # XXX what about EINTR? + if e.errno in _blocking_errnos: + return None + raise + + def readable(self): + """True if the SocketIO is open for reading. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._reading + + def writable(self): + """True if the SocketIO is open for writing. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return self._writing + + def seekable(self): + """True if the SocketIO is open for seeking. + """ + if self.closed: + raise ValueError("I/O operation on closed socket.") + return super().seekable() + + def fileno(self): + """Return the file descriptor of the underlying socket. + """ + self._checkClosed() + return self._sock.fileno() + + @property + def name(self): + if not self.closed: + return self.fileno() + else: + return -1 + + @property + def mode(self): + return self._mode + + def close(self): + """Close the SocketIO object. This doesn't close the underlying + socket, except if all references to it have disappeared. + """ + if self.closed: + return + io.RawIOBase.close(self) + self._sock._decref_socketios() + self._sock = None + + +def getfqdn(name=''): + """Get fully qualified domain name from name. + + An empty argument is interpreted as meaning the local host. + + First the hostname returned by gethostbyaddr() is checked, then + possibly existing aliases. In case no FQDN is available and `name` + was given, it is returned unchanged. If `name` was empty, '0.0.0.0' or '::', + hostname from gethostname() is returned. + """ + name = name.strip() + if not name or name in ('0.0.0.0', '::'): + name = gethostname() + try: + hostname, aliases, ipaddrs = gethostbyaddr(name) + except error: + pass + else: + aliases.insert(0, hostname) + for name in aliases: + if '.' in name: + break + else: + name = hostname + return name + + +_GLOBAL_DEFAULT_TIMEOUT = object() + +def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT, + source_address=None): + """Connect to *address* and return the socket object. + + Convenience function. Connect to *address* (a 2-tuple ``(host, + port)``) and return the socket object. Passing the optional + *timeout* parameter will set the timeout on the socket instance + before attempting to connect. If no *timeout* is supplied, the + global default timeout setting returned by :func:`getdefaulttimeout` + is used. If *source_address* is set it must be a tuple of (host, port) + for the socket to bind as a source address before making the connection. + A host of '' or port 0 tells the OS to use the default. 
+ """ + + host, port = address + err = None + for res in getaddrinfo(host, port, 0, SOCK_STREAM): + af, socktype, proto, canonname, sa = res + sock = None + try: + sock = socket(af, socktype, proto) + if timeout is not _GLOBAL_DEFAULT_TIMEOUT: + sock.settimeout(timeout) + if source_address: + sock.bind(source_address) + sock.connect(sa) + # Break explicitly a reference cycle + err = None + return sock + + except error as _: + err = _ + if sock is not None: + sock.close() + + if err is not None: + try: + raise err + finally: + # Break explicitly a reference cycle + err = None + else: + raise error("getaddrinfo returns an empty list") + + +def has_dualstack_ipv6(): + """Return True if the platform supports creating a SOCK_STREAM socket + which can handle both AF_INET and AF_INET6 (IPv4 / IPv6) connections. + """ + if not has_ipv6 \ + or not hasattr(_socket, 'IPPROTO_IPV6') \ + or not hasattr(_socket, 'IPV6_V6ONLY'): + return False + try: + with socket(AF_INET6, SOCK_STREAM) as sock: + sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0) + return True + except error: + return False + + +def create_server(address, *, family=AF_INET, backlog=None, reuse_port=False, + dualstack_ipv6=False): + """Convenience function which creates a SOCK_STREAM type socket + bound to *address* (a 2-tuple (host, port)) and return the socket + object. + + *family* should be either AF_INET or AF_INET6. + *backlog* is the queue size passed to socket.listen(). + *reuse_port* dictates whether to use the SO_REUSEPORT socket option. + *dualstack_ipv6*: if true and the platform supports it, it will + create an AF_INET6 socket able to accept both IPv4 or IPv6 + connections. When false it will explicitly disable this option on + platforms that enable it by default (e.g. Linux). + + >>> with create_server(('', 8000)) as server: + ... while True: + ... conn, addr = server.accept() + ... # handle new connection + """ + if reuse_port and not hasattr(_socket, "SO_REUSEPORT"): + raise ValueError("SO_REUSEPORT not supported on this platform") + if dualstack_ipv6: + if not has_dualstack_ipv6(): + raise ValueError("dualstack_ipv6 not supported on this platform") + if family != AF_INET6: + raise ValueError("dualstack_ipv6 requires AF_INET6 family") + sock = socket(family, SOCK_STREAM) + try: + # Note about Windows. We don't set SO_REUSEADDR because: + # 1) It's unnecessary: bind() will succeed even in case of a + # previous closed socket on the same address and still in + # TIME_WAIT state. + # 2) If set, another socket is free to bind() on the same + # address, effectively preventing this one from accepting + # connections. Also, it may set the process in a state where + # it'll no longer respond to any signals or graceful kills. + # See: https://learn.microsoft.com/windows/win32/winsock/using-so-reuseaddr-and-so-exclusiveaddruse + if os.name not in ('nt', 'cygwin') and \ + hasattr(_socket, 'SO_REUSEADDR'): + try: + sock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1) + except error: + # Fail later on bind(), for platforms which may not + # support this option. 
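For reference, the create_server() being defined here pairs naturally with create_connection() from earlier in the file; a self-contained sketch (the loopback address and port 0 are illustrative; create_server()'s body continues below):

    import socket
    import threading

    srv = socket.create_server(('127.0.0.1', 0))   # port 0: OS picks a free port

    def serve_once():
        conn, _ = srv.accept()
        with conn:
            conn.sendall(b'hello')

    t = threading.Thread(target=serve_once)
    t.start()
    with socket.create_connection(srv.getsockname(), timeout=5) as c:
        assert c.recv(5) == b'hello'
    t.join()
    srv.close()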
+ pass + if reuse_port: + sock.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1) + if has_ipv6 and family == AF_INET6: + if dualstack_ipv6: + sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 0) + elif hasattr(_socket, "IPV6_V6ONLY") and \ + hasattr(_socket, "IPPROTO_IPV6"): + sock.setsockopt(IPPROTO_IPV6, IPV6_V6ONLY, 1) + try: + sock.bind(address) + except error as err: + msg = '%s (while attempting to bind on address %r)' % \ + (err.strerror, address) + raise error(err.errno, msg) from None + if backlog is None: + sock.listen() + else: + sock.listen(backlog) + return sock + except error: + sock.close() + raise + + +def getaddrinfo(host, port, family=0, type=0, proto=0, flags=0): + """Resolve host and port into list of address info entries. + + Translate the host/port argument into a sequence of 5-tuples that contain + all the necessary arguments for creating a socket connected to that service. + host is a domain name, a string representation of an IPv4/v6 address or + None. port is a string service name such as 'http', a numeric port number or + None. By passing None as the value of host and port, you can pass NULL to + the underlying C API. + + The family, type and proto arguments can be optionally specified in order to + narrow the list of addresses returned. Passing zero as a value for each of + these arguments selects the full range of results. + """ + # We override this function since we want to translate the numeric family + # and socket type values to enum constants. + addrlist = [] + for res in _socket.getaddrinfo(host, port, family, type, proto, flags): + af, socktype, proto, canonname, sa = res + addrlist.append((_intenum_converter(af, AddressFamily), + _intenum_converter(socktype, SocketKind), + proto, canonname, sa)) + return addrlist diff --git a/llava/lib/python3.10/sre_compile.py b/llava/lib/python3.10/sre_compile.py new file mode 100644 index 0000000000000000000000000000000000000000..aed752d11d2e5e947a8557d48f5fb9b6854f4338 --- /dev/null +++ b/llava/lib/python3.10/sre_compile.py @@ -0,0 +1,808 @@ +# +# Secret Labs' Regular Expression Engine +# +# convert template to internal format +# +# Copyright (c) 1997-2001 by Secret Labs AB. All rights reserved. +# +# See the sre.py file for information on usage and redistribution. +# + +"""Internal support module for sre""" + +import _sre +import sre_parse +from sre_constants import * + +assert _sre.MAGIC == MAGIC, "SRE module mismatch" + +_LITERAL_CODES = {LITERAL, NOT_LITERAL} +_REPEATING_CODES = {REPEAT, MIN_REPEAT, MAX_REPEAT} +_SUCCESS_CODES = {SUCCESS, FAILURE} +_ASSERT_CODES = {ASSERT, ASSERT_NOT} +_UNIT_CODES = _LITERAL_CODES | {ANY, IN} + +# Sets of lowercase characters which have the same uppercase. 
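The table that follows records lowercase letters that collide under upper-casing; the user-visible effect is IGNORECASE equivalence in re (a quick demonstration):

    import re

    # LATIN SMALL LETTER LONG S and 's' share the uppercase 'S'...
    assert '\u017f'.upper() == 'S' == 's'.upper()
    # ...so case-insensitive matching must treat them as equal:
    assert re.fullmatch('s', '\u017f', re.IGNORECASE) is not None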
+_equivalences = ( + # LATIN SMALL LETTER I, LATIN SMALL LETTER DOTLESS I + (0x69, 0x131), # iı + # LATIN SMALL LETTER S, LATIN SMALL LETTER LONG S + (0x73, 0x17f), # sſ + # MICRO SIGN, GREEK SMALL LETTER MU + (0xb5, 0x3bc), # µμ + # COMBINING GREEK YPOGEGRAMMENI, GREEK SMALL LETTER IOTA, GREEK PROSGEGRAMMENI + (0x345, 0x3b9, 0x1fbe), # \u0345ιι + # GREEK SMALL LETTER IOTA WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER IOTA WITH DIALYTIKA AND OXIA + (0x390, 0x1fd3), # ΐΐ + # GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND TONOS, GREEK SMALL LETTER UPSILON WITH DIALYTIKA AND OXIA + (0x3b0, 0x1fe3), # ΰΰ + # GREEK SMALL LETTER BETA, GREEK BETA SYMBOL + (0x3b2, 0x3d0), # βϐ + # GREEK SMALL LETTER EPSILON, GREEK LUNATE EPSILON SYMBOL + (0x3b5, 0x3f5), # εϵ + # GREEK SMALL LETTER THETA, GREEK THETA SYMBOL + (0x3b8, 0x3d1), # θϑ + # GREEK SMALL LETTER KAPPA, GREEK KAPPA SYMBOL + (0x3ba, 0x3f0), # κϰ + # GREEK SMALL LETTER PI, GREEK PI SYMBOL + (0x3c0, 0x3d6), # πϖ + # GREEK SMALL LETTER RHO, GREEK RHO SYMBOL + (0x3c1, 0x3f1), # ρϱ + # GREEK SMALL LETTER FINAL SIGMA, GREEK SMALL LETTER SIGMA + (0x3c2, 0x3c3), # ςσ + # GREEK SMALL LETTER PHI, GREEK PHI SYMBOL + (0x3c6, 0x3d5), # φϕ + # CYRILLIC SMALL LETTER VE, CYRILLIC SMALL LETTER ROUNDED VE + (0x432, 0x1c80), # вᲀ + # CYRILLIC SMALL LETTER DE, CYRILLIC SMALL LETTER LONG-LEGGED DE + (0x434, 0x1c81), # дᲁ + # CYRILLIC SMALL LETTER O, CYRILLIC SMALL LETTER NARROW O + (0x43e, 0x1c82), # оᲂ + # CYRILLIC SMALL LETTER ES, CYRILLIC SMALL LETTER WIDE ES + (0x441, 0x1c83), # сᲃ + # CYRILLIC SMALL LETTER TE, CYRILLIC SMALL LETTER TALL TE, CYRILLIC SMALL LETTER THREE-LEGGED TE + (0x442, 0x1c84, 0x1c85), # тᲄᲅ + # CYRILLIC SMALL LETTER HARD SIGN, CYRILLIC SMALL LETTER TALL HARD SIGN + (0x44a, 0x1c86), # ъᲆ + # CYRILLIC SMALL LETTER YAT, CYRILLIC SMALL LETTER TALL YAT + (0x463, 0x1c87), # ѣᲇ + # CYRILLIC SMALL LETTER UNBLENDED UK, CYRILLIC SMALL LETTER MONOGRAPH UK + (0x1c88, 0xa64b), # ᲈꙋ + # LATIN SMALL LETTER S WITH DOT ABOVE, LATIN SMALL LETTER LONG S WITH DOT ABOVE + (0x1e61, 0x1e9b), # ṡẛ + # LATIN SMALL LIGATURE LONG S T, LATIN SMALL LIGATURE ST + (0xfb05, 0xfb06), # ſtst +) + +# Maps the lowercase code to lowercase codes which have the same uppercase. 
+_ignorecase_fixes = {i: tuple(j for j in t if i != j) + for t in _equivalences for i in t} + +def _combine_flags(flags, add_flags, del_flags, + TYPE_FLAGS=sre_parse.TYPE_FLAGS): + if add_flags & TYPE_FLAGS: + flags &= ~TYPE_FLAGS + return (flags | add_flags) & ~del_flags + +def _compile(code, pattern, flags): + # internal: compile a (sub)pattern + emit = code.append + _len = len + LITERAL_CODES = _LITERAL_CODES + REPEATING_CODES = _REPEATING_CODES + SUCCESS_CODES = _SUCCESS_CODES + ASSERT_CODES = _ASSERT_CODES + iscased = None + tolower = None + fixes = None + if flags & SRE_FLAG_IGNORECASE and not flags & SRE_FLAG_LOCALE: + if flags & SRE_FLAG_UNICODE: + iscased = _sre.unicode_iscased + tolower = _sre.unicode_tolower + fixes = _ignorecase_fixes + else: + iscased = _sre.ascii_iscased + tolower = _sre.ascii_tolower + for op, av in pattern: + if op in LITERAL_CODES: + if not flags & SRE_FLAG_IGNORECASE: + emit(op) + emit(av) + elif flags & SRE_FLAG_LOCALE: + emit(OP_LOCALE_IGNORE[op]) + emit(av) + elif not iscased(av): + emit(op) + emit(av) + else: + lo = tolower(av) + if not fixes: # ascii + emit(OP_IGNORE[op]) + emit(lo) + elif lo not in fixes: + emit(OP_UNICODE_IGNORE[op]) + emit(lo) + else: + emit(IN_UNI_IGNORE) + skip = _len(code); emit(0) + if op is NOT_LITERAL: + emit(NEGATE) + for k in (lo,) + fixes[lo]: + emit(LITERAL) + emit(k) + emit(FAILURE) + code[skip] = _len(code) - skip + elif op is IN: + charset, hascased = _optimize_charset(av, iscased, tolower, fixes) + if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE: + emit(IN_LOC_IGNORE) + elif not hascased: + emit(IN) + elif not fixes: # ascii + emit(IN_IGNORE) + else: + emit(IN_UNI_IGNORE) + skip = _len(code); emit(0) + _compile_charset(charset, flags, code) + code[skip] = _len(code) - skip + elif op is ANY: + if flags & SRE_FLAG_DOTALL: + emit(ANY_ALL) + else: + emit(ANY) + elif op in REPEATING_CODES: + if flags & SRE_FLAG_TEMPLATE: + raise error("internal: unsupported template operator %r" % (op,)) + if _simple(av[2]): + if op is MAX_REPEAT: + emit(REPEAT_ONE) + else: + emit(MIN_REPEAT_ONE) + skip = _len(code); emit(0) + emit(av[0]) + emit(av[1]) + _compile(code, av[2], flags) + emit(SUCCESS) + code[skip] = _len(code) - skip + else: + emit(REPEAT) + skip = _len(code); emit(0) + emit(av[0]) + emit(av[1]) + _compile(code, av[2], flags) + code[skip] = _len(code) - skip + if op is MAX_REPEAT: + emit(MAX_UNTIL) + else: + emit(MIN_UNTIL) + elif op is SUBPATTERN: + group, add_flags, del_flags, p = av + if group: + emit(MARK) + emit((group-1)*2) + # _compile_info(code, p, _combine_flags(flags, add_flags, del_flags)) + _compile(code, p, _combine_flags(flags, add_flags, del_flags)) + if group: + emit(MARK) + emit((group-1)*2+1) + elif op in SUCCESS_CODES: + emit(op) + elif op in ASSERT_CODES: + emit(op) + skip = _len(code); emit(0) + if av[0] >= 0: + emit(0) # look ahead + else: + lo, hi = av[1].getwidth() + if lo != hi: + raise error("look-behind requires fixed-width pattern") + emit(lo) # look behind + _compile(code, av[1], flags) + emit(SUCCESS) + code[skip] = _len(code) - skip + elif op is CALL: + emit(op) + skip = _len(code); emit(0) + _compile(code, av, flags) + emit(SUCCESS) + code[skip] = _len(code) - skip + elif op is AT: + emit(op) + if flags & SRE_FLAG_MULTILINE: + av = AT_MULTILINE.get(av, av) + if flags & SRE_FLAG_LOCALE: + av = AT_LOCALE.get(av, av) + elif flags & SRE_FLAG_UNICODE: + av = AT_UNICODE.get(av, av) + emit(av) + elif op is BRANCH: + emit(op) + tail = [] + tailappend = tail.append + for av in av[1]: + skip 
= _len(code); emit(0) + # _compile_info(code, av, flags) + _compile(code, av, flags) + emit(JUMP) + tailappend(_len(code)); emit(0) + code[skip] = _len(code) - skip + emit(FAILURE) # end of branch + for tail in tail: + code[tail] = _len(code) - tail + elif op is CATEGORY: + emit(op) + if flags & SRE_FLAG_LOCALE: + av = CH_LOCALE[av] + elif flags & SRE_FLAG_UNICODE: + av = CH_UNICODE[av] + emit(av) + elif op is GROUPREF: + if not flags & SRE_FLAG_IGNORECASE: + emit(op) + elif flags & SRE_FLAG_LOCALE: + emit(GROUPREF_LOC_IGNORE) + elif not fixes: # ascii + emit(GROUPREF_IGNORE) + else: + emit(GROUPREF_UNI_IGNORE) + emit(av-1) + elif op is GROUPREF_EXISTS: + emit(op) + emit(av[0]-1) + skipyes = _len(code); emit(0) + _compile(code, av[1], flags) + if av[2]: + emit(JUMP) + skipno = _len(code); emit(0) + code[skipyes] = _len(code) - skipyes + 1 + _compile(code, av[2], flags) + code[skipno] = _len(code) - skipno + else: + code[skipyes] = _len(code) - skipyes + 1 + else: + raise error("internal: unsupported operand type %r" % (op,)) + +def _compile_charset(charset, flags, code): + # compile charset subprogram + emit = code.append + for op, av in charset: + emit(op) + if op is NEGATE: + pass + elif op is LITERAL: + emit(av) + elif op is RANGE or op is RANGE_UNI_IGNORE: + emit(av[0]) + emit(av[1]) + elif op is CHARSET: + code.extend(av) + elif op is BIGCHARSET: + code.extend(av) + elif op is CATEGORY: + if flags & SRE_FLAG_LOCALE: + emit(CH_LOCALE[av]) + elif flags & SRE_FLAG_UNICODE: + emit(CH_UNICODE[av]) + else: + emit(av) + else: + raise error("internal: unsupported set operator %r" % (op,)) + emit(FAILURE) + +def _optimize_charset(charset, iscased=None, fixup=None, fixes=None): + # internal: optimize character set + out = [] + tail = [] + charmap = bytearray(256) + hascased = False + for op, av in charset: + while True: + try: + if op is LITERAL: + if fixup: + lo = fixup(av) + charmap[lo] = 1 + if fixes and lo in fixes: + for k in fixes[lo]: + charmap[k] = 1 + if not hascased and iscased(av): + hascased = True + else: + charmap[av] = 1 + elif op is RANGE: + r = range(av[0], av[1]+1) + if fixup: + if fixes: + for i in map(fixup, r): + charmap[i] = 1 + if i in fixes: + for k in fixes[i]: + charmap[k] = 1 + else: + for i in map(fixup, r): + charmap[i] = 1 + if not hascased: + hascased = any(map(iscased, r)) + else: + for i in r: + charmap[i] = 1 + elif op is NEGATE: + out.append((op, av)) + else: + tail.append((op, av)) + except IndexError: + if len(charmap) == 256: + # character set contains non-UCS1 character codes + charmap += b'\0' * 0xff00 + continue + # Character set contains non-BMP character codes. + # For range, all BMP characters in the range are already + # proceeded. + if fixup: + hascased = True + # For now, IN_UNI_IGNORE+LITERAL and + # IN_UNI_IGNORE+RANGE_UNI_IGNORE work for all non-BMP + # characters, because two characters (at least one of + # which is not in the BMP) match case-insensitively + # if and only if: + # 1) c1.lower() == c2.lower() + # 2) c1.lower() == c2 or c1.lower().upper() == c2 + # Also, both c.lower() and c.lower().upper() are single + # characters for every non-BMP character. 
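To see what this optimizer emits, the module's private helpers can be driven directly (undocumented internals, used here purely for illustration; exact output varies by version, and _optimize_charset() resumes below):

    import sre_compile
    import sre_parse

    parsed = sre_parse.parse('[a-cx]')
    print(sre_compile._code(parsed, 0))   # compact RANGE/LITERAL codes, not a bitmap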
+ if op is RANGE: + op = RANGE_UNI_IGNORE + tail.append((op, av)) + break + + # compress character map + runs = [] + q = 0 + while True: + p = charmap.find(1, q) + if p < 0: + break + if len(runs) >= 2: + runs = None + break + q = charmap.find(0, p) + if q < 0: + runs.append((p, len(charmap))) + break + runs.append((p, q)) + if runs is not None: + # use literal/range + for p, q in runs: + if q - p == 1: + out.append((LITERAL, p)) + else: + out.append((RANGE, (p, q - 1))) + out += tail + # if the case was changed or new representation is more compact + if hascased or len(out) < len(charset): + return out, hascased + # else original character set is good enough + return charset, hascased + + # use bitmap + if len(charmap) == 256: + data = _mk_bitmap(charmap) + out.append((CHARSET, data)) + out += tail + return out, hascased + + # To represent a big charset, first a bitmap of all characters in the + # set is constructed. Then, this bitmap is sliced into chunks of 256 + # characters, duplicate chunks are eliminated, and each chunk is + # given a number. In the compiled expression, the charset is + # represented by a 32-bit word sequence, consisting of one word for + # the number of different chunks, a sequence of 256 bytes (64 words) + # of chunk numbers indexed by their original chunk position, and a + # sequence of 256-bit chunks (8 words each). + + # Compression is normally good: in a typical charset, large ranges of + # Unicode will be either completely excluded (e.g. if only cyrillic + # letters are to be matched), or completely included (e.g. if large + # subranges of Kanji match). These ranges will be represented by + # chunks of all one-bits or all zero-bits. + + # Matching can be also done efficiently: the more significant byte of + # the Unicode character is an index into the chunk number, and the + # less significant byte is a bit index in the chunk (just like the + # CHARSET matching). + + charmap = bytes(charmap) # should be hashable + comps = {} + mapping = bytearray(256) + block = 0 + data = bytearray() + for i in range(0, 65536, 256): + chunk = charmap[i: i + 256] + if chunk in comps: + mapping[i // 256] = comps[chunk] + else: + mapping[i // 256] = comps[chunk] = block + block += 1 + data += chunk + data = _mk_bitmap(data) + data[0:0] = [block] + _bytes_to_codes(mapping) + out.append((BIGCHARSET, data)) + out += tail + return out, hascased + +_CODEBITS = _sre.CODESIZE * 8 +MAXCODE = (1 << _CODEBITS) - 1 +_BITS_TRANS = b'0' + b'1' * 255 +def _mk_bitmap(bits, _CODEBITS=_CODEBITS, _int=int): + s = bits.translate(_BITS_TRANS)[::-1] + return [_int(s[i - _CODEBITS: i], 2) + for i in range(len(s), 0, -_CODEBITS)] + +def _bytes_to_codes(b): + # Convert block indices to word array + a = memoryview(b).cast('I') + assert a.itemsize == _sre.CODESIZE + assert len(a) * a.itemsize == len(b) + return a.tolist() + +def _simple(p): + # check if this subpattern is a "simple" operator + if len(p) != 1: + return False + op, av = p[0] + if op is SUBPATTERN: + return av[0] is None and _simple(av[-1]) + return op in _UNIT_CODES + +def _generate_overlap_table(prefix): + """ + Generate an overlap table for the following prefix. + An overlap table is a table of the same size as the prefix which + informs about the potential self-overlap for each index in the prefix: + - if overlap[i] == 0, prefix[i:] can't overlap prefix[0:...] 
+ - if overlap[i] == k with 0 < k <= i, prefix[i-k+1:i+1] overlaps with + prefix[0:k] + """ + table = [0] * len(prefix) + for i in range(1, len(prefix)): + idx = table[i - 1] + while prefix[i] != prefix[idx]: + if idx == 0: + table[i] = 0 + break + idx = table[idx - 1] + else: + table[i] = idx + 1 + return table + +def _get_iscased(flags): + if not flags & SRE_FLAG_IGNORECASE: + return None + elif flags & SRE_FLAG_UNICODE: + return _sre.unicode_iscased + else: + return _sre.ascii_iscased + +def _get_literal_prefix(pattern, flags): + # look for literal prefix + prefix = [] + prefixappend = prefix.append + prefix_skip = None + iscased = _get_iscased(flags) + for op, av in pattern.data: + if op is LITERAL: + if iscased and iscased(av): + break + prefixappend(av) + elif op is SUBPATTERN: + group, add_flags, del_flags, p = av + flags1 = _combine_flags(flags, add_flags, del_flags) + if flags1 & SRE_FLAG_IGNORECASE and flags1 & SRE_FLAG_LOCALE: + break + prefix1, prefix_skip1, got_all = _get_literal_prefix(p, flags1) + if prefix_skip is None: + if group is not None: + prefix_skip = len(prefix) + elif prefix_skip1 is not None: + prefix_skip = len(prefix) + prefix_skip1 + prefix.extend(prefix1) + if not got_all: + break + else: + break + else: + return prefix, prefix_skip, True + return prefix, prefix_skip, False + +def _get_charset_prefix(pattern, flags): + while True: + if not pattern.data: + return None + op, av = pattern.data[0] + if op is not SUBPATTERN: + break + group, add_flags, del_flags, pattern = av + flags = _combine_flags(flags, add_flags, del_flags) + if flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE: + return None + + iscased = _get_iscased(flags) + if op is LITERAL: + if iscased and iscased(av): + return None + return [(op, av)] + elif op is BRANCH: + charset = [] + charsetappend = charset.append + for p in av[1]: + if not p: + return None + op, av = p[0] + if op is LITERAL and not (iscased and iscased(av)): + charsetappend((op, av)) + else: + return None + return charset + elif op is IN: + charset = av + if iscased: + for op, av in charset: + if op is LITERAL: + if iscased(av): + return None + elif op is RANGE: + if av[1] > 0xffff: + return None + if any(map(iscased, range(av[0], av[1]+1))): + return None + return charset + return None + +def _compile_info(code, pattern, flags): + # internal: compile an info block. 
in the current version, + # this contains min/max pattern width, and an optional literal + # prefix or a character map + lo, hi = pattern.getwidth() + if hi > MAXCODE: + hi = MAXCODE + if lo == 0: + code.extend([INFO, 4, 0, lo, hi]) + return + # look for a literal prefix + prefix = [] + prefix_skip = 0 + charset = [] # not used + if not (flags & SRE_FLAG_IGNORECASE and flags & SRE_FLAG_LOCALE): + # look for literal prefix + prefix, prefix_skip, got_all = _get_literal_prefix(pattern, flags) + # if no prefix, look for charset prefix + if not prefix: + charset = _get_charset_prefix(pattern, flags) +## if prefix: +## print("*** PREFIX", prefix, prefix_skip) +## if charset: +## print("*** CHARSET", charset) + # add an info block + emit = code.append + emit(INFO) + skip = len(code); emit(0) + # literal flag + mask = 0 + if prefix: + mask = SRE_INFO_PREFIX + if prefix_skip is None and got_all: + mask = mask | SRE_INFO_LITERAL + elif charset: + mask = mask | SRE_INFO_CHARSET + emit(mask) + # pattern length + if lo < MAXCODE: + emit(lo) + else: + emit(MAXCODE) + prefix = prefix[:MAXCODE] + emit(min(hi, MAXCODE)) + # add literal prefix + if prefix: + emit(len(prefix)) # length + if prefix_skip is None: + prefix_skip = len(prefix) + emit(prefix_skip) # skip + code.extend(prefix) + # generate overlap table + code.extend(_generate_overlap_table(prefix)) + elif charset: + charset, hascased = _optimize_charset(charset) + assert not hascased + _compile_charset(charset, flags, code) + code[skip] = len(code) - skip + +def isstring(obj): + return isinstance(obj, (str, bytes)) + +def _code(p, flags): + + flags = p.state.flags | flags + code = [] + + # compile info block + _compile_info(code, p, flags) + + # compile the pattern + _compile(code, p.data, flags) + + code.append(SUCCESS) + + return code + +def _hex_code(code): + return '[%s]' % ', '.join('%#0*x' % (_sre.CODESIZE*2+2, x) for x in code) + +def dis(code): + import sys + + labels = set() + level = 0 + offset_width = len(str(len(code) - 1)) + + def dis_(start, end): + def print_(*args, to=None): + if to is not None: + labels.add(to) + args += ('(to %d)' % (to,),) + print('%*d%s ' % (offset_width, start, ':' if start in labels else '.'), + end=' '*(level-1)) + print(*args) + + def print_2(*args): + print(end=' '*(offset_width + 2*level)) + print(*args) + + nonlocal level + level += 1 + i = start + while i < end: + start = i + op = code[i] + i += 1 + op = OPCODES[op] + if op in (SUCCESS, FAILURE, ANY, ANY_ALL, + MAX_UNTIL, MIN_UNTIL, NEGATE): + print_(op) + elif op in (LITERAL, NOT_LITERAL, + LITERAL_IGNORE, NOT_LITERAL_IGNORE, + LITERAL_UNI_IGNORE, NOT_LITERAL_UNI_IGNORE, + LITERAL_LOC_IGNORE, NOT_LITERAL_LOC_IGNORE): + arg = code[i] + i += 1 + print_(op, '%#02x (%r)' % (arg, chr(arg))) + elif op is AT: + arg = code[i] + i += 1 + arg = str(ATCODES[arg]) + assert arg[:3] == 'AT_' + print_(op, arg[3:]) + elif op is CATEGORY: + arg = code[i] + i += 1 + arg = str(CHCODES[arg]) + assert arg[:9] == 'CATEGORY_' + print_(op, arg[9:]) + elif op in (IN, IN_IGNORE, IN_UNI_IGNORE, IN_LOC_IGNORE): + skip = code[i] + print_(op, skip, to=i+skip) + dis_(i+1, i+skip) + i += skip + elif op in (RANGE, RANGE_UNI_IGNORE): + lo, hi = code[i: i+2] + i += 2 + print_(op, '%#02x %#02x (%r-%r)' % (lo, hi, chr(lo), chr(hi))) + elif op is CHARSET: + print_(op, _hex_code(code[i: i + 256//_CODEBITS])) + i += 256//_CODEBITS + elif op is BIGCHARSET: + arg = code[i] + i += 1 + mapping = list(b''.join(x.to_bytes(_sre.CODESIZE, sys.byteorder) + for x in code[i: i + 256//_sre.CODESIZE])) + 
print_(op, arg, mapping) + i += 256//_sre.CODESIZE + level += 1 + for j in range(arg): + print_2(_hex_code(code[i: i + 256//_CODEBITS])) + i += 256//_CODEBITS + level -= 1 + elif op in (MARK, GROUPREF, GROUPREF_IGNORE, GROUPREF_UNI_IGNORE, + GROUPREF_LOC_IGNORE): + arg = code[i] + i += 1 + print_(op, arg) + elif op is JUMP: + skip = code[i] + print_(op, skip, to=i+skip) + i += 1 + elif op is BRANCH: + skip = code[i] + print_(op, skip, to=i+skip) + while skip: + dis_(i+1, i+skip) + i += skip + start = i + skip = code[i] + if skip: + print_('branch', skip, to=i+skip) + else: + print_(FAILURE) + i += 1 + elif op in (REPEAT, REPEAT_ONE, MIN_REPEAT_ONE): + skip, min, max = code[i: i+3] + if max == MAXREPEAT: + max = 'MAXREPEAT' + print_(op, skip, min, max, to=i+skip) + dis_(i+3, i+skip) + i += skip + elif op is GROUPREF_EXISTS: + arg, skip = code[i: i+2] + print_(op, arg, skip, to=i+skip) + i += 2 + elif op in (ASSERT, ASSERT_NOT): + skip, arg = code[i: i+2] + print_(op, skip, arg, to=i+skip) + dis_(i+2, i+skip) + i += skip + elif op is INFO: + skip, flags, min, max = code[i: i+4] + if max == MAXREPEAT: + max = 'MAXREPEAT' + print_(op, skip, bin(flags), min, max, to=i+skip) + start = i+4 + if flags & SRE_INFO_PREFIX: + prefix_len, prefix_skip = code[i+4: i+6] + print_2(' prefix_skip', prefix_skip) + start = i + 6 + prefix = code[start: start+prefix_len] + print_2(' prefix', + '[%s]' % ', '.join('%#02x' % x for x in prefix), + '(%r)' % ''.join(map(chr, prefix))) + start += prefix_len + print_2(' overlap', code[start: start+prefix_len]) + start += prefix_len + if flags & SRE_INFO_CHARSET: + level += 1 + print_2('in') + dis_(start, i+skip) + level -= 1 + i += skip + else: + raise ValueError(op) + + level -= 1 + + dis_(0, len(code)) + + +def compile(p, flags=0): + # internal: convert pattern list to internal format + + if isstring(p): + pattern = p + p = sre_parse.parse(p, flags) + else: + pattern = None + + code = _code(p, flags) + + if flags & SRE_FLAG_DEBUG: + print() + dis(code) + + # map in either direction + groupindex = p.state.groupdict + indexgroup = [None] * p.state.groups + for k, i in groupindex.items(): + indexgroup[i] = k + + return _sre.compile( + pattern, flags | p.state.flags, code, + p.state.groups-1, + groupindex, tuple(indexgroup) + ) diff --git a/llava/lib/python3.10/struct.py b/llava/lib/python3.10/struct.py new file mode 100644 index 0000000000000000000000000000000000000000..d6bba58863649898a3f075cdda51cade9dc07f06 --- /dev/null +++ b/llava/lib/python3.10/struct.py @@ -0,0 +1,15 @@ +__all__ = [ + # Functions + 'calcsize', 'pack', 'pack_into', 'unpack', 'unpack_from', + 'iter_unpack', + + # Classes + 'Struct', + + # Exceptions + 'error' + ] + +from _struct import * +from _struct import _clearcache +from _struct import __doc__ diff --git a/llava/lib/python3.10/symtable.py b/llava/lib/python3.10/symtable.py new file mode 100644 index 0000000000000000000000000000000000000000..e11e5fffc4e1be49defe1c54803d65e398ba56bb --- /dev/null +++ b/llava/lib/python3.10/symtable.py @@ -0,0 +1,322 @@ +"""Interface to the compiler's internal symbol tables""" + +import _symtable +from _symtable import (USE, DEF_GLOBAL, DEF_NONLOCAL, DEF_LOCAL, DEF_PARAM, + DEF_IMPORT, DEF_BOUND, DEF_ANNOT, SCOPE_OFF, SCOPE_MASK, FREE, + LOCAL, GLOBAL_IMPLICIT, GLOBAL_EXPLICIT, CELL) + +import weakref + +__all__ = ["symtable", "SymbolTable", "Class", "Function", "Symbol"] + +def symtable(code, filename, compile_type): + """ Return the toplevel *SymbolTable* for the source code. 
+ + *filename* is the name of the file with the code + and *compile_type* is the *compile()* mode argument. + """ + top = _symtable.symtable(code, filename, compile_type) + return _newSymbolTable(top, filename) + +class SymbolTableFactory: + def __init__(self): + self.__memo = weakref.WeakValueDictionary() + + def new(self, table, filename): + if table.type == _symtable.TYPE_FUNCTION: + return Function(table, filename) + if table.type == _symtable.TYPE_CLASS: + return Class(table, filename) + return SymbolTable(table, filename) + + def __call__(self, table, filename): + key = table, filename + obj = self.__memo.get(key, None) + if obj is None: + obj = self.__memo[key] = self.new(table, filename) + return obj + +_newSymbolTable = SymbolTableFactory() + + +class SymbolTable: + + def __init__(self, raw_table, filename): + self._table = raw_table + self._filename = filename + self._symbols = {} + + def __repr__(self): + if self.__class__ == SymbolTable: + kind = "" + else: + kind = "%s " % self.__class__.__name__ + + if self._table.name == "top": + return "<{0}SymbolTable for module {1}>".format(kind, self._filename) + else: + return "<{0}SymbolTable for {1} in {2}>".format(kind, + self._table.name, + self._filename) + + def get_type(self): + """Return the type of the symbol table. + + The values retuned are 'class', 'module' and + 'function'. + """ + if self._table.type == _symtable.TYPE_MODULE: + return "module" + if self._table.type == _symtable.TYPE_FUNCTION: + return "function" + if self._table.type == _symtable.TYPE_CLASS: + return "class" + assert self._table.type in (1, 2, 3), \ + "unexpected type: {0}".format(self._table.type) + + def get_id(self): + """Return an identifier for the table. + """ + return self._table.id + + def get_name(self): + """Return the table's name. + + This corresponds to the name of the class, function + or 'top' if the table is for a class, function or + global respectively. + """ + return self._table.name + + def get_lineno(self): + """Return the number of the first line in the + block for the table. + """ + return self._table.lineno + + def is_optimized(self): + """Return *True* if the locals in the table + are optimizable. + """ + return bool(self._table.type == _symtable.TYPE_FUNCTION) + + def is_nested(self): + """Return *True* if the block is a nested class + or function.""" + return bool(self._table.nested) + + def has_children(self): + """Return *True* if the block has nested namespaces. + """ + return bool(self._table.children) + + def get_identifiers(self): + """Return a view object containing the names of symbols in the table. + """ + return self._table.symbols.keys() + + def lookup(self, name): + """Lookup a *name* in the table. + + Returns a *Symbol* instance. + """ + sym = self._symbols.get(name) + if sym is None: + flags = self._table.symbols[name] + namespaces = self.__check_children(name) + module_scope = (self._table.name == "top") + sym = self._symbols[name] = Symbol(name, flags, namespaces, + module_scope=module_scope) + return sym + + def get_symbols(self): + """Return a list of *Symbol* instances for + names in the table. + """ + return [self.lookup(ident) for ident in self.get_identifiers()] + + def __check_children(self, name): + return [_newSymbolTable(st, self._filename) + for st in self._table.children + if st.name == name] + + def get_children(self): + """Return a list of the nested symbol tables. 
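+
+ Example (assuming mod is the table returned by symtable(), as in
+ the demo at the bottom of this module):
+ >>> [st.get_name() for st in mod.get_children()]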
+ """ + return [_newSymbolTable(st, self._filename) + for st in self._table.children] + + +class Function(SymbolTable): + + # Default values for instance variables + __params = None + __locals = None + __frees = None + __globals = None + __nonlocals = None + + def __idents_matching(self, test_func): + return tuple(ident for ident in self.get_identifiers() + if test_func(self._table.symbols[ident])) + + def get_parameters(self): + """Return a tuple of parameters to the function. + """ + if self.__params is None: + self.__params = self.__idents_matching(lambda x:x & DEF_PARAM) + return self.__params + + def get_locals(self): + """Return a tuple of locals in the function. + """ + if self.__locals is None: + locs = (LOCAL, CELL) + test = lambda x: ((x >> SCOPE_OFF) & SCOPE_MASK) in locs + self.__locals = self.__idents_matching(test) + return self.__locals + + def get_globals(self): + """Return a tuple of globals in the function. + """ + if self.__globals is None: + glob = (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT) + test = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) in glob + self.__globals = self.__idents_matching(test) + return self.__globals + + def get_nonlocals(self): + """Return a tuple of nonlocals in the function. + """ + if self.__nonlocals is None: + self.__nonlocals = self.__idents_matching(lambda x:x & DEF_NONLOCAL) + return self.__nonlocals + + def get_frees(self): + """Return a tuple of free variables in the function. + """ + if self.__frees is None: + is_free = lambda x:((x >> SCOPE_OFF) & SCOPE_MASK) == FREE + self.__frees = self.__idents_matching(is_free) + return self.__frees + + +class Class(SymbolTable): + + __methods = None + + def get_methods(self): + """Return a tuple of methods declared in the class. + """ + if self.__methods is None: + d = {} + for st in self._table.children: + d[st.name] = 1 + self.__methods = tuple(d) + return self.__methods + + +class Symbol: + + def __init__(self, name, flags, namespaces=None, *, module_scope=False): + self.__name = name + self.__flags = flags + self.__scope = (flags >> SCOPE_OFF) & SCOPE_MASK # like PyST_GetScope() + self.__namespaces = namespaces or () + self.__module_scope = module_scope + + def __repr__(self): + return "".format(self.__name) + + def get_name(self): + """Return a name of a symbol. + """ + return self.__name + + def is_referenced(self): + """Return *True* if the symbol is used in + its block. + """ + return bool(self.__flags & _symtable.USE) + + def is_parameter(self): + """Return *True* if the symbol is a parameter. + """ + return bool(self.__flags & DEF_PARAM) + + def is_global(self): + """Return *True* if the sysmbol is global. + """ + return bool(self.__scope in (GLOBAL_IMPLICIT, GLOBAL_EXPLICIT) + or (self.__module_scope and self.__flags & DEF_BOUND)) + + def is_nonlocal(self): + """Return *True* if the symbol is nonlocal.""" + return bool(self.__flags & DEF_NONLOCAL) + + def is_declared_global(self): + """Return *True* if the symbol is declared global + with a global statement.""" + return bool(self.__scope == GLOBAL_EXPLICIT) + + def is_local(self): + """Return *True* if the symbol is local. + """ + return bool(self.__scope in (LOCAL, CELL) + or (self.__module_scope and self.__flags & DEF_BOUND)) + + def is_annotated(self): + """Return *True* if the symbol is annotated. + """ + return bool(self.__flags & DEF_ANNOT) + + def is_free(self): + """Return *True* if a referenced symbol is + not assigned to. 
+ """ + return bool(self.__scope == FREE) + + def is_imported(self): + """Return *True* if the symbol is created from + an import statement. + """ + return bool(self.__flags & DEF_IMPORT) + + def is_assigned(self): + """Return *True* if a symbol is assigned to.""" + return bool(self.__flags & DEF_LOCAL) + + def is_namespace(self): + """Returns *True* if name binding introduces new namespace. + + If the name is used as the target of a function or class + statement, this will be true. + + Note that a single name can be bound to multiple objects. If + is_namespace() is true, the name may also be bound to other + objects, like an int or list, that does not introduce a new + namespace. + """ + return bool(self.__namespaces) + + def get_namespaces(self): + """Return a list of namespaces bound to this name""" + return self.__namespaces + + def get_namespace(self): + """Return the single namespace bound to this name. + + Raises ValueError if the name is bound to multiple namespaces. + """ + if len(self.__namespaces) != 1: + raise ValueError("name is bound to multiple namespaces") + return self.__namespaces[0] + +if __name__ == "__main__": + import os, sys + with open(sys.argv[0]) as f: + src = f.read() + mod = symtable(src, os.path.split(sys.argv[0])[1], "exec") + for ident in mod.get_identifiers(): + info = mod.lookup(ident) + print(info, info.is_local(), info.is_namespace()) diff --git a/llava/lib/python3.10/turtle.py b/llava/lib/python3.10/turtle.py new file mode 100644 index 0000000000000000000000000000000000000000..d287c15543528a8c24609ea1ef7b479ffc82d35f --- /dev/null +++ b/llava/lib/python3.10/turtle.py @@ -0,0 +1,4141 @@ +# +# turtle.py: a Tkinter based turtle graphics module for Python +# Version 1.1b - 4. 5. 2009 +# +# Copyright (C) 2006 - 2010 Gregor Lingl +# email: glingl@aon.at +# +# This software is provided 'as-is', without any express or implied +# warranty. In no event will the authors be held liable for any damages +# arising from the use of this software. +# +# Permission is granted to anyone to use this software for any purpose, +# including commercial applications, and to alter it and redistribute it +# freely, subject to the following restrictions: +# +# 1. The origin of this software must not be misrepresented; you must not +# claim that you wrote the original software. If you use this software +# in a product, an acknowledgment in the product documentation would be +# appreciated but is not required. +# 2. Altered source versions must be plainly marked as such, and must not be +# misrepresented as being the original software. +# 3. This notice may not be removed or altered from any source distribution. + + +""" +Turtle graphics is a popular way for introducing programming to +kids. It was part of the original Logo programming language developed +by Wally Feurzig and Seymour Papert in 1966. + +Imagine a robotic turtle starting at (0, 0) in the x-y plane. After an ``import turtle``, give it +the command turtle.forward(15), and it moves (on-screen!) 15 pixels in +the direction it is facing, drawing a line as it moves. Give it the +command turtle.right(25), and it rotates in-place 25 degrees clockwise. + +By combining together these and similar commands, intricate shapes and +pictures can easily be drawn. + +----- turtle.py + +This module is an extended reimplementation of turtle.py from the +Python standard distribution up to Python 2.5. (See: https://www.python.org) + +It tries to keep the merits of turtle.py and to be (nearly) 100% +compatible with it. 
This means in the first place to enable the +learning programmer to use all the commands, classes and methods +interactively when using the module from within IDLE run with +the -n switch. + +Roughly it has the following features added: + +- Better animation of the turtle movements, especially of turning the + turtle. So the turtles can more easily be used as a visual feedback + instrument by the (beginning) programmer. + +- Different turtle shapes, gif-images as turtle shapes, user defined + and user controllable turtle shapes, among them compound + (multicolored) shapes. Turtle shapes can be stretched and tilted, which + makes turtles very versatile geometrical objects. + +- Fine control over turtle movement and screen updates via delay(), + and enhanced tracer() and speed() methods. + +- Aliases for the most commonly used commands, like fd for forward etc., + following the early Logo traditions. This reduces the boring work of + typing long sequences of commands, which often occur in a natural way + when kids try to program fancy pictures on their first encounter with + turtle graphics. + +- Turtles now have an undo()-method with configurable undo-buffer. + +- Some simple commands/methods for creating event driven programs + (mouse-, key-, timer-events). Especially useful for programming games. + +- A scrollable Canvas class. The default scrollable Canvas can be + extended interactively as needed while playing around with the turtle(s). + +- A TurtleScreen class with methods controlling background color or + background image, window and canvas size and other properties of the + TurtleScreen. + +- There is a method, setworldcoordinates(), to install a user defined + coordinate-system for the TurtleScreen. + +- The implementation uses a 2-vector class named Vec2D, derived from tuple. + This class is public, so it can be imported by the application programmer, + which makes certain types of computations very natural and compact. + +- Appearance of the TurtleScreen and the Turtles at startup/import can be + configured by means of a turtle.cfg configuration file. + The default configuration mimics the appearance of the old turtle module. + +- If configured appropriately the module reads in docstrings from a docstring + dictionary in some different language, supplied separately and replaces + the English ones by those read in. There is a utility function + write_docstringdict() to write a dictionary with the original (English) + docstrings to disc, so it can serve as a template for translations. + +Behind the scenes there are some features included with possible +extensions in mind. These will be commented and documented elsewhere. + +""" + +_ver = "turtle 1.1b- - for Python 3.1 - 4. 5. 
2009" + +# print(_ver) + +import tkinter as TK +import types +import math +import time +import inspect +import sys + +from os.path import isfile, split, join +from copy import deepcopy +from tkinter import simpledialog + +_tg_classes = ['ScrolledCanvas', 'TurtleScreen', 'Screen', + 'RawTurtle', 'Turtle', 'RawPen', 'Pen', 'Shape', 'Vec2D'] +_tg_screen_functions = ['addshape', 'bgcolor', 'bgpic', 'bye', + 'clearscreen', 'colormode', 'delay', 'exitonclick', 'getcanvas', + 'getshapes', 'listen', 'mainloop', 'mode', 'numinput', + 'onkey', 'onkeypress', 'onkeyrelease', 'onscreenclick', 'ontimer', + 'register_shape', 'resetscreen', 'screensize', 'setup', + 'setworldcoordinates', 'textinput', 'title', 'tracer', 'turtles', 'update', + 'window_height', 'window_width'] +_tg_turtle_functions = ['back', 'backward', 'begin_fill', 'begin_poly', 'bk', + 'circle', 'clear', 'clearstamp', 'clearstamps', 'clone', 'color', + 'degrees', 'distance', 'dot', 'down', 'end_fill', 'end_poly', 'fd', + 'fillcolor', 'filling', 'forward', 'get_poly', 'getpen', 'getscreen', 'get_shapepoly', + 'getturtle', 'goto', 'heading', 'hideturtle', 'home', 'ht', 'isdown', + 'isvisible', 'left', 'lt', 'onclick', 'ondrag', 'onrelease', 'pd', + 'pen', 'pencolor', 'pendown', 'pensize', 'penup', 'pos', 'position', + 'pu', 'radians', 'right', 'reset', 'resizemode', 'rt', + 'seth', 'setheading', 'setpos', 'setposition', 'settiltangle', + 'setundobuffer', 'setx', 'sety', 'shape', 'shapesize', 'shapetransform', 'shearfactor', 'showturtle', + 'speed', 'st', 'stamp', 'tilt', 'tiltangle', 'towards', + 'turtlesize', 'undo', 'undobufferentries', 'up', 'width', + 'write', 'xcor', 'ycor'] +_tg_utilities = ['write_docstringdict', 'done'] + +__all__ = (_tg_classes + _tg_screen_functions + _tg_turtle_functions + + _tg_utilities + ['Terminator']) # + _math_functions) + +_alias_list = ['addshape', 'backward', 'bk', 'fd', 'ht', 'lt', 'pd', 'pos', + 'pu', 'rt', 'seth', 'setpos', 'setposition', 'st', + 'turtlesize', 'up', 'width'] + +_CFG = {"width" : 0.5, # Screen + "height" : 0.75, + "canvwidth" : 400, + "canvheight": 300, + "leftright": None, + "topbottom": None, + "mode": "standard", # TurtleScreen + "colormode": 1.0, + "delay": 10, + "undobuffersize": 1000, # RawTurtle + "shape": "classic", + "pencolor" : "black", + "fillcolor" : "black", + "resizemode" : "noresize", + "visible" : True, + "language": "english", # docstrings + "exampleturtle": "turtle", + "examplescreen": "screen", + "title": "Python Turtle Graphics", + "using_IDLE": False + } + +def config_dict(filename): + """Convert content of config-file into dictionary.""" + with open(filename, "r") as f: + cfglines = f.readlines() + cfgdict = {} + for line in cfglines: + line = line.strip() + if not line or line.startswith("#"): + continue + try: + key, value = line.split("=") + except ValueError: + print("Bad line in config-file %s:\n%s" % (filename,line)) + continue + key = key.strip() + value = value.strip() + if value in ["True", "False", "None", "''", '""']: + value = eval(value) + else: + try: + if "." in value: + value = float(value) + else: + value = int(value) + except ValueError: + pass # value need not be converted + cfgdict[key] = value + return cfgdict + +def readconfig(cfgdict): + """Read config-files, change configuration-dict accordingly. + + If there is a turtle.cfg file in the current working directory, + read it from there. 
If this contains an importconfig-value, + say 'myway', construct filename turtle_mayway.cfg else use + turtle.cfg and read it from the import-directory, where + turtle.py is located. + Update configuration dictionary first according to config-file, + in the import directory, then according to config-file in the + current working directory. + If no config-file is found, the default configuration is used. + """ + default_cfg = "turtle.cfg" + cfgdict1 = {} + cfgdict2 = {} + if isfile(default_cfg): + cfgdict1 = config_dict(default_cfg) + if "importconfig" in cfgdict1: + default_cfg = "turtle_%s.cfg" % cfgdict1["importconfig"] + try: + head, tail = split(__file__) + cfg_file2 = join(head, default_cfg) + except Exception: + cfg_file2 = "" + if isfile(cfg_file2): + cfgdict2 = config_dict(cfg_file2) + _CFG.update(cfgdict2) + _CFG.update(cfgdict1) + +try: + readconfig(_CFG) +except Exception: + print ("No configfile read, reason unknown") + + +class Vec2D(tuple): + """A 2 dimensional vector class, used as a helper class + for implementing turtle graphics. + May be useful for turtle graphics programs also. + Derived from tuple, so a vector is a tuple! + + Provides (for a, b vectors, k number): + a+b vector addition + a-b vector subtraction + a*b inner product + k*a and a*k multiplication with scalar + |a| absolute value of a + a.rotate(angle) rotation + """ + def __new__(cls, x, y): + return tuple.__new__(cls, (x, y)) + def __add__(self, other): + return Vec2D(self[0]+other[0], self[1]+other[1]) + def __mul__(self, other): + if isinstance(other, Vec2D): + return self[0]*other[0]+self[1]*other[1] + return Vec2D(self[0]*other, self[1]*other) + def __rmul__(self, other): + if isinstance(other, int) or isinstance(other, float): + return Vec2D(self[0]*other, self[1]*other) + return NotImplemented + def __sub__(self, other): + return Vec2D(self[0]-other[0], self[1]-other[1]) + def __neg__(self): + return Vec2D(-self[0], -self[1]) + def __abs__(self): + return math.hypot(*self) + def rotate(self, angle): + """rotate self counterclockwise by angle + """ + perp = Vec2D(-self[1], self[0]) + angle = math.radians(angle) + c, s = math.cos(angle), math.sin(angle) + return Vec2D(self[0]*c+perp[0]*s, self[1]*c+perp[1]*s) + def __getnewargs__(self): + return (self[0], self[1]) + def __repr__(self): + return "(%.2f,%.2f)" % self + + +############################################################################## +### From here up to line : Tkinter - Interface for turtle.py ### +### May be replaced by an interface to some different graphics toolkit ### +############################################################################## + +## helper functions for Scrolled Canvas, to forward Canvas-methods +## to ScrolledCanvas class + +def __methodDict(cls, _dict): + """helper function for Scrolled Canvas""" + baseList = list(cls.__bases__) + baseList.reverse() + for _super in baseList: + __methodDict(_super, _dict) + for key, value in cls.__dict__.items(): + if type(value) == types.FunctionType: + _dict[key] = value + +def __methods(cls): + """helper function for Scrolled Canvas""" + _dict = {} + __methodDict(cls, _dict) + return _dict.keys() + +__stringBody = ( + 'def %(method)s(self, *args, **kw): return ' + + 'self.%(attribute)s.%(method)s(*args, **kw)') + +def __forwardmethods(fromClass, toClass, toPart, exclude = ()): + ### MANY CHANGES ### + _dict_1 = {} + __methodDict(toClass, _dict_1) + _dict = {} + mfc = __methods(fromClass) + for ex in _dict_1.keys(): + if ex[:1] == '_' or ex[-1:] == '_' or ex in exclude or ex in 
mfc:
+ pass
+ else:
+ _dict[ex] = _dict_1[ex]
+
+ for method, func in _dict.items():
+ d = {'method': method, 'func': func}
+ if isinstance(toPart, str):
+ execString = \
+ __stringBody % {'method' : method, 'attribute' : toPart}
+ exec(execString, d)
+ setattr(fromClass, method, d[method]) ### NEWU!
+
+
+class ScrolledCanvas(TK.Frame):
+ """Modeled after the scrolled canvas class from Grayson's Tkinter book.
+
+ Used as the default canvas, which pops up automatically when
+ using turtle graphics functions or the Turtle class.
+ """
+ def __init__(self, master, width=500, height=350,
+ canvwidth=600, canvheight=500):
+ TK.Frame.__init__(self, master, width=width, height=height)
+ self._rootwindow = self.winfo_toplevel()
+ self.width, self.height = width, height
+ self.canvwidth, self.canvheight = canvwidth, canvheight
+ self.bg = "white"
+ self._canvas = TK.Canvas(master, width=width, height=height,
+ bg=self.bg, relief=TK.SUNKEN, borderwidth=2)
+ self.hscroll = TK.Scrollbar(master, command=self._canvas.xview,
+ orient=TK.HORIZONTAL)
+ self.vscroll = TK.Scrollbar(master, command=self._canvas.yview)
+ self._canvas.configure(xscrollcommand=self.hscroll.set,
+ yscrollcommand=self.vscroll.set)
+ self.rowconfigure(0, weight=1, minsize=0)
+ self.columnconfigure(0, weight=1, minsize=0)
+ self._canvas.grid(padx=1, in_ = self, pady=1, row=0,
+ column=0, rowspan=1, columnspan=1, sticky='news')
+ self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
+ column=1, rowspan=1, columnspan=1, sticky='news')
+ self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
+ column=0, rowspan=1, columnspan=1, sticky='news')
+ self.reset()
+ self._rootwindow.bind('<Configure>', self.onResize)
+
+ def reset(self, canvwidth=None, canvheight=None, bg = None):
+ """Adjust canvas and scrollbars according to given canvas size."""
+ if canvwidth:
+ self.canvwidth = canvwidth
+ if canvheight:
+ self.canvheight = canvheight
+ if bg:
+ self.bg = bg
+ self._canvas.config(bg=bg,
+ scrollregion=(-self.canvwidth//2, -self.canvheight//2,
+ self.canvwidth//2, self.canvheight//2))
+ self._canvas.xview_moveto(0.5*(self.canvwidth - self.width + 30) /
+ self.canvwidth)
+ self._canvas.yview_moveto(0.5*(self.canvheight- self.height + 30) /
+ self.canvheight)
+ self.adjustScrolls()
+
+
+ def adjustScrolls(self):
+ """ Adjust scrollbars according to window- and canvas-size.
+ """
+ cwidth = self._canvas.winfo_width()
+ cheight = self._canvas.winfo_height()
+ self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth)
+ self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight)
+ if cwidth < self.canvwidth or cheight < self.canvheight:
+ self.hscroll.grid(padx=1, in_ = self, pady=1, row=1,
+ column=0, rowspan=1, columnspan=1, sticky='news')
+ self.vscroll.grid(padx=1, in_ = self, pady=1, row=0,
+ column=1, rowspan=1, columnspan=1, sticky='news')
+ else:
+ self.hscroll.grid_forget()
+ self.vscroll.grid_forget()
+
+ def onResize(self, event):
+ """self-explanatory"""
+ self.adjustScrolls()
+
+ def bbox(self, *args):
+ """ 'forward' method, which canvas itself has inherited...
+ """
+ return self._canvas.bbox(*args)
+
+ def cget(self, *args, **kwargs):
+ """ 'forward' method, which canvas itself has inherited...
+ """
+ return self._canvas.cget(*args, **kwargs)
+
+ def config(self, *args, **kwargs):
+ """ 'forward' method, which canvas itself has inherited...
+ """
+ self._canvas.config(*args, **kwargs)
+
+ def bind(self, *args, **kwargs):
+ """ 'forward' method, which canvas itself has inherited...
+ """ + self._canvas.bind(*args, **kwargs) + + def unbind(self, *args, **kwargs): + """ 'forward' method, which canvas itself has inherited... + """ + self._canvas.unbind(*args, **kwargs) + + def focus_force(self): + """ 'forward' method, which canvas itself has inherited... + """ + self._canvas.focus_force() + +__forwardmethods(ScrolledCanvas, TK.Canvas, '_canvas') + + +class _Root(TK.Tk): + """Root class for Screen based on Tkinter.""" + def __init__(self): + TK.Tk.__init__(self) + + def setupcanvas(self, width, height, cwidth, cheight): + self._canvas = ScrolledCanvas(self, width, height, cwidth, cheight) + self._canvas.pack(expand=1, fill="both") + + def _getcanvas(self): + return self._canvas + + def set_geometry(self, width, height, startx, starty): + self.geometry("%dx%d%+d%+d"%(width, height, startx, starty)) + + def ondestroy(self, destroy): + self.wm_protocol("WM_DELETE_WINDOW", destroy) + + def win_width(self): + return self.winfo_screenwidth() + + def win_height(self): + return self.winfo_screenheight() + +Canvas = TK.Canvas + + +class TurtleScreenBase(object): + """Provide the basic graphics functionality. + Interface between Tkinter and turtle.py. + + To port turtle.py to some different graphics toolkit + a corresponding TurtleScreenBase class has to be implemented. + """ + + def _blankimage(self): + """return a blank image object + """ + img = TK.PhotoImage(width=1, height=1, master=self.cv) + img.blank() + return img + + def _image(self, filename): + """return an image object containing the + imagedata from a gif-file named filename. + """ + return TK.PhotoImage(file=filename, master=self.cv) + + def __init__(self, cv): + self.cv = cv + if isinstance(cv, ScrolledCanvas): + w = self.cv.canvwidth + h = self.cv.canvheight + else: # expected: ordinary TK.Canvas + w = int(self.cv.cget("width")) + h = int(self.cv.cget("height")) + self.cv.config(scrollregion = (-w//2, -h//2, w//2, h//2 )) + self.canvwidth = w + self.canvheight = h + self.xscale = self.yscale = 1.0 + + def _createpoly(self): + """Create an invisible polygon item on canvas self.cv) + """ + return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill="", outline="") + + def _drawpoly(self, polyitem, coordlist, fill=None, + outline=None, width=None, top=False): + """Configure polygonitem polyitem according to provided + arguments: + coordlist is sequence of coordinates + fill is filling color + outline is outline color + top is a boolean value, which specifies if polyitem + will be put on top of the canvas' displaylist so it + will not be covered by other items. + """ + cl = [] + for x, y in coordlist: + cl.append(x * self.xscale) + cl.append(-y * self.yscale) + self.cv.coords(polyitem, *cl) + if fill is not None: + self.cv.itemconfigure(polyitem, fill=fill) + if outline is not None: + self.cv.itemconfigure(polyitem, outline=outline) + if width is not None: + self.cv.itemconfigure(polyitem, width=width) + if top: + self.cv.tag_raise(polyitem) + + def _createline(self): + """Create an invisible line item on canvas self.cv) + """ + return self.cv.create_line(0, 0, 0, 0, fill="", width=2, + capstyle = TK.ROUND) + + def _drawline(self, lineitem, coordlist=None, + fill=None, width=None, top=False): + """Configure lineitem according to provided arguments: + coordlist is sequence of coordinates + fill is drawing color + width is width of drawn line. + top is a boolean value, which specifies if polyitem + will be put on top of the canvas' displaylist so it + will not be covered by other items. 
+ """ + if coordlist is not None: + cl = [] + for x, y in coordlist: + cl.append(x * self.xscale) + cl.append(-y * self.yscale) + self.cv.coords(lineitem, *cl) + if fill is not None: + self.cv.itemconfigure(lineitem, fill=fill) + if width is not None: + self.cv.itemconfigure(lineitem, width=width) + if top: + self.cv.tag_raise(lineitem) + + def _delete(self, item): + """Delete graphics item from canvas. + If item is"all" delete all graphics items. + """ + self.cv.delete(item) + + def _update(self): + """Redraw graphics items on canvas + """ + self.cv.update() + + def _delay(self, delay): + """Delay subsequent canvas actions for delay ms.""" + self.cv.after(delay) + + def _iscolorstring(self, color): + """Check if the string color is a legal Tkinter color string. + """ + try: + rgb = self.cv.winfo_rgb(color) + ok = True + except TK.TclError: + ok = False + return ok + + def _bgcolor(self, color=None): + """Set canvas' backgroundcolor if color is not None, + else return backgroundcolor.""" + if color is not None: + self.cv.config(bg = color) + self._update() + else: + return self.cv.cget("bg") + + def _write(self, pos, txt, align, font, pencolor): + """Write txt at pos in canvas with specified font + and color. + Return text item and x-coord of right bottom corner + of text's bounding box.""" + x, y = pos + x = x * self.xscale + y = y * self.yscale + anchor = {"left":"sw", "center":"s", "right":"se" } + item = self.cv.create_text(x-1, -y, text = txt, anchor = anchor[align], + fill = pencolor, font = font) + x0, y0, x1, y1 = self.cv.bbox(item) + return item, x1-1 + +## def _dot(self, pos, size, color): +## """may be implemented for some other graphics toolkit""" + + def _onclick(self, item, fun, num=1, add=None): + """Bind fun to mouse-click event on turtle. + fun must be a function with two arguments, the coordinates + of the clicked point on the canvas. + num, the number of the mouse-button defaults to 1 + """ + if fun is None: + self.cv.tag_unbind(item, "" % num) + else: + def eventfun(event): + x, y = (self.cv.canvasx(event.x)/self.xscale, + -self.cv.canvasy(event.y)/self.yscale) + fun(x, y) + self.cv.tag_bind(item, "" % num, eventfun, add) + + def _onrelease(self, item, fun, num=1, add=None): + """Bind fun to mouse-button-release event on turtle. + fun must be a function with two arguments, the coordinates + of the point on the canvas where mouse button is released. + num, the number of the mouse-button defaults to 1 + + If a turtle is clicked, first _onclick-event will be performed, + then _onscreensclick-event. + """ + if fun is None: + self.cv.tag_unbind(item, "" % num) + else: + def eventfun(event): + x, y = (self.cv.canvasx(event.x)/self.xscale, + -self.cv.canvasy(event.y)/self.yscale) + fun(x, y) + self.cv.tag_bind(item, "" % num, + eventfun, add) + + def _ondrag(self, item, fun, num=1, add=None): + """Bind fun to mouse-move-event (with pressed mouse button) on turtle. + fun must be a function with two arguments, the coordinates of the + actual mouse position on the canvas. + num, the number of the mouse-button defaults to 1 + + Every sequence of mouse-move-events on a turtle is preceded by a + mouse-click event on that turtle. 
+ """ + if fun is None: + self.cv.tag_unbind(item, "" % num) + else: + def eventfun(event): + try: + x, y = (self.cv.canvasx(event.x)/self.xscale, + -self.cv.canvasy(event.y)/self.yscale) + fun(x, y) + except Exception: + pass + self.cv.tag_bind(item, "" % num, eventfun, add) + + def _onscreenclick(self, fun, num=1, add=None): + """Bind fun to mouse-click event on canvas. + fun must be a function with two arguments, the coordinates + of the clicked point on the canvas. + num, the number of the mouse-button defaults to 1 + + If a turtle is clicked, first _onclick-event will be performed, + then _onscreensclick-event. + """ + if fun is None: + self.cv.unbind("" % num) + else: + def eventfun(event): + x, y = (self.cv.canvasx(event.x)/self.xscale, + -self.cv.canvasy(event.y)/self.yscale) + fun(x, y) + self.cv.bind("" % num, eventfun, add) + + def _onkeyrelease(self, fun, key): + """Bind fun to key-release event of key. + Canvas must have focus. See method listen + """ + if fun is None: + self.cv.unbind("" % key, None) + else: + def eventfun(event): + fun() + self.cv.bind("" % key, eventfun) + + def _onkeypress(self, fun, key=None): + """If key is given, bind fun to key-press event of key. + Otherwise bind fun to any key-press. + Canvas must have focus. See method listen. + """ + if fun is None: + if key is None: + self.cv.unbind("", None) + else: + self.cv.unbind("" % key, None) + else: + def eventfun(event): + fun() + if key is None: + self.cv.bind("", eventfun) + else: + self.cv.bind("" % key, eventfun) + + def _listen(self): + """Set focus on canvas (in order to collect key-events) + """ + self.cv.focus_force() + + def _ontimer(self, fun, t): + """Install a timer, which calls fun after t milliseconds. + """ + if t == 0: + self.cv.after_idle(fun) + else: + self.cv.after(t, fun) + + def _createimage(self, image): + """Create and return image item on canvas. + """ + return self.cv.create_image(0, 0, image=image) + + def _drawimage(self, item, pos, image): + """Configure image item as to draw image object + at position (x,y) on canvas) + """ + x, y = pos + self.cv.coords(item, (x * self.xscale, -y * self.yscale)) + self.cv.itemconfig(item, image=image) + + def _setbgpic(self, item, image): + """Configure image item as to draw image object + at center of canvas. Set item to the first item + in the displaylist, so it will be drawn below + any other item .""" + self.cv.itemconfig(item, image=image) + self.cv.tag_lower(item) + + def _type(self, item): + """Return 'line' or 'polygon' or 'image' depending on + type of item. 
+ """ + return self.cv.type(item) + + def _pointlist(self, item): + """returns list of coordinate-pairs of points of item + Example (for insiders): + >>> from turtle import * + >>> getscreen()._pointlist(getturtle().turtle._item) + [(0.0, 9.9999999999999982), (0.0, -9.9999999999999982), + (9.9999999999999982, 0.0)] + >>> """ + cl = self.cv.coords(item) + pl = [(cl[i], -cl[i+1]) for i in range(0, len(cl), 2)] + return pl + + def _setscrollregion(self, srx1, sry1, srx2, sry2): + self.cv.config(scrollregion=(srx1, sry1, srx2, sry2)) + + def _rescale(self, xscalefactor, yscalefactor): + items = self.cv.find_all() + for item in items: + coordinates = list(self.cv.coords(item)) + newcoordlist = [] + while coordinates: + x, y = coordinates[:2] + newcoordlist.append(x * xscalefactor) + newcoordlist.append(y * yscalefactor) + coordinates = coordinates[2:] + self.cv.coords(item, *newcoordlist) + + def _resize(self, canvwidth=None, canvheight=None, bg=None): + """Resize the canvas the turtles are drawing on. Does + not alter the drawing window. + """ + # needs amendment + if not isinstance(self.cv, ScrolledCanvas): + return self.canvwidth, self.canvheight + if canvwidth is canvheight is bg is None: + return self.cv.canvwidth, self.cv.canvheight + if canvwidth is not None: + self.canvwidth = canvwidth + if canvheight is not None: + self.canvheight = canvheight + self.cv.reset(canvwidth, canvheight, bg) + + def _window_size(self): + """ Return the width and height of the turtle window. + """ + width = self.cv.winfo_width() + if width <= 1: # the window isn't managed by a geometry manager + width = self.cv['width'] + height = self.cv.winfo_height() + if height <= 1: # the window isn't managed by a geometry manager + height = self.cv['height'] + return width, height + + def mainloop(self): + """Starts event loop - calling Tkinter's mainloop function. + + No argument. + + Must be last statement in a turtle graphics program. + Must NOT be used if a script is run from within IDLE in -n mode + (No subprocess) - for interactive use of turtle graphics. + + Example (for a TurtleScreen instance named screen): + >>> screen.mainloop() + + """ + self.cv.tk.mainloop() + + def textinput(self, title, prompt): + """Pop up a dialog window for input of a string. + + Arguments: title is the title of the dialog window, + prompt is a text mostly describing what information to input. + + Return the string input + If the dialog is canceled, return None. + + Example (for a TurtleScreen instance named screen): + >>> screen.textinput("NIM", "Name of first player:") + + """ + return simpledialog.askstring(title, prompt, parent=self.cv) + + def numinput(self, title, prompt, default=None, minval=None, maxval=None): + """Pop up a dialog window for input of a number. + + Arguments: title is the title of the dialog window, + prompt is a text mostly describing what numerical information to input. + default: default value + minval: minimum value for input + maxval: maximum value for input + + The number input must be in the range minval .. maxval if these are + given. If not, a hint is issued and the dialog remains open for + correction. Return the number input. + If the dialog is canceled, return None. 
+ + Example (for a TurtleScreen instance named screen): + >>> screen.numinput("Poker", "Your stakes:", 1000, minval=10, maxval=10000) + + """ + return simpledialog.askfloat(title, prompt, initialvalue=default, + minvalue=minval, maxvalue=maxval, + parent=self.cv) + + +############################################################################## +### End of Tkinter - interface ### +############################################################################## + + +class Terminator (Exception): + """Will be raised in TurtleScreen.update, if _RUNNING becomes False. + + This stops execution of a turtle graphics script. + Main purpose: use in the Demo-Viewer turtle.Demo.py. + """ + pass + + +class TurtleGraphicsError(Exception): + """Some TurtleGraphics Error + """ + + +class Shape(object): + """Data structure modeling shapes. + + attribute _type is one of "polygon", "image", "compound" + attribute _data is - depending on _type a poygon-tuple, + an image or a list constructed using the addcomponent method. + """ + def __init__(self, type_, data=None): + self._type = type_ + if type_ == "polygon": + if isinstance(data, list): + data = tuple(data) + elif type_ == "image": + if isinstance(data, str): + if data.lower().endswith(".gif") and isfile(data): + data = TurtleScreen._image(data) + # else data assumed to be Photoimage + elif type_ == "compound": + data = [] + else: + raise TurtleGraphicsError("There is no shape type %s" % type_) + self._data = data + + def addcomponent(self, poly, fill, outline=None): + """Add component to a shape of type compound. + + Arguments: poly is a polygon, i. e. a tuple of number pairs. + fill is the fillcolor of the component, + outline is the outline color of the component. + + call (for a Shapeobject namend s): + -- s.addcomponent(((0,0), (10,10), (-10,10)), "red", "blue") + + Example: + >>> poly = ((0,0),(10,-5),(0,10),(-10,-5)) + >>> s = Shape("compound") + >>> s.addcomponent(poly, "red", "blue") + >>> # .. add more components and then use register_shape() + """ + if self._type != "compound": + raise TurtleGraphicsError("Cannot add component to %s Shape" + % self._type) + if outline is None: + outline = fill + self._data.append([poly, fill, outline]) + + +class Tbuffer(object): + """Ring buffer used as undobuffer for RawTurtle objects.""" + def __init__(self, bufsize=10): + self.bufsize = bufsize + self.buffer = [[None]] * bufsize + self.ptr = -1 + self.cumulate = False + def reset(self, bufsize=None): + if bufsize is None: + for i in range(self.bufsize): + self.buffer[i] = [None] + else: + self.bufsize = bufsize + self.buffer = [[None]] * bufsize + self.ptr = -1 + def push(self, item): + if self.bufsize > 0: + if not self.cumulate: + self.ptr = (self.ptr + 1) % self.bufsize + self.buffer[self.ptr] = item + else: + self.buffer[self.ptr].append(item) + def pop(self): + if self.bufsize > 0: + item = self.buffer[self.ptr] + if item is None: + return None + else: + self.buffer[self.ptr] = [None] + self.ptr = (self.ptr - 1) % self.bufsize + return (item) + def nr_of_items(self): + return self.bufsize - self.buffer.count([None]) + def __repr__(self): + return str(self.buffer) + " " + str(self.ptr) + + + +class TurtleScreen(TurtleScreenBase): + """Provides screen oriented methods like bgcolor etc. + + Only relies upon the methods of TurtleScreenBase and NOT + upon components of the underlying graphics toolkit - + which is Tkinter in this case. 
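+
+ Example (a sketch; cv is assumed to be an existing Tkinter Canvas
+ or ScrolledCanvas):
+ >>> screen = TurtleScreen(cv)
+ >>> screen.bgcolor("orange")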
+ """ + _RUNNING = True + + def __init__(self, cv, mode=_CFG["mode"], + colormode=_CFG["colormode"], delay=_CFG["delay"]): + TurtleScreenBase.__init__(self, cv) + + self._shapes = { + "arrow" : Shape("polygon", ((-10,0), (10,0), (0,10))), + "turtle" : Shape("polygon", ((0,16), (-2,14), (-1,10), (-4,7), + (-7,9), (-9,8), (-6,5), (-7,1), (-5,-3), (-8,-6), + (-6,-8), (-4,-5), (0,-7), (4,-5), (6,-8), (8,-6), + (5,-3), (7,1), (6,5), (9,8), (7,9), (4,7), (1,10), + (2,14))), + "circle" : Shape("polygon", ((10,0), (9.51,3.09), (8.09,5.88), + (5.88,8.09), (3.09,9.51), (0,10), (-3.09,9.51), + (-5.88,8.09), (-8.09,5.88), (-9.51,3.09), (-10,0), + (-9.51,-3.09), (-8.09,-5.88), (-5.88,-8.09), + (-3.09,-9.51), (-0.00,-10.00), (3.09,-9.51), + (5.88,-8.09), (8.09,-5.88), (9.51,-3.09))), + "square" : Shape("polygon", ((10,-10), (10,10), (-10,10), + (-10,-10))), + "triangle" : Shape("polygon", ((10,-5.77), (0,11.55), + (-10,-5.77))), + "classic": Shape("polygon", ((0,0),(-5,-9),(0,-7),(5,-9))), + "blank" : Shape("image", self._blankimage()) + } + + self._bgpics = {"nopic" : ""} + + self._mode = mode + self._delayvalue = delay + self._colormode = _CFG["colormode"] + self._keys = [] + self.clear() + if sys.platform == 'darwin': + # Force Turtle window to the front on OS X. This is needed because + # the Turtle window will show behind the Terminal window when you + # start the demo from the command line. + rootwindow = cv.winfo_toplevel() + rootwindow.call('wm', 'attributes', '.', '-topmost', '1') + rootwindow.call('wm', 'attributes', '.', '-topmost', '0') + + def clear(self): + """Delete all drawings and all turtles from the TurtleScreen. + + No argument. + + Reset empty TurtleScreen to its initial state: white background, + no backgroundimage, no eventbindings and tracing on. + + Example (for a TurtleScreen instance named screen): + >>> screen.clear() + + Note: this method is not available as function. + """ + self._delayvalue = _CFG["delay"] + self._colormode = _CFG["colormode"] + self._delete("all") + self._bgpic = self._createimage("") + self._bgpicname = "nopic" + self._tracing = 1 + self._updatecounter = 0 + self._turtles = [] + self.bgcolor("white") + for btn in 1, 2, 3: + self.onclick(None, btn) + self.onkeypress(None) + for key in self._keys[:]: + self.onkey(None, key) + self.onkeypress(None, key) + Turtle._pen = None + + def mode(self, mode=None): + """Set turtle-mode ('standard', 'logo' or 'world') and perform reset. + + Optional argument: + mode -- one of the strings 'standard', 'logo' or 'world' + + Mode 'standard' is compatible with turtle.py. + Mode 'logo' is compatible with most Logo-Turtle-Graphics. + Mode 'world' uses userdefined 'worldcoordinates'. *Attention*: in + this mode angles appear distorted if x/y unit-ratio doesn't equal 1. + If mode is not given, return the current mode. 
+ + Mode Initial turtle heading positive angles + ------------|-------------------------|------------------- + 'standard' to the right (east) counterclockwise + 'logo' upward (north) clockwise + + Examples: + >>> mode('logo') # resets turtle heading to north + >>> mode() + 'logo' + """ + if mode is None: + return self._mode + mode = mode.lower() + if mode not in ["standard", "logo", "world"]: + raise TurtleGraphicsError("No turtle-graphics-mode %s" % mode) + self._mode = mode + if mode in ["standard", "logo"]: + self._setscrollregion(-self.canvwidth//2, -self.canvheight//2, + self.canvwidth//2, self.canvheight//2) + self.xscale = self.yscale = 1.0 + self.reset() + + def setworldcoordinates(self, llx, lly, urx, ury): + """Set up a user defined coordinate-system. + + Arguments: + llx -- a number, x-coordinate of lower left corner of canvas + lly -- a number, y-coordinate of lower left corner of canvas + urx -- a number, x-coordinate of upper right corner of canvas + ury -- a number, y-coordinate of upper right corner of canvas + + Set up user coodinat-system and switch to mode 'world' if necessary. + This performs a screen.reset. If mode 'world' is already active, + all drawings are redrawn according to the new coordinates. + + But ATTENTION: in user-defined coordinatesystems angles may appear + distorted. (see Screen.mode()) + + Example (for a TurtleScreen instance named screen): + >>> screen.setworldcoordinates(-10,-0.5,50,1.5) + >>> for _ in range(36): + ... left(10) + ... forward(0.5) + """ + if self.mode() != "world": + self.mode("world") + xspan = float(urx - llx) + yspan = float(ury - lly) + wx, wy = self._window_size() + self.screensize(wx-20, wy-20) + oldxscale, oldyscale = self.xscale, self.yscale + self.xscale = self.canvwidth / xspan + self.yscale = self.canvheight / yspan + srx1 = llx * self.xscale + sry1 = -ury * self.yscale + srx2 = self.canvwidth + srx1 + sry2 = self.canvheight + sry1 + self._setscrollregion(srx1, sry1, srx2, sry2) + self._rescale(self.xscale/oldxscale, self.yscale/oldyscale) + self.update() + + def register_shape(self, name, shape=None): + """Adds a turtle shape to TurtleScreen's shapelist. + + Arguments: + (1) name is the name of a gif-file and shape is None. + Installs the corresponding image shape. + !! Image-shapes DO NOT rotate when turning the turtle, + !! so they do not display the heading of the turtle! + (2) name is an arbitrary string and shape is a tuple + of pairs of coordinates. Installs the corresponding + polygon shape + (3) name is an arbitrary string and shape is a + (compound) Shape object. Installs the corresponding + compound shape. + To use a shape, you have to issue the command shape(shapename). + + call: register_shape("turtle.gif") + --or: register_shape("tri", ((0,0), (10,10), (-10,10))) + + Example (for a TurtleScreen instance named screen): + >>> screen.register_shape("triangle", ((5,-3),(0,5),(-5,-3))) + + """ + if shape is None: + # image + if name.lower().endswith(".gif"): + shape = Shape("image", self._image(name)) + else: + raise TurtleGraphicsError("Bad arguments for register_shape.\n" + + "Use help(register_shape)" ) + elif isinstance(shape, tuple): + shape = Shape("polygon", shape) + ## else shape assumed to be Shape-instance + self._shapes[name] = shape + + def _colorstr(self, color): + """Return color string corresponding to args. + + Argument may be a string or a tuple of three + numbers corresponding to actual colormode, + i.e. in the range 0<=n<=colormode. 
+ + If the argument doesn't represent a color, + an error is raised. + """ + if len(color) == 1: + color = color[0] + if isinstance(color, str): + if self._iscolorstring(color) or color == "": + return color + else: + raise TurtleGraphicsError("bad color string: %s" % str(color)) + try: + r, g, b = color + except (TypeError, ValueError): + raise TurtleGraphicsError("bad color arguments: %s" % str(color)) + if self._colormode == 1.0: + r, g, b = [round(255.0*x) for x in (r, g, b)] + if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)): + raise TurtleGraphicsError("bad color sequence: %s" % str(color)) + return "#%02x%02x%02x" % (r, g, b) + + def _color(self, cstr): + if not cstr.startswith("#"): + return cstr + if len(cstr) == 7: + cl = [int(cstr[i:i+2], 16) for i in (1, 3, 5)] + elif len(cstr) == 4: + cl = [16*int(cstr[h], 16) for h in cstr[1:]] + else: + raise TurtleGraphicsError("bad colorstring: %s" % cstr) + return tuple(c * self._colormode/255 for c in cl) + + def colormode(self, cmode=None): + """Return the colormode or set it to 1.0 or 255. + + Optional argument: + cmode -- one of the values 1.0 or 255 + + r, g, b values of colortriples have to be in range 0..cmode. + + Example (for a TurtleScreen instance named screen): + >>> screen.colormode() + 1.0 + >>> screen.colormode(255) + >>> pencolor(240,160,80) + """ + if cmode is None: + return self._colormode + if cmode == 1.0: + self._colormode = float(cmode) + elif cmode == 255: + self._colormode = int(cmode) + + def reset(self): + """Reset all Turtles on the Screen to their initial state. + + No argument. + + Example (for a TurtleScreen instance named screen): + >>> screen.reset() + """ + for turtle in self._turtles: + turtle._setmode(self._mode) + turtle.reset() + + def turtles(self): + """Return the list of turtles on the screen. + + Example (for a TurtleScreen instance named screen): + >>> screen.turtles() + [] + """ + return self._turtles + + def bgcolor(self, *args): + """Set or return backgroundcolor of the TurtleScreen. + + Arguments (if given): a color string or three numbers + in the range 0..colormode or a 3-tuple of such numbers. + + Example (for a TurtleScreen instance named screen): + >>> screen.bgcolor("orange") + >>> screen.bgcolor() + 'orange' + >>> screen.bgcolor(0.5,0,0.5) + >>> screen.bgcolor() + '#800080' + """ + if args: + color = self._colorstr(args) + else: + color = None + color = self._bgcolor(color) + if color is not None: + color = self._color(color) + return color + + def tracer(self, n=None, delay=None): + """Turns turtle animation on/off and set delay for update drawings. + + Optional arguments: + n -- nonnegative integer + delay -- nonnegative integer + + If n is given, only each n-th regular screen update is really performed. + (Can be used to accelerate the drawing of complex graphics.) + Second arguments sets delay value (see RawTurtle.delay()) + + Example (for a TurtleScreen instance named screen): + >>> screen.tracer(8, 25) + >>> dist = 2 + >>> for i in range(200): + ... fd(dist) + ... rt(90) + ... dist += 2 + """ + if n is None: + return self._tracing + self._tracing = int(n) + self._updatecounter = 0 + if delay is not None: + self._delayvalue = int(delay) + if self._tracing: + self.update() + + def delay(self, delay=None): + """ Return or set the drawing delay in milliseconds. 
+ + Optional argument: + delay -- positive integer + + Example (for a TurtleScreen instance named screen): + >>> screen.delay(15) + >>> screen.delay() + 15 + """ + if delay is None: + return self._delayvalue + self._delayvalue = int(delay) + + def _incrementudc(self): + """Increment update counter.""" + if not TurtleScreen._RUNNING: + TurtleScreen._RUNNING = True + raise Terminator + if self._tracing > 0: + self._updatecounter += 1 + self._updatecounter %= self._tracing + + def update(self): + """Perform a TurtleScreen update. + """ + tracing = self._tracing + self._tracing = True + for t in self.turtles(): + t._update_data() + t._drawturtle() + self._tracing = tracing + self._update() + + def window_width(self): + """ Return the width of the turtle window. + + Example (for a TurtleScreen instance named screen): + >>> screen.window_width() + 640 + """ + return self._window_size()[0] + + def window_height(self): + """ Return the height of the turtle window. + + Example (for a TurtleScreen instance named screen): + >>> screen.window_height() + 480 + """ + return self._window_size()[1] + + def getcanvas(self): + """Return the Canvas of this TurtleScreen. + + No argument. + + Example (for a Screen instance named screen): + >>> cv = screen.getcanvas() + >>> cv + + """ + return self.cv + + def getshapes(self): + """Return a list of names of all currently available turtle shapes. + + No argument. + + Example (for a TurtleScreen instance named screen): + >>> screen.getshapes() + ['arrow', 'blank', 'circle', ... , 'turtle'] + """ + return sorted(self._shapes.keys()) + + def onclick(self, fun, btn=1, add=None): + """Bind fun to mouse-click event on canvas. + + Arguments: + fun -- a function with two arguments, the coordinates of the + clicked point on the canvas. + btn -- the number of the mouse-button, defaults to 1 + + Example (for a TurtleScreen instance named screen) + + >>> screen.onclick(goto) + >>> # Subsequently clicking into the TurtleScreen will + >>> # make the turtle move to the clicked point. + >>> screen.onclick(None) + """ + self._onscreenclick(fun, btn, add) + + def onkey(self, fun, key): + """Bind fun to key-release event of key. + + Arguments: + fun -- a function with no arguments + key -- a string: key (e.g. "a") or key-symbol (e.g. "space") + + In order to be able to register key-events, TurtleScreen + must have focus. (See method listen.) + + Example (for a TurtleScreen instance named screen): + + >>> def f(): + ... fd(50) + ... lt(60) + ... + >>> screen.onkey(f, "Up") + >>> screen.listen() + + Subsequently the turtle can be moved by repeatedly pressing + the up-arrow key, consequently drawing a hexagon + + """ + if fun is None: + if key in self._keys: + self._keys.remove(key) + elif key not in self._keys: + self._keys.append(key) + self._onkeyrelease(fun, key) + + def onkeypress(self, fun, key=None): + """Bind fun to key-press event of key if key is given, + or to any key-press-event if no key is given. + + Arguments: + fun -- a function with no arguments + key -- a string: key (e.g. "a") or key-symbol (e.g. "space") + + In order to be able to register key-events, TurtleScreen + must have focus. (See method listen.) + + Example (for a TurtleScreen instance named screen + and a Turtle instance named turtle): + + >>> def f(): + ... fd(50) + ... lt(60) + ... + >>> screen.onkeypress(f, "Up") + >>> screen.listen() + + Subsequently the turtle can be moved by repeatedly pressing + the up-arrow key, or by keeping pressed the up-arrow key. + consequently drawing a hexagon. 
+ """ + if fun is None: + if key in self._keys: + self._keys.remove(key) + elif key is not None and key not in self._keys: + self._keys.append(key) + self._onkeypress(fun, key) + + def listen(self, xdummy=None, ydummy=None): + """Set focus on TurtleScreen (in order to collect key-events) + + No arguments. + Dummy arguments are provided in order + to be able to pass listen to the onclick method. + + Example (for a TurtleScreen instance named screen): + >>> screen.listen() + """ + self._listen() + + def ontimer(self, fun, t=0): + """Install a timer, which calls fun after t milliseconds. + + Arguments: + fun -- a function with no arguments. + t -- a number >= 0 + + Example (for a TurtleScreen instance named screen): + + >>> running = True + >>> def f(): + ... if running: + ... fd(50) + ... lt(60) + ... screen.ontimer(f, 250) + ... + >>> f() # makes the turtle marching around + >>> running = False + """ + self._ontimer(fun, t) + + def bgpic(self, picname=None): + """Set background image or return name of current backgroundimage. + + Optional argument: + picname -- a string, name of a gif-file or "nopic". + + If picname is a filename, set the corresponding image as background. + If picname is "nopic", delete backgroundimage, if present. + If picname is None, return the filename of the current backgroundimage. + + Example (for a TurtleScreen instance named screen): + >>> screen.bgpic() + 'nopic' + >>> screen.bgpic("landscape.gif") + >>> screen.bgpic() + 'landscape.gif' + """ + if picname is None: + return self._bgpicname + if picname not in self._bgpics: + self._bgpics[picname] = self._image(picname) + self._setbgpic(self._bgpic, self._bgpics[picname]) + self._bgpicname = picname + + def screensize(self, canvwidth=None, canvheight=None, bg=None): + """Resize the canvas the turtles are drawing on. + + Optional arguments: + canvwidth -- positive integer, new width of canvas in pixels + canvheight -- positive integer, new height of canvas in pixels + bg -- colorstring or color-tuple, new backgroundcolor + If no arguments are given, return current (canvaswidth, canvasheight) + + Do not alter the drawing window. To observe hidden parts of + the canvas use the scrollbars. (Can make visible those parts + of a drawing, which were outside the canvas before!) + + Example (for a Turtle instance named turtle): + >>> turtle.screensize(2000,1500) + >>> # e.g. to search for an erroneously escaped turtle ;-) + """ + return self._resize(canvwidth, canvheight, bg) + + onscreenclick = onclick + resetscreen = reset + clearscreen = clear + addshape = register_shape + onkeyrelease = onkey + +class TNavigator(object): + """Navigation part of the RawTurtle. + Implements methods for turtle movement. + """ + START_ORIENTATION = { + "standard": Vec2D(1.0, 0.0), + "world" : Vec2D(1.0, 0.0), + "logo" : Vec2D(0.0, 1.0) } + DEFAULT_MODE = "standard" + DEFAULT_ANGLEOFFSET = 0 + DEFAULT_ANGLEORIENT = 1 + + def __init__(self, mode=DEFAULT_MODE): + self._angleOffset = self.DEFAULT_ANGLEOFFSET + self._angleOrient = self.DEFAULT_ANGLEORIENT + self._mode = mode + self.undobuffer = None + self.degrees() + self._mode = None + self._setmode(mode) + TNavigator.reset(self) + + def reset(self): + """reset turtle to its initial values + + Will be overwritten by parent class + """ + self._position = Vec2D(0.0, 0.0) + self._orient = TNavigator.START_ORIENTATION[self._mode] + + def _setmode(self, mode=None): + """Set turtle-mode to 'standard', 'world' or 'logo'. 
+ """ + if mode is None: + return self._mode + if mode not in ["standard", "logo", "world"]: + return + self._mode = mode + if mode in ["standard", "world"]: + self._angleOffset = 0 + self._angleOrient = 1 + else: # mode == "logo": + self._angleOffset = self._fullcircle/4. + self._angleOrient = -1 + + def _setDegreesPerAU(self, fullcircle): + """Helper function for degrees() and radians()""" + self._fullcircle = fullcircle + self._degreesPerAU = 360/fullcircle + if self._mode == "standard": + self._angleOffset = 0 + else: + self._angleOffset = fullcircle/4. + + def degrees(self, fullcircle=360.0): + """ Set angle measurement units to degrees. + + Optional argument: + fullcircle - a number + + Set angle measurement units, i. e. set number + of 'degrees' for a full circle. Default value is + 360 degrees. + + Example (for a Turtle instance named turtle): + >>> turtle.left(90) + >>> turtle.heading() + 90 + + Change angle measurement unit to grad (also known as gon, + grade, or gradian and equals 1/100-th of the right angle.) + >>> turtle.degrees(400.0) + >>> turtle.heading() + 100 + + """ + self._setDegreesPerAU(fullcircle) + + def radians(self): + """ Set the angle measurement units to radians. + + No arguments. + + Example (for a Turtle instance named turtle): + >>> turtle.heading() + 90 + >>> turtle.radians() + >>> turtle.heading() + 1.5707963267948966 + """ + self._setDegreesPerAU(math.tau) + + def _go(self, distance): + """move turtle forward by specified distance""" + ende = self._position + self._orient * distance + self._goto(ende) + + def _rotate(self, angle): + """Turn turtle counterclockwise by specified angle if angle > 0.""" + angle *= self._degreesPerAU + self._orient = self._orient.rotate(angle) + + def _goto(self, end): + """move turtle to position end.""" + self._position = end + + def forward(self, distance): + """Move the turtle forward by the specified distance. + + Aliases: forward | fd + + Argument: + distance -- a number (integer or float) + + Move the turtle forward by the specified distance, in the direction + the turtle is headed. + + Example (for a Turtle instance named turtle): + >>> turtle.position() + (0.00, 0.00) + >>> turtle.forward(25) + >>> turtle.position() + (25.00,0.00) + >>> turtle.forward(-75) + >>> turtle.position() + (-50.00,0.00) + """ + self._go(distance) + + def back(self, distance): + """Move the turtle backward by distance. + + Aliases: back | backward | bk + + Argument: + distance -- a number + + Move the turtle backward by distance, opposite to the direction the + turtle is headed. Do not change the turtle's heading. + + Example (for a Turtle instance named turtle): + >>> turtle.position() + (0.00, 0.00) + >>> turtle.backward(30) + >>> turtle.position() + (-30.00, 0.00) + """ + self._go(-distance) + + def right(self, angle): + """Turn turtle right by angle units. + + Aliases: right | rt + + Argument: + angle -- a number (integer or float) + + Turn turtle right by angle units. (Units are by default degrees, + but can be set via the degrees() and radians() functions.) + Angle orientation depends on mode. (See this.) + + Example (for a Turtle instance named turtle): + >>> turtle.heading() + 22.0 + >>> turtle.right(45) + >>> turtle.heading() + 337.0 + """ + self._rotate(-angle) + + def left(self, angle): + """Turn turtle left by angle units. + + Aliases: left | lt + + Argument: + angle -- a number (integer or float) + + Turn turtle left by angle units. (Units are by default degrees, + but can be set via the degrees() and radians() functions.) 
+        Angle orientation depends on mode. (See this.)
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.heading()
+        22.0
+        >>> turtle.left(45)
+        >>> turtle.heading()
+        67.0
+        """
+        self._rotate(angle)
+
+    def pos(self):
+        """Return the turtle's current location (x,y), as a Vec2D-vector.
+
+        Aliases: pos | position
+
+        No arguments.
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.pos()
+        (0.00, 240.00)
+        """
+        return self._position
+
+    def xcor(self):
+        """ Return the turtle's x coordinate.
+
+        No arguments.
+
+        Example (for a Turtle instance named turtle):
+        >>> reset()
+        >>> turtle.left(60)
+        >>> turtle.forward(100)
+        >>> print(turtle.xcor())
+        50.0
+        """
+        return self._position[0]
+
+    def ycor(self):
+        """ Return the turtle's y coordinate.
+
+        No arguments.
+
+        Example (for a Turtle instance named turtle):
+        >>> reset()
+        >>> turtle.left(60)
+        >>> turtle.forward(100)
+        >>> print(turtle.ycor())
+        86.6025403784
+        """
+        return self._position[1]
+
+
+    def goto(self, x, y=None):
+        """Move turtle to an absolute position.
+
+        Aliases: setpos | setposition | goto
+
+        Arguments:
+        x -- a number      or     a pair/vector of numbers
+        y -- a number             None
+
+        call: goto(x, y)         # two coordinates
+        --or: goto((x, y))       # a pair (tuple) of coordinates
+        --or: goto(vec)          # e.g. as returned by pos()
+
+        Move turtle to an absolute position. If the pen is down,
+        a line will be drawn. The turtle's orientation does not change.
+
+        Example (for a Turtle instance named turtle):
+        >>> tp = turtle.pos()
+        >>> tp
+        (0.00, 0.00)
+        >>> turtle.setpos(60,30)
+        >>> turtle.pos()
+        (60.00,30.00)
+        >>> turtle.setpos((20,80))
+        >>> turtle.pos()
+        (20.00,80.00)
+        >>> turtle.setpos(tp)
+        >>> turtle.pos()
+        (0.00,0.00)
+        """
+        if y is None:
+            self._goto(Vec2D(*x))
+        else:
+            self._goto(Vec2D(x, y))
+
+    def home(self):
+        """Move turtle to the origin - coordinates (0,0).
+
+        No arguments.
+
+        Move turtle to the origin - coordinates (0,0) - and set its
+        heading to its start-orientation (which depends on mode).
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.home()
+        """
+        self.goto(0, 0)
+        self.setheading(0)
+
+    def setx(self, x):
+        """Set the turtle's first coordinate to x.
+
+        Argument:
+        x -- a number (integer or float)
+
+        Set the turtle's first coordinate to x, leave second coordinate
+        unchanged.
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.position()
+        (0.00, 240.00)
+        >>> turtle.setx(10)
+        >>> turtle.position()
+        (10.00, 240.00)
+        """
+        self._goto(Vec2D(x, self._position[1]))
+
+    def sety(self, y):
+        """Set the turtle's second coordinate to y.
+
+        Argument:
+        y -- a number (integer or float)
+
+        Set the turtle's second coordinate to y, leave first coordinate
+        unchanged.
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.position()
+        (0.00, 40.00)
+        >>> turtle.sety(-10)
+        >>> turtle.position()
+        (0.00, -10.00)
+        """
+        self._goto(Vec2D(self._position[0], y))
+
+    def distance(self, x, y=None):
+        """Return the distance from the turtle to (x,y) in turtle step units.
+
+        Arguments:
+        x -- a number   or  a pair/vector of numbers   or a turtle instance
+        y -- a number       None                       None
+
+        call: distance(x, y)         # two coordinates
+        --or: distance((x, y))       # a pair (tuple) of coordinates
+        --or: distance(vec)          # e.g. as returned by pos()
+        --or: distance(mypen)        # where mypen is another turtle
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.pos()
+        (0.00, 0.00)
+        >>> turtle.distance(30,40)
+        50.0
+        >>> pen = Turtle()
+        >>> pen.forward(77)
+        >>> turtle.distance(pen)
+        77.0
+        """
+        if y is not None:
+            pos = Vec2D(x, y)
+        if isinstance(x, Vec2D):
+            pos = x
+        elif isinstance(x, tuple):
+            pos = Vec2D(*x)
+        elif isinstance(x, TNavigator):
+            pos = x._position
+        return abs(pos - self._position)
+
+    def towards(self, x, y=None):
+        """Return the angle of the line from the turtle's position to (x, y).
+
+        Arguments:
+        x -- a number   or  a pair/vector of numbers   or a turtle instance
+        y -- a number       None                       None
+
+        call: towards(x, y)         # two coordinates
+        --or: towards((x, y))       # a pair (tuple) of coordinates
+        --or: towards(vec)          # e.g. as returned by pos()
+        --or: towards(mypen)        # where mypen is another turtle
+
+        Return the angle between the line from turtle-position to position
+        specified by x, y and the turtle's start orientation. (Depends on
+        modes - "standard" or "logo".)
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.pos()
+        (10.00, 10.00)
+        >>> turtle.towards(0,0)
+        225.0
+        """
+        if y is not None:
+            pos = Vec2D(x, y)
+        if isinstance(x, Vec2D):
+            pos = x
+        elif isinstance(x, tuple):
+            pos = Vec2D(*x)
+        elif isinstance(x, TNavigator):
+            pos = x._position
+        x, y = pos - self._position
+        result = round(math.degrees(math.atan2(y, x)), 10) % 360.0
+        result /= self._degreesPerAU
+        return (self._angleOffset + self._angleOrient*result) % self._fullcircle
+
+    def heading(self):
+        """ Return the turtle's current heading.
+
+        No arguments.
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.left(67)
+        >>> turtle.heading()
+        67.0
+        """
+        x, y = self._orient
+        result = round(math.degrees(math.atan2(y, x)), 10) % 360.0
+        result /= self._degreesPerAU
+        return (self._angleOffset + self._angleOrient*result) % self._fullcircle
+
+    def setheading(self, to_angle):
+        """Set the orientation of the turtle to to_angle.
+
+        Aliases: setheading | seth
+
+        Argument:
+        to_angle -- a number (integer or float)
+
+        Set the orientation of the turtle to to_angle.
+        Here are some common directions in degrees:
+
+         standard - mode:          logo-mode:
+        -------------------|--------------------
+           0 - east                0 - north
+          90 - north              90 - east
+         180 - west              180 - south
+         270 - south             270 - west
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.setheading(90)
+        >>> turtle.heading()
+        90
+        """
+        angle = (to_angle - self.heading())*self._angleOrient
+        full = self._fullcircle
+        angle = (angle+full/2.)%full - full/2.
+        self._rotate(angle)
+
+    def circle(self, radius, extent = None, steps = None):
+        """ Draw a circle with given radius.
+
+        Arguments:
+        radius -- a number
+        extent (optional) -- a number
+        steps (optional) -- an integer
+
+        Draw a circle with given radius. The center is radius units left
+        of the turtle; extent - an angle - determines which part of the
+        circle is drawn. If extent is not given, draw the entire circle.
+        If extent is not a full circle, one endpoint of the arc is the
+        current pen position. Draw the arc in counterclockwise direction
+        if radius is positive, otherwise in clockwise direction. Finally
+        the direction of the turtle is changed by the amount of extent.
+
+        As the circle is approximated by an inscribed regular polygon,
+        steps determines the number of steps to use. If not given,
+        it will be calculated automatically. May be used to draw regular
+        polygons.
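+        For instance, circle(40, steps=5) draws a regular pentagon
+        inscribed in a circle of radius 40.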
+ + call: circle(radius) # full circle + --or: circle(radius, extent) # arc + --or: circle(radius, extent, steps) + --or: circle(radius, steps=6) # 6-sided polygon + + Example (for a Turtle instance named turtle): + >>> turtle.circle(50) + >>> turtle.circle(120, 180) # semicircle + """ + if self.undobuffer: + self.undobuffer.push(["seq"]) + self.undobuffer.cumulate = True + speed = self.speed() + if extent is None: + extent = self._fullcircle + if steps is None: + frac = abs(extent)/self._fullcircle + steps = 1+int(min(11+abs(radius)/6.0, 59.0)*frac) + w = 1.0 * extent / steps + w2 = 0.5 * w + l = 2.0 * radius * math.sin(math.radians(w2)*self._degreesPerAU) + if radius < 0: + l, w, w2 = -l, -w, -w2 + tr = self._tracer() + dl = self._delay() + if speed == 0: + self._tracer(0, 0) + else: + self.speed(0) + self._rotate(w2) + for i in range(steps): + self.speed(speed) + self._go(l) + self.speed(0) + self._rotate(w) + self._rotate(-w2) + if speed == 0: + self._tracer(tr, dl) + self.speed(speed) + if self.undobuffer: + self.undobuffer.cumulate = False + +## three dummy methods to be implemented by child class: + + def speed(self, s=0): + """dummy method - to be overwritten by child class""" + def _tracer(self, a=None, b=None): + """dummy method - to be overwritten by child class""" + def _delay(self, n=None): + """dummy method - to be overwritten by child class""" + + fd = forward + bk = back + backward = back + rt = right + lt = left + position = pos + setpos = goto + setposition = goto + seth = setheading + + +class TPen(object): + """Drawing part of the RawTurtle. + Implements drawing properties. + """ + def __init__(self, resizemode=_CFG["resizemode"]): + self._resizemode = resizemode # or "user" or "noresize" + self.undobuffer = None + TPen._reset(self) + + def _reset(self, pencolor=_CFG["pencolor"], + fillcolor=_CFG["fillcolor"]): + self._pensize = 1 + self._shown = True + self._pencolor = pencolor + self._fillcolor = fillcolor + self._drawing = True + self._speed = 3 + self._stretchfactor = (1., 1.) + self._shearfactor = 0. + self._tilt = 0. + self._shapetrafo = (1., 0., 0., 1.) + self._outlinewidth = 1 + + def resizemode(self, rmode=None): + """Set resizemode to one of the values: "auto", "user", "noresize". + + (Optional) Argument: + rmode -- one of the strings "auto", "user", "noresize" + + Different resizemodes have the following effects: + - "auto" adapts the appearance of the turtle + corresponding to the value of pensize. + - "user" adapts the appearance of the turtle according to the + values of stretchfactor and outlinewidth (outline), + which are set by shapesize() + - "noresize" no adaption of the turtle's appearance takes place. + If no argument is given, return current resizemode. + resizemode("user") is called by a call of shapesize with arguments. + + + Examples (for a Turtle instance named turtle): + >>> turtle.resizemode("noresize") + >>> turtle.resizemode() + 'noresize' + """ + if rmode is None: + return self._resizemode + rmode = rmode.lower() + if rmode in ["auto", "user", "noresize"]: + self.pen(resizemode=rmode) + + def pensize(self, width=None): + """Set or return the line thickness. + + Aliases: pensize | width + + Argument: + width -- positive number + + Set the line thickness to width or return it. If resizemode is set + to "auto" and turtleshape is a polygon, that polygon is drawn with + the same line thickness. If no argument is given, current pensize + is returned. 
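+        The value is simply handed on to pen(); when the width actually
+        changes, pen() also starts a new canvas line item (see pen() below).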
+ + Example (for a Turtle instance named turtle): + >>> turtle.pensize() + 1 + >>> turtle.pensize(10) # from here on lines of width 10 are drawn + """ + if width is None: + return self._pensize + self.pen(pensize=width) + + + def penup(self): + """Pull the pen up -- no drawing when moving. + + Aliases: penup | pu | up + + No argument + + Example (for a Turtle instance named turtle): + >>> turtle.penup() + """ + if not self._drawing: + return + self.pen(pendown=False) + + def pendown(self): + """Pull the pen down -- drawing when moving. + + Aliases: pendown | pd | down + + No argument. + + Example (for a Turtle instance named turtle): + >>> turtle.pendown() + """ + if self._drawing: + return + self.pen(pendown=True) + + def isdown(self): + """Return True if pen is down, False if it's up. + + No argument. + + Example (for a Turtle instance named turtle): + >>> turtle.penup() + >>> turtle.isdown() + False + >>> turtle.pendown() + >>> turtle.isdown() + True + """ + return self._drawing + + def speed(self, speed=None): + """ Return or set the turtle's speed. + + Optional argument: + speed -- an integer in the range 0..10 or a speedstring (see below) + + Set the turtle's speed to an integer value in the range 0 .. 10. + If no argument is given: return current speed. + + If input is a number greater than 10 or smaller than 0.5, + speed is set to 0. + Speedstrings are mapped to speedvalues in the following way: + 'fastest' : 0 + 'fast' : 10 + 'normal' : 6 + 'slow' : 3 + 'slowest' : 1 + speeds from 1 to 10 enforce increasingly faster animation of + line drawing and turtle turning. + + Attention: + speed = 0 : *no* animation takes place. forward/back makes turtle jump + and likewise left/right make the turtle turn instantly. + + Example (for a Turtle instance named turtle): + >>> turtle.speed(3) + """ + speeds = {'fastest':0, 'fast':10, 'normal':6, 'slow':3, 'slowest':1 } + if speed is None: + return self._speed + if speed in speeds: + speed = speeds[speed] + elif 0.5 < speed < 10.5: + speed = int(round(speed)) + else: + speed = 0 + self.pen(speed=speed) + + def color(self, *args): + """Return or set the pencolor and fillcolor. + + Arguments: + Several input formats are allowed. + They use 0, 1, 2, or 3 arguments as follows: + + color() + Return the current pencolor and the current fillcolor + as a pair of color specification strings as are returned + by pencolor and fillcolor. + color(colorstring), color((r,g,b)), color(r,g,b) + inputs as in pencolor, set both, fillcolor and pencolor, + to the given value. + color(colorstring1, colorstring2), + color((r1,g1,b1), (r2,g2,b2)) + equivalent to pencolor(colorstring1) and fillcolor(colorstring2) + and analogously, if the other input format is used. + + If turtleshape is a polygon, outline and interior of that polygon + is drawn with the newly set colors. + For more info see: pencolor, fillcolor + + Example (for a Turtle instance named turtle): + >>> turtle.color('red', 'green') + >>> turtle.color() + ('red', 'green') + >>> colormode(255) + >>> color((40, 80, 120), (160, 200, 240)) + >>> color() + ('#285078', '#a0c8f0') + """ + if args: + l = len(args) + if l == 1: + pcolor = fcolor = args[0] + elif l == 2: + pcolor, fcolor = args + elif l == 3: + pcolor = fcolor = args + pcolor = self._colorstr(pcolor) + fcolor = self._colorstr(fcolor) + self.pen(pencolor=pcolor, fillcolor=fcolor) + else: + return self._color(self._pencolor), self._color(self._fillcolor) + + def pencolor(self, *args): + """ Return or set the pencolor. 
+
+        Arguments:
+        Four input formats are allowed:
+          - pencolor()
+            Return the current pencolor as color specification string,
+            possibly in hex-number format (see example).
+            May be used as input to another color/pencolor/fillcolor call.
+          - pencolor(colorstring)
+            colorstring is a Tk color specification string, such as
+            "red" or "yellow"
+          - pencolor((r, g, b))
+            *a tuple* of r, g, and b, which represent an RGB color,
+            and each of r, g, and b are in the range 0..colormode,
+            where colormode is either 1.0 or 255
+          - pencolor(r, g, b)
+            r, g, and b represent an RGB color, and each of r, g, and b
+            are in the range 0..colormode
+
+        If turtleshape is a polygon, the outline of that polygon is drawn
+        with the newly set pencolor.
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.pencolor('brown')
+        >>> tup = (0.2, 0.8, 0.55)
+        >>> turtle.pencolor(tup)
+        >>> turtle.pencolor()
+        '#33cc8c'
+        """
+        if args:
+            color = self._colorstr(args)
+            if color == self._pencolor:
+                return
+            self.pen(pencolor=color)
+        else:
+            return self._color(self._pencolor)
+
+    def fillcolor(self, *args):
+        """ Return or set the fillcolor.
+
+        Arguments:
+        Four input formats are allowed:
+          - fillcolor()
+            Return the current fillcolor as color specification string,
+            possibly in hex-number format (see example).
+            May be used as input to another color/pencolor/fillcolor call.
+          - fillcolor(colorstring)
+            colorstring is a Tk color specification string, such as
+            "red" or "yellow"
+          - fillcolor((r, g, b))
+            *a tuple* of r, g, and b, which represent an RGB color,
+            and each of r, g, and b are in the range 0..colormode,
+            where colormode is either 1.0 or 255
+          - fillcolor(r, g, b)
+            r, g, and b represent an RGB color, and each of r, g, and b
+            are in the range 0..colormode
+
+        If turtleshape is a polygon, the interior of that polygon is drawn
+        with the newly set fillcolor.
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.fillcolor('violet')
+        >>> col = turtle.pencolor()
+        >>> turtle.fillcolor(col)
+        >>> turtle.fillcolor(0, .5, 0)
+        """
+        if args:
+            color = self._colorstr(args)
+            if color == self._fillcolor:
+                return
+            self.pen(fillcolor=color)
+        else:
+            return self._color(self._fillcolor)
+
+    def showturtle(self):
+        """Makes the turtle visible.
+
+        Aliases: showturtle | st
+
+        No argument.
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.hideturtle()
+        >>> turtle.showturtle()
+        """
+        self.pen(shown=True)
+
+    def hideturtle(self):
+        """Makes the turtle invisible.
+
+        Aliases: hideturtle | ht
+
+        No argument.
+
+        It's a good idea to do this while you're in the
+        middle of a complicated drawing, because hiding
+        the turtle speeds up the drawing observably.
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.hideturtle()
+        """
+        self.pen(shown=False)
+
+    def isvisible(self):
+        """Return True if the Turtle is shown, False if it's hidden.
+
+        No argument.
+
+        Example (for a Turtle instance named turtle):
+        >>> turtle.hideturtle()
+        >>> print(turtle.isvisible())
+        False
+        """
+        return self._shown
+
+    def pen(self, pen=None, **pendict):
+        """Return or set the pen's attributes.
+
+        Arguments:
+        pen -- a dictionary with some or all of the below listed keys.
+        **pendict -- one or more keyword-arguments with the below
+                     listed keys as keywords.
+ + Return or set the pen's attributes in a 'pen-dictionary' + with the following key/value pairs: + "shown" : True/False + "pendown" : True/False + "pencolor" : color-string or color-tuple + "fillcolor" : color-string or color-tuple + "pensize" : positive number + "speed" : number in range 0..10 + "resizemode" : "auto" or "user" or "noresize" + "stretchfactor": (positive number, positive number) + "shearfactor": number + "outline" : positive number + "tilt" : number + + This dictionary can be used as argument for a subsequent + pen()-call to restore the former pen-state. Moreover one + or more of these attributes can be provided as keyword-arguments. + This can be used to set several pen attributes in one statement. + + + Examples (for a Turtle instance named turtle): + >>> turtle.pen(fillcolor="black", pencolor="red", pensize=10) + >>> turtle.pen() + {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, + 'pencolor': 'red', 'pendown': True, 'fillcolor': 'black', + 'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0} + >>> penstate=turtle.pen() + >>> turtle.color("yellow","") + >>> turtle.penup() + >>> turtle.pen() + {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, + 'pencolor': 'yellow', 'pendown': False, 'fillcolor': '', + 'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0} + >>> p.pen(penstate, fillcolor="green") + >>> p.pen() + {'pensize': 10, 'shown': True, 'resizemode': 'auto', 'outline': 1, + 'pencolor': 'red', 'pendown': True, 'fillcolor': 'green', + 'stretchfactor': (1,1), 'speed': 3, 'shearfactor': 0.0} + """ + _pd = {"shown" : self._shown, + "pendown" : self._drawing, + "pencolor" : self._pencolor, + "fillcolor" : self._fillcolor, + "pensize" : self._pensize, + "speed" : self._speed, + "resizemode" : self._resizemode, + "stretchfactor" : self._stretchfactor, + "shearfactor" : self._shearfactor, + "outline" : self._outlinewidth, + "tilt" : self._tilt + } + + if not (pen or pendict): + return _pd + + if isinstance(pen, dict): + p = pen + else: + p = {} + p.update(pendict) + + _p_buf = {} + for key in p: + _p_buf[key] = _pd[key] + + if self.undobuffer: + self.undobuffer.push(("pen", _p_buf)) + + newLine = False + if "pendown" in p: + if self._drawing != p["pendown"]: + newLine = True + if "pencolor" in p: + if isinstance(p["pencolor"], tuple): + p["pencolor"] = self._colorstr((p["pencolor"],)) + if self._pencolor != p["pencolor"]: + newLine = True + if "pensize" in p: + if self._pensize != p["pensize"]: + newLine = True + if newLine: + self._newLine() + if "pendown" in p: + self._drawing = p["pendown"] + if "pencolor" in p: + self._pencolor = p["pencolor"] + if "pensize" in p: + self._pensize = p["pensize"] + if "fillcolor" in p: + if isinstance(p["fillcolor"], tuple): + p["fillcolor"] = self._colorstr((p["fillcolor"],)) + self._fillcolor = p["fillcolor"] + if "speed" in p: + self._speed = p["speed"] + if "resizemode" in p: + self._resizemode = p["resizemode"] + if "stretchfactor" in p: + sf = p["stretchfactor"] + if isinstance(sf, (int, float)): + sf = (sf, sf) + self._stretchfactor = sf + if "shearfactor" in p: + self._shearfactor = p["shearfactor"] + if "outline" in p: + self._outlinewidth = p["outline"] + if "shown" in p: + self._shown = p["shown"] + if "tilt" in p: + self._tilt = p["tilt"] + if "stretchfactor" in p or "tilt" in p or "shearfactor" in p: + scx, scy = self._stretchfactor + shf = self._shearfactor + sa, ca = math.sin(self._tilt), math.cos(self._tilt) + self._shapetrafo = ( scx*ca, scy*(shf*ca + sa), + -scx*sa, scy*(ca - 
shf*sa)) + self._update() + +## three dummy methods to be implemented by child class: + + def _newLine(self, usePos = True): + """dummy method - to be overwritten by child class""" + def _update(self, count=True, forced=False): + """dummy method - to be overwritten by child class""" + def _color(self, args): + """dummy method - to be overwritten by child class""" + def _colorstr(self, args): + """dummy method - to be overwritten by child class""" + + width = pensize + up = penup + pu = penup + pd = pendown + down = pendown + st = showturtle + ht = hideturtle + + +class _TurtleImage(object): + """Helper class: Datatype to store Turtle attributes + """ + + def __init__(self, screen, shapeIndex): + self.screen = screen + self._type = None + self._setshape(shapeIndex) + + def _setshape(self, shapeIndex): + screen = self.screen + self.shapeIndex = shapeIndex + if self._type == "polygon" == screen._shapes[shapeIndex]._type: + return + if self._type == "image" == screen._shapes[shapeIndex]._type: + return + if self._type in ["image", "polygon"]: + screen._delete(self._item) + elif self._type == "compound": + for item in self._item: + screen._delete(item) + self._type = screen._shapes[shapeIndex]._type + if self._type == "polygon": + self._item = screen._createpoly() + elif self._type == "image": + self._item = screen._createimage(screen._shapes["blank"]._data) + elif self._type == "compound": + self._item = [screen._createpoly() for item in + screen._shapes[shapeIndex]._data] + + +class RawTurtle(TPen, TNavigator): + """Animation part of the RawTurtle. + Puts RawTurtle upon a TurtleScreen and provides tools for + its animation. + """ + screens = [] + + def __init__(self, canvas=None, + shape=_CFG["shape"], + undobuffersize=_CFG["undobuffersize"], + visible=_CFG["visible"]): + if isinstance(canvas, _Screen): + self.screen = canvas + elif isinstance(canvas, TurtleScreen): + if canvas not in RawTurtle.screens: + RawTurtle.screens.append(canvas) + self.screen = canvas + elif isinstance(canvas, (ScrolledCanvas, Canvas)): + for screen in RawTurtle.screens: + if screen.cv == canvas: + self.screen = screen + break + else: + self.screen = TurtleScreen(canvas) + RawTurtle.screens.append(self.screen) + else: + raise TurtleGraphicsError("bad canvas argument %s" % canvas) + + screen = self.screen + TNavigator.__init__(self, screen.mode()) + TPen.__init__(self) + screen._turtles.append(self) + self.drawingLineItem = screen._createline() + self.turtle = _TurtleImage(screen, shape) + self._poly = None + self._creatingPoly = False + self._fillitem = self._fillpath = None + self._shown = visible + self._hidden_from_screen = False + self.currentLineItem = screen._createline() + self.currentLine = [self._position] + self.items = [self.currentLineItem] + self.stampItems = [] + self._undobuffersize = undobuffersize + self.undobuffer = Tbuffer(undobuffersize) + self._update() + + def reset(self): + """Delete the turtle's drawings and restore its default values. + + No argument. + + Delete the turtle's drawings from the screen, re-center the turtle + and set variables to the default values. + + Example (for a Turtle instance named turtle): + >>> turtle.position() + (0.00,-22.00) + >>> turtle.heading() + 100.0 + >>> turtle.reset() + >>> turtle.position() + (0.00,0.00) + >>> turtle.heading() + 0.0 + """ + TNavigator.reset(self) + TPen._reset(self) + self._clear() + self._drawturtle() + self._update() + + def setundobuffer(self, size): + """Set or disable undobuffer. 
+ + Argument: + size -- an integer or None + + If size is an integer an empty undobuffer of given size is installed. + Size gives the maximum number of turtle-actions that can be undone + by the undo() function. + If size is None, no undobuffer is present. + + Example (for a Turtle instance named turtle): + >>> turtle.setundobuffer(42) + """ + if size is None or size <= 0: + self.undobuffer = None + else: + self.undobuffer = Tbuffer(size) + + def undobufferentries(self): + """Return count of entries in the undobuffer. + + No argument. + + Example (for a Turtle instance named turtle): + >>> while undobufferentries(): + ... undo() + """ + if self.undobuffer is None: + return 0 + return self.undobuffer.nr_of_items() + + def _clear(self): + """Delete all of pen's drawings""" + self._fillitem = self._fillpath = None + for item in self.items: + self.screen._delete(item) + self.currentLineItem = self.screen._createline() + self.currentLine = [] + if self._drawing: + self.currentLine.append(self._position) + self.items = [self.currentLineItem] + self.clearstamps() + self.setundobuffer(self._undobuffersize) + + + def clear(self): + """Delete the turtle's drawings from the screen. Do not move turtle. + + No arguments. + + Delete the turtle's drawings from the screen. Do not move turtle. + State and position of the turtle as well as drawings of other + turtles are not affected. + + Examples (for a Turtle instance named turtle): + >>> turtle.clear() + """ + self._clear() + self._update() + + def _update_data(self): + self.screen._incrementudc() + if self.screen._updatecounter != 0: + return + if len(self.currentLine)>1: + self.screen._drawline(self.currentLineItem, self.currentLine, + self._pencolor, self._pensize) + + def _update(self): + """Perform a Turtle-data update. + """ + screen = self.screen + if screen._tracing == 0: + return + elif screen._tracing == 1: + self._update_data() + self._drawturtle() + screen._update() # TurtleScreenBase + screen._delay(screen._delayvalue) # TurtleScreenBase + else: + self._update_data() + if screen._updatecounter == 0: + for t in screen.turtles(): + t._drawturtle() + screen._update() + + def _tracer(self, flag=None, delay=None): + """Turns turtle animation on/off and set delay for update drawings. + + Optional arguments: + n -- nonnegative integer + delay -- nonnegative integer + + If n is given, only each n-th regular screen update is really performed. + (Can be used to accelerate the drawing of complex graphics.) + Second arguments sets delay value (see RawTurtle.delay()) + + Example (for a Turtle instance named turtle): + >>> turtle.tracer(8, 25) + >>> dist = 2 + >>> for i in range(200): + ... turtle.fd(dist) + ... turtle.rt(90) + ... dist += 2 + """ + return self.screen.tracer(flag, delay) + + def _color(self, args): + return self.screen._color(args) + + def _colorstr(self, args): + return self.screen._colorstr(args) + + def _cc(self, args): + """Convert colortriples to hexstrings. + """ + if isinstance(args, str): + return args + try: + r, g, b = args + except (TypeError, ValueError): + raise TurtleGraphicsError("bad color arguments: %s" % str(args)) + if self.screen._colormode == 1.0: + r, g, b = [round(255.0*x) for x in (r, g, b)] + if not ((0 <= r <= 255) and (0 <= g <= 255) and (0 <= b <= 255)): + raise TurtleGraphicsError("bad color sequence: %s" % str(args)) + return "#%02x%02x%02x" % (r, g, b) + + def clone(self): + """Create and return a clone of the turtle. + + No argument. 
+ + Create and return a clone of the turtle with same position, heading + and turtle properties. + + Example (for a Turtle instance named mick): + mick = Turtle() + joe = mick.clone() + """ + screen = self.screen + self._newLine(self._drawing) + + turtle = self.turtle + self.screen = None + self.turtle = None # too make self deepcopy-able + + q = deepcopy(self) + + self.screen = screen + self.turtle = turtle + + q.screen = screen + q.turtle = _TurtleImage(screen, self.turtle.shapeIndex) + + screen._turtles.append(q) + ttype = screen._shapes[self.turtle.shapeIndex]._type + if ttype == "polygon": + q.turtle._item = screen._createpoly() + elif ttype == "image": + q.turtle._item = screen._createimage(screen._shapes["blank"]._data) + elif ttype == "compound": + q.turtle._item = [screen._createpoly() for item in + screen._shapes[self.turtle.shapeIndex]._data] + q.currentLineItem = screen._createline() + q._update() + return q + + def shape(self, name=None): + """Set turtle shape to shape with given name / return current shapename. + + Optional argument: + name -- a string, which is a valid shapename + + Set turtle shape to shape with given name or, if name is not given, + return name of current shape. + Shape with name must exist in the TurtleScreen's shape dictionary. + Initially there are the following polygon shapes: + 'arrow', 'turtle', 'circle', 'square', 'triangle', 'classic'. + To learn about how to deal with shapes see Screen-method register_shape. + + Example (for a Turtle instance named turtle): + >>> turtle.shape() + 'arrow' + >>> turtle.shape("turtle") + >>> turtle.shape() + 'turtle' + """ + if name is None: + return self.turtle.shapeIndex + if not name in self.screen.getshapes(): + raise TurtleGraphicsError("There is no shape named %s" % name) + self.turtle._setshape(name) + self._update() + + def shapesize(self, stretch_wid=None, stretch_len=None, outline=None): + """Set/return turtle's stretchfactors/outline. Set resizemode to "user". + + Optional arguments: + stretch_wid : positive number + stretch_len : positive number + outline : positive number + + Return or set the pen's attributes x/y-stretchfactors and/or outline. + Set resizemode to "user". + If and only if resizemode is set to "user", the turtle will be displayed + stretched according to its stretchfactors: + stretch_wid is stretchfactor perpendicular to orientation + stretch_len is stretchfactor in direction of turtles orientation. + outline determines the width of the shapes's outline. + + Examples (for a Turtle instance named turtle): + >>> turtle.resizemode("user") + >>> turtle.shapesize(5, 5, 12) + >>> turtle.shapesize(outline=8) + """ + if stretch_wid is stretch_len is outline is None: + stretch_wid, stretch_len = self._stretchfactor + return stretch_wid, stretch_len, self._outlinewidth + if stretch_wid == 0 or stretch_len == 0: + raise TurtleGraphicsError("stretch_wid/stretch_len must not be zero") + if stretch_wid is not None: + if stretch_len is None: + stretchfactor = stretch_wid, stretch_wid + else: + stretchfactor = stretch_wid, stretch_len + elif stretch_len is not None: + stretchfactor = self._stretchfactor[0], stretch_len + else: + stretchfactor = self._stretchfactor + if outline is None: + outline = self._outlinewidth + self.pen(resizemode="user", + stretchfactor=stretchfactor, outline=outline) + + def shearfactor(self, shear=None): + """Set or return the current shearfactor. 
+
+        Optional argument: shear -- number, tangent of the shear angle
+
+        Shear the turtleshape according to the given shearfactor shear,
+        which is the tangent of the shear angle. DO NOT change the
+        turtle's heading (direction of movement).
+        If shear is not given: return the current shearfactor, i. e. the
+        tangent of the shear angle, by which lines parallel to the
+        heading of the turtle are sheared.
+
+        Examples (for a Turtle instance named turtle):
+        >>> turtle.shape("circle")
+        >>> turtle.shapesize(5,2)
+        >>> turtle.shearfactor(0.5)
+        >>> turtle.shearfactor()
+        0.5
+        """
+        if shear is None:
+            return self._shearfactor
+        self.pen(resizemode="user", shearfactor=shear)
+
+    def settiltangle(self, angle):
+        """Rotate the turtleshape to point in the specified direction.
+
+        Argument: angle -- number
+
+        Rotate the turtleshape to point in the direction specified by angle,
+        regardless of its current tilt-angle. DO NOT change the turtle's
+        heading (direction of movement).
+
+        Examples (for a Turtle instance named turtle):
+        >>> turtle.shape("circle")
+        >>> turtle.shapesize(5,2)
+        >>> turtle.settiltangle(45)
+        >>> turtle.stamp()
+        >>> turtle.fd(50)
+        >>> turtle.settiltangle(-45)
+        >>> turtle.stamp()
+        >>> turtle.fd(50)
+        """
+        tilt = -angle * self._degreesPerAU * self._angleOrient
+        tilt = math.radians(tilt) % math.tau
+        self.pen(resizemode="user", tilt=tilt)
+
+    def tiltangle(self, angle=None):
+        """Set or return the current tilt-angle.
+
+        Optional argument: angle -- number
+
+        Rotate the turtleshape to point in the direction specified by angle,
+        regardless of its current tilt-angle. DO NOT change the turtle's
+        heading (direction of movement).
+        If angle is not given: return the current tilt-angle, i. e. the angle
+        between the orientation of the turtleshape and the heading of the
+        turtle (its direction of movement).
+
+        (Incorrectly marked as deprecated since Python 3.1, it is really
+        settiltangle that is deprecated.)
+
+        Examples (for a Turtle instance named turtle):
+        >>> turtle.shape("circle")
+        >>> turtle.shapesize(5,2)
+        >>> turtle.tilt(45)
+        >>> turtle.tiltangle()
+        45.0
+        """
+        if angle is None:
+            tilt = -math.degrees(self._tilt) * self._angleOrient
+            return (tilt / self._degreesPerAU) % self._fullcircle
+        else:
+            self.settiltangle(angle)
+
+    def tilt(self, angle):
+        """Rotate the turtleshape by angle.
+
+        Argument:
+        angle -- a number
+
+        Rotate the turtleshape by angle from its current tilt-angle,
+        but do NOT change the turtle's heading (direction of movement).
+
+        Examples (for a Turtle instance named turtle):
+        >>> turtle.shape("circle")
+        >>> turtle.shapesize(5,2)
+        >>> turtle.tilt(30)
+        >>> turtle.fd(50)
+        >>> turtle.tilt(30)
+        >>> turtle.fd(50)
+        """
+        self.settiltangle(angle + self.tiltangle())
+
+    def shapetransform(self, t11=None, t12=None, t21=None, t22=None):
+        """Set or return the current transformation matrix of the turtle shape.
+
+        Optional arguments: t11, t12, t21, t22 -- numbers.
+
+        If none of the matrix elements are given, return the transformation
+        matrix.
+        Otherwise set the given elements and transform the turtleshape
+        according to the matrix consisting of first row t11, t12 and
+        second row t21, t22.
+        Modify stretchfactor, shearfactor and tiltangle according to the
+        given matrix.
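+        Each vertex (x, y) of the shape polygon is mapped to
+        (t11*x + t12*y, t21*x + t22*y); see _getshapepoly below.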
+
+        Examples (for a Turtle instance named turtle):
+        >>> turtle.shape("square")
+        >>> turtle.shapesize(4,2)
+        >>> turtle.shearfactor(-0.5)
+        >>> turtle.shapetransform()
+        (4.0, -1.0, -0.0, 2.0)
+        """
+        if t11 is t12 is t21 is t22 is None:
+            return self._shapetrafo
+        m11, m12, m21, m22 = self._shapetrafo
+        if t11 is not None: m11 = t11
+        if t12 is not None: m12 = t12
+        if t21 is not None: m21 = t21
+        if t22 is not None: m22 = t22
+        # Check the merged matrix: some of the t-arguments may be None
+        # when only part of the matrix is being updated.
+        if m11 * m22 - m12 * m21 == 0:
+            raise TurtleGraphicsError("Bad shape transform matrix: must not be singular")
+        self._shapetrafo = (m11, m12, m21, m22)
+        alfa = math.atan2(-m21, m11) % math.tau
+        sa, ca = math.sin(alfa), math.cos(alfa)
+        a11, a12, a21, a22 = (ca*m11 - sa*m21, ca*m12 - sa*m22,
+                              sa*m11 + ca*m21, sa*m12 + ca*m22)
+        self._stretchfactor = a11, a22
+        self._shearfactor = a12/a22
+        self._tilt = alfa
+        self.pen(resizemode="user")
+
+
+    def _polytrafo(self, poly):
+        """Computes transformed polygon shapes from a shape
+        according to current position and heading.
+        """
+        screen = self.screen
+        p0, p1 = self._position
+        e0, e1 = self._orient
+        e = Vec2D(e0, e1 * screen.yscale / screen.xscale)
+        e0, e1 = (1.0 / abs(e)) * e
+        return [(p0+(e1*x+e0*y)/screen.xscale, p1+(-e0*x+e1*y)/screen.yscale)
+                for (x, y) in poly]
+
+    def get_shapepoly(self):
+        """Return the current shape polygon as tuple of coordinate pairs.
+
+        No argument.
+
+        Examples (for a Turtle instance named turtle):
+        >>> turtle.shape("square")
+        >>> turtle.shapetransform(4, -1, 0, 2)
+        >>> turtle.get_shapepoly()
+        ((50, -20), (30, 20), (-50, 20), (-30, -20))
+
+        """
+        shape = self.screen._shapes[self.turtle.shapeIndex]
+        if shape._type == "polygon":
+            return self._getshapepoly(shape._data, shape._type == "compound")
+        # else return None
+
+    def _getshapepoly(self, polygon, compound=False):
+        """Calculate transformed shape polygon according to resizemode
+        and shapetransform.
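+        In "user" mode (and for components of compound shapes) the full
+        shape transform matrix is applied; in "auto" mode the shape is
+        merely scaled with the current pensize.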
+ """ + if self._resizemode == "user" or compound: + t11, t12, t21, t22 = self._shapetrafo + elif self._resizemode == "auto": + l = max(1, self._pensize/5.0) + t11, t12, t21, t22 = l, 0, 0, l + elif self._resizemode == "noresize": + return polygon + return tuple((t11*x + t12*y, t21*x + t22*y) for (x, y) in polygon) + + def _drawturtle(self): + """Manages the correct rendering of the turtle with respect to + its shape, resizemode, stretch and tilt etc.""" + screen = self.screen + shape = screen._shapes[self.turtle.shapeIndex] + ttype = shape._type + titem = self.turtle._item + if self._shown and screen._updatecounter == 0 and screen._tracing > 0: + self._hidden_from_screen = False + tshape = shape._data + if ttype == "polygon": + if self._resizemode == "noresize": w = 1 + elif self._resizemode == "auto": w = self._pensize + else: w =self._outlinewidth + shape = self._polytrafo(self._getshapepoly(tshape)) + fc, oc = self._fillcolor, self._pencolor + screen._drawpoly(titem, shape, fill=fc, outline=oc, + width=w, top=True) + elif ttype == "image": + screen._drawimage(titem, self._position, tshape) + elif ttype == "compound": + for item, (poly, fc, oc) in zip(titem, tshape): + poly = self._polytrafo(self._getshapepoly(poly, True)) + screen._drawpoly(item, poly, fill=self._cc(fc), + outline=self._cc(oc), width=self._outlinewidth, top=True) + else: + if self._hidden_from_screen: + return + if ttype == "polygon": + screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), "", "") + elif ttype == "image": + screen._drawimage(titem, self._position, + screen._shapes["blank"]._data) + elif ttype == "compound": + for item in titem: + screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), "", "") + self._hidden_from_screen = True + +############################## stamp stuff ############################### + + def stamp(self): + """Stamp a copy of the turtleshape onto the canvas and return its id. + + No argument. + + Stamp a copy of the turtle shape onto the canvas at the current + turtle position. Return a stamp_id for that stamp, which can be + used to delete it by calling clearstamp(stamp_id). 
+ + Example (for a Turtle instance named turtle): + >>> turtle.color("blue") + >>> turtle.stamp() + 13 + >>> turtle.fd(50) + """ + screen = self.screen + shape = screen._shapes[self.turtle.shapeIndex] + ttype = shape._type + tshape = shape._data + if ttype == "polygon": + stitem = screen._createpoly() + if self._resizemode == "noresize": w = 1 + elif self._resizemode == "auto": w = self._pensize + else: w =self._outlinewidth + shape = self._polytrafo(self._getshapepoly(tshape)) + fc, oc = self._fillcolor, self._pencolor + screen._drawpoly(stitem, shape, fill=fc, outline=oc, + width=w, top=True) + elif ttype == "image": + stitem = screen._createimage("") + screen._drawimage(stitem, self._position, tshape) + elif ttype == "compound": + stitem = [] + for element in tshape: + item = screen._createpoly() + stitem.append(item) + stitem = tuple(stitem) + for item, (poly, fc, oc) in zip(stitem, tshape): + poly = self._polytrafo(self._getshapepoly(poly, True)) + screen._drawpoly(item, poly, fill=self._cc(fc), + outline=self._cc(oc), width=self._outlinewidth, top=True) + self.stampItems.append(stitem) + self.undobuffer.push(("stamp", stitem)) + return stitem + + def _clearstamp(self, stampid): + """does the work for clearstamp() and clearstamps() + """ + if stampid in self.stampItems: + if isinstance(stampid, tuple): + for subitem in stampid: + self.screen._delete(subitem) + else: + self.screen._delete(stampid) + self.stampItems.remove(stampid) + # Delete stampitem from undobuffer if necessary + # if clearstamp is called directly. + item = ("stamp", stampid) + buf = self.undobuffer + if item not in buf.buffer: + return + index = buf.buffer.index(item) + buf.buffer.remove(item) + if index <= buf.ptr: + buf.ptr = (buf.ptr - 1) % buf.bufsize + buf.buffer.insert((buf.ptr+1)%buf.bufsize, [None]) + + def clearstamp(self, stampid): + """Delete stamp with given stampid + + Argument: + stampid - an integer, must be return value of previous stamp() call. + + Example (for a Turtle instance named turtle): + >>> turtle.color("blue") + >>> astamp = turtle.stamp() + >>> turtle.fd(50) + >>> turtle.clearstamp(astamp) + """ + self._clearstamp(stampid) + self._update() + + def clearstamps(self, n=None): + """Delete all or first/last n of turtle's stamps. + + Optional argument: + n -- an integer + + If n is None, delete all of pen's stamps, + else if n > 0 delete first n stamps + else if n < 0 delete last n stamps. + + Example (for a Turtle instance named turtle): + >>> for i in range(8): + ... turtle.stamp(); turtle.fd(30) + ... + >>> turtle.clearstamps(2) + >>> turtle.clearstamps(-2) + >>> turtle.clearstamps() + """ + if n is None: + toDelete = self.stampItems[:] + elif n >= 0: + toDelete = self.stampItems[:n] + else: + toDelete = self.stampItems[n:] + for item in toDelete: + self._clearstamp(item) + self._update() + + def _goto(self, end): + """Move the pen to the point end, thereby drawing a line + if pen is down. All other methods for turtle movement depend + on this one. 
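+
+        If tracing is on and speed is nonzero, the motion is animated by
+        subdividing the segment into short hops, whose number shrinks as
+        the speed setting grows (see the nhops computation below).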
+ """ + ## Version with undo-stuff + go_modes = ( self._drawing, + self._pencolor, + self._pensize, + isinstance(self._fillpath, list)) + screen = self.screen + undo_entry = ("go", self._position, end, go_modes, + (self.currentLineItem, + self.currentLine[:], + screen._pointlist(self.currentLineItem), + self.items[:]) + ) + if self.undobuffer: + self.undobuffer.push(undo_entry) + start = self._position + if self._speed and screen._tracing == 1: + diff = (end-start) + diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2 + nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed)) + delta = diff * (1.0/nhops) + for n in range(1, nhops): + if n == 1: + top = True + else: + top = False + self._position = start + delta * n + if self._drawing: + screen._drawline(self.drawingLineItem, + (start, self._position), + self._pencolor, self._pensize, top) + self._update() + if self._drawing: + screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)), + fill="", width=self._pensize) + # Turtle now at end, + if self._drawing: # now update currentLine + self.currentLine.append(end) + if isinstance(self._fillpath, list): + self._fillpath.append(end) + ###### vererbung!!!!!!!!!!!!!!!!!!!!!! + self._position = end + if self._creatingPoly: + self._poly.append(end) + if len(self.currentLine) > 42: # 42! answer to the ultimate question + # of life, the universe and everything + self._newLine() + self._update() #count=True) + + def _undogoto(self, entry): + """Reverse a _goto. Used for undo() + """ + old, new, go_modes, coodata = entry + drawing, pc, ps, filling = go_modes + cLI, cL, pl, items = coodata + screen = self.screen + if abs(self._position - new) > 0.5: + print ("undogoto: HALLO-DA-STIMMT-WAS-NICHT!") + # restore former situation + self.currentLineItem = cLI + self.currentLine = cL + + if pl == [(0, 0), (0, 0)]: + usepc = "" + else: + usepc = pc + screen._drawline(cLI, pl, fill=usepc, width=ps) + + todelete = [i for i in self.items if (i not in items) and + (screen._type(i) == "line")] + for i in todelete: + screen._delete(i) + self.items.remove(i) + + start = old + if self._speed and screen._tracing == 1: + diff = old - new + diffsq = (diff[0]*screen.xscale)**2 + (diff[1]*screen.yscale)**2 + nhops = 1+int((diffsq**0.5)/(3*(1.1**self._speed)*self._speed)) + delta = diff * (1.0/nhops) + for n in range(1, nhops): + if n == 1: + top = True + else: + top = False + self._position = new + delta * n + if drawing: + screen._drawline(self.drawingLineItem, + (start, self._position), + pc, ps, top) + self._update() + if drawing: + screen._drawline(self.drawingLineItem, ((0, 0), (0, 0)), + fill="", width=ps) + # Turtle now at position old, + self._position = old + ## if undo is done during creating a polygon, the last vertex + ## will be deleted. if the polygon is entirely deleted, + ## creatingPoly will be set to False. + ## Polygons created before the last one will not be affected by undo() + if self._creatingPoly: + if len(self._poly) > 0: + self._poly.pop() + if self._poly == []: + self._creatingPoly = False + self._poly = None + if filling: + if self._fillpath == []: + self._fillpath = None + print("Unwahrscheinlich in _undogoto!") + elif self._fillpath is not None: + self._fillpath.pop() + self._update() #count=True) + + def _rotate(self, angle): + """Turns pen clockwise by angle. 
+ """ + if self.undobuffer: + self.undobuffer.push(("rot", angle, self._degreesPerAU)) + angle *= self._degreesPerAU + neworient = self._orient.rotate(angle) + tracing = self.screen._tracing + if tracing == 1 and self._speed > 0: + anglevel = 3.0 * self._speed + steps = 1 + int(abs(angle)/anglevel) + delta = 1.0*angle/steps + for _ in range(steps): + self._orient = self._orient.rotate(delta) + self._update() + self._orient = neworient + self._update() + + def _newLine(self, usePos=True): + """Closes current line item and starts a new one. + Remark: if current line became too long, animation + performance (via _drawline) slowed down considerably. + """ + if len(self.currentLine) > 1: + self.screen._drawline(self.currentLineItem, self.currentLine, + self._pencolor, self._pensize) + self.currentLineItem = self.screen._createline() + self.items.append(self.currentLineItem) + else: + self.screen._drawline(self.currentLineItem, top=True) + self.currentLine = [] + if usePos: + self.currentLine = [self._position] + + def filling(self): + """Return fillstate (True if filling, False else). + + No argument. + + Example (for a Turtle instance named turtle): + >>> turtle.begin_fill() + >>> if turtle.filling(): + ... turtle.pensize(5) + ... else: + ... turtle.pensize(3) + """ + return isinstance(self._fillpath, list) + + def begin_fill(self): + """Called just before drawing a shape to be filled. + + No argument. + + Example (for a Turtle instance named turtle): + >>> turtle.color("black", "red") + >>> turtle.begin_fill() + >>> turtle.circle(60) + >>> turtle.end_fill() + """ + if not self.filling(): + self._fillitem = self.screen._createpoly() + self.items.append(self._fillitem) + self._fillpath = [self._position] + self._newLine() + if self.undobuffer: + self.undobuffer.push(("beginfill", self._fillitem)) + self._update() + + + def end_fill(self): + """Fill the shape drawn after the call begin_fill(). + + No argument. + + Example (for a Turtle instance named turtle): + >>> turtle.color("black", "red") + >>> turtle.begin_fill() + >>> turtle.circle(60) + >>> turtle.end_fill() + """ + if self.filling(): + if len(self._fillpath) > 2: + self.screen._drawpoly(self._fillitem, self._fillpath, + fill=self._fillcolor) + if self.undobuffer: + self.undobuffer.push(("dofill", self._fillitem)) + self._fillitem = self._fillpath = None + self._update() + + def dot(self, size=None, *color): + """Draw a dot with diameter size, using color. + + Optional arguments: + size -- an integer >= 1 (if given) + color -- a colorstring or a numeric color tuple + + Draw a circular dot with diameter size, using color. + If size is not given, the maximum of pensize+4 and 2*pensize is used. 
+ + Example (for a Turtle instance named turtle): + >>> turtle.dot() + >>> turtle.fd(50); turtle.dot(20, "blue"); turtle.fd(50) + """ + if not color: + if isinstance(size, (str, tuple)): + color = self._colorstr(size) + size = self._pensize + max(self._pensize, 4) + else: + color = self._pencolor + if not size: + size = self._pensize + max(self._pensize, 4) + else: + if size is None: + size = self._pensize + max(self._pensize, 4) + color = self._colorstr(color) + if hasattr(self.screen, "_dot"): + item = self.screen._dot(self._position, size, color) + self.items.append(item) + if self.undobuffer: + self.undobuffer.push(("dot", item)) + else: + pen = self.pen() + if self.undobuffer: + self.undobuffer.push(["seq"]) + self.undobuffer.cumulate = True + try: + if self.resizemode() == 'auto': + self.ht() + self.pendown() + self.pensize(size) + self.pencolor(color) + self.forward(0) + finally: + self.pen(pen) + if self.undobuffer: + self.undobuffer.cumulate = False + + def _write(self, txt, align, font): + """Performs the writing for write() + """ + item, end = self.screen._write(self._position, txt, align, font, + self._pencolor) + self._update() + self.items.append(item) + if self.undobuffer: + self.undobuffer.push(("wri", item)) + return end + + def write(self, arg, move=False, align="left", font=("Arial", 8, "normal")): + """Write text at the current turtle position. + + Arguments: + arg -- info, which is to be written to the TurtleScreen + move (optional) -- True/False + align (optional) -- one of the strings "left", "center" or right" + font (optional) -- a triple (fontname, fontsize, fonttype) + + Write text - the string representation of arg - at the current + turtle position according to align ("left", "center" or right") + and with the given font. + If move is True, the pen is moved to the bottom-right corner + of the text. By default, move is False. + + Example (for a Turtle instance named turtle): + >>> turtle.write('Home = ', True, align="center") + >>> turtle.write((0,0), True) + """ + if self.undobuffer: + self.undobuffer.push(["seq"]) + self.undobuffer.cumulate = True + end = self._write(str(arg), align.lower(), font) + if move: + x, y = self.pos() + self.setpos(end, y) + if self.undobuffer: + self.undobuffer.cumulate = False + + def begin_poly(self): + """Start recording the vertices of a polygon. + + No argument. + + Start recording the vertices of a polygon. Current turtle position + is first point of polygon. + + Example (for a Turtle instance named turtle): + >>> turtle.begin_poly() + """ + self._poly = [self._position] + self._creatingPoly = True + + def end_poly(self): + """Stop recording the vertices of a polygon. + + No argument. + + Stop recording the vertices of a polygon. Current turtle position is + last point of polygon. This will be connected with the first point. + + Example (for a Turtle instance named turtle): + >>> turtle.end_poly() + """ + self._creatingPoly = False + + def get_poly(self): + """Return the lastly recorded polygon. + + No argument. + + Example (for a Turtle instance named turtle): + >>> p = turtle.get_poly() + >>> turtle.register_shape("myFavouriteShape", p) + """ + ## check if there is any poly? + if self._poly is not None: + return tuple(self._poly) + + def getscreen(self): + """Return the TurtleScreen object, the turtle is drawing on. + + No argument. + + Return the TurtleScreen object, the turtle is drawing on. + So TurtleScreen-methods can be called for that object. 
+ + Example (for a Turtle instance named turtle): + >>> ts = turtle.getscreen() + >>> ts + + >>> ts.bgcolor("pink") + """ + return self.screen + + def getturtle(self): + """Return the Turtleobject itself. + + No argument. + + Only reasonable use: as a function to return the 'anonymous turtle': + + Example: + >>> pet = getturtle() + >>> pet.fd(50) + >>> pet + + >>> turtles() + [] + """ + return self + + getpen = getturtle + + + ################################################################ + ### screen oriented methods recurring to methods of TurtleScreen + ################################################################ + + def _delay(self, delay=None): + """Set delay value which determines speed of turtle animation. + """ + return self.screen.delay(delay) + + def onclick(self, fun, btn=1, add=None): + """Bind fun to mouse-click event on this turtle on canvas. + + Arguments: + fun -- a function with two arguments, to which will be assigned + the coordinates of the clicked point on the canvas. + btn -- number of the mouse-button defaults to 1 (left mouse button). + add -- True or False. If True, new binding will be added, otherwise + it will replace a former binding. + + Example for the anonymous turtle, i. e. the procedural way: + + >>> def turn(x, y): + ... left(360) + ... + >>> onclick(turn) # Now clicking into the turtle will turn it. + >>> onclick(None) # event-binding will be removed + """ + self.screen._onclick(self.turtle._item, fun, btn, add) + self._update() + + def onrelease(self, fun, btn=1, add=None): + """Bind fun to mouse-button-release event on this turtle on canvas. + + Arguments: + fun -- a function with two arguments, to which will be assigned + the coordinates of the clicked point on the canvas. + btn -- number of the mouse-button defaults to 1 (left mouse button). + + Example (for a MyTurtle instance named joe): + >>> class MyTurtle(Turtle): + ... def glow(self,x,y): + ... self.fillcolor("red") + ... def unglow(self,x,y): + ... self.fillcolor("") + ... + >>> joe = MyTurtle() + >>> joe.onclick(joe.glow) + >>> joe.onrelease(joe.unglow) + + Clicking on joe turns fillcolor red, unclicking turns it to + transparent. + """ + self.screen._onrelease(self.turtle._item, fun, btn, add) + self._update() + + def ondrag(self, fun, btn=1, add=None): + """Bind fun to mouse-move event on this turtle on canvas. + + Arguments: + fun -- a function with two arguments, to which will be assigned + the coordinates of the clicked point on the canvas. + btn -- number of the mouse-button defaults to 1 (left mouse button). + + Every sequence of mouse-move-events on a turtle is preceded by a + mouse-click event on that turtle. + + Example (for a Turtle instance named turtle): + >>> turtle.ondrag(turtle.goto) + + Subsequently clicking and dragging a Turtle will move it + across the screen thereby producing handdrawings (if pen is + down). 
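+
+        Passing None as fun removes the binding again (as with onclick
+        and onrelease):
+
+        >>> turtle.ondrag(None)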
+ """ + self.screen._ondrag(self.turtle._item, fun, btn, add) + + + def _undo(self, action, data): + """Does the main part of the work for undo() + """ + if self.undobuffer is None: + return + if action == "rot": + angle, degPAU = data + self._rotate(-angle*degPAU/self._degreesPerAU) + dummy = self.undobuffer.pop() + elif action == "stamp": + stitem = data[0] + self.clearstamp(stitem) + elif action == "go": + self._undogoto(data) + elif action in ["wri", "dot"]: + item = data[0] + self.screen._delete(item) + self.items.remove(item) + elif action == "dofill": + item = data[0] + self.screen._drawpoly(item, ((0, 0),(0, 0),(0, 0)), + fill="", outline="") + elif action == "beginfill": + item = data[0] + self._fillitem = self._fillpath = None + if item in self.items: + self.screen._delete(item) + self.items.remove(item) + elif action == "pen": + TPen.pen(self, data[0]) + self.undobuffer.pop() + + def undo(self): + """undo (repeatedly) the last turtle action. + + No argument. + + undo (repeatedly) the last turtle action. + Number of available undo actions is determined by the size of + the undobuffer. + + Example (for a Turtle instance named turtle): + >>> for i in range(4): + ... turtle.fd(50); turtle.lt(80) + ... + >>> for i in range(8): + ... turtle.undo() + ... + """ + if self.undobuffer is None: + return + item = self.undobuffer.pop() + action = item[0] + data = item[1:] + if action == "seq": + while data: + item = data.pop() + self._undo(item[0], item[1:]) + else: + self._undo(action, data) + + turtlesize = shapesize + +RawPen = RawTurtle + +### Screen - Singleton ######################## + +def Screen(): + """Return the singleton screen object. + If none exists at the moment, create a new one and return it, + else return the existing one.""" + if Turtle._screen is None: + Turtle._screen = _Screen() + return Turtle._screen + +class _Screen(TurtleScreen): + + _root = None + _canvas = None + _title = _CFG["title"] + + def __init__(self): + # XXX there is no need for this code to be conditional, + # as there will be only a single _Screen instance, anyway + # XXX actually, the turtle demo is injecting root window, + # so perhaps the conditional creation of a root should be + # preserved (perhaps by passing it as an optional parameter) + if _Screen._root is None: + _Screen._root = self._root = _Root() + self._root.title(_Screen._title) + self._root.ondestroy(self._destroy) + if _Screen._canvas is None: + width = _CFG["width"] + height = _CFG["height"] + canvwidth = _CFG["canvwidth"] + canvheight = _CFG["canvheight"] + leftright = _CFG["leftright"] + topbottom = _CFG["topbottom"] + self._root.setupcanvas(width, height, canvwidth, canvheight) + _Screen._canvas = self._root._getcanvas() + TurtleScreen.__init__(self, _Screen._canvas) + self.setup(width, height, leftright, topbottom) + + def setup(self, width=_CFG["width"], height=_CFG["height"], + startx=_CFG["leftright"], starty=_CFG["topbottom"]): + """ Set the size and position of the main window. + + Arguments: + width: as integer a size in pixels, as float a fraction of the screen. + Default is 50% of screen. + height: as integer the height in pixels, as float a fraction of the + screen. Default is 75% of screen. + startx: if positive, starting position in pixels from the left + edge of the screen, if negative from the right edge + Default, startx=None is to center window horizontally. 
+        starty: if positive, starting position in pixels from the top
+                edge of the screen, if negative from the bottom edge
+                Default, starty=None is to center window vertically.
+
+        Examples (for a Screen instance named screen):
+        >>> screen.setup(width=200, height=200, startx=0, starty=0)
+
+        sets window to 200x200 pixels, in upper left of screen
+
+        >>> screen.setup(width=.75, height=0.5, startx=None, starty=None)
+
+        sets window to 75% of screen by 50% of screen and centers
+        """
+        if not hasattr(self._root, "set_geometry"):
+            return
+        sw = self._root.win_width()
+        sh = self._root.win_height()
+        if isinstance(width, float) and 0 <= width <= 1:
+            width = sw*width
+        if startx is None:
+            startx = (sw - width) / 2
+        if isinstance(height, float) and 0 <= height <= 1:
+            height = sh*height
+        if starty is None:
+            starty = (sh - height) / 2
+        self._root.set_geometry(width, height, startx, starty)
+        self.update()
+
+    def title(self, titlestring):
+        """Set title of turtle-window
+
+        Argument:
+        titlestring -- a string, to appear in the titlebar of the
+                       turtle graphics window.
+
+        This is a method of Screen-class. Not available for TurtleScreen-
+        objects.
+
+        Example (for a Screen instance named screen):
+        >>> screen.title("Welcome to the turtle-zoo!")
+        """
+        if _Screen._root is not None:
+            _Screen._root.title(titlestring)
+        _Screen._title = titlestring
+
+    def _destroy(self):
+        root = self._root
+        if root is _Screen._root:
+            Turtle._pen = None
+            Turtle._screen = None
+            _Screen._root = None
+            _Screen._canvas = None
+        TurtleScreen._RUNNING = False
+        root.destroy()
+
+    def bye(self):
+        """Shut the turtlegraphics window.
+
+        Example (for a TurtleScreen instance named screen):
+        >>> screen.bye()
+        """
+        self._destroy()
+
+    def exitonclick(self):
+        """Go into mainloop until the mouse is clicked.
+
+        No arguments.
+
+        Bind bye() method to mouseclick on TurtleScreen.
+        If the "using_IDLE" value in the configuration dictionary is False
+        (the default), enter mainloop.
+        If IDLE with the -n switch (no subprocess) is used, this value should
+        be set to True in turtle.cfg. In this case IDLE's mainloop
+        is active also for the client script.
+
+        This is a method of the Screen-class and not available for
+        TurtleScreen instances.
+
+        Example (for a Screen instance named screen):
+        >>> screen.exitonclick()
+
+        """
+        def exitGracefully(x, y):
+            """Screen.bye() with two dummy-parameters"""
+            self.bye()
+        self.onclick(exitGracefully)
+        if _CFG["using_IDLE"]:
+            return
+        try:
+            mainloop()
+        except AttributeError:
+            exit(0)
+
+class Turtle(RawTurtle):
+    """RawTurtle auto-creating (scrolled) canvas.
+
+    When a Turtle object is created or a function derived from some
+    Turtle method is called a TurtleScreen object is automatically created.
+    """
+    _pen = None
+    _screen = None
+
+    def __init__(self,
+                 shape=_CFG["shape"],
+                 undobuffersize=_CFG["undobuffersize"],
+                 visible=_CFG["visible"]):
+        if Turtle._screen is None:
+            Turtle._screen = Screen()
+        RawTurtle.__init__(self, Turtle._screen,
+                           shape=shape,
+                           undobuffersize=undobuffersize,
+                           visible=visible)
+
+Pen = Turtle
+
+def write_docstringdict(filename="turtle_docstringdict"):
+    """Create and write docstring-dictionary to file.
+
+    Optional argument:
+    filename -- a string, used as filename
+                default value is turtle_docstringdict
+
+    Has to be called explicitly (it is not used by the turtle graphics
+    classes). The docstring dictionary will be written to the Python
+    script <filename>.py. It is intended to serve as a template for
+    translation of the docstrings into different languages.
+ """ + docsdict = {} + + for methodname in _tg_screen_functions: + key = "_Screen."+methodname + docsdict[key] = eval(key).__doc__ + for methodname in _tg_turtle_functions: + key = "Turtle."+methodname + docsdict[key] = eval(key).__doc__ + + with open("%s.py" % filename,"w") as f: + keys = sorted(x for x in docsdict + if x.split('.')[1] not in _alias_list) + f.write('docsdict = {\n\n') + for key in keys[:-1]: + f.write('%s :\n' % repr(key)) + f.write(' """%s\n""",\n\n' % docsdict[key]) + key = keys[-1] + f.write('%s :\n' % repr(key)) + f.write(' """%s\n"""\n\n' % docsdict[key]) + f.write("}\n") + f.close() + +def read_docstrings(lang): + """Read in docstrings from lang-specific docstring dictionary. + + Transfer docstrings, translated to lang, from a dictionary-file + to the methods of classes Screen and Turtle and - in revised form - + to the corresponding functions. + """ + modname = "turtle_docstringdict_%(language)s" % {'language':lang.lower()} + module = __import__(modname) + docsdict = module.docsdict + for key in docsdict: + try: +# eval(key).im_func.__doc__ = docsdict[key] + eval(key).__doc__ = docsdict[key] + except Exception: + print("Bad docstring-entry: %s" % key) + +_LANGUAGE = _CFG["language"] + +try: + if _LANGUAGE != "english": + read_docstrings(_LANGUAGE) +except ImportError: + print("Cannot find docsdict for", _LANGUAGE) +except Exception: + print ("Unknown Error when trying to import %s-docstring-dictionary" % + _LANGUAGE) + + +def getmethparlist(ob): + """Get strings describing the arguments for the given object + + Returns a pair of strings representing function parameter lists + including parenthesis. The first string is suitable for use in + function definition and the second is suitable for use in function + call. The "self" parameter is not included. + """ + defText = callText = "" + # bit of a hack for methods - turn it into a function + # but we drop the "self" param. + # Try and build one for Python defined functions + args, varargs, varkw = inspect.getargs(ob.__code__) + items2 = args[1:] + realArgs = args[1:] + defaults = ob.__defaults__ or [] + defaults = ["=%r" % (value,) for value in defaults] + defaults = [""] * (len(realArgs)-len(defaults)) + defaults + items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)] + if varargs is not None: + items1.append("*" + varargs) + items2.append("*" + varargs) + if varkw is not None: + items1.append("**" + varkw) + items2.append("**" + varkw) + defText = ", ".join(items1) + defText = "(%s)" % defText + callText = ", ".join(items2) + callText = "(%s)" % callText + return defText, callText + +def _turtle_docrevise(docstr): + """To reduce docstrings from RawTurtle class for functions + """ + import re + if docstr is None: + return None + turtlename = _CFG["exampleturtle"] + newdocstr = docstr.replace("%s." % turtlename,"") + parexp = re.compile(r' \(.+ %s\):' % turtlename) + newdocstr = parexp.sub(":", newdocstr) + return newdocstr + +def _screen_docrevise(docstr): + """To reduce docstrings from TurtleScreen class for functions + """ + import re + if docstr is None: + return None + screenname = _CFG["examplescreen"] + newdocstr = docstr.replace("%s." % screenname,"") + parexp = re.compile(r' \(.+ %s\):' % screenname) + newdocstr = parexp.sub(":", newdocstr) + return newdocstr + +## The following mechanism makes all methods of RawTurtle and Turtle available +## as functions. So we can enhance, change, add, delete methods to these +## classes and do not need to change anything here. 
+ +__func_body = """\ +def {name}{paramslist}: + if {obj} is None: + if not TurtleScreen._RUNNING: + TurtleScreen._RUNNING = True + raise Terminator + {obj} = {init} + try: + return {obj}.{name}{argslist} + except TK.TclError: + if not TurtleScreen._RUNNING: + TurtleScreen._RUNNING = True + raise Terminator + raise +""" + +def _make_global_funcs(functions, cls, obj, init, docrevise): + for methodname in functions: + method = getattr(cls, methodname) + pl1, pl2 = getmethparlist(method) + if pl1 == "": + print(">>>>>>", pl1, pl2) + continue + defstr = __func_body.format(obj=obj, init=init, name=methodname, + paramslist=pl1, argslist=pl2) + exec(defstr, globals()) + globals()[methodname].__doc__ = docrevise(method.__doc__) + +_make_global_funcs(_tg_screen_functions, _Screen, + 'Turtle._screen', 'Screen()', _screen_docrevise) +_make_global_funcs(_tg_turtle_functions, Turtle, + 'Turtle._pen', 'Turtle()', _turtle_docrevise) + + +done = mainloop + +if __name__ == "__main__": + def switchpen(): + if isdown(): + pu() + else: + pd() + + def demo1(): + """Demo of old turtle.py - module""" + reset() + tracer(True) + up() + backward(100) + down() + # draw 3 squares; the last filled + width(3) + for i in range(3): + if i == 2: + begin_fill() + for _ in range(4): + forward(20) + left(90) + if i == 2: + color("maroon") + end_fill() + up() + forward(30) + down() + width(1) + color("black") + # move out of the way + tracer(False) + up() + right(90) + forward(100) + right(90) + forward(100) + right(180) + down() + # some text + write("startstart", 1) + write("start", 1) + color("red") + # staircase + for i in range(5): + forward(20) + left(90) + forward(20) + right(90) + # filled staircase + tracer(True) + begin_fill() + for i in range(5): + forward(20) + left(90) + forward(20) + right(90) + end_fill() + # more text + + def demo2(): + """Demo of some new features.""" + speed(1) + st() + pensize(3) + setheading(towards(0, 0)) + radius = distance(0, 0)/2.0 + rt(90) + for _ in range(18): + switchpen() + circle(radius, 10) + write("wait a moment...") + while undobufferentries(): + undo() + reset() + lt(90) + colormode(255) + laenge = 10 + pencolor("green") + pensize(3) + lt(180) + for i in range(-2, 16): + if i > 0: + begin_fill() + fillcolor(255-15*i, 0, 15*i) + for _ in range(3): + fd(laenge) + lt(120) + end_fill() + laenge += 10 + lt(15) + speed((speed()+1)%12) + #end_fill() + + lt(120) + pu() + fd(70) + rt(30) + pd() + color("red","yellow") + speed(0) + begin_fill() + for _ in range(4): + circle(50, 90) + rt(90) + fd(30) + rt(90) + end_fill() + lt(90) + pu() + fd(30) + pd() + shape("turtle") + + tri = getturtle() + tri.resizemode("auto") + turtle = Turtle() + turtle.resizemode("auto") + turtle.shape("turtle") + turtle.reset() + turtle.left(90) + turtle.speed(0) + turtle.up() + turtle.goto(280, 40) + turtle.lt(30) + turtle.down() + turtle.speed(6) + turtle.color("blue","orange") + turtle.pensize(2) + tri.speed(6) + setheading(towards(turtle)) + count = 1 + while tri.distance(turtle) > 4: + turtle.fd(3.5) + turtle.lt(0.6) + tri.setheading(tri.towards(turtle)) + tri.fd(4) + if count % 20 == 0: + turtle.stamp() + tri.stamp() + switchpen() + count += 1 + tri.write("CAUGHT! 
", font=("Arial", 16, "bold"), align="right") + tri.pencolor("black") + tri.pencolor("red") + + def baba(xdummy, ydummy): + clearscreen() + bye() + + time.sleep(2) + + while undobufferentries(): + tri.undo() + turtle.undo() + tri.fd(50) + tri.write(" Click me!", font = ("Courier", 12, "bold") ) + tri.onclick(baba, 1) + + demo1() + demo2() + exitonclick() diff --git a/llava/lib/python3.10/uuid.py b/llava/lib/python3.10/uuid.py new file mode 100644 index 0000000000000000000000000000000000000000..fe9f87b79457fac74f7dee5643a01870dd3fd8e9 --- /dev/null +++ b/llava/lib/python3.10/uuid.py @@ -0,0 +1,733 @@ +r"""UUID objects (universally unique identifiers) according to RFC 4122. + +This module provides immutable UUID objects (class UUID) and the functions +uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5 +UUIDs as specified in RFC 4122. + +If all you want is a unique ID, you should probably call uuid1() or uuid4(). +Note that uuid1() may compromise privacy since it creates a UUID containing +the computer's network address. uuid4() creates a random UUID. + +Typical usage: + + >>> import uuid + + # make a UUID based on the host ID and current time + >>> uuid.uuid1() # doctest: +SKIP + UUID('a8098c1a-f86e-11da-bd1a-00112444be1e') + + # make a UUID using an MD5 hash of a namespace UUID and a name + >>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org') + UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e') + + # make a random UUID + >>> uuid.uuid4() # doctest: +SKIP + UUID('16fd2706-8baf-433b-82eb-8c7fada847da') + + # make a UUID using a SHA-1 hash of a namespace UUID and a name + >>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org') + UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d') + + # make a UUID from a string of hex digits (braces and hyphens ignored) + >>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}') + + # convert a UUID to a string of hex digits in standard form + >>> str(x) + '00010203-0405-0607-0809-0a0b0c0d0e0f' + + # get the raw 16 bytes of the UUID + >>> x.bytes + b'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f' + + # make a UUID from a 16-byte string + >>> uuid.UUID(bytes=x.bytes) + UUID('00010203-0405-0607-0809-0a0b0c0d0e0f') +""" + +import os +import sys + +from enum import Enum + + +__author__ = 'Ka-Ping Yee ' + +# The recognized platforms - known behaviors +if sys.platform in ('win32', 'darwin'): + _AIX = _LINUX = False +else: + import platform + _platform_system = platform.system() + _AIX = _platform_system == 'AIX' + _LINUX = _platform_system == 'Linux' + +_MAC_DELIM = b':' +_MAC_OMITS_LEADING_ZEROES = False +if _AIX: + _MAC_DELIM = b'.' + _MAC_OMITS_LEADING_ZEROES = True + +RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [ + 'reserved for NCS compatibility', 'specified in RFC 4122', + 'reserved for Microsoft compatibility', 'reserved for future definition'] + +int_ = int # The built-in int type +bytes_ = bytes # The built-in bytes type + + +class SafeUUID(Enum): + safe = 0 + unsafe = -1 + unknown = None + + +class UUID: + """Instances of the UUID class represent UUIDs as specified in RFC 4122. + UUID objects are immutable, hashable, and usable as dictionary keys. + Converting a UUID to a string with str() yields something in the form + '12345678-1234-1234-1234-123456789abc'. 
The UUID constructor accepts + five possible forms: a similar string of hexadecimal digits, or a tuple + of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and + 48-bit values respectively) as an argument named 'fields', or a string + of 16 bytes (with all the integer fields in big-endian order) as an + argument named 'bytes', or a string of 16 bytes (with the first three + fields in little-endian order) as an argument named 'bytes_le', or a + single 128-bit integer as an argument named 'int'. + + UUIDs have these read-only attributes: + + bytes the UUID as a 16-byte string (containing the six + integer fields in big-endian byte order) + + bytes_le the UUID as a 16-byte string (with time_low, time_mid, + and time_hi_version in little-endian byte order) + + fields a tuple of the six integer fields of the UUID, + which are also available as six individual attributes + and two derived attributes: + + time_low the first 32 bits of the UUID + time_mid the next 16 bits of the UUID + time_hi_version the next 16 bits of the UUID + clock_seq_hi_variant the next 8 bits of the UUID + clock_seq_low the next 8 bits of the UUID + node the last 48 bits of the UUID + + time the 60-bit timestamp + clock_seq the 14-bit sequence number + + hex the UUID as a 32-character hexadecimal string + + int the UUID as a 128-bit integer + + urn the UUID as a URN as specified in RFC 4122 + + variant the UUID variant (one of the constants RESERVED_NCS, + RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE) + + version the UUID version number (1 through 5, meaningful only + when the variant is RFC_4122) + + is_safe An enum indicating whether the UUID has been generated in + a way that is safe for multiprocessing applications, via + uuid_generate_time_safe(3). + """ + + __slots__ = ('int', 'is_safe', '__weakref__') + + def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None, + int=None, version=None, + *, is_safe=SafeUUID.unknown): + r"""Create a UUID from either a string of 32 hexadecimal digits, + a string of 16 bytes as the 'bytes' argument, a string of 16 bytes + in little-endian order as the 'bytes_le' argument, a tuple of six + integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version, + 8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as + the 'fields' argument, or a single 128-bit integer as the 'int' + argument. When a string of hex digits is given, curly braces, + hyphens, and a URN prefix are all optional. For example, these + expressions all yield the same UUID: + + UUID('{12345678-1234-5678-1234-567812345678}') + UUID('12345678123456781234567812345678') + UUID('urn:uuid:12345678-1234-5678-1234-567812345678') + UUID(bytes='\x12\x34\x56\x78'*4) + UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' + + '\x12\x34\x56\x78\x12\x34\x56\x78') + UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678)) + UUID(int=0x12345678123456781234567812345678) + + Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must + be given. The 'version' argument is optional; if given, the resulting + UUID will have its variant and version set according to RFC 4122, + overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'. + + is_safe is an enum exposed as an attribute on the instance. It + indicates whether the UUID has been generated in a way that is safe + for multiprocessing applications, via uuid_generate_time_safe(3). 
+ """ + + if [hex, bytes, bytes_le, fields, int].count(None) != 4: + raise TypeError('one of the hex, bytes, bytes_le, fields, ' + 'or int arguments must be given') + if hex is not None: + hex = hex.replace('urn:', '').replace('uuid:', '') + hex = hex.strip('{}').replace('-', '') + if len(hex) != 32: + raise ValueError('badly formed hexadecimal UUID string') + int = int_(hex, 16) + if bytes_le is not None: + if len(bytes_le) != 16: + raise ValueError('bytes_le is not a 16-char string') + bytes = (bytes_le[4-1::-1] + bytes_le[6-1:4-1:-1] + + bytes_le[8-1:6-1:-1] + bytes_le[8:]) + if bytes is not None: + if len(bytes) != 16: + raise ValueError('bytes is not a 16-char string') + assert isinstance(bytes, bytes_), repr(bytes) + int = int_.from_bytes(bytes, byteorder='big') + if fields is not None: + if len(fields) != 6: + raise ValueError('fields is not a 6-tuple') + (time_low, time_mid, time_hi_version, + clock_seq_hi_variant, clock_seq_low, node) = fields + if not 0 <= time_low < 1<<32: + raise ValueError('field 1 out of range (need a 32-bit value)') + if not 0 <= time_mid < 1<<16: + raise ValueError('field 2 out of range (need a 16-bit value)') + if not 0 <= time_hi_version < 1<<16: + raise ValueError('field 3 out of range (need a 16-bit value)') + if not 0 <= clock_seq_hi_variant < 1<<8: + raise ValueError('field 4 out of range (need an 8-bit value)') + if not 0 <= clock_seq_low < 1<<8: + raise ValueError('field 5 out of range (need an 8-bit value)') + if not 0 <= node < 1<<48: + raise ValueError('field 6 out of range (need a 48-bit value)') + clock_seq = (clock_seq_hi_variant << 8) | clock_seq_low + int = ((time_low << 96) | (time_mid << 80) | + (time_hi_version << 64) | (clock_seq << 48) | node) + if int is not None: + if not 0 <= int < 1<<128: + raise ValueError('int is out of range (need a 128-bit value)') + if version is not None: + if not 1 <= version <= 5: + raise ValueError('illegal version number') + # Set the variant to RFC 4122. + int &= ~(0xc000 << 48) + int |= 0x8000 << 48 + # Set the version number. + int &= ~(0xf000 << 64) + int |= version << 76 + object.__setattr__(self, 'int', int) + object.__setattr__(self, 'is_safe', is_safe) + + def __getstate__(self): + d = {'int': self.int} + if self.is_safe != SafeUUID.unknown: + # is_safe is a SafeUUID instance. Return just its value, so that + # it can be un-pickled in older Python versions without SafeUUID. + d['is_safe'] = self.is_safe.value + return d + + def __setstate__(self, state): + object.__setattr__(self, 'int', state['int']) + # is_safe was added in 3.7; it is also omitted when it is "unknown" + object.__setattr__(self, 'is_safe', + SafeUUID(state['is_safe']) + if 'is_safe' in state else SafeUUID.unknown) + + def __eq__(self, other): + if isinstance(other, UUID): + return self.int == other.int + return NotImplemented + + # Q. What's the value of being able to sort UUIDs? + # A. Use them as keys in a B-Tree or similar mapping. 
+
+    def __lt__(self, other):
+        if isinstance(other, UUID):
+            return self.int < other.int
+        return NotImplemented
+
+    def __gt__(self, other):
+        if isinstance(other, UUID):
+            return self.int > other.int
+        return NotImplemented
+
+    def __le__(self, other):
+        if isinstance(other, UUID):
+            return self.int <= other.int
+        return NotImplemented
+
+    def __ge__(self, other):
+        if isinstance(other, UUID):
+            return self.int >= other.int
+        return NotImplemented
+
+    def __hash__(self):
+        return hash(self.int)
+
+    def __int__(self):
+        return self.int
+
+    def __repr__(self):
+        return '%s(%r)' % (self.__class__.__name__, str(self))
+
+    def __setattr__(self, name, value):
+        raise TypeError('UUID objects are immutable')
+
+    def __str__(self):
+        hex = '%032x' % self.int
+        return '%s-%s-%s-%s-%s' % (
+            hex[:8], hex[8:12], hex[12:16], hex[16:20], hex[20:])
+
+    @property
+    def bytes(self):
+        return self.int.to_bytes(16, 'big')
+
+    @property
+    def bytes_le(self):
+        bytes = self.bytes
+        return (bytes[4-1::-1] + bytes[6-1:4-1:-1] + bytes[8-1:6-1:-1] +
+                bytes[8:])
+
+    @property
+    def fields(self):
+        return (self.time_low, self.time_mid, self.time_hi_version,
+                self.clock_seq_hi_variant, self.clock_seq_low, self.node)
+
+    @property
+    def time_low(self):
+        return self.int >> 96
+
+    @property
+    def time_mid(self):
+        return (self.int >> 80) & 0xffff
+
+    @property
+    def time_hi_version(self):
+        return (self.int >> 64) & 0xffff
+
+    @property
+    def clock_seq_hi_variant(self):
+        return (self.int >> 56) & 0xff
+
+    @property
+    def clock_seq_low(self):
+        return (self.int >> 48) & 0xff
+
+    @property
+    def time(self):
+        return (((self.time_hi_version & 0x0fff) << 48) |
+                (self.time_mid << 32) | self.time_low)
+
+    @property
+    def clock_seq(self):
+        return (((self.clock_seq_hi_variant & 0x3f) << 8) |
+                self.clock_seq_low)
+
+    @property
+    def node(self):
+        return self.int & 0xffffffffffff
+
+    @property
+    def hex(self):
+        return '%032x' % self.int
+
+    @property
+    def urn(self):
+        return 'urn:uuid:' + str(self)
+
+    @property
+    def variant(self):
+        if not self.int & (0x8000 << 48):
+            return RESERVED_NCS
+        elif not self.int & (0x4000 << 48):
+            return RFC_4122
+        elif not self.int & (0x2000 << 48):
+            return RESERVED_MICROSOFT
+        else:
+            return RESERVED_FUTURE
+
+    @property
+    def version(self):
+        # The version bits are only meaningful for RFC 4122 UUIDs.
+        if self.variant == RFC_4122:
+            return int((self.int >> 76) & 0xf)
+
+
+def _get_command_stdout(command, *args):
+    import io, os, shutil, subprocess
+
+    try:
+        path_dirs = os.environ.get('PATH', os.defpath).split(os.pathsep)
+        path_dirs.extend(['/sbin', '/usr/sbin'])
+        executable = shutil.which(command, path=os.pathsep.join(path_dirs))
+        if executable is None:
+            return None
+        # LC_ALL=C to ensure English output, stderr=DEVNULL to prevent output
+        # on stderr (Note: we don't have an example where the words we search
+        # for are actually localized, but in theory some system could do so.)
+        env = dict(os.environ)
+        env['LC_ALL'] = 'C'
+        # Empty strings will be quoted by popen so we should just omit it
+        if args != ('',):
+            command = (executable, *args)
+        else:
+            command = (executable,)
+        proc = subprocess.Popen(command,
+                                stdout=subprocess.PIPE,
+                                stderr=subprocess.DEVNULL,
+                                env=env)
+        if not proc:
+            return None
+        stdout, stderr = proc.communicate()
+        return io.BytesIO(stdout)
+    except (OSError, subprocess.SubprocessError):
+        return None
+
+
+# For MAC (a.k.a.
IEEE 802, or EUI-48) addresses, the second least significant +# bit of the first octet signifies whether the MAC address is universally (0) +# or locally (1) administered. Network cards from hardware manufacturers will +# always be universally administered to guarantee global uniqueness of the MAC +# address, but any particular machine may have other interfaces which are +# locally administered. An example of the latter is the bridge interface to +# the Touch Bar on MacBook Pros. +# +# This bit works out to be the 42nd bit counting from 1 being the least +# significant, or 1<<41. We'll prefer universally administered MAC addresses +# over locally administered ones since the former are globally unique, but +# we'll return the first of the latter found if that's all the machine has. +# +# See https://en.wikipedia.org/wiki/MAC_address#Universal_vs._local + +def _is_universal(mac): + return not (mac & (1 << 41)) + + +def _find_mac_near_keyword(command, args, keywords, get_word_index): + """Searches a command's output for a MAC address near a keyword. + + Each line of words in the output is case-insensitively searched for + any of the given keywords. Upon a match, get_word_index is invoked + to pick a word from the line, given the index of the match. For + example, lambda i: 0 would get the first word on the line, while + lambda i: i - 1 would get the word preceding the keyword. + """ + stdout = _get_command_stdout(command, args) + if stdout is None: + return None + + first_local_mac = None + for line in stdout: + words = line.lower().rstrip().split() + for i in range(len(words)): + if words[i] in keywords: + try: + word = words[get_word_index(i)] + mac = int(word.replace(_MAC_DELIM, b''), 16) + except (ValueError, IndexError): + # Virtual interfaces, such as those provided by + # VPNs, do not have a colon-delimited MAC address + # as expected, but a 16-byte HWAddr separated by + # dashes. These should be ignored in favor of a + # real MAC address + pass + else: + if _is_universal(mac): + return mac + first_local_mac = first_local_mac or mac + return first_local_mac or None + + +def _parse_mac(word): + # Accept 'HH:HH:HH:HH:HH:HH' MAC address (ex: '52:54:00:9d:0e:67'), + # but reject IPv6 address (ex: 'fe80::5054:ff:fe9' or '123:2:3:4:5:6:7:8'). + # + # Virtual interfaces, such as those provided by VPNs, do not have a + # colon-delimited MAC address as expected, but a 16-byte HWAddr separated + # by dashes. These should be ignored in favor of a real MAC address + parts = word.split(_MAC_DELIM) + if len(parts) != 6: + return + if _MAC_OMITS_LEADING_ZEROES: + # (Only) on AIX the macaddr value given is not prefixed by 0, e.g. + # en0 1500 link#2 fa.bc.de.f7.62.4 110854824 0 160133733 0 0 + # not + # en0 1500 link#2 fa.bc.de.f7.62.04 110854824 0 160133733 0 0 + if not all(1 <= len(part) <= 2 for part in parts): + return + hexstr = b''.join(part.rjust(2, b'0') for part in parts) + else: + if not all(len(part) == 2 for part in parts): + return + hexstr = b''.join(parts) + try: + return int(hexstr, 16) + except ValueError: + return + + +def _find_mac_under_heading(command, args, heading): + """Looks for a MAC address under a heading in a command's output. + + The first line of words in the output is searched for the given + heading. Words at the same word index as the heading in subsequent + lines are then examined to see if they look like MAC addresses. 
+ """ + stdout = _get_command_stdout(command, args) + if stdout is None: + return None + + keywords = stdout.readline().rstrip().split() + try: + column_index = keywords.index(heading) + except ValueError: + return None + + first_local_mac = None + for line in stdout: + words = line.rstrip().split() + try: + word = words[column_index] + except IndexError: + continue + + mac = _parse_mac(word) + if mac is None: + continue + if _is_universal(mac): + return mac + if first_local_mac is None: + first_local_mac = mac + + return first_local_mac + + +# The following functions call external programs to 'get' a macaddr value to +# be used as basis for an uuid +def _ifconfig_getnode(): + """Get the hardware address on Unix by running ifconfig.""" + # This works on Linux ('' or '-a'), Tru64 ('-av'), but not all Unixes. + keywords = (b'hwaddr', b'ether', b'address:', b'lladdr') + for args in ('', '-a', '-av'): + mac = _find_mac_near_keyword('ifconfig', args, keywords, lambda i: i+1) + if mac: + return mac + return None + +def _ip_getnode(): + """Get the hardware address on Unix by running ip.""" + # This works on Linux with iproute2. + mac = _find_mac_near_keyword('ip', 'link', [b'link/ether'], lambda i: i+1) + if mac: + return mac + return None + +def _arp_getnode(): + """Get the hardware address on Unix by running arp.""" + import os, socket + try: + ip_addr = socket.gethostbyname(socket.gethostname()) + except OSError: + return None + + # Try getting the MAC addr from arp based on our IP address (Solaris). + mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: -1) + if mac: + return mac + + # This works on OpenBSD + mac = _find_mac_near_keyword('arp', '-an', [os.fsencode(ip_addr)], lambda i: i+1) + if mac: + return mac + + # This works on Linux, FreeBSD and NetBSD + mac = _find_mac_near_keyword('arp', '-an', [os.fsencode('(%s)' % ip_addr)], + lambda i: i+2) + # Return None instead of 0. + if mac: + return mac + return None + +def _lanscan_getnode(): + """Get the hardware address on Unix by running lanscan.""" + # This might work on HP-UX. + return _find_mac_near_keyword('lanscan', '-ai', [b'lan0'], lambda i: 0) + +def _netstat_getnode(): + """Get the hardware address on Unix by running netstat.""" + # This works on AIX and might work on Tru64 UNIX. 
+ return _find_mac_under_heading('netstat', '-ian', b'Address') + +def _ipconfig_getnode(): + """[DEPRECATED] Get the hardware address on Windows.""" + # bpo-40501: UuidCreateSequential() is now the only supported approach + return _windll_getnode() + +def _netbios_getnode(): + """[DEPRECATED] Get the hardware address on Windows.""" + # bpo-40501: UuidCreateSequential() is now the only supported approach + return _windll_getnode() + + +# Import optional C extension at toplevel, to help disabling it when testing +try: + import _uuid + _generate_time_safe = getattr(_uuid, "generate_time_safe", None) + _UuidCreate = getattr(_uuid, "UuidCreate", None) + _has_uuid_generate_time_safe = _uuid.has_uuid_generate_time_safe +except ImportError: + _uuid = None + _generate_time_safe = None + _UuidCreate = None + _has_uuid_generate_time_safe = None + + +def _load_system_functions(): + """[DEPRECATED] Platform-specific functions loaded at import time""" + + +def _unix_getnode(): + """Get the hardware address on Unix using the _uuid extension module.""" + if _generate_time_safe: + uuid_time, _ = _generate_time_safe() + return UUID(bytes=uuid_time).node + +def _windll_getnode(): + """Get the hardware address on Windows using the _uuid extension module.""" + if _UuidCreate: + uuid_bytes = _UuidCreate() + return UUID(bytes_le=uuid_bytes).node + +def _random_getnode(): + """Get a random node ID.""" + # RFC 4122, $4.1.6 says "For systems with no IEEE address, a randomly or + # pseudo-randomly generated value may be used; see Section 4.5. The + # multicast bit must be set in such addresses, in order that they will + # never conflict with addresses obtained from network cards." + # + # The "multicast bit" of a MAC address is defined to be "the least + # significant bit of the first octet". This works out to be the 41st bit + # counting from 1 being the least significant bit, or 1<<40. + # + # See https://en.wikipedia.org/wiki/MAC_address#Unicast_vs._multicast + import random + return random.getrandbits(48) | (1 << 40) + + +# _OS_GETTERS, when known, are targeted for a specific OS or platform. +# The order is by 'common practice' on the specified platform. +# Note: 'posix' and 'windows' _OS_GETTERS are prefixed by a dll/dlload() method +# which, when successful, means none of these "external" methods are called. +# _GETTERS is (also) used by test_uuid.py to SkipUnless(), e.g., +# @unittest.skipUnless(_uuid._ifconfig_getnode in _uuid._GETTERS, ...) +if _LINUX: + _OS_GETTERS = [_ip_getnode, _ifconfig_getnode] +elif sys.platform == 'darwin': + _OS_GETTERS = [_ifconfig_getnode, _arp_getnode, _netstat_getnode] +elif sys.platform == 'win32': + # bpo-40201: _windll_getnode will always succeed, so these are not needed + _OS_GETTERS = [] +elif _AIX: + _OS_GETTERS = [_netstat_getnode] +else: + _OS_GETTERS = [_ifconfig_getnode, _ip_getnode, _arp_getnode, + _netstat_getnode, _lanscan_getnode] +if os.name == 'posix': + _GETTERS = [_unix_getnode] + _OS_GETTERS +elif os.name == 'nt': + _GETTERS = [_windll_getnode] + _OS_GETTERS +else: + _GETTERS = _OS_GETTERS + +_node = None + +def getnode(): + """Get the hardware address as a 48-bit positive integer. + + The first time this runs, it may launch a separate program, which could + be quite slow. If all attempts to obtain the hardware address fail, we + choose a random 48-bit number with its eighth bit set to 1 as recommended + in RFC 4122. 
+ """ + global _node + if _node is not None: + return _node + + for getter in _GETTERS + [_random_getnode]: + try: + _node = getter() + except: + continue + if (_node is not None) and (0 <= _node < (1 << 48)): + return _node + assert False, '_random_getnode() returned invalid value: {}'.format(_node) + + +_last_timestamp = None + +def uuid1(node=None, clock_seq=None): + """Generate a UUID from a host ID, sequence number, and the current time. + If 'node' is not given, getnode() is used to obtain the hardware + address. If 'clock_seq' is given, it is used as the sequence number; + otherwise a random 14-bit sequence number is chosen.""" + + # When the system provides a version-1 UUID generator, use it (but don't + # use UuidCreate here because its UUIDs don't conform to RFC 4122). + if _generate_time_safe is not None and node is clock_seq is None: + uuid_time, safely_generated = _generate_time_safe() + try: + is_safe = SafeUUID(safely_generated) + except ValueError: + is_safe = SafeUUID.unknown + return UUID(bytes=uuid_time, is_safe=is_safe) + + global _last_timestamp + import time + nanoseconds = time.time_ns() + # 0x01b21dd213814000 is the number of 100-ns intervals between the + # UUID epoch 1582-10-15 00:00:00 and the Unix epoch 1970-01-01 00:00:00. + timestamp = nanoseconds // 100 + 0x01b21dd213814000 + if _last_timestamp is not None and timestamp <= _last_timestamp: + timestamp = _last_timestamp + 1 + _last_timestamp = timestamp + if clock_seq is None: + import random + clock_seq = random.getrandbits(14) # instead of stable storage + time_low = timestamp & 0xffffffff + time_mid = (timestamp >> 32) & 0xffff + time_hi_version = (timestamp >> 48) & 0x0fff + clock_seq_low = clock_seq & 0xff + clock_seq_hi_variant = (clock_seq >> 8) & 0x3f + if node is None: + node = getnode() + return UUID(fields=(time_low, time_mid, time_hi_version, + clock_seq_hi_variant, clock_seq_low, node), version=1) + +def uuid3(namespace, name): + """Generate a UUID from the MD5 hash of a namespace UUID and a name.""" + from hashlib import md5 + digest = md5( + namespace.bytes + bytes(name, "utf-8"), + usedforsecurity=False + ).digest() + return UUID(bytes=digest[:16], version=3) + +def uuid4(): + """Generate a random UUID.""" + return UUID(bytes=os.urandom(16), version=4) + +def uuid5(namespace, name): + """Generate a UUID from the SHA-1 hash of a namespace UUID and a name.""" + from hashlib import sha1 + hash = sha1(namespace.bytes + bytes(name, "utf-8")).digest() + return UUID(bytes=hash[:16], version=5) + +# The following standard UUIDs are for use with uuid3() or uuid5(). + +NAMESPACE_DNS = UUID('6ba7b810-9dad-11d1-80b4-00c04fd430c8') +NAMESPACE_URL = UUID('6ba7b811-9dad-11d1-80b4-00c04fd430c8') +NAMESPACE_OID = UUID('6ba7b812-9dad-11d1-80b4-00c04fd430c8') +NAMESPACE_X500 = UUID('6ba7b814-9dad-11d1-80b4-00c04fd430c8') diff --git a/llava/lib/python3.10/xdrlib.py b/llava/lib/python3.10/xdrlib.py new file mode 100644 index 0000000000000000000000000000000000000000..d6e1aeb527266af97f0338806a673894ec2f3c64 --- /dev/null +++ b/llava/lib/python3.10/xdrlib.py @@ -0,0 +1,241 @@ +"""Implements (a subset of) Sun XDR -- eXternal Data Representation. + +See: RFC 1014 + +""" + +import struct +from io import BytesIO +from functools import wraps + +__all__ = ["Error", "Packer", "Unpacker", "ConversionError"] + +# exceptions +class Error(Exception): + """Exception class for this module. 
Use: + + except xdrlib.Error as var: + # var has the Error instance for the exception + + Public ivars: + msg -- contains the message + + """ + def __init__(self, msg): + self.msg = msg + def __repr__(self): + return repr(self.msg) + def __str__(self): + return str(self.msg) + + +class ConversionError(Error): + pass + +def raise_conversion_error(function): + """ Wrap any raised struct.errors in a ConversionError. """ + + @wraps(function) + def result(self, value): + try: + return function(self, value) + except struct.error as e: + raise ConversionError(e.args[0]) from None + return result + + +class Packer: + """Pack various data representations into a buffer.""" + + def __init__(self): + self.reset() + + def reset(self): + self.__buf = BytesIO() + + def get_buffer(self): + return self.__buf.getvalue() + # backwards compatibility + get_buf = get_buffer + + @raise_conversion_error + def pack_uint(self, x): + self.__buf.write(struct.pack('>L', x)) + + @raise_conversion_error + def pack_int(self, x): + self.__buf.write(struct.pack('>l', x)) + + pack_enum = pack_int + + def pack_bool(self, x): + if x: self.__buf.write(b'\0\0\0\1') + else: self.__buf.write(b'\0\0\0\0') + + def pack_uhyper(self, x): + try: + self.pack_uint(x>>32 & 0xffffffff) + except (TypeError, struct.error) as e: + raise ConversionError(e.args[0]) from None + try: + self.pack_uint(x & 0xffffffff) + except (TypeError, struct.error) as e: + raise ConversionError(e.args[0]) from None + + pack_hyper = pack_uhyper + + @raise_conversion_error + def pack_float(self, x): + self.__buf.write(struct.pack('>f', x)) + + @raise_conversion_error + def pack_double(self, x): + self.__buf.write(struct.pack('>d', x)) + + def pack_fstring(self, n, s): + if n < 0: + raise ValueError('fstring size must be nonnegative') + data = s[:n] + n = ((n+3)//4)*4 + data = data + (n - len(data)) * b'\0' + self.__buf.write(data) + + pack_fopaque = pack_fstring + + def pack_string(self, s): + n = len(s) + self.pack_uint(n) + self.pack_fstring(n, s) + + pack_opaque = pack_string + pack_bytes = pack_string + + def pack_list(self, list, pack_item): + for item in list: + self.pack_uint(1) + pack_item(item) + self.pack_uint(0) + + def pack_farray(self, n, list, pack_item): + if len(list) != n: + raise ValueError('wrong array size') + for item in list: + pack_item(item) + + def pack_array(self, list, pack_item): + n = len(list) + self.pack_uint(n) + self.pack_farray(n, list, pack_item) + + + +class Unpacker: + """Unpacks various data representations from the given buffer.""" + + def __init__(self, data): + self.reset(data) + + def reset(self, data): + self.__buf = data + self.__pos = 0 + + def get_position(self): + return self.__pos + + def set_position(self, position): + self.__pos = position + + def get_buffer(self): + return self.__buf + + def done(self): + if self.__pos < len(self.__buf): + raise Error('unextracted data remains') + + def unpack_uint(self): + i = self.__pos + self.__pos = j = i+4 + data = self.__buf[i:j] + if len(data) < 4: + raise EOFError + return struct.unpack('>L', data)[0] + + def unpack_int(self): + i = self.__pos + self.__pos = j = i+4 + data = self.__buf[i:j] + if len(data) < 4: + raise EOFError + return struct.unpack('>l', data)[0] + + unpack_enum = unpack_int + + def unpack_bool(self): + return bool(self.unpack_int()) + + def unpack_uhyper(self): + hi = self.unpack_uint() + lo = self.unpack_uint() + return int(hi)<<32 | lo + + def unpack_hyper(self): + x = self.unpack_uhyper() + if x >= 0x8000000000000000: + x = x - 0x10000000000000000 + 
return x + + def unpack_float(self): + i = self.__pos + self.__pos = j = i+4 + data = self.__buf[i:j] + if len(data) < 4: + raise EOFError + return struct.unpack('>f', data)[0] + + def unpack_double(self): + i = self.__pos + self.__pos = j = i+8 + data = self.__buf[i:j] + if len(data) < 8: + raise EOFError + return struct.unpack('>d', data)[0] + + def unpack_fstring(self, n): + if n < 0: + raise ValueError('fstring size must be nonnegative') + i = self.__pos + j = i + (n+3)//4*4 + if j > len(self.__buf): + raise EOFError + self.__pos = j + return self.__buf[i:i+n] + + unpack_fopaque = unpack_fstring + + def unpack_string(self): + n = self.unpack_uint() + return self.unpack_fstring(n) + + unpack_opaque = unpack_string + unpack_bytes = unpack_string + + def unpack_list(self, unpack_item): + list = [] + while 1: + x = self.unpack_uint() + if x == 0: break + if x != 1: + raise ConversionError('0 or 1 expected, got %r' % (x,)) + item = unpack_item() + list.append(item) + return list + + def unpack_farray(self, n, unpack_item): + list = [] + for i in range(n): + list.append(unpack_item()) + return list + + def unpack_array(self, unpack_item): + n = self.unpack_uint() + return self.unpack_farray(n, unpack_item) diff --git a/llava/lib/python3.10/zipfile.py b/llava/lib/python3.10/zipfile.py new file mode 100644 index 0000000000000000000000000000000000000000..4cd44fb1e4a8f0a3774f4a18ad30d4a41ab08e3d --- /dev/null +++ b/llava/lib/python3.10/zipfile.py @@ -0,0 +1,2510 @@ +""" +Read and write ZIP files. + +XXX references to utf-8 need further investigation. +""" +import binascii +import importlib.util +import io +import itertools +import os +import posixpath +import re +import shutil +import stat +import struct +import sys +import threading +import time +import contextlib +import pathlib + +try: + import zlib # We may need its compression method + crc32 = zlib.crc32 +except ImportError: + zlib = None + crc32 = binascii.crc32 + +try: + import bz2 # We may need its compression method +except ImportError: + bz2 = None + +try: + import lzma # We may need its compression method +except ImportError: + lzma = None + +__all__ = ["BadZipFile", "BadZipfile", "error", + "ZIP_STORED", "ZIP_DEFLATED", "ZIP_BZIP2", "ZIP_LZMA", + "is_zipfile", "ZipInfo", "ZipFile", "PyZipFile", "LargeZipFile", + "Path"] + +class BadZipFile(Exception): + pass + + +class LargeZipFile(Exception): + """ + Raised when writing a zipfile, the zipfile requires ZIP64 extensions + and those extensions are disabled. + """ + +error = BadZipfile = BadZipFile # Pre-3.2 compatibility names + + +ZIP64_LIMIT = (1 << 31) - 1 +ZIP_FILECOUNT_LIMIT = (1 << 16) - 1 +ZIP_MAX_COMMENT = (1 << 16) - 1 + +# constants for Zip file compression methods +ZIP_STORED = 0 +ZIP_DEFLATED = 8 +ZIP_BZIP2 = 12 +ZIP_LZMA = 14 +# Other ZIP compression methods not supported + +DEFAULT_VERSION = 20 +ZIP64_VERSION = 45 +BZIP2_VERSION = 46 +LZMA_VERSION = 63 +# we recognize (but not necessarily support) all features up to that version +MAX_EXTRACT_VERSION = 63 + +# Below are some formats and associated data for reading/writing headers using +# the struct module. 
The names and structures of headers/records are those used +# in the PKWARE description of the ZIP file format: +# http://www.pkware.com/documents/casestudies/APPNOTE.TXT +# (URL valid as of January 2008) + +# The "end of central directory" structure, magic number, size, and indices +# (section V.I in the format document) +structEndArchive = b"<4s4H2LH" +stringEndArchive = b"PK\005\006" +sizeEndCentDir = struct.calcsize(structEndArchive) + +_ECD_SIGNATURE = 0 +_ECD_DISK_NUMBER = 1 +_ECD_DISK_START = 2 +_ECD_ENTRIES_THIS_DISK = 3 +_ECD_ENTRIES_TOTAL = 4 +_ECD_SIZE = 5 +_ECD_OFFSET = 6 +_ECD_COMMENT_SIZE = 7 +# These last two indices are not part of the structure as defined in the +# spec, but they are used internally by this module as a convenience +_ECD_COMMENT = 8 +_ECD_LOCATION = 9 + +# The "central directory" structure, magic number, size, and indices +# of entries in the structure (section V.F in the format document) +structCentralDir = "<4s4B4HL2L5H2L" +stringCentralDir = b"PK\001\002" +sizeCentralDir = struct.calcsize(structCentralDir) + +# indexes of entries in the central directory structure +_CD_SIGNATURE = 0 +_CD_CREATE_VERSION = 1 +_CD_CREATE_SYSTEM = 2 +_CD_EXTRACT_VERSION = 3 +_CD_EXTRACT_SYSTEM = 4 +_CD_FLAG_BITS = 5 +_CD_COMPRESS_TYPE = 6 +_CD_TIME = 7 +_CD_DATE = 8 +_CD_CRC = 9 +_CD_COMPRESSED_SIZE = 10 +_CD_UNCOMPRESSED_SIZE = 11 +_CD_FILENAME_LENGTH = 12 +_CD_EXTRA_FIELD_LENGTH = 13 +_CD_COMMENT_LENGTH = 14 +_CD_DISK_NUMBER_START = 15 +_CD_INTERNAL_FILE_ATTRIBUTES = 16 +_CD_EXTERNAL_FILE_ATTRIBUTES = 17 +_CD_LOCAL_HEADER_OFFSET = 18 + +# The "local file header" structure, magic number, size, and indices +# (section V.A in the format document) +structFileHeader = "<4s2B4HL2L2H" +stringFileHeader = b"PK\003\004" +sizeFileHeader = struct.calcsize(structFileHeader) + +_FH_SIGNATURE = 0 +_FH_EXTRACT_VERSION = 1 +_FH_EXTRACT_SYSTEM = 2 +_FH_GENERAL_PURPOSE_FLAG_BITS = 3 +_FH_COMPRESSION_METHOD = 4 +_FH_LAST_MOD_TIME = 5 +_FH_LAST_MOD_DATE = 6 +_FH_CRC = 7 +_FH_COMPRESSED_SIZE = 8 +_FH_UNCOMPRESSED_SIZE = 9 +_FH_FILENAME_LENGTH = 10 +_FH_EXTRA_FIELD_LENGTH = 11 + +# The "Zip64 end of central directory locator" structure, magic number, and size +structEndArchive64Locator = "<4sLQL" +stringEndArchive64Locator = b"PK\x06\x07" +sizeEndCentDir64Locator = struct.calcsize(structEndArchive64Locator) + +# The "Zip64 end of central directory" record, magic number, size, and indices +# (section V.G in the format document) +structEndArchive64 = "<4sQ2H2L4Q" +stringEndArchive64 = b"PK\x06\x06" +sizeEndCentDir64 = struct.calcsize(structEndArchive64) + +_CD64_SIGNATURE = 0 +_CD64_DIRECTORY_RECSIZE = 1 +_CD64_CREATE_VERSION = 2 +_CD64_EXTRACT_VERSION = 3 +_CD64_DISK_NUMBER = 4 +_CD64_DISK_NUMBER_START = 5 +_CD64_NUMBER_ENTRIES_THIS_DISK = 6 +_CD64_NUMBER_ENTRIES_TOTAL = 7 +_CD64_DIRECTORY_SIZE = 8 +_CD64_OFFSET_START_CENTDIR = 9 + +_DD_SIGNATURE = 0x08074b50 + +_EXTRA_FIELD_STRUCT = struct.Struct(' 1: + raise BadZipFile("zipfiles that span multiple disks are not supported") + + # Assume no 'zip64 extensible data' + fpin.seek(offset - sizeEndCentDir64Locator - sizeEndCentDir64, 2) + data = fpin.read(sizeEndCentDir64) + if len(data) != sizeEndCentDir64: + return endrec + sig, sz, create_version, read_version, disk_num, disk_dir, \ + dircount, dircount2, dirsize, diroffset = \ + struct.unpack(structEndArchive64, data) + if sig != stringEndArchive64: + return endrec + + # Update the original endrec using data from the ZIP64 record + endrec[_ECD_SIGNATURE] = sig + endrec[_ECD_DISK_NUMBER] = disk_num 
+ endrec[_ECD_DISK_START] = disk_dir + endrec[_ECD_ENTRIES_THIS_DISK] = dircount + endrec[_ECD_ENTRIES_TOTAL] = dircount2 + endrec[_ECD_SIZE] = dirsize + endrec[_ECD_OFFSET] = diroffset + return endrec + + +def _EndRecData(fpin): + """Return data from the "End of Central Directory" record, or None. + + The data is a list of the nine items in the ZIP "End of central dir" + record followed by a tenth item, the file seek offset of this record.""" + + # Determine file size + fpin.seek(0, 2) + filesize = fpin.tell() + + # Check to see if this is ZIP file with no archive comment (the + # "end of central directory" structure should be the last item in the + # file if this is the case). + try: + fpin.seek(-sizeEndCentDir, 2) + except OSError: + return None + data = fpin.read() + if (len(data) == sizeEndCentDir and + data[0:4] == stringEndArchive and + data[-2:] == b"\000\000"): + # the signature is correct and there's no comment, unpack structure + endrec = struct.unpack(structEndArchive, data) + endrec=list(endrec) + + # Append a blank comment and record start offset + endrec.append(b"") + endrec.append(filesize - sizeEndCentDir) + + # Try to read the "Zip64 end of central directory" structure + return _EndRecData64(fpin, -sizeEndCentDir, endrec) + + # Either this is not a ZIP file, or it is a ZIP file with an archive + # comment. Search the end of the file for the "end of central directory" + # record signature. The comment is the last item in the ZIP file and may be + # up to 64K long. It is assumed that the "end of central directory" magic + # number does not appear in the comment. + maxCommentStart = max(filesize - (1 << 16) - sizeEndCentDir, 0) + fpin.seek(maxCommentStart, 0) + data = fpin.read() + start = data.rfind(stringEndArchive) + if start >= 0: + # found the magic number; attempt to unpack and interpret + recData = data[start:start+sizeEndCentDir] + if len(recData) != sizeEndCentDir: + # Zip file is corrupted. + return None + endrec = list(struct.unpack(structEndArchive, recData)) + commentSize = endrec[_ECD_COMMENT_SIZE] #as claimed by the zip file + comment = data[start+sizeEndCentDir:start+sizeEndCentDir+commentSize] + endrec.append(comment) + endrec.append(maxCommentStart + start) + + # Try to read the "Zip64 end of central directory" structure + return _EndRecData64(fpin, maxCommentStart + start - filesize, + endrec) + + # Unable to find a valid end of central directory structure + return None + + +class ZipInfo (object): + """Class with attributes describing each file in the ZIP archive.""" + + __slots__ = ( + 'orig_filename', + 'filename', + 'date_time', + 'compress_type', + '_compresslevel', + 'comment', + 'extra', + 'create_system', + 'create_version', + 'extract_version', + 'reserved', + 'flag_bits', + 'volume', + 'internal_attr', + 'external_attr', + 'header_offset', + 'CRC', + 'compress_size', + 'file_size', + '_raw_time', + '_end_offset', + ) + + def __init__(self, filename="NoName", date_time=(1980,1,1,0,0,0)): + self.orig_filename = filename # Original file name in archive + + # Terminate the file name at the first null byte. Null bytes in file + # names are used as tricks by viruses in archives. + null_byte = filename.find(chr(0)) + if null_byte >= 0: + filename = filename[0:null_byte] + # This is used to ensure paths in generated ZIP files always use + # forward slashes as the directory separator, as required by the + # ZIP format specification. 
+ if os.sep != "/" and os.sep in filename: + filename = filename.replace(os.sep, "/") + + self.filename = filename # Normalized file name + self.date_time = date_time # year, month, day, hour, min, sec + + if date_time[0] < 1980: + raise ValueError('ZIP does not support timestamps before 1980') + + # Standard values: + self.compress_type = ZIP_STORED # Type of compression for the file + self._compresslevel = None # Level for the compressor + self.comment = b"" # Comment for each file + self.extra = b"" # ZIP extra data + if sys.platform == 'win32': + self.create_system = 0 # System which created ZIP archive + else: + # Assume everything else is unix-y + self.create_system = 3 # System which created ZIP archive + self.create_version = DEFAULT_VERSION # Version which created ZIP archive + self.extract_version = DEFAULT_VERSION # Version needed to extract archive + self.reserved = 0 # Must be zero + self.flag_bits = 0 # ZIP flag bits + self.volume = 0 # Volume number of file header + self.internal_attr = 0 # Internal attributes + self.external_attr = 0 # External file attributes + self.compress_size = 0 # Size of the compressed file + self.file_size = 0 # Size of the uncompressed file + self._end_offset = None # Start of the next local header or central directory + # Other attributes are set by class ZipFile: + # header_offset Byte offset to the file header + # CRC CRC-32 of the uncompressed file + + def __repr__(self): + result = ['<%s filename=%r' % (self.__class__.__name__, self.filename)] + if self.compress_type != ZIP_STORED: + result.append(' compress_type=%s' % + compressor_names.get(self.compress_type, + self.compress_type)) + hi = self.external_attr >> 16 + lo = self.external_attr & 0xFFFF + if hi: + result.append(' filemode=%r' % stat.filemode(hi)) + if lo: + result.append(' external_attr=%#x' % lo) + isdir = self.is_dir() + if not isdir or self.file_size: + result.append(' file_size=%r' % self.file_size) + if ((not isdir or self.compress_size) and + (self.compress_type != ZIP_STORED or + self.file_size != self.compress_size)): + result.append(' compress_size=%r' % self.compress_size) + result.append('>') + return ''.join(result) + + def FileHeader(self, zip64=None): + """Return the per-file header as a bytes object.""" + dt = self.date_time + dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] + dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) + if self.flag_bits & 0x08: + # Set these to zero because we write them after the file data + CRC = compress_size = file_size = 0 + else: + CRC = self.CRC + compress_size = self.compress_size + file_size = self.file_size + + extra = self.extra + + min_version = 0 + if zip64 is None: + zip64 = file_size > ZIP64_LIMIT or compress_size > ZIP64_LIMIT + if zip64: + fmt = ' ZIP64_LIMIT or compress_size > ZIP64_LIMIT: + if not zip64: + raise LargeZipFile("Filesize would require ZIP64 extensions") + # File is larger than what fits into a 4 byte integer, + # fall back to the ZIP64 extension + file_size = 0xffffffff + compress_size = 0xffffffff + min_version = ZIP64_VERSION + + if self.compress_type == ZIP_BZIP2: + min_version = max(BZIP2_VERSION, min_version) + elif self.compress_type == ZIP_LZMA: + min_version = max(LZMA_VERSION, min_version) + + self.extract_version = max(min_version, self.extract_version) + self.create_version = max(min_version, self.create_version) + filename, flag_bits = self._encodeFilenameFlags() + header = struct.pack(structFileHeader, stringFileHeader, + self.extract_version, self.reserved, flag_bits, + self.compress_type, 
dostime, dosdate, CRC, + compress_size, file_size, + len(filename), len(extra)) + return header + filename + extra + + def _encodeFilenameFlags(self): + try: + return self.filename.encode('ascii'), self.flag_bits + except UnicodeEncodeError: + return self.filename.encode('utf-8'), self.flag_bits | 0x800 + + def _decodeExtra(self): + # Try to decode the extra field. + extra = self.extra + unpack = struct.unpack + while len(extra) >= 4: + tp, ln = unpack(' len(extra): + raise BadZipFile("Corrupt extra field %04x (size=%d)" % (tp, ln)) + if tp == 0x0001: + data = extra[4:ln+4] + # ZIP64 extension (large files and/or large archives) + try: + if self.file_size in (0xFFFF_FFFF_FFFF_FFFF, 0xFFFF_FFFF): + field = "File size" + self.file_size, = unpack(' 2107: + date_time = (2107, 12, 31, 23, 59, 59) + # Create ZipInfo instance to store file information + if arcname is None: + arcname = filename + arcname = os.path.normpath(os.path.splitdrive(arcname)[1]) + while arcname[0] in (os.sep, os.altsep): + arcname = arcname[1:] + if isdir: + arcname += '/' + zinfo = cls(arcname, date_time) + zinfo.external_attr = (st.st_mode & 0xFFFF) << 16 # Unix attributes + if isdir: + zinfo.file_size = 0 + zinfo.external_attr |= 0x10 # MS-DOS directory flag + else: + zinfo.file_size = st.st_size + + return zinfo + + def is_dir(self): + """Return True if this archive member is a directory.""" + return self.filename[-1] == '/' + + +# ZIP encryption uses the CRC32 one-byte primitive for scrambling some +# internal keys. We noticed that a direct implementation is faster than +# relying on binascii.crc32(). + +_crctable = None +def _gen_crc(crc): + for j in range(8): + if crc & 1: + crc = (crc >> 1) ^ 0xEDB88320 + else: + crc >>= 1 + return crc + +# ZIP supports a password-based form of encryption. Even though known +# plaintext attacks have been found against it, it is still useful +# to be able to get data out of such a file. +# +# Usage: +# zd = _ZipDecrypter(mypwd) +# plain_bytes = zd(cypher_bytes) + +def _ZipDecrypter(pwd): + key0 = 305419896 + key1 = 591751049 + key2 = 878082192 + + global _crctable + if _crctable is None: + _crctable = list(map(_gen_crc, range(256))) + crctable = _crctable + + def crc32(ch, crc): + """Compute the CRC32 primitive on one byte.""" + return (crc >> 8) ^ crctable[(crc ^ ch) & 0xFF] + + def update_keys(c): + nonlocal key0, key1, key2 + key0 = crc32(c, key0) + key1 = (key1 + (key0 & 0xFF)) & 0xFFFFFFFF + key1 = (key1 * 134775813 + 1) & 0xFFFFFFFF + key2 = crc32(key1 >> 24, key2) + + for p in pwd: + update_keys(p) + + def decrypter(data): + """Decrypt a bytes object.""" + result = bytearray() + append = result.append + for c in data: + k = key2 | 2 + c ^= ((k * (k^1)) >> 8) & 0xFF + update_keys(c) + append(c) + return bytes(result) + + return decrypter + + +class LZMACompressor: + + def __init__(self): + self._comp = None + + def _init(self): + props = lzma._encode_filter_properties({'id': lzma.FILTER_LZMA1}) + self._comp = lzma.LZMACompressor(lzma.FORMAT_RAW, filters=[ + lzma._decode_filter_properties(lzma.FILTER_LZMA1, props) + ]) + return struct.pack('> 8) & 0xff + else: + # compare against the CRC otherwise + check_byte = (zipinfo.CRC >> 24) & 0xff + h = self._init_decrypter() + if h != check_byte: + raise RuntimeError("Bad password for file %r" % zipinfo.orig_filename) + + + def _init_decrypter(self): + self._decrypter = _ZipDecrypter(self._pwd) + # The first 12 bytes in the cypher stream is an encryption header + # used to strengthen the algorithm. 
The first 11 bytes are + # completely random, while the 12th contains the MSB of the CRC, + # or the MSB of the file time depending on the header type + # and is used to check the correctness of the password. + header = self._fileobj.read(12) + self._compress_left -= 12 + return self._decrypter(header)[11] + + def __repr__(self): + result = ['<%s.%s' % (self.__class__.__module__, + self.__class__.__qualname__)] + if not self.closed: + result.append(' name=%r mode=%r' % (self.name, self.mode)) + if self._compress_type != ZIP_STORED: + result.append(' compress_type=%s' % + compressor_names.get(self._compress_type, + self._compress_type)) + else: + result.append(' [closed]') + result.append('>') + return ''.join(result) + + def readline(self, limit=-1): + """Read and return a line from the stream. + + If limit is specified, at most limit bytes will be read. + """ + + if limit < 0: + # Shortcut common case - newline found in buffer. + i = self._readbuffer.find(b'\n', self._offset) + 1 + if i > 0: + line = self._readbuffer[self._offset: i] + self._offset = i + return line + + return io.BufferedIOBase.readline(self, limit) + + def peek(self, n=1): + """Returns buffered bytes without advancing the position.""" + if n > len(self._readbuffer) - self._offset: + chunk = self.read(n) + if len(chunk) > self._offset: + self._readbuffer = chunk + self._readbuffer[self._offset:] + self._offset = 0 + else: + self._offset -= len(chunk) + + # Return up to 512 bytes to reduce allocation overhead for tight loops. + return self._readbuffer[self._offset: self._offset + 512] + + def readable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") + return True + + def read(self, n=-1): + """Read and return up to n bytes. + If the argument is omitted, None, or negative, data is read and returned until EOF is reached. + """ + if self.closed: + raise ValueError("read from closed file.") + if n is None or n < 0: + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + while not self._eof: + buf += self._read1(self.MAX_N) + return buf + + end = n + self._offset + if end < len(self._readbuffer): + buf = self._readbuffer[self._offset:end] + self._offset = end + return buf + + n = end - len(self._readbuffer) + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + while n > 0 and not self._eof: + data = self._read1(n) + if n < len(data): + self._readbuffer = data + self._offset = n + buf += data[:n] + break + buf += data + n -= len(data) + return buf + + def _update_crc(self, newdata): + # Update the CRC using the given data. 
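+        # crc32 here is the module-level alias (zlib.crc32 when zlib is
+        # available, binascii.crc32 otherwise), so the value accumulates
+        # incrementally; the single CRC stored for the member can only be
+        # verified once EOF is reached, which _read1 signals via self._eof.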
+ if self._expected_crc is None: + # No need to compute the CRC if we don't have a reference value + return + self._running_crc = crc32(newdata, self._running_crc) + # Check the CRC if we're at the end of the file + if self._eof and self._running_crc != self._expected_crc: + raise BadZipFile("Bad CRC-32 for file %r" % self.name) + + def read1(self, n): + """Read up to n bytes with at most one read() system call.""" + + if n is None or n < 0: + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + while not self._eof: + data = self._read1(self.MAX_N) + if data: + buf += data + break + return buf + + end = n + self._offset + if end < len(self._readbuffer): + buf = self._readbuffer[self._offset:end] + self._offset = end + return buf + + n = end - len(self._readbuffer) + buf = self._readbuffer[self._offset:] + self._readbuffer = b'' + self._offset = 0 + if n > 0: + while not self._eof: + data = self._read1(n) + if n < len(data): + self._readbuffer = data + self._offset = n + buf += data[:n] + break + if data: + buf += data + break + return buf + + def _read1(self, n): + # Read up to n compressed bytes with at most one read() system call, + # decrypt and decompress them. + if self._eof or n <= 0: + return b'' + + # Read from file. + if self._compress_type == ZIP_DEFLATED: + ## Handle unconsumed data. + data = self._decompressor.unconsumed_tail + if n > len(data): + data += self._read2(n - len(data)) + else: + data = self._read2(n) + + if self._compress_type == ZIP_STORED: + self._eof = self._compress_left <= 0 + elif self._compress_type == ZIP_DEFLATED: + n = max(n, self.MIN_READ_SIZE) + data = self._decompressor.decompress(data, n) + self._eof = (self._decompressor.eof or + self._compress_left <= 0 and + not self._decompressor.unconsumed_tail) + if self._eof: + data += self._decompressor.flush() + else: + data = self._decompressor.decompress(data) + self._eof = self._decompressor.eof or self._compress_left <= 0 + + data = data[:self._left] + self._left -= len(data) + if self._left <= 0: + self._eof = True + self._update_crc(data) + return data + + def _read2(self, n): + if self._compress_left <= 0: + return b'' + + n = max(n, self.MIN_READ_SIZE) + n = min(n, self._compress_left) + + data = self._fileobj.read(n) + self._compress_left -= len(data) + if not data: + raise EOFError + + if self._decrypter is not None: + data = self._decrypter(data) + return data + + def close(self): + try: + if self._close_fileobj: + self._fileobj.close() + finally: + super().close() + + def seekable(self): + if self.closed: + raise ValueError("I/O operation on closed file.") + return self._seekable + + def seek(self, offset, whence=0): + if self.closed: + raise ValueError("seek on closed file.") + if not self._seekable: + raise io.UnsupportedOperation("underlying stream is not seekable") + curr_pos = self.tell() + if whence == 0: # Seek from start of file + new_pos = offset + elif whence == 1: # Seek from current position + new_pos = curr_pos + offset + elif whence == 2: # Seek from EOF + new_pos = self._orig_file_size + offset + else: + raise ValueError("whence must be os.SEEK_SET (0), " + "os.SEEK_CUR (1), or os.SEEK_END (2)") + + if new_pos > self._orig_file_size: + new_pos = self._orig_file_size + + if new_pos < 0: + new_pos = 0 + + read_offset = new_pos - curr_pos + buff_offset = read_offset + self._offset + + if buff_offset >= 0 and buff_offset < len(self._readbuffer): + # Just move the _offset index if the new position is in the _readbuffer + self._offset = buff_offset + 
read_offset = 0 + elif read_offset < 0: + # Position is before the current position. Reset the ZipExtFile + self._fileobj.seek(self._orig_compress_start) + self._running_crc = self._orig_start_crc + self._compress_left = self._orig_compress_size + self._left = self._orig_file_size + self._readbuffer = b'' + self._offset = 0 + self._decompressor = _get_decompressor(self._compress_type) + self._eof = False + read_offset = new_pos + if self._decrypter is not None: + self._init_decrypter() + + while read_offset > 0: + read_len = min(self.MAX_SEEK_READ, read_offset) + self.read(read_len) + read_offset -= read_len + + return self.tell() + + def tell(self): + if self.closed: + raise ValueError("tell on closed file.") + if not self._seekable: + raise io.UnsupportedOperation("underlying stream is not seekable") + filepos = self._orig_file_size - self._left - len(self._readbuffer) + self._offset + return filepos + + +class _ZipWriteFile(io.BufferedIOBase): + def __init__(self, zf, zinfo, zip64): + self._zinfo = zinfo + self._zip64 = zip64 + self._zipfile = zf + self._compressor = _get_compressor(zinfo.compress_type, + zinfo._compresslevel) + self._file_size = 0 + self._compress_size = 0 + self._crc = 0 + + @property + def _fileobj(self): + return self._zipfile.fp + + def writable(self): + return True + + def write(self, data): + if self.closed: + raise ValueError('I/O operation on closed file.') + + # Accept any data that supports the buffer protocol + if isinstance(data, (bytes, bytearray)): + nbytes = len(data) + else: + data = memoryview(data) + nbytes = data.nbytes + self._file_size += nbytes + + self._crc = crc32(data, self._crc) + if self._compressor: + data = self._compressor.compress(data) + self._compress_size += len(data) + self._fileobj.write(data) + return nbytes + + def close(self): + if self.closed: + return + try: + super().close() + # Flush any data from the compressor, and update header info + if self._compressor: + buf = self._compressor.flush() + self._compress_size += len(buf) + self._fileobj.write(buf) + self._zinfo.compress_size = self._compress_size + else: + self._zinfo.compress_size = self._file_size + self._zinfo.CRC = self._crc + self._zinfo.file_size = self._file_size + + # Write updated header info + if self._zinfo.flag_bits & 0x08: + # Write CRC and file sizes after the file data + fmt = ' ZIP64_LIMIT: + raise RuntimeError( + 'File size unexpectedly exceeded ZIP64 limit') + if self._compress_size > ZIP64_LIMIT: + raise RuntimeError( + 'Compressed size unexpectedly exceeded ZIP64 limit') + # Seek backwards and write file header (which will now include + # correct CRC and file sizes) + + # Preserve current position in file + self._zipfile.start_dir = self._fileobj.tell() + self._fileobj.seek(self._zinfo.header_offset) + self._fileobj.write(self._zinfo.FileHeader(self._zip64)) + self._fileobj.seek(self._zipfile.start_dir) + + # Successfully written: Add file to our caches + self._zipfile.filelist.append(self._zinfo) + self._zipfile.NameToInfo[self._zinfo.filename] = self._zinfo + finally: + self._zipfile._writing = False + + + +class ZipFile: + """ Class with methods to open, read, write, close, list zip files. + + z = ZipFile(file, mode="r", compression=ZIP_STORED, allowZip64=True, + compresslevel=None) + + file: Either the path to the file, or a file-like object. + If it is a path, the file will be opened and closed by ZipFile. + mode: The mode can be either read 'r', write 'w', exclusive create 'x', + or append 'a'. 
+ compression: ZIP_STORED (no compression), ZIP_DEFLATED (requires zlib), + ZIP_BZIP2 (requires bz2) or ZIP_LZMA (requires lzma). + allowZip64: if True ZipFile will create files with ZIP64 extensions when + needed, otherwise it will raise an exception when this would + be necessary. + compresslevel: None (default for the given compression type) or an integer + specifying the level to pass to the compressor. + When using ZIP_STORED or ZIP_LZMA this keyword has no effect. + When using ZIP_DEFLATED integers 0 through 9 are accepted. + When using ZIP_BZIP2 integers 1 through 9 are accepted. + + """ + + fp = None # Set here since __del__ checks it + _windows_illegal_name_trans_table = None + + def __init__(self, file, mode="r", compression=ZIP_STORED, allowZip64=True, + compresslevel=None, *, strict_timestamps=True): + """Open the ZIP file with mode read 'r', write 'w', exclusive create 'x', + or append 'a'.""" + if mode not in ('r', 'w', 'x', 'a'): + raise ValueError("ZipFile requires mode 'r', 'w', 'x', or 'a'") + + _check_compression(compression) + + self._allowZip64 = allowZip64 + self._didModify = False + self.debug = 0 # Level of printing: 0 through 3 + self.NameToInfo = {} # Find file info given name + self.filelist = [] # List of ZipInfo instances for archive + self.compression = compression # Method of compression + self.compresslevel = compresslevel + self.mode = mode + self.pwd = None + self._comment = b'' + self._strict_timestamps = strict_timestamps + + # Check if we were passed a file-like object + if isinstance(file, os.PathLike): + file = os.fspath(file) + if isinstance(file, str): + # No, it's a filename + self._filePassed = 0 + self.filename = file + modeDict = {'r' : 'rb', 'w': 'w+b', 'x': 'x+b', 'a' : 'r+b', + 'r+b': 'w+b', 'w+b': 'wb', 'x+b': 'xb'} + filemode = modeDict[mode] + while True: + try: + self.fp = io.open(file, filemode) + except OSError: + if filemode in modeDict: + filemode = modeDict[filemode] + continue + raise + break + else: + self._filePassed = 1 + self.fp = file + self.filename = getattr(file, 'name', None) + self._fileRefCnt = 1 + self._lock = threading.RLock() + self._seekable = True + self._writing = False + + try: + if mode == 'r': + self._RealGetContents() + elif mode in ('w', 'x'): + # set the modified flag so central directory gets written + # even if no files are added to the archive + self._didModify = True + try: + self.start_dir = self.fp.tell() + except (AttributeError, OSError): + self.fp = _Tellable(self.fp) + self.start_dir = 0 + self._seekable = False + else: + # Some file-like objects can provide tell() but not seek() + try: + self.fp.seek(self.start_dir) + except (AttributeError, OSError): + self._seekable = False + elif mode == 'a': + try: + # See if file is a zip file + self._RealGetContents() + # seek to start of directory and overwrite + self.fp.seek(self.start_dir) + except BadZipFile: + # file is not a zip file, just append + self.fp.seek(0, 2) + + # set the modified flag so central directory gets written + # even if no files are added to the archive + self._didModify = True + self.start_dir = self.fp.tell() + else: + raise ValueError("Mode must be 'r', 'w', 'x', or 'a'") + except: + fp = self.fp + self.fp = None + self._fpclose(fp) + raise + + def __enter__(self): + return self + + def __exit__(self, type, value, traceback): + self.close() + + def __repr__(self): + result = ['<%s.%s' % (self.__class__.__module__, + self.__class__.__qualname__)] + if self.fp is not None: + if self._filePassed: + result.append(' file=%r' % self.fp) 
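+            # _filePassed means the caller supplied the file object, so the
+            # object itself is the most useful thing to show; otherwise the
+            # filename (if any) is reported instead.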
+ elif self.filename is not None: + result.append(' filename=%r' % self.filename) + result.append(' mode=%r' % self.mode) + else: + result.append(' [closed]') + result.append('>') + return ''.join(result) + + def _RealGetContents(self): + """Read in the table of contents for the ZIP file.""" + fp = self.fp + try: + endrec = _EndRecData(fp) + except OSError: + raise BadZipFile("File is not a zip file") + if not endrec: + raise BadZipFile("File is not a zip file") + if self.debug > 1: + print(endrec) + size_cd = endrec[_ECD_SIZE] # bytes in central directory + offset_cd = endrec[_ECD_OFFSET] # offset of central directory + self._comment = endrec[_ECD_COMMENT] # archive comment + + # "concat" is zero, unless zip was concatenated to another file + concat = endrec[_ECD_LOCATION] - size_cd - offset_cd + if endrec[_ECD_SIGNATURE] == stringEndArchive64: + # If Zip64 extension structures are present, account for them + concat -= (sizeEndCentDir64 + sizeEndCentDir64Locator) + + if self.debug > 2: + inferred = concat + offset_cd + print("given, inferred, offset", offset_cd, inferred, concat) + # self.start_dir: Position of start of central directory + self.start_dir = offset_cd + concat + if self.start_dir < 0: + raise BadZipFile("Bad offset for central directory") + fp.seek(self.start_dir, 0) + data = fp.read(size_cd) + fp = io.BytesIO(data) + total = 0 + while total < size_cd: + centdir = fp.read(sizeCentralDir) + if len(centdir) != sizeCentralDir: + raise BadZipFile("Truncated central directory") + centdir = struct.unpack(structCentralDir, centdir) + if centdir[_CD_SIGNATURE] != stringCentralDir: + raise BadZipFile("Bad magic number for central directory") + if self.debug > 2: + print(centdir) + filename = fp.read(centdir[_CD_FILENAME_LENGTH]) + flags = centdir[5] + if flags & 0x800: + # UTF-8 file names extension + filename = filename.decode('utf-8') + else: + # Historical ZIP filename encoding + filename = filename.decode('cp437') + # Create ZipInfo instance to store file information + x = ZipInfo(filename) + x.extra = fp.read(centdir[_CD_EXTRA_FIELD_LENGTH]) + x.comment = fp.read(centdir[_CD_COMMENT_LENGTH]) + x.header_offset = centdir[_CD_LOCAL_HEADER_OFFSET] + (x.create_version, x.create_system, x.extract_version, x.reserved, + x.flag_bits, x.compress_type, t, d, + x.CRC, x.compress_size, x.file_size) = centdir[1:12] + if x.extract_version > MAX_EXTRACT_VERSION: + raise NotImplementedError("zip file version %.1f" % + (x.extract_version / 10)) + x.volume, x.internal_attr, x.external_attr = centdir[15:18] + # Convert date/time code to (year, month, day, hour, min, sec) + x._raw_time = t + x.date_time = ( (d>>9)+1980, (d>>5)&0xF, d&0x1F, + t>>11, (t>>5)&0x3F, (t&0x1F) * 2 ) + + x._decodeExtra() + x.header_offset = x.header_offset + concat + self.filelist.append(x) + self.NameToInfo[x.filename] = x + + # update total bytes read from central directory + total = (total + sizeCentralDir + centdir[_CD_FILENAME_LENGTH] + + centdir[_CD_EXTRA_FIELD_LENGTH] + + centdir[_CD_COMMENT_LENGTH]) + + if self.debug > 2: + print("total", total) + + end_offset = self.start_dir + for zinfo in sorted(self.filelist, + key=lambda zinfo: zinfo.header_offset, + reverse=True): + zinfo._end_offset = end_offset + end_offset = zinfo.header_offset + + def namelist(self): + """Return a list of file names in the archive.""" + return [data.filename for data in self.filelist] + + def infolist(self): + """Return a list of class ZipInfo instances for files in the + archive.""" + return self.filelist + + def printdir(self, 
file=None): + """Print a table of contents for the zip file.""" + print("%-46s %19s %12s" % ("File Name", "Modified ", "Size"), + file=file) + for zinfo in self.filelist: + date = "%d-%02d-%02d %02d:%02d:%02d" % zinfo.date_time[:6] + print("%-46s %s %12d" % (zinfo.filename, date, zinfo.file_size), + file=file) + + def testzip(self): + """Read all the files and check the CRC.""" + chunk_size = 2 ** 20 + for zinfo in self.filelist: + try: + # Read by chunks, to avoid an OverflowError or a + # MemoryError with very large embedded files. + with self.open(zinfo.filename, "r") as f: + while f.read(chunk_size): # Check CRC-32 + pass + except BadZipFile: + return zinfo.filename + + def getinfo(self, name): + """Return the instance of ZipInfo given 'name'.""" + info = self.NameToInfo.get(name) + if info is None: + raise KeyError( + 'There is no item named %r in the archive' % name) + + return info + + def setpassword(self, pwd): + """Set default password for encrypted files.""" + if pwd and not isinstance(pwd, bytes): + raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) + if pwd: + self.pwd = pwd + else: + self.pwd = None + + @property + def comment(self): + """The comment text associated with the ZIP file.""" + return self._comment + + @comment.setter + def comment(self, comment): + if not isinstance(comment, bytes): + raise TypeError("comment: expected bytes, got %s" % type(comment).__name__) + # check for valid comment length + if len(comment) > ZIP_MAX_COMMENT: + import warnings + warnings.warn('Archive comment is too long; truncating to %d bytes' + % ZIP_MAX_COMMENT, stacklevel=2) + comment = comment[:ZIP_MAX_COMMENT] + self._comment = comment + self._didModify = True + + def read(self, name, pwd=None): + """Return file bytes for name.""" + with self.open(name, "r", pwd) as fp: + return fp.read() + + def open(self, name, mode="r", pwd=None, *, force_zip64=False): + """Return file-like object for 'name'. + + name is a string for the file name within the ZIP file, or a ZipInfo + object. + + mode should be 'r' to read a file already in the ZIP file, or 'w' to + write to a file newly added to the archive. + + pwd is the password to decrypt files (only used for reading). + + When writing, if the file size is not known in advance but may exceed + 2 GiB, pass force_zip64 to use the ZIP64 format, which can handle large + files. If the size is known in advance, it is best to pass a ZipInfo + instance for name, with zinfo.file_size set. + """ + if mode not in {"r", "w"}: + raise ValueError('open() requires mode "r" or "w"') + if pwd and not isinstance(pwd, bytes): + raise TypeError("pwd: expected bytes, got %s" % type(pwd).__name__) + if pwd and (mode == "w"): + raise ValueError("pwd is only supported for reading files") + if not self.fp: + raise ValueError( + "Attempt to use ZIP archive that was already closed") + + # Make sure we have an info object + if isinstance(name, ZipInfo): + # 'name' is already an info object + zinfo = name + elif mode == 'w': + zinfo = ZipInfo(name) + zinfo.compress_type = self.compression + zinfo._compresslevel = self.compresslevel + else: + # Get info object for name + zinfo = self.getinfo(name) + + if mode == 'w': + return self._open_to_write(zinfo, force_zip64=force_zip64) + + if self._writing: + raise ValueError("Can't read from the ZIP file while there " + "is an open writing handle on it. 
" + "Close the writing handle before trying to read.") + + # Open for reading: + self._fileRefCnt += 1 + zef_file = _SharedFile(self.fp, zinfo.header_offset, + self._fpclose, self._lock, lambda: self._writing) + try: + # Skip the file header: + fheader = zef_file.read(sizeFileHeader) + if len(fheader) != sizeFileHeader: + raise BadZipFile("Truncated file header") + fheader = struct.unpack(structFileHeader, fheader) + if fheader[_FH_SIGNATURE] != stringFileHeader: + raise BadZipFile("Bad magic number for file header") + + fname = zef_file.read(fheader[_FH_FILENAME_LENGTH]) + if fheader[_FH_EXTRA_FIELD_LENGTH]: + zef_file.read(fheader[_FH_EXTRA_FIELD_LENGTH]) + + if zinfo.flag_bits & 0x20: + # Zip 2.7: compressed patched data + raise NotImplementedError("compressed patched data (flag bit 5)") + + if zinfo.flag_bits & 0x40: + # strong encryption + raise NotImplementedError("strong encryption (flag bit 6)") + + if fheader[_FH_GENERAL_PURPOSE_FLAG_BITS] & 0x800: + # UTF-8 filename + fname_str = fname.decode("utf-8") + else: + fname_str = fname.decode("cp437") + + if fname_str != zinfo.orig_filename: + raise BadZipFile( + 'File name in directory %r and header %r differ.' + % (zinfo.orig_filename, fname)) + + if (zinfo._end_offset is not None and + zef_file.tell() + zinfo.compress_size > zinfo._end_offset): + raise BadZipFile(f"Overlapped entries: {zinfo.orig_filename!r} (possible zip bomb)") + + # check for encrypted flag & handle password + is_encrypted = zinfo.flag_bits & 0x1 + if is_encrypted: + if not pwd: + pwd = self.pwd + if not pwd: + raise RuntimeError("File %r is encrypted, password " + "required for extraction" % name) + else: + pwd = None + + return ZipExtFile(zef_file, mode, zinfo, pwd, True) + except: + zef_file.close() + raise + + def _open_to_write(self, zinfo, force_zip64=False): + if force_zip64 and not self._allowZip64: + raise ValueError( + "force_zip64 is True, but allowZip64 was False when opening " + "the ZIP file." + ) + if self._writing: + raise ValueError("Can't write to the ZIP file while there is " + "another write handle open on it. " + "Close the first handle before opening another.") + + # Size and CRC are overwritten with correct data after processing the file + zinfo.compress_size = 0 + zinfo.CRC = 0 + + zinfo.flag_bits = 0x00 + if zinfo.compress_type == ZIP_LZMA: + # Compressed data includes an end-of-stream (EOS) marker + zinfo.flag_bits |= 0x02 + if not self._seekable: + zinfo.flag_bits |= 0x08 + + if not zinfo.external_attr: + zinfo.external_attr = 0o600 << 16 # permissions: ?rw------- + + # Compressed size can be larger than uncompressed size + zip64 = self._allowZip64 and \ + (force_zip64 or zinfo.file_size * 1.05 > ZIP64_LIMIT) + + if self._seekable: + self.fp.seek(self.start_dir) + zinfo.header_offset = self.fp.tell() + + self._writecheck(zinfo) + self._didModify = True + + self.fp.write(zinfo.FileHeader(zip64)) + + self._writing = True + return _ZipWriteFile(self, zinfo, zip64) + + def extract(self, member, path=None, pwd=None): + """Extract a member from the archive to the current working directory, + using its full name. Its file information is extracted as accurately + as possible. `member' may be a filename or a ZipInfo object. You can + specify a different directory using `path'. + """ + if path is None: + path = os.getcwd() + else: + path = os.fspath(path) + + return self._extract_member(member, path, pwd) + + def extractall(self, path=None, members=None, pwd=None): + """Extract all members from the archive to the current working + directory. 
`path' specifies a different directory to extract to. + `members' is optional and must be a subset of the list returned + by namelist(). + """ + if members is None: + members = self.namelist() + + if path is None: + path = os.getcwd() + else: + path = os.fspath(path) + + for zipinfo in members: + self._extract_member(zipinfo, path, pwd) + + @classmethod + def _sanitize_windows_name(cls, arcname, pathsep): + """Replace bad characters and remove trailing dots from parts.""" + table = cls._windows_illegal_name_trans_table + if not table: + illegal = ':<>|"?*' + table = str.maketrans(illegal, '_' * len(illegal)) + cls._windows_illegal_name_trans_table = table + arcname = arcname.translate(table) + # remove trailing dots + arcname = (x.rstrip('.') for x in arcname.split(pathsep)) + # rejoin, removing empty parts. + arcname = pathsep.join(x for x in arcname if x) + return arcname + + def _extract_member(self, member, targetpath, pwd): + """Extract the ZipInfo object 'member' to a physical + file on the path targetpath. + """ + if not isinstance(member, ZipInfo): + member = self.getinfo(member) + + # build the destination pathname, replacing + # forward slashes to platform specific separators. + arcname = member.filename.replace('/', os.path.sep) + + if os.path.altsep: + arcname = arcname.replace(os.path.altsep, os.path.sep) + # interpret absolute pathname as relative, remove drive letter or + # UNC path, redundant separators, "." and ".." components. + arcname = os.path.splitdrive(arcname)[1] + invalid_path_parts = ('', os.path.curdir, os.path.pardir) + arcname = os.path.sep.join(x for x in arcname.split(os.path.sep) + if x not in invalid_path_parts) + if os.path.sep == '\\': + # filter illegal characters on Windows + arcname = self._sanitize_windows_name(arcname, os.path.sep) + + targetpath = os.path.join(targetpath, arcname) + targetpath = os.path.normpath(targetpath) + + # Create all upper directories if necessary. 
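+        # (os.path.dirname() of the already-sanitized target; os.makedirs()
+        # creates any missing intermediate directories in a single call)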
+ upperdirs = os.path.dirname(targetpath) + if upperdirs and not os.path.exists(upperdirs): + os.makedirs(upperdirs) + + if member.is_dir(): + if not os.path.isdir(targetpath): + os.mkdir(targetpath) + return targetpath + + with self.open(member, pwd=pwd) as source, \ + open(targetpath, "wb") as target: + shutil.copyfileobj(source, target) + + return targetpath + + def _writecheck(self, zinfo): + """Check for errors before writing a file to the archive.""" + if zinfo.filename in self.NameToInfo: + import warnings + warnings.warn('Duplicate name: %r' % zinfo.filename, stacklevel=3) + if self.mode not in ('w', 'x', 'a'): + raise ValueError("write() requires mode 'w', 'x', or 'a'") + if not self.fp: + raise ValueError( + "Attempt to write ZIP archive that was already closed") + _check_compression(zinfo.compress_type) + if not self._allowZip64: + requires_zip64 = None + if len(self.filelist) >= ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif zinfo.file_size > ZIP64_LIMIT: + requires_zip64 = "Filesize" + elif zinfo.header_offset > ZIP64_LIMIT: + requires_zip64 = "Zipfile size" + if requires_zip64: + raise LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") + + def write(self, filename, arcname=None, + compress_type=None, compresslevel=None): + """Put the bytes from filename into the archive under the name + arcname.""" + if not self.fp: + raise ValueError( + "Attempt to write to ZIP archive that was already closed") + if self._writing: + raise ValueError( + "Can't write to ZIP archive while an open writing handle exists" + ) + + zinfo = ZipInfo.from_file(filename, arcname, + strict_timestamps=self._strict_timestamps) + + if zinfo.is_dir(): + zinfo.compress_size = 0 + zinfo.CRC = 0 + else: + if compress_type is not None: + zinfo.compress_type = compress_type + else: + zinfo.compress_type = self.compression + + if compresslevel is not None: + zinfo._compresslevel = compresslevel + else: + zinfo._compresslevel = self.compresslevel + + if zinfo.is_dir(): + with self._lock: + if self._seekable: + self.fp.seek(self.start_dir) + zinfo.header_offset = self.fp.tell() # Start of header bytes + if zinfo.compress_type == ZIP_LZMA: + # Compressed data includes an end-of-stream (EOS) marker + zinfo.flag_bits |= 0x02 + + self._writecheck(zinfo) + self._didModify = True + + self.filelist.append(zinfo) + self.NameToInfo[zinfo.filename] = zinfo + self.fp.write(zinfo.FileHeader(False)) + self.start_dir = self.fp.tell() + else: + with open(filename, "rb") as src, self.open(zinfo, 'w') as dest: + shutil.copyfileobj(src, dest, 1024*8) + + def writestr(self, zinfo_or_arcname, data, + compress_type=None, compresslevel=None): + """Write a file into the archive. The contents is 'data', which + may be either a 'str' or a 'bytes' instance; if it is a 'str', + it is encoded as UTF-8 first. 
+ 'zinfo_or_arcname' is either a ZipInfo instance or + the name of the file in the archive.""" + if isinstance(data, str): + data = data.encode("utf-8") + if not isinstance(zinfo_or_arcname, ZipInfo): + zinfo = ZipInfo(filename=zinfo_or_arcname, + date_time=time.localtime(time.time())[:6]) + zinfo.compress_type = self.compression + zinfo._compresslevel = self.compresslevel + if zinfo.filename[-1] == '/': + zinfo.external_attr = 0o40775 << 16 # drwxrwxr-x + zinfo.external_attr |= 0x10 # MS-DOS directory flag + else: + zinfo.external_attr = 0o600 << 16 # ?rw------- + else: + zinfo = zinfo_or_arcname + + if not self.fp: + raise ValueError( + "Attempt to write to ZIP archive that was already closed") + if self._writing: + raise ValueError( + "Can't write to ZIP archive while an open writing handle exists." + ) + + if compress_type is not None: + zinfo.compress_type = compress_type + + if compresslevel is not None: + zinfo._compresslevel = compresslevel + + zinfo.file_size = len(data) # Uncompressed size + with self._lock: + with self.open(zinfo, mode='w') as dest: + dest.write(data) + + def __del__(self): + """Call the "close()" method in case the user forgot.""" + self.close() + + def close(self): + """Close the file, and for mode 'w', 'x' and 'a' write the ending + records.""" + if self.fp is None: + return + + if self._writing: + raise ValueError("Can't close the ZIP file while there is " + "an open writing handle on it. " + "Close the writing handle before closing the zip.") + + try: + if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records + with self._lock: + if self._seekable: + self.fp.seek(self.start_dir) + self._write_end_record() + finally: + fp = self.fp + self.fp = None + self._fpclose(fp) + + def _write_end_record(self): + for zinfo in self.filelist: # write central directory + dt = zinfo.date_time + dosdate = (dt[0] - 1980) << 9 | dt[1] << 5 | dt[2] + dostime = dt[3] << 11 | dt[4] << 5 | (dt[5] // 2) + extra = [] + if zinfo.file_size > ZIP64_LIMIT \ + or zinfo.compress_size > ZIP64_LIMIT: + extra.append(zinfo.file_size) + extra.append(zinfo.compress_size) + file_size = 0xffffffff + compress_size = 0xffffffff + else: + file_size = zinfo.file_size + compress_size = zinfo.compress_size + + if zinfo.header_offset > ZIP64_LIMIT: + extra.append(zinfo.header_offset) + header_offset = 0xffffffff + else: + header_offset = zinfo.header_offset + + extra_data = zinfo.extra + min_version = 0 + if extra: + # Append a ZIP64 field to the extra's + extra_data = _strip_extra(extra_data, (1,)) + extra_data = struct.pack( + ' ZIP_FILECOUNT_LIMIT: + requires_zip64 = "Files count" + elif centDirOffset > ZIP64_LIMIT: + requires_zip64 = "Central directory offset" + elif centDirSize > ZIP64_LIMIT: + requires_zip64 = "Central directory size" + if requires_zip64: + # Need to write the ZIP64 end-of-archive records + if not self._allowZip64: + raise LargeZipFile(requires_zip64 + + " would require ZIP64 extensions") + zip64endrec = struct.pack( + structEndArchive64, stringEndArchive64, + 44, 45, 45, 0, 0, centDirCount, centDirCount, + centDirSize, centDirOffset) + self.fp.write(zip64endrec) + + zip64locrec = struct.pack( + structEndArchive64Locator, + stringEndArchive64Locator, 0, pos2, 1) + self.fp.write(zip64locrec) + centDirCount = min(centDirCount, 0xFFFF) + centDirSize = min(centDirSize, 0xFFFFFFFF) + centDirOffset = min(centDirOffset, 0xFFFFFFFF) + + endrec = struct.pack(structEndArchive, stringEndArchive, + 0, 0, centDirCount, centDirCount, + centDirSize, centDirOffset, 
len(self._comment)) + self.fp.write(endrec) + self.fp.write(self._comment) + if self.mode == "a": + self.fp.truncate() + self.fp.flush() + + def _fpclose(self, fp): + assert self._fileRefCnt > 0 + self._fileRefCnt -= 1 + if not self._fileRefCnt and not self._filePassed: + fp.close() + + +class PyZipFile(ZipFile): + """Class to create ZIP archives with Python library files and packages.""" + + def __init__(self, file, mode="r", compression=ZIP_STORED, + allowZip64=True, optimize=-1): + ZipFile.__init__(self, file, mode=mode, compression=compression, + allowZip64=allowZip64) + self._optimize = optimize + + def writepy(self, pathname, basename="", filterfunc=None): + """Add all files from "pathname" to the ZIP archive. + + If pathname is a package directory, search the directory and + all package subdirectories recursively for all *.py and enter + the modules into the archive. If pathname is a plain + directory, listdir *.py and enter all modules. Else, pathname + must be a Python *.py file and the module will be put into the + archive. Added modules are always module.pyc. + This method will compile the module.py into module.pyc if + necessary. + If filterfunc(pathname) is given, it is called with every argument. + When it is False, the file or directory is skipped. + """ + pathname = os.fspath(pathname) + if filterfunc and not filterfunc(pathname): + if self.debug: + label = 'path' if os.path.isdir(pathname) else 'file' + print('%s %r skipped by filterfunc' % (label, pathname)) + return + dir, name = os.path.split(pathname) + if os.path.isdir(pathname): + initname = os.path.join(pathname, "__init__.py") + if os.path.isfile(initname): + # This is a package directory, add it + if basename: + basename = "%s/%s" % (basename, name) + else: + basename = name + if self.debug: + print("Adding package in", pathname, "as", basename) + fname, arcname = self._get_codename(initname[0:-3], basename) + if self.debug: + print("Adding", arcname) + self.write(fname, arcname) + dirlist = sorted(os.listdir(pathname)) + dirlist.remove("__init__.py") + # Add all *.py files and package subdirectories + for filename in dirlist: + path = os.path.join(pathname, filename) + root, ext = os.path.splitext(filename) + if os.path.isdir(path): + if os.path.isfile(os.path.join(path, "__init__.py")): + # This is a package directory, add it + self.writepy(path, basename, + filterfunc=filterfunc) # Recursive call + elif ext == ".py": + if filterfunc and not filterfunc(path): + if self.debug: + print('file %r skipped by filterfunc' % path) + continue + fname, arcname = self._get_codename(path[0:-3], + basename) + if self.debug: + print("Adding", arcname) + self.write(fname, arcname) + else: + # This is NOT a package directory, add its files at top level + if self.debug: + print("Adding files from directory", pathname) + for filename in sorted(os.listdir(pathname)): + path = os.path.join(pathname, filename) + root, ext = os.path.splitext(filename) + if ext == ".py": + if filterfunc and not filterfunc(path): + if self.debug: + print('file %r skipped by filterfunc' % path) + continue + fname, arcname = self._get_codename(path[0:-3], + basename) + if self.debug: + print("Adding", arcname) + self.write(fname, arcname) + else: + if pathname[-3:] != ".py": + raise RuntimeError( + 'Files added with writepy() must end with ".py"') + fname, arcname = self._get_codename(pathname[0:-3], basename) + if self.debug: + print("Adding file", arcname) + self.write(fname, arcname) + + def _get_codename(self, pathname, basename): + """Return 
(filename, archivename) for the path. + + Given a module name path, return the correct file path and + archive name, compiling if necessary. For example, given + /python/lib/string, return (/python/lib/string.pyc, string). + """ + def _compile(file, optimize=-1): + import py_compile + if self.debug: + print("Compiling", file) + try: + py_compile.compile(file, doraise=True, optimize=optimize) + except py_compile.PyCompileError as err: + print(err.msg) + return False + return True + + file_py = pathname + ".py" + file_pyc = pathname + ".pyc" + pycache_opt0 = importlib.util.cache_from_source(file_py, optimization='') + pycache_opt1 = importlib.util.cache_from_source(file_py, optimization=1) + pycache_opt2 = importlib.util.cache_from_source(file_py, optimization=2) + if self._optimize == -1: + # legacy mode: use whatever file is present + if (os.path.isfile(file_pyc) and + os.stat(file_pyc).st_mtime >= os.stat(file_py).st_mtime): + # Use .pyc file. + arcname = fname = file_pyc + elif (os.path.isfile(pycache_opt0) and + os.stat(pycache_opt0).st_mtime >= os.stat(file_py).st_mtime): + # Use the __pycache__/*.pyc file, but write it to the legacy pyc + # file name in the archive. + fname = pycache_opt0 + arcname = file_pyc + elif (os.path.isfile(pycache_opt1) and + os.stat(pycache_opt1).st_mtime >= os.stat(file_py).st_mtime): + # Use the __pycache__/*.pyc file, but write it to the legacy pyc + # file name in the archive. + fname = pycache_opt1 + arcname = file_pyc + elif (os.path.isfile(pycache_opt2) and + os.stat(pycache_opt2).st_mtime >= os.stat(file_py).st_mtime): + # Use the __pycache__/*.pyc file, but write it to the legacy pyc + # file name in the archive. + fname = pycache_opt2 + arcname = file_pyc + else: + # Compile py into PEP 3147 pyc file. + if _compile(file_py): + if sys.flags.optimize == 0: + fname = pycache_opt0 + elif sys.flags.optimize == 1: + fname = pycache_opt1 + else: + fname = pycache_opt2 + arcname = file_pyc + else: + fname = arcname = file_py + else: + # new mode: use given optimization level + if self._optimize == 0: + fname = pycache_opt0 + arcname = file_pyc + else: + arcname = file_pyc + if self._optimize == 1: + fname = pycache_opt1 + elif self._optimize == 2: + fname = pycache_opt2 + else: + msg = "invalid value for 'optimize': {!r}".format(self._optimize) + raise ValueError(msg) + if not (os.path.isfile(fname) and + os.stat(fname).st_mtime >= os.stat(file_py).st_mtime): + if not _compile(file_py, optimize=self._optimize): + fname = arcname = file_py + archivename = os.path.split(arcname)[1] + if basename: + archivename = "%s/%s" % (basename, archivename) + return (fname, archivename) + + +def _parents(path): + """ + Given a path with elements separated by + posixpath.sep, generate all parents of that path. + + >>> list(_parents('b/d')) + ['b'] + >>> list(_parents('/b/d/')) + ['/b'] + >>> list(_parents('b/d/f/')) + ['b/d', 'b'] + >>> list(_parents('b')) + [] + >>> list(_parents('')) + [] + """ + return itertools.islice(_ancestry(path), 1, None) + + +def _ancestry(path): + """ + Given a path with elements separated by + posixpath.sep, generate all elements of that path. + + >>> list(_ancestry('b/d')) + ['b/d', 'b'] + >>> list(_ancestry('/b/d/')) + ['/b/d', '/b'] + >>> list(_ancestry('b/d/f/')) + ['b/d/f', 'b/d', 'b'] + >>> list(_ancestry('b')) + ['b'] + >>> list(_ancestry('')) + [] + + Multiple separators are treated like a single. 
+ + >>> list(_ancestry('//b//d///f//')) + ['//b//d///f', '//b//d', '//b'] + """ + path = path.rstrip(posixpath.sep) + while path.rstrip(posixpath.sep): + yield path + path, tail = posixpath.split(path) + + +_dedupe = dict.fromkeys +"""Deduplicate an iterable in original order""" + + +def _difference(minuend, subtrahend): + """ + Return items in minuend not in subtrahend, retaining order + with O(1) lookup. + """ + return itertools.filterfalse(set(subtrahend).__contains__, minuend) + + +class CompleteDirs(ZipFile): + """ + A ZipFile subclass that ensures that implied directories + are always included in the namelist. + """ + + @staticmethod + def _implied_dirs(names): + parents = itertools.chain.from_iterable(map(_parents, names)) + as_dirs = (p + posixpath.sep for p in parents) + return _dedupe(_difference(as_dirs, names)) + + def namelist(self): + names = super(CompleteDirs, self).namelist() + return names + list(self._implied_dirs(names)) + + def _name_set(self): + return set(self.namelist()) + + def resolve_dir(self, name): + """ + If the name represents a directory, return that name + as a directory (with the trailing slash). + """ + names = self._name_set() + dirname = name + '/' + dir_match = name not in names and dirname in names + return dirname if dir_match else name + + def getinfo(self, name): + """ + Supplement getinfo for implied dirs. + """ + try: + return super().getinfo(name) + except KeyError: + if not name.endswith('/') or name not in self._name_set(): + raise + return ZipInfo(filename=name) + + @classmethod + def make(cls, source): + """ + Given a source (filename or zipfile), return an + appropriate CompleteDirs subclass. + """ + if isinstance(source, CompleteDirs): + return source + + if not isinstance(source, ZipFile): + return cls(source) + + # Only allow for FastLookup when supplied zipfile is read-only + if 'r' not in source.mode: + cls = CompleteDirs + + source.__class__ = cls + return source + + +class FastLookup(CompleteDirs): + """ + ZipFile subclass to ensure implicit + dirs exist and are resolved rapidly. + """ + + def namelist(self): + with contextlib.suppress(AttributeError): + return self.__names + self.__names = super(FastLookup, self).namelist() + return self.__names + + def _name_set(self): + with contextlib.suppress(AttributeError): + return self.__lookup + self.__lookup = super(FastLookup, self)._name_set() + return self.__lookup + + +def _extract_text_encoding(encoding=None, *args, **kwargs): + # stacklevel=3 so that the caller of the caller see any warning. + return io.text_encoding(encoding, 3), args, kwargs + + +class Path: + """ + A pathlib-compatible interface for zip files. + + Consider a zip file with this structure:: + + . + ├── a.txt + └── b + ├── c.txt + └── d + └── e.txt + + >>> data = io.BytesIO() + >>> zf = ZipFile(data, 'w') + >>> zf.writestr('a.txt', 'content of a') + >>> zf.writestr('b/c.txt', 'content of c') + >>> zf.writestr('b/d/e.txt', 'content of e') + >>> zf.filename = 'mem/abcde.zip' + + Path accepts the zipfile object itself or a filename + + >>> root = Path(zf) + + From there, several path operations are available. 
+ + Directory iteration (including the zip file itself): + + >>> a, b = root.iterdir() + >>> a + Path('mem/abcde.zip', 'a.txt') + >>> b + Path('mem/abcde.zip', 'b/') + + name property: + + >>> b.name + 'b' + + join with divide operator: + + >>> c = b / 'c.txt' + >>> c + Path('mem/abcde.zip', 'b/c.txt') + >>> c.name + 'c.txt' + + Read text: + + >>> c.read_text() + 'content of c' + + existence: + + >>> c.exists() + True + >>> (b / 'missing.txt').exists() + False + + Coercion to string: + + >>> import os + >>> str(c).replace(os.sep, posixpath.sep) + 'mem/abcde.zip/b/c.txt' + + At the root, ``name``, ``filename``, and ``parent`` + resolve to the zipfile. Note these attributes are not + valid and will raise a ``ValueError`` if the zipfile + has no filename. + + >>> root.name + 'abcde.zip' + >>> str(root.filename).replace(os.sep, posixpath.sep) + 'mem/abcde.zip' + >>> str(root.parent) + 'mem' + """ + + __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" + + def __init__(self, root, at=""): + """ + Construct a Path from a ZipFile or filename. + + Note: When the source is an existing ZipFile object, + its type (__class__) will be mutated to a + specialized type. If the caller wishes to retain the + original type, the caller should either create a + separate ZipFile object or pass a filename. + """ + self.root = FastLookup.make(root) + self.at = at + + def open(self, mode='r', *args, pwd=None, **kwargs): + """ + Open this entry as text or binary following the semantics + of ``pathlib.Path.open()`` by passing arguments through + to io.TextIOWrapper(). + """ + if self.is_dir(): + raise IsADirectoryError(self) + zip_mode = mode[0] + if not self.exists() and zip_mode == 'r': + raise FileNotFoundError(self) + stream = self.root.open(self.at, zip_mode, pwd=pwd) + if 'b' in mode: + if args or kwargs: + raise ValueError("encoding args invalid for binary operation") + return stream + # Text mode: + encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) + return io.TextIOWrapper(stream, encoding, *args, **kwargs) + + @property + def name(self): + return pathlib.PurePosixPath(self.at).name or self.filename.name + + @property + def filename(self): + return pathlib.Path(self.root.filename).joinpath(self.at) + + def read_text(self, *args, **kwargs): + encoding, args, kwargs = _extract_text_encoding(*args, **kwargs) + with self.open('r', encoding, *args, **kwargs) as strm: + return strm.read() + + def read_bytes(self): + with self.open('rb') as strm: + return strm.read() + + def _is_child(self, path): + return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") + + def _next(self, at): + return self.__class__(self.root, at) + + def is_dir(self): + return not self.at or self.at.endswith("/") + + def is_file(self): + return self.exists() and not self.is_dir() + + def exists(self): + return self.at in self.root._name_set() + + def iterdir(self): + if not self.is_dir(): + raise ValueError("Can't listdir a file") + subs = map(self._next, self.root.namelist()) + return filter(self._is_child, subs) + + def __str__(self): + return posixpath.join(self.root.filename, self.at) + + def __repr__(self): + return self.__repr.format(self=self) + + def joinpath(self, *other): + next = posixpath.join(self.at, *other) + return self._next(self.root.resolve_dir(next)) + + __truediv__ = joinpath + + @property + def parent(self): + if not self.at: + return self.filename.parent + parent_at = posixpath.dirname(self.at.rstrip('/')) + if parent_at: + parent_at += '/' + return self._next(parent_at) 
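+
+# A minimal sketch of the Path interface above, assuming an in-memory
+# archive; kept as a comment so module behavior is unchanged:
+#
+#     import io
+#     buf = io.BytesIO()
+#     with ZipFile(buf, 'w') as zf:
+#         zf.writestr('pkg/mod.py', 'print("hi")\n')
+#     root = Path(ZipFile(buf))       # wrapping mutates the ZipFile's class
+#     (pkg,) = root.iterdir()         # the implied 'pkg/' directory
+#     mod = pkg / 'mod.py'            # joinpath via the divide operator
+#     assert mod.read_text() == 'print("hi")\n'
+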
+ + +def main(args=None): + import argparse + + description = 'A simple command-line interface for zipfile module.' + parser = argparse.ArgumentParser(description=description) + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument('-l', '--list', metavar='', + help='Show listing of a zipfile') + group.add_argument('-e', '--extract', nargs=2, + metavar=('', ''), + help='Extract zipfile into target dir') + group.add_argument('-c', '--create', nargs='+', + metavar=('', ''), + help='Create zipfile from sources') + group.add_argument('-t', '--test', metavar='', + help='Test if a zipfile is valid') + args = parser.parse_args(args) + + if args.test is not None: + src = args.test + with ZipFile(src, 'r') as zf: + badfile = zf.testzip() + if badfile: + print("The following enclosed file is corrupted: {!r}".format(badfile)) + print("Done testing") + + elif args.list is not None: + src = args.list + with ZipFile(src, 'r') as zf: + zf.printdir() + + elif args.extract is not None: + src, curdir = args.extract + with ZipFile(src, 'r') as zf: + zf.extractall(curdir) + + elif args.create is not None: + zip_name = args.create.pop(0) + files = args.create + + def addToZip(zf, path, zippath): + if os.path.isfile(path): + zf.write(path, zippath, ZIP_DEFLATED) + elif os.path.isdir(path): + if zippath: + zf.write(path, zippath) + for nm in sorted(os.listdir(path)): + addToZip(zf, + os.path.join(path, nm), os.path.join(zippath, nm)) + # else: ignore + + with ZipFile(zip_name, 'w') as zf: + for path in files: + zippath = os.path.basename(path) + if not zippath: + zippath = os.path.basename(os.path.dirname(path)) + if zippath in ('', os.curdir, os.pardir): + zippath = '' + addToZip(zf, path, zippath) + + +if __name__ == "__main__": + main() diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..da9f8c8717ea414f7bddafce2177a6db7bb74d3e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_per_sample_weights_backward_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _embedding_bag_per_sample_weights_backward(const at::Tensor & grad, const at::Tensor & weight, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, int64_t mode, int64_t padding_idx=-1); + +} // namespace cpu +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_channel_affine_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_channel_affine_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..07593d2c2a495008877977bcdfea80233fb61848 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_fake_quantize_learnable_per_channel_affine_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & _fake_quantize_learnable_per_channel_affine_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor=1.0); +TORCH_API at::Tensor & _fake_quantize_learnable_per_channel_affine_outf(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max, double grad_factor, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9e088db3e3e5886f2ade39caa41abd051174502c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sinh_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _foreach_sinh { + using schema = ::std::vector (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sinh") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sinh(Tensor[] self) -> Tensor[]") + static ::std::vector call(at::TensorList self); + static ::std::vector redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self); +}; + +struct TORCH_API _foreach_sinh_ { + using schema = void (at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sinh_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sinh_(Tensor(a!)[] self) -> ()") + static void call(at::TensorList self); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self); +}; + +struct TORCH_API _foreach_sinh_out { + using schema = void (at::TensorList, at::TensorList); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_sinh") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_sinh.out(Tensor[] self, *, Tensor(a!)[] out) -> ()") + static void call(at::TensorList self, at::TensorList out); + static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adamw_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adamw_native.h new file mode 100644 index 0000000000000000000000000000000000000000..d5242aadd241268c17b7eff36c4f98200f73a891 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_fused_adamw_native.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale={}, const ::std::optional & found_inf={}); +TORCH_API void _fused_adamw_out(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf, at::TensorList out); +TORCH_API void _fused_adamw_kernel_cpu_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & 
grad_scale={}, const ::std::optional & found_inf={}); +TORCH_API void _fused_adamw_kernel_cuda_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, double lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale={}, const ::std::optional & found_inf={}); +TORCH_API ::std::tuple<::std::vector,::std::vector,::std::vector,::std::vector,::std::vector> _fused_adamw(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale={}, const ::std::optional & found_inf={}); +TORCH_API void _fused_adamw_tensor_lr_out(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale, const ::std::optional & found_inf, at::TensorList out); +TORCH_API void _fused_adamw_kernel_cpu_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale={}, const ::std::optional & found_inf={}); +TORCH_API void _fused_adamw_kernel_cuda_(at::TensorList self, at::TensorList grads, at::TensorList exp_avgs, at::TensorList exp_avg_sqs, at::TensorList max_exp_avg_sqs, at::TensorList state_steps, const at::Tensor & lr, double beta1, double beta2, double weight_decay, double eps, bool amsgrad, bool maximize, const ::std::optional & grad_scale={}, const ::std::optional & found_inf={}); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_backward.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..4b0f2a2567e3ad75295cbd75a5f071efd7564c98 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_backward.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_grid_sampler_2d_cpu_fallback_backward(Tensor grad_output, Tensor input, Tensor grid, int interpolation_mode, int padding_mode, bool align_corners) -> (Tensor, Tensor) +inline ::std::tuple _grid_sampler_2d_cpu_fallback_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners) { + return at::_ops::_grid_sampler_2d_cpu_fallback_backward::call(grad_output, input, grid, interpolation_mode, padding_mode, align_corners); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_backward_cuda_dispatch.h 
b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..eab29da0930dffc40e4fcd6f857c87f524e761e3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_cudnn_attention_backward_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, int64_t max_q, int64_t max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale=::std::nullopt); +TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _scaled_dot_product_cudnn_attention_backward_symint(const at::Tensor & grad_out, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & cum_seq_q, const at::Tensor & cum_seq_k, c10::SymInt max_q, c10::SymInt max_k, double dropout_p, bool is_causal, const at::Tensor & philox_seed, const at::Tensor & philox_offset, ::std::optional<double> scale=::std::nullopt); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..d0788d461966b554e17c38f3b71b4ab97609db32 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_backward_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _scaled_dot_product_efficient_attention_backward { + using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, const at::Tensor &, double, ::std::array<bool,4>, bool, ::std::optional<double>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_scaled_dot_product_efficient_attention_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_scaled_dot_product_efficient_attention_backward(Tensor grad_out_, Tensor query, Tensor key, Tensor value, Tensor attn_bias, Tensor out, Tensor logsumexp, Tensor philox_seed, Tensor philox_offset, float dropout_p, bool[4] grad_input_mask, bool is_causal=False, *, float? scale=None) -> (Tensor, Tensor, Tensor, Tensor)") + static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array<bool,4> grad_input_mask, bool is_causal, ::std::optional<double> scale); + static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_out_, const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const at::Tensor & attn_bias, const at::Tensor & out, const at::Tensor & logsumexp, const at::Tensor & philox_seed, const at::Tensor & philox_offset, double dropout_p, ::std::array<bool,4> grad_input_mask, bool is_causal, ::std::optional<double> scale); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f2585041b658e5bb1e09b4e48437db4580598966 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_compressed_tensor_unsafe_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _sparse_compressed_tensor_unsafe_symint(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, c10::SymIntArrayRef size, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_backward.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..5deb1b46f518e62746e99c04df32decfbb0fa87c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_sum_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_sparse_sum_backward(Tensor grad, Tensor self, int[] dim)
-> Tensor +inline at::Tensor _sparse_sum_backward(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::_sparse_sum_backward::call(grad, self, dim); +} + +// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _sparse_sum_backward_out(at::Tensor & out, const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim) { + return at::_ops::_sparse_sum_backward_out::call(grad, self, dim, out); +} +// aten::_sparse_sum_backward.out(Tensor grad, Tensor self, int[] dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _sparse_sum_backward_outf(const at::Tensor & grad, const at::Tensor & self, at::IntArrayRef dim, at::Tensor & out) { + return at::_ops::_sparse_sum_backward_out::call(grad, self, dim, out); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad.h new file mode 100644 index 0000000000000000000000000000000000000000..b10ee2d60b85f10e1de83c5194bb1b7645d41269 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_standard_gamma_grad.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_standard_gamma_grad(Tensor self, Tensor output) -> Tensor +inline at::Tensor _standard_gamma_grad(const at::Tensor & self, const at::Tensor & output) { + return at::_ops::_standard_gamma_grad::call(self, output); +} + +// aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _standard_gamma_grad_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & output) { + return at::_ops::_standard_gamma_grad_out::call(self, output, out); +} +// aten::_standard_gamma_grad.out(Tensor self, Tensor output, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _standard_gamma_grad_outf(const at::Tensor & self, const at::Tensor & output, at::Tensor & out) { + return at::_ops::_standard_gamma_grad_out::call(self, output, out); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_cpu_compositeimplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_cpu_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f8578fb53b245764feb2713c327637b66784c121 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_to_cpu_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::vector<at::Tensor> _to_cpu(at::TensorList tensors); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..660435a04a499f03aee7b9e9e6f78fff4704eab8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bicubic2d_aa_meta_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor _upsample_bicubic2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt); +TORCH_API at::Tensor _upsample_bicubic2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_bicubic2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_bicubic2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out); +TORCH_API at::Tensor & _upsample_bicubic2d_aa_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_bicubic2d_aa_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out); + +} // namespace meta
} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..bd81591a6ae32418d92325ee921db91013f64e88 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_bilinear2d_aa_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
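The *_dispatch.h headers above and below declare the same operator once per dispatch key (at::meta::, at::cpu::, at::cuda::, ...); ordinary callers go through the public at:: entry point and let the dispatcher pick the kernel. A minimal usage sketch, assuming only the public ATen API from <ATen/ATen.h> (not something these generated headers themselves provide):

#include <ATen/ATen.h>

at::Tensor upsample_example() {
  at::Tensor img = at::rand({1, 3, 8, 8});  // NCHW input on CPU
  // Anti-aliased bilinear resize to 16x16; on a CPU tensor the dispatcher
  // routes this to the at::cpu:: kernel declared in the surrounding header.
  return at::_upsample_bilinear2d_aa(img, {16, 16}, /*align_corners=*/false);
}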
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor _upsample_bilinear2d_aa(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt); +TORCH_API at::Tensor _upsample_bilinear2d_aa_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_bilinear2d_aa_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_bilinear2d_aa_outf(const at::Tensor & self, at::IntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out); +TORCH_API at::Tensor & _upsample_bilinear2d_aa_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h=::std::nullopt, ::std::optional<double> scales_w=::std::nullopt); +TORCH_API at::Tensor & _upsample_bilinear2d_aa_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, bool align_corners, ::std::optional<double> scales_h, ::std::optional<double> scales_w, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/acosh_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/acosh_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..893ba418cde8351a4662feb90b5481cf9bfa277c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/acosh_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor acosh(const at::Tensor & self); +TORCH_API at::Tensor & acosh_(at::Tensor & self); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adjoint.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adjoint.h new file mode 100644 index 0000000000000000000000000000000000000000..315f8046deccb7c2fff8174e88418a1c209a235f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/adjoint.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::adjoint(Tensor(a) self) -> Tensor(a) +inline at::Tensor adjoint(const at::Tensor & self) { + return at::_ops::adjoint::call(self); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cauchy_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cauchy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6391439f4b4568edcdc8164a2d5e02e303c4c70b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cauchy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
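The compositeexplicitautograd header that follows declares the functional and out= forms of cauchy; the in-place draw lives on Tensor as cauchy_. A minimal sketch, assuming only the public ATen API:

#include <ATen/ATen.h>

at::Tensor cauchy_example() {
  at::Tensor t = at::empty({4});
  // In-place sample from Cauchy(median=0, sigma=1); at::cauchy and
  // at::cauchy_out are the generated non-mutating counterparts.
  t.cauchy_();
  return t;
}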
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor cauchy(const at::Tensor & self, double median=0, double sigma=1, ::std::optional<at::Generator> generator=::std::nullopt); +TORCH_API at::Tensor & cauchy_out(at::Tensor & out, const at::Tensor & self, double median=0, double sigma=1, ::std::optional<at::Generator> generator=::std::nullopt); +TORCH_API at::Tensor & cauchy_outf(const at::Tensor & self, double median, double sigma, ::std::optional<at::Generator> generator, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_meta.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..e579f6e2dad6b20439767396fcafd5b55646284c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/ceil_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_ceil : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace meta
} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cov_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cov_native.h new file mode 100644 index 0000000000000000000000000000000000000000..5dae50541955835673612f8fd49cf9b87968979d --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cov_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor cov(const at::Tensor & self, int64_t correction=1, const ::std::optional<at::Tensor> & fweights={}, const ::std::optional<at::Tensor> & aweights={}); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..e6ad76ecb1bb264b0501b533921867b25e6402e1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/crow_indices_copy.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::crow_indices_copy(Tensor self) -> Tensor +inline at::Tensor crow_indices_copy(const at::Tensor & self) { + return at::_ops::crow_indices_copy::call(self); +} + +// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & crow_indices_copy_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::crow_indices_copy_out::call(self, out); +} +// aten::crow_indices_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & crow_indices_copy_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::crow_indices_copy_out::call(self, out); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..acb29423b475c4d9ee7796a625715c038daa19e0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor cumprod(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt); +TORCH_API at::Tensor & cumprod_(at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype=::std::nullopt); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fliplr_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fliplr_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..9ee54af05f3c9af5ebd95544f733b4e69c746e83 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fliplr_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
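Each *_ops.h struct, like fliplr below, records the operator's schema string together with static call/redispatch entry points; the public at:: function is a thin wrapper over call. A minimal sketch, assuming only the public ATen API:

#include <ATen/ATen.h>

at::Tensor fliplr_example() {
  at::Tensor m = at::arange(6).reshape({2, 3});
  // Equivalent to at::fliplr(m): the public function forwards to the
  // generated at::_ops::fliplr::call entry point.
  return at::_ops::fliplr::call(m);
}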
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API fliplr { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fliplr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fliplr(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/frac_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/frac_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4252aa4d5b9782e823edad49d0244aa23952698f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/frac_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor frac(const at::Tensor & self); +TORCH_API at::Tensor & frac_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & frac_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & frac_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..fe10f78d34355d617c659ddce3acc4fb8377d0d1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/fractional_max_pool2d_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor fractional_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool2d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices); +TORCH_API at::Tensor & fractional_max_pool2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef output_size, const at::Tensor & indices, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/ger.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/ger.h new file mode 100644 index 0000000000000000000000000000000000000000..0911a3cfbbd7f77110152a05129093bd48b0c493 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/ger.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::ger(Tensor self, Tensor vec2) -> Tensor +inline at::Tensor ger(const at::Tensor & self, const at::Tensor & vec2) { + return at::_ops::ger::call(self, vec2); +} + +// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ger_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & vec2) { + return at::_ops::ger_out::call(self, vec2, out); +} +// aten::ger.out(Tensor self, Tensor vec2, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & ger_outf(const at::Tensor & self, const at::Tensor & vec2, at::Tensor & out) { + return at::_ops::ger_out::call(self, vec2, out); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a073bed34110233a9370f476e37ddadb51feb9f9 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/hypot_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor hypot(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & hypot_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..60fdbe6930a37e428bfaa7e238ed6c6d95b084ce --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/index_copy_native.h @@ -0,0 +1,25 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_index_copy_out : public at::meta::structured_index_copy { +void impl(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Tensor & out); +}; +TORCH_API at::Tensor & index_copy_(at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +TORCH_API at::Tensor index_copy(const at::Tensor & self, at::Dimname dim, const at::Tensor & index, const at::Tensor & source); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f94f5aa1224f5cc024b55afb159d164432a53856 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/index_fill_compositeexplicitautograd_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
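index_fill below is generated with parallel Scalar- and Tensor-valued overloads plus matching out= forms. A minimal sketch of the Scalar form, assuming only the public ATen API:

#include <ATen/ATen.h>

at::Tensor index_fill_example() {
  at::Tensor x = at::zeros({3, 4});
  at::Tensor idx = at::arange(0, 3, 2);  // int64 indices {0, 2}
  // Fill rows 0 and 2 with 7; passing a 0-dim Tensor as the value would
  // select the Tensor-valued overload instead.
  return at::index_fill(x, /*dim=*/0, idx, 7.0);
}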
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value); +TORCH_API at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor index_fill(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor & index_fill_out(at::Tensor & out, const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value); +TORCH_API at::Tensor & index_fill_outf(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & value, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b127f01036d92fc97c5ebb6ea56d3952c237d3ae --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/lerp_ops.h @@ -0,0 +1,83 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API lerp__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp_.Scalar(Tensor(a!) self, Tensor end, Scalar weight) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +}; + +struct TORCH_API lerp__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp_.Tensor(Tensor(a!) 
self, Tensor end, Tensor weight) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +}; + +struct TORCH_API lerp_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp.Scalar_out(Tensor self, Tensor end, Scalar weight, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight, at::Tensor & out); +}; + +struct TORCH_API lerp_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp.Tensor_out(Tensor self, Tensor end, Tensor weight, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight, at::Tensor & out); +}; + +struct TORCH_API lerp_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp.Scalar(Tensor self, Tensor end, Scalar weight) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Scalar & weight); +}; + +struct TORCH_API lerp_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lerp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lerp.Tensor(Tensor self, Tensor end, Tensor weight) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & end, const at::Tensor & weight); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_ops.h 
b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..7e060baf63e6b3d760020d5306eb814db4fa5b81 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_ldl_solve { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_ldl_solve") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_ldl_solve(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False) -> Tensor") + static at::Tensor call(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian); +}; + +struct TORCH_API linalg_ldl_solve_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const at::Tensor &, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_ldl_solve") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_ldl_solve.out(Tensor LD, Tensor pivots, Tensor B, *, bool hermitian=False, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3dcbd8ee55d501448ed804d5925eaf1ad56f5732 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_qr_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_qr(const at::Tensor & A, c10::string_view mode="reduced"); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..175d42d9973e19b376104b3804796f04468beb27 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logaddexp2_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API logaddexp2_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logaddexp2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logaddexp2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API logaddexp2 { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logaddexp2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logaddexp2(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5d973d6937fe13ed26a5d81acccf7baa0b21f446 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
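The schema strings in these structs map mechanically onto the C++ types: Tensor becomes at::Tensor, float? becomes ::std::optional<double> with a ::std::nullopt default, and the (a!) annotation marks the mutated output argument. A minimal sketch of the forward op, assuming only the public ATen API:

#include <ATen/ATen.h>

at::Tensor logit_example() {
  at::Tensor p = at::rand({5});
  // eps is float? in the schema, so it arrives as ::std::optional<double>;
  // here the input is clamped to [1e-6, 1 - 1e-6] before the log-odds.
  return at::logit(p, /*eps=*/1e-6);
}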
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API logit_backward_grad_input { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, ::std::optional<double>, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logit_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logit_backward.grad_input(Tensor grad_output, Tensor self, float? eps=None, *, Tensor(a!) grad_input) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps, at::Tensor & grad_input); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps, at::Tensor & grad_input); +}; + +struct TORCH_API logit_backward { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, ::std::optional<double>); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::logit_backward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "logit_backward(Tensor grad_output, Tensor self, float? eps=None) -> Tensor") + static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, ::std::optional<double> eps); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logit_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logit_native.h new file mode 100644 index 0000000000000000000000000000000000000000..508eaabc3a481fdc0786707ed8df620d57ff2bb4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logit_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor logit(const at::Tensor & self, ::std::optional<double> eps=::std::nullopt); +TORCH_API at::Tensor & logit_out(const at::Tensor & self, ::std::optional<double> eps, at::Tensor & out); +TORCH_API at::Tensor & logit_(at::Tensor & self, ::std::optional<double> eps=::std::nullopt); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..6244dbc3d042b5d1a0f52d34db577ce73960f091 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor maximum(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_relu_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_relu_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..182b563d90ef83419d4a3f701384095b8fca61b7 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_relu_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor miopen_convolution_relu(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, int64_t groups); +TORCH_API at::Tensor miopen_convolution_relu_symint(const at::Tensor & self, const at::Tensor & weight, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef dilation, c10::SymInt groups); + +} // namespace cuda
} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a9ed9b4a56f7af48052665e66f99f42929369e8f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/multi_margin_loss_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class.
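Schema defaults survive into these dispatch headers (p=1, margin=1, an empty weight and reduction=at::Reduction::Mean below), so the functional form needs only input and target. A minimal sketch, assuming only the public ATen API:

#include <ATen/ATen.h>

at::Tensor margin_loss_example() {
  at::Tensor input = at::randn({2, 5});   // two samples, five classes
  at::Tensor target = at::arange(2);      // int64 class indices {0, 1}
  // p, margin, weight and reduction all take the declared defaults.
  return at::multi_margin_loss(input, target);
}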
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor multi_margin_loss(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const ::std::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & multi_margin_loss_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & target, const at::Scalar & p=1, const at::Scalar & margin=1, const ::std::optional<at::Tensor> & weight={}, int64_t reduction=at::Reduction::Mean); +TORCH_API at::Tensor & multi_margin_loss_outf(const at::Tensor & self, const at::Tensor & target, const at::Scalar & p, const at::Scalar & margin, const ::std::optional<at::Tensor> & weight, int64_t reduction, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nested_to_padded_tensor_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nested_to_padded_tensor_native.h new file mode 100644 index 0000000000000000000000000000000000000000..94ac6693083dee764c53105e9d0c4926b6730091 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/nested_to_padded_tensor_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor nested_to_padded_tensor(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=::std::nullopt); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/norm_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1fb774d24e72e4f6aa9d95b69182a7877b23341f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/norm_native.h @@ -0,0 +1,36 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +TORCH_API at::Tensor norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype); +TORCH_API at::Tensor & norm_ScalarOpt_dtype_out(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::ScalarType dtype, at::Tensor & out); +TORCH_API at::Tensor norm(const at::Tensor & self, const at::Scalar & p=2); +TORCH_API at::Tensor & norm_Scalar_out(const at::Tensor & self, const at::Scalar & p, at::Tensor & out); +struct TORCH_API structured_norm_dtype_out : public at::meta::structured_norm_ScalarOpt_dim_dtype { +void impl(const at::Tensor & self, at::OptionalScalarRef p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, const at::Tensor & out); +}; +TORCH_API at::Tensor sparse_dtype_norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype); +struct TORCH_API structured_norm_out : public at::meta::structured_norm_ScalarOpt_dim { +void impl(const at::Tensor & self, at::OptionalScalarRef p, at::IntArrayRef dim, bool keepdim, const at::Tensor & out); +}; +TORCH_API at::Tensor sparse_norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype); +TORCH_API at::Tensor & norm_out(const
at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::ScalarType dtype, at::Tensor & out); +TORCH_API at::Tensor norm(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim=false); +TORCH_API at::Tensor & norm_out(const at::Tensor & self, const ::std::optional<at::Scalar> & p, at::DimnameList dim, bool keepdim, at::Tensor & out); +} // namespace native
} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/permute_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/permute_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c674af8a2adf0ffec8d5d028834e12824b3b6da0 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/permute_compositeexplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor permute(const at::Tensor & self, at::IntArrayRef dims); + +} // namespace compositeexplicitautograd
} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad3d_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..f4e55a8894f0cb8dcd46461fc47d5810a295bd3b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/replication_pad3d_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_replication_pad3d_out_cpu : public at::meta::structured_replication_pad3d { +void impl(const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & out); +}; +struct TORCH_API structured_replication_pad3d_out_cuda : public at::meta::structured_replication_pad3d { +void impl(const at::Tensor & self, at::ArrayRef<int64_t> padding, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/resize_as_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/resize_as_native.h new file mode 100644 index 0000000000000000000000000000000000000000..31454bd38c4e7400c7db8b34a55ce18d9c8ea939 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/resize_as_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor resize_as(const at::Tensor & self, const at::Tensor & the_template, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt); +TORCH_API const at::Tensor & resize_as_out(const at::Tensor & self, const at::Tensor & the_template, ::std::optional<at::MemoryFormat>
memory_format, const at::Tensor & out); +TORCH_API const at::Tensor & resize_as_(const at::Tensor & self, const at::Tensor & the_template, ::std::optional memory_format=::std::nullopt); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..670661c053004e8c4ea9f332ba6f48f22d5d41f5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_modified_bessel_i0_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API special_modified_bessel_i0 { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_modified_bessel_i0") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_modified_bessel_i0(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API special_modified_bessel_i0_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_modified_bessel_i0") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_modified_bessel_i0.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_softmax_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_softmax_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..c9f2ad0eca899f41d561d783c393d493d08806b5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_softmax_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API special_softmax {
+  using schema = at::Tensor (const at::Tensor &, int64_t, ::std::optional<at::ScalarType>);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::special_softmax")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "special_softmax(Tensor self, int dim, ScalarType? dtype=None) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, int64_t dim, ::std::optional<at::ScalarType> dtype);
+};
+
+}} // namespace at::_ops
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_zeta_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_zeta_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..dbaa2ea7751a49ac5fe43b690948b675d02bdadc
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_zeta_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor special_zeta(const at::Tensor & self, const at::Tensor & other);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_backward_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..bbd0e4502aa39ac5c9f4c5ae6658c01610973f2e
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/tanh_backward_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <optional>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/tanh_backward_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_tanh_backward_out : public at::meta::structured_tanh_backward {
+void impl(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & grad_input);
+};
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae4c086404ab3421c8655897022b565b2b8781a0
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & unfold_backward_out(at::Tensor & out, const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step);
+TORCH_API at::Tensor & unfold_backward_outf(const at::Tensor & grad_in, at::IntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out);
+TORCH_API at::Tensor & unfold_backward_symint_out(at::Tensor & out, const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step);
+TORCH_API at::Tensor & unfold_backward_symint_outf(const at::Tensor & grad_in, c10::SymIntArrayRef input_sizes, int64_t dim, int64_t size, int64_t step, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..79ef868b9e7808ad0c0a83e9dc210cdf1d39c8ed
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/unfold_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API at::Tensor unfold(const at::Tensor & self, int64_t dimension, int64_t size, int64_t step);
+
+} // namespace cuda
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/var_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/var_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b373a33d14e67826f2495c1a76afbfc91ccafaf
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/var_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor var(const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, const ::std::optional<at::Scalar> & correction=::std::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & var_out(at::Tensor & out, const at::Tensor & self, at::OptionalIntArrayRef dim=::std::nullopt, const ::std::optional<at::Scalar> & correction=::std::nullopt, bool keepdim=false);
+TORCH_API at::Tensor & var_outf(const at::Tensor & self, at::OptionalIntArrayRef dim, const ::std::optional<at::Scalar> & correction, bool keepdim, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
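
Usage note (editorial, not part of the diff): the headers added above are torchgen-generated declarations that are normally reached through the umbrella header ATen/ATen.h; the per-dispatch-key headers such as var_cpu_dispatch.h additionally expose backend-specific entry points that bypass the dispatcher. A minimal sketch, assuming a libtorch build matching these vendored headers (the file name and build setup below are illustrative assumptions):

// sketch.cpp -- illustrative only; assumes libtorch headers and libs are on the build path
#include <ATen/ATen.h>                   // umbrella header; declares the dispatched at::var
#include <ATen/ops/var_cpu_dispatch.h>   // added in this diff; declares at::cpu::var

int main() {
  at::Tensor t = at::rand({4, 8});                  // CPU float tensor
  at::Tensor v1 = at::var(t, /*unbiased=*/true);    // normal entry point, routed through the dispatcher
  at::Tensor v2 = at::cpu::var(t);                  // direct CPU declaration, skips dispatch
  return v1.allclose(v2) ? 0 : 1;                   // both paths should agree on a CPU tensor
}

The direct at::cpu::* form only works for tensors already on that backend; the dispatched form is what TensorBody.h and the Python bindings use.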