diff --git a/parrot/lib/python3.10/importlib/__pycache__/machinery.cpython-310.pyc b/parrot/lib/python3.10/importlib/__pycache__/machinery.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d37f98a95a1ed51154ff8b8d8a6ee7e403c9728b Binary files /dev/null and b/parrot/lib/python3.10/importlib/__pycache__/machinery.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/importlib/__pycache__/readers.cpython-310.pyc b/parrot/lib/python3.10/importlib/__pycache__/readers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..818618e6bbb89e65ff60f88f8a4ad0122443c460 Binary files /dev/null and b/parrot/lib/python3.10/importlib/__pycache__/readers.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/importlib/_adapters.py b/parrot/lib/python3.10/importlib/_adapters.py new file mode 100644 index 0000000000000000000000000000000000000000..e72edd10705c26a2726d798f94c3ce11f3aee230 --- /dev/null +++ b/parrot/lib/python3.10/importlib/_adapters.py @@ -0,0 +1,83 @@ +from contextlib import suppress + +from . import abc + + +class SpecLoaderAdapter: + """ + Adapt a package spec to adapt the underlying loader. + """ + + def __init__(self, spec, adapter=lambda spec: spec.loader): + self.spec = spec + self.loader = adapter(spec) + + def __getattr__(self, name): + return getattr(self.spec, name) + + +class TraversableResourcesLoader: + """ + Adapt a loader to provide TraversableResources. + """ + + def __init__(self, spec): + self.spec = spec + + def get_resource_reader(self, name): + return DegenerateFiles(self.spec)._native() + + +class DegenerateFiles: + """ + Adapter for an existing or non-existant resource reader + to provide a degenerate .files(). 
+ """ + + class Path(abc.Traversable): + def iterdir(self): + return iter(()) + + def is_dir(self): + return False + + is_file = exists = is_dir # type: ignore + + def joinpath(self, other): + return DegenerateFiles.Path() + + @property + def name(self): + return '' + + def open(self, mode='rb', *args, **kwargs): + raise ValueError() + + def __init__(self, spec): + self.spec = spec + + @property + def _reader(self): + with suppress(AttributeError): + return self.spec.loader.get_resource_reader(self.spec.name) + + def _native(self): + """ + Return the native reader if it supports files(). + """ + reader = self._reader + return reader if hasattr(reader, 'files') else self + + def __getattr__(self, attr): + return getattr(self._reader, attr) + + def files(self): + return DegenerateFiles.Path() + + +def wrap_spec(package): + """ + Construct a package spec with traversable compatibility + on the spec/loader/reader. + """ + return SpecLoaderAdapter(package.__spec__, TraversableResourcesLoader) diff --git a/parrot/lib/python3.10/importlib/_bootstrap_external.py b/parrot/lib/python3.10/importlib/_bootstrap_external.py new file mode 100644 index 0000000000000000000000000000000000000000..49bcaea78d76509ad69ba509a41bf0c42ed996b6 --- /dev/null +++ b/parrot/lib/python3.10/importlib/_bootstrap_external.py @@ -0,0 +1,1686 @@ +"""Core implementation of path-based import. + +This module is NOT meant to be directly imported! It has been designed such +that it can be bootstrapped into Python as the implementation of import. As +such it requires the injection of specific modules and attributes in order to +work. One should use importlib as the public-facing version of this module. + +""" +# IMPORTANT: Whenever making changes to this module, be sure to run a top-level +# `make regen-importlib` followed by `make` in order to get the frozen version +# of the module updated. 
Not doing so will result in the Makefile to fail for +# all others who don't have a ./python around to freeze the module in the early +# stages of compilation. +# + +# See importlib._setup() for what is injected into the global namespace. + +# When editing this code be aware that code executed at import time CANNOT +# reference any injected objects! This includes not only global code but also +# anything specified at the class level. + +# Module injected manually by _set_bootstrap_module() +_bootstrap = None + +# Import builtin modules +import _imp +import _io +import sys +import _warnings +import marshal + + +_MS_WINDOWS = (sys.platform == 'win32') +if _MS_WINDOWS: + import nt as _os + import winreg +else: + import posix as _os + + +if _MS_WINDOWS: + path_separators = ['\\', '/'] +else: + path_separators = ['/'] +# Assumption made in _path_join() +assert all(len(sep) == 1 for sep in path_separators) +path_sep = path_separators[0] +path_sep_tuple = tuple(path_separators) +path_separators = ''.join(path_separators) +_pathseps_with_colon = {f':{s}' for s in path_separators} + + +# Bootstrap-related code ###################################################### +_CASE_INSENSITIVE_PLATFORMS_STR_KEY = 'win', +_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY = 'cygwin', 'darwin' +_CASE_INSENSITIVE_PLATFORMS = (_CASE_INSENSITIVE_PLATFORMS_BYTES_KEY + + _CASE_INSENSITIVE_PLATFORMS_STR_KEY) + + +def _make_relax_case(): + if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): + if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS_STR_KEY): + key = 'PYTHONCASEOK' + else: + key = b'PYTHONCASEOK' + + def _relax_case(): + """True if filenames must be checked case-insensitively and ignore environment flags are not set.""" + return not sys.flags.ignore_environment and key in _os.environ + else: + def _relax_case(): + """True if filenames must be checked case-insensitively.""" + return False + return _relax_case + +_relax_case = _make_relax_case() + + +def _pack_uint32(x): + """Convert a 
32-bit integer to little-endian.""" + return (int(x) & 0xFFFFFFFF).to_bytes(4, 'little') + + +def _unpack_uint32(data): + """Convert 4 bytes in little-endian to an integer.""" + assert len(data) == 4 + return int.from_bytes(data, 'little') + +def _unpack_uint16(data): + """Convert 2 bytes in little-endian to an integer.""" + assert len(data) == 2 + return int.from_bytes(data, 'little') + + +if _MS_WINDOWS: + def _path_join(*path_parts): + """Replacement for os.path.join().""" + if not path_parts: + return "" + if len(path_parts) == 1: + return path_parts[0] + root = "" + path = [] + for new_root, tail in map(_os._path_splitroot, path_parts): + if new_root.startswith(path_sep_tuple) or new_root.endswith(path_sep_tuple): + root = new_root.rstrip(path_separators) or root + path = [path_sep + tail] + elif new_root.endswith(':'): + if root.casefold() != new_root.casefold(): + # Drive relative paths have to be resolved by the OS, so we reset the + # tail but do not add a path_sep prefix. + root = new_root + path = [tail] + else: + path.append(tail) + else: + root = new_root or root + path.append(tail) + path = [p.rstrip(path_separators) for p in path if p] + if len(path) == 1 and not path[0]: + # Avoid losing the root's trailing separator when joining with nothing + return root + path_sep + return root + path_sep.join(path) + +else: + def _path_join(*path_parts): + """Replacement for os.path.join().""" + return path_sep.join([part.rstrip(path_separators) + for part in path_parts if part]) + + +def _path_split(path): + """Replacement for os.path.split().""" + i = max(path.rfind(p) for p in path_separators) + if i < 0: + return '', path + return path[:i], path[i + 1:] + + +def _path_stat(path): + """Stat the path. + + Made a separate function to make it easier to override in experiments + (e.g. cache stat results). 
+ + """ + return _os.stat(path) + + +def _path_is_mode_type(path, mode): + """Test whether the path is the specified mode type.""" + try: + stat_info = _path_stat(path) + except OSError: + return False + return (stat_info.st_mode & 0o170000) == mode + + +def _path_isfile(path): + """Replacement for os.path.isfile.""" + return _path_is_mode_type(path, 0o100000) + + +def _path_isdir(path): + """Replacement for os.path.isdir.""" + if not path: + path = _os.getcwd() + return _path_is_mode_type(path, 0o040000) + + +if _MS_WINDOWS: + def _path_isabs(path): + """Replacement for os.path.isabs.""" + if not path: + return False + root = _os._path_splitroot(path)[0].replace('/', '\\') + return len(root) > 1 and (root.startswith('\\\\') or root.endswith('\\')) + +else: + def _path_isabs(path): + """Replacement for os.path.isabs.""" + return path.startswith(path_separators) + + +def _write_atomic(path, data, mode=0o666): + """Best-effort function to write data to a path atomically. + Be prepared to handle a FileExistsError if concurrent writing of the + temporary file is attempted.""" + # id() is used to generate a pseudo-random filename. + path_tmp = '{}.{}'.format(path, id(path)) + fd = _os.open(path_tmp, + _os.O_EXCL | _os.O_CREAT | _os.O_WRONLY, mode & 0o666) + try: + # We first write data to a temporary file, and then use os.replace() to + # perform an atomic rename. + with _io.FileIO(fd, 'wb') as file: + file.write(data) + _os.replace(path_tmp, path) + except OSError: + try: + _os.unlink(path_tmp) + except OSError: + pass + raise + + +_code_type = type(_write_atomic.__code__) + + +# Finder/loader utility code ############################################### + +# Magic word to reject .pyc files generated by other Python versions. +# It should change for each incompatible change to the bytecode. 
+# +# The value of CR and LF is incorporated so if you ever read or write +# a .pyc file in text mode the magic number will be wrong; also, the +# Apple MPW compiler swaps their values, botching string constants. +# +# There were a variety of old schemes for setting the magic number. +# The current working scheme is to increment the previous value by +# 10. +# +# Starting with the adoption of PEP 3147 in Python 3.2, every bump in magic +# number also includes a new "magic tag", i.e. a human readable string used +# to represent the magic number in __pycache__ directories. When you change +# the magic number, you must also set a new unique magic tag. Generally this +# can be named after the Python major version of the magic number bump, but +# it can really be anything, as long as it's different than anything else +# that's come before. The tags are included in the following table, starting +# with Python 3.2a0. +# +# Known values: +# Python 1.5: 20121 +# Python 1.5.1: 20121 +# Python 1.5.2: 20121 +# Python 1.6: 50428 +# Python 2.0: 50823 +# Python 2.0.1: 50823 +# Python 2.1: 60202 +# Python 2.1.1: 60202 +# Python 2.1.2: 60202 +# Python 2.2: 60717 +# Python 2.3a0: 62011 +# Python 2.3a0: 62021 +# Python 2.3a0: 62011 (!) +# Python 2.4a0: 62041 +# Python 2.4a3: 62051 +# Python 2.4b1: 62061 +# Python 2.5a0: 62071 +# Python 2.5a0: 62081 (ast-branch) +# Python 2.5a0: 62091 (with) +# Python 2.5a0: 62092 (changed WITH_CLEANUP opcode) +# Python 2.5b3: 62101 (fix wrong code: for x, in ...) +# Python 2.5b3: 62111 (fix wrong code: x += yield) +# Python 2.5c1: 62121 (fix wrong lnotab with for loops and +# storing constants that should have been removed) +# Python 2.5c2: 62131 (fix wrong code: for x, in ... 
in listcomp/genexp) +# Python 2.6a0: 62151 (peephole optimizations and STORE_MAP opcode) +# Python 2.6a1: 62161 (WITH_CLEANUP optimization) +# Python 2.7a0: 62171 (optimize list comprehensions/change LIST_APPEND) +# Python 2.7a0: 62181 (optimize conditional branches: +# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE) +# Python 2.7a0 62191 (introduce SETUP_WITH) +# Python 2.7a0 62201 (introduce BUILD_SET) +# Python 2.7a0 62211 (introduce MAP_ADD and SET_ADD) +# Python 3000: 3000 +# 3010 (removed UNARY_CONVERT) +# 3020 (added BUILD_SET) +# 3030 (added keyword-only parameters) +# 3040 (added signature annotations) +# 3050 (print becomes a function) +# 3060 (PEP 3115 metaclass syntax) +# 3061 (string literals become unicode) +# 3071 (PEP 3109 raise changes) +# 3081 (PEP 3137 make __file__ and __name__ unicode) +# 3091 (kill str8 interning) +# 3101 (merge from 2.6a0, see 62151) +# 3103 (__file__ points to source file) +# Python 3.0a4: 3111 (WITH_CLEANUP optimization). +# Python 3.0b1: 3131 (lexical exception stacking, including POP_EXCEPT + #3021) +# Python 3.1a1: 3141 (optimize list, set and dict comprehensions: +# change LIST_APPEND and SET_ADD, add MAP_ADD #2183) +# Python 3.1a1: 3151 (optimize conditional branches: +# introduce POP_JUMP_IF_FALSE and POP_JUMP_IF_TRUE + #4715) +# Python 3.2a1: 3160 (add SETUP_WITH #6101) +# tag: cpython-32 +# Python 3.2a2: 3170 (add DUP_TOP_TWO, remove DUP_TOPX and ROT_FOUR #9225) +# tag: cpython-32 +# Python 3.2a3 3180 (add DELETE_DEREF #4617) +# Python 3.3a1 3190 (__class__ super closure changed) +# Python 3.3a1 3200 (PEP 3155 __qualname__ added #13448) +# Python 3.3a1 3210 (added size modulo 2**32 to the pyc header #13645) +# Python 3.3a2 3220 (changed PEP 380 implementation #14230) +# Python 3.3a4 3230 (revert changes to implicit __class__ closure #14857) +# Python 3.4a1 3250 (evaluate positional default arguments before +# keyword-only defaults #16967) +# Python 3.4a1 3260 (add LOAD_CLASSDEREF; allow locals of class to override 
+# free vars #17853) +# Python 3.4a1 3270 (various tweaks to the __class__ closure #12370) +# Python 3.4a1 3280 (remove implicit class argument) +# Python 3.4a4 3290 (changes to __qualname__ computation #19301) +# Python 3.4a4 3300 (more changes to __qualname__ computation #19301) +# Python 3.4rc2 3310 (alter __qualname__ computation #20625) +# Python 3.5a1 3320 (PEP 465: Matrix multiplication operator #21176) +# Python 3.5b1 3330 (PEP 448: Additional Unpacking Generalizations #2292) +# Python 3.5b2 3340 (fix dictionary display evaluation order #11205) +# Python 3.5b3 3350 (add GET_YIELD_FROM_ITER opcode #24400) +# Python 3.5.2 3351 (fix BUILD_MAP_UNPACK_WITH_CALL opcode #27286) +# Python 3.6a0 3360 (add FORMAT_VALUE opcode #25483) +# Python 3.6a1 3361 (lineno delta of code.co_lnotab becomes signed #26107) +# Python 3.6a2 3370 (16 bit wordcode #26647) +# Python 3.6a2 3371 (add BUILD_CONST_KEY_MAP opcode #27140) +# Python 3.6a2 3372 (MAKE_FUNCTION simplification, remove MAKE_CLOSURE +# #27095) +# Python 3.6b1 3373 (add BUILD_STRING opcode #27078) +# Python 3.6b1 3375 (add SETUP_ANNOTATIONS and STORE_ANNOTATION opcodes +# #27985) +# Python 3.6b1 3376 (simplify CALL_FUNCTIONs & BUILD_MAP_UNPACK_WITH_CALL + #27213) +# Python 3.6b1 3377 (set __class__ cell from type.__new__ #23722) +# Python 3.6b2 3378 (add BUILD_TUPLE_UNPACK_WITH_CALL #28257) +# Python 3.6rc1 3379 (more thorough __class__ validation #23722) +# Python 3.7a1 3390 (add LOAD_METHOD and CALL_METHOD opcodes #26110) +# Python 3.7a2 3391 (update GET_AITER #31709) +# Python 3.7a4 3392 (PEP 552: Deterministic pycs #31650) +# Python 3.7b1 3393 (remove STORE_ANNOTATION opcode #32550) +# Python 3.7b5 3394 (restored docstring as the first stmt in the body; +# this might affected the first line number #32911) +# Python 3.8a1 3400 (move frame block handling to compiler #17611) +# Python 3.8a1 3401 (add END_ASYNC_FOR #33041) +# Python 3.8a1 3410 (PEP570 Python Positional-Only Parameters #36540) +# Python 3.8b2 3411 
(Reverse evaluation order of key: value in dict +# comprehensions #35224) +# Python 3.8b2 3412 (Swap the position of positional args and positional +# only args in ast.arguments #37593) +# Python 3.8b4 3413 (Fix "break" and "continue" in "finally" #37830) +# Python 3.9a0 3420 (add LOAD_ASSERTION_ERROR #34880) +# Python 3.9a0 3421 (simplified bytecode for with blocks #32949) +# Python 3.9a0 3422 (remove BEGIN_FINALLY, END_FINALLY, CALL_FINALLY, POP_FINALLY bytecodes #33387) +# Python 3.9a2 3423 (add IS_OP, CONTAINS_OP and JUMP_IF_NOT_EXC_MATCH bytecodes #39156) +# Python 3.9a2 3424 (simplify bytecodes for *value unpacking) +# Python 3.9a2 3425 (simplify bytecodes for **value unpacking) +# Python 3.10a1 3430 (Make 'annotations' future by default) +# Python 3.10a1 3431 (New line number table format -- PEP 626) +# Python 3.10a2 3432 (Function annotation for MAKE_FUNCTION is changed from dict to tuple bpo-42202) +# Python 3.10a2 3433 (RERAISE restores f_lasti if oparg != 0) +# Python 3.10a6 3434 (PEP 634: Structural Pattern Matching) +# Python 3.10a7 3435 Use instruction offsets (as opposed to byte offsets). +# Python 3.10b1 3436 (Add GEN_START bytecode #43683) +# Python 3.10b1 3437 (Undo making 'annotations' future by default - We like to dance among core devs!) +# Python 3.10b1 3438 Safer line number table handling. +# Python 3.10b1 3439 (Add ROT_N) + +# +# MAGIC must change whenever the bytecode emitted by the compiler may no +# longer be understood by older implementations of the eval loop (usually +# due to the addition of new opcodes). +# +# Whenever MAGIC_NUMBER is changed, the ranges in the magic_values array +# in PC/launcher.c must also be updated. 
+ +MAGIC_NUMBER = (3439).to_bytes(2, 'little') + b'\r\n' +_RAW_MAGIC_NUMBER = int.from_bytes(MAGIC_NUMBER, 'little') # For import.c + +_PYCACHE = '__pycache__' +_OPT = 'opt-' + +SOURCE_SUFFIXES = ['.py'] +if _MS_WINDOWS: + SOURCE_SUFFIXES.append('.pyw') + +EXTENSION_SUFFIXES = _imp.extension_suffixes() + +BYTECODE_SUFFIXES = ['.pyc'] +# Deprecated. +DEBUG_BYTECODE_SUFFIXES = OPTIMIZED_BYTECODE_SUFFIXES = BYTECODE_SUFFIXES + +def cache_from_source(path, debug_override=None, *, optimization=None): + """Given the path to a .py file, return the path to its .pyc file. + + The .py file does not need to exist; this simply returns the path to the + .pyc file calculated as if the .py file were imported. + + The 'optimization' parameter controls the presumed optimization level of + the bytecode file. If 'optimization' is not None, the string representation + of the argument is taken and verified to be alphanumeric (else ValueError + is raised). + + The debug_override parameter is deprecated. If debug_override is not None, + a True value is the same as setting 'optimization' to the empty string + while a False value is equivalent to setting 'optimization' to '1'. + + If sys.implementation.cache_tag is None then NotImplementedError is raised. 
+ + """ + if debug_override is not None: + _warnings.warn('the debug_override parameter is deprecated; use ' + "'optimization' instead", DeprecationWarning) + if optimization is not None: + message = 'debug_override or optimization must be set to None' + raise TypeError(message) + optimization = '' if debug_override else 1 + path = _os.fspath(path) + head, tail = _path_split(path) + base, sep, rest = tail.rpartition('.') + tag = sys.implementation.cache_tag + if tag is None: + raise NotImplementedError('sys.implementation.cache_tag is None') + almost_filename = ''.join([(base if base else rest), sep, tag]) + if optimization is None: + if sys.flags.optimize == 0: + optimization = '' + else: + optimization = sys.flags.optimize + optimization = str(optimization) + if optimization != '': + if not optimization.isalnum(): + raise ValueError('{!r} is not alphanumeric'.format(optimization)) + almost_filename = '{}.{}{}'.format(almost_filename, _OPT, optimization) + filename = almost_filename + BYTECODE_SUFFIXES[0] + if sys.pycache_prefix is not None: + # We need an absolute path to the py file to avoid the possibility of + # collisions within sys.pycache_prefix, if someone has two different + # `foo/bar.py` on their system and they import both of them using the + # same sys.pycache_prefix. Let's say sys.pycache_prefix is + # `C:\Bytecode`; the idea here is that if we get `Foo\Bar`, we first + # make it absolute (`C:\Somewhere\Foo\Bar`), then make it root-relative + # (`Somewhere\Foo\Bar`), so we end up placing the bytecode file in an + # unambiguous `C:\Bytecode\Somewhere\Foo\Bar\`. + if not _path_isabs(head): + head = _path_join(_os.getcwd(), head) + + # Strip initial drive from a Windows path. We know we have an absolute + # path here, so the second part of the check rules out a POSIX path that + # happens to contain a colon at the second character. 
+ if head[1] == ':' and head[0] not in path_separators: + head = head[2:] + + # Strip initial path separator from `head` to complete the conversion + # back to a root-relative path before joining. + return _path_join( + sys.pycache_prefix, + head.lstrip(path_separators), + filename, + ) + return _path_join(head, _PYCACHE, filename) + + +def source_from_cache(path): + """Given the path to a .pyc. file, return the path to its .py file. + + The .pyc file does not need to exist; this simply returns the path to + the .py file calculated to correspond to the .pyc file. If path does + not conform to PEP 3147/488 format, ValueError will be raised. If + sys.implementation.cache_tag is None then NotImplementedError is raised. + + """ + if sys.implementation.cache_tag is None: + raise NotImplementedError('sys.implementation.cache_tag is None') + path = _os.fspath(path) + head, pycache_filename = _path_split(path) + found_in_pycache_prefix = False + if sys.pycache_prefix is not None: + stripped_path = sys.pycache_prefix.rstrip(path_separators) + if head.startswith(stripped_path + path_sep): + head = head[len(stripped_path):] + found_in_pycache_prefix = True + if not found_in_pycache_prefix: + head, pycache = _path_split(head) + if pycache != _PYCACHE: + raise ValueError(f'{_PYCACHE} not bottom-level directory in ' + f'{path!r}') + dot_count = pycache_filename.count('.') + if dot_count not in {2, 3}: + raise ValueError(f'expected only 2 or 3 dots in {pycache_filename!r}') + elif dot_count == 3: + optimization = pycache_filename.rsplit('.', 2)[-2] + if not optimization.startswith(_OPT): + raise ValueError("optimization portion of filename does not start " + f"with {_OPT!r}") + opt_level = optimization[len(_OPT):] + if not opt_level.isalnum(): + raise ValueError(f"optimization level {optimization!r} is not an " + "alphanumeric value") + base_filename = pycache_filename.partition('.')[0] + return _path_join(head, base_filename + SOURCE_SUFFIXES[0]) + + +def 
_get_sourcefile(bytecode_path): + """Convert a bytecode file path to a source path (if possible). + + This function exists purely for backwards-compatibility for + PyImport_ExecCodeModuleWithFilenames() in the C API. + + """ + if len(bytecode_path) == 0: + return None + rest, _, extension = bytecode_path.rpartition('.') + if not rest or extension.lower()[-3:-1] != 'py': + return bytecode_path + try: + source_path = source_from_cache(bytecode_path) + except (NotImplementedError, ValueError): + source_path = bytecode_path[:-1] + return source_path if _path_isfile(source_path) else bytecode_path + + +def _get_cached(filename): + if filename.endswith(tuple(SOURCE_SUFFIXES)): + try: + return cache_from_source(filename) + except NotImplementedError: + pass + elif filename.endswith(tuple(BYTECODE_SUFFIXES)): + return filename + else: + return None + + +def _calc_mode(path): + """Calculate the mode permissions for a bytecode file.""" + try: + mode = _path_stat(path).st_mode + except OSError: + mode = 0o666 + # We always ensure write access so we can update cached files + # later even when the source files are read-only on Windows (#6074) + mode |= 0o200 + return mode + + +def _check_name(method): + """Decorator to verify that the module being requested matches the one the + loader can handle. + + The first argument (self) must define _name which the second argument is + compared against. If the comparison fails then ImportError is raised. + + """ + def _check_name_wrapper(self, name=None, *args, **kwargs): + if name is None: + name = self.name + elif self.name != name: + raise ImportError('loader for %s cannot handle %s' % + (self.name, name), name=name) + return method(self, name, *args, **kwargs) + + # FIXME: @_check_name is used to define class methods before the + # _bootstrap module is set by _set_bootstrap_module(). 
+ if _bootstrap is not None: + _wrap = _bootstrap._wrap + else: + def _wrap(new, old): + for replace in ['__module__', '__name__', '__qualname__', '__doc__']: + if hasattr(old, replace): + setattr(new, replace, getattr(old, replace)) + new.__dict__.update(old.__dict__) + + _wrap(_check_name_wrapper, method) + return _check_name_wrapper + + +def _find_module_shim(self, fullname): + """Try to find a loader for the specified module by delegating to + self.find_loader(). + + This method is deprecated in favor of finder.find_spec(). + + """ + _warnings.warn("find_module() is deprecated and " + "slated for removal in Python 3.12; use find_spec() instead", + DeprecationWarning) + # Call find_loader(). If it returns a string (indicating this + # is a namespace package portion), generate a warning and + # return None. + loader, portions = self.find_loader(fullname) + if loader is None and len(portions): + msg = 'Not importing directory {}: missing __init__' + _warnings.warn(msg.format(portions[0]), ImportWarning) + return loader + + +def _classify_pyc(data, name, exc_details): + """Perform basic validity checking of a pyc header and return the flags field, + which determines how the pyc should be further validated against the source. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required, though.) + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + ImportError is raised when the magic number is incorrect or when the flags + field is invalid. EOFError is raised when the data is found to be truncated. 
+ + """ + magic = data[:4] + if magic != MAGIC_NUMBER: + message = f'bad magic number in {name!r}: {magic!r}' + _bootstrap._verbose_message('{}', message) + raise ImportError(message, **exc_details) + if len(data) < 16: + message = f'reached EOF while reading pyc header of {name!r}' + _bootstrap._verbose_message('{}', message) + raise EOFError(message) + flags = _unpack_uint32(data[4:8]) + # Only the first two flags are defined. + if flags & ~0b11: + message = f'invalid flags {flags!r} in {name!r}' + raise ImportError(message, **exc_details) + return flags + + +def _validate_timestamp_pyc(data, source_mtime, source_size, name, + exc_details): + """Validate a pyc against the source last-modified time. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_mtime* is the last modified timestamp of the source file. + + *source_size* is None or the size of the source file in bytes. + + *name* is the name of the module being imported. It is used for logging. + + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. + + """ + if _unpack_uint32(data[8:12]) != (source_mtime & 0xFFFFFFFF): + message = f'bytecode is stale for {name!r}' + _bootstrap._verbose_message('{}', message) + raise ImportError(message, **exc_details) + if (source_size is not None and + _unpack_uint32(data[12:16]) != (source_size & 0xFFFFFFFF)): + raise ImportError(f'bytecode is stale for {name!r}', **exc_details) + + +def _validate_hash_pyc(data, source_hash, name, exc_details): + """Validate a hash-based pyc by checking the real source hash against the one in + the pyc header. + + *data* is the contents of the pyc file. (Only the first 16 bytes are + required.) + + *source_hash* is the importlib.util.source_hash() of the source file. + + *name* is the name of the module being imported. It is used for logging. 
+ + *exc_details* is a dictionary passed to ImportError if it raised for + improved debugging. + + An ImportError is raised if the bytecode is stale. + + """ + if data[8:16] != source_hash: + raise ImportError( + f'hash in bytecode doesn\'t match hash of source {name!r}', + **exc_details, + ) + + +def _compile_bytecode(data, name=None, bytecode_path=None, source_path=None): + """Compile bytecode as found in a pyc.""" + code = marshal.loads(data) + if isinstance(code, _code_type): + _bootstrap._verbose_message('code object from {!r}', bytecode_path) + if source_path is not None: + _imp._fix_co_filename(code, source_path) + return code + else: + raise ImportError('Non-code object in {!r}'.format(bytecode_path), + name=name, path=bytecode_path) + + +def _code_to_timestamp_pyc(code, mtime=0, source_size=0): + "Produce the data for a timestamp-based pyc." + data = bytearray(MAGIC_NUMBER) + data.extend(_pack_uint32(0)) + data.extend(_pack_uint32(mtime)) + data.extend(_pack_uint32(source_size)) + data.extend(marshal.dumps(code)) + return data + + +def _code_to_hash_pyc(code, source_hash, checked=True): + "Produce the data for a hash-based pyc." + data = bytearray(MAGIC_NUMBER) + flags = 0b1 | checked << 1 + data.extend(_pack_uint32(flags)) + assert len(source_hash) == 8 + data.extend(source_hash) + data.extend(marshal.dumps(code)) + return data + + +def decode_source(source_bytes): + """Decode bytes representing source code and return the string. + + Universal newline support is used in the decoding. + """ + import tokenize # To avoid bootstrap issues. 
+ source_bytes_readline = _io.BytesIO(source_bytes).readline + encoding = tokenize.detect_encoding(source_bytes_readline) + newline_decoder = _io.IncrementalNewlineDecoder(None, True) + return newline_decoder.decode(source_bytes.decode(encoding[0])) + + +# Module specifications ####################################################### + +_POPULATE = object() + + +def spec_from_file_location(name, location=None, *, loader=None, + submodule_search_locations=_POPULATE): + """Return a module spec based on a file location. + + To indicate that the module is a package, set + submodule_search_locations to a list of directory paths. An + empty list is sufficient, though its not otherwise useful to the + import system. + + The loader must take a spec as its only __init__() arg. + + """ + if location is None: + # The caller may simply want a partially populated location- + # oriented spec. So we set the location to a bogus value and + # fill in as much as we can. + location = '' + if hasattr(loader, 'get_filename'): + # ExecutionLoader + try: + location = loader.get_filename(name) + except ImportError: + pass + else: + location = _os.fspath(location) + if not _path_isabs(location): + try: + location = _path_join(_os.getcwd(), location) + except OSError: + pass + + # If the location is on the filesystem, but doesn't actually exist, + # we could return None here, indicating that the location is not + # valid. However, we don't have a good way of testing since an + # indirect location (e.g. a zip file or URL) will look like a + # non-existent file relative to the filesystem. + + spec = _bootstrap.ModuleSpec(name, loader, origin=location) + spec._set_fileattr = True + + # Pick a loader if one wasn't provided. + if loader is None: + for loader_class, suffixes in _get_supported_file_loaders(): + if location.endswith(tuple(suffixes)): + loader = loader_class(name, location) + spec.loader = loader + break + else: + return None + + # Set submodule_search_paths appropriately. 
+ if submodule_search_locations is _POPULATE: + # Check the loader. + if hasattr(loader, 'is_package'): + try: + is_package = loader.is_package(name) + except ImportError: + pass + else: + if is_package: + spec.submodule_search_locations = [] + else: + spec.submodule_search_locations = submodule_search_locations + if spec.submodule_search_locations == []: + if location: + dirname = _path_split(location)[0] + spec.submodule_search_locations.append(dirname) + + return spec + + +# Loaders ##################################################################### + +class WindowsRegistryFinder: + + """Meta path finder for modules declared in the Windows registry.""" + + REGISTRY_KEY = ( + 'Software\\Python\\PythonCore\\{sys_version}' + '\\Modules\\{fullname}') + REGISTRY_KEY_DEBUG = ( + 'Software\\Python\\PythonCore\\{sys_version}' + '\\Modules\\{fullname}\\Debug') + DEBUG_BUILD = (_MS_WINDOWS and '_d.pyd' in EXTENSION_SUFFIXES) + + @staticmethod + def _open_registry(key): + try: + return winreg.OpenKey(winreg.HKEY_CURRENT_USER, key) + except OSError: + return winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key) + + @classmethod + def _search_registry(cls, fullname): + if cls.DEBUG_BUILD: + registry_key = cls.REGISTRY_KEY_DEBUG + else: + registry_key = cls.REGISTRY_KEY + key = registry_key.format(fullname=fullname, + sys_version='%d.%d' % sys.version_info[:2]) + try: + with cls._open_registry(key) as hkey: + filepath = winreg.QueryValue(hkey, '') + except OSError: + return None + return filepath + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + filepath = cls._search_registry(fullname) + if filepath is None: + return None + try: + _path_stat(filepath) + except OSError: + return None + for loader, suffixes in _get_supported_file_loaders(): + if filepath.endswith(tuple(suffixes)): + spec = _bootstrap.spec_from_loader(fullname, + loader(fullname, filepath), + origin=filepath) + return spec + + @classmethod + def find_module(cls, fullname, path=None): + 
"""Find module named in the registry. + + This method is deprecated. Use find_spec() instead. + + """ + _warnings.warn("WindowsRegistryFinder.find_module() is deprecated and " + "slated for removal in Python 3.12; use find_spec() instead", + DeprecationWarning) + spec = cls.find_spec(fullname, path) + if spec is not None: + return spec.loader + else: + return None + + +class _LoaderBasics: + + """Base class of common code needed by both SourceLoader and + SourcelessFileLoader.""" + + def is_package(self, fullname): + """Concrete implementation of InspectLoader.is_package by checking if + the path returned by get_filename has a filename of '__init__.py'.""" + filename = _path_split(self.get_filename(fullname))[1] + filename_base = filename.rsplit('.', 1)[0] + tail_name = fullname.rpartition('.')[2] + return filename_base == '__init__' and tail_name != '__init__' + + def create_module(self, spec): + """Use default semantics for module creation.""" + + def exec_module(self, module): + """Execute the module.""" + code = self.get_code(module.__name__) + if code is None: + raise ImportError('cannot load module {!r} when get_code() ' + 'returns None'.format(module.__name__)) + _bootstrap._call_with_frames_removed(exec, code, module.__dict__) + + def load_module(self, fullname): + """This method is deprecated.""" + # Warning implemented in _load_module_shim(). + return _bootstrap._load_module_shim(self, fullname) + + +class SourceLoader(_LoaderBasics): + + def path_mtime(self, path): + """Optional method that returns the modification time (an int) for the + specified path (a str). + + Raises OSError when the path cannot be handled. + """ + raise OSError + + def path_stats(self, path): + """Optional method returning a metadata dict for the specified + path (a str). + + Possible keys: + - 'mtime' (mandatory) is the numeric timestamp of last source + code modification; + - 'size' (optional) is the size in bytes of the source code. 
+ + Implementing this method allows the loader to read bytecode files. + Raises OSError when the path cannot be handled. + """ + return {'mtime': self.path_mtime(path)} + + def _cache_bytecode(self, source_path, cache_path, data): + """Optional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. + + The source path is needed in order to correctly transfer permissions + """ + # For backwards compatibility, we delegate to set_data() + return self.set_data(cache_path, data) + + def set_data(self, path, data): + """Optional method which writes data (bytes) to a file path (a str). + + Implementing this method allows for the writing of bytecode files. + """ + + + def get_source(self, fullname): + """Concrete implementation of InspectLoader.get_source.""" + path = self.get_filename(fullname) + try: + source_bytes = self.get_data(path) + except OSError as exc: + raise ImportError('source not available through get_data()', + name=fullname) from exc + return decode_source(source_bytes) + + def source_to_code(self, data, path, *, _optimize=-1): + """Return the code object compiled from source. + + The 'data' argument can be any object type that compile() supports. + """ + return _bootstrap._call_with_frames_removed(compile, data, path, 'exec', + dont_inherit=True, optimize=_optimize) + + def get_code(self, fullname): + """Concrete implementation of InspectLoader.get_code. + + Reading of bytecode requires path_stats to be implemented. To write + bytecode, set_data must also be implemented. 
+ + """ + source_path = self.get_filename(fullname) + source_mtime = None + source_bytes = None + source_hash = None + hash_based = False + check_source = True + try: + bytecode_path = cache_from_source(source_path) + except NotImplementedError: + bytecode_path = None + else: + try: + st = self.path_stats(source_path) + except OSError: + pass + else: + source_mtime = int(st['mtime']) + try: + data = self.get_data(bytecode_path) + except OSError: + pass + else: + exc_details = { + 'name': fullname, + 'path': bytecode_path, + } + try: + flags = _classify_pyc(data, fullname, exc_details) + bytes_data = memoryview(data)[16:] + hash_based = flags & 0b1 != 0 + if hash_based: + check_source = flags & 0b10 != 0 + if (_imp.check_hash_based_pycs != 'never' and + (check_source or + _imp.check_hash_based_pycs == 'always')): + source_bytes = self.get_data(source_path) + source_hash = _imp.source_hash( + _RAW_MAGIC_NUMBER, + source_bytes, + ) + _validate_hash_pyc(data, source_hash, fullname, + exc_details) + else: + _validate_timestamp_pyc( + data, + source_mtime, + st['size'], + fullname, + exc_details, + ) + except (ImportError, EOFError): + pass + else: + _bootstrap._verbose_message('{} matches {}', bytecode_path, + source_path) + return _compile_bytecode(bytes_data, name=fullname, + bytecode_path=bytecode_path, + source_path=source_path) + if source_bytes is None: + source_bytes = self.get_data(source_path) + code_object = self.source_to_code(source_bytes, source_path) + _bootstrap._verbose_message('code object from {}', source_path) + if (not sys.dont_write_bytecode and bytecode_path is not None and + source_mtime is not None): + if hash_based: + if source_hash is None: + source_hash = _imp.source_hash(source_bytes) + data = _code_to_hash_pyc(code_object, source_hash, check_source) + else: + data = _code_to_timestamp_pyc(code_object, source_mtime, + len(source_bytes)) + try: + self._cache_bytecode(source_path, bytecode_path, data) + except NotImplementedError: + pass + 
return code_object + + +class FileLoader: + + """Base file loader class which implements the loader protocol methods that + require file system usage.""" + + def __init__(self, fullname, path): + """Cache the module name and the path to the file found by the + finder.""" + self.name = fullname + self.path = path + + def __eq__(self, other): + return (self.__class__ == other.__class__ and + self.__dict__ == other.__dict__) + + def __hash__(self): + return hash(self.name) ^ hash(self.path) + + @_check_name + def load_module(self, fullname): + """Load a module from a file. + + This method is deprecated. Use exec_module() instead. + + """ + # The only reason for this method is for the name check. + # Issue #14857: Avoid the zero-argument form of super so the implementation + # of that form can be updated without breaking the frozen module. + return super(FileLoader, self).load_module(fullname) + + @_check_name + def get_filename(self, fullname): + """Return the path to the source file as found by the finder.""" + return self.path + + def get_data(self, path): + """Return the data from path as raw bytes.""" + if isinstance(self, (SourceLoader, ExtensionFileLoader)): + with _io.open_code(str(path)) as file: + return file.read() + else: + with _io.FileIO(path, 'r') as file: + return file.read() + + @_check_name + def get_resource_reader(self, module): + from importlib.readers import FileReader + return FileReader(self) + + +class SourceFileLoader(FileLoader, SourceLoader): + + """Concrete implementation of SourceLoader using the file system.""" + + def path_stats(self, path): + """Return the metadata for the path.""" + st = _path_stat(path) + return {'mtime': st.st_mtime, 'size': st.st_size} + + def _cache_bytecode(self, source_path, bytecode_path, data): + # Adapt between the two APIs + mode = _calc_mode(source_path) + return self.set_data(bytecode_path, data, _mode=mode) + + def set_data(self, path, data, *, _mode=0o666): + """Write bytes data to a file.""" + parent, 
filename = _path_split(path) + path_parts = [] + # Figure out what directories are missing. + while parent and not _path_isdir(parent): + parent, part = _path_split(parent) + path_parts.append(part) + # Create needed directories. + for part in reversed(path_parts): + parent = _path_join(parent, part) + try: + _os.mkdir(parent) + except FileExistsError: + # Probably another Python process already created the dir. + continue + except OSError as exc: + # Could be a permission error, read-only filesystem: just forget + # about writing the data. + _bootstrap._verbose_message('could not create {!r}: {!r}', + parent, exc) + return + try: + _write_atomic(path, data, _mode) + _bootstrap._verbose_message('created {!r}', path) + except OSError as exc: + # Same as above: just don't write the bytecode. + _bootstrap._verbose_message('could not create {!r}: {!r}', path, + exc) + + +class SourcelessFileLoader(FileLoader, _LoaderBasics): + + """Loader which handles sourceless file imports.""" + + def get_code(self, fullname): + path = self.get_filename(fullname) + data = self.get_data(path) + # Call _classify_pyc to do basic validation of the pyc but ignore the + # result. There's no source to check against. + exc_details = { + 'name': fullname, + 'path': path, + } + _classify_pyc(data, fullname, exc_details) + return _compile_bytecode( + memoryview(data)[16:], + name=fullname, + bytecode_path=path, + ) + + def get_source(self, fullname): + """Return None as there is no source code.""" + return None + + +class ExtensionFileLoader(FileLoader, _LoaderBasics): + + """Loader for extension modules. + + The constructor is designed to work with FileFinder. 
+
+    """
+
+    def __init__(self, name, path):
+        self.name = name
+        self.path = path
+
+    def __eq__(self, other):
+        return (self.__class__ == other.__class__ and
+                self.__dict__ == other.__dict__)
+
+    def __hash__(self):
+        return hash(self.name) ^ hash(self.path)
+
+    def create_module(self, spec):
+        """Create an uninitialized extension module"""
+        module = _bootstrap._call_with_frames_removed(
+            _imp.create_dynamic, spec)
+        _bootstrap._verbose_message('extension module {!r} loaded from {!r}',
+                                    spec.name, self.path)
+        return module
+
+    def exec_module(self, module):
+        """Initialize an extension module"""
+        _bootstrap._call_with_frames_removed(_imp.exec_dynamic, module)
+        _bootstrap._verbose_message('extension module {!r} executed from {!r}',
+                                    self.name, self.path)
+
+    def is_package(self, fullname):
+        """Return True if the extension module is a package."""
+        file_name = _path_split(self.path)[1]
+        return any(file_name == '__init__' + suffix
+                   for suffix in EXTENSION_SUFFIXES)
+
+    def get_code(self, fullname):
+        """Return None as an extension module cannot create a code object."""
+        return None
+
+    def get_source(self, fullname):
+        """Return None as extension modules have no source code."""
+        return None
+
+    @_check_name
+    def get_filename(self, fullname):
+        """Return the path to the source file as found by the finder."""
+        return self.path
+
+
+class _NamespacePath:
+    """Represents a namespace package's path.  It uses the module name
+    to find its parent module, and from there it looks up the parent's
+    __path__.  When this changes, the module's own path is recomputed,
+    using path_finder. 
For top-level modules, the parent module's path + is sys.path.""" + + # When invalidate_caches() is called, this epoch is incremented + # https://bugs.python.org/issue45703 + _epoch = 0 + + def __init__(self, name, path, path_finder): + self._name = name + self._path = path + self._last_parent_path = tuple(self._get_parent_path()) + self._last_epoch = self._epoch + self._path_finder = path_finder + + def _find_parent_path_names(self): + """Returns a tuple of (parent-module-name, parent-path-attr-name)""" + parent, dot, me = self._name.rpartition('.') + if dot == '': + # This is a top-level module. sys.path contains the parent path. + return 'sys', 'path' + # Not a top-level module. parent-module.__path__ contains the + # parent path. + return parent, '__path__' + + def _get_parent_path(self): + parent_module_name, path_attr_name = self._find_parent_path_names() + return getattr(sys.modules[parent_module_name], path_attr_name) + + def _recalculate(self): + # If the parent's path has changed, recalculate _path + parent_path = tuple(self._get_parent_path()) # Make a copy + if parent_path != self._last_parent_path or self._epoch != self._last_epoch: + spec = self._path_finder(self._name, parent_path) + # Note that no changes are made if a loader is returned, but we + # do remember the new parent path + if spec is not None and spec.loader is None: + if spec.submodule_search_locations: + self._path = spec.submodule_search_locations + self._last_parent_path = parent_path # Save the copy + self._last_epoch = self._epoch + return self._path + + def __iter__(self): + return iter(self._recalculate()) + + def __getitem__(self, index): + return self._recalculate()[index] + + def __setitem__(self, index, path): + self._path[index] = path + + def __len__(self): + return len(self._recalculate()) + + def __repr__(self): + return '_NamespacePath({!r})'.format(self._path) + + def __contains__(self, item): + return item in self._recalculate() + + def append(self, item): + 
self._path.append(item) + + +# We use this exclusively in module_from_spec() for backward-compatibility. +class _NamespaceLoader: + def __init__(self, name, path, path_finder): + self._path = _NamespacePath(name, path, path_finder) + + @staticmethod + def module_repr(module): + """Return repr for the module. + + The method is deprecated. The import machinery does the job itself. + + """ + _warnings.warn("_NamespaceLoader.module_repr() is deprecated and " + "slated for removal in Python 3.12", DeprecationWarning) + return ''.format(module.__name__) + + def is_package(self, fullname): + return True + + def get_source(self, fullname): + return '' + + def get_code(self, fullname): + return compile('', '', 'exec', dont_inherit=True) + + def create_module(self, spec): + """Use default semantics for module creation.""" + + def exec_module(self, module): + pass + + def load_module(self, fullname): + """Load a namespace module. + + This method is deprecated. Use exec_module() instead. + + """ + # The import system never calls this method. + _bootstrap._verbose_message('namespace module loaded with path {!r}', + self._path) + # Warning implemented in _load_module_shim(). 
+ return _bootstrap._load_module_shim(self, fullname) + + def get_resource_reader(self, module): + from importlib.readers import NamespaceReader + return NamespaceReader(self._path) + + +# Finders ##################################################################### + +class PathFinder: + + """Meta path finder for sys.path and package __path__ attributes.""" + + @staticmethod + def invalidate_caches(): + """Call the invalidate_caches() method on all path entry finders + stored in sys.path_importer_caches (where implemented).""" + for name, finder in list(sys.path_importer_cache.items()): + if finder is None: + del sys.path_importer_cache[name] + elif hasattr(finder, 'invalidate_caches'): + finder.invalidate_caches() + # Also invalidate the caches of _NamespacePaths + # https://bugs.python.org/issue45703 + _NamespacePath._epoch += 1 + + @staticmethod + def _path_hooks(path): + """Search sys.path_hooks for a finder for 'path'.""" + if sys.path_hooks is not None and not sys.path_hooks: + _warnings.warn('sys.path_hooks is empty', ImportWarning) + for hook in sys.path_hooks: + try: + return hook(path) + except ImportError: + continue + else: + return None + + @classmethod + def _path_importer_cache(cls, path): + """Get the finder for the path entry from sys.path_importer_cache. + + If the path entry is not in the cache, find the appropriate finder + and cache it. If no finder is available, store None. + + """ + if path == '': + try: + path = _os.getcwd() + except FileNotFoundError: + # Don't cache the failure as the cwd can easily change to + # a valid directory later on. + return None + try: + finder = sys.path_importer_cache[path] + except KeyError: + finder = cls._path_hooks(path) + sys.path_importer_cache[path] = finder + return finder + + @classmethod + def _legacy_get_spec(cls, fullname, finder): + # This would be a good place for a DeprecationWarning if + # we ended up going that route. 
+ if hasattr(finder, 'find_loader'): + msg = (f"{_bootstrap._object_name(finder)}.find_spec() not found; " + "falling back to find_loader()") + _warnings.warn(msg, ImportWarning) + loader, portions = finder.find_loader(fullname) + else: + msg = (f"{_bootstrap._object_name(finder)}.find_spec() not found; " + "falling back to find_module()") + _warnings.warn(msg, ImportWarning) + loader = finder.find_module(fullname) + portions = [] + if loader is not None: + return _bootstrap.spec_from_loader(fullname, loader) + spec = _bootstrap.ModuleSpec(fullname, None) + spec.submodule_search_locations = portions + return spec + + @classmethod + def _get_spec(cls, fullname, path, target=None): + """Find the loader or namespace_path for this module/package name.""" + # If this ends up being a namespace package, namespace_path is + # the list of paths that will become its __path__ + namespace_path = [] + for entry in path: + if not isinstance(entry, (str, bytes)): + continue + finder = cls._path_importer_cache(entry) + if finder is not None: + if hasattr(finder, 'find_spec'): + spec = finder.find_spec(fullname, target) + else: + spec = cls._legacy_get_spec(fullname, finder) + if spec is None: + continue + if spec.loader is not None: + return spec + portions = spec.submodule_search_locations + if portions is None: + raise ImportError('spec missing loader') + # This is possibly part of a namespace package. + # Remember these path entries (if any) for when we + # create a namespace package, and continue iterating + # on path. + namespace_path.extend(portions) + else: + spec = _bootstrap.ModuleSpec(fullname, None) + spec.submodule_search_locations = namespace_path + return spec + + @classmethod + def find_spec(cls, fullname, path=None, target=None): + """Try to find a spec for 'fullname' on sys.path or 'path'. + + The search is based on sys.path_hooks and sys.path_importer_cache. 
+ """ + if path is None: + path = sys.path + spec = cls._get_spec(fullname, path, target) + if spec is None: + return None + elif spec.loader is None: + namespace_path = spec.submodule_search_locations + if namespace_path: + # We found at least one namespace path. Return a spec which + # can create the namespace package. + spec.origin = None + spec.submodule_search_locations = _NamespacePath(fullname, namespace_path, cls._get_spec) + return spec + else: + return None + else: + return spec + + @classmethod + def find_module(cls, fullname, path=None): + """find the module on sys.path or 'path' based on sys.path_hooks and + sys.path_importer_cache. + + This method is deprecated. Use find_spec() instead. + + """ + _warnings.warn("PathFinder.find_module() is deprecated and " + "slated for removal in Python 3.12; use find_spec() instead", + DeprecationWarning) + spec = cls.find_spec(fullname, path) + if spec is None: + return None + return spec.loader + + @staticmethod + def find_distributions(*args, **kwargs): + """ + Find distributions. + + Return an iterable of all Distribution instances capable of + loading the metadata for packages matching ``context.name`` + (or all names if ``None`` indicated) along the paths in the list + of directories ``context.path``. + """ + from importlib.metadata import MetadataPathFinder + return MetadataPathFinder.find_distributions(*args, **kwargs) + + +class FileFinder: + + """File-based finder. + + Interactions with the file system are cached for performance, being + refreshed when the directory the finder is handling has been modified. + + """ + + def __init__(self, path, *loader_details): + """Initialize with the path to search on and a variable number of + 2-tuples containing the loader and the file suffixes the loader + recognizes.""" + loaders = [] + for loader, suffixes in loader_details: + loaders.extend((suffix, loader) for suffix in suffixes) + self._loaders = loaders + # Base (directory) path + self.path = path or '.' 
+ if not _path_isabs(self.path): + self.path = _path_join(_os.getcwd(), self.path) + self._path_mtime = -1 + self._path_cache = set() + self._relaxed_path_cache = set() + + def invalidate_caches(self): + """Invalidate the directory mtime.""" + self._path_mtime = -1 + + find_module = _find_module_shim + + def find_loader(self, fullname): + """Try to find a loader for the specified module, or the namespace + package portions. Returns (loader, list-of-portions). + + This method is deprecated. Use find_spec() instead. + + """ + _warnings.warn("FileFinder.find_loader() is deprecated and " + "slated for removal in Python 3.12; use find_spec() instead", + DeprecationWarning) + spec = self.find_spec(fullname) + if spec is None: + return None, [] + return spec.loader, spec.submodule_search_locations or [] + + def _get_spec(self, loader_class, fullname, path, smsl, target): + loader = loader_class(fullname, path) + return spec_from_file_location(fullname, path, loader=loader, + submodule_search_locations=smsl) + + def find_spec(self, fullname, target=None): + """Try to find a spec for the specified module. + + Returns the matching spec, or None if not found. + """ + is_namespace = False + tail_module = fullname.rpartition('.')[2] + try: + mtime = _path_stat(self.path or _os.getcwd()).st_mtime + except OSError: + mtime = -1 + if mtime != self._path_mtime: + self._fill_cache() + self._path_mtime = mtime + # tail_module keeps the original casing, for __file__ and friends + if _relax_case(): + cache = self._relaxed_path_cache + cache_module = tail_module.lower() + else: + cache = self._path_cache + cache_module = tail_module + # Check if the module is the name of a directory (and thus a package). 
+ if cache_module in cache: + base_path = _path_join(self.path, tail_module) + for suffix, loader_class in self._loaders: + init_filename = '__init__' + suffix + full_path = _path_join(base_path, init_filename) + if _path_isfile(full_path): + return self._get_spec(loader_class, fullname, full_path, [base_path], target) + else: + # If a namespace package, return the path if we don't + # find a module in the next section. + is_namespace = _path_isdir(base_path) + # Check for a file w/ a proper suffix exists. + for suffix, loader_class in self._loaders: + try: + full_path = _path_join(self.path, tail_module + suffix) + except ValueError: + return None + _bootstrap._verbose_message('trying {}', full_path, verbosity=2) + if cache_module + suffix in cache: + if _path_isfile(full_path): + return self._get_spec(loader_class, fullname, full_path, + None, target) + if is_namespace: + _bootstrap._verbose_message('possible namespace for {}', base_path) + spec = _bootstrap.ModuleSpec(fullname, None) + spec.submodule_search_locations = [base_path] + return spec + return None + + def _fill_cache(self): + """Fill the cache of potential modules and packages for this directory.""" + path = self.path + try: + contents = _os.listdir(path or _os.getcwd()) + except (FileNotFoundError, PermissionError, NotADirectoryError): + # Directory has either been removed, turned into a file, or made + # unreadable. + contents = [] + # We store two cached versions, to handle runtime changes of the + # PYTHONCASEOK environment variable. + if not sys.platform.startswith('win'): + self._path_cache = set(contents) + else: + # Windows users can import modules with case-insensitive file + # suffixes (for legacy reasons). Make the suffix lowercase here + # so it's done once instead of for every import. This is safe as + # the specified suffixes to check against are always specified in a + # case-sensitive manner. 
+ lower_suffix_contents = set() + for item in contents: + name, dot, suffix = item.partition('.') + if dot: + new_name = '{}.{}'.format(name, suffix.lower()) + else: + new_name = name + lower_suffix_contents.add(new_name) + self._path_cache = lower_suffix_contents + if sys.platform.startswith(_CASE_INSENSITIVE_PLATFORMS): + self._relaxed_path_cache = {fn.lower() for fn in contents} + + @classmethod + def path_hook(cls, *loader_details): + """A class method which returns a closure to use on sys.path_hook + which will return an instance using the specified loaders and the path + called on the closure. + + If the path called on the closure is not a directory, ImportError is + raised. + + """ + def path_hook_for_FileFinder(path): + """Path hook for importlib.machinery.FileFinder.""" + if not _path_isdir(path): + raise ImportError('only directories are supported', path=path) + return cls(path, *loader_details) + + return path_hook_for_FileFinder + + def __repr__(self): + return 'FileFinder({!r})'.format(self.path) + + +# Import setup ############################################################### + +def _fix_up_module(ns, name, pathname, cpathname=None): + # This function is used by PyImport_ExecCodeModuleObject(). + loader = ns.get('__loader__') + spec = ns.get('__spec__') + if not loader: + if spec: + loader = spec.loader + elif pathname == cpathname: + loader = SourcelessFileLoader(name, pathname) + else: + loader = SourceFileLoader(name, pathname) + if not spec: + spec = spec_from_file_location(name, pathname, loader=loader) + try: + ns['__spec__'] = spec + ns['__loader__'] = loader + ns['__file__'] = pathname + ns['__cached__'] = cpathname + except Exception: + # Not important enough to report. + pass + + +def _get_supported_file_loaders(): + """Returns a list of file-based module loaders. + + Each item is a tuple (loader, suffixes). 
+ """ + extensions = ExtensionFileLoader, _imp.extension_suffixes() + source = SourceFileLoader, SOURCE_SUFFIXES + bytecode = SourcelessFileLoader, BYTECODE_SUFFIXES + return [extensions, source, bytecode] + + +def _set_bootstrap_module(_bootstrap_module): + global _bootstrap + _bootstrap = _bootstrap_module + + +def _install(_bootstrap_module): + """Install the path-based import components.""" + _set_bootstrap_module(_bootstrap_module) + supported_loaders = _get_supported_file_loaders() + sys.path_hooks.extend([FileFinder.path_hook(*supported_loaders)]) + sys.meta_path.append(PathFinder) diff --git a/parrot/lib/python3.10/importlib/util.py b/parrot/lib/python3.10/importlib/util.py new file mode 100644 index 0000000000000000000000000000000000000000..8623c89840c6a24d6a692b149e1ab66881bf0002 --- /dev/null +++ b/parrot/lib/python3.10/importlib/util.py @@ -0,0 +1,302 @@ +"""Utility code for constructing importers, etc.""" +from ._abc import Loader +from ._bootstrap import module_from_spec +from ._bootstrap import _resolve_name +from ._bootstrap import spec_from_loader +from ._bootstrap import _find_spec +from ._bootstrap_external import MAGIC_NUMBER +from ._bootstrap_external import _RAW_MAGIC_NUMBER +from ._bootstrap_external import cache_from_source +from ._bootstrap_external import decode_source +from ._bootstrap_external import source_from_cache +from ._bootstrap_external import spec_from_file_location + +from contextlib import contextmanager +import _imp +import functools +import sys +import types +import warnings + + +def source_hash(source_bytes): + "Return the hash of *source_bytes* as used in hash-based pyc files." 
+ return _imp.source_hash(_RAW_MAGIC_NUMBER, source_bytes) + + +def resolve_name(name, package): + """Resolve a relative module name to an absolute one.""" + if not name.startswith('.'): + return name + elif not package: + raise ImportError(f'no package specified for {repr(name)} ' + '(required for relative module names)') + level = 0 + for character in name: + if character != '.': + break + level += 1 + return _resolve_name(name[level:], package, level) + + +def _find_spec_from_path(name, path=None): + """Return the spec for the specified module. + + First, sys.modules is checked to see if the module was already imported. If + so, then sys.modules[name].__spec__ is returned. If that happens to be + set to None, then ValueError is raised. If the module is not in + sys.modules, then sys.meta_path is searched for a suitable spec with the + value of 'path' given to the finders. None is returned if no spec could + be found. + + Dotted names do not have their parent packages implicitly imported. You will + most likely need to explicitly import all parent packages in the proper + order for a submodule to get the correct spec. + + """ + if name not in sys.modules: + return _find_spec(name, path) + else: + module = sys.modules[name] + if module is None: + return None + try: + spec = module.__spec__ + except AttributeError: + raise ValueError('{}.__spec__ is not set'.format(name)) from None + else: + if spec is None: + raise ValueError('{}.__spec__ is None'.format(name)) + return spec + + +def find_spec(name, package=None): + """Return the spec for the specified module. + + First, sys.modules is checked to see if the module was already imported. If + so, then sys.modules[name].__spec__ is returned. If that happens to be + set to None, then ValueError is raised. If the module is not in + sys.modules, then sys.meta_path is searched for a suitable spec with the + value of 'path' given to the finders. None is returned if no spec could + be found. 
+ + If the name is for submodule (contains a dot), the parent module is + automatically imported. + + The name and package arguments work the same as importlib.import_module(). + In other words, relative module names (with leading dots) work. + + """ + fullname = resolve_name(name, package) if name.startswith('.') else name + if fullname not in sys.modules: + parent_name = fullname.rpartition('.')[0] + if parent_name: + parent = __import__(parent_name, fromlist=['__path__']) + try: + parent_path = parent.__path__ + except AttributeError as e: + raise ModuleNotFoundError( + f"__path__ attribute not found on {parent_name!r} " + f"while trying to find {fullname!r}", name=fullname) from e + else: + parent_path = None + return _find_spec(fullname, parent_path) + else: + module = sys.modules[fullname] + if module is None: + return None + try: + spec = module.__spec__ + except AttributeError: + raise ValueError('{}.__spec__ is not set'.format(name)) from None + else: + if spec is None: + raise ValueError('{}.__spec__ is None'.format(name)) + return spec + + +@contextmanager +def _module_to_load(name): + is_reload = name in sys.modules + + module = sys.modules.get(name) + if not is_reload: + # This must be done before open() is called as the 'io' module + # implicitly imports 'locale' and would otherwise trigger an + # infinite loop. + module = type(sys)(name) + # This must be done before putting the module in sys.modules + # (otherwise an optimization shortcut in import.c becomes wrong) + module.__initializing__ = True + sys.modules[name] = module + try: + yield module + except Exception: + if not is_reload: + try: + del sys.modules[name] + except KeyError: + pass + finally: + module.__initializing__ = False + + +def set_package(fxn): + """Set __package__ on the returned module. + + This function is deprecated. 
+ + """ + @functools.wraps(fxn) + def set_package_wrapper(*args, **kwargs): + warnings.warn('The import system now takes care of this automatically; ' + 'this decorator is slated for removal in Python 3.12', + DeprecationWarning, stacklevel=2) + module = fxn(*args, **kwargs) + if getattr(module, '__package__', None) is None: + module.__package__ = module.__name__ + if not hasattr(module, '__path__'): + module.__package__ = module.__package__.rpartition('.')[0] + return module + return set_package_wrapper + + +def set_loader(fxn): + """Set __loader__ on the returned module. + + This function is deprecated. + + """ + @functools.wraps(fxn) + def set_loader_wrapper(self, *args, **kwargs): + warnings.warn('The import system now takes care of this automatically; ' + 'this decorator is slated for removal in Python 3.12', + DeprecationWarning, stacklevel=2) + module = fxn(self, *args, **kwargs) + if getattr(module, '__loader__', None) is None: + module.__loader__ = self + return module + return set_loader_wrapper + + +def module_for_loader(fxn): + """Decorator to handle selecting the proper module for loaders. + + The decorated function is passed the module to use instead of the module + name. The module passed in to the function is either from sys.modules if + it already exists or is a new module. If the module is new, then __name__ + is set the first argument to the method, __loader__ is set to self, and + __package__ is set accordingly (if self.is_package() is defined) will be set + before it is passed to the decorated function (if self.is_package() does + not work for the module it will be set post-load). + + If an exception is raised and the decorator created the module it is + subsequently removed from sys.modules. + + The decorator assumes that the decorated function takes the module name as + the second argument. 
+ + """ + warnings.warn('The import system now takes care of this automatically; ' + 'this decorator is slated for removal in Python 3.12', + DeprecationWarning, stacklevel=2) + @functools.wraps(fxn) + def module_for_loader_wrapper(self, fullname, *args, **kwargs): + with _module_to_load(fullname) as module: + module.__loader__ = self + try: + is_package = self.is_package(fullname) + except (ImportError, AttributeError): + pass + else: + if is_package: + module.__package__ = fullname + else: + module.__package__ = fullname.rpartition('.')[0] + # If __package__ was not set above, __import__() will do it later. + return fxn(self, module, *args, **kwargs) + + return module_for_loader_wrapper + + +class _LazyModule(types.ModuleType): + + """A subclass of the module type which triggers loading upon attribute access.""" + + def __getattribute__(self, attr): + """Trigger the load of the module and return the attribute.""" + # All module metadata must be garnered from __spec__ in order to avoid + # using mutated values. + # Stop triggering this method. + self.__class__ = types.ModuleType + # Get the original name to make sure no object substitution occurred + # in sys.modules. + original_name = self.__spec__.name + # Figure out exactly what attributes were mutated between the creation + # of the module and now. + attrs_then = self.__spec__.loader_state['__dict__'] + attrs_now = self.__dict__ + attrs_updated = {} + for key, value in attrs_now.items(): + # Code that set the attribute may have kept a reference to the + # assigned object, making identity more important than equality. + if key not in attrs_then: + attrs_updated[key] = value + elif id(attrs_now[key]) != id(attrs_then[key]): + attrs_updated[key] = value + self.__spec__.loader.exec_module(self) + # If exec_module() was used directly there is no guarantee the module + # object was put into sys.modules. 
+ if original_name in sys.modules: + if id(self) != id(sys.modules[original_name]): + raise ValueError(f"module object for {original_name!r} " + "substituted in sys.modules during a lazy " + "load") + # Update after loading since that's what would happen in an eager + # loading situation. + self.__dict__.update(attrs_updated) + return getattr(self, attr) + + def __delattr__(self, attr): + """Trigger the load and then perform the deletion.""" + # To trigger the load and raise an exception if the attribute + # doesn't exist. + self.__getattribute__(attr) + delattr(self, attr) + + +class LazyLoader(Loader): + + """A loader that creates a module which defers loading until attribute access.""" + + @staticmethod + def __check_eager_loader(loader): + if not hasattr(loader, 'exec_module'): + raise TypeError('loader must define exec_module()') + + @classmethod + def factory(cls, loader): + """Construct a callable which returns the eager loader made lazy.""" + cls.__check_eager_loader(loader) + return lambda *args, **kwargs: cls(loader(*args, **kwargs)) + + def __init__(self, loader): + self.__check_eager_loader(loader) + self.loader = loader + + def create_module(self, spec): + return self.loader.create_module(spec) + + def exec_module(self, module): + """Make the module load lazily.""" + module.__spec__.loader = self.loader + module.__loader__ = self.loader + # Don't need to worry about deep-copying as trying to set an attribute + # on an object would have triggered the load, + # e.g. ``module.__spec__.loader = None`` would trigger a load from + # trying to access module.__spec__. 
+ loader_state = {} + loader_state['__dict__'] = module.__dict__.copy() + loader_state['__class__'] = module.__class__ + module.__spec__.loader_state = loader_state + module.__class__ = _LazyModule diff --git a/parrot/lib/python3.10/logging/__pycache__/config.cpython-310.pyc b/parrot/lib/python3.10/logging/__pycache__/config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a10e0982d529c30ed6eea1701d0719b24d590678 Binary files /dev/null and b/parrot/lib/python3.10/logging/__pycache__/config.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/logging/config.py b/parrot/lib/python3.10/logging/config.py new file mode 100644 index 0000000000000000000000000000000000000000..5cab008f8d126883f32cf231244bb33ea908fe59 --- /dev/null +++ b/parrot/lib/python3.10/logging/config.py @@ -0,0 +1,947 @@ +# Copyright 2001-2019 by Vinay Sajip. All Rights Reserved. +# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Configuration functions for the logging package for Python. 
The core package +is based on PEP 282 and comments thereto in comp.lang.python, and influenced +by Apache's log4j system. + +Copyright (C) 2001-2019 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging' and log away! +""" + +import errno +import io +import logging +import logging.handlers +import re +import struct +import sys +import threading +import traceback + +from socketserver import ThreadingTCPServer, StreamRequestHandler + + +DEFAULT_LOGGING_CONFIG_PORT = 9030 + +RESET_ERROR = errno.ECONNRESET + +# +# The following code implements a socket listener for on-the-fly +# reconfiguration of logging. +# +# _listener holds the server object doing the listening +_listener = None + +def fileConfig(fname, defaults=None, disable_existing_loggers=True, encoding=None): + """ + Read the logging configuration from a ConfigParser-format file. + + This can be called several times from an application, allowing an end user + the ability to select from various pre-canned configurations (if the + developer provides a mechanism to present the choices and load the chosen + configuration). + """ + import configparser + + if isinstance(fname, configparser.RawConfigParser): + cp = fname + else: + cp = configparser.ConfigParser(defaults) + if hasattr(fname, 'readline'): + cp.read_file(fname) + else: + encoding = io.text_encoding(encoding) + cp.read(fname, encoding=encoding) + + formatters = _create_formatters(cp) + + # critical section + logging._acquireLock() + try: + _clearExistingHandlers() + + # Handlers add themselves to logging._handlers + handlers = _install_handlers(cp, formatters) + _install_loggers(cp, handlers, disable_existing_loggers) + finally: + logging._releaseLock() + + +def _resolve(name): + """Resolve a dotted name to a global object.""" + name = name.split('.') + used = name.pop(0) + found = __import__(used) + for n in name: + used = used + '.' 
+ n + try: + found = getattr(found, n) + except AttributeError: + __import__(used) + found = getattr(found, n) + return found + +def _strip_spaces(alist): + return map(str.strip, alist) + +def _create_formatters(cp): + """Create and return formatters""" + flist = cp["formatters"]["keys"] + if not len(flist): + return {} + flist = flist.split(",") + flist = _strip_spaces(flist) + formatters = {} + for form in flist: + sectname = "formatter_%s" % form + fs = cp.get(sectname, "format", raw=True, fallback=None) + dfs = cp.get(sectname, "datefmt", raw=True, fallback=None) + stl = cp.get(sectname, "style", raw=True, fallback='%') + c = logging.Formatter + class_name = cp[sectname].get("class") + if class_name: + c = _resolve(class_name) + f = c(fs, dfs, stl) + formatters[form] = f + return formatters + + +def _install_handlers(cp, formatters): + """Install and return handlers""" + hlist = cp["handlers"]["keys"] + if not len(hlist): + return {} + hlist = hlist.split(",") + hlist = _strip_spaces(hlist) + handlers = {} + fixups = [] #for inter-handler references + for hand in hlist: + section = cp["handler_%s" % hand] + klass = section["class"] + fmt = section.get("formatter", "") + try: + klass = eval(klass, vars(logging)) + except (AttributeError, NameError): + klass = _resolve(klass) + args = section.get("args", '()') + args = eval(args, vars(logging)) + kwargs = section.get("kwargs", '{}') + kwargs = eval(kwargs, vars(logging)) + h = klass(*args, **kwargs) + h.name = hand + if "level" in section: + level = section["level"] + h.setLevel(level) + if len(fmt): + h.setFormatter(formatters[fmt]) + if issubclass(klass, logging.handlers.MemoryHandler): + target = section.get("target", "") + if len(target): #the target handler may not be loaded yet, so keep for later... + fixups.append((h, target)) + handlers[hand] = h + #now all handlers are loaded, fixup inter-handler references... 
+ for h, t in fixups: + h.setTarget(handlers[t]) + return handlers + +def _handle_existing_loggers(existing, child_loggers, disable_existing): + """ + When (re)configuring logging, handle loggers which were in the previous + configuration but are not in the new configuration. There's no point + deleting them as other threads may continue to hold references to them; + and by disabling them, you stop them doing any logging. + + However, don't disable children of named loggers, as that's probably not + what was intended by the user. Also, allow existing loggers to NOT be + disabled if disable_existing is false. + """ + root = logging.root + for log in existing: + logger = root.manager.loggerDict[log] + if log in child_loggers: + if not isinstance(logger, logging.PlaceHolder): + logger.setLevel(logging.NOTSET) + logger.handlers = [] + logger.propagate = True + else: + logger.disabled = disable_existing + +def _install_loggers(cp, handlers, disable_existing): + """Create and install loggers""" + + # configure the root first + llist = cp["loggers"]["keys"] + llist = llist.split(",") + llist = list(_strip_spaces(llist)) + llist.remove("root") + section = cp["logger_root"] + root = logging.root + log = root + if "level" in section: + level = section["level"] + log.setLevel(level) + for h in root.handlers[:]: + root.removeHandler(h) + hlist = section["handlers"] + if len(hlist): + hlist = hlist.split(",") + hlist = _strip_spaces(hlist) + for hand in hlist: + log.addHandler(handlers[hand]) + + #and now the others... + #we don't want to lose the existing loggers, + #since other threads may have pointers to them. + #existing is set to contain all existing loggers, + #and as we go through the new configuration we + #remove any which are configured. At the end, + #what's left in existing is the set of loggers + #which were in the previous configuration but + #which are not in the new configuration. 
+ existing = list(root.manager.loggerDict.keys()) + #The list needs to be sorted so that we can + #avoid disabling child loggers of explicitly + #named loggers. With a sorted list it is easier + #to find the child loggers. + existing.sort() + #We'll keep the list of existing loggers + #which are children of named loggers here... + child_loggers = [] + #now set up the new ones... + for log in llist: + section = cp["logger_%s" % log] + qn = section["qualname"] + propagate = section.getint("propagate", fallback=1) + logger = logging.getLogger(qn) + if qn in existing: + i = existing.index(qn) + 1 # start with the entry after qn + prefixed = qn + "." + pflen = len(prefixed) + num_existing = len(existing) + while i < num_existing: + if existing[i][:pflen] == prefixed: + child_loggers.append(existing[i]) + i += 1 + existing.remove(qn) + if "level" in section: + level = section["level"] + logger.setLevel(level) + for h in logger.handlers[:]: + logger.removeHandler(h) + logger.propagate = propagate + logger.disabled = 0 + hlist = section["handlers"] + if len(hlist): + hlist = hlist.split(",") + hlist = _strip_spaces(hlist) + for hand in hlist: + logger.addHandler(handlers[hand]) + + #Disable any old loggers. There's no point deleting + #them as other threads may continue to hold references + #and by disabling them, you stop them doing any logging. + #However, don't disable children of named loggers, as that's + #probably not what was intended by the user. 
+ #for log in existing: + # logger = root.manager.loggerDict[log] + # if log in child_loggers: + # logger.level = logging.NOTSET + # logger.handlers = [] + # logger.propagate = 1 + # elif disable_existing_loggers: + # logger.disabled = 1 + _handle_existing_loggers(existing, child_loggers, disable_existing) + + +def _clearExistingHandlers(): + """Clear and close existing handlers""" + logging._handlers.clear() + logging.shutdown(logging._handlerList[:]) + del logging._handlerList[:] + + +IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) + + +def valid_ident(s): + m = IDENTIFIER.match(s) + if not m: + raise ValueError('Not a valid Python identifier: %r' % s) + return True + + +class ConvertingMixin(object): + """For ConvertingXXX's, this mixin class provides common functions""" + + def convert_with_key(self, key, value, replace=True): + result = self.configurator.convert(value) + #If the converted value is different, save for next time + if value is not result: + if replace: + self[key] = result + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + result.key = key + return result + + def convert(self, value): + result = self.configurator.convert(value) + if value is not result: + if type(result) in (ConvertingDict, ConvertingList, + ConvertingTuple): + result.parent = self + return result + + +# The ConvertingXXX classes are wrappers around standard Python containers, +# and they serve to convert any suitable values in the container. The +# conversion converts base dicts, lists and tuples to their wrapped +# equivalents, whereas strings which match a conversion format are converted +# appropriately. +# +# Each wrapper should have a configurator attribute holding the actual +# configurator to use for conversion. 
+ +class ConvertingDict(dict, ConvertingMixin): + """A converting dictionary wrapper.""" + + def __getitem__(self, key): + value = dict.__getitem__(self, key) + return self.convert_with_key(key, value) + + def get(self, key, default=None): + value = dict.get(self, key, default) + return self.convert_with_key(key, value) + + def pop(self, key, default=None): + value = dict.pop(self, key, default) + return self.convert_with_key(key, value, replace=False) + +class ConvertingList(list, ConvertingMixin): + """A converting list wrapper.""" + def __getitem__(self, key): + value = list.__getitem__(self, key) + return self.convert_with_key(key, value) + + def pop(self, idx=-1): + value = list.pop(self, idx) + return self.convert(value) + +class ConvertingTuple(tuple, ConvertingMixin): + """A converting tuple wrapper.""" + def __getitem__(self, key): + value = tuple.__getitem__(self, key) + # Can't replace a tuple entry. + return self.convert_with_key(key, value, replace=False) + +class BaseConfigurator(object): + """ + The configurator base class which defines some useful defaults. + """ + + CONVERT_PATTERN = re.compile(r'^(?P[a-z]+)://(?P.*)$') + + WORD_PATTERN = re.compile(r'^\s*(\w+)\s*') + DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*') + INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*') + DIGIT_PATTERN = re.compile(r'^\d+$') + + value_converters = { + 'ext' : 'ext_convert', + 'cfg' : 'cfg_convert', + } + + # We might want to use a different one, e.g. importlib + importer = staticmethod(__import__) + + def __init__(self, config): + self.config = ConvertingDict(config) + self.config.configurator = self + + def resolve(self, s): + """ + Resolve strings to objects using standard import and attribute + syntax. + """ + name = s.split('.') + used = name.pop(0) + try: + found = self.importer(used) + for frag in name: + used += '.' 
+ frag + try: + found = getattr(found, frag) + except AttributeError: + self.importer(used) + found = getattr(found, frag) + return found + except ImportError: + e, tb = sys.exc_info()[1:] + v = ValueError('Cannot resolve %r: %s' % (s, e)) + v.__cause__, v.__traceback__ = e, tb + raise v + + def ext_convert(self, value): + """Default converter for the ext:// protocol.""" + return self.resolve(value) + + def cfg_convert(self, value): + """Default converter for the cfg:// protocol.""" + rest = value + m = self.WORD_PATTERN.match(rest) + if m is None: + raise ValueError("Unable to convert %r" % value) + else: + rest = rest[m.end():] + d = self.config[m.groups()[0]] + #print d, rest + while rest: + m = self.DOT_PATTERN.match(rest) + if m: + d = d[m.groups()[0]] + else: + m = self.INDEX_PATTERN.match(rest) + if m: + idx = m.groups()[0] + if not self.DIGIT_PATTERN.match(idx): + d = d[idx] + else: + try: + n = int(idx) # try as number first (most likely) + d = d[n] + except TypeError: + d = d[idx] + if m: + rest = rest[m.end():] + else: + raise ValueError('Unable to convert ' + '%r at %r' % (value, rest)) + #rest should be empty + return d + + def convert(self, value): + """ + Convert values to an appropriate type. dicts, lists and tuples are + replaced by their converting alternatives. Strings are checked to + see if they have a conversion format and are converted if they do. 
+ """ + if not isinstance(value, ConvertingDict) and isinstance(value, dict): + value = ConvertingDict(value) + value.configurator = self + elif not isinstance(value, ConvertingList) and isinstance(value, list): + value = ConvertingList(value) + value.configurator = self + elif not isinstance(value, ConvertingTuple) and\ + isinstance(value, tuple) and not hasattr(value, '_fields'): + value = ConvertingTuple(value) + value.configurator = self + elif isinstance(value, str): # str for py3k + m = self.CONVERT_PATTERN.match(value) + if m: + d = m.groupdict() + prefix = d['prefix'] + converter = self.value_converters.get(prefix, None) + if converter: + suffix = d['suffix'] + converter = getattr(self, converter) + value = converter(suffix) + return value + + def configure_custom(self, config): + """Configure an object with a user-supplied factory.""" + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + props = config.pop('.', None) + # Check for valid identifiers + kwargs = {k: config[k] for k in config if valid_ident(k)} + result = c(**kwargs) + if props: + for name, value in props.items(): + setattr(result, name, value) + return result + + def as_tuple(self, value): + """Utility function which converts lists to tuples.""" + if isinstance(value, list): + value = tuple(value) + return value + +class DictConfigurator(BaseConfigurator): + """ + Configure logging using a dictionary-like object to describe the + configuration. 
+ """ + + def configure(self): + """Do the configuration.""" + + config = self.config + if 'version' not in config: + raise ValueError("dictionary doesn't specify a version") + if config['version'] != 1: + raise ValueError("Unsupported version: %s" % config['version']) + incremental = config.pop('incremental', False) + EMPTY_DICT = {} + logging._acquireLock() + try: + if incremental: + handlers = config.get('handlers', EMPTY_DICT) + for name in handlers: + if name not in logging._handlers: + raise ValueError('No handler found with ' + 'name %r' % name) + else: + try: + handler = logging._handlers[name] + handler_config = handlers[name] + level = handler_config.get('level', None) + if level: + handler.setLevel(logging._checkLevel(level)) + except Exception as e: + raise ValueError('Unable to configure handler ' + '%r' % name) from e + loggers = config.get('loggers', EMPTY_DICT) + for name in loggers: + try: + self.configure_logger(name, loggers[name], True) + except Exception as e: + raise ValueError('Unable to configure logger ' + '%r' % name) from e + root = config.get('root', None) + if root: + try: + self.configure_root(root, True) + except Exception as e: + raise ValueError('Unable to configure root ' + 'logger') from e + else: + disable_existing = config.pop('disable_existing_loggers', True) + + _clearExistingHandlers() + + # Do formatters first - they don't refer to anything else + formatters = config.get('formatters', EMPTY_DICT) + for name in formatters: + try: + formatters[name] = self.configure_formatter( + formatters[name]) + except Exception as e: + raise ValueError('Unable to configure ' + 'formatter %r' % name) from e + # Next, do filters - they don't refer to anything else, either + filters = config.get('filters', EMPTY_DICT) + for name in filters: + try: + filters[name] = self.configure_filter(filters[name]) + except Exception as e: + raise ValueError('Unable to configure ' + 'filter %r' % name) from e + + # Next, do handlers - they refer to 
formatters and filters + # As handlers can refer to other handlers, sort the keys + # to allow a deterministic order of configuration + handlers = config.get('handlers', EMPTY_DICT) + deferred = [] + for name in sorted(handlers): + try: + handler = self.configure_handler(handlers[name]) + handler.name = name + handlers[name] = handler + except Exception as e: + if 'target not configured yet' in str(e.__cause__): + deferred.append(name) + else: + raise ValueError('Unable to configure handler ' + '%r' % name) from e + + # Now do any that were deferred + for name in deferred: + try: + handler = self.configure_handler(handlers[name]) + handler.name = name + handlers[name] = handler + except Exception as e: + raise ValueError('Unable to configure handler ' + '%r' % name) from e + + # Next, do loggers - they refer to handlers and filters + + #we don't want to lose the existing loggers, + #since other threads may have pointers to them. + #existing is set to contain all existing loggers, + #and as we go through the new configuration we + #remove any which are configured. At the end, + #what's left in existing is the set of loggers + #which were in the previous configuration but + #which are not in the new configuration. + root = logging.root + existing = list(root.manager.loggerDict.keys()) + #The list needs to be sorted so that we can + #avoid disabling child loggers of explicitly + #named loggers. With a sorted list it is easier + #to find the child loggers. + existing.sort() + #We'll keep the list of existing loggers + #which are children of named loggers here... + child_loggers = [] + #now set up the new ones... + loggers = config.get('loggers', EMPTY_DICT) + for name in loggers: + if name in existing: + i = existing.index(name) + 1 # look after name + prefixed = name + "." 
+ pflen = len(prefixed) + num_existing = len(existing) + while i < num_existing: + if existing[i][:pflen] == prefixed: + child_loggers.append(existing[i]) + i += 1 + existing.remove(name) + try: + self.configure_logger(name, loggers[name]) + except Exception as e: + raise ValueError('Unable to configure logger ' + '%r' % name) from e + + #Disable any old loggers. There's no point deleting + #them as other threads may continue to hold references + #and by disabling them, you stop them doing any logging. + #However, don't disable children of named loggers, as that's + #probably not what was intended by the user. + #for log in existing: + # logger = root.manager.loggerDict[log] + # if log in child_loggers: + # logger.level = logging.NOTSET + # logger.handlers = [] + # logger.propagate = True + # elif disable_existing: + # logger.disabled = True + _handle_existing_loggers(existing, child_loggers, + disable_existing) + + # And finally, do the root logger + root = config.get('root', None) + if root: + try: + self.configure_root(root) + except Exception as e: + raise ValueError('Unable to configure root ' + 'logger') from e + finally: + logging._releaseLock() + + def configure_formatter(self, config): + """Configure a formatter from a dictionary.""" + if '()' in config: + factory = config['()'] # for use in exception handler + try: + result = self.configure_custom(config) + except TypeError as te: + if "'format'" not in str(te): + raise + #Name of parameter changed from fmt to format. + #Retry with old name. + #This is so that code can be used with older Python versions + #(e.g. 
by Django) + config['fmt'] = config.pop('format') + config['()'] = factory + result = self.configure_custom(config) + else: + fmt = config.get('format', None) + dfmt = config.get('datefmt', None) + style = config.get('style', '%') + cname = config.get('class', None) + + if not cname: + c = logging.Formatter + else: + c = _resolve(cname) + + # A TypeError would be raised if "validate" key is passed in with a formatter callable + # that does not accept "validate" as a parameter + if 'validate' in config: # if user hasn't mentioned it, the default will be fine + result = c(fmt, dfmt, style, config['validate']) + else: + result = c(fmt, dfmt, style) + + return result + + def configure_filter(self, config): + """Configure a filter from a dictionary.""" + if '()' in config: + result = self.configure_custom(config) + else: + name = config.get('name', '') + result = logging.Filter(name) + return result + + def add_filters(self, filterer, filters): + """Add filters to a filterer from a list of names.""" + for f in filters: + try: + filterer.addFilter(self.config['filters'][f]) + except Exception as e: + raise ValueError('Unable to add filter %r' % f) from e + + def configure_handler(self, config): + """Configure a handler from a dictionary.""" + config_copy = dict(config) # for restoring in case of error + formatter = config.pop('formatter', None) + if formatter: + try: + formatter = self.config['formatters'][formatter] + except Exception as e: + raise ValueError('Unable to set formatter ' + '%r' % formatter) from e + level = config.pop('level', None) + filters = config.pop('filters', None) + if '()' in config: + c = config.pop('()') + if not callable(c): + c = self.resolve(c) + factory = c + else: + cname = config.pop('class') + klass = self.resolve(cname) + #Special case for handler which refers to another handler + if issubclass(klass, logging.handlers.MemoryHandler) and\ + 'target' in config: + try: + th = self.config['handlers'][config['target']] + if not 
isinstance(th, logging.Handler): + config.update(config_copy) # restore for deferred cfg + raise TypeError('target not configured yet') + config['target'] = th + except Exception as e: + raise ValueError('Unable to set target handler ' + '%r' % config['target']) from e + elif issubclass(klass, logging.handlers.SMTPHandler) and\ + 'mailhost' in config: + config['mailhost'] = self.as_tuple(config['mailhost']) + elif issubclass(klass, logging.handlers.SysLogHandler) and\ + 'address' in config: + config['address'] = self.as_tuple(config['address']) + factory = klass + props = config.pop('.', None) + kwargs = {k: config[k] for k in config if valid_ident(k)} + try: + result = factory(**kwargs) + except TypeError as te: + if "'stream'" not in str(te): + raise + #The argument name changed from strm to stream + #Retry with old name. + #This is so that code can be used with older Python versions + #(e.g. by Django) + kwargs['strm'] = kwargs.pop('stream') + result = factory(**kwargs) + if formatter: + result.setFormatter(formatter) + if level is not None: + result.setLevel(logging._checkLevel(level)) + if filters: + self.add_filters(result, filters) + if props: + for name, value in props.items(): + setattr(result, name, value) + return result + + def add_handlers(self, logger, handlers): + """Add handlers to a logger from a list of names.""" + for h in handlers: + try: + logger.addHandler(self.config['handlers'][h]) + except Exception as e: + raise ValueError('Unable to add handler %r' % h) from e + + def common_logger_config(self, logger, config, incremental=False): + """ + Perform configuration which is common to root and non-root loggers. 
+ """ + level = config.get('level', None) + if level is not None: + logger.setLevel(logging._checkLevel(level)) + if not incremental: + #Remove any existing handlers + for h in logger.handlers[:]: + logger.removeHandler(h) + handlers = config.get('handlers', None) + if handlers: + self.add_handlers(logger, handlers) + filters = config.get('filters', None) + if filters: + self.add_filters(logger, filters) + + def configure_logger(self, name, config, incremental=False): + """Configure a non-root logger from a dictionary.""" + logger = logging.getLogger(name) + self.common_logger_config(logger, config, incremental) + logger.disabled = False + propagate = config.get('propagate', None) + if propagate is not None: + logger.propagate = propagate + + def configure_root(self, config, incremental=False): + """Configure a root logger from a dictionary.""" + root = logging.getLogger() + self.common_logger_config(root, config, incremental) + +dictConfigClass = DictConfigurator + +def dictConfig(config): + """Configure logging using a dictionary.""" + dictConfigClass(config).configure() + + +def listen(port=DEFAULT_LOGGING_CONFIG_PORT, verify=None): + """ + Start up a socket server on the specified port, and listen for new + configurations. + + These will be sent as a file suitable for processing by fileConfig(). + Returns a Thread object on which you can call start() to start the server, + and which you can join() when appropriate. To stop the server, call + stopListening(). + + Use the ``verify`` argument to verify any bytes received across the wire + from a client. If specified, it should be a callable which receives a + single argument - the bytes of configuration data received across the + network - and it should return either ``None``, to indicate that the + passed in bytes could not be verified and should be discarded, or a + byte string which is then passed to the configuration machinery as + normal. Note that you can return transformed bytes, e.g. 
by decrypting + the bytes passed in. + """ + + class ConfigStreamHandler(StreamRequestHandler): + """ + Handler for a logging configuration request. + + It expects a completely new logging configuration and uses fileConfig + to install it. + """ + def handle(self): + """ + Handle a request. + + Each request is expected to be a 4-byte length, packed using + struct.pack(">L", n), followed by the config file. + Uses fileConfig() to do the grunt work. + """ + try: + conn = self.connection + chunk = conn.recv(4) + if len(chunk) == 4: + slen = struct.unpack(">L", chunk)[0] + chunk = self.connection.recv(slen) + while len(chunk) < slen: + chunk = chunk + conn.recv(slen - len(chunk)) + if self.server.verify is not None: + chunk = self.server.verify(chunk) + if chunk is not None: # verified, can process + chunk = chunk.decode("utf-8") + try: + import json + d =json.loads(chunk) + assert isinstance(d, dict) + dictConfig(d) + except Exception: + #Apply new configuration. + + file = io.StringIO(chunk) + try: + fileConfig(file) + except Exception: + traceback.print_exc() + if self.server.ready: + self.server.ready.set() + except OSError as e: + if e.errno != RESET_ERROR: + raise + + class ConfigSocketReceiver(ThreadingTCPServer): + """ + A simple TCP socket-based logging config receiver. 
+ """ + + allow_reuse_address = 1 + + def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT, + handler=None, ready=None, verify=None): + ThreadingTCPServer.__init__(self, (host, port), handler) + logging._acquireLock() + self.abort = 0 + logging._releaseLock() + self.timeout = 1 + self.ready = ready + self.verify = verify + + def serve_until_stopped(self): + import select + abort = 0 + while not abort: + rd, wr, ex = select.select([self.socket.fileno()], + [], [], + self.timeout) + if rd: + self.handle_request() + logging._acquireLock() + abort = self.abort + logging._releaseLock() + self.server_close() + + class Server(threading.Thread): + + def __init__(self, rcvr, hdlr, port, verify): + super(Server, self).__init__() + self.rcvr = rcvr + self.hdlr = hdlr + self.port = port + self.verify = verify + self.ready = threading.Event() + + def run(self): + server = self.rcvr(port=self.port, handler=self.hdlr, + ready=self.ready, + verify=self.verify) + if self.port == 0: + self.port = server.server_address[1] + self.ready.set() + global _listener + logging._acquireLock() + _listener = server + logging._releaseLock() + server.serve_until_stopped() + + return Server(ConfigSocketReceiver, ConfigStreamHandler, port, verify) + +def stopListening(): + """ + Stop the listening server which was created with a call to listen(). + """ + global _listener + logging._acquireLock() + try: + if _listener: + _listener.abort = 1 + _listener = None + finally: + logging._releaseLock() diff --git a/parrot/lib/python3.10/logging/handlers.py b/parrot/lib/python3.10/logging/handlers.py new file mode 100644 index 0000000000000000000000000000000000000000..f0fdedae5ebb5cf5061ad7f9ca57a87c1dfd9a9f --- /dev/null +++ b/parrot/lib/python3.10/logging/handlers.py @@ -0,0 +1,1587 @@ +# Copyright 2001-2021 by Vinay Sajip. All Rights Reserved. 
+# +# Permission to use, copy, modify, and distribute this software and its +# documentation for any purpose and without fee is hereby granted, +# provided that the above copyright notice appear in all copies and that +# both that copyright notice and this permission notice appear in +# supporting documentation, and that the name of Vinay Sajip +# not be used in advertising or publicity pertaining to distribution +# of the software without specific, written prior permission. +# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING +# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL +# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR +# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER +# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT +# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +""" +Additional handlers for the logging package for Python. The core package is +based on PEP 282 and comments thereto in comp.lang.python. + +Copyright (C) 2001-2021 Vinay Sajip. All Rights Reserved. + +To use, simply 'import logging.handlers' and log away! +""" + +import io, logging, socket, os, pickle, struct, time, re +from stat import ST_DEV, ST_INO, ST_MTIME +import queue +import threading +import copy + +# +# Some constants... +# + +DEFAULT_TCP_LOGGING_PORT = 9020 +DEFAULT_UDP_LOGGING_PORT = 9021 +DEFAULT_HTTP_LOGGING_PORT = 9022 +DEFAULT_SOAP_LOGGING_PORT = 9023 +SYSLOG_UDP_PORT = 514 +SYSLOG_TCP_PORT = 514 + +_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day + +class BaseRotatingHandler(logging.FileHandler): + """ + Base class for handlers that rotate log files at a certain point. + Not meant to be instantiated directly. Instead, use RotatingFileHandler + or TimedRotatingFileHandler. 
+ """ + namer = None + rotator = None + + def __init__(self, filename, mode, encoding=None, delay=False, errors=None): + """ + Use the specified filename for streamed logging + """ + logging.FileHandler.__init__(self, filename, mode=mode, + encoding=encoding, delay=delay, + errors=errors) + self.mode = mode + self.encoding = encoding + self.errors = errors + + def emit(self, record): + """ + Emit a record. + + Output the record to the file, catering for rollover as described + in doRollover(). + """ + try: + if self.shouldRollover(record): + self.doRollover() + logging.FileHandler.emit(self, record) + except Exception: + self.handleError(record) + + def rotation_filename(self, default_name): + """ + Modify the filename of a log file when rotating. + + This is provided so that a custom filename can be provided. + + The default implementation calls the 'namer' attribute of the + handler, if it's callable, passing the default name to + it. If the attribute isn't callable (the default is None), the name + is returned unchanged. + + :param default_name: The default name for the log file. + """ + if not callable(self.namer): + result = default_name + else: + result = self.namer(default_name) + return result + + def rotate(self, source, dest): + """ + When rotating, rotate the current log. + + The default implementation calls the 'rotator' attribute of the + handler, if it's callable, passing the source and dest arguments to + it. If the attribute isn't callable (the default is None), the source + is simply renamed to the destination. + + :param source: The source filename. This is normally the base + filename, e.g. 'test.log' + :param dest: The destination filename. This is normally + what the source is rotated to, e.g. 'test.log.1'. + """ + if not callable(self.rotator): + # Issue 18940: A file may not have been created if delay is True. 
+ if os.path.exists(source): + os.rename(source, dest) + else: + self.rotator(source, dest) + +class RotatingFileHandler(BaseRotatingHandler): + """ + Handler for logging to a set of files, which switches from one file + to the next when the current file reaches a certain size. + """ + def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, + encoding=None, delay=False, errors=None): + """ + Open the specified file and use it as the stream for logging. + + By default, the file grows indefinitely. You can specify particular + values of maxBytes and backupCount to allow the file to rollover at + a predetermined size. + + Rollover occurs whenever the current log file is nearly maxBytes in + length. If backupCount is >= 1, the system will successively create + new files with the same pathname as the base file, but with extensions + ".1", ".2" etc. appended to it. For example, with a backupCount of 5 + and a base file name of "app.log", you would get "app.log", + "app.log.1", "app.log.2", ... through to "app.log.5". The file being + written to is always "app.log" - when it gets filled up, it is closed + and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc. + exist, then they are renamed to "app.log.2", "app.log.3" etc. + respectively. + + If maxBytes is zero, rollover never occurs. + """ + # If rotation/rollover is wanted, it doesn't make sense to use another + # mode. If for example 'w' were specified, then if there were multiple + # runs of the calling application, the logs from previous runs would be + # lost if the 'w' is respected, because the log file would be truncated + # on each run. + if maxBytes > 0: + mode = 'a' + if "b" not in mode: + encoding = io.text_encoding(encoding) + BaseRotatingHandler.__init__(self, filename, mode, encoding=encoding, + delay=delay, errors=errors) + self.maxBytes = maxBytes + self.backupCount = backupCount + + def doRollover(self): + """ + Do a rollover, as described in __init__(). 
+ """ + if self.stream: + self.stream.close() + self.stream = None + if self.backupCount > 0: + for i in range(self.backupCount - 1, 0, -1): + sfn = self.rotation_filename("%s.%d" % (self.baseFilename, i)) + dfn = self.rotation_filename("%s.%d" % (self.baseFilename, + i + 1)) + if os.path.exists(sfn): + if os.path.exists(dfn): + os.remove(dfn) + os.rename(sfn, dfn) + dfn = self.rotation_filename(self.baseFilename + ".1") + if os.path.exists(dfn): + os.remove(dfn) + self.rotate(self.baseFilename, dfn) + if not self.delay: + self.stream = self._open() + + def shouldRollover(self, record): + """ + Determine if rollover should occur. + + Basically, see if the supplied record would cause the file to exceed + the size limit we have. + """ + # See bpo-45401: Never rollover anything other than regular files + if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename): + return False + if self.stream is None: # delay was set... + self.stream = self._open() + if self.maxBytes > 0: # are we rolling over? + msg = "%s\n" % self.format(record) + self.stream.seek(0, 2) #due to non-posix-compliant Windows feature + if self.stream.tell() + len(msg) >= self.maxBytes: + return True + return False + +class TimedRotatingFileHandler(BaseRotatingHandler): + """ + Handler for logging to a file, rotating the log file at certain timed + intervals. + + If backupCount is > 0, when rollover is done, no more than backupCount + files are kept - the oldest ones are deleted. + """ + def __init__(self, filename, when='h', interval=1, backupCount=0, + encoding=None, delay=False, utc=False, atTime=None, + errors=None): + encoding = io.text_encoding(encoding) + BaseRotatingHandler.__init__(self, filename, 'a', encoding=encoding, + delay=delay, errors=errors) + self.when = when.upper() + self.backupCount = backupCount + self.utc = utc + self.atTime = atTime + # Calculate the real rollover interval, which is just the number of + # seconds between rollovers. 
Also set the filename suffix used when + # a rollover occurs. Current 'when' events supported: + # S - Seconds + # M - Minutes + # H - Hours + # D - Days + # midnight - roll over at midnight + # W{0-6} - roll over on a certain day; 0 - Monday + # + # Case of the 'when' specifier is not important; lower or upper case + # will work. + if self.when == 'S': + self.interval = 1 # one second + self.suffix = "%Y-%m-%d_%H-%M-%S" + self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}(\.\w+)?$" + elif self.when == 'M': + self.interval = 60 # one minute + self.suffix = "%Y-%m-%d_%H-%M" + self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}(\.\w+)?$" + elif self.when == 'H': + self.interval = 60 * 60 # one hour + self.suffix = "%Y-%m-%d_%H" + self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}(\.\w+)?$" + elif self.when == 'D' or self.when == 'MIDNIGHT': + self.interval = 60 * 60 * 24 # one day + self.suffix = "%Y-%m-%d" + self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$" + elif self.when.startswith('W'): + self.interval = 60 * 60 * 24 * 7 # one week + if len(self.when) != 2: + raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when) + if self.when[1] < '0' or self.when[1] > '6': + raise ValueError("Invalid day specified for weekly rollover: %s" % self.when) + self.dayOfWeek = int(self.when[1]) + self.suffix = "%Y-%m-%d" + self.extMatch = r"^\d{4}-\d{2}-\d{2}(\.\w+)?$" + else: + raise ValueError("Invalid rollover interval specified: %s" % self.when) + + self.extMatch = re.compile(self.extMatch, re.ASCII) + self.interval = self.interval * interval # multiply by units requested + # The following line added because the filename passed in could be a + # path object (see Issue #27493), but self.baseFilename will be a string + filename = self.baseFilename + if os.path.exists(filename): + t = os.stat(filename)[ST_MTIME] + else: + t = int(time.time()) + self.rolloverAt = self.computeRollover(t) + + def computeRollover(self, currentTime): + """ + Work 
out the rollover time based on the specified time. + """ + result = currentTime + self.interval + # If we are rolling over at midnight or weekly, then the interval is already known. + # What we need to figure out is WHEN the next interval is. In other words, + # if you are rolling over at midnight, then your base interval is 1 day, + # but you want to start that one day clock at midnight, not now. So, we + # have to fudge the rolloverAt value in order to trigger the first rollover + # at the right time. After that, the regular interval will take care of + # the rest. Note that this code doesn't care about leap seconds. :) + if self.when == 'MIDNIGHT' or self.when.startswith('W'): + # This could be done with less code, but I wanted it to be clear + if self.utc: + t = time.gmtime(currentTime) + else: + t = time.localtime(currentTime) + currentHour = t[3] + currentMinute = t[4] + currentSecond = t[5] + currentDay = t[6] + # r is the number of seconds left between now and the next rotation + if self.atTime is None: + rotate_ts = _MIDNIGHT + else: + rotate_ts = ((self.atTime.hour * 60 + self.atTime.minute)*60 + + self.atTime.second) + + r = rotate_ts - ((currentHour * 60 + currentMinute) * 60 + + currentSecond) + if r < 0: + # Rotate time is before the current time (for example when + # self.rotateAt is 13:45 and it now 14:15), rotation is + # tomorrow. + r += _MIDNIGHT + currentDay = (currentDay + 1) % 7 + result = currentTime + r + # If we are rolling over on a certain day, add in the number of days until + # the next rollover, but offset by 1 since we just calculated the time + # until the next day starts. There are three cases: + # Case 1) The day to rollover is today; in this case, do nothing + # Case 2) The day to rollover is further in the interval (i.e., today is + # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to + # next rollover is simply 6 - 2 - 1, or 3. 
+ # Case 3) The day to rollover is behind us in the interval (i.e., today + # is day 5 (Saturday) and rollover is on day 3 (Thursday). + # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the + # number of days left in the current week (1) plus the number + # of days in the next week until the rollover day (3). + # The calculations described in 2) and 3) above need to have a day added. + # This is because the above time calculation takes us to midnight on this + # day, i.e. the start of the next day. + if self.when.startswith('W'): + day = currentDay # 0 is Monday + if day != self.dayOfWeek: + if day < self.dayOfWeek: + daysToWait = self.dayOfWeek - day + else: + daysToWait = 6 - day + self.dayOfWeek + 1 + newRolloverAt = result + (daysToWait * (60 * 60 * 24)) + if not self.utc: + dstNow = t[-1] + dstAtRollover = time.localtime(newRolloverAt)[-1] + if dstNow != dstAtRollover: + if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour + addend = -3600 + else: # DST bows out before next rollover, so we need to add an hour + addend = 3600 + newRolloverAt += addend + result = newRolloverAt + return result + + def shouldRollover(self, record): + """ + Determine if rollover should occur. + + record is not used, as we are just comparing times, but it is needed so + the method signatures are the same + """ + t = int(time.time()) + if t >= self.rolloverAt: + # See #89564: Never rollover anything other than regular files + if os.path.exists(self.baseFilename) and not os.path.isfile(self.baseFilename): + # The file is not a regular file, so do not rollover, but do + # set the next rollover time to avoid repeated checks. + self.rolloverAt = self.computeRollover(t) + return False + + return True + return False + + def getFilesToDelete(self): + """ + Determine the files to delete when rolling over. + + More specific than the earlier method, which just used glob.glob(). 
+ """ + dirName, baseName = os.path.split(self.baseFilename) + fileNames = os.listdir(dirName) + result = [] + # See bpo-44753: Don't use the extension when computing the prefix. + n, e = os.path.splitext(baseName) + prefix = n + '.' + plen = len(prefix) + for fileName in fileNames: + if self.namer is None: + # Our files will always start with baseName + if not fileName.startswith(baseName): + continue + else: + # Our files could be just about anything after custom naming, but + # likely candidates are of the form + # foo.log.DATETIME_SUFFIX or foo.DATETIME_SUFFIX.log + if (not fileName.startswith(baseName) and fileName.endswith(e) and + len(fileName) > (plen + 1) and not fileName[plen+1].isdigit()): + continue + + if fileName[:plen] == prefix: + suffix = fileName[plen:] + # See bpo-45628: The date/time suffix could be anywhere in the + # filename + parts = suffix.split('.') + for part in parts: + if self.extMatch.match(part): + result.append(os.path.join(dirName, fileName)) + break + if len(result) < self.backupCount: + result = [] + else: + result.sort() + result = result[:len(result) - self.backupCount] + return result + + def doRollover(self): + """ + do a rollover; in this case, a date/time stamp is appended to the filename + when the rollover happens. However, you want the file to be named for the + start of the interval, not the current time. If there is a backup count, + then we have to get a list of matching filenames, sort them and remove + the one with the oldest suffix. 
+ """ + if self.stream: + self.stream.close() + self.stream = None + # get the time that this sequence started at and make it a TimeTuple + currentTime = int(time.time()) + dstNow = time.localtime(currentTime)[-1] + t = self.rolloverAt - self.interval + if self.utc: + timeTuple = time.gmtime(t) + else: + timeTuple = time.localtime(t) + dstThen = timeTuple[-1] + if dstNow != dstThen: + if dstNow: + addend = 3600 + else: + addend = -3600 + timeTuple = time.localtime(t + addend) + dfn = self.rotation_filename(self.baseFilename + "." + + time.strftime(self.suffix, timeTuple)) + if os.path.exists(dfn): + os.remove(dfn) + self.rotate(self.baseFilename, dfn) + if self.backupCount > 0: + for s in self.getFilesToDelete(): + os.remove(s) + if not self.delay: + self.stream = self._open() + newRolloverAt = self.computeRollover(currentTime) + while newRolloverAt <= currentTime: + newRolloverAt = newRolloverAt + self.interval + #If DST changes and midnight or weekly rollover, adjust for this. + if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc: + dstAtRollover = time.localtime(newRolloverAt)[-1] + if dstNow != dstAtRollover: + if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour + addend = -3600 + else: # DST bows out before next rollover, so we need to add an hour + addend = 3600 + newRolloverAt += addend + self.rolloverAt = newRolloverAt + +class WatchedFileHandler(logging.FileHandler): + """ + A handler for logging to a file, which watches the file + to see if it has changed while in use. This can happen because of + usage of programs such as newsyslog and logrotate which perform + log file rotation. This handler, intended for use under Unix, + watches the file to see if it has changed since the last emit. + (A file has changed if its device or inode have changed.) + If it has changed, the old file stream is closed, and the file + opened to get a new stream. 
+ + This handler is not appropriate for use under Windows, because + under Windows open files cannot be moved or renamed - logging + opens the files with exclusive locks - and so there is no need + for such a handler. Furthermore, ST_INO is not supported under + Windows; stat always returns zero for this value. + + This handler is based on a suggestion and patch by Chad J. + Schroeder. + """ + def __init__(self, filename, mode='a', encoding=None, delay=False, + errors=None): + if "b" not in mode: + encoding = io.text_encoding(encoding) + logging.FileHandler.__init__(self, filename, mode=mode, + encoding=encoding, delay=delay, + errors=errors) + self.dev, self.ino = -1, -1 + self._statstream() + + def _statstream(self): + if self.stream: + sres = os.fstat(self.stream.fileno()) + self.dev, self.ino = sres[ST_DEV], sres[ST_INO] + + def reopenIfNeeded(self): + """ + Reopen log file if needed. + + Checks if the underlying file has changed, and if it + has, close the old stream and reopen the file to get the + current stream. + """ + # Reduce the chance of race conditions by stat'ing by path only + # once and then fstat'ing our new fd if we opened a new log stream. + # See issue #14632: Thanks to John Mulligan for the problem report + # and patch. + try: + # stat the file by path, checking for existence + sres = os.stat(self.baseFilename) + except FileNotFoundError: + sres = None + # compare file system stat with that of our stream file handle + if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino: + if self.stream is not None: + # we have an open file handle, clean it up + self.stream.flush() + self.stream.close() + self.stream = None # See Issue #21742: _open () might fail. + # open a new file handle and get new stat info from that fd + self.stream = self._open() + self._statstream() + + def emit(self, record): + """ + Emit a record. + + If underlying file has changed, reopen the file before emitting the + record to it. 
+ """ + self.reopenIfNeeded() + logging.FileHandler.emit(self, record) + + +class SocketHandler(logging.Handler): + """ + A handler class which writes logging records, in pickle format, to + a streaming socket. The socket is kept open across logging calls. + If the peer resets it, an attempt is made to reconnect on the next call. + The pickle which is sent is that of the LogRecord's attribute dictionary + (__dict__), so that the receiver does not need to have the logging module + installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + """ + + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + + When the attribute *closeOnError* is set to True - if a socket error + occurs, the socket is silently closed and then reopened on the next + logging call. + """ + logging.Handler.__init__(self) + self.host = host + self.port = port + if port is None: + self.address = host + else: + self.address = (host, port) + self.sock = None + self.closeOnError = False + self.retryTime = None + # + # Exponential backoff parameters. + # + self.retryStart = 1.0 + self.retryMax = 30.0 + self.retryFactor = 2.0 + + def makeSocket(self, timeout=1): + """ + A factory method which allows subclasses to define the precise + type of socket they want. + """ + if self.port is not None: + result = socket.create_connection(self.address, timeout=timeout) + else: + result = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + result.settimeout(timeout) + try: + result.connect(self.address) + except OSError: + result.close() # Issue 19182 + raise + return result + + def createSocket(self): + """ + Try to create a socket, using an exponential backoff with + a max retry time. Thanks to Robert Olson for the original patch + (SF #815911) which has been slightly refactored. 
+ """ + now = time.time() + # Either retryTime is None, in which case this + # is the first time back after a disconnect, or + # we've waited long enough. + if self.retryTime is None: + attempt = True + else: + attempt = (now >= self.retryTime) + if attempt: + try: + self.sock = self.makeSocket() + self.retryTime = None # next time, no delay before trying + except OSError: + #Creation failed, so set the retry time and return. + if self.retryTime is None: + self.retryPeriod = self.retryStart + else: + self.retryPeriod = self.retryPeriod * self.retryFactor + if self.retryPeriod > self.retryMax: + self.retryPeriod = self.retryMax + self.retryTime = now + self.retryPeriod + + def send(self, s): + """ + Send a pickled string to the socket. + + This function allows for partial sends which can happen when the + network is busy. + """ + if self.sock is None: + self.createSocket() + #self.sock can be None either because we haven't reached the retry + #time yet, or because we have reached the retry time and retried, + #but are still unable to connect. + if self.sock: + try: + self.sock.sendall(s) + except OSError: #pragma: no cover + self.sock.close() + self.sock = None # so we can call createSocket next time + + def makePickle(self, record): + """ + Pickles the record in binary format with a length prefix, and + returns it ready for transmission across the socket. + """ + ei = record.exc_info + if ei: + # just to get traceback text into record.exc_text ... + dummy = self.format(record) + # See issue #14436: If msg or args are objects, they may not be + # available on the receiving end. So we convert the msg % args + # to a string, save it as msg and zap the args. 
+ d = dict(record.__dict__) + d['msg'] = record.getMessage() + d['args'] = None + d['exc_info'] = None + # Issue #25685: delete 'message' if present: redundant with 'msg' + d.pop('message', None) + s = pickle.dumps(d, 1) + slen = struct.pack(">L", len(s)) + return slen + s + + def handleError(self, record): + """ + Handle an error during logging. + + An error has occurred during logging. Most likely cause - + connection lost. Close the socket so that we can retry on the + next event. + """ + if self.closeOnError and self.sock: + self.sock.close() + self.sock = None #try to reconnect next time + else: + logging.Handler.handleError(self, record) + + def emit(self, record): + """ + Emit a record. + + Pickles the record and writes it to the socket in binary format. + If there is an error with the socket, silently drop the packet. + If there was a problem with the socket, re-establishes the + socket. + """ + try: + s = self.makePickle(record) + self.send(s) + except Exception: + self.handleError(record) + + def close(self): + """ + Closes the socket. + """ + self.acquire() + try: + sock = self.sock + if sock: + self.sock = None + sock.close() + logging.Handler.close(self) + finally: + self.release() + +class DatagramHandler(SocketHandler): + """ + A handler class which writes logging records, in pickle format, to + a datagram socket. The pickle which is sent is that of the LogRecord's + attribute dictionary (__dict__), so that the receiver does not need to + have the logging module installed in order to process the logging event. + + To unpickle the record at the receiving end into a LogRecord, use the + makeLogRecord function. + + """ + def __init__(self, host, port): + """ + Initializes the handler with a specific host address and port. + """ + SocketHandler.__init__(self, host, port) + self.closeOnError = False + + def makeSocket(self): + """ + The factory method of SocketHandler is here overridden to create + a UDP socket (SOCK_DGRAM). 
+ """ + if self.port is None: + family = socket.AF_UNIX + else: + family = socket.AF_INET + s = socket.socket(family, socket.SOCK_DGRAM) + return s + + def send(self, s): + """ + Send a pickled string to a socket. + + This function no longer allows for partial sends which can happen + when the network is busy - UDP does not guarantee delivery and + can deliver packets out of sequence. + """ + if self.sock is None: + self.createSocket() + self.sock.sendto(s, self.address) + +class SysLogHandler(logging.Handler): + """ + A handler class which sends formatted logging records to a syslog + server. Based on Sam Rushing's syslog module: + http://www.nightmare.com/squirl/python-ext/misc/syslog.py + Contributed by Nicolas Untz (after which minor refactoring changes + have been made). + """ + + # from : + # ====================================================================== + # priorities/facilities are encoded into a single 32-bit quantity, where + # the bottom 3 bits are the priority (0-7) and the top 28 bits are the + # facility (0-big number). Both the priorities and the facilities map + # roughly one-to-one to strings in the syslogd(8) source code. This + # mapping is included in this file. 
+ # + # priorities (these are ordered) + + LOG_EMERG = 0 # system is unusable + LOG_ALERT = 1 # action must be taken immediately + LOG_CRIT = 2 # critical conditions + LOG_ERR = 3 # error conditions + LOG_WARNING = 4 # warning conditions + LOG_NOTICE = 5 # normal but significant condition + LOG_INFO = 6 # informational + LOG_DEBUG = 7 # debug-level messages + + # facility codes + LOG_KERN = 0 # kernel messages + LOG_USER = 1 # random user-level messages + LOG_MAIL = 2 # mail system + LOG_DAEMON = 3 # system daemons + LOG_AUTH = 4 # security/authorization messages + LOG_SYSLOG = 5 # messages generated internally by syslogd + LOG_LPR = 6 # line printer subsystem + LOG_NEWS = 7 # network news subsystem + LOG_UUCP = 8 # UUCP subsystem + LOG_CRON = 9 # clock daemon + LOG_AUTHPRIV = 10 # security/authorization messages (private) + LOG_FTP = 11 # FTP daemon + LOG_NTP = 12 # NTP subsystem + LOG_SECURITY = 13 # Log audit + LOG_CONSOLE = 14 # Log alert + LOG_SOLCRON = 15 # Scheduling daemon (Solaris) + + # other codes through 15 reserved for system use + LOG_LOCAL0 = 16 # reserved for local use + LOG_LOCAL1 = 17 # reserved for local use + LOG_LOCAL2 = 18 # reserved for local use + LOG_LOCAL3 = 19 # reserved for local use + LOG_LOCAL4 = 20 # reserved for local use + LOG_LOCAL5 = 21 # reserved for local use + LOG_LOCAL6 = 22 # reserved for local use + LOG_LOCAL7 = 23 # reserved for local use + + priority_names = { + "alert": LOG_ALERT, + "crit": LOG_CRIT, + "critical": LOG_CRIT, + "debug": LOG_DEBUG, + "emerg": LOG_EMERG, + "err": LOG_ERR, + "error": LOG_ERR, # DEPRECATED + "info": LOG_INFO, + "notice": LOG_NOTICE, + "panic": LOG_EMERG, # DEPRECATED + "warn": LOG_WARNING, # DEPRECATED + "warning": LOG_WARNING, + } + + facility_names = { + "auth": LOG_AUTH, + "authpriv": LOG_AUTHPRIV, + "console": LOG_CONSOLE, + "cron": LOG_CRON, + "daemon": LOG_DAEMON, + "ftp": LOG_FTP, + "kern": LOG_KERN, + "lpr": LOG_LPR, + "mail": LOG_MAIL, + "news": LOG_NEWS, + "ntp": LOG_NTP, + 
"security": LOG_SECURITY, + "solaris-cron": LOG_SOLCRON, + "syslog": LOG_SYSLOG, + "user": LOG_USER, + "uucp": LOG_UUCP, + "local0": LOG_LOCAL0, + "local1": LOG_LOCAL1, + "local2": LOG_LOCAL2, + "local3": LOG_LOCAL3, + "local4": LOG_LOCAL4, + "local5": LOG_LOCAL5, + "local6": LOG_LOCAL6, + "local7": LOG_LOCAL7, + } + + #The map below appears to be trivially lowercasing the key. However, + #there's more to it than meets the eye - in some locales, lowercasing + #gives unexpected results. See SF #1524081: in the Turkish locale, + #"INFO".lower() != "info" + priority_map = { + "DEBUG" : "debug", + "INFO" : "info", + "WARNING" : "warning", + "ERROR" : "error", + "CRITICAL" : "critical" + } + + def __init__(self, address=('localhost', SYSLOG_UDP_PORT), + facility=LOG_USER, socktype=None): + """ + Initialize a handler. + + If address is specified as a string, a UNIX socket is used. To log to a + local syslogd, "SysLogHandler(address="/dev/log")" can be used. + If facility is not specified, LOG_USER is used. If socktype is + specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific + socket type will be used. For Unix sockets, you can also specify a + socktype of None, in which case socket.SOCK_DGRAM will be used, falling + back to socket.SOCK_STREAM. + """ + logging.Handler.__init__(self) + + self.address = address + self.facility = facility + self.socktype = socktype + + if isinstance(address, str): + self.unixsocket = True + # Syslog server may be unavailable during handler initialisation. + # C's openlog() function also ignores connection errors. + # Moreover, we ignore these errors while logging, so it not worse + # to ignore it also here. 
+ try: + self._connect_unixsocket(address) + except OSError: + pass + else: + self.unixsocket = False + if socktype is None: + socktype = socket.SOCK_DGRAM + host, port = address + ress = socket.getaddrinfo(host, port, 0, socktype) + if not ress: + raise OSError("getaddrinfo returns an empty list") + for res in ress: + af, socktype, proto, _, sa = res + err = sock = None + try: + sock = socket.socket(af, socktype, proto) + if socktype == socket.SOCK_STREAM: + sock.connect(sa) + break + except OSError as exc: + err = exc + if sock is not None: + sock.close() + if err is not None: + raise err + self.socket = sock + self.socktype = socktype + + def _connect_unixsocket(self, address): + use_socktype = self.socktype + if use_socktype is None: + use_socktype = socket.SOCK_DGRAM + self.socket = socket.socket(socket.AF_UNIX, use_socktype) + try: + self.socket.connect(address) + # it worked, so set self.socktype to the used type + self.socktype = use_socktype + except OSError: + self.socket.close() + if self.socktype is not None: + # user didn't specify falling back, so fail + raise + use_socktype = socket.SOCK_STREAM + self.socket = socket.socket(socket.AF_UNIX, use_socktype) + try: + self.socket.connect(address) + # it worked, so set self.socktype to the used type + self.socktype = use_socktype + except OSError: + self.socket.close() + raise + + def encodePriority(self, facility, priority): + """ + Encode the facility and priority. You can pass in strings or + integers - if strings are passed, the facility_names and + priority_names mapping dictionaries are used to convert them to + integers. + """ + if isinstance(facility, str): + facility = self.facility_names[facility] + if isinstance(priority, str): + priority = self.priority_names[priority] + return (facility << 3) | priority + + def close(self): + """ + Closes the socket. 
+ """ + self.acquire() + try: + self.socket.close() + logging.Handler.close(self) + finally: + self.release() + + def mapPriority(self, levelName): + """ + Map a logging level name to a key in the priority_names map. + This is useful in two scenarios: when custom levels are being + used, and in the case where you can't do a straightforward + mapping by lowercasing the logging level name because of locale- + specific issues (see SF #1524081). + """ + return self.priority_map.get(levelName, "warning") + + ident = '' # prepended to all messages + append_nul = True # some old syslog daemons expect a NUL terminator + + def emit(self, record): + """ + Emit a record. + + The record is formatted, and then sent to the syslog server. If + exception information is present, it is NOT sent to the server. + """ + try: + msg = self.format(record) + if self.ident: + msg = self.ident + msg + if self.append_nul: + msg += '\000' + + # We need to convert record level to lowercase, maybe this will + # change in the future. + prio = '<%d>' % self.encodePriority(self.facility, + self.mapPriority(record.levelname)) + prio = prio.encode('utf-8') + # Message is a string. Convert to bytes as required by RFC 5424 + msg = msg.encode('utf-8') + msg = prio + msg + if self.unixsocket: + try: + self.socket.send(msg) + except OSError: + self.socket.close() + self._connect_unixsocket(self.address) + self.socket.send(msg) + elif self.socktype == socket.SOCK_DGRAM: + self.socket.sendto(msg, self.address) + else: + self.socket.sendall(msg) + except Exception: + self.handleError(record) + +class SMTPHandler(logging.Handler): + """ + A handler class which sends an SMTP email for each logging event. + """ + def __init__(self, mailhost, fromaddr, toaddrs, subject, + credentials=None, secure=None, timeout=5.0): + """ + Initialize the handler. + + Initialize the instance with the from and to addresses and subject + line of the email. 
To specify a non-standard SMTP port, use the + (host, port) tuple format for the mailhost argument. To specify + authentication credentials, supply a (username, password) tuple + for the credentials argument. To specify the use of a secure + protocol (TLS), pass in a tuple for the secure argument. This will + only be used when authentication credentials are supplied. The tuple + will be either an empty tuple, or a single-value tuple with the name + of a keyfile, or a 2-value tuple with the names of the keyfile and + certificate file. (This tuple is passed to the `starttls` method). + A timeout in seconds can be specified for the SMTP connection (the + default is one second). + """ + logging.Handler.__init__(self) + if isinstance(mailhost, (list, tuple)): + self.mailhost, self.mailport = mailhost + else: + self.mailhost, self.mailport = mailhost, None + if isinstance(credentials, (list, tuple)): + self.username, self.password = credentials + else: + self.username = None + self.fromaddr = fromaddr + if isinstance(toaddrs, str): + toaddrs = [toaddrs] + self.toaddrs = toaddrs + self.subject = subject + self.secure = secure + self.timeout = timeout + + def getSubject(self, record): + """ + Determine the subject for the email. + + If you want to specify a subject line which is record-dependent, + override this method. + """ + return self.subject + + def emit(self, record): + """ + Emit a record. + + Format the record and send it to the specified addressees. 
+ """ + try: + import smtplib + from email.message import EmailMessage + import email.utils + + port = self.mailport + if not port: + port = smtplib.SMTP_PORT + smtp = smtplib.SMTP(self.mailhost, port, timeout=self.timeout) + msg = EmailMessage() + msg['From'] = self.fromaddr + msg['To'] = ','.join(self.toaddrs) + msg['Subject'] = self.getSubject(record) + msg['Date'] = email.utils.localtime() + msg.set_content(self.format(record)) + if self.username: + if self.secure is not None: + smtp.ehlo() + smtp.starttls(*self.secure) + smtp.ehlo() + smtp.login(self.username, self.password) + smtp.send_message(msg) + smtp.quit() + except Exception: + self.handleError(record) + +class NTEventLogHandler(logging.Handler): + """ + A handler class which sends events to the NT Event Log. Adds a + registry entry for the specified application name. If no dllname is + provided, win32service.pyd (which contains some basic message + placeholders) is used. Note that use of these placeholders will make + your event logs big, as the entire message source is held in the log. + If you want slimmer logs, you have to pass in the name of your own DLL + which contains the message definitions you want to use in the event log. + """ + def __init__(self, appname, dllname=None, logtype="Application"): + logging.Handler.__init__(self) + try: + import win32evtlogutil, win32evtlog + self.appname = appname + self._welu = win32evtlogutil + if not dllname: + dllname = os.path.split(self._welu.__file__) + dllname = os.path.split(dllname[0]) + dllname = os.path.join(dllname[0], r'win32service.pyd') + self.dllname = dllname + self.logtype = logtype + # Administrative privileges are required to add a source to the registry. + # This may not be available for a user that just wants to add to an + # existing source - handle this specific case. + try: + self._welu.AddSourceToRegistry(appname, dllname, logtype) + except Exception as e: + # This will probably be a pywintypes.error. 
Only raise if it's not + # an "access denied" error, else let it pass + if getattr(e, 'winerror', None) != 5: # not access denied + raise + self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE + self.typemap = { + logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE, + logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE, + logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE, + logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE, + } + except ImportError: + print("The Python Win32 extensions for NT (service, event "\ + "logging) appear not to be available.") + self._welu = None + + def getMessageID(self, record): + """ + Return the message ID for the event record. If you are using your + own messages, you could do this by having the msg passed to the + logger being an ID rather than a formatting string. Then, in here, + you could use a dictionary lookup to get the message ID. This + version returns 1, which is the base message ID in win32service.pyd. + """ + return 1 + + def getEventCategory(self, record): + """ + Return the event category for the record. + + Override this if you want to specify your own categories. This version + returns 0. + """ + return 0 + + def getEventType(self, record): + """ + Return the event type for the record. + + Override this if you want to specify your own types. This version does + a mapping using the handler's typemap attribute, which is set up in + __init__() to a dictionary which contains mappings for DEBUG, INFO, + WARNING, ERROR and CRITICAL. If you are using your own levels you will + either need to override this method or place a suitable dictionary in + the handler's typemap attribute. + """ + return self.typemap.get(record.levelno, self.deftype) + + def emit(self, record): + """ + Emit a record. + + Determine the message ID, event category and event type. Then + log the message in the NT event log. 
+ """ + if self._welu: + try: + id = self.getMessageID(record) + cat = self.getEventCategory(record) + type = self.getEventType(record) + msg = self.format(record) + self._welu.ReportEvent(self.appname, id, cat, type, [msg]) + except Exception: + self.handleError(record) + + def close(self): + """ + Clean up this handler. + + You can remove the application name from the registry as a + source of event log entries. However, if you do this, you will + not be able to see the events as you intended in the Event Log + Viewer - it needs to be able to access the registry to get the + DLL name. + """ + #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype) + logging.Handler.close(self) + +class HTTPHandler(logging.Handler): + """ + A class which sends records to a web server, using either GET or + POST semantics. + """ + def __init__(self, host, url, method="GET", secure=False, credentials=None, + context=None): + """ + Initialize the instance with the host, the request URL, and the method + ("GET" or "POST") + """ + logging.Handler.__init__(self) + method = method.upper() + if method not in ["GET", "POST"]: + raise ValueError("method must be GET or POST") + if not secure and context is not None: + raise ValueError("context parameter only makes sense " + "with secure=True") + self.host = host + self.url = url + self.method = method + self.secure = secure + self.credentials = credentials + self.context = context + + def mapLogRecord(self, record): + """ + Default implementation of mapping the log record into a dict + that is sent as the CGI data. Overwrite in your class. + Contributed by Franz Glasner. + """ + return record.__dict__ + + def getConnection(self, host, secure): + """ + get a HTTP[S]Connection. + + Override when a custom connection is required, for example if + there is a proxy. 
+ """ + import http.client + if secure: + connection = http.client.HTTPSConnection(host, context=self.context) + else: + connection = http.client.HTTPConnection(host) + return connection + + def emit(self, record): + """ + Emit a record. + + Send the record to the web server as a percent-encoded dictionary + """ + try: + import urllib.parse + host = self.host + h = self.getConnection(host, self.secure) + url = self.url + data = urllib.parse.urlencode(self.mapLogRecord(record)) + if self.method == "GET": + if (url.find('?') >= 0): + sep = '&' + else: + sep = '?' + url = url + "%c%s" % (sep, data) + h.putrequest(self.method, url) + # support multiple hosts on one IP address... + # need to strip optional :port from host, if present + i = host.find(":") + if i >= 0: + host = host[:i] + # See issue #30904: putrequest call above already adds this header + # on Python 3.x. + # h.putheader("Host", host) + if self.method == "POST": + h.putheader("Content-type", + "application/x-www-form-urlencoded") + h.putheader("Content-length", str(len(data))) + if self.credentials: + import base64 + s = ('%s:%s' % self.credentials).encode('utf-8') + s = 'Basic ' + base64.b64encode(s).strip().decode('ascii') + h.putheader('Authorization', s) + h.endheaders() + if self.method == "POST": + h.send(data.encode('utf-8')) + h.getresponse() #can't do anything with the result + except Exception: + self.handleError(record) + +class BufferingHandler(logging.Handler): + """ + A handler class which buffers logging records in memory. Whenever each + record is added to the buffer, a check is made to see if the buffer should + be flushed. If it should, then flush() is expected to do what's needed. + """ + def __init__(self, capacity): + """ + Initialize the handler with the buffer size. + """ + logging.Handler.__init__(self) + self.capacity = capacity + self.buffer = [] + + def shouldFlush(self, record): + """ + Should the handler flush its buffer? + + Returns true if the buffer is up to capacity. 
This method can be + overridden to implement custom flushing strategies. + """ + return (len(self.buffer) >= self.capacity) + + def emit(self, record): + """ + Emit a record. + + Append the record. If shouldFlush() tells us to, call flush() to process + the buffer. + """ + self.buffer.append(record) + if self.shouldFlush(record): + self.flush() + + def flush(self): + """ + Override to implement custom flushing behaviour. + + This version just zaps the buffer to empty. + """ + self.acquire() + try: + self.buffer.clear() + finally: + self.release() + + def close(self): + """ + Close the handler. + + This version just flushes and chains to the parent class' close(). + """ + try: + self.flush() + finally: + logging.Handler.close(self) + +class MemoryHandler(BufferingHandler): + """ + A handler class which buffers logging records in memory, periodically + flushing them to a target handler. Flushing occurs whenever the buffer + is full, or when an event of a certain severity or greater is seen. + """ + def __init__(self, capacity, flushLevel=logging.ERROR, target=None, + flushOnClose=True): + """ + Initialize the handler with the buffer size, the level at which + flushing should occur and an optional target. + + Note that without a target being set either here or via setTarget(), + a MemoryHandler is no use to anyone! + + The ``flushOnClose`` argument is ``True`` for backward compatibility + reasons - the old behaviour is that when the handler is closed, the + buffer is flushed, even if the flush level hasn't been exceeded nor the + capacity exceeded. To prevent this, set ``flushOnClose`` to ``False``. + """ + BufferingHandler.__init__(self, capacity) + self.flushLevel = flushLevel + self.target = target + # See Issue #26559 for why this has been added + self.flushOnClose = flushOnClose + + def shouldFlush(self, record): + """ + Check for buffer full or a record at the flushLevel or higher. 
+ """ + return (len(self.buffer) >= self.capacity) or \ + (record.levelno >= self.flushLevel) + + def setTarget(self, target): + """ + Set the target handler for this handler. + """ + self.acquire() + try: + self.target = target + finally: + self.release() + + def flush(self): + """ + For a MemoryHandler, flushing means just sending the buffered + records to the target, if there is one. Override if you want + different behaviour. + + The record buffer is also cleared by this operation. + """ + self.acquire() + try: + if self.target: + for record in self.buffer: + self.target.handle(record) + self.buffer.clear() + finally: + self.release() + + def close(self): + """ + Flush, if appropriately configured, set the target to None and lose the + buffer. + """ + try: + if self.flushOnClose: + self.flush() + finally: + self.acquire() + try: + self.target = None + BufferingHandler.close(self) + finally: + self.release() + + +class QueueHandler(logging.Handler): + """ + This handler sends events to a queue. Typically, it would be used together + with a multiprocessing Queue to centralise logging to file in one process + (in a multi-process application), so as to avoid file write contention + between processes. + + This code is new in Python 3.2, but this class can be copy pasted into + user code for use with earlier Python versions. + """ + + def __init__(self, queue): + """ + Initialise an instance, using the passed queue. + """ + logging.Handler.__init__(self) + self.queue = queue + + def enqueue(self, record): + """ + Enqueue a record. + + The base implementation uses put_nowait. You may want to override + this method if you want to use blocking, timeouts or custom queue + implementations. + """ + self.queue.put_nowait(record) + + def prepare(self, record): + """ + Prepares a record for queuing. The object returned by this method is + enqueued. 
+ + The base implementation formats the record to merge the message + and arguments, and removes unpickleable items from the record + in-place. + + You might want to override this method if you want to convert + the record to a dict or JSON string, or send a modified copy + of the record while leaving the original intact. + """ + # The format operation gets traceback text into record.exc_text + # (if there's exception data), and also returns the formatted + # message. We can then use this to replace the original + # msg + args, as these might be unpickleable. We also zap the + # exc_info, exc_text and stack_info attributes, as they are no longer + # needed and, if not None, will typically not be pickleable. + msg = self.format(record) + # bpo-35726: make copy of record to avoid affecting other handlers in the chain. + record = copy.copy(record) + record.message = msg + record.msg = msg + record.args = None + record.exc_info = None + record.exc_text = None + record.stack_info = None + return record + + def emit(self, record): + """ + Emit a record. + + Writes the LogRecord to the queue, preparing it for pickling first. + """ + try: + self.enqueue(self.prepare(record)) + except Exception: + self.handleError(record) + + +class QueueListener(object): + """ + This class implements an internal threaded listener which watches for + LogRecords being added to a queue, removes them and passes them to a + list of handlers for processing. + """ + _sentinel = None + + def __init__(self, queue, *handlers, respect_handler_level=False): + """ + Initialise an instance with the specified queue and + handlers. + """ + self.queue = queue + self.handlers = handlers + self._thread = None + self.respect_handler_level = respect_handler_level + + def dequeue(self, block): + """ + Dequeue a record and return it, optionally blocking. + + The base implementation uses get. You may want to override this method + if you want to use timeouts or work with custom queue implementations. 
+ """ + return self.queue.get(block) + + def start(self): + """ + Start the listener. + + This starts up a background thread to monitor the queue for + LogRecords to process. + """ + self._thread = t = threading.Thread(target=self._monitor) + t.daemon = True + t.start() + + def prepare(self, record): + """ + Prepare a record for handling. + + This method just returns the passed-in record. You may want to + override this method if you need to do any custom marshalling or + manipulation of the record before passing it to the handlers. + """ + return record + + def handle(self, record): + """ + Handle a record. + + This just loops through the handlers offering them the record + to handle. + """ + record = self.prepare(record) + for handler in self.handlers: + if not self.respect_handler_level: + process = True + else: + process = record.levelno >= handler.level + if process: + handler.handle(record) + + def _monitor(self): + """ + Monitor the queue for records, and ask the handler + to deal with them. + + This method runs on a separate, internal thread. + The thread will terminate if it sees a sentinel object in the queue. + """ + q = self.queue + has_task_done = hasattr(q, 'task_done') + while True: + try: + record = self.dequeue(True) + if record is self._sentinel: + if has_task_done: + q.task_done() + break + self.handle(record) + if has_task_done: + q.task_done() + except queue.Empty: + break + + def enqueue_sentinel(self): + """ + This is used to enqueue the sentinel record. + + The base implementation uses put_nowait. You may want to override this + method if you want to use timeouts or work with custom queue + implementations. + """ + self.queue.put_nowait(self._sentinel) + + def stop(self): + """ + Stop the listener. + + This asks the thread to terminate, and then waits for it to do so. + Note that if you don't call this before your application exits, there + may be some records still left on the queue, which won't be processed. 
+ """ + self.enqueue_sentinel() + self._thread.join() + self._thread = None diff --git a/parrot/lib/python3.10/turtledemo/__init__.py b/parrot/lib/python3.10/turtledemo/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..77150e25331cc4b31d3fd64c9ed079dc880e8356 --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/__init__.py @@ -0,0 +1,14 @@ +""" + -------------------------------------- + About this viewer + -------------------------------------- + + Tiny demo viewer to view turtle graphics example scripts. + + Quickly and dirtyly assembled by Gregor Lingl. + June, 2006 + + For more information see: turtledemo - Help + + Have fun! +""" diff --git a/parrot/lib/python3.10/turtledemo/__main__.py b/parrot/lib/python3.10/turtledemo/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..caea022da4a68807838e9ca171b58d57856a181a --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/__main__.py @@ -0,0 +1,398 @@ +#!/usr/bin/env python3 + +""" + ---------------------------------------------- + turtleDemo - Help + ---------------------------------------------- + + This document has two sections: + + (1) How to use the demo viewer + (2) How to add your own demos to the demo repository + + + (1) How to use the demo viewer. + + Select a demoscript from the example menu. + The (syntax colored) source code appears in the left + source code window. IT CANNOT BE EDITED, but ONLY VIEWED! + + The demo viewer windows can be resized. The divider between text + and canvas can be moved by grabbing it with the mouse. The text font + size can be changed from the menu and with Control/Command '-'/'+'. + It can also be changed on most systems with Control-mousewheel + when the mouse is over the text. + + Press START button to start the demo. + Stop execution by pressing the STOP button. + Clear screen by pressing the CLEAR button. + Restart by pressing the START button again. 
+ + SPECIAL demos, such as clock.py are those which run EVENTDRIVEN. + + Press START button to start the demo. + + - Until the EVENTLOOP is entered everything works + as in an ordinary demo script. + + - When the EVENTLOOP is entered, you control the + application by using the mouse and/or keys (or it's + controlled by some timer events) + To stop it you can and must press the STOP button. + + While the EVENTLOOP is running, the examples menu is disabled. + + - Only after having pressed the STOP button, you may + restart it or choose another example script. + + * * * * * * * * + In some rare situations there may occur interferences/conflicts + between events concerning the demo script and those concerning the + demo-viewer. (They run in the same process.) Strange behaviour may be + the consequence and in the worst case you must close and restart the + viewer. + * * * * * * * * + + + (2) How to add your own demos to the demo repository + + - Place the file in the same directory as turtledemo/__main__.py + IMPORTANT! When imported, the demo should not modify the system + by calling functions in other modules, such as sys, tkinter, or + turtle. Global variables should be initialized in main(). + + - The code must contain a main() function which will + be executed by the viewer (see provided example scripts). + It may return a string which will be displayed in the Label below + the source code window (when execution has finished.) + + - In order to run mydemo.py by itself, such as during development, + add the following at the end of the file: + + if __name__ == '__main__': + main() + mainloop() # keep window open + + python -m turtledemo.mydemo # will then run it + + - If the demo is EVENT DRIVEN, main must return the string + "EVENTLOOP". This informs the demo viewer that the script is + still running and must be stopped by the user! 
+ + If an "EVENTLOOP" demo runs by itself, as with clock, which uses + ontimer, or minimal_hanoi, which loops by recursion, then the + code should catch the turtle.Terminator exception that will be + raised when the user presses the STOP button. (Paint is not such + a demo; it only acts in response to mouse clicks and movements.) +""" +import sys +import os + +from tkinter import * +from idlelib.colorizer import ColorDelegator, color_config +from idlelib.percolator import Percolator +from idlelib.textview import view_text +from turtledemo import __doc__ as about_turtledemo + +import turtle + +demo_dir = os.path.dirname(os.path.abspath(__file__)) +darwin = sys.platform == 'darwin' + +STARTUP = 1 +READY = 2 +RUNNING = 3 +DONE = 4 +EVENTDRIVEN = 5 + +menufont = ("Arial", 12, NORMAL) +btnfont = ("Arial", 12, 'bold') +txtfont = ['Lucida Console', 10, 'normal'] + +MINIMUM_FONT_SIZE = 6 +MAXIMUM_FONT_SIZE = 100 +font_sizes = [8, 9, 10, 11, 12, 14, 18, 20, 22, 24, 30] + +def getExampleEntries(): + return [entry[:-3] for entry in os.listdir(demo_dir) if + entry.endswith(".py") and entry[0] != '_'] + +help_entries = ( # (help_label, help_doc) + ('Turtledemo help', __doc__), + ('About turtledemo', about_turtledemo), + ('About turtle module', turtle.__doc__), + ) + + +class DemoWindow(object): + + def __init__(self, filename=None): + self.root = root = turtle._root = Tk() + root.title('Python turtle-graphics examples') + root.wm_protocol("WM_DELETE_WINDOW", self._destroy) + + if darwin: + import subprocess + # Make sure we are the currently activated OS X application + # so that our menu bar appears. 
+ subprocess.run( + [ + 'osascript', + '-e', 'tell application "System Events"', + '-e', 'set frontmost of the first process whose ' + 'unix id is {} to true'.format(os.getpid()), + '-e', 'end tell', + ], + stderr=subprocess.DEVNULL, + stdout=subprocess.DEVNULL,) + + root.grid_rowconfigure(0, weight=1) + root.grid_columnconfigure(0, weight=1) + root.grid_columnconfigure(1, minsize=90, weight=1) + root.grid_columnconfigure(2, minsize=90, weight=1) + root.grid_columnconfigure(3, minsize=90, weight=1) + + self.mBar = Menu(root, relief=RAISED, borderwidth=2) + self.mBar.add_cascade(menu=self.makeLoadDemoMenu(self.mBar), + label='Examples', underline=0) + self.mBar.add_cascade(menu=self.makeFontMenu(self.mBar), + label='Fontsize', underline=0) + self.mBar.add_cascade(menu=self.makeHelpMenu(self.mBar), + label='Help', underline=0) + root['menu'] = self.mBar + + pane = PanedWindow(orient=HORIZONTAL, sashwidth=5, + sashrelief=SOLID, bg='#ddd') + pane.add(self.makeTextFrame(pane)) + pane.add(self.makeGraphFrame(pane)) + pane.grid(row=0, columnspan=4, sticky='news') + + self.output_lbl = Label(root, height= 1, text=" --- ", bg="#ddf", + font=("Arial", 16, 'normal'), borderwidth=2, + relief=RIDGE) + if darwin: # Leave Mac button colors alone - #44254. 
+ self.start_btn = Button(root, text=" START ", font=btnfont, + fg='#00cc22', command=self.startDemo) + self.stop_btn = Button(root, text=" STOP ", font=btnfont, + fg='#00cc22', command=self.stopIt) + self.clear_btn = Button(root, text=" CLEAR ", font=btnfont, + fg='#00cc22', command = self.clearCanvas) + else: + self.start_btn = Button(root, text=" START ", font=btnfont, + fg="white", disabledforeground = "#fed", + command=self.startDemo) + self.stop_btn = Button(root, text=" STOP ", font=btnfont, + fg="white", disabledforeground = "#fed", + command=self.stopIt) + self.clear_btn = Button(root, text=" CLEAR ", font=btnfont, + fg="white", disabledforeground="#fed", + command = self.clearCanvas) + self.output_lbl.grid(row=1, column=0, sticky='news', padx=(0,5)) + self.start_btn.grid(row=1, column=1, sticky='ew') + self.stop_btn.grid(row=1, column=2, sticky='ew') + self.clear_btn.grid(row=1, column=3, sticky='ew') + + Percolator(self.text).insertfilter(ColorDelegator()) + self.dirty = False + self.exitflag = False + if filename: + self.loadfile(filename) + self.configGUI(DISABLED, DISABLED, DISABLED, + "Choose example from menu", "black") + self.state = STARTUP + + + def onResize(self, event): + cwidth = self._canvas.winfo_width() + cheight = self._canvas.winfo_height() + self._canvas.xview_moveto(0.5*(self.canvwidth-cwidth)/self.canvwidth) + self._canvas.yview_moveto(0.5*(self.canvheight-cheight)/self.canvheight) + + def makeTextFrame(self, root): + self.text_frame = text_frame = Frame(root) + self.text = text = Text(text_frame, name='text', padx=5, + wrap='none', width=45) + color_config(text) + + self.vbar = vbar = Scrollbar(text_frame, name='vbar') + vbar['command'] = text.yview + vbar.pack(side=LEFT, fill=Y) + self.hbar = hbar = Scrollbar(text_frame, name='hbar', orient=HORIZONTAL) + hbar['command'] = text.xview + hbar.pack(side=BOTTOM, fill=X) + text['yscrollcommand'] = vbar.set + text['xscrollcommand'] = hbar.set + + text['font'] = tuple(txtfont) + shortcut = 
'Command' if darwin else 'Control' + text.bind_all('<%s-minus>' % shortcut, self.decrease_size) + text.bind_all('<%s-underscore>' % shortcut, self.decrease_size) + text.bind_all('<%s-equal>' % shortcut, self.increase_size) + text.bind_all('<%s-plus>' % shortcut, self.increase_size) + text.bind('', self.update_mousewheel) + text.bind('', self.increase_size) + text.bind('', self.decrease_size) + + text.pack(side=LEFT, fill=BOTH, expand=1) + return text_frame + + def makeGraphFrame(self, root): + turtle._Screen._root = root + self.canvwidth = 1000 + self.canvheight = 800 + turtle._Screen._canvas = self._canvas = canvas = turtle.ScrolledCanvas( + root, 800, 600, self.canvwidth, self.canvheight) + canvas.adjustScrolls() + canvas._rootwindow.bind('', self.onResize) + canvas._canvas['borderwidth'] = 0 + + self.screen = _s_ = turtle.Screen() + turtle.TurtleScreen.__init__(_s_, _s_._canvas) + self.scanvas = _s_._canvas + turtle.RawTurtle.screens = [_s_] + return canvas + + def set_txtsize(self, size): + txtfont[1] = size + self.text['font'] = tuple(txtfont) + self.output_lbl['text'] = 'Font size %d' % size + + def decrease_size(self, dummy=None): + self.set_txtsize(max(txtfont[1] - 1, MINIMUM_FONT_SIZE)) + return 'break' + + def increase_size(self, dummy=None): + self.set_txtsize(min(txtfont[1] + 1, MAXIMUM_FONT_SIZE)) + return 'break' + + def update_mousewheel(self, event): + # For wheel up, event.delta = 120 on Windows, -1 on darwin. + # X-11 sends Control-Button-4 event instead. + if (event.delta < 0) == (not darwin): + return self.decrease_size() + else: + return self.increase_size() + + def configGUI(self, start, stop, clear, txt="", color="blue"): + if darwin: # Leave Mac button colors alone - #44254. 
+ self.start_btn.config(state=start) + self.stop_btn.config(state=stop) + self.clear_btn.config(state=clear) + else: + self.start_btn.config(state=start, + bg="#d00" if start == NORMAL else "#fca") + self.stop_btn.config(state=stop, + bg="#d00" if stop == NORMAL else "#fca") + self.clear_btn.config(state=clear, + bg="#d00" if clear == NORMAL else "#fca") + self.output_lbl.config(text=txt, fg=color) + + def makeLoadDemoMenu(self, master): + menu = Menu(master) + + for entry in getExampleEntries(): + def load(entry=entry): + self.loadfile(entry) + menu.add_command(label=entry, underline=0, + font=menufont, command=load) + return menu + + def makeFontMenu(self, master): + menu = Menu(master) + menu.add_command(label="Decrease (C-'-')", command=self.decrease_size, + font=menufont) + menu.add_command(label="Increase (C-'+')", command=self.increase_size, + font=menufont) + menu.add_separator() + + for size in font_sizes: + def resize(size=size): + self.set_txtsize(size) + menu.add_command(label=str(size), underline=0, + font=menufont, command=resize) + return menu + + def makeHelpMenu(self, master): + menu = Menu(master) + + for help_label, help_file in help_entries: + def show(help_label=help_label, help_file=help_file): + view_text(self.root, help_label, help_file) + menu.add_command(label=help_label, font=menufont, command=show) + return menu + + def refreshCanvas(self): + if self.dirty: + self.screen.clear() + self.dirty=False + + def loadfile(self, filename): + self.clearCanvas() + turtle.TurtleScreen._RUNNING = False + modname = 'turtledemo.' 
+ filename + __import__(modname) + self.module = sys.modules[modname] + with open(self.module.__file__, 'r') as f: + chars = f.read() + self.text.delete("1.0", "end") + self.text.insert("1.0", chars) + self.root.title(filename + " - a Python turtle graphics example") + self.configGUI(NORMAL, DISABLED, DISABLED, + "Press start button", "red") + self.state = READY + + def startDemo(self): + self.refreshCanvas() + self.dirty = True + turtle.TurtleScreen._RUNNING = True + self.configGUI(DISABLED, NORMAL, DISABLED, + "demo running...", "black") + self.screen.clear() + self.screen.mode("standard") + self.state = RUNNING + + try: + result = self.module.main() + if result == "EVENTLOOP": + self.state = EVENTDRIVEN + else: + self.state = DONE + except turtle.Terminator: + if self.root is None: + return + self.state = DONE + result = "stopped!" + if self.state == DONE: + self.configGUI(NORMAL, DISABLED, NORMAL, + result) + elif self.state == EVENTDRIVEN: + self.exitflag = True + self.configGUI(DISABLED, NORMAL, DISABLED, + "use mouse/keys or STOP", "red") + + def clearCanvas(self): + self.refreshCanvas() + self.screen._delete("all") + self.scanvas.config(cursor="") + self.configGUI(NORMAL, DISABLED, DISABLED) + + def stopIt(self): + if self.exitflag: + self.clearCanvas() + self.exitflag = False + self.configGUI(NORMAL, DISABLED, DISABLED, + "STOPPED!", "red") + turtle.TurtleScreen._RUNNING = False + + def _destroy(self): + turtle.TurtleScreen._RUNNING = False + self.root.destroy() + self.root = None + + +def main(): + demo = DemoWindow() + demo.root.mainloop() + +if __name__ == '__main__': + main() diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/__main__.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/__main__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50af3e8883b6d104e40120fc6abaf9a5997a0389 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/__main__.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/turtledemo/__pycache__/bytedesign.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/bytedesign.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4e5d115485bb9298049eb6dd87f6f7ead4bad422 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/bytedesign.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/chaos.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/chaos.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8839fe087ce89a7837ae3fa9aa966229d36ad963 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/chaos.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/clock.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/clock.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c8a168e12d4b3ba45ddc2a401945bcf423037438 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/clock.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/colormixer.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/colormixer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..55145264b80a84147e35e60f333383fd8a8da8bc Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/colormixer.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/forest.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/forest.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9d374e4e3b25a3d4b4943a262a7ceeacaac1cfa0 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/forest.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/fractalcurves.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/fractalcurves.cpython-310.pyc 
new file mode 100644 index 0000000000000000000000000000000000000000..59892c37cda9628e4800fc3561ec1b19ce443222 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/fractalcurves.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/lindenmayer.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/lindenmayer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c92667e290803978d4ec67991eb959a249778bed Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/lindenmayer.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/minimal_hanoi.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/minimal_hanoi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5abe28681de3751825dd39a57ec1735e38914b23 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/minimal_hanoi.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/nim.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/nim.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33b9965e8d394dd2095ed9e218a4e371665d1f57 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/nim.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/paint.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/paint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ead483ad4927e4e1f0530363ec294590fed43dfa Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/paint.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/peace.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/peace.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb81309823dc60b9ba090e1fa7768bb9955e06d0 Binary files /dev/null and 
b/parrot/lib/python3.10/turtledemo/__pycache__/peace.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/planet_and_moon.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/planet_and_moon.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..252e942eff69439d6da8663f302fd587787858df Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/planet_and_moon.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/rosette.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/rosette.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3044e52ffa99fcb6246a39c841f463a0acbcb1d4 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/rosette.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/round_dance.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/round_dance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7a74c4a9dda5500101a283034760f2eba2455903 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/round_dance.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/sorting_animate.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/sorting_animate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c0198f7dedd7c80c139784e93a495953f4c53920 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/sorting_animate.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/tree.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/tree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6b3fc619bac8235c0a15f4e966e43b3be4e52d80 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/tree.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/turtledemo/__pycache__/two_canvases.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/two_canvases.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b237b92b1890667a7ae1db2856fa0ae4298675e2 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/two_canvases.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/__pycache__/yinyang.cpython-310.pyc b/parrot/lib/python3.10/turtledemo/__pycache__/yinyang.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1e78770943953e13b042c38489f1c997248212b1 Binary files /dev/null and b/parrot/lib/python3.10/turtledemo/__pycache__/yinyang.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/turtledemo/bytedesign.py b/parrot/lib/python3.10/turtledemo/bytedesign.py new file mode 100644 index 0000000000000000000000000000000000000000..1b7452b512c6eb9a1ff6e47c2da78a63f7946185 --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/bytedesign.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python3 +""" turtle-example-suite: + + tdemo_bytedesign.py + +An example adapted from the example-suite +of PythonCard's turtle graphics. + +It's based on an article in BYTE magazine +Problem Solving with Logo: Using Turtle +Graphics to Redraw a Design +November 1982, p. 118 - 134 + +------------------------------------------- + +Due to the statement + +t.delay(0) + +in line 152, which sets the animation delay +to 0, this animation runs in "line per line" +mode as fast as possible. 
+""" + +from turtle import Turtle, mainloop +from time import perf_counter as clock + +# wrapper for any additional drawing routines +# that need to know about each other +class Designer(Turtle): + + def design(self, homePos, scale): + self.up() + for i in range(5): + self.forward(64.65 * scale) + self.down() + self.wheel(self.position(), scale) + self.up() + self.backward(64.65 * scale) + self.right(72) + self.up() + self.goto(homePos) + self.right(36) + self.forward(24.5 * scale) + self.right(198) + self.down() + self.centerpiece(46 * scale, 143.4, scale) + self.getscreen().tracer(True) + + def wheel(self, initpos, scale): + self.right(54) + for i in range(4): + self.pentpiece(initpos, scale) + self.down() + self.left(36) + for i in range(5): + self.tripiece(initpos, scale) + self.left(36) + for i in range(5): + self.down() + self.right(72) + self.forward(28 * scale) + self.up() + self.backward(28 * scale) + self.left(54) + self.getscreen().update() + + def tripiece(self, initpos, scale): + oldh = self.heading() + self.down() + self.backward(2.5 * scale) + self.tripolyr(31.5 * scale, scale) + self.up() + self.goto(initpos) + self.setheading(oldh) + self.down() + self.backward(2.5 * scale) + self.tripolyl(31.5 * scale, scale) + self.up() + self.goto(initpos) + self.setheading(oldh) + self.left(72) + self.getscreen().update() + + def pentpiece(self, initpos, scale): + oldh = self.heading() + self.up() + self.forward(29 * scale) + self.down() + for i in range(5): + self.forward(18 * scale) + self.right(72) + self.pentr(18 * scale, 75, scale) + self.up() + self.goto(initpos) + self.setheading(oldh) + self.forward(29 * scale) + self.down() + for i in range(5): + self.forward(18 * scale) + self.right(72) + self.pentl(18 * scale, 75, scale) + self.up() + self.goto(initpos) + self.setheading(oldh) + self.left(72) + self.getscreen().update() + + def pentl(self, side, ang, scale): + if side < (2 * scale): return + self.forward(side) + self.left(ang) + self.pentl(side - 
(.38 * scale), ang, scale) + + def pentr(self, side, ang, scale): + if side < (2 * scale): return + self.forward(side) + self.right(ang) + self.pentr(side - (.38 * scale), ang, scale) + + def tripolyr(self, side, scale): + if side < (4 * scale): return + self.forward(side) + self.right(111) + self.forward(side / 1.78) + self.right(111) + self.forward(side / 1.3) + self.right(146) + self.tripolyr(side * .75, scale) + + def tripolyl(self, side, scale): + if side < (4 * scale): return + self.forward(side) + self.left(111) + self.forward(side / 1.78) + self.left(111) + self.forward(side / 1.3) + self.left(146) + self.tripolyl(side * .75, scale) + + def centerpiece(self, s, a, scale): + self.forward(s); self.left(a) + if s < (7.5 * scale): + return + self.centerpiece(s - (1.2 * scale), a, scale) + +def main(): + t = Designer() + t.speed(0) + t.hideturtle() + t.getscreen().delay(0) + t.getscreen().tracer(0) + at = clock() + t.design(t.position(), 2) + et = clock() + return "runtime: %.2f sec." 
% (et-at) + +if __name__ == '__main__': + msg = main() + print(msg) + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/chaos.py b/parrot/lib/python3.10/turtledemo/chaos.py new file mode 100644 index 0000000000000000000000000000000000000000..6a45d0d807ef0b88bb2189a0bf82490ac3dd0df9 --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/chaos.py @@ -0,0 +1,59 @@ +# File: tdemo_chaos.py +# Author: Gregor Lingl +# Date: 2009-06-24 + +# A demonstration of chaos + +from turtle import * + +N = 80 + +def f(x): + return 3.9*x*(1-x) + +def g(x): + return 3.9*(x-x**2) + +def h(x): + return 3.9*x-3.9*x*x + +def jumpto(x, y): + penup(); goto(x,y) + +def line(x1, y1, x2, y2): + jumpto(x1, y1) + pendown() + goto(x2, y2) + +def coosys(): + line(-1, 0, N+1, 0) + line(0, -0.1, 0, 1.1) + +def plot(fun, start, color): + pencolor(color) + x = start + jumpto(0, x) + pendown() + dot(5) + for i in range(N): + x=fun(x) + goto(i+1,x) + dot(5) + +def main(): + reset() + setworldcoordinates(-1.0,-0.1, N+1, 1.1) + speed(0) + hideturtle() + coosys() + plot(f, 0.35, "blue") + plot(g, 0.35, "green") + plot(h, 0.35, "red") + # Now zoom in: + for s in range(100): + setworldcoordinates(0.5*s,-0.1, N+1, 1.1) + return "Done!" + +if __name__ == "__main__": + main() + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/clock.py b/parrot/lib/python3.10/turtledemo/clock.py new file mode 100644 index 0000000000000000000000000000000000000000..9f8585bd11e053e3ce668c0a9feccc5f7db438e9 --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/clock.py @@ -0,0 +1,131 @@ +#!/usr/bin/env python3 +# -*- coding: cp1252 -*- +""" turtle-example-suite: + + tdemo_clock.py + +Enhanced clock-program, showing date +and time + ------------------------------------ + Press STOP to exit the program! 
+ ------------------------------------ +""" +from turtle import * +from datetime import datetime + +def jump(distanz, winkel=0): + penup() + right(winkel) + forward(distanz) + left(winkel) + pendown() + +def hand(laenge, spitze): + fd(laenge*1.15) + rt(90) + fd(spitze/2.0) + lt(120) + fd(spitze) + lt(120) + fd(spitze) + lt(120) + fd(spitze/2.0) + +def make_hand_shape(name, laenge, spitze): + reset() + jump(-laenge*0.15) + begin_poly() + hand(laenge, spitze) + end_poly() + hand_form = get_poly() + register_shape(name, hand_form) + +def clockface(radius): + reset() + pensize(7) + for i in range(60): + jump(radius) + if i % 5 == 0: + fd(25) + jump(-radius-25) + else: + dot(3) + jump(-radius) + rt(6) + +def setup(): + global second_hand, minute_hand, hour_hand, writer + mode("logo") + make_hand_shape("second_hand", 125, 25) + make_hand_shape("minute_hand", 130, 25) + make_hand_shape("hour_hand", 90, 25) + clockface(160) + second_hand = Turtle() + second_hand.shape("second_hand") + second_hand.color("gray20", "gray80") + minute_hand = Turtle() + minute_hand.shape("minute_hand") + minute_hand.color("blue1", "red1") + hour_hand = Turtle() + hour_hand.shape("hour_hand") + hour_hand.color("blue3", "red3") + for hand in second_hand, minute_hand, hour_hand: + hand.resizemode("user") + hand.shapesize(1, 1, 3) + hand.speed(0) + ht() + writer = Turtle() + #writer.mode("logo") + writer.ht() + writer.pu() + writer.bk(85) + +def wochentag(t): + wochentag = ["Monday", "Tuesday", "Wednesday", + "Thursday", "Friday", "Saturday", "Sunday"] + return wochentag[t.weekday()] + +def datum(z): + monat = ["Jan.", "Feb.", "Mar.", "Apr.", "May", "June", + "July", "Aug.", "Sep.", "Oct.", "Nov.", "Dec."] + j = z.year + m = monat[z.month - 1] + t = z.day + return "%s %d %d" % (m, t, j) + +def tick(): + t = datetime.today() + sekunde = t.second + t.microsecond*0.000001 + minute = t.minute + sekunde/60.0 + stunde = t.hour + minute/60.0 + try: + tracer(False) # Terminator can occur here + 
writer.clear() + writer.home() + writer.forward(65) + writer.write(wochentag(t), + align="center", font=("Courier", 14, "bold")) + writer.back(150) + writer.write(datum(t), + align="center", font=("Courier", 14, "bold")) + writer.forward(85) + second_hand.setheading(6*sekunde) # or here + minute_hand.setheading(6*minute) + hour_hand.setheading(30*stunde) + tracer(True) + ontimer(tick, 100) + except Terminator: + pass # turtledemo user pressed STOP + +def main(): + tracer(False) + setup() + tracer(True) + tick() + return "EVENTLOOP" + +if __name__ == "__main__": + mode("logo") + msg = main() + print(msg) + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/colormixer.py b/parrot/lib/python3.10/turtledemo/colormixer.py new file mode 100644 index 0000000000000000000000000000000000000000..448db83361a649eff317733a5b932a1989a8697f --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/colormixer.py @@ -0,0 +1,58 @@ +# colormixer + +from turtle import Screen, Turtle, mainloop + +class ColorTurtle(Turtle): + + def __init__(self, x, y): + Turtle.__init__(self) + self.shape("turtle") + self.resizemode("user") + self.shapesize(3,3,5) + self.pensize(10) + self._color = [0,0,0] + self.x = x + self._color[x] = y + self.color(self._color) + self.speed(0) + self.left(90) + self.pu() + self.goto(x,0) + self.pd() + self.sety(1) + self.pu() + self.sety(y) + self.pencolor("gray25") + self.ondrag(self.shift) + + def shift(self, x, y): + self.sety(max(0,min(y,1))) + self._color[self.x] = self.ycor() + self.fillcolor(self._color) + setbgcolor() + +def setbgcolor(): + screen.bgcolor(red.ycor(), green.ycor(), blue.ycor()) + +def main(): + global screen, red, green, blue + screen = Screen() + screen.delay(0) + screen.setworldcoordinates(-1, -0.3, 3, 1.3) + + red = ColorTurtle(0, .5) + green = ColorTurtle(1, .5) + blue = ColorTurtle(2, .5) + setbgcolor() + + writer = Turtle() + writer.ht() + writer.pu() + writer.goto(1,1.15) + 
writer.write("DRAG!",align="center",font=("Arial",30,("bold","italic"))) + return "EVENTLOOP" + +if __name__ == "__main__": + msg = main() + print(msg) + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/forest.py b/parrot/lib/python3.10/turtledemo/forest.py new file mode 100644 index 0000000000000000000000000000000000000000..55b7da947d24760b204afc45bff9474b48f45848 --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/forest.py @@ -0,0 +1,108 @@ +#!/usr/bin/env python3 +""" turtlegraphics-example-suite: + + tdemo_forest.py + +Displays a 'forest' of 3 breadth-first-trees +similar to the one in tree. +For further remarks see tree.py + +This example is a 'breadth-first'-rewrite of +a Logo program written by Erich Neuwirth. See +http://homepage.univie.ac.at/erich.neuwirth/ +""" +from turtle import Turtle, colormode, tracer, mainloop +from random import randrange +from time import perf_counter as clock + +def symRandom(n): + return randrange(-n,n+1) + +def randomize( branchlist, angledist, sizedist ): + return [ (angle+symRandom(angledist), + sizefactor*1.01**symRandom(sizedist)) + for angle, sizefactor in branchlist ] + +def randomfd( t, distance, parts, angledist ): + for i in range(parts): + t.left(symRandom(angledist)) + t.forward( (1.0 * distance)/parts ) + +def tree(tlist, size, level, widthfactor, branchlists, angledist=10, sizedist=5): + # benutzt Liste von turtles und Liste von Zweiglisten, + # fuer jede turtle eine! 
+ if level > 0: + lst = [] + brs = [] + for t, branchlist in list(zip(tlist,branchlists)): + t.pensize( size * widthfactor ) + t.pencolor( 255 - (180 - 11 * level + symRandom(15)), + 180 - 11 * level + symRandom(15), + 0 ) + t.pendown() + randomfd(t, size, level, angledist ) + yield 1 + for angle, sizefactor in branchlist: + t.left(angle) + lst.append(t.clone()) + brs.append(randomize(branchlist, angledist, sizedist)) + t.right(angle) + for x in tree(lst, size*sizefactor, level-1, widthfactor, brs, + angledist, sizedist): + yield None + + +def start(t,x,y): + colormode(255) + t.reset() + t.speed(0) + t.hideturtle() + t.left(90) + t.penup() + t.setpos(x,y) + t.pendown() + +def doit1(level, pen): + pen.hideturtle() + start(pen, 20, -208) + t = tree( [pen], 80, level, 0.1, [[ (45,0.69), (0,0.65), (-45,0.71) ]] ) + return t + +def doit2(level, pen): + pen.hideturtle() + start(pen, -135, -130) + t = tree( [pen], 120, level, 0.1, [[ (45,0.69), (-45,0.71) ]] ) + return t + +def doit3(level, pen): + pen.hideturtle() + start(pen, 190, -90) + t = tree( [pen], 100, level, 0.1, [[ (45,0.7), (0,0.72), (-45,0.65) ]] ) + return t + +# Hier 3 Baumgeneratoren: +def main(): + p = Turtle() + p.ht() + tracer(75,0) + u = doit1(6, Turtle(undobuffersize=1)) + s = doit2(7, Turtle(undobuffersize=1)) + t = doit3(5, Turtle(undobuffersize=1)) + a = clock() + while True: + done = 0 + for b in u,s,t: + try: + b.__next__() + except: + done += 1 + if done == 3: + break + + tracer(1,10) + b = clock() + return "runtime: %.2f sec." 
% (b-a) + +if __name__ == '__main__': + main() + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/fractalcurves.py b/parrot/lib/python3.10/turtledemo/fractalcurves.py new file mode 100644 index 0000000000000000000000000000000000000000..54ade96a0ad05eba6304b643c21906db609903c4 --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/fractalcurves.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 +""" turtle-example-suite: + + tdemo_fractalCurves.py + +This program draws two fractal-curve-designs: +(1) A hilbert curve (in a box) +(2) A combination of Koch-curves. + +The CurvesTurtle class and the fractal-curve- +methods are taken from the PythonCard example +scripts for turtle-graphics. +""" +from turtle import * +from time import sleep, perf_counter as clock + +class CurvesTurtle(Pen): + # example derived from + # Turtle Geometry: The Computer as a Medium for Exploring Mathematics + # by Harold Abelson and Andrea diSessa + # p. 96-98 + def hilbert(self, size, level, parity): + if level == 0: + return + # rotate and draw first subcurve with opposite parity to big curve + self.left(parity * 90) + self.hilbert(size, level - 1, -parity) + # interface to and draw second subcurve with same parity as big curve + self.forward(size) + self.right(parity * 90) + self.hilbert(size, level - 1, parity) + # third subcurve + self.forward(size) + self.hilbert(size, level - 1, parity) + # fourth subcurve + self.right(parity * 90) + self.forward(size) + self.hilbert(size, level - 1, -parity) + # a final turn is needed to make the turtle + # end up facing outward from the large square + self.left(parity * 90) + + # Visual Modeling with Logo: A Structural Approach to Seeing + # by James Clayson + # Koch curve, after Helge von Koch who introduced this geometric figure in 1904 + # p. 
146 + def fractalgon(self, n, rad, lev, dir): + import math + + # if dir = 1 turn outward + # if dir = -1 turn inward + edge = 2 * rad * math.sin(math.pi / n) + self.pu() + self.fd(rad) + self.pd() + self.rt(180 - (90 * (n - 2) / n)) + for i in range(n): + self.fractal(edge, lev, dir) + self.rt(360 / n) + self.lt(180 - (90 * (n - 2) / n)) + self.pu() + self.bk(rad) + self.pd() + + # p. 146 + def fractal(self, dist, depth, dir): + if depth < 1: + self.fd(dist) + return + self.fractal(dist / 3, depth - 1, dir) + self.lt(60 * dir) + self.fractal(dist / 3, depth - 1, dir) + self.rt(120 * dir) + self.fractal(dist / 3, depth - 1, dir) + self.lt(60 * dir) + self.fractal(dist / 3, depth - 1, dir) + +def main(): + ft = CurvesTurtle() + + ft.reset() + ft.speed(0) + ft.ht() + ft.getscreen().tracer(1,0) + ft.pu() + + size = 6 + ft.setpos(-33*size, -32*size) + ft.pd() + + ta=clock() + ft.fillcolor("red") + ft.begin_fill() + ft.fd(size) + + ft.hilbert(size, 6, 1) + + # frame + ft.fd(size) + for i in range(3): + ft.lt(90) + ft.fd(size*(64+i%2)) + ft.pu() + for i in range(2): + ft.fd(size) + ft.rt(90) + ft.pd() + for i in range(4): + ft.fd(size*(66+i%2)) + ft.rt(90) + ft.end_fill() + tb=clock() + res = "Hilbert: %.2fsec. " % (tb-ta) + + sleep(3) + + ft.reset() + ft.speed(0) + ft.ht() + ft.getscreen().tracer(1,0) + + ta=clock() + ft.color("black", "blue") + ft.begin_fill() + ft.fractalgon(3, 250, 4, 1) + ft.end_fill() + ft.begin_fill() + ft.color("red") + ft.fractalgon(3, 200, 4, -1) + ft.end_fill() + tb=clock() + res += "Koch: %.2fsec." 
% (tb-ta) + return res + +if __name__ == '__main__': + msg = main() + print(msg) + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/lindenmayer.py b/parrot/lib/python3.10/turtledemo/lindenmayer.py new file mode 100644 index 0000000000000000000000000000000000000000..3925f25da61870d0cf75ede6ddefbef2562afd03 --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/lindenmayer.py @@ -0,0 +1,119 @@ +#!/usr/bin/env python3 +""" turtle-example-suite: + + xtx_lindenmayer_indian.py + +Each morning women in Tamil Nadu, in southern +India, place designs, created by using rice +flour and known as kolam on the thresholds of +their homes. + +These can be described by Lindenmayer systems, +which can easily be implemented with turtle +graphics and Python. + +Two examples are shown here: +(1) the snake kolam +(2) anklets of Krishna + +Taken from Marcia Ascher: Mathematics +Elsewhere, An Exploration of Ideas Across +Cultures + +""" +################################ +# Mini Lindenmayer tool +############################### + +from turtle import * + +def replace( seq, replacementRules, n ): + for i in range(n): + newseq = "" + for element in seq: + newseq = newseq + replacementRules.get(element,element) + seq = newseq + return seq + +def draw( commands, rules ): + for b in commands: + try: + rules[b]() + except TypeError: + try: + draw(rules[b], rules) + except: + pass + + +def main(): + ################################ + # Example 1: Snake kolam + ################################ + + + def r(): + right(45) + + def l(): + left(45) + + def f(): + forward(7.5) + + snake_rules = {"-":r, "+":l, "f":f, "b":"f+f+f--f--f+f+f"} + snake_replacementRules = {"b": "b+f+b--f--b+f+b"} + snake_start = "b--f--b--f" + + drawing = replace(snake_start, snake_replacementRules, 3) + + reset() + speed(3) + tracer(1,0) + ht() + up() + backward(195) + down() + draw(drawing, snake_rules) + + from time import sleep + sleep(3) + + ################################ + # Example 2: Anklets of Krishna + 
################################ + + def A(): + color("red") + circle(10,90) + + def B(): + from math import sqrt + color("black") + l = 5/sqrt(2) + forward(l) + circle(l, 270) + forward(l) + + def F(): + color("green") + forward(10) + + krishna_rules = {"a":A, "b":B, "f":F} + krishna_replacementRules = {"a" : "afbfa", "b" : "afbfbfbfa" } + krishna_start = "fbfbfbfb" + + reset() + speed(0) + tracer(3,0) + ht() + left(45) + drawing = replace(krishna_start, krishna_replacementRules, 3) + draw(drawing, krishna_rules) + tracer(1) + return "Done!" + +if __name__=='__main__': + msg = main() + print(msg) + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/minimal_hanoi.py b/parrot/lib/python3.10/turtledemo/minimal_hanoi.py new file mode 100644 index 0000000000000000000000000000000000000000..4a432f2b2908d5ec6c18640fe856044bddc8a7f6 --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/minimal_hanoi.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 +""" turtle-example-suite: + + tdemo_minimal_hanoi.py + +A minimal 'Towers of Hanoi' animation: +A tower of 6 discs is transferred from the +left to the right peg. + +An imho quite elegant and concise +implementation using a tower class, which +is derived from the built-in type list. + +Discs are turtles with shape "square", but +stretched to rectangles by shapesize() + --------------------------------------- + To exit press STOP button + --------------------------------------- +""" +from turtle import * + +class Disc(Turtle): + def __init__(self, n): + Turtle.__init__(self, shape="square", visible=False) + self.pu() + self.shapesize(1.5, n*1.5, 2) # square-->rectangle + self.fillcolor(n/6., 0, 1-n/6.) + self.st() + +class Tower(list): + "Hanoi tower, a subclass of built-in type list" + def __init__(self, x): + "create an empty tower. 
x is x-position of peg" + self.x = x + def push(self, d): + d.setx(self.x) + d.sety(-150+34*len(self)) + self.append(d) + def pop(self): + d = list.pop(self) + d.sety(150) + return d + +def hanoi(n, from_, with_, to_): + if n > 0: + hanoi(n-1, from_, to_, with_) + to_.push(from_.pop()) + hanoi(n-1, with_, from_, to_) + +def play(): + onkey(None,"space") + clear() + try: + hanoi(6, t1, t2, t3) + write("press STOP button to exit", + align="center", font=("Courier", 16, "bold")) + except Terminator: + pass # turtledemo user pressed STOP + +def main(): + global t1, t2, t3 + ht(); penup(); goto(0, -225) # writer turtle + t1 = Tower(-250) + t2 = Tower(0) + t3 = Tower(250) + # make tower of 6 discs + for i in range(6,0,-1): + t1.push(Disc(i)) + # prepare spartanic user interface ;-) + write("press spacebar to start game", + align="center", font=("Courier", 16, "bold")) + onkey(play, "space") + listen() + return "EVENTLOOP" + +if __name__=="__main__": + msg = main() + print(msg) + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/nim.py b/parrot/lib/python3.10/turtledemo/nim.py new file mode 100644 index 0000000000000000000000000000000000000000..9ae6cc5c01b9039981f31f4c28a470ac249ee5dc --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/nim.py @@ -0,0 +1,226 @@ +""" turtle-example-suite: + + tdemo_nim.py + +Play nim against the computer. The player +who takes the last stick is the winner. + +Implements the model-view-controller +design pattern. 
+""" + + +import turtle +import random +import time + +SCREENWIDTH = 640 +SCREENHEIGHT = 480 + +MINSTICKS = 7 +MAXSTICKS = 31 + +HUNIT = SCREENHEIGHT // 12 +WUNIT = SCREENWIDTH // ((MAXSTICKS // 5) * 11 + (MAXSTICKS % 5) * 2) + +SCOLOR = (63, 63, 31) +HCOLOR = (255, 204, 204) +COLOR = (204, 204, 255) + +def randomrow(): + return random.randint(MINSTICKS, MAXSTICKS) + +def computerzug(state): + xored = state[0] ^ state[1] ^ state[2] + if xored == 0: + return randommove(state) + for z in range(3): + s = state[z] ^ xored + if s <= state[z]: + move = (z, s) + return move + +def randommove(state): + m = max(state) + while True: + z = random.randint(0,2) + if state[z] > (m > 1): + break + rand = random.randint(m > 1, state[z]-1) + return z, rand + + +class NimModel(object): + def __init__(self, game): + self.game = game + + def setup(self): + if self.game.state not in [Nim.CREATED, Nim.OVER]: + return + self.sticks = [randomrow(), randomrow(), randomrow()] + self.player = 0 + self.winner = None + self.game.view.setup() + self.game.state = Nim.RUNNING + + def move(self, row, col): + maxspalte = self.sticks[row] + self.sticks[row] = col + self.game.view.notify_move(row, col, maxspalte, self.player) + if self.game_over(): + self.game.state = Nim.OVER + self.winner = self.player + self.game.view.notify_over() + elif self.player == 0: + self.player = 1 + row, col = computerzug(self.sticks) + self.move(row, col) + self.player = 0 + + def game_over(self): + return self.sticks == [0, 0, 0] + + def notify_move(self, row, col): + if self.sticks[row] <= col: + return + self.move(row, col) + + +class Stick(turtle.Turtle): + def __init__(self, row, col, game): + turtle.Turtle.__init__(self, visible=False) + self.row = row + self.col = col + self.game = game + x, y = self.coords(row, col) + self.shape("square") + self.shapesize(HUNIT/10.0, WUNIT/20.0) + self.speed(0) + self.pu() + self.goto(x,y) + self.color("white") + self.showturtle() + + def coords(self, row, col): + packet, 
remainder = divmod(col, 5) + x = (3 + 11 * packet + 2 * remainder) * WUNIT + y = (2 + 3 * row) * HUNIT + return x - SCREENWIDTH // 2 + WUNIT // 2, SCREENHEIGHT // 2 - y - HUNIT // 2 + + def makemove(self, x, y): + if self.game.state != Nim.RUNNING: + return + self.game.controller.notify_move(self.row, self.col) + + +class NimView(object): + def __init__(self, game): + self.game = game + self.screen = game.screen + self.model = game.model + self.screen.colormode(255) + self.screen.tracer(False) + self.screen.bgcolor((240, 240, 255)) + self.writer = turtle.Turtle(visible=False) + self.writer.pu() + self.writer.speed(0) + self.sticks = {} + for row in range(3): + for col in range(MAXSTICKS): + self.sticks[(row, col)] = Stick(row, col, game) + self.display("... a moment please ...") + self.screen.tracer(True) + + def display(self, msg1, msg2=None): + self.screen.tracer(False) + self.writer.clear() + if msg2 is not None: + self.writer.goto(0, - SCREENHEIGHT // 2 + 48) + self.writer.pencolor("red") + self.writer.write(msg2, align="center", font=("Courier",18,"bold")) + self.writer.goto(0, - SCREENHEIGHT // 2 + 20) + self.writer.pencolor("black") + self.writer.write(msg1, align="center", font=("Courier",14,"bold")) + self.screen.tracer(True) + + def setup(self): + self.screen.tracer(False) + for row in range(3): + for col in range(self.model.sticks[row]): + self.sticks[(row, col)].color(SCOLOR) + for row in range(3): + for col in range(self.model.sticks[row], MAXSTICKS): + self.sticks[(row, col)].color("white") + self.display("Your turn! Click leftmost stick to remove.") + self.screen.tracer(True) + + def notify_move(self, row, col, maxspalte, player): + if player == 0: + farbe = HCOLOR + for s in range(col, maxspalte): + self.sticks[(row, s)].color(farbe) + else: + self.display(" ... thinking ... ") + time.sleep(0.5) + self.display(" ... thinking ... 
aaah ...") + farbe = COLOR + for s in range(maxspalte-1, col-1, -1): + time.sleep(0.2) + self.sticks[(row, s)].color(farbe) + self.display("Your turn! Click leftmost stick to remove.") + + def notify_over(self): + if self.game.model.winner == 0: + msg2 = "Congrats. You're the winner!!!" + else: + msg2 = "Sorry, the computer is the winner." + self.display("To play again press space bar. To leave press ESC.", msg2) + + def clear(self): + if self.game.state == Nim.OVER: + self.screen.clear() + + +class NimController(object): + + def __init__(self, game): + self.game = game + self.sticks = game.view.sticks + self.BUSY = False + for stick in self.sticks.values(): + stick.onclick(stick.makemove) + self.game.screen.onkey(self.game.model.setup, "space") + self.game.screen.onkey(self.game.view.clear, "Escape") + self.game.view.display("Press space bar to start game") + self.game.screen.listen() + + def notify_move(self, row, col): + if self.BUSY: + return + self.BUSY = True + self.game.model.notify_move(row, col) + self.BUSY = False + + +class Nim(object): + CREATED = 0 + RUNNING = 1 + OVER = 2 + def __init__(self, screen): + self.state = Nim.CREATED + self.screen = screen + self.model = NimModel(self) + self.view = NimView(self) + self.controller = NimController(self) + + +def main(): + mainscreen = turtle.Screen() + mainscreen.mode("standard") + mainscreen.setup(SCREENWIDTH, SCREENHEIGHT) + nim = Nim(mainscreen) + return "EVENTLOOP" + +if __name__ == "__main__": + main() + turtle.mainloop() diff --git a/parrot/lib/python3.10/turtledemo/paint.py b/parrot/lib/python3.10/turtledemo/paint.py new file mode 100644 index 0000000000000000000000000000000000000000..fc6852a20082f5fb085b35201c452d0ed96bafe0 --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/paint.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +""" turtle-example-suite: + + tdemo_paint.py + +A simple event-driven paint program + +- left mouse button moves turtle +- middle mouse button changes color +- right mouse 
button toggles between pen up +(no line drawn when the turtle moves) and +pen down (line is drawn). If pen up follows +at least two pen-down moves, the polygon that +includes the starting point is filled. + ------------------------------------------- + Play around by clicking into the canvas + using all three mouse buttons. + ------------------------------------------- + To exit press STOP button + ------------------------------------------- +""" +from turtle import * + +def switchupdown(x=0, y=0): + if pen()["pendown"]: + end_fill() + up() + else: + down() + begin_fill() + +def changecolor(x=0, y=0): + global colors + colors = colors[1:]+colors[:1] + color(colors[0]) + +def main(): + global colors + shape("circle") + resizemode("user") + shapesize(.5) + width(3) + colors=["red", "green", "blue", "yellow"] + color(colors[0]) + switchupdown() + onscreenclick(goto,1) + onscreenclick(changecolor,2) + onscreenclick(switchupdown,3) + return "EVENTLOOP" + +if __name__ == "__main__": + msg = main() + print(msg) + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/peace.py b/parrot/lib/python3.10/turtledemo/peace.py new file mode 100644 index 0000000000000000000000000000000000000000..e2ba9288d9e42e78bb0ad68f69ed4910aa65567d --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/peace.py @@ -0,0 +1,61 @@ +#!/usr/bin/env python3 +""" turtle-example-suite: + + tdemo_peace.py + +A simple drawing suitable as a beginner's +programming example. Aside from the +peacecolors assignment and the for loop, +it only uses turtle commands. 
+""" + +from turtle import * + +def main(): + peacecolors = ("red3", "orange", "yellow", + "seagreen4", "orchid4", + "royalblue1", "dodgerblue4") + + reset() + Screen() + up() + goto(-320,-195) + width(70) + + for pcolor in peacecolors: + color(pcolor) + down() + forward(640) + up() + backward(640) + left(90) + forward(66) + right(90) + + width(25) + color("white") + goto(0,-170) + down() + + circle(170) + left(90) + forward(340) + up() + left(180) + forward(170) + right(45) + down() + forward(170) + up() + backward(170) + left(90) + down() + forward(170) + up() + + goto(0,300) # vanish if hideturtle() is not available ;-) + return "Done!" + +if __name__ == "__main__": + main() + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/penrose.py b/parrot/lib/python3.10/turtledemo/penrose.py new file mode 100644 index 0000000000000000000000000000000000000000..045722a2286061eb9cb22e1e245f0164fc665afc --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/penrose.py @@ -0,0 +1,175 @@ +#!/usr/bin/env python3 +""" xturtle-example-suite: + + xtx_kites_and_darts.py + +Constructs two aperiodic penrose-tilings, +consisting of kites and darts, by the method +of inflation in six steps. + +Starting points are the patterns "sun" +consisting of five kites and "star" +consisting of five darts. 
+ +For more information see: + http://en.wikipedia.org/wiki/Penrose_tiling + ------------------------------------------- +""" +from turtle import * +from math import cos, pi +from time import perf_counter as clock, sleep + +f = (5**0.5-1)/2.0 # (sqrt(5)-1)/2 -- golden ratio +d = 2 * cos(3*pi/10) + +def kite(l): + fl = f * l + lt(36) + fd(l) + rt(108) + fd(fl) + rt(36) + fd(fl) + rt(108) + fd(l) + rt(144) + +def dart(l): + fl = f * l + lt(36) + fd(l) + rt(144) + fd(fl) + lt(36) + fd(fl) + rt(144) + fd(l) + rt(144) + +def inflatekite(l, n): + if n == 0: + px, py = pos() + h, x, y = int(heading()), round(px,3), round(py,3) + tiledict[(h,x,y)] = True + return + fl = f * l + lt(36) + inflatedart(fl, n-1) + fd(l) + rt(144) + inflatekite(fl, n-1) + lt(18) + fd(l*d) + rt(162) + inflatekite(fl, n-1) + lt(36) + fd(l) + rt(180) + inflatedart(fl, n-1) + lt(36) + +def inflatedart(l, n): + if n == 0: + px, py = pos() + h, x, y = int(heading()), round(px,3), round(py,3) + tiledict[(h,x,y)] = False + return + fl = f * l + inflatekite(fl, n-1) + lt(36) + fd(l) + rt(180) + inflatedart(fl, n-1) + lt(54) + fd(l*d) + rt(126) + inflatedart(fl, n-1) + fd(l) + rt(144) + +def draw(l, n, th=2): + clear() + l = l * f**n + shapesize(l/100.0, l/100.0, th) + for k in tiledict: + h, x, y = k + setpos(x, y) + setheading(h) + if tiledict[k]: + shape("kite") + color("black", (0, 0.75, 0)) + else: + shape("dart") + color("black", (0.75, 0, 0)) + stamp() + +def sun(l, n): + for i in range(5): + inflatekite(l, n) + lt(72) + +def star(l,n): + for i in range(5): + inflatedart(l, n) + lt(72) + +def makeshapes(): + tracer(0) + begin_poly() + kite(100) + end_poly() + register_shape("kite", get_poly()) + begin_poly() + dart(100) + end_poly() + register_shape("dart", get_poly()) + tracer(1) + +def start(): + reset() + ht() + pu() + makeshapes() + resizemode("user") + +def test(l=200, n=4, fun=sun, startpos=(0,0), th=2): + global tiledict + goto(startpos) + setheading(0) + tiledict = {} + tracer(0) + fun(l, 
n) + draw(l, n, th) + tracer(1) + nk = len([x for x in tiledict if tiledict[x]]) + nd = len([x for x in tiledict if not tiledict[x]]) + print("%d kites and %d darts = %d pieces." % (nk, nd, nk+nd)) + +def demo(fun=sun): + start() + for i in range(8): + a = clock() + test(300, i, fun) + b = clock() + t = b - a + if t < 2: + sleep(2 - t) + +def main(): + #title("Penrose-tiling with kites and darts.") + mode("logo") + bgcolor(0.3, 0.3, 0) + demo(sun) + sleep(2) + demo(star) + pencolor("black") + goto(0,-200) + pencolor(0.7,0.7,1) + write("Please wait...", + align="center", font=('Arial Black', 36, 'bold')) + test(600, 8, startpos=(70, 117)) + return "Done" + +if __name__ == "__main__": + msg = main() + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/planet_and_moon.py b/parrot/lib/python3.10/turtledemo/planet_and_moon.py new file mode 100644 index 0000000000000000000000000000000000000000..021ff99383aa65122b663f394b9d55bf57a24e9d --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/planet_and_moon.py @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 +""" turtle-example-suite: + + tdemo_planets_and_moon.py + +Gravitational system simulation using the +approximation method from Feynman-lectures, +p.9-8, using turtlegraphics. + +Example: heavy central body, light planet, +very light moon! +Planet has a circular orbit, moon a stable +orbit around the planet. + +You can hold the movement temporarily by +pressing the left mouse button with the +mouse over the scrollbar of the canvas. 
+ +""" +from turtle import Shape, Turtle, mainloop, Vec2D as Vec + +G = 8 + +class GravSys(object): + def __init__(self): + self.planets = [] + self.t = 0 + self.dt = 0.01 + def init(self): + for p in self.planets: + p.init() + def start(self): + for i in range(10000): + self.t += self.dt + for p in self.planets: + p.step() + +class Star(Turtle): + def __init__(self, m, x, v, gravSys, shape): + Turtle.__init__(self, shape=shape) + self.penup() + self.m = m + self.setpos(x) + self.v = v + gravSys.planets.append(self) + self.gravSys = gravSys + self.resizemode("user") + self.pendown() + def init(self): + dt = self.gravSys.dt + self.a = self.acc() + self.v = self.v + 0.5*dt*self.a + def acc(self): + a = Vec(0,0) + for planet in self.gravSys.planets: + if planet != self: + v = planet.pos()-self.pos() + a += (G*planet.m/abs(v)**3)*v + return a + def step(self): + dt = self.gravSys.dt + self.setpos(self.pos() + dt*self.v) + if self.gravSys.planets.index(self) != 0: + self.setheading(self.towards(self.gravSys.planets[0])) + self.a = self.acc() + self.v = self.v + dt*self.a + +## create compound yellow/blue turtleshape for planets + +def main(): + s = Turtle() + s.reset() + s.getscreen().tracer(0,0) + s.ht() + s.pu() + s.fd(6) + s.lt(90) + s.begin_poly() + s.circle(6, 180) + s.end_poly() + m1 = s.get_poly() + s.begin_poly() + s.circle(6,180) + s.end_poly() + m2 = s.get_poly() + + planetshape = Shape("compound") + planetshape.addcomponent(m1,"orange") + planetshape.addcomponent(m2,"blue") + s.getscreen().register_shape("planet", planetshape) + s.getscreen().tracer(1,0) + + ## setup gravitational system + gs = GravSys() + sun = Star(1000000, Vec(0,0), Vec(0,-2.5), gs, "circle") + sun.color("yellow") + sun.shapesize(1.8) + sun.pu() + earth = Star(12500, Vec(210,0), Vec(0,195), gs, "planet") + earth.pencolor("green") + earth.shapesize(0.8) + moon = Star(1, Vec(220,0), Vec(0,295), gs, "planet") + moon.pencolor("blue") + moon.shapesize(0.5) + gs.init() + gs.start() + return 
"Done!" + +if __name__ == '__main__': + main() + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/rosette.py b/parrot/lib/python3.10/turtledemo/rosette.py new file mode 100644 index 0000000000000000000000000000000000000000..47d0f00e9da9d15dc93b0814208949e86be618f5 --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/rosette.py @@ -0,0 +1,65 @@ +""" turtle-example-suite: + + tdemo_wikipedia3.py + +This example is +inspired by the Wikipedia article on turtle +graphics. (See example wikipedia1 for URLs) + +First we create (ne-1) (i.e. 35 in this +example) copies of our first turtle p. +Then we let them perform their steps in +parallel. + +Followed by a complete undo(). +""" +from turtle import Screen, Turtle, mainloop +from time import perf_counter as clock, sleep + +def mn_eck(p, ne,sz): + turtlelist = [p] + #create ne-1 additional turtles + for i in range(1,ne): + q = p.clone() + q.rt(360.0/ne) + turtlelist.append(q) + p = q + for i in range(ne): + c = abs(ne/2.0-i)/(ne*.7) + # let those ne turtles make a step + # in parallel: + for t in turtlelist: + t.rt(360./ne) + t.pencolor(1-c,0,c) + t.fd(sz) + +def main(): + s = Screen() + s.bgcolor("black") + p=Turtle() + p.speed(0) + p.hideturtle() + p.pencolor("red") + p.pensize(3) + + s.tracer(36,0) + + at = clock() + mn_eck(p, 36, 19) + et = clock() + z1 = et-at + + sleep(1) + + at = clock() + while any(t.undobufferentries() for t in s.turtles()): + for t in s.turtles(): + t.undo() + et = clock() + return "runtime: %.3f sec" % (z1+et-at) + + +if __name__ == '__main__': + msg = main() + print(msg) + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/round_dance.py b/parrot/lib/python3.10/turtledemo/round_dance.py new file mode 100644 index 0000000000000000000000000000000000000000..10383614c6e974c7b0cbd3e9ad99ba8ee5371922 --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/round_dance.py @@ -0,0 +1,86 @@ +""" turtle-example-suite: + + tdemo_round_dance.py + +(Needs version 1.1 of the turtle module that +comes 
with Python 3.1) + +Dancing turtles have a compound shape +consisting of a series of triangles of +decreasing size. + +Turtles march along a circle while rotating +pairwise in opposite direction, with one +exception. Does that breaking of symmetry +enhance the attractiveness of the example? + +Press any key to stop the animation. + +Technically: demonstrates use of compound +shapes, transformation of shapes as well as +cloning turtles. The animation is +controlled through update(). +""" + +from turtle import * + +def stop(): + global running + running = False + +def main(): + global running + clearscreen() + bgcolor("gray10") + tracer(False) + shape("triangle") + f = 0.793402 + phi = 9.064678 + s = 5 + c = 1 + # create compound shape + sh = Shape("compound") + for i in range(10): + shapesize(s) + p =get_shapepoly() + s *= f + c *= f + tilt(-phi) + sh.addcomponent(p, (c, 0.25, 1-c), "black") + register_shape("multitri", sh) + # create dancers + shapesize(1) + shape("multitri") + pu() + setpos(0, -200) + dancers = [] + for i in range(180): + fd(7) + tilt(-4) + lt(2) + update() + if i % 12 == 0: + dancers.append(clone()) + home() + # dance + running = True + onkeypress(stop) + listen() + cs = 1 + while running: + ta = -4 + for dancer in dancers: + dancer.fd(7) + dancer.lt(2) + dancer.tilt(ta) + ta = -4 if ta > 0 else 2 + if cs < 180: + right(4) + shapesize(cs) + cs *= 1.005 + update() + return "DONE!" + +if __name__=='__main__': + print(main()) + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/sorting_animate.py b/parrot/lib/python3.10/turtledemo/sorting_animate.py new file mode 100644 index 0000000000000000000000000000000000000000..d25a0ab6cebdc09e79a7a7974345852cd23a469c --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/sorting_animate.py @@ -0,0 +1,204 @@ +#!/usr/bin/env python3 +""" + + sorting_animation.py + +A minimal sorting algorithm animation: +Sorts a shelf of 10 blocks using insertion +sort, selection sort and quicksort. 
+ +Shelfs are implemented using builtin lists. + +Blocks are turtles with shape "square", but +stretched to rectangles by shapesize() + --------------------------------------- + To exit press space button + --------------------------------------- +""" +from turtle import * +import random + + +class Block(Turtle): + + def __init__(self, size): + self.size = size + Turtle.__init__(self, shape="square", visible=False) + self.pu() + self.shapesize(size * 1.5, 1.5, 2) # square-->rectangle + self.fillcolor("black") + self.st() + + def glow(self): + self.fillcolor("red") + + def unglow(self): + self.fillcolor("black") + + def __repr__(self): + return "Block size: {0}".format(self.size) + + +class Shelf(list): + + def __init__(self, y): + "create a shelf. y is y-position of first block" + self.y = y + self.x = -150 + + def push(self, d): + width, _, _ = d.shapesize() + # align blocks by the bottom edge + y_offset = width / 2 * 20 + d.sety(self.y + y_offset) + d.setx(self.x + 34 * len(self)) + self.append(d) + + def _close_gap_from_i(self, i): + for b in self[i:]: + xpos, _ = b.pos() + b.setx(xpos - 34) + + def _open_gap_from_i(self, i): + for b in self[i:]: + xpos, _ = b.pos() + b.setx(xpos + 34) + + def pop(self, key): + b = list.pop(self, key) + b.glow() + b.sety(200) + self._close_gap_from_i(key) + return b + + def insert(self, key, b): + self._open_gap_from_i(key) + list.insert(self, key, b) + b.setx(self.x + 34 * key) + width, _, _ = b.shapesize() + # align blocks by the bottom edge + y_offset = width / 2 * 20 + b.sety(self.y + y_offset) + b.unglow() + +def isort(shelf): + length = len(shelf) + for i in range(1, length): + hole = i + while hole > 0 and shelf[i].size < shelf[hole - 1].size: + hole = hole - 1 + shelf.insert(hole, shelf.pop(i)) + return + +def ssort(shelf): + length = len(shelf) + for j in range(0, length - 1): + imin = j + for i in range(j + 1, length): + if shelf[i].size < shelf[imin].size: + imin = i + if imin != j: + shelf.insert(j, shelf.pop(imin)) 
+ +def partition(shelf, left, right, pivot_index): + pivot = shelf[pivot_index] + shelf.insert(right, shelf.pop(pivot_index)) + store_index = left + for i in range(left, right): # range is non-inclusive of ending value + if shelf[i].size < pivot.size: + shelf.insert(store_index, shelf.pop(i)) + store_index = store_index + 1 + shelf.insert(store_index, shelf.pop(right)) # move pivot to correct position + return store_index + +def qsort(shelf, left, right): + if left < right: + pivot_index = left + pivot_new_index = partition(shelf, left, right, pivot_index) + qsort(shelf, left, pivot_new_index - 1) + qsort(shelf, pivot_new_index + 1, right) + +def randomize(): + disable_keys() + clear() + target = list(range(10)) + random.shuffle(target) + for i, t in enumerate(target): + for j in range(i, len(s)): + if s[j].size == t + 1: + s.insert(i, s.pop(j)) + show_text(instructions1) + show_text(instructions2, line=1) + enable_keys() + +def show_text(text, line=0): + line = 20 * line + goto(0,-250 - line) + write(text, align="center", font=("Courier", 16, "bold")) + +def start_ssort(): + disable_keys() + clear() + show_text("Selection Sort") + ssort(s) + clear() + show_text(instructions1) + show_text(instructions2, line=1) + enable_keys() + +def start_isort(): + disable_keys() + clear() + show_text("Insertion Sort") + isort(s) + clear() + show_text(instructions1) + show_text(instructions2, line=1) + enable_keys() + +def start_qsort(): + disable_keys() + clear() + show_text("Quicksort") + qsort(s, 0, len(s) - 1) + clear() + show_text(instructions1) + show_text(instructions2, line=1) + enable_keys() + +def init_shelf(): + global s + s = Shelf(-200) + vals = (4, 2, 8, 9, 1, 5, 10, 3, 7, 6) + for i in vals: + s.push(Block(i)) + +def disable_keys(): + onkey(None, "s") + onkey(None, "i") + onkey(None, "q") + onkey(None, "r") + +def enable_keys(): + onkey(start_isort, "i") + onkey(start_ssort, "s") + onkey(start_qsort, "q") + onkey(randomize, "r") + onkey(bye, "space") + +def main(): 
+ getscreen().clearscreen() + ht(); penup() + init_shelf() + show_text(instructions1) + show_text(instructions2, line=1) + enable_keys() + listen() + return "EVENTLOOP" + +instructions1 = "press i for insertion sort, s for selection sort, q for quicksort" +instructions2 = "spacebar to quit, r to randomize" + +if __name__=="__main__": + msg = main() + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/tree.py b/parrot/lib/python3.10/turtledemo/tree.py new file mode 100644 index 0000000000000000000000000000000000000000..98a20da7f15c11d90a4243b8319131ceed2e8f4c --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/tree.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python3 +""" turtle-example-suite: + + tdemo_tree.py + +Displays a 'breadth-first-tree' - in contrast +to the classical Logo tree drawing programs, +which use a depth-first-algorithm. + +Uses: +(1) a tree-generator, where the drawing is +quasi the side-effect, whereas the generator +always yields None. +(2) Turtle-cloning: At each branching point +the current pen is cloned. So in the end +there are 1024 turtles. +""" +from turtle import Turtle, mainloop +from time import perf_counter as clock + +def tree(plist, l, a, f): + """ plist is list of pens + l is length of branch + a is half of the angle between 2 branches + f is factor by which branch is shortened + from level to level.""" + if l > 3: + lst = [] + for p in plist: + p.forward(l) + q = p.clone() + p.left(a) + q.right(a) + lst.append(p) + lst.append(q) + for x in tree(lst, l*f, a, f): + yield None + +def maketree(): + p = Turtle() + p.setundobuffer(None) + p.hideturtle() + p.speed(0) + p.getscreen().tracer(30,0) + p.left(90) + p.penup() + p.forward(-210) + p.pendown() + t = tree([p], 200, 65, 0.6375) + for x in t: + pass + +def main(): + a=clock() + maketree() + b=clock() + return "done: %.2f sec." 
% (b-a) + +if __name__ == "__main__": + msg = main() + print(msg) + mainloop() diff --git a/parrot/lib/python3.10/turtledemo/turtle.cfg b/parrot/lib/python3.10/turtledemo/turtle.cfg new file mode 100644 index 0000000000000000000000000000000000000000..bd89a741f926912a6a9288a688d1f31d133afac9 --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/turtle.cfg @@ -0,0 +1,10 @@ +width = 800 +height = 600 +canvwidth = 1200 +canvheight = 900 +shape = arrow +mode = standard +resizemode = auto +fillcolor = "" +title = Python turtle graphics demo. + diff --git a/parrot/lib/python3.10/turtledemo/two_canvases.py b/parrot/lib/python3.10/turtledemo/two_canvases.py new file mode 100644 index 0000000000000000000000000000000000000000..f3602585ab0592c393fbe9526cb5aae4ae902f6c --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/two_canvases.py @@ -0,0 +1,54 @@ +"""turtledemo.two_canvases + +Use TurtleScreen and RawTurtle to draw on two +distinct canvases in a separate window. The +new window must be separately closed in +addition to pressing the STOP button. 
+""" + +from turtle import TurtleScreen, RawTurtle, TK + +def main(): + root = TK.Tk() + cv1 = TK.Canvas(root, width=300, height=200, bg="#ddffff") + cv2 = TK.Canvas(root, width=300, height=200, bg="#ffeeee") + cv1.pack() + cv2.pack() + + s1 = TurtleScreen(cv1) + s1.bgcolor(0.85, 0.85, 1) + s2 = TurtleScreen(cv2) + s2.bgcolor(1, 0.85, 0.85) + + p = RawTurtle(s1) + q = RawTurtle(s2) + + p.color("red", (1, 0.85, 0.85)) + p.width(3) + q.color("blue", (0.85, 0.85, 1)) + q.width(3) + + for t in p,q: + t.shape("turtle") + t.lt(36) + + q.lt(180) + + for t in p, q: + t.begin_fill() + for i in range(5): + for t in p, q: + t.fd(50) + t.lt(72) + for t in p,q: + t.end_fill() + t.lt(54) + t.pu() + t.bk(50) + + return "EVENTLOOP" + + +if __name__ == '__main__': + main() + TK.mainloop() # keep window open until user closes it diff --git a/parrot/lib/python3.10/turtledemo/yinyang.py b/parrot/lib/python3.10/turtledemo/yinyang.py new file mode 100644 index 0000000000000000000000000000000000000000..11d1f47cae25491b302f2a387a816c3558130444 --- /dev/null +++ b/parrot/lib/python3.10/turtledemo/yinyang.py @@ -0,0 +1,49 @@ +#!/usr/bin/env python3 +""" turtle-example-suite: + + tdemo_yinyang.py + +Another drawing suitable as a beginner's +programming example. + +The small circles are drawn by the circle +command. + +""" + +from turtle import * + +def yin(radius, color1, color2): + width(3) + color("black", color1) + begin_fill() + circle(radius/2., 180) + circle(radius, 180) + left(180) + circle(-radius/2., 180) + end_fill() + left(90) + up() + forward(radius*0.35) + right(90) + down() + color(color1, color2) + begin_fill() + circle(radius*0.15) + end_fill() + left(90) + up() + backward(radius*0.35) + down() + left(90) + +def main(): + reset() + yin(200, "black", "white") + yin(200, "white", "black") + ht() + return "Done!" 
+ +if __name__ == '__main__': + main() + mainloop() diff --git a/parrot/lib/python3.10/xml/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/xml/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a7604bdddb707f57f1fa7361a7523c5761d968e1 Binary files /dev/null and b/parrot/lib/python3.10/xml/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/dom/NodeFilter.py b/parrot/lib/python3.10/xml/dom/NodeFilter.py new file mode 100644 index 0000000000000000000000000000000000000000..640e0bfd246ca5f9b61fd7ca79ad7b3e560a2a2d --- /dev/null +++ b/parrot/lib/python3.10/xml/dom/NodeFilter.py @@ -0,0 +1,27 @@ +# This is the Python mapping for interface NodeFilter from +# DOM2-Traversal-Range. It contains only constants. + +class NodeFilter: + """ + This is the DOM2 NodeFilter interface. It contains only constants. + """ + FILTER_ACCEPT = 1 + FILTER_REJECT = 2 + FILTER_SKIP = 3 + + SHOW_ALL = 0xFFFFFFFF + SHOW_ELEMENT = 0x00000001 + SHOW_ATTRIBUTE = 0x00000002 + SHOW_TEXT = 0x00000004 + SHOW_CDATA_SECTION = 0x00000008 + SHOW_ENTITY_REFERENCE = 0x00000010 + SHOW_ENTITY = 0x00000020 + SHOW_PROCESSING_INSTRUCTION = 0x00000040 + SHOW_COMMENT = 0x00000080 + SHOW_DOCUMENT = 0x00000100 + SHOW_DOCUMENT_TYPE = 0x00000200 + SHOW_DOCUMENT_FRAGMENT = 0x00000400 + SHOW_NOTATION = 0x00000800 + + def acceptNode(self, node): + raise NotImplementedError diff --git a/parrot/lib/python3.10/xml/dom/__pycache__/NodeFilter.cpython-310.pyc b/parrot/lib/python3.10/xml/dom/__pycache__/NodeFilter.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e8f57cfab446c662ae0ef6d09c5ec13a11787e81 Binary files /dev/null and b/parrot/lib/python3.10/xml/dom/__pycache__/NodeFilter.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/dom/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/xml/dom/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..ca74832813b8466d4519b48e965b85310a0161c6 Binary files /dev/null and b/parrot/lib/python3.10/xml/dom/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/dom/__pycache__/domreg.cpython-310.pyc b/parrot/lib/python3.10/xml/dom/__pycache__/domreg.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50ee0daf7d34203947f7b2e70edf859a428bf536 Binary files /dev/null and b/parrot/lib/python3.10/xml/dom/__pycache__/domreg.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/dom/__pycache__/expatbuilder.cpython-310.pyc b/parrot/lib/python3.10/xml/dom/__pycache__/expatbuilder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0a764b66afa2014d3030f3528a78970ae7eae47f Binary files /dev/null and b/parrot/lib/python3.10/xml/dom/__pycache__/expatbuilder.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/dom/__pycache__/minicompat.cpython-310.pyc b/parrot/lib/python3.10/xml/dom/__pycache__/minicompat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..deed7399a5ef681d38e9e44bfeb8e2f9020e876b Binary files /dev/null and b/parrot/lib/python3.10/xml/dom/__pycache__/minicompat.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/dom/__pycache__/minidom.cpython-310.pyc b/parrot/lib/python3.10/xml/dom/__pycache__/minidom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5a7407512f744920f005bf2d8befc08e1df0315a Binary files /dev/null and b/parrot/lib/python3.10/xml/dom/__pycache__/minidom.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/dom/__pycache__/pulldom.cpython-310.pyc b/parrot/lib/python3.10/xml/dom/__pycache__/pulldom.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3849636e4cc5d2dfcb55e8e6cb9d4cb6112cf515 Binary files /dev/null and b/parrot/lib/python3.10/xml/dom/__pycache__/pulldom.cpython-310.pyc 
differ diff --git a/parrot/lib/python3.10/xml/dom/__pycache__/xmlbuilder.cpython-310.pyc b/parrot/lib/python3.10/xml/dom/__pycache__/xmlbuilder.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fa326d41116a94bc2343e6b1cee832afc2d90c09 Binary files /dev/null and b/parrot/lib/python3.10/xml/dom/__pycache__/xmlbuilder.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/dom/expatbuilder.py b/parrot/lib/python3.10/xml/dom/expatbuilder.py new file mode 100644 index 0000000000000000000000000000000000000000..199c22d0af347e15bf53005f74b5359ce0f0e904 --- /dev/null +++ b/parrot/lib/python3.10/xml/dom/expatbuilder.py @@ -0,0 +1,965 @@ +"""Facility to use the Expat parser to load a minidom instance +from a string or file. + +This avoids all the overhead of SAX and pulldom to gain performance. +""" + +# Warning! +# +# This module is tightly bound to the implementation details of the +# minidom DOM and can't be used with other DOM implementations. This +# is due, in part, to a lack of appropriate methods in the DOM (there is +# no way to create Entity and Notation nodes via the DOM Level 2 +# interface), and for performance. The latter is the cause of some fairly +# cryptic code. +# +# Performance hacks: +# +# - .character_data_handler() has an extra case in which continuing +# data is appended to an existing Text node; this can be a +# speedup since pyexpat can break up character data into multiple +# callbacks even though we set the buffer_text attribute on the +# parser. This also gives us the advantage that we don't need a +# separate normalization pass. +# +# - Determining that a node exists is done using an identity comparison +# with None rather than a truth test; this avoids searching for and +# calling any methods on the node object if it exists. (A rather +# nice speedup is achieved this way as well!) 
+ +from xml.dom import xmlbuilder, minidom, Node +from xml.dom import EMPTY_NAMESPACE, EMPTY_PREFIX, XMLNS_NAMESPACE +from xml.parsers import expat +from xml.dom.minidom import _append_child, _set_attribute_node +from xml.dom.NodeFilter import NodeFilter + +TEXT_NODE = Node.TEXT_NODE +CDATA_SECTION_NODE = Node.CDATA_SECTION_NODE +DOCUMENT_NODE = Node.DOCUMENT_NODE + +FILTER_ACCEPT = xmlbuilder.DOMBuilderFilter.FILTER_ACCEPT +FILTER_REJECT = xmlbuilder.DOMBuilderFilter.FILTER_REJECT +FILTER_SKIP = xmlbuilder.DOMBuilderFilter.FILTER_SKIP +FILTER_INTERRUPT = xmlbuilder.DOMBuilderFilter.FILTER_INTERRUPT + +theDOMImplementation = minidom.getDOMImplementation() + +# Expat typename -> TypeInfo +_typeinfo_map = { + "CDATA": minidom.TypeInfo(None, "cdata"), + "ENUM": minidom.TypeInfo(None, "enumeration"), + "ENTITY": minidom.TypeInfo(None, "entity"), + "ENTITIES": minidom.TypeInfo(None, "entities"), + "ID": minidom.TypeInfo(None, "id"), + "IDREF": minidom.TypeInfo(None, "idref"), + "IDREFS": minidom.TypeInfo(None, "idrefs"), + "NMTOKEN": minidom.TypeInfo(None, "nmtoken"), + "NMTOKENS": minidom.TypeInfo(None, "nmtokens"), + } + +class ElementInfo(object): + __slots__ = '_attr_info', '_model', 'tagName' + + def __init__(self, tagName, model=None): + self.tagName = tagName + self._attr_info = [] + self._model = model + + def __getstate__(self): + return self._attr_info, self._model, self.tagName + + def __setstate__(self, state): + self._attr_info, self._model, self.tagName = state + + def getAttributeType(self, aname): + for info in self._attr_info: + if info[1] == aname: + t = info[-2] + if t[0] == "(": + return _typeinfo_map["ENUM"] + else: + return _typeinfo_map[info[-2]] + return minidom._no_type + + def getAttributeTypeNS(self, namespaceURI, localName): + return minidom._no_type + + def isElementContent(self): + if self._model: + type = self._model[0] + return type not in (expat.model.XML_CTYPE_ANY, + expat.model.XML_CTYPE_MIXED) + else: + return False + + def 
isEmpty(self): + if self._model: + return self._model[0] == expat.model.XML_CTYPE_EMPTY + else: + return False + + def isId(self, aname): + for info in self._attr_info: + if info[1] == aname: + return info[-2] == "ID" + return False + + def isIdNS(self, euri, ename, auri, aname): + # not sure this is meaningful + return self.isId((auri, aname)) + +def _intern(builder, s): + return builder._intern_setdefault(s, s) + +def _parse_ns_name(builder, name): + assert ' ' in name + parts = name.split(' ') + intern = builder._intern_setdefault + if len(parts) == 3: + uri, localname, prefix = parts + prefix = intern(prefix, prefix) + qname = "%s:%s" % (prefix, localname) + qname = intern(qname, qname) + localname = intern(localname, localname) + elif len(parts) == 2: + uri, localname = parts + prefix = EMPTY_PREFIX + qname = localname = intern(localname, localname) + else: + raise ValueError("Unsupported syntax: spaces in URIs not supported: %r" % name) + return intern(uri, uri), localname, prefix, qname + + +class ExpatBuilder: + """Document builder that uses Expat to build a ParsedXML.DOM document + instance.""" + + def __init__(self, options=None): + if options is None: + options = xmlbuilder.Options() + self._options = options + if self._options.filter is not None: + self._filter = FilterVisibilityController(self._options.filter) + else: + self._filter = None + # This *really* doesn't do anything in this case, so + # override it with something fast & minimal. 
+ self._finish_start_element = id + self._parser = None + self.reset() + + def createParser(self): + """Create a new parser object.""" + return expat.ParserCreate() + + def getParser(self): + """Return the parser object, creating a new one if needed.""" + if not self._parser: + self._parser = self.createParser() + self._intern_setdefault = self._parser.intern.setdefault + self._parser.buffer_text = True + self._parser.ordered_attributes = True + self._parser.specified_attributes = True + self.install(self._parser) + return self._parser + + def reset(self): + """Free all data structures used during DOM construction.""" + self.document = theDOMImplementation.createDocument( + EMPTY_NAMESPACE, None, None) + self.curNode = self.document + self._elem_info = self.document._elem_info + self._cdata = False + + def install(self, parser): + """Install the callbacks needed to build the DOM into the parser.""" + # This creates circular references! + parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler + parser.StartElementHandler = self.first_element_handler + parser.EndElementHandler = self.end_element_handler + parser.ProcessingInstructionHandler = self.pi_handler + if self._options.entities: + parser.EntityDeclHandler = self.entity_decl_handler + parser.NotationDeclHandler = self.notation_decl_handler + if self._options.comments: + parser.CommentHandler = self.comment_handler + if self._options.cdata_sections: + parser.StartCdataSectionHandler = self.start_cdata_section_handler + parser.EndCdataSectionHandler = self.end_cdata_section_handler + parser.CharacterDataHandler = self.character_data_handler_cdata + else: + parser.CharacterDataHandler = self.character_data_handler + parser.ExternalEntityRefHandler = self.external_entity_ref_handler + parser.XmlDeclHandler = self.xml_decl_handler + parser.ElementDeclHandler = self.element_decl_handler + parser.AttlistDeclHandler = self.attlist_decl_handler + + def parseFile(self, file): + """Parse a document from a file 
object, returning the document + node.""" + parser = self.getParser() + first_buffer = True + try: + while 1: + buffer = file.read(16*1024) + if not buffer: + break + parser.Parse(buffer, False) + if first_buffer and self.document.documentElement: + self._setup_subset(buffer) + first_buffer = False + parser.Parse(b"", True) + except ParseEscape: + pass + doc = self.document + self.reset() + self._parser = None + return doc + + def parseString(self, string): + """Parse a document from a string, returning the document node.""" + parser = self.getParser() + try: + parser.Parse(string, True) + self._setup_subset(string) + except ParseEscape: + pass + doc = self.document + self.reset() + self._parser = None + return doc + + def _setup_subset(self, buffer): + """Load the internal subset if there might be one.""" + if self.document.doctype: + extractor = InternalSubsetExtractor() + extractor.parseString(buffer) + subset = extractor.getSubset() + self.document.doctype.internalSubset = subset + + def start_doctype_decl_handler(self, doctypeName, systemId, publicId, + has_internal_subset): + doctype = self.document.implementation.createDocumentType( + doctypeName, publicId, systemId) + doctype.ownerDocument = self.document + _append_child(self.document, doctype) + self.document.doctype = doctype + if self._filter and self._filter.acceptNode(doctype) == FILTER_REJECT: + self.document.doctype = None + del self.document.childNodes[-1] + doctype = None + self._parser.EntityDeclHandler = None + self._parser.NotationDeclHandler = None + if has_internal_subset: + if doctype is not None: + doctype.entities._seq = [] + doctype.notations._seq = [] + self._parser.CommentHandler = None + self._parser.ProcessingInstructionHandler = None + self._parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler + + def end_doctype_decl_handler(self): + if self._options.comments: + self._parser.CommentHandler = self.comment_handler + self._parser.ProcessingInstructionHandler = self.pi_handler + 
if not (self._elem_info or self._filter): + self._finish_end_element = id + + def pi_handler(self, target, data): + node = self.document.createProcessingInstruction(target, data) + _append_child(self.curNode, node) + if self._filter and self._filter.acceptNode(node) == FILTER_REJECT: + self.curNode.removeChild(node) + + def character_data_handler_cdata(self, data): + childNodes = self.curNode.childNodes + if self._cdata: + if ( self._cdata_continue + and childNodes[-1].nodeType == CDATA_SECTION_NODE): + childNodes[-1].appendData(data) + return + node = self.document.createCDATASection(data) + self._cdata_continue = True + elif childNodes and childNodes[-1].nodeType == TEXT_NODE: + node = childNodes[-1] + value = node.data + data + node.data = value + return + else: + node = minidom.Text() + node.data = data + node.ownerDocument = self.document + _append_child(self.curNode, node) + + def character_data_handler(self, data): + childNodes = self.curNode.childNodes + if childNodes and childNodes[-1].nodeType == TEXT_NODE: + node = childNodes[-1] + node.data = node.data + data + return + node = minidom.Text() + node.data = node.data + data + node.ownerDocument = self.document + _append_child(self.curNode, node) + + def entity_decl_handler(self, entityName, is_parameter_entity, value, + base, systemId, publicId, notationName): + if is_parameter_entity: + # we don't care about parameter entities for the DOM + return + if not self._options.entities: + return + node = self.document._create_entity(entityName, publicId, + systemId, notationName) + if value is not None: + # internal entity + # node *should* be readonly, but we'll cheat + child = self.document.createTextNode(value) + node.childNodes.append(child) + self.document.doctype.entities._seq.append(node) + if self._filter and self._filter.acceptNode(node) == FILTER_REJECT: + del self.document.doctype.entities._seq[-1] + + def notation_decl_handler(self, notationName, base, systemId, publicId): + node = 
self.document._create_notation(notationName, publicId, systemId) + self.document.doctype.notations._seq.append(node) + if self._filter and self._filter.acceptNode(node) == FILTER_ACCEPT: + del self.document.doctype.notations._seq[-1] + + def comment_handler(self, data): + node = self.document.createComment(data) + _append_child(self.curNode, node) + if self._filter and self._filter.acceptNode(node) == FILTER_REJECT: + self.curNode.removeChild(node) + + def start_cdata_section_handler(self): + self._cdata = True + self._cdata_continue = False + + def end_cdata_section_handler(self): + self._cdata = False + self._cdata_continue = False + + def external_entity_ref_handler(self, context, base, systemId, publicId): + return 1 + + def first_element_handler(self, name, attributes): + if self._filter is None and not self._elem_info: + self._finish_end_element = id + self.getParser().StartElementHandler = self.start_element_handler + self.start_element_handler(name, attributes) + + def start_element_handler(self, name, attributes): + node = self.document.createElement(name) + _append_child(self.curNode, node) + self.curNode = node + + if attributes: + for i in range(0, len(attributes), 2): + a = minidom.Attr(attributes[i], EMPTY_NAMESPACE, + None, EMPTY_PREFIX) + value = attributes[i+1] + a.value = value + a.ownerDocument = self.document + _set_attribute_node(node, a) + + if node is not self.document.documentElement: + self._finish_start_element(node) + + def _finish_start_element(self, node): + if self._filter: + # To be general, we'd have to call isSameNode(), but this + # is sufficient for minidom: + if node is self.document.documentElement: + return + filt = self._filter.startContainer(node) + if filt == FILTER_REJECT: + # ignore this node & all descendents + Rejecter(self) + elif filt == FILTER_SKIP: + # ignore this node, but make it's children become + # children of the parent node + Skipper(self) + else: + return + self.curNode = node.parentNode + 
node.parentNode.removeChild(node) + node.unlink() + + # If this ever changes, Namespaces.end_element_handler() needs to + # be changed to match. + # + def end_element_handler(self, name): + curNode = self.curNode + self.curNode = curNode.parentNode + self._finish_end_element(curNode) + + def _finish_end_element(self, curNode): + info = self._elem_info.get(curNode.tagName) + if info: + self._handle_white_text_nodes(curNode, info) + if self._filter: + if curNode is self.document.documentElement: + return + if self._filter.acceptNode(curNode) == FILTER_REJECT: + self.curNode.removeChild(curNode) + curNode.unlink() + + def _handle_white_text_nodes(self, node, info): + if (self._options.whitespace_in_element_content + or not info.isElementContent()): + return + + # We have element type information and should remove ignorable + # whitespace; identify for text nodes which contain only + # whitespace. + L = [] + for child in node.childNodes: + if child.nodeType == TEXT_NODE and not child.data.strip(): + L.append(child) + + # Remove ignorable whitespace from the tree. + for child in L: + node.removeChild(child) + + def element_decl_handler(self, name, model): + info = self._elem_info.get(name) + if info is None: + self._elem_info[name] = ElementInfo(name, model) + else: + assert info._model is None + info._model = model + + def attlist_decl_handler(self, elem, name, type, default, required): + info = self._elem_info.get(elem) + if info is None: + info = ElementInfo(elem) + self._elem_info[elem] = info + info._attr_info.append( + [None, name, None, None, default, 0, type, required]) + + def xml_decl_handler(self, version, encoding, standalone): + self.document.version = version + self.document.encoding = encoding + # This is still a little ugly, thanks to the pyexpat API. 
;-( + if standalone >= 0: + if standalone: + self.document.standalone = True + else: + self.document.standalone = False + + +# Don't include FILTER_INTERRUPT, since that's checked separately +# where allowed. +_ALLOWED_FILTER_RETURNS = (FILTER_ACCEPT, FILTER_REJECT, FILTER_SKIP) + +class FilterVisibilityController(object): + """Wrapper around a DOMBuilderFilter which implements the checks + to make the whatToShow filter attribute work.""" + + __slots__ = 'filter', + + def __init__(self, filter): + self.filter = filter + + def startContainer(self, node): + mask = self._nodetype_mask[node.nodeType] + if self.filter.whatToShow & mask: + val = self.filter.startContainer(node) + if val == FILTER_INTERRUPT: + raise ParseEscape + if val not in _ALLOWED_FILTER_RETURNS: + raise ValueError( + "startContainer() returned illegal value: " + repr(val)) + return val + else: + return FILTER_ACCEPT + + def acceptNode(self, node): + mask = self._nodetype_mask[node.nodeType] + if self.filter.whatToShow & mask: + val = self.filter.acceptNode(node) + if val == FILTER_INTERRUPT: + raise ParseEscape + if val == FILTER_SKIP: + # move all child nodes to the parent, and remove this node + parent = node.parentNode + for child in node.childNodes[:]: + parent.appendChild(child) + # node is handled by the caller + return FILTER_REJECT + if val not in _ALLOWED_FILTER_RETURNS: + raise ValueError( + "acceptNode() returned illegal value: " + repr(val)) + return val + else: + return FILTER_ACCEPT + + _nodetype_mask = { + Node.ELEMENT_NODE: NodeFilter.SHOW_ELEMENT, + Node.ATTRIBUTE_NODE: NodeFilter.SHOW_ATTRIBUTE, + Node.TEXT_NODE: NodeFilter.SHOW_TEXT, + Node.CDATA_SECTION_NODE: NodeFilter.SHOW_CDATA_SECTION, + Node.ENTITY_REFERENCE_NODE: NodeFilter.SHOW_ENTITY_REFERENCE, + Node.ENTITY_NODE: NodeFilter.SHOW_ENTITY, + Node.PROCESSING_INSTRUCTION_NODE: NodeFilter.SHOW_PROCESSING_INSTRUCTION, + Node.COMMENT_NODE: NodeFilter.SHOW_COMMENT, + Node.DOCUMENT_NODE: NodeFilter.SHOW_DOCUMENT, + 
Node.DOCUMENT_TYPE_NODE: NodeFilter.SHOW_DOCUMENT_TYPE, + Node.DOCUMENT_FRAGMENT_NODE: NodeFilter.SHOW_DOCUMENT_FRAGMENT, + Node.NOTATION_NODE: NodeFilter.SHOW_NOTATION, + } + + +class FilterCrutch(object): + __slots__ = '_builder', '_level', '_old_start', '_old_end' + + def __init__(self, builder): + self._level = 0 + self._builder = builder + parser = builder._parser + self._old_start = parser.StartElementHandler + self._old_end = parser.EndElementHandler + parser.StartElementHandler = self.start_element_handler + parser.EndElementHandler = self.end_element_handler + +class Rejecter(FilterCrutch): + __slots__ = () + + def __init__(self, builder): + FilterCrutch.__init__(self, builder) + parser = builder._parser + for name in ("ProcessingInstructionHandler", + "CommentHandler", + "CharacterDataHandler", + "StartCdataSectionHandler", + "EndCdataSectionHandler", + "ExternalEntityRefHandler", + ): + setattr(parser, name, None) + + def start_element_handler(self, *args): + self._level = self._level + 1 + + def end_element_handler(self, *args): + if self._level == 0: + # restore the old handlers + parser = self._builder._parser + self._builder.install(parser) + parser.StartElementHandler = self._old_start + parser.EndElementHandler = self._old_end + else: + self._level = self._level - 1 + +class Skipper(FilterCrutch): + __slots__ = () + + def start_element_handler(self, *args): + node = self._builder.curNode + self._old_start(*args) + if self._builder.curNode is not node: + self._level = self._level + 1 + + def end_element_handler(self, *args): + if self._level == 0: + # We're popping back out of the node we're skipping, so we + # shouldn't need to do anything but reset the handlers. + self._builder._parser.StartElementHandler = self._old_start + self._builder._parser.EndElementHandler = self._old_end + self._builder = None + else: + self._level = self._level - 1 + self._old_end(*args) + + +# framework document used by the fragment builder. 
+# Takes a string for the doctype, subset string, and namespace attrs string. + +_FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID = \ + "http://xml.python.org/entities/fragment-builder/internal" + +_FRAGMENT_BUILDER_TEMPLATE = ( + '''\ + +%%s +]> +&fragment-builder-internal;''' + % _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID) + + +class FragmentBuilder(ExpatBuilder): + """Builder which constructs document fragments given XML source + text and a context node. + + The context node is expected to provide information about the + namespace declarations which are in scope at the start of the + fragment. + """ + + def __init__(self, context, options=None): + if context.nodeType == DOCUMENT_NODE: + self.originalDocument = context + self.context = context + else: + self.originalDocument = context.ownerDocument + self.context = context + ExpatBuilder.__init__(self, options) + + def reset(self): + ExpatBuilder.reset(self) + self.fragment = None + + def parseFile(self, file): + """Parse a document fragment from a file object, returning the + fragment node.""" + return self.parseString(file.read()) + + def parseString(self, string): + """Parse a document fragment from a string, returning the + fragment node.""" + self._source = string + parser = self.getParser() + doctype = self.originalDocument.doctype + ident = "" + if doctype: + subset = doctype.internalSubset or self._getDeclarations() + if doctype.publicId: + ident = ('PUBLIC "%s" "%s"' + % (doctype.publicId, doctype.systemId)) + elif doctype.systemId: + ident = 'SYSTEM "%s"' % doctype.systemId + else: + subset = "" + nsattrs = self._getNSattrs() # get ns decls from node's ancestors + document = _FRAGMENT_BUILDER_TEMPLATE % (ident, subset, nsattrs) + try: + parser.Parse(document, True) + except: + self.reset() + raise + fragment = self.fragment + self.reset() +## self._parser = None + return fragment + + def _getDeclarations(self): + """Re-create the internal subset from the DocumentType node. 
+ + This is only needed if we don't already have the + internalSubset as a string. + """ + doctype = self.context.ownerDocument.doctype + s = "" + if doctype: + for i in range(doctype.notations.length): + notation = doctype.notations.item(i) + if s: + s = s + "\n " + s = "%s' \ + % (s, notation.publicId, notation.systemId) + else: + s = '%s SYSTEM "%s">' % (s, notation.systemId) + for i in range(doctype.entities.length): + entity = doctype.entities.item(i) + if s: + s = s + "\n " + s = "%s" + return s + + def _getNSattrs(self): + return "" + + def external_entity_ref_handler(self, context, base, systemId, publicId): + if systemId == _FRAGMENT_BUILDER_INTERNAL_SYSTEM_ID: + # this entref is the one that we made to put the subtree + # in; all of our given input is parsed in here. + old_document = self.document + old_cur_node = self.curNode + parser = self._parser.ExternalEntityParserCreate(context) + # put the real document back, parse into the fragment to return + self.document = self.originalDocument + self.fragment = self.document.createDocumentFragment() + self.curNode = self.fragment + try: + parser.Parse(self._source, True) + finally: + self.curNode = old_cur_node + self.document = old_document + self._source = None + return -1 + else: + return ExpatBuilder.external_entity_ref_handler( + self, context, base, systemId, publicId) + + +class Namespaces: + """Mix-in class for builders; adds support for namespaces.""" + + def _initNamespaces(self): + # list of (prefix, uri) ns declarations. Namespace attrs are + # constructed from this and added to the element's attrs. 
+ self._ns_ordered_prefixes = [] + + def createParser(self): + """Create a new namespace-handling parser.""" + parser = expat.ParserCreate(namespace_separator=" ") + parser.namespace_prefixes = True + return parser + + def install(self, parser): + """Insert the namespace-handlers onto the parser.""" + ExpatBuilder.install(self, parser) + if self._options.namespace_declarations: + parser.StartNamespaceDeclHandler = ( + self.start_namespace_decl_handler) + + def start_namespace_decl_handler(self, prefix, uri): + """Push this namespace declaration on our storage.""" + self._ns_ordered_prefixes.append((prefix, uri)) + + def start_element_handler(self, name, attributes): + if ' ' in name: + uri, localname, prefix, qname = _parse_ns_name(self, name) + else: + uri = EMPTY_NAMESPACE + qname = name + localname = None + prefix = EMPTY_PREFIX + node = minidom.Element(qname, uri, prefix, localname) + node.ownerDocument = self.document + _append_child(self.curNode, node) + self.curNode = node + + if self._ns_ordered_prefixes: + for prefix, uri in self._ns_ordered_prefixes: + if prefix: + a = minidom.Attr(_intern(self, 'xmlns:' + prefix), + XMLNS_NAMESPACE, prefix, "xmlns") + else: + a = minidom.Attr("xmlns", XMLNS_NAMESPACE, + "xmlns", EMPTY_PREFIX) + a.value = uri + a.ownerDocument = self.document + _set_attribute_node(node, a) + del self._ns_ordered_prefixes[:] + + if attributes: + node._ensure_attributes() + _attrs = node._attrs + _attrsNS = node._attrsNS + for i in range(0, len(attributes), 2): + aname = attributes[i] + value = attributes[i+1] + if ' ' in aname: + uri, localname, prefix, qname = _parse_ns_name(self, aname) + a = minidom.Attr(qname, uri, localname, prefix) + _attrs[qname] = a + _attrsNS[(uri, localname)] = a + else: + a = minidom.Attr(aname, EMPTY_NAMESPACE, + aname, EMPTY_PREFIX) + _attrs[aname] = a + _attrsNS[(EMPTY_NAMESPACE, aname)] = a + a.ownerDocument = self.document + a.value = value + a.ownerElement = node + + if __debug__: + # This only adds some 
asserts to the original + # end_element_handler(), so we only define this when -O is not + # used. If changing one, be sure to check the other to see if + # it needs to be changed as well. + # + def end_element_handler(self, name): + curNode = self.curNode + if ' ' in name: + uri, localname, prefix, qname = _parse_ns_name(self, name) + assert (curNode.namespaceURI == uri + and curNode.localName == localname + and curNode.prefix == prefix), \ + "element stack messed up! (namespace)" + else: + assert curNode.nodeName == name, \ + "element stack messed up - bad nodeName" + assert curNode.namespaceURI == EMPTY_NAMESPACE, \ + "element stack messed up - bad namespaceURI" + self.curNode = curNode.parentNode + self._finish_end_element(curNode) + + +class ExpatBuilderNS(Namespaces, ExpatBuilder): + """Document builder that supports namespaces.""" + + def reset(self): + ExpatBuilder.reset(self) + self._initNamespaces() + + +class FragmentBuilderNS(Namespaces, FragmentBuilder): + """Fragment builder that supports namespaces.""" + + def reset(self): + FragmentBuilder.reset(self) + self._initNamespaces() + + def _getNSattrs(self): + """Return string of namespace attributes from this element and + ancestors.""" + # XXX This needs to be re-written to walk the ancestors of the + # context to build up the namespace information from + # declarations, elements, and attributes found in context. + # Otherwise we have to store a bunch more data on the DOM + # (though that *might* be more reliable -- not clear). 
+ attrs = "" + context = self.context + L = [] + while context: + if hasattr(context, '_ns_prefix_uri'): + for prefix, uri in context._ns_prefix_uri.items(): + # add every new NS decl from context to L and attrs string + if prefix in L: + continue + L.append(prefix) + if prefix: + declname = "xmlns:" + prefix + else: + declname = "xmlns" + if attrs: + attrs = "%s\n %s='%s'" % (attrs, declname, uri) + else: + attrs = " %s='%s'" % (declname, uri) + context = context.parentNode + return attrs + + +class ParseEscape(Exception): + """Exception raised to short-circuit parsing in InternalSubsetExtractor.""" + pass + +class InternalSubsetExtractor(ExpatBuilder): + """XML processor which can rip out the internal document type subset.""" + + subset = None + + def getSubset(self): + """Return the internal subset as a string.""" + return self.subset + + def parseFile(self, file): + try: + ExpatBuilder.parseFile(self, file) + except ParseEscape: + pass + + def parseString(self, string): + try: + ExpatBuilder.parseString(self, string) + except ParseEscape: + pass + + def install(self, parser): + parser.StartDoctypeDeclHandler = self.start_doctype_decl_handler + parser.StartElementHandler = self.start_element_handler + + def start_doctype_decl_handler(self, name, publicId, systemId, + has_internal_subset): + if has_internal_subset: + parser = self.getParser() + self.subset = [] + parser.DefaultHandler = self.subset.append + parser.EndDoctypeDeclHandler = self.end_doctype_decl_handler + else: + raise ParseEscape() + + def end_doctype_decl_handler(self): + s = ''.join(self.subset).replace('\r\n', '\n').replace('\r', '\n') + self.subset = s + raise ParseEscape() + + def start_element_handler(self, name, attrs): + raise ParseEscape() + + +def parse(file, namespaces=True): + """Parse a document, returning the resulting Document node. + + 'file' may be either a file name or an open file object. 
+ """ + if namespaces: + builder = ExpatBuilderNS() + else: + builder = ExpatBuilder() + + if isinstance(file, str): + with open(file, 'rb') as fp: + result = builder.parseFile(fp) + else: + result = builder.parseFile(file) + return result + + +def parseString(string, namespaces=True): + """Parse a document from a string, returning the resulting + Document node. + """ + if namespaces: + builder = ExpatBuilderNS() + else: + builder = ExpatBuilder() + return builder.parseString(string) + + +def parseFragment(file, context, namespaces=True): + """Parse a fragment of a document, given the context from which it + was originally extracted. context should be the parent of the + node(s) which are in the fragment. + + 'file' may be either a file name or an open file object. + """ + if namespaces: + builder = FragmentBuilderNS(context) + else: + builder = FragmentBuilder(context) + + if isinstance(file, str): + with open(file, 'rb') as fp: + result = builder.parseFile(fp) + else: + result = builder.parseFile(file) + return result + + +def parseFragmentString(string, context, namespaces=True): + """Parse a fragment of a document from a string, given the context + from which it was originally extracted. context should be the + parent of the node(s) which are in the fragment. + """ + if namespaces: + builder = FragmentBuilderNS(context) + else: + builder = FragmentBuilder(context) + return builder.parseString(string) + + +def makeBuilder(options): + """Create a builder based on an Options object.""" + if options.namespaces: + return ExpatBuilderNS(options) + else: + return ExpatBuilder(options) diff --git a/parrot/lib/python3.10/xml/dom/minicompat.py b/parrot/lib/python3.10/xml/dom/minicompat.py new file mode 100644 index 0000000000000000000000000000000000000000..5d6fae9a2575bf502999b3c59aeecb7b93a37385 --- /dev/null +++ b/parrot/lib/python3.10/xml/dom/minicompat.py @@ -0,0 +1,109 @@ +"""Python version compatibility support for minidom. 
+ +This module contains internal implementation details and +should not be imported; use xml.dom.minidom instead. +""" + +# This module should only be imported using "import *". +# +# The following names are defined: +# +# NodeList -- lightest possible NodeList implementation +# +# EmptyNodeList -- lightest possible NodeList that is guaranteed to +# remain empty (immutable) +# +# StringTypes -- tuple of defined string types +# +# defproperty -- function used in conjunction with GetattrMagic; +# using these together is needed to make them work +# as efficiently as possible in both Python 2.2+ +# and older versions. For example: +# +# class MyClass(GetattrMagic): +# def _get_myattr(self): +# return something +# +# defproperty(MyClass, "myattr", +# "return some value") +# +# For Python 2.2 and newer, this will construct a +# property object on the class, which avoids +# needing to override __getattr__(). It will only +# work for read-only attributes. +# +# For older versions of Python, inheriting from +# GetattrMagic will use the traditional +# __getattr__() hackery to achieve the same effect, +# but less efficiently. +# +# defproperty() should be used for each version of +# the relevant _get_() function. 
+ +__all__ = ["NodeList", "EmptyNodeList", "StringTypes", "defproperty"] + +import xml.dom + +StringTypes = (str,) + + +class NodeList(list): + __slots__ = () + + def item(self, index): + if 0 <= index < len(self): + return self[index] + + def _get_length(self): + return len(self) + + def _set_length(self, value): + raise xml.dom.NoModificationAllowedErr( + "attempt to modify read-only attribute 'length'") + + length = property(_get_length, _set_length, + doc="The number of nodes in the NodeList.") + + # For backward compatibility + def __setstate__(self, state): + if state is None: + state = [] + self[:] = state + + +class EmptyNodeList(tuple): + __slots__ = () + + def __add__(self, other): + NL = NodeList() + NL.extend(other) + return NL + + def __radd__(self, other): + NL = NodeList() + NL.extend(other) + return NL + + def item(self, index): + return None + + def _get_length(self): + return 0 + + def _set_length(self, value): + raise xml.dom.NoModificationAllowedErr( + "attempt to modify read-only attribute 'length'") + + length = property(_get_length, _set_length, + doc="The number of nodes in the NodeList.") + + +def defproperty(klass, name, doc): + get = getattr(klass, ("_get_" + name)) + def set(self, value, name=name): + raise xml.dom.NoModificationAllowedErr( + "attempt to modify read-only attribute " + repr(name)) + assert not hasattr(klass, "_set_" + name), \ + "expected not to find _set_" + name + prop = property(get, set, doc=doc) + setattr(klass, name, prop) diff --git a/parrot/lib/python3.10/xml/dom/xmlbuilder.py b/parrot/lib/python3.10/xml/dom/xmlbuilder.py new file mode 100644 index 0000000000000000000000000000000000000000..8a200263497b8927e7630ec5d8c14cfd8d8f5a19 --- /dev/null +++ b/parrot/lib/python3.10/xml/dom/xmlbuilder.py @@ -0,0 +1,387 @@ +"""Implementation of the DOM Level 3 'LS-Load' feature.""" + +import copy +import xml.dom + +from xml.dom.NodeFilter import NodeFilter + + +__all__ = ["DOMBuilder", "DOMEntityResolver", "DOMInputSource"] + 
+ +class Options: + """Features object that has variables set for each DOMBuilder feature. + + The DOMBuilder class uses an instance of this class to pass settings to + the ExpatBuilder class. + """ + + # Note that the DOMBuilder class in LoadSave constrains which of these + # values can be set using the DOM Level 3 LoadSave feature. + + namespaces = 1 + namespace_declarations = True + validation = False + external_parameter_entities = True + external_general_entities = True + external_dtd_subset = True + validate_if_schema = False + validate = False + datatype_normalization = False + create_entity_ref_nodes = True + entities = True + whitespace_in_element_content = True + cdata_sections = True + comments = True + charset_overrides_xml_encoding = True + infoset = False + supported_mediatypes_only = False + + errorHandler = None + filter = None + + +class DOMBuilder: + entityResolver = None + errorHandler = None + filter = None + + ACTION_REPLACE = 1 + ACTION_APPEND_AS_CHILDREN = 2 + ACTION_INSERT_AFTER = 3 + ACTION_INSERT_BEFORE = 4 + + _legal_actions = (ACTION_REPLACE, ACTION_APPEND_AS_CHILDREN, + ACTION_INSERT_AFTER, ACTION_INSERT_BEFORE) + + def __init__(self): + self._options = Options() + + def _get_entityResolver(self): + return self.entityResolver + def _set_entityResolver(self, entityResolver): + self.entityResolver = entityResolver + + def _get_errorHandler(self): + return self.errorHandler + def _set_errorHandler(self, errorHandler): + self.errorHandler = errorHandler + + def _get_filter(self): + return self.filter + def _set_filter(self, filter): + self.filter = filter + + def setFeature(self, name, state): + if self.supportsFeature(name): + state = state and 1 or 0 + try: + settings = self._settings[(_name_xform(name), state)] + except KeyError: + raise xml.dom.NotSupportedErr( + "unsupported feature: %r" % (name,)) from None + else: + for name, value in settings: + setattr(self._options, name, value) + else: + raise xml.dom.NotFoundErr("unknown 
feature: " + repr(name)) + + def supportsFeature(self, name): + return hasattr(self._options, _name_xform(name)) + + def canSetFeature(self, name, state): + key = (_name_xform(name), state and 1 or 0) + return key in self._settings + + # This dictionary maps from (feature,value) to a list of + # (option,value) pairs that should be set on the Options object. + # If a (feature,value) setting is not in this dictionary, it is + # not supported by the DOMBuilder. + # + _settings = { + ("namespace_declarations", 0): [ + ("namespace_declarations", 0)], + ("namespace_declarations", 1): [ + ("namespace_declarations", 1)], + ("validation", 0): [ + ("validation", 0)], + ("external_general_entities", 0): [ + ("external_general_entities", 0)], + ("external_general_entities", 1): [ + ("external_general_entities", 1)], + ("external_parameter_entities", 0): [ + ("external_parameter_entities", 0)], + ("external_parameter_entities", 1): [ + ("external_parameter_entities", 1)], + ("validate_if_schema", 0): [ + ("validate_if_schema", 0)], + ("create_entity_ref_nodes", 0): [ + ("create_entity_ref_nodes", 0)], + ("create_entity_ref_nodes", 1): [ + ("create_entity_ref_nodes", 1)], + ("entities", 0): [ + ("create_entity_ref_nodes", 0), + ("entities", 0)], + ("entities", 1): [ + ("entities", 1)], + ("whitespace_in_element_content", 0): [ + ("whitespace_in_element_content", 0)], + ("whitespace_in_element_content", 1): [ + ("whitespace_in_element_content", 1)], + ("cdata_sections", 0): [ + ("cdata_sections", 0)], + ("cdata_sections", 1): [ + ("cdata_sections", 1)], + ("comments", 0): [ + ("comments", 0)], + ("comments", 1): [ + ("comments", 1)], + ("charset_overrides_xml_encoding", 0): [ + ("charset_overrides_xml_encoding", 0)], + ("charset_overrides_xml_encoding", 1): [ + ("charset_overrides_xml_encoding", 1)], + ("infoset", 0): [], + ("infoset", 1): [ + ("namespace_declarations", 0), + ("validate_if_schema", 0), + ("create_entity_ref_nodes", 0), + ("entities", 0), + ("cdata_sections", 0), 
+ ("datatype_normalization", 1), + ("whitespace_in_element_content", 1), + ("comments", 1), + ("charset_overrides_xml_encoding", 1)], + ("supported_mediatypes_only", 0): [ + ("supported_mediatypes_only", 0)], + ("namespaces", 0): [ + ("namespaces", 0)], + ("namespaces", 1): [ + ("namespaces", 1)], + } + + def getFeature(self, name): + xname = _name_xform(name) + try: + return getattr(self._options, xname) + except AttributeError: + if name == "infoset": + options = self._options + return (options.datatype_normalization + and options.whitespace_in_element_content + and options.comments + and options.charset_overrides_xml_encoding + and not (options.namespace_declarations + or options.validate_if_schema + or options.create_entity_ref_nodes + or options.entities + or options.cdata_sections)) + raise xml.dom.NotFoundErr("feature %s not known" % repr(name)) + + def parseURI(self, uri): + if self.entityResolver: + input = self.entityResolver.resolveEntity(None, uri) + else: + input = DOMEntityResolver().resolveEntity(None, uri) + return self.parse(input) + + def parse(self, input): + options = copy.copy(self._options) + options.filter = self.filter + options.errorHandler = self.errorHandler + fp = input.byteStream + if fp is None and options.systemId: + import urllib.request + fp = urllib.request.urlopen(input.systemId) + return self._parse_bytestream(fp, options) + + def parseWithContext(self, input, cnode, action): + if action not in self._legal_actions: + raise ValueError("not a legal action") + raise NotImplementedError("Haven't written this yet...") + + def _parse_bytestream(self, stream, options): + import xml.dom.expatbuilder + builder = xml.dom.expatbuilder.makeBuilder(options) + return builder.parseFile(stream) + + +def _name_xform(name): + return name.lower().replace('-', '_') + + +class DOMEntityResolver(object): + __slots__ = '_opener', + + def resolveEntity(self, publicId, systemId): + assert systemId is not None + source = DOMInputSource() + source.publicId 
= publicId + source.systemId = systemId + source.byteStream = self._get_opener().open(systemId) + + # determine the encoding if the transport provided it + source.encoding = self._guess_media_encoding(source) + + # determine the base URI is we can + import posixpath, urllib.parse + parts = urllib.parse.urlparse(systemId) + scheme, netloc, path, params, query, fragment = parts + # XXX should we check the scheme here as well? + if path and not path.endswith("/"): + path = posixpath.dirname(path) + "/" + parts = scheme, netloc, path, params, query, fragment + source.baseURI = urllib.parse.urlunparse(parts) + + return source + + def _get_opener(self): + try: + return self._opener + except AttributeError: + self._opener = self._create_opener() + return self._opener + + def _create_opener(self): + import urllib.request + return urllib.request.build_opener() + + def _guess_media_encoding(self, source): + info = source.byteStream.info() + if "Content-Type" in info: + for param in info.getplist(): + if param.startswith("charset="): + return param.split("=", 1)[1].lower() + + +class DOMInputSource(object): + __slots__ = ('byteStream', 'characterStream', 'stringData', + 'encoding', 'publicId', 'systemId', 'baseURI') + + def __init__(self): + self.byteStream = None + self.characterStream = None + self.stringData = None + self.encoding = None + self.publicId = None + self.systemId = None + self.baseURI = None + + def _get_byteStream(self): + return self.byteStream + def _set_byteStream(self, byteStream): + self.byteStream = byteStream + + def _get_characterStream(self): + return self.characterStream + def _set_characterStream(self, characterStream): + self.characterStream = characterStream + + def _get_stringData(self): + return self.stringData + def _set_stringData(self, data): + self.stringData = data + + def _get_encoding(self): + return self.encoding + def _set_encoding(self, encoding): + self.encoding = encoding + + def _get_publicId(self): + return self.publicId + def 
_set_publicId(self, publicId): + self.publicId = publicId + + def _get_systemId(self): + return self.systemId + def _set_systemId(self, systemId): + self.systemId = systemId + + def _get_baseURI(self): + return self.baseURI + def _set_baseURI(self, uri): + self.baseURI = uri + + +class DOMBuilderFilter: + """Element filter which can be used to tailor construction of + a DOM instance. + """ + + # There's really no need for this class; concrete implementations + # should just implement the endElement() and startElement() + # methods as appropriate. Using this makes it easy to only + # implement one of them. + + FILTER_ACCEPT = 1 + FILTER_REJECT = 2 + FILTER_SKIP = 3 + FILTER_INTERRUPT = 4 + + whatToShow = NodeFilter.SHOW_ALL + + def _get_whatToShow(self): + return self.whatToShow + + def acceptNode(self, element): + return self.FILTER_ACCEPT + + def startContainer(self, element): + return self.FILTER_ACCEPT + +del NodeFilter + + +class DocumentLS: + """Mixin to create documents that conform to the load/save spec.""" + + async_ = False + + def _get_async(self): + return False + + def _set_async(self, flag): + if flag: + raise xml.dom.NotSupportedErr( + "asynchronous document loading is not supported") + + def abort(self): + # What does it mean to "clear" a document? Does the + # documentElement disappear? 
+ raise NotImplementedError( + "haven't figured out what this means yet") + + def load(self, uri): + raise NotImplementedError("haven't written this yet") + + def loadXML(self, source): + raise NotImplementedError("haven't written this yet") + + def saveXML(self, snode): + if snode is None: + snode = self + elif snode.ownerDocument is not self: + raise xml.dom.WrongDocumentErr() + return snode.toxml() + + +class DOMImplementationLS: + MODE_SYNCHRONOUS = 1 + MODE_ASYNCHRONOUS = 2 + + def createDOMBuilder(self, mode, schemaType): + if schemaType is not None: + raise xml.dom.NotSupportedErr( + "schemaType not yet supported") + if mode == self.MODE_SYNCHRONOUS: + return DOMBuilder() + if mode == self.MODE_ASYNCHRONOUS: + raise xml.dom.NotSupportedErr( + "asynchronous builders are not supported") + raise ValueError("unknown value for mode") + + def createDOMWriter(self): + raise NotImplementedError( + "the writer interface hasn't been written yet!") + + def createDOMInputSource(self): + return DOMInputSource() diff --git a/parrot/lib/python3.10/xml/etree/__pycache__/cElementTree.cpython-310.pyc b/parrot/lib/python3.10/xml/etree/__pycache__/cElementTree.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fe97f6b76585b6b472e097ebdda4937941c849e2 Binary files /dev/null and b/parrot/lib/python3.10/xml/etree/__pycache__/cElementTree.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/parsers/__init__.py b/parrot/lib/python3.10/xml/parsers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..eb314a3b40573763835ad959acff985fad014537 --- /dev/null +++ b/parrot/lib/python3.10/xml/parsers/__init__.py @@ -0,0 +1,8 @@ +"""Python interfaces to XML parsers. + +This package contains one module: + +expat -- Python wrapper for James Clark's Expat parser, with namespace + support. 
+ +""" diff --git a/parrot/lib/python3.10/xml/parsers/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/xml/parsers/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c9d39d9cbf42562a18cfdda5a7665b2b498c2503 Binary files /dev/null and b/parrot/lib/python3.10/xml/parsers/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/parsers/__pycache__/expat.cpython-310.pyc b/parrot/lib/python3.10/xml/parsers/__pycache__/expat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6836ee0d2501a8ca47184ee46795f9b14d0c2764 Binary files /dev/null and b/parrot/lib/python3.10/xml/parsers/__pycache__/expat.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/parsers/expat.py b/parrot/lib/python3.10/xml/parsers/expat.py new file mode 100644 index 0000000000000000000000000000000000000000..bcbe9fb1f8f00e568f76fce02fdbe7528012b622 --- /dev/null +++ b/parrot/lib/python3.10/xml/parsers/expat.py @@ -0,0 +1,8 @@ +"""Interface to the Expat non-validating XML parser.""" +import sys + +from pyexpat import * + +# provide pyexpat submodules as xml.parsers.expat submodules +sys.modules['xml.parsers.expat.model'] = model +sys.modules['xml.parsers.expat.errors'] = errors diff --git a/parrot/lib/python3.10/xml/sax/__pycache__/handler.cpython-310.pyc b/parrot/lib/python3.10/xml/sax/__pycache__/handler.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3515368b4a68d66d09ef99b85e7e58b230cdaa1d Binary files /dev/null and b/parrot/lib/python3.10/xml/sax/__pycache__/handler.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/sax/__pycache__/saxutils.cpython-310.pyc b/parrot/lib/python3.10/xml/sax/__pycache__/saxutils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..29904aed8482142c38e818ddffcfa7ef3bd781ca Binary files /dev/null and 
b/parrot/lib/python3.10/xml/sax/__pycache__/saxutils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/sax/__pycache__/xmlreader.cpython-310.pyc b/parrot/lib/python3.10/xml/sax/__pycache__/xmlreader.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1f7669cc17cd5a7fac047bba4374c59fb7e85e21 Binary files /dev/null and b/parrot/lib/python3.10/xml/sax/__pycache__/xmlreader.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/xml/sax/_exceptions.py b/parrot/lib/python3.10/xml/sax/_exceptions.py new file mode 100644 index 0000000000000000000000000000000000000000..a9b2ba35c6a22b2e393b6a898780f025a9e15e0f --- /dev/null +++ b/parrot/lib/python3.10/xml/sax/_exceptions.py @@ -0,0 +1,131 @@ +"""Different kinds of SAX Exceptions""" +import sys +if sys.platform[:4] == "java": + from java.lang import Exception +del sys + +# ===== SAXEXCEPTION ===== + +class SAXException(Exception): + """Encapsulate an XML error or warning. This class can contain + basic error or warning information from either the XML parser or + the application: you can subclass it to provide additional + functionality, or to add localization. Note that although you will + receive a SAXException as the argument to the handlers in the + ErrorHandler interface, you are not actually required to raise + the exception; instead, you can simply read the information in + it.""" + + def __init__(self, msg, exception=None): + """Creates an exception. The message is required, but the exception + is optional.""" + self._msg = msg + self._exception = exception + Exception.__init__(self, msg) + + def getMessage(self): + "Return a message for this exception." + return self._msg + + def getException(self): + "Return the embedded exception, or None if there was none." + return self._exception + + def __str__(self): + "Create a string representation of the exception." 
+ return self._msg + + def __getitem__(self, ix): + """Avoids weird error messages if someone does exception[ix] by + mistake, since Exception has __getitem__ defined.""" + raise AttributeError("__getitem__") + + +# ===== SAXPARSEEXCEPTION ===== + +class SAXParseException(SAXException): + """Encapsulate an XML parse error or warning. + + This exception will include information for locating the error in + the original XML document. Note that although the application will + receive a SAXParseException as the argument to the handlers in the + ErrorHandler interface, the application is not actually required + to raise the exception; instead, it can simply read the + information in it and take a different action. + + Since this exception is a subclass of SAXException, it inherits + the ability to wrap another exception.""" + + def __init__(self, msg, exception, locator): + "Creates the exception. The exception parameter is allowed to be None." + SAXException.__init__(self, msg, exception) + self._locator = locator + + # We need to cache this stuff at construction time. + # If this exception is raised, the objects through which we must + # traverse to get this information may be deleted by the time + # it gets caught. + self._systemId = self._locator.getSystemId() + self._colnum = self._locator.getColumnNumber() + self._linenum = self._locator.getLineNumber() + + def getColumnNumber(self): + """The column number of the end of the text where the exception + occurred.""" + return self._colnum + + def getLineNumber(self): + "The line number of the end of the text where the exception occurred." + return self._linenum + + def getPublicId(self): + "Get the public identifier of the entity where the exception occurred." + return self._locator.getPublicId() + + def getSystemId(self): + "Get the system identifier of the entity where the exception occurred." + return self._systemId + + def __str__(self): + "Create a string representation of the exception." 
+ sysid = self.getSystemId() + if sysid is None: + sysid = "" + linenum = self.getLineNumber() + if linenum is None: + linenum = "?" + colnum = self.getColumnNumber() + if colnum is None: + colnum = "?" + return "%s:%s:%s: %s" % (sysid, linenum, colnum, self._msg) + + +# ===== SAXNOTRECOGNIZEDEXCEPTION ===== + +class SAXNotRecognizedException(SAXException): + """Exception class for an unrecognized identifier. + + An XMLReader will raise this exception when it is confronted with an + unrecognized feature or property. SAX applications and extensions may + use this class for similar purposes.""" + + +# ===== SAXNOTSUPPORTEDEXCEPTION ===== + +class SAXNotSupportedException(SAXException): + """Exception class for an unsupported operation. + + An XMLReader will raise this exception when a service it cannot + perform is requested (specifically setting a state or value). SAX + applications and extensions may use this class for similar + purposes.""" + +# ===== SAXNOTSUPPORTEDEXCEPTION ===== + +class SAXReaderNotAvailable(SAXNotSupportedException): + """Exception class for a missing driver. + + An XMLReader module (driver) should raise this exception when it + is first imported, e.g. when a support module cannot be imported. + It also may be raised during parsing, e.g. if executing an external + program is not permitted.""" diff --git a/parrot/lib/python3.10/xml/sax/expatreader.py b/parrot/lib/python3.10/xml/sax/expatreader.py new file mode 100644 index 0000000000000000000000000000000000000000..2f7c87b126c9933a4071dc19ec797e2472f28e42 --- /dev/null +++ b/parrot/lib/python3.10/xml/sax/expatreader.py @@ -0,0 +1,460 @@ +""" +SAX driver for the pyexpat C module. This driver works with +pyexpat.__version__ == '2.22'. 
+""" + +version = "0.20" + +from xml.sax._exceptions import * +from xml.sax.handler import feature_validation, feature_namespaces +from xml.sax.handler import feature_namespace_prefixes +from xml.sax.handler import feature_external_ges, feature_external_pes +from xml.sax.handler import feature_string_interning +from xml.sax.handler import property_xml_string, property_interning_dict + +# xml.parsers.expat does not raise ImportError in Jython +import sys +if sys.platform[:4] == "java": + raise SAXReaderNotAvailable("expat not available in Java", None) +del sys + +try: + from xml.parsers import expat +except ImportError: + raise SAXReaderNotAvailable("expat not supported", None) +else: + if not hasattr(expat, "ParserCreate"): + raise SAXReaderNotAvailable("expat not supported", None) +from xml.sax import xmlreader, saxutils, handler + +AttributesImpl = xmlreader.AttributesImpl +AttributesNSImpl = xmlreader.AttributesNSImpl + +# If we're using a sufficiently recent version of Python, we can use +# weak references to avoid cycles between the parser and content +# handler, otherwise we'll just have to pretend. +try: + import _weakref +except ImportError: + def _mkproxy(o): + return o +else: + import weakref + _mkproxy = weakref.proxy + del weakref, _weakref + +class _ClosedParser: + pass + +# --- ExpatLocator + +class ExpatLocator(xmlreader.Locator): + """Locator for use with the ExpatParser class. + + This uses a weak reference to the parser object to avoid creating + a circular reference between the parser and the content handler. 
+ """ + def __init__(self, parser): + self._ref = _mkproxy(parser) + + def getColumnNumber(self): + parser = self._ref + if parser._parser is None: + return None + return parser._parser.ErrorColumnNumber + + def getLineNumber(self): + parser = self._ref + if parser._parser is None: + return 1 + return parser._parser.ErrorLineNumber + + def getPublicId(self): + parser = self._ref + if parser is None: + return None + return parser._source.getPublicId() + + def getSystemId(self): + parser = self._ref + if parser is None: + return None + return parser._source.getSystemId() + + +# --- ExpatParser + +class ExpatParser(xmlreader.IncrementalParser, xmlreader.Locator): + """SAX driver for the pyexpat C module.""" + + def __init__(self, namespaceHandling=0, bufsize=2**16-20): + xmlreader.IncrementalParser.__init__(self, bufsize) + self._source = xmlreader.InputSource() + self._parser = None + self._namespaces = namespaceHandling + self._lex_handler_prop = None + self._parsing = False + self._entity_stack = [] + self._external_ges = 0 + self._interning = None + + # XMLReader methods + + def parse(self, source): + "Parse an XML document from a URL or an InputSource." 
+ source = saxutils.prepare_input_source(source) + + self._source = source + try: + self.reset() + self._cont_handler.setDocumentLocator(ExpatLocator(self)) + xmlreader.IncrementalParser.parse(self, source) + except: + # bpo-30264: Close the source on error to not leak resources: + # xml.sax.parse() doesn't give access to the underlying parser + # to the caller + self._close_source() + raise + + def prepareParser(self, source): + if source.getSystemId() is not None: + self._parser.SetBase(source.getSystemId()) + + # Redefined setContentHandler to allow changing handlers during parsing + + def setContentHandler(self, handler): + xmlreader.IncrementalParser.setContentHandler(self, handler) + if self._parsing: + self._reset_cont_handler() + + def getFeature(self, name): + if name == feature_namespaces: + return self._namespaces + elif name == feature_string_interning: + return self._interning is not None + elif name in (feature_validation, feature_external_pes, + feature_namespace_prefixes): + return 0 + elif name == feature_external_ges: + return self._external_ges + raise SAXNotRecognizedException("Feature '%s' not recognized" % name) + + def setFeature(self, name, state): + if self._parsing: + raise SAXNotSupportedException("Cannot set features while parsing") + + if name == feature_namespaces: + self._namespaces = state + elif name == feature_external_ges: + self._external_ges = state + elif name == feature_string_interning: + if state: + if self._interning is None: + self._interning = {} + else: + self._interning = None + elif name == feature_validation: + if state: + raise SAXNotSupportedException( + "expat does not support validation") + elif name == feature_external_pes: + if state: + raise SAXNotSupportedException( + "expat does not read external parameter entities") + elif name == feature_namespace_prefixes: + if state: + raise SAXNotSupportedException( + "expat does not report namespace prefixes") + else: + raise SAXNotRecognizedException( + "Feature '%s' 
not recognized" % name) + + def getProperty(self, name): + if name == handler.property_lexical_handler: + return self._lex_handler_prop + elif name == property_interning_dict: + return self._interning + elif name == property_xml_string: + if self._parser: + if hasattr(self._parser, "GetInputContext"): + return self._parser.GetInputContext() + else: + raise SAXNotRecognizedException( + "This version of expat does not support getting" + " the XML string") + else: + raise SAXNotSupportedException( + "XML string cannot be returned when not parsing") + raise SAXNotRecognizedException("Property '%s' not recognized" % name) + + def setProperty(self, name, value): + if name == handler.property_lexical_handler: + self._lex_handler_prop = value + if self._parsing: + self._reset_lex_handler_prop() + elif name == property_interning_dict: + self._interning = value + elif name == property_xml_string: + raise SAXNotSupportedException("Property '%s' cannot be set" % + name) + else: + raise SAXNotRecognizedException("Property '%s' not recognized" % + name) + + # IncrementalParser methods + + def feed(self, data, isFinal=False): + if not self._parsing: + self.reset() + self._parsing = True + self._cont_handler.startDocument() + + try: + # The isFinal parameter is internal to the expat reader. + # If it is set to true, expat will check validity of the entire + # document. When feeding chunks, they are not normally final - + # except when invoked from close. + self._parser.Parse(data, isFinal) + except expat.error as e: + exc = SAXParseException(expat.ErrorString(e.code), e, self) + # FIXME: when to invoke error()? 
+ self._err_handler.fatalError(exc) + + def flush(self): + if self._parser is None: + return + + was_enabled = self._parser.GetReparseDeferralEnabled() + try: + self._parser.SetReparseDeferralEnabled(False) + self._parser.Parse(b"", False) + except expat.error as e: + exc = SAXParseException(expat.ErrorString(e.code), e, self) + self._err_handler.fatalError(exc) + finally: + self._parser.SetReparseDeferralEnabled(was_enabled) + + def _close_source(self): + source = self._source + try: + file = source.getCharacterStream() + if file is not None: + file.close() + finally: + file = source.getByteStream() + if file is not None: + file.close() + + def close(self): + if (self._entity_stack or self._parser is None or + isinstance(self._parser, _ClosedParser)): + # If we are completing an external entity, do nothing here + return + try: + self.feed(b"", isFinal=True) + self._cont_handler.endDocument() + self._parsing = False + # break cycle created by expat handlers pointing to our methods + self._parser = None + finally: + self._parsing = False + if self._parser is not None: + # Keep ErrorColumnNumber and ErrorLineNumber after closing. 
+ parser = _ClosedParser() + parser.ErrorColumnNumber = self._parser.ErrorColumnNumber + parser.ErrorLineNumber = self._parser.ErrorLineNumber + self._parser = parser + self._close_source() + + def _reset_cont_handler(self): + self._parser.ProcessingInstructionHandler = \ + self._cont_handler.processingInstruction + self._parser.CharacterDataHandler = self._cont_handler.characters + + def _reset_lex_handler_prop(self): + lex = self._lex_handler_prop + parser = self._parser + if lex is None: + parser.CommentHandler = None + parser.StartCdataSectionHandler = None + parser.EndCdataSectionHandler = None + parser.StartDoctypeDeclHandler = None + parser.EndDoctypeDeclHandler = None + else: + parser.CommentHandler = lex.comment + parser.StartCdataSectionHandler = lex.startCDATA + parser.EndCdataSectionHandler = lex.endCDATA + parser.StartDoctypeDeclHandler = self.start_doctype_decl + parser.EndDoctypeDeclHandler = lex.endDTD + + def reset(self): + if self._namespaces: + self._parser = expat.ParserCreate(self._source.getEncoding(), " ", + intern=self._interning) + self._parser.namespace_prefixes = 1 + self._parser.StartElementHandler = self.start_element_ns + self._parser.EndElementHandler = self.end_element_ns + else: + self._parser = expat.ParserCreate(self._source.getEncoding(), + intern = self._interning) + self._parser.StartElementHandler = self.start_element + self._parser.EndElementHandler = self.end_element + + self._reset_cont_handler() + self._parser.UnparsedEntityDeclHandler = self.unparsed_entity_decl + self._parser.NotationDeclHandler = self.notation_decl + self._parser.StartNamespaceDeclHandler = self.start_namespace_decl + self._parser.EndNamespaceDeclHandler = self.end_namespace_decl + + self._decl_handler_prop = None + if self._lex_handler_prop: + self._reset_lex_handler_prop() +# self._parser.DefaultHandler = +# self._parser.DefaultHandlerExpand = +# self._parser.NotStandaloneHandler = + self._parser.ExternalEntityRefHandler = self.external_entity_ref + 
try: + self._parser.SkippedEntityHandler = self.skipped_entity_handler + except AttributeError: + # This pyexpat does not support SkippedEntity + pass + self._parser.SetParamEntityParsing( + expat.XML_PARAM_ENTITY_PARSING_UNLESS_STANDALONE) + + self._parsing = False + self._entity_stack = [] + + # Locator methods + + def getColumnNumber(self): + if self._parser is None: + return None + return self._parser.ErrorColumnNumber + + def getLineNumber(self): + if self._parser is None: + return 1 + return self._parser.ErrorLineNumber + + def getPublicId(self): + return self._source.getPublicId() + + def getSystemId(self): + return self._source.getSystemId() + + # event handlers + def start_element(self, name, attrs): + self._cont_handler.startElement(name, AttributesImpl(attrs)) + + def end_element(self, name): + self._cont_handler.endElement(name) + + def start_element_ns(self, name, attrs): + pair = name.split() + if len(pair) == 1: + # no namespace + pair = (None, name) + elif len(pair) == 3: + pair = pair[0], pair[1] + else: + # default namespace + pair = tuple(pair) + + newattrs = {} + qnames = {} + for (aname, value) in attrs.items(): + parts = aname.split() + length = len(parts) + if length == 1: + # no namespace + qname = aname + apair = (None, aname) + elif length == 3: + qname = "%s:%s" % (parts[2], parts[1]) + apair = parts[0], parts[1] + else: + # default namespace + qname = parts[1] + apair = tuple(parts) + + newattrs[apair] = value + qnames[apair] = qname + + self._cont_handler.startElementNS(pair, None, + AttributesNSImpl(newattrs, qnames)) + + def end_element_ns(self, name): + pair = name.split() + if len(pair) == 1: + pair = (None, name) + elif len(pair) == 3: + pair = pair[0], pair[1] + else: + pair = tuple(pair) + + self._cont_handler.endElementNS(pair, None) + + # this is not used (call directly to ContentHandler) + def processing_instruction(self, target, data): + self._cont_handler.processingInstruction(target, data) + + # this is not used (call 
directly to ContentHandler) + def character_data(self, data): + self._cont_handler.characters(data) + + def start_namespace_decl(self, prefix, uri): + self._cont_handler.startPrefixMapping(prefix, uri) + + def end_namespace_decl(self, prefix): + self._cont_handler.endPrefixMapping(prefix) + + def start_doctype_decl(self, name, sysid, pubid, has_internal_subset): + self._lex_handler_prop.startDTD(name, pubid, sysid) + + def unparsed_entity_decl(self, name, base, sysid, pubid, notation_name): + self._dtd_handler.unparsedEntityDecl(name, pubid, sysid, notation_name) + + def notation_decl(self, name, base, sysid, pubid): + self._dtd_handler.notationDecl(name, pubid, sysid) + + def external_entity_ref(self, context, base, sysid, pubid): + if not self._external_ges: + return 1 + + source = self._ent_handler.resolveEntity(pubid, sysid) + source = saxutils.prepare_input_source(source, + self._source.getSystemId() or + "") + + self._entity_stack.append((self._parser, self._source)) + self._parser = self._parser.ExternalEntityParserCreate(context) + self._source = source + + try: + xmlreader.IncrementalParser.parse(self, source) + except: + return 0 # FIXME: save error info here? 
+ + (self._parser, self._source) = self._entity_stack[-1] + del self._entity_stack[-1] + return 1 + + def skipped_entity_handler(self, name, is_pe): + if is_pe: + # The SAX spec requires to report skipped PEs with a '%' + name = '%'+name + self._cont_handler.skippedEntity(name) + +# --- + +def create_parser(*args, **kwargs): + return ExpatParser(*args, **kwargs) + +# --- + +if __name__ == "__main__": + import xml.sax.saxutils + p = create_parser() + p.setContentHandler(xml.sax.saxutils.XMLGenerator()) + p.setErrorHandler(xml.sax.ErrorHandler()) + p.parse("http://www.ibiblio.org/xml/examples/shakespeare/hamlet.xml") diff --git a/parrot/lib/python3.10/xml/sax/handler.py b/parrot/lib/python3.10/xml/sax/handler.py new file mode 100644 index 0000000000000000000000000000000000000000..e8d417e51942329cbe111e458de3fa0788601ccc --- /dev/null +++ b/parrot/lib/python3.10/xml/sax/handler.py @@ -0,0 +1,387 @@ +""" +This module contains the core classes of version 2.0 of SAX for Python. +This file provides only default classes with absolutely minimum +functionality, from which drivers and applications can be subclassed. + +Many of these classes are empty and are included only as documentation +of the interfaces. + +$Id$ +""" + +version = '2.0beta' + +#============================================================================ +# +# HANDLER INTERFACES +# +#============================================================================ + +# ===== ERRORHANDLER ===== + +class ErrorHandler: + """Basic interface for SAX error handlers. + + If you create an object that implements this interface, then + register the object with your XMLReader, the parser will call the + methods in your object to report all warnings and errors. There + are three levels of errors available: warnings, (possibly) + recoverable errors, and unrecoverable errors. All methods take a + SAXParseException as the only parameter.""" + + def error(self, exception): + "Handle a recoverable error." 
+ raise exception + + def fatalError(self, exception): + "Handle a non-recoverable error." + raise exception + + def warning(self, exception): + "Handle a warning." + print(exception) + + +# ===== CONTENTHANDLER ===== + +class ContentHandler: + """Interface for receiving logical document content events. + + This is the main callback interface in SAX, and the one most + important to applications. The order of events in this interface + mirrors the order of the information in the document.""" + + def __init__(self): + self._locator = None + + def setDocumentLocator(self, locator): + """Called by the parser to give the application a locator for + locating the origin of document events. + + SAX parsers are strongly encouraged (though not absolutely + required) to supply a locator: if it does so, it must supply + the locator to the application by invoking this method before + invoking any of the other methods in the DocumentHandler + interface. + + The locator allows the application to determine the end + position of any document-related event, even if the parser is + not reporting an error. Typically, the application will use + this information for reporting its own errors (such as + character content that does not match an application's + business rules). The information returned by the locator is + probably not sufficient for use with a search engine. + + Note that the locator will return correct information only + during the invocation of the events in this interface. The + application should not attempt to use it at any other time.""" + self._locator = locator + + def startDocument(self): + """Receive notification of the beginning of a document. + + The SAX parser will invoke this method only once, before any + other methods in this interface or in DTDHandler (except for + setDocumentLocator).""" + + def endDocument(self): + """Receive notification of the end of a document. 
+ + The SAX parser will invoke this method only once, and it will + be the last method invoked during the parse. The parser shall + not invoke this method until it has either abandoned parsing + (because of an unrecoverable error) or reached the end of + input.""" + + def startPrefixMapping(self, prefix, uri): + """Begin the scope of a prefix-URI Namespace mapping. + + The information from this event is not necessary for normal + Namespace processing: the SAX XML reader will automatically + replace prefixes for element and attribute names when the + http://xml.org/sax/features/namespaces feature is true (the + default). + + There are cases, however, when applications need to use + prefixes in character data or in attribute values, where they + cannot safely be expanded automatically; the + start/endPrefixMapping event supplies the information to the + application to expand prefixes in those contexts itself, if + necessary. + + Note that start/endPrefixMapping events are not guaranteed to + be properly nested relative to each-other: all + startPrefixMapping events will occur before the corresponding + startElement event, and all endPrefixMapping events will occur + after the corresponding endElement event, but their order is + not guaranteed.""" + + def endPrefixMapping(self, prefix): + """End the scope of a prefix-URI mapping. + + See startPrefixMapping for details. This event will always + occur after the corresponding endElement event, but the order + of endPrefixMapping events is not otherwise guaranteed.""" + + def startElement(self, name, attrs): + """Signals the start of an element in non-namespace mode. + + The name parameter contains the raw XML 1.0 name of the + element type as a string and the attrs parameter holds an + instance of the Attributes class containing the attributes of + the element.""" + + def endElement(self, name): + """Signals the end of an element in non-namespace mode. 
+ + The name parameter contains the name of the element type, just + as with the startElement event.""" + + def startElementNS(self, name, qname, attrs): + """Signals the start of an element in namespace mode. + + The name parameter contains the name of the element type as a + (uri, localname) tuple, the qname parameter the raw XML 1.0 + name used in the source document, and the attrs parameter + holds an instance of the Attributes class containing the + attributes of the element. + + The uri part of the name tuple is None for elements which have + no namespace.""" + + def endElementNS(self, name, qname): + """Signals the end of an element in namespace mode. + + The name parameter contains the name of the element type, just + as with the startElementNS event.""" + + def characters(self, content): + """Receive notification of character data. + + The Parser will call this method to report each chunk of + character data. SAX parsers may return all contiguous + character data in a single chunk, or they may split it into + several chunks; however, all of the characters in any single + event must come from the same external entity so that the + Locator provides useful information.""" + + def ignorableWhitespace(self, whitespace): + """Receive notification of ignorable whitespace in element content. + + Validating Parsers must use this method to report each chunk + of ignorable whitespace (see the W3C XML 1.0 recommendation, + section 2.10): non-validating parsers may also use this method + if they are capable of parsing and using content models. + + SAX parsers may return all contiguous whitespace in a single + chunk, or they may split it into several chunks; however, all + of the characters in any single event must come from the same + external entity, so that the Locator provides useful + information.""" + + def processingInstruction(self, target, data): + """Receive notification of a processing instruction. 
+ + The Parser will invoke this method once for each processing + instruction found: note that processing instructions may occur + before or after the main document element. + + A SAX parser should never report an XML declaration (XML 1.0, + section 2.8) or a text declaration (XML 1.0, section 4.3.1) + using this method.""" + + def skippedEntity(self, name): + """Receive notification of a skipped entity. + + The Parser will invoke this method once for each entity + skipped. Non-validating processors may skip entities if they + have not seen the declarations (because, for example, the + entity was declared in an external DTD subset). All processors + may skip external entities, depending on the values of the + http://xml.org/sax/features/external-general-entities and the + http://xml.org/sax/features/external-parameter-entities + properties.""" + + +# ===== DTDHandler ===== + +class DTDHandler: + """Handle DTD events. + + This interface specifies only those DTD events required for basic + parsing (unparsed entities and attributes).""" + + def notationDecl(self, name, publicId, systemId): + "Handle a notation declaration event." + + def unparsedEntityDecl(self, name, publicId, systemId, ndata): + "Handle an unparsed entity declaration event." + + +# ===== ENTITYRESOLVER ===== + +class EntityResolver: + """Basic interface for resolving entities. If you create an object + implementing this interface, then register the object with your + Parser, the parser will call the method in your object to + resolve all external entities. 
Note that DefaultHandler implements + this interface with the default behaviour.""" + + def resolveEntity(self, publicId, systemId): + """Resolve the system identifier of an entity and return either + the system identifier to read from as a string, or an InputSource + to read from.""" + return systemId + + +#============================================================================ +# +# CORE FEATURES +# +#============================================================================ + +feature_namespaces = "http://xml.org/sax/features/namespaces" +# true: Perform Namespace processing (default). +# false: Optionally do not perform Namespace processing +# (implies namespace-prefixes). +# access: (parsing) read-only; (not parsing) read/write + +feature_namespace_prefixes = "http://xml.org/sax/features/namespace-prefixes" +# true: Report the original prefixed names and attributes used for Namespace +# declarations. +# false: Do not report attributes used for Namespace declarations, and +# optionally do not report original prefixed names (default). +# access: (parsing) read-only; (not parsing) read/write + +feature_string_interning = "http://xml.org/sax/features/string-interning" +# true: All element names, prefixes, attribute names, Namespace URIs, and +# local names are interned using the built-in intern function. +# false: Names are not necessarily interned, although they may be (default). +# access: (parsing) read-only; (not parsing) read/write + +feature_validation = "http://xml.org/sax/features/validation" +# true: Report all validation errors (implies external-general-entities and +# external-parameter-entities). +# false: Do not report validation errors. +# access: (parsing) read-only; (not parsing) read/write + +feature_external_ges = "http://xml.org/sax/features/external-general-entities" +# true: Include all external general (text) entities. +# false: Do not include external general entities. 
+# access: (parsing) read-only; (not parsing) read/write + +feature_external_pes = "http://xml.org/sax/features/external-parameter-entities" +# true: Include all external parameter entities, including the external +# DTD subset. +# false: Do not include any external parameter entities, even the external +# DTD subset. +# access: (parsing) read-only; (not parsing) read/write + +all_features = [feature_namespaces, + feature_namespace_prefixes, + feature_string_interning, + feature_validation, + feature_external_ges, + feature_external_pes] + + +#============================================================================ +# +# CORE PROPERTIES +# +#============================================================================ + +property_lexical_handler = "http://xml.org/sax/properties/lexical-handler" +# data type: xml.sax.sax2lib.LexicalHandler +# description: An optional extension handler for lexical events like comments. +# access: read/write + +property_declaration_handler = "http://xml.org/sax/properties/declaration-handler" +# data type: xml.sax.sax2lib.DeclHandler +# description: An optional extension handler for DTD-related events other +# than notations and unparsed entities. +# access: read/write + +property_dom_node = "http://xml.org/sax/properties/dom-node" +# data type: org.w3c.dom.Node +# description: When parsing, the current DOM node being visited if this is +# a DOM iterator; when not parsing, the root DOM node for +# iteration. +# access: (parsing) read-only; (not parsing) read/write + +property_xml_string = "http://xml.org/sax/properties/xml-string" +# data type: String +# description: The literal string of characters that was the source for +# the current event. +# access: read-only + +property_encoding = "http://www.python.org/sax/properties/encoding" +# data type: String +# description: The name of the encoding to assume for input data. +# access: write: set the encoding, e.g. established by a higher-level +# protocol. 
May change during parsing (e.g. after +# processing a META tag) +# read: return the current encoding (possibly established through +# auto-detection. +# initial value: UTF-8 +# + +property_interning_dict = "http://www.python.org/sax/properties/interning-dict" +# data type: Dictionary +# description: The dictionary used to intern common strings in the document +# access: write: Request that the parser uses a specific dictionary, to +# allow interning across different documents +# read: return the current interning dictionary, or None +# + +all_properties = [property_lexical_handler, + property_dom_node, + property_declaration_handler, + property_xml_string, + property_encoding, + property_interning_dict] + + +class LexicalHandler: + """Optional SAX2 handler for lexical events. + + This handler is used to obtain lexical information about an XML + document, that is, information about how the document was encoded + (as opposed to what it contains, which is reported to the + ContentHandler), such as comments and CDATA marked section + boundaries. + + To set the LexicalHandler of an XMLReader, use the setProperty + method with the property identifier + 'http://xml.org/sax/properties/lexical-handler'.""" + + def comment(self, content): + """Reports a comment anywhere in the document (including the + DTD and outside the document element). + + content is a string that holds the contents of the comment.""" + + def startDTD(self, name, public_id, system_id): + """Report the start of the DTD declarations, if the document + has an associated DTD. + + A startEntity event will be reported before declaration events + from the external DTD subset are reported, and this can be + used to infer from which subset DTD declarations derive. 
+ + name is the name of the document element type, public_id the + public identifier of the DTD (or None if none were supplied) + and system_id the system identfier of the external subset (or + None if none were supplied).""" + + def endDTD(self): + """Signals the end of DTD declarations.""" + + def startCDATA(self): + """Reports the beginning of a CDATA marked section. + + The contents of the CDATA marked section will be reported + through the characters event.""" + + def endCDATA(self): + """Reports the end of a CDATA marked section.""" diff --git a/parrot/lib/python3.10/xml/sax/saxutils.py b/parrot/lib/python3.10/xml/sax/saxutils.py new file mode 100644 index 0000000000000000000000000000000000000000..c1612ea1cebc5d064280e7d2d617d0fc2cf1f0f0 --- /dev/null +++ b/parrot/lib/python3.10/xml/sax/saxutils.py @@ -0,0 +1,369 @@ +"""\ +A library of useful helper classes to the SAX classes, for the +convenience of application and driver writers. +""" + +import os, urllib.parse, urllib.request +import io +import codecs +from . import handler +from . import xmlreader + +def __dict_replace(s, d): + """Replace substrings of a string using a dictionary.""" + for key, value in d.items(): + s = s.replace(key, value) + return s + +def escape(data, entities={}): + """Escape &, <, and > in a string of data. + + You can escape other strings of data by passing a dictionary as + the optional entities parameter. The keys and values must all be + strings; each key will be replaced with its corresponding value. + """ + + # must do ampersand first + data = data.replace("&", "&") + data = data.replace(">", ">") + data = data.replace("<", "<") + if entities: + data = __dict_replace(data, entities) + return data + +def unescape(data, entities={}): + """Unescape &, <, and > in a string of data. + + You can unescape other strings of data by passing a dictionary as + the optional entities parameter. 
The keys and values must all be + strings; each key will be replaced with its corresponding value. + """ + data = data.replace("<", "<") + data = data.replace(">", ">") + if entities: + data = __dict_replace(data, entities) + # must do ampersand last + return data.replace("&", "&") + +def quoteattr(data, entities={}): + """Escape and quote an attribute value. + + Escape &, <, and > in a string of data, then quote it for use as + an attribute value. The \" character will be escaped as well, if + necessary. + + You can escape other strings of data by passing a dictionary as + the optional entities parameter. The keys and values must all be + strings; each key will be replaced with its corresponding value. + """ + entities = {**entities, '\n': ' ', '\r': ' ', '\t':' '} + data = escape(data, entities) + if '"' in data: + if "'" in data: + data = '"%s"' % data.replace('"', """) + else: + data = "'%s'" % data + else: + data = '"%s"' % data + return data + + +def _gettextwriter(out, encoding): + if out is None: + import sys + return sys.stdout + + if isinstance(out, io.TextIOBase): + # use a text writer as is + return out + + if isinstance(out, (codecs.StreamWriter, codecs.StreamReaderWriter)): + # use a codecs stream writer as is + return out + + # wrap a binary writer with TextIOWrapper + if isinstance(out, io.RawIOBase): + # Keep the original file open when the TextIOWrapper is + # destroyed + class _wrapper: + __class__ = out.__class__ + def __getattr__(self, name): + return getattr(out, name) + buffer = _wrapper() + buffer.close = lambda: None + else: + # This is to handle passed objects that aren't in the + # IOBase hierarchy, but just have a write method + buffer = io.BufferedIOBase() + buffer.writable = lambda: True + buffer.write = out.write + try: + # TextIOWrapper uses this methods to determine + # if BOM (for UTF-16, etc) should be added + buffer.seekable = out.seekable + buffer.tell = out.tell + except AttributeError: + pass + return io.TextIOWrapper(buffer, 
encoding=encoding, + errors='xmlcharrefreplace', + newline='\n', + write_through=True) + +class XMLGenerator(handler.ContentHandler): + + def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False): + handler.ContentHandler.__init__(self) + out = _gettextwriter(out, encoding) + self._write = out.write + self._flush = out.flush + self._ns_contexts = [{}] # contains uri -> prefix dicts + self._current_context = self._ns_contexts[-1] + self._undeclared_ns_maps = [] + self._encoding = encoding + self._short_empty_elements = short_empty_elements + self._pending_start_element = False + + def _qname(self, name): + """Builds a qualified name from a (ns_url, localname) pair""" + if name[0]: + # Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is + # bound by definition to http://www.w3.org/XML/1998/namespace. It + # does not need to be declared and will not usually be found in + # self._current_context. + if 'http://www.w3.org/XML/1998/namespace' == name[0]: + return 'xml:' + name[1] + # The name is in a non-empty namespace + prefix = self._current_context[name[0]] + if prefix: + # If it is not the default namespace, prepend the prefix + return prefix + ":" + name[1] + # Return the unqualified name + return name[1] + + def _finish_pending_start_element(self,endElement=False): + if self._pending_start_element: + self._write('>') + self._pending_start_element = False + + # ContentHandler methods + + def startDocument(self): + self._write('\n' % + self._encoding) + + def endDocument(self): + self._flush() + + def startPrefixMapping(self, prefix, uri): + self._ns_contexts.append(self._current_context.copy()) + self._current_context[uri] = prefix + self._undeclared_ns_maps.append((prefix, uri)) + + def endPrefixMapping(self, prefix): + self._current_context = self._ns_contexts[-1] + del self._ns_contexts[-1] + + def startElement(self, name, attrs): + self._finish_pending_start_element() + self._write('<' + name) + for (name, value) in attrs.items(): 
+ self._write(' %s=%s' % (name, quoteattr(value))) + if self._short_empty_elements: + self._pending_start_element = True + else: + self._write(">") + + def endElement(self, name): + if self._pending_start_element: + self._write('/>') + self._pending_start_element = False + else: + self._write('' % name) + + def startElementNS(self, name, qname, attrs): + self._finish_pending_start_element() + self._write('<' + self._qname(name)) + + for prefix, uri in self._undeclared_ns_maps: + if prefix: + self._write(' xmlns:%s="%s"' % (prefix, uri)) + else: + self._write(' xmlns="%s"' % uri) + self._undeclared_ns_maps = [] + + for (name, value) in attrs.items(): + self._write(' %s=%s' % (self._qname(name), quoteattr(value))) + if self._short_empty_elements: + self._pending_start_element = True + else: + self._write(">") + + def endElementNS(self, name, qname): + if self._pending_start_element: + self._write('/>') + self._pending_start_element = False + else: + self._write('' % self._qname(name)) + + def characters(self, content): + if content: + self._finish_pending_start_element() + if not isinstance(content, str): + content = str(content, self._encoding) + self._write(escape(content)) + + def ignorableWhitespace(self, content): + if content: + self._finish_pending_start_element() + if not isinstance(content, str): + content = str(content, self._encoding) + self._write(content) + + def processingInstruction(self, target, data): + self._finish_pending_start_element() + self._write('' % (target, data)) + + +class XMLFilterBase(xmlreader.XMLReader): + """This class is designed to sit between an XMLReader and the + client application's event handlers. 
By default, it does nothing + but pass requests up to the reader and events on to the handlers + unmodified, but subclasses can override specific methods to modify + the event stream or the configuration requests as they pass + through.""" + + def __init__(self, parent = None): + xmlreader.XMLReader.__init__(self) + self._parent = parent + + # ErrorHandler methods + + def error(self, exception): + self._err_handler.error(exception) + + def fatalError(self, exception): + self._err_handler.fatalError(exception) + + def warning(self, exception): + self._err_handler.warning(exception) + + # ContentHandler methods + + def setDocumentLocator(self, locator): + self._cont_handler.setDocumentLocator(locator) + + def startDocument(self): + self._cont_handler.startDocument() + + def endDocument(self): + self._cont_handler.endDocument() + + def startPrefixMapping(self, prefix, uri): + self._cont_handler.startPrefixMapping(prefix, uri) + + def endPrefixMapping(self, prefix): + self._cont_handler.endPrefixMapping(prefix) + + def startElement(self, name, attrs): + self._cont_handler.startElement(name, attrs) + + def endElement(self, name): + self._cont_handler.endElement(name) + + def startElementNS(self, name, qname, attrs): + self._cont_handler.startElementNS(name, qname, attrs) + + def endElementNS(self, name, qname): + self._cont_handler.endElementNS(name, qname) + + def characters(self, content): + self._cont_handler.characters(content) + + def ignorableWhitespace(self, chars): + self._cont_handler.ignorableWhitespace(chars) + + def processingInstruction(self, target, data): + self._cont_handler.processingInstruction(target, data) + + def skippedEntity(self, name): + self._cont_handler.skippedEntity(name) + + # DTDHandler methods + + def notationDecl(self, name, publicId, systemId): + self._dtd_handler.notationDecl(name, publicId, systemId) + + def unparsedEntityDecl(self, name, publicId, systemId, ndata): + self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, 
ndata) + + # EntityResolver methods + + def resolveEntity(self, publicId, systemId): + return self._ent_handler.resolveEntity(publicId, systemId) + + # XMLReader methods + + def parse(self, source): + self._parent.setContentHandler(self) + self._parent.setErrorHandler(self) + self._parent.setEntityResolver(self) + self._parent.setDTDHandler(self) + self._parent.parse(source) + + def setLocale(self, locale): + self._parent.setLocale(locale) + + def getFeature(self, name): + return self._parent.getFeature(name) + + def setFeature(self, name, state): + self._parent.setFeature(name, state) + + def getProperty(self, name): + return self._parent.getProperty(name) + + def setProperty(self, name, value): + self._parent.setProperty(name, value) + + # XMLFilter methods + + def getParent(self): + return self._parent + + def setParent(self, parent): + self._parent = parent + +# --- Utility functions + +def prepare_input_source(source, base=""): + """This function takes an InputSource and an optional base URL and + returns a fully resolved InputSource object ready for reading.""" + + if isinstance(source, os.PathLike): + source = os.fspath(source) + if isinstance(source, str): + source = xmlreader.InputSource(source) + elif hasattr(source, "read"): + f = source + source = xmlreader.InputSource() + if isinstance(f.read(0), str): + source.setCharacterStream(f) + else: + source.setByteStream(f) + if hasattr(f, "name") and isinstance(f.name, str): + source.setSystemId(f.name) + + if source.getCharacterStream() is None and source.getByteStream() is None: + sysid = source.getSystemId() + basehead = os.path.dirname(os.path.normpath(base)) + sysidfilename = os.path.join(basehead, sysid) + if os.path.isfile(sysidfilename): + source.setSystemId(sysidfilename) + f = open(sysidfilename, "rb") + else: + source.setSystemId(urllib.parse.urljoin(base, sysid)) + f = urllib.request.urlopen(source.getSystemId()) + + source.setByteStream(f) + + return source diff --git 
a/parrot/lib/python3.10/xml/sax/xmlreader.py b/parrot/lib/python3.10/xml/sax/xmlreader.py new file mode 100644 index 0000000000000000000000000000000000000000..716f22840414e604ab38661f91eb07f094309833 --- /dev/null +++ b/parrot/lib/python3.10/xml/sax/xmlreader.py @@ -0,0 +1,380 @@ +"""An XML Reader is the SAX 2 name for an XML parser. XML Parsers +should be based on this code. """ + +from . import handler + +from ._exceptions import SAXNotSupportedException, SAXNotRecognizedException + + +# ===== XMLREADER ===== + +class XMLReader: + """Interface for reading an XML document using callbacks. + + XMLReader is the interface that an XML parser's SAX2 driver must + implement. This interface allows an application to set and query + features and properties in the parser, to register event handlers + for document processing, and to initiate a document parse. + + All SAX interfaces are assumed to be synchronous: the parse + methods must not return until parsing is complete, and readers + must wait for an event-handler callback to return before reporting + the next event.""" + + def __init__(self): + self._cont_handler = handler.ContentHandler() + self._dtd_handler = handler.DTDHandler() + self._ent_handler = handler.EntityResolver() + self._err_handler = handler.ErrorHandler() + + def parse(self, source): + "Parse an XML document from a system identifier or an InputSource." + raise NotImplementedError("This method must be implemented!") + + def getContentHandler(self): + "Returns the current ContentHandler." + return self._cont_handler + + def setContentHandler(self, handler): + "Registers a new object to receive document content events." + self._cont_handler = handler + + def getDTDHandler(self): + "Returns the current DTD handler." + return self._dtd_handler + + def setDTDHandler(self, handler): + "Register an object to receive basic DTD-related events." + self._dtd_handler = handler + + def getEntityResolver(self): + "Returns the current EntityResolver." 
+ return self._ent_handler + + def setEntityResolver(self, resolver): + "Register an object to resolve external entities." + self._ent_handler = resolver + + def getErrorHandler(self): + "Returns the current ErrorHandler." + return self._err_handler + + def setErrorHandler(self, handler): + "Register an object to receive error-message events." + self._err_handler = handler + + def setLocale(self, locale): + """Allow an application to set the locale for errors and warnings. + + SAX parsers are not required to provide localization for errors + and warnings; if they cannot support the requested locale, + however, they must raise a SAX exception. Applications may + request a locale change in the middle of a parse.""" + raise SAXNotSupportedException("Locale support not implemented") + + def getFeature(self, name): + "Looks up and returns the state of a SAX2 feature." + raise SAXNotRecognizedException("Feature '%s' not recognized" % name) + + def setFeature(self, name, state): + "Sets the state of a SAX2 feature." + raise SAXNotRecognizedException("Feature '%s' not recognized" % name) + + def getProperty(self, name): + "Looks up and returns the value of a SAX2 property." + raise SAXNotRecognizedException("Property '%s' not recognized" % name) + + def setProperty(self, name, value): + "Sets the value of a SAX2 property." + raise SAXNotRecognizedException("Property '%s' not recognized" % name) + +class IncrementalParser(XMLReader): + """This interface adds three extra methods to the XMLReader + interface that allow XML parsers to support incremental + parsing. Support for this interface is optional, since not all + underlying XML parsers support this functionality. + + When the parser is instantiated it is ready to begin accepting + data from the feed method immediately. After parsing has been + finished with a call to close the reset method must be called to + make the parser ready to accept new data, either from feed or + using the parse method. 
+ + Note that these methods must _not_ be called during parsing, that + is, after parse has been called and before it returns. + + By default, the class also implements the parse method of the XMLReader + interface using the feed, close and reset methods of the + IncrementalParser interface as a convenience to SAX 2.0 driver + writers.""" + + def __init__(self, bufsize=2**16): + self._bufsize = bufsize + XMLReader.__init__(self) + + def parse(self, source): + from . import saxutils + source = saxutils.prepare_input_source(source) + + self.prepareParser(source) + file = source.getCharacterStream() + if file is None: + file = source.getByteStream() + buffer = file.read(self._bufsize) + while buffer: + self.feed(buffer) + buffer = file.read(self._bufsize) + self.close() + + def feed(self, data): + """This method gives the raw XML data in the data parameter to + the parser and makes it parse the data, emitting the + corresponding events. It is allowed for XML constructs to be + split across several calls to feed. + + feed may raise SAXException.""" + raise NotImplementedError("This method must be implemented!") + + def prepareParser(self, source): + """This method is called by the parse implementation to allow + the SAX 2.0 driver to prepare itself for parsing.""" + raise NotImplementedError("prepareParser must be overridden!") + + def close(self): + """This method is called when the entire XML document has been + passed to the parser through the feed method, to notify the + parser that there are no more data. This allows the parser to + do the final checks on the document and empty the internal + data buffer. + + The parser will not be ready to parse another document until + the reset method has been called. + + close may raise SAXException.""" + raise NotImplementedError("This method must be implemented!") + + def reset(self): + """This method is called after close has been called to reset + the parser so that it is ready to parse new documents. 
The + results of calling parse or feed after close without calling + reset are undefined.""" + raise NotImplementedError("This method must be implemented!") + +# ===== LOCATOR ===== + +class Locator: + """Interface for associating a SAX event with a document + location. A locator object will return valid results only during + calls to DocumentHandler methods; at any other time, the + results are unpredictable.""" + + def getColumnNumber(self): + "Return the column number where the current event ends." + return -1 + + def getLineNumber(self): + "Return the line number where the current event ends." + return -1 + + def getPublicId(self): + "Return the public identifier for the current event." + return None + + def getSystemId(self): + "Return the system identifier for the current event." + return None + +# ===== INPUTSOURCE ===== + +class InputSource: + """Encapsulation of the information needed by the XMLReader to + read entities. + + This class may include information about the public identifier, + system identifier, byte stream (possibly with character encoding + information) and/or the character stream of an entity. + + Applications will create objects of this class for use in the + XMLReader.parse method and for returning from + EntityResolver.resolveEntity. + + An InputSource belongs to the application, the XMLReader is not + allowed to modify InputSource objects passed to it from the + application, although it may make copies and modify those.""" + + def __init__(self, system_id = None): + self.__system_id = system_id + self.__public_id = None + self.__encoding = None + self.__bytefile = None + self.__charfile = None + + def setPublicId(self, public_id): + "Sets the public identifier of this InputSource." + self.__public_id = public_id + + def getPublicId(self): + "Returns the public identifier of this InputSource." + return self.__public_id + + def setSystemId(self, system_id): + "Sets the system identifier of this InputSource." 
+ self.__system_id = system_id + + def getSystemId(self): + "Returns the system identifier of this InputSource." + return self.__system_id + + def setEncoding(self, encoding): + """Sets the character encoding of this InputSource. + + The encoding must be a string acceptable for an XML encoding + declaration (see section 4.3.3 of the XML recommendation). + + The encoding attribute of the InputSource is ignored if the + InputSource also contains a character stream.""" + self.__encoding = encoding + + def getEncoding(self): + "Get the character encoding of this InputSource." + return self.__encoding + + def setByteStream(self, bytefile): + """Set the byte stream (a Python file-like object which does + not perform byte-to-character conversion) for this input + source. + + The SAX parser will ignore this if there is also a character + stream specified, but it will use a byte stream in preference + to opening a URI connection itself. + + If the application knows the character encoding of the byte + stream, it should set it with the setEncoding method.""" + self.__bytefile = bytefile + + def getByteStream(self): + """Get the byte stream for this input source. + + The getEncoding method will return the character encoding for + this byte stream, or None if unknown.""" + return self.__bytefile + + def setCharacterStream(self, charfile): + """Set the character stream for this input source. (The stream + must be a Python 2.0 Unicode-wrapped file-like that performs + conversion to Unicode strings.) + + If there is a character stream specified, the SAX parser will + ignore any byte stream and will not attempt to open a URI + connection to the system identifier.""" + self.__charfile = charfile + + def getCharacterStream(self): + "Get the character stream for this input source." + return self.__charfile + +# ===== ATTRIBUTESIMPL ===== + +class AttributesImpl: + + def __init__(self, attrs): + """Non-NS-aware implementation. 
+ + attrs should be of the form {name : value}.""" + self._attrs = attrs + + def getLength(self): + return len(self._attrs) + + def getType(self, name): + return "CDATA" + + def getValue(self, name): + return self._attrs[name] + + def getValueByQName(self, name): + return self._attrs[name] + + def getNameByQName(self, name): + if name not in self._attrs: + raise KeyError(name) + return name + + def getQNameByName(self, name): + if name not in self._attrs: + raise KeyError(name) + return name + + def getNames(self): + return list(self._attrs.keys()) + + def getQNames(self): + return list(self._attrs.keys()) + + def __len__(self): + return len(self._attrs) + + def __getitem__(self, name): + return self._attrs[name] + + def keys(self): + return list(self._attrs.keys()) + + def __contains__(self, name): + return name in self._attrs + + def get(self, name, alternative=None): + return self._attrs.get(name, alternative) + + def copy(self): + return self.__class__(self._attrs) + + def items(self): + return list(self._attrs.items()) + + def values(self): + return list(self._attrs.values()) + +# ===== ATTRIBUTESNSIMPL ===== + +class AttributesNSImpl(AttributesImpl): + + def __init__(self, attrs, qnames): + """NS-aware implementation. + + attrs should be of the form {(ns_uri, lname): value, ...}. 
+ qnames of the form {(ns_uri, lname): qname, ...}.""" + self._attrs = attrs + self._qnames = qnames + + def getValueByQName(self, name): + for (nsname, qname) in self._qnames.items(): + if qname == name: + return self._attrs[nsname] + + raise KeyError(name) + + def getNameByQName(self, name): + for (nsname, qname) in self._qnames.items(): + if qname == name: + return nsname + + raise KeyError(name) + + def getQNameByName(self, name): + return self._qnames[name] + + def getQNames(self): + return list(self._qnames.values()) + + def copy(self): + return self.__class__(self._attrs, self._qnames) + + +def _test(): + XMLReader() + IncrementalParser() + Locator() + +if __name__ == "__main__": + _test()