diff --git a/.gitattributes b/.gitattributes index 2533eedc9ed4557f2dba803ce83cf6a8d19b7f20..818f65d2eb51fb83cdc82e86395e3be046817618 100644 --- a/.gitattributes +++ b/.gitattributes @@ -434,3 +434,4 @@ openflamingo/bin/python filter=lfs diff=lfs merge=lfs -text llava/lib/libtcl8.6.so filter=lfs diff=lfs merge=lfs -text llava/lib/python3.10/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text llava/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +llava/lib/python3.10/ensurepip/_bundled/pip-23.0.1-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text diff --git a/llava/lib/python3.10/ensurepip/_bundled/pip-23.0.1-py3-none-any.whl b/llava/lib/python3.10/ensurepip/_bundled/pip-23.0.1-py3-none-any.whl new file mode 100644 index 0000000000000000000000000000000000000000..c7e03d464c9687907f77dc23718365528a05ac0b --- /dev/null +++ b/llava/lib/python3.10/ensurepip/_bundled/pip-23.0.1-py3-none-any.whl @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f +size 2055563 diff --git a/llava/lib/python3.10/lib2to3/PatternGrammar.txt b/llava/lib/python3.10/lib2to3/PatternGrammar.txt new file mode 100644 index 0000000000000000000000000000000000000000..36bf8148273bd7a27b4a76817a776fe2ab234562 --- /dev/null +++ b/llava/lib/python3.10/lib2to3/PatternGrammar.txt @@ -0,0 +1,28 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +# A grammar to describe tree matching patterns. +# Not shown here: +# - 'TOKEN' stands for any token (leaf node) +# - 'any' stands for any node (leaf or interior) +# With 'any' we can still specify the sub-structure. + +# The start symbol is 'Matcher'. + +Matcher: Alternatives ENDMARKER + +Alternatives: Alternative ('|' Alternative)* + +Alternative: (Unit | NegatedUnit)+ + +Unit: [NAME '='] ( STRING [Repeater] + | NAME [Details] [Repeater] + | '(' Alternatives ')' [Repeater] + | '[' Alternatives ']' + ) + +NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')') + +Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}' + +Details: '<' Alternatives '>' diff --git a/llava/lib/python3.10/lib2to3/__init__.py b/llava/lib/python3.10/lib2to3/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4224dffef429570a3aaca3c1cc3a258a8f322251 --- /dev/null +++ b/llava/lib/python3.10/lib2to3/__init__.py @@ -0,0 +1,8 @@ +import warnings + + +warnings.warn( + "lib2to3 package is deprecated and may not be able to parse Python 3.10+", + PendingDeprecationWarning, + stacklevel=2, +) diff --git a/llava/lib/python3.10/lib2to3/__main__.py b/llava/lib/python3.10/lib2to3/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..80688baf27abfcaf99ea680787f17212747db5f6 --- /dev/null +++ b/llava/lib/python3.10/lib2to3/__main__.py @@ -0,0 +1,4 @@ +import sys +from .main import main + +sys.exit(main("lib2to3.fixes")) diff --git a/llava/lib/python3.10/lib2to3/btm_matcher.py b/llava/lib/python3.10/lib2to3/btm_matcher.py new file mode 100644 index 0000000000000000000000000000000000000000..3b78868038bda0733f03995e3f31c517c17bdc00 --- /dev/null +++ b/llava/lib/python3.10/lib2to3/btm_matcher.py @@ -0,0 +1,163 @@ +"""A bottom-up tree matching algorithm implementation meant to speed +up 2to3's matching process. After the tree patterns are reduced to +their rarest linear path, a linear Aho-Corasick automaton is +created. 
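An illustration (not part of this patch) of the linear-path reduction just described: the sketch below feeds a typical fixer pattern through the same reduce_tree/get_linear_subpattern calls that add_fixer uses; the pattern string itself is a made-up example.

    from lib2to3.patcomp import PatternCompiler
    from lib2to3.btm_utils import reduce_tree

    # Compile a pattern the way fixer_base.BaseFix.compile_pattern does,
    # keeping the raw pattern tree next to the compiled matcher.
    compiled, tree = PatternCompiler().compile_pattern(
        "power< 'eval' trailer< '(' any ')' > >", with_tree=True)

    # Collapse the pattern tree to its characteristic leaf-to-root path,
    # e.g. ['eval', <symbol number for power>].
    print(reduce_tree(tree).get_linear_subpattern())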
The linear automaton traverses the linear paths from the +leaves to the root of the AST and returns a set of nodes for further +matching. This reduces significantly the number of candidate nodes.""" + +__author__ = "George Boutsioukis " + +import logging +import itertools +from collections import defaultdict + +from . import pytree +from .btm_utils import reduce_tree + +class BMNode(object): + """Class for a node of the Aho-Corasick automaton used in matching""" + count = itertools.count() + def __init__(self): + self.transition_table = {} + self.fixers = [] + self.id = next(BMNode.count) + self.content = '' + +class BottomMatcher(object): + """The main matcher class. After instantiating the patterns should + be added using the add_fixer method""" + + def __init__(self): + self.match = set() + self.root = BMNode() + self.nodes = [self.root] + self.fixers = [] + self.logger = logging.getLogger("RefactoringTool") + + def add_fixer(self, fixer): + """Reduces a fixer's pattern tree to a linear path and adds it + to the matcher(a common Aho-Corasick automaton). The fixer is + appended on the matching states and called when they are + reached""" + self.fixers.append(fixer) + tree = reduce_tree(fixer.pattern_tree) + linear = tree.get_linear_subpattern() + match_nodes = self.add(linear, start=self.root) + for match_node in match_nodes: + match_node.fixers.append(fixer) + + def add(self, pattern, start): + "Recursively adds a linear pattern to the AC automaton" + #print("adding pattern", pattern, "to", start) + if not pattern: + #print("empty pattern") + return [start] + if isinstance(pattern[0], tuple): + #alternatives + #print("alternatives") + match_nodes = [] + for alternative in pattern[0]: + #add all alternatives, and add the rest of the pattern + #to each end node + end_nodes = self.add(alternative, start=start) + for end in end_nodes: + match_nodes.extend(self.add(pattern[1:], end)) + return match_nodes + else: + #single token + #not last + if pattern[0] not in start.transition_table: + #transition did not exist, create new + next_node = BMNode() + start.transition_table[pattern[0]] = next_node + else: + #transition exists already, follow + next_node = start.transition_table[pattern[0]] + + if pattern[1:]: + end_nodes = self.add(pattern[1:], start=next_node) + else: + end_nodes = [next_node] + return end_nodes + + def run(self, leaves): + """The main interface with the bottom matcher. The tree is + traversed from the bottom using the constructed + automaton. Nodes are only checked once as the tree is + retraversed. When the automaton fails, we give it one more + shot(in case the above tree matches as a whole with the + rejected leaf), then we break for the next leaf. 
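The reset-and-retry behaviour described above can be pictured with a toy model (illustration only; the dict-of-dicts automaton and all names are made up):

    def bottom_up(path, root):
        """Follow transitions along one leaf-to-root token path."""
        node, hits = root, []
        for tok in path:
            if tok in node:
                node = node[tok]
            else:
                node = root             # matching failed: reset the automaton
                if tok in node:         # give the rejected token one more shot
                    node = node[tok]
            hits.extend(node.get("fixers", ()))
        return hits

    automaton = {"eval": {"trailer": {"power": {"fixers": ["fix_eval"]}}}}
    print(bottom_up(["eval", "trailer", "power"], automaton))  # ['fix_eval']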
There is the + special case of multiple arguments(see code comments) where we + recheck the nodes + + Args: + The leaves of the AST tree to be matched + + Returns: + A dictionary of node matches with fixers as the keys + """ + current_ac_node = self.root + results = defaultdict(list) + for leaf in leaves: + current_ast_node = leaf + while current_ast_node: + current_ast_node.was_checked = True + for child in current_ast_node.children: + # multiple statements, recheck + if isinstance(child, pytree.Leaf) and child.value == ";": + current_ast_node.was_checked = False + break + if current_ast_node.type == 1: + #name + node_token = current_ast_node.value + else: + node_token = current_ast_node.type + + if node_token in current_ac_node.transition_table: + #token matches + current_ac_node = current_ac_node.transition_table[node_token] + for fixer in current_ac_node.fixers: + results[fixer].append(current_ast_node) + else: + #matching failed, reset automaton + current_ac_node = self.root + if (current_ast_node.parent is not None + and current_ast_node.parent.was_checked): + #the rest of the tree upwards has been checked, next leaf + break + + #recheck the rejected node once from the root + if node_token in current_ac_node.transition_table: + #token matches + current_ac_node = current_ac_node.transition_table[node_token] + for fixer in current_ac_node.fixers: + results[fixer].append(current_ast_node) + + current_ast_node = current_ast_node.parent + return results + + def print_ac(self): + "Prints a graphviz diagram of the BM automaton(for debugging)" + print("digraph g{") + def print_node(node): + for subnode_key in node.transition_table.keys(): + subnode = node.transition_table[subnode_key] + print("%d -> %d [label=%s] //%s" % + (node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers))) + if subnode_key == 1: + print(subnode.content) + print_node(subnode) + print_node(self.root) + print("}") + +# taken from pytree.py for debugging; only used by print_ac +_type_reprs = {} +def type_repr(type_num): + global _type_reprs + if not _type_reprs: + from .pygram import python_symbols + # printing tokens is possible but not as useful + # from .pgen2 import token // token.__dict__.items(): + for name, val in python_symbols.__dict__.items(): + if type(val) == int: _type_reprs[val] = name + return _type_reprs.setdefault(type_num, type_num) diff --git a/llava/lib/python3.10/lib2to3/btm_utils.py b/llava/lib/python3.10/lib2to3/btm_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..ff76ba340470729f7e44ec2ce19afc7cc1ca931a --- /dev/null +++ b/llava/lib/python3.10/lib2to3/btm_utils.py @@ -0,0 +1,281 @@ +"Utility functions used by the btm_matcher module" + +from . import pytree +from .pgen2 import grammar, token +from .pygram import pattern_symbols, python_symbols + +syms = pattern_symbols +pysyms = python_symbols +tokens = grammar.opmap +token_labels = token + +TYPE_ANY = -1 +TYPE_ALTERNATIVES = -2 +TYPE_GROUP = -3 + +class MinNode(object): + """This class serves as an intermediate representation of the + pattern tree during the conversion to sets of leaf-to-root + subpatterns""" + + def __init__(self, type=None, name=None): + self.type = type + self.name = name + self.children = [] + self.leaf = False + self.parent = None + self.alternatives = [] + self.group = [] + + def __repr__(self): + return str(self.type) + ' ' + str(self.name) + + def leaf_to_root(self): + """Internal method. Returns a characteristic path of the + pattern tree. 
This method must be run for all leaves until the + linear subpatterns are merged into a single""" + node = self + subp = [] + while node: + if node.type == TYPE_ALTERNATIVES: + node.alternatives.append(subp) + if len(node.alternatives) == len(node.children): + #last alternative + subp = [tuple(node.alternatives)] + node.alternatives = [] + node = node.parent + continue + else: + node = node.parent + subp = None + break + + if node.type == TYPE_GROUP: + node.group.append(subp) + #probably should check the number of leaves + if len(node.group) == len(node.children): + subp = get_characteristic_subpattern(node.group) + node.group = [] + node = node.parent + continue + else: + node = node.parent + subp = None + break + + if node.type == token_labels.NAME and node.name: + #in case of type=name, use the name instead + subp.append(node.name) + else: + subp.append(node.type) + + node = node.parent + return subp + + def get_linear_subpattern(self): + """Drives the leaf_to_root method. The reason that + leaf_to_root must be run multiple times is because we need to + reject 'group' matches; for example the alternative form + (a | b c) creates a group [b c] that needs to be matched. Since + matching multiple linear patterns overcomes the automaton's + capabilities, leaf_to_root merges each group into a single + choice based on 'characteristic'ity, + + i.e. (a|b c) -> (a|b) if b more characteristic than c + + Returns: The most 'characteristic'(as defined by + get_characteristic_subpattern) path for the compiled pattern + tree. + """ + + for l in self.leaves(): + subp = l.leaf_to_root() + if subp: + return subp + + def leaves(self): + "Generator that returns the leaves of the tree" + for child in self.children: + yield from child.leaves() + if not self.children: + yield self + +def reduce_tree(node, parent=None): + """ + Internal function. Reduces a compiled pattern tree to an + intermediate representation suitable for feeding the + automaton. This also trims off any optional pattern elements(like + [a], a*). 
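A small demonstration (not part of this patch) of how leaf_to_root and get_linear_subpattern merge per-leaf paths at an alternatives node, building MinNode objects by hand:

    from lib2to3.pgen2 import token
    from lib2to3.btm_utils import MinNode, TYPE_ALTERNATIVES

    alt = MinNode(type=TYPE_ALTERNATIVES)
    for name in ("a", "b"):
        leaf = MinNode(type=token.NAME, name=name)
        leaf.parent = alt
        alt.children.append(leaf)

    # The walk from the first leaf returns None (the alternatives set is
    # still incomplete); the walk from the second merges both paths.
    print(alt.get_linear_subpattern())  # [(['a'], ['b'])]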
+ """ + + new_node = None + #switch on the node type + if node.type == syms.Matcher: + #skip + node = node.children[0] + + if node.type == syms.Alternatives : + #2 cases + if len(node.children) <= 2: + #just a single 'Alternative', skip this node + new_node = reduce_tree(node.children[0], parent) + else: + #real alternatives + new_node = MinNode(type=TYPE_ALTERNATIVES) + #skip odd children('|' tokens) + for child in node.children: + if node.children.index(child)%2: + continue + reduced = reduce_tree(child, new_node) + if reduced is not None: + new_node.children.append(reduced) + elif node.type == syms.Alternative: + if len(node.children) > 1: + + new_node = MinNode(type=TYPE_GROUP) + for child in node.children: + reduced = reduce_tree(child, new_node) + if reduced: + new_node.children.append(reduced) + if not new_node.children: + # delete the group if all of the children were reduced to None + new_node = None + + else: + new_node = reduce_tree(node.children[0], parent) + + elif node.type == syms.Unit: + if (isinstance(node.children[0], pytree.Leaf) and + node.children[0].value == '('): + #skip parentheses + return reduce_tree(node.children[1], parent) + if ((isinstance(node.children[0], pytree.Leaf) and + node.children[0].value == '[') + or + (len(node.children)>1 and + hasattr(node.children[1], "value") and + node.children[1].value == '[')): + #skip whole unit if its optional + return None + + leaf = True + details_node = None + alternatives_node = None + has_repeater = False + repeater_node = None + has_variable_name = False + + for child in node.children: + if child.type == syms.Details: + leaf = False + details_node = child + elif child.type == syms.Repeater: + has_repeater = True + repeater_node = child + elif child.type == syms.Alternatives: + alternatives_node = child + if hasattr(child, 'value') and child.value == '=': # variable name + has_variable_name = True + + #skip variable name + if has_variable_name: + #skip variable name, '=' + name_leaf = node.children[2] + if hasattr(name_leaf, 'value') and name_leaf.value == '(': + # skip parenthesis + name_leaf = node.children[3] + else: + name_leaf = node.children[0] + + #set node type + if name_leaf.type == token_labels.NAME: + #(python) non-name or wildcard + if name_leaf.value == 'any': + new_node = MinNode(type=TYPE_ANY) + else: + if hasattr(token_labels, name_leaf.value): + new_node = MinNode(type=getattr(token_labels, name_leaf.value)) + else: + new_node = MinNode(type=getattr(pysyms, name_leaf.value)) + + elif name_leaf.type == token_labels.STRING: + #(python) name or character; remove the apostrophes from + #the string value + name = name_leaf.value.strip("'") + if name in tokens: + new_node = MinNode(type=tokens[name]) + else: + new_node = MinNode(type=token_labels.NAME, name=name) + elif name_leaf.type == syms.Alternatives: + new_node = reduce_tree(alternatives_node, parent) + + #handle repeaters + if has_repeater: + if repeater_node.children[0].value == '*': + #reduce to None + new_node = None + elif repeater_node.children[0].value == '+': + #reduce to a single occurrence i.e. 
do nothing + pass + else: + #TODO: handle {min, max} repeaters + raise NotImplementedError + pass + + #add children + if details_node and new_node is not None: + for child in details_node.children[1:-1]: + #skip '<', '>' markers + reduced = reduce_tree(child, new_node) + if reduced is not None: + new_node.children.append(reduced) + if new_node: + new_node.parent = parent + return new_node + + +def get_characteristic_subpattern(subpatterns): + """Picks the most characteristic from a list of linear patterns + Current order used is: + names > common_names > common_chars + """ + if not isinstance(subpatterns, list): + return subpatterns + if len(subpatterns)==1: + return subpatterns[0] + + # first pick out the ones containing variable names + subpatterns_with_names = [] + subpatterns_with_common_names = [] + common_names = ['in', 'for', 'if' , 'not', 'None'] + subpatterns_with_common_chars = [] + common_chars = "[]().,:" + for subpattern in subpatterns: + if any(rec_test(subpattern, lambda x: type(x) is str)): + if any(rec_test(subpattern, + lambda x: isinstance(x, str) and x in common_chars)): + subpatterns_with_common_chars.append(subpattern) + elif any(rec_test(subpattern, + lambda x: isinstance(x, str) and x in common_names)): + subpatterns_with_common_names.append(subpattern) + + else: + subpatterns_with_names.append(subpattern) + + if subpatterns_with_names: + subpatterns = subpatterns_with_names + elif subpatterns_with_common_names: + subpatterns = subpatterns_with_common_names + elif subpatterns_with_common_chars: + subpatterns = subpatterns_with_common_chars + # of the remaining subpatterns pick out the longest one + return max(subpatterns, key=len) + +def rec_test(sequence, test_func): + """Tests test_func on all items of sequence and items of included + sub-iterables""" + for x in sequence: + if isinstance(x, (list, tuple)): + yield from rec_test(x, test_func) + else: + yield test_func(x) diff --git a/llava/lib/python3.10/lib2to3/fixer_base.py b/llava/lib/python3.10/lib2to3/fixer_base.py new file mode 100644 index 0000000000000000000000000000000000000000..df581a4deab9e93aade8591bf13f949050d7e50d --- /dev/null +++ b/llava/lib/python3.10/lib2to3/fixer_base.py @@ -0,0 +1,186 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Base class for fixers (optional, but recommended).""" + +# Python imports +import itertools + +# Local imports +from .patcomp import PatternCompiler +from . import pygram +from .fixer_util import does_tree_import + +class BaseFix(object): + + """Optional base class for fixers. + + The subclass name must be FixFooBar where FooBar is the result of + removing underscores and capitalizing the words of the fix name. + For example, the class name for a fixer named 'has_key' should be + FixHasKey. + """ + + PATTERN = None # Most subclasses should override with a string literal + pattern = None # Compiled pattern, set by compile_pattern() + pattern_tree = None # Tree representation of the pattern + options = None # Options object passed to initializer + filename = None # The filename (set by set_filename) + numbers = itertools.count(1) # For new_name() + used_names = set() # A set of all used NAMEs + order = "post" # Does the fixer prefer pre- or post-order traversal + explicit = False # Is this ignored by refactor.py -f all? + run_order = 5 # Fixers will be sorted by run order before execution + # Lower numbers will be run first. 
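For orientation, a minimal sketch of a concrete fixer built on this class (a hypothetical fixer, not part of this patch, following the naming convention above):

    from lib2to3 import fixer_base
    from lib2to3.fixer_util import Name

    class FixEval(fixer_base.BaseFix):
        BM_compatible = True    # opt in to the bottom matcher
        PATTERN = "power< name='eval' trailer< '(' any ')' > >"

        def transform(self, node, results):
            # Rename the leaf bound to 'name' in place, keeping its prefix.
            name = results["name"]
            name.replace(Name("safe_eval", prefix=name.prefix))

RefactoringTool instantiates such a class with (options, log) and calls match()/transform() on candidate nodes.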
+ _accept_type = None # [Advanced and not public] This tells RefactoringTool + # which node type to accept when there's not a pattern. + + keep_line_order = False # For the bottom matcher: match with the + # original line order + BM_compatible = False # Compatibility with the bottom matching + # module; every fixer should set this + # manually + + # Shortcut for access to Python grammar symbols + syms = pygram.python_symbols + + def __init__(self, options, log): + """Initializer. Subclass may override. + + Args: + options: a dict containing the options passed to RefactoringTool + that could be used to customize the fixer through the command line. + log: a list to append warnings and other messages to. + """ + self.options = options + self.log = log + self.compile_pattern() + + def compile_pattern(self): + """Compiles self.PATTERN into self.pattern. + + Subclass may override if it doesn't want to use + self.{pattern,PATTERN} in .match(). + """ + if self.PATTERN is not None: + PC = PatternCompiler() + self.pattern, self.pattern_tree = PC.compile_pattern(self.PATTERN, + with_tree=True) + + def set_filename(self, filename): + """Set the filename. + + The main refactoring tool should call this. + """ + self.filename = filename + + def match(self, node): + """Returns match for a given parse tree node. + + Should return a true or false object (not necessarily a bool). + It may return a non-empty dict of matching sub-nodes as + returned by a matching pattern. + + Subclass may override. + """ + results = {"node": node} + return self.pattern.match(node, results) and results + + def transform(self, node, results): + """Returns the transformation for a given parse tree node. + + Args: + node: the root of the parse tree that matched the fixer. + results: a dict mapping symbolic names to part of the match. + + Returns: + None, or a node that is a modified copy of the + argument node. The node argument may also be modified in-place to + effect the same change. + + Subclass *must* override. + """ + raise NotImplementedError() + + def new_name(self, template="xxx_todo_changeme"): + """Return a string suitable for use as an identifier + + The new name is guaranteed not to conflict with other identifiers. + """ + name = template + while name in self.used_names: + name = template + str(next(self.numbers)) + self.used_names.add(name) + return name + + def log_message(self, message): + if self.first_log: + self.first_log = False + self.log.append("### In file %s ###" % self.filename) + self.log.append(message) + + def cannot_convert(self, node, reason=None): + """Warn the user that a given chunk of code is not valid Python 3, + but that it cannot be converted automatically. + + First argument is the top-level node for the code in question. + Optional second argument is why it can't be converted. + """ + lineno = node.get_lineno() + for_output = node.clone() + for_output.prefix = "" + msg = "Line %d: could not convert: %s" + self.log_message(msg % (lineno, for_output)) + if reason: + self.log_message(reason) + + def warning(self, node, reason): + """Used for warning the user about possible uncertainty in the + translation. + + First argument is the top-level node for the code in question. + Optional second argument is why it can't be converted. + """ + lineno = node.get_lineno() + self.log_message("Line %d: %s" % (lineno, reason)) + + def start_tree(self, tree, filename): + """Some fixers need to maintain tree-wide state. + This method is called once, at the start of tree fix-up. 
+ + tree - the root node of the tree to be processed. + filename - the name of the file the tree came from. + """ + self.used_names = tree.used_names + self.set_filename(filename) + self.numbers = itertools.count(1) + self.first_log = True + + def finish_tree(self, tree, filename): + """Some fixers need to maintain tree-wide state. + This method is called once, at the conclusion of tree fix-up. + + tree - the root node of the tree to be processed. + filename - the name of the file the tree came from. + """ + pass + + +class ConditionalFix(BaseFix): + """ Base class for fixers which not execute if an import is found. """ + + # This is the name of the import which, if found, will cause the test to be skipped + skip_on = None + + def start_tree(self, *args): + super(ConditionalFix, self).start_tree(*args) + self._should_skip = None + + def should_skip(self, node): + if self._should_skip is not None: + return self._should_skip + pkg = self.skip_on.split(".") + name = pkg[-1] + pkg = ".".join(pkg[:-1]) + self._should_skip = does_tree_import(pkg, name, node) + return self._should_skip diff --git a/llava/lib/python3.10/lib2to3/fixer_util.py b/llava/lib/python3.10/lib2to3/fixer_util.py new file mode 100644 index 0000000000000000000000000000000000000000..c2a3a47f503286e05e9bd9d69759e6a2ca214b2c --- /dev/null +++ b/llava/lib/python3.10/lib2to3/fixer_util.py @@ -0,0 +1,453 @@ +"""Utility functions, node construction macros, etc.""" +# Author: Collin Winter + +# Local imports +from .pgen2 import token +from .pytree import Leaf, Node +from .pygram import python_symbols as syms +from . import patcomp + + +########################################################### +### Common node-construction "macros" +########################################################### + +def KeywordArg(keyword, value): + return Node(syms.argument, + [keyword, Leaf(token.EQUAL, "="), value]) + +def LParen(): + return Leaf(token.LPAR, "(") + +def RParen(): + return Leaf(token.RPAR, ")") + +def Assign(target, source): + """Build an assignment statement""" + if not isinstance(target, list): + target = [target] + if not isinstance(source, list): + source.prefix = " " + source = [source] + + return Node(syms.atom, + target + [Leaf(token.EQUAL, "=", prefix=" ")] + source) + +def Name(name, prefix=None): + """Return a NAME leaf""" + return Leaf(token.NAME, name, prefix=prefix) + +def Attr(obj, attr): + """A node tuple for obj.attr""" + return [obj, Node(syms.trailer, [Dot(), attr])] + +def Comma(): + """A comma leaf""" + return Leaf(token.COMMA, ",") + +def Dot(): + """A period (.) 
leaf""" + return Leaf(token.DOT, ".") + +def ArgList(args, lparen=LParen(), rparen=RParen()): + """A parenthesised argument list, used by Call()""" + node = Node(syms.trailer, [lparen.clone(), rparen.clone()]) + if args: + node.insert_child(1, Node(syms.arglist, args)) + return node + +def Call(func_name, args=None, prefix=None): + """A function call""" + node = Node(syms.power, [func_name, ArgList(args)]) + if prefix is not None: + node.prefix = prefix + return node + +def Newline(): + """A newline literal""" + return Leaf(token.NEWLINE, "\n") + +def BlankLine(): + """A blank line""" + return Leaf(token.NEWLINE, "") + +def Number(n, prefix=None): + return Leaf(token.NUMBER, n, prefix=prefix) + +def Subscript(index_node): + """A numeric or string subscript""" + return Node(syms.trailer, [Leaf(token.LBRACE, "["), + index_node, + Leaf(token.RBRACE, "]")]) + +def String(string, prefix=None): + """A string leaf""" + return Leaf(token.STRING, string, prefix=prefix) + +def ListComp(xp, fp, it, test=None): + """A list comprehension of the form [xp for fp in it if test]. + + If test is None, the "if test" part is omitted. + """ + xp.prefix = "" + fp.prefix = " " + it.prefix = " " + for_leaf = Leaf(token.NAME, "for") + for_leaf.prefix = " " + in_leaf = Leaf(token.NAME, "in") + in_leaf.prefix = " " + inner_args = [for_leaf, fp, in_leaf, it] + if test: + test.prefix = " " + if_leaf = Leaf(token.NAME, "if") + if_leaf.prefix = " " + inner_args.append(Node(syms.comp_if, [if_leaf, test])) + inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)]) + return Node(syms.atom, + [Leaf(token.LBRACE, "["), + inner, + Leaf(token.RBRACE, "]")]) + +def FromImport(package_name, name_leafs): + """ Return an import statement in the form: + from package import name_leafs""" + # XXX: May not handle dotted imports properly (eg, package_name='foo.bar') + #assert package_name == '.' or '.' not in package_name, "FromImport has "\ + # "not been tested with dotted package names -- use at your own "\ + # "peril!" 
+ + for leaf in name_leafs: + # Pull the leaves out of their old tree + leaf.remove() + + children = [Leaf(token.NAME, "from"), + Leaf(token.NAME, package_name, prefix=" "), + Leaf(token.NAME, "import", prefix=" "), + Node(syms.import_as_names, name_leafs)] + imp = Node(syms.import_from, children) + return imp + +def ImportAndCall(node, results, names): + """Returns an import statement and calls a method + of the module: + + import module + module.name()""" + obj = results["obj"].clone() + if obj.type == syms.arglist: + newarglist = obj.clone() + else: + newarglist = Node(syms.arglist, [obj.clone()]) + after = results["after"] + if after: + after = [n.clone() for n in after] + new = Node(syms.power, + Attr(Name(names[0]), Name(names[1])) + + [Node(syms.trailer, + [results["lpar"].clone(), + newarglist, + results["rpar"].clone()])] + after) + new.prefix = node.prefix + return new + + +########################################################### +### Determine whether a node represents a given literal +########################################################### + +def is_tuple(node): + """Does the node represent a tuple literal?""" + if isinstance(node, Node) and node.children == [LParen(), RParen()]: + return True + return (isinstance(node, Node) + and len(node.children) == 3 + and isinstance(node.children[0], Leaf) + and isinstance(node.children[1], Node) + and isinstance(node.children[2], Leaf) + and node.children[0].value == "(" + and node.children[2].value == ")") + +def is_list(node): + """Does the node represent a list literal?""" + return (isinstance(node, Node) + and len(node.children) > 1 + and isinstance(node.children[0], Leaf) + and isinstance(node.children[-1], Leaf) + and node.children[0].value == "[" + and node.children[-1].value == "]") + + +########################################################### +### Misc +########################################################### + +def parenthesize(node): + return Node(syms.atom, [LParen(), node, RParen()]) + + +consuming_calls = {"sorted", "list", "set", "any", "all", "tuple", "sum", + "min", "max", "enumerate"} + +def attr_chain(obj, attr): + """Follow an attribute chain. + + If you have a chain of objects where a.foo -> b, b.foo-> c, etc, + use this to iterate over all objects in the chain. Iteration is + terminated by getattr(x, attr) is None. + + Args: + obj: the starting object + attr: the name of the chaining attribute + + Yields: + Each successive object in the chain. + """ + next = getattr(obj, attr) + while next: + yield next + next = getattr(next, attr) + +p0 = """for_stmt< 'for' any 'in' node=any ':' any* > + | comp_for< 'for' any 'in' node=any any* > + """ +p1 = """ +power< + ( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' | + 'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) ) + trailer< '(' node=any ')' > + any* +> +""" +p2 = """ +power< + ( 'sorted' | 'enumerate' ) + trailer< '(' arglist ')' > + any* +> +""" +pats_built = False +def in_special_context(node): + """ Returns true if node is in an environment where all that is required + of it is being iterable (ie, it doesn't matter if it returns a list + or an iterator). + See test_map_nochange in test_fixers.py for some examples and tests. 
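A quick demonstration of attr_chain, defined above (toy class and names made up for the example):

    from lib2to3.fixer_util import attr_chain

    class Box:
        def __init__(self, parent=None):
            self.parent = parent

    root = Box(); mid = Box(root); leaf = Box(mid)
    print(list(attr_chain(leaf, "parent")) == [mid, root])  # True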
+ """ + global p0, p1, p2, pats_built + if not pats_built: + p0 = patcomp.compile_pattern(p0) + p1 = patcomp.compile_pattern(p1) + p2 = patcomp.compile_pattern(p2) + pats_built = True + patterns = [p0, p1, p2] + for pattern, parent in zip(patterns, attr_chain(node, "parent")): + results = {} + if pattern.match(parent, results) and results["node"] is node: + return True + return False + +def is_probably_builtin(node): + """ + Check that something isn't an attribute or function name etc. + """ + prev = node.prev_sibling + if prev is not None and prev.type == token.DOT: + # Attribute lookup. + return False + parent = node.parent + if parent.type in (syms.funcdef, syms.classdef): + return False + if parent.type == syms.expr_stmt and parent.children[0] is node: + # Assignment. + return False + if parent.type == syms.parameters or \ + (parent.type == syms.typedargslist and ( + (prev is not None and prev.type == token.COMMA) or + parent.children[0] is node + )): + # The name of an argument. + return False + return True + +def find_indentation(node): + """Find the indentation of *node*.""" + while node is not None: + if node.type == syms.suite and len(node.children) > 2: + indent = node.children[1] + if indent.type == token.INDENT: + return indent.value + node = node.parent + return "" + +########################################################### +### The following functions are to find bindings in a suite +########################################################### + +def make_suite(node): + if node.type == syms.suite: + return node + node = node.clone() + parent, node.parent = node.parent, None + suite = Node(syms.suite, [node]) + suite.parent = parent + return suite + +def find_root(node): + """Find the top level namespace.""" + # Scamper up to the top level namespace + while node.type != syms.file_input: + node = node.parent + if not node: + raise ValueError("root found before file_input node was found.") + return node + +def does_tree_import(package, name, node): + """ Returns true if name is imported from package at the + top level of the tree which node belongs to. + To cover the case of an import like 'import foo', use + None for the package and 'foo' for the name. """ + binding = find_binding(name, find_root(node), package) + return bool(binding) + +def is_import(node): + """Returns true if the node is an import statement.""" + return node.type in (syms.import_name, syms.import_from) + +def touch_import(package, name, node): + """ Works like `does_tree_import` but adds an import statement + if it was not imported. """ + def is_import_stmt(node): + return (node.type == syms.simple_stmt and node.children and + is_import(node.children[0])) + + root = find_root(node) + + if does_tree_import(package, name, root): + return + + # figure out where to insert the new import. First try to find + # the first import and then skip to the last one. + insert_pos = offset = 0 + for idx, node in enumerate(root.children): + if not is_import_stmt(node): + continue + for offset, node2 in enumerate(root.children[idx:]): + if not is_import_stmt(node2): + break + insert_pos = idx + offset + break + + # if there are no imports where we can insert, find the docstring. 
+ # if that also fails, we stick to the beginning of the file + if insert_pos == 0: + for idx, node in enumerate(root.children): + if (node.type == syms.simple_stmt and node.children and + node.children[0].type == token.STRING): + insert_pos = idx + 1 + break + + if package is None: + import_ = Node(syms.import_name, [ + Leaf(token.NAME, "import"), + Leaf(token.NAME, name, prefix=" ") + ]) + else: + import_ = FromImport(package, [Leaf(token.NAME, name, prefix=" ")]) + + children = [import_, Newline()] + root.insert_child(insert_pos, Node(syms.simple_stmt, children)) + + +_def_syms = {syms.classdef, syms.funcdef} +def find_binding(name, node, package=None): + """ Returns the node which binds variable name, otherwise None. + If optional argument package is supplied, only imports will + be returned. + See test cases for examples.""" + for child in node.children: + ret = None + if child.type == syms.for_stmt: + if _find(name, child.children[1]): + return child + n = find_binding(name, make_suite(child.children[-1]), package) + if n: ret = n + elif child.type in (syms.if_stmt, syms.while_stmt): + n = find_binding(name, make_suite(child.children[-1]), package) + if n: ret = n + elif child.type == syms.try_stmt: + n = find_binding(name, make_suite(child.children[2]), package) + if n: + ret = n + else: + for i, kid in enumerate(child.children[3:]): + if kid.type == token.COLON and kid.value == ":": + # i+3 is the colon, i+4 is the suite + n = find_binding(name, make_suite(child.children[i+4]), package) + if n: ret = n + elif child.type in _def_syms and child.children[1].value == name: + ret = child + elif _is_import_binding(child, name, package): + ret = child + elif child.type == syms.simple_stmt: + ret = find_binding(name, child, package) + elif child.type == syms.expr_stmt: + if _find(name, child.children[0]): + ret = child + + if ret: + if not package: + return ret + if is_import(ret): + return ret + return None + +_block_syms = {syms.funcdef, syms.classdef, syms.trailer} +def _find(name, node): + nodes = [node] + while nodes: + node = nodes.pop() + if node.type > 256 and node.type not in _block_syms: + nodes.extend(node.children) + elif node.type == token.NAME and node.value == name: + return node + return None + +def _is_import_binding(node, name, package=None): + """ Will return node if node will import name, or node + will import * from package. None is returned otherwise. + See test cases for examples. """ + + if node.type == syms.import_name and not package: + imp = node.children[1] + if imp.type == syms.dotted_as_names: + for child in imp.children: + if child.type == syms.dotted_as_name: + if child.children[2].value == name: + return node + elif child.type == token.NAME and child.value == name: + return node + elif imp.type == syms.dotted_as_name: + last = imp.children[-1] + if last.type == token.NAME and last.value == name: + return node + elif imp.type == token.NAME and imp.value == name: + return node + elif node.type == syms.import_from: + # str(...) is used to make life easier here, because + # from a.b import parses to ['import', ['a', '.', 'b'], ...] 
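End to end, the binding helpers behave as in this sketch (assuming the standard lib2to3 driver API; Driver.parse_string and pytree.convert are not part of this patch):

    from lib2to3 import pygram, pytree, fixer_util
    from lib2to3.pgen2 import driver

    d = driver.Driver(pygram.python_grammar_no_print_statement,
                      convert=pytree.convert)
    tree = d.parse_string("from os import path\nprint(path)\n")
    # 'path' is bound by a top-level import from 'os':
    print(fixer_util.does_tree_import("os", "path", tree.children[0]))  # True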
+ if package and str(node.children[1]).strip() != package: + return None + n = node.children[3] + if package and _find("as", n): + # See test_from_import_as for explanation + return None + elif n.type == syms.import_as_names and _find(name, n): + return node + elif n.type == syms.import_as_name: + child = n.children[2] + if child.type == token.NAME and child.value == name: + return node + elif n.type == token.NAME and n.value == name: + return node + elif package and n.type == token.STAR: + return node + return None diff --git a/llava/lib/python3.10/lib2to3/main.py b/llava/lib/python3.10/lib2to3/main.py new file mode 100644 index 0000000000000000000000000000000000000000..f2849fd6be3fd93d0f0c48768f09270aea77a209 --- /dev/null +++ b/llava/lib/python3.10/lib2to3/main.py @@ -0,0 +1,273 @@ +""" +Main program for 2to3. +""" + +from __future__ import with_statement, print_function + +import sys +import os +import difflib +import logging +import shutil +import optparse + +from . import refactor + + +def diff_texts(a, b, filename): + """Return a unified diff of two strings.""" + a = a.splitlines() + b = b.splitlines() + return difflib.unified_diff(a, b, filename, filename, + "(original)", "(refactored)", + lineterm="") + + +class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool): + """ + A refactoring tool that can avoid overwriting its input files. + Prints output to stdout. + + Output files can optionally be written to a different directory and or + have an extra file suffix appended to their name for use in situations + where you do not want to replace the input files. + """ + + def __init__(self, fixers, options, explicit, nobackups, show_diffs, + input_base_dir='', output_dir='', append_suffix=''): + """ + Args: + fixers: A list of fixers to import. + options: A dict with RefactoringTool configuration. + explicit: A list of fixers to run even if they are explicit. + nobackups: If true no backup '.bak' files will be created for those + files that are being refactored. + show_diffs: Should diffs of the refactoring be printed to stdout? + input_base_dir: The base directory for all input files. This class + will strip this path prefix off of filenames before substituting + it with output_dir. Only meaningful if output_dir is supplied. + All files processed by refactor() must start with this path. + output_dir: If supplied, all converted files will be written into + this directory tree instead of input_base_dir. + append_suffix: If supplied, all files output by this tool will have + this appended to their filename. Useful for changing .py to + .py3 for example by passing append_suffix='3'. 
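The input_base_dir/output_dir/append_suffix interplay described above, restated as a toy helper (hypothetical; write_file below is the real implementation):

    import os

    def map_output_path(filename, input_base_dir, output_dir, suffix):
        if input_base_dir and not input_base_dir.endswith(os.sep):
            input_base_dir += os.sep
        if output_dir:
            filename = os.path.join(output_dir,
                                    filename[len(input_base_dir):])
        return filename + suffix

    print(map_output_path("src/pkg/mod.py", "src", "build", "3"))
    # build/pkg/mod.py3 (on POSIX)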
+ """ + self.nobackups = nobackups + self.show_diffs = show_diffs + if input_base_dir and not input_base_dir.endswith(os.sep): + input_base_dir += os.sep + self._input_base_dir = input_base_dir + self._output_dir = output_dir + self._append_suffix = append_suffix + super(StdoutRefactoringTool, self).__init__(fixers, options, explicit) + + def log_error(self, msg, *args, **kwargs): + self.errors.append((msg, args, kwargs)) + self.logger.error(msg, *args, **kwargs) + + def write_file(self, new_text, filename, old_text, encoding): + orig_filename = filename + if self._output_dir: + if filename.startswith(self._input_base_dir): + filename = os.path.join(self._output_dir, + filename[len(self._input_base_dir):]) + else: + raise ValueError('filename %s does not start with the ' + 'input_base_dir %s' % ( + filename, self._input_base_dir)) + if self._append_suffix: + filename += self._append_suffix + if orig_filename != filename: + output_dir = os.path.dirname(filename) + if not os.path.isdir(output_dir) and output_dir: + os.makedirs(output_dir) + self.log_message('Writing converted %s to %s.', orig_filename, + filename) + if not self.nobackups: + # Make backup + backup = filename + ".bak" + if os.path.lexists(backup): + try: + os.remove(backup) + except OSError: + self.log_message("Can't remove backup %s", backup) + try: + os.rename(filename, backup) + except OSError: + self.log_message("Can't rename %s to %s", filename, backup) + # Actually write the new file + write = super(StdoutRefactoringTool, self).write_file + write(new_text, filename, old_text, encoding) + if not self.nobackups: + shutil.copymode(backup, filename) + if orig_filename != filename: + # Preserve the file mode in the new output directory. + shutil.copymode(orig_filename, filename) + + def print_output(self, old, new, filename, equal): + if equal: + self.log_message("No changes to %s", filename) + else: + self.log_message("Refactored %s", filename) + if self.show_diffs: + diff_lines = diff_texts(old, new, filename) + try: + if self.output_lock is not None: + with self.output_lock: + for line in diff_lines: + print(line) + sys.stdout.flush() + else: + for line in diff_lines: + print(line) + except UnicodeEncodeError: + warn("couldn't encode %s's diff for your terminal" % + (filename,)) + return + +def warn(msg): + print("WARNING: %s" % (msg,), file=sys.stderr) + + +def main(fixer_pkg, args=None): + """Main program. + + Args: + fixer_pkg: the name of a package where the fixers are located. + args: optional; a list of command line arguments. If omitted, + sys.argv[1:] is used. + + Returns a suggested exit status (0, 1, 2). 
+ """ + # Set up option parser + parser = optparse.OptionParser(usage="2to3 [options] file|dir ...") + parser.add_option("-d", "--doctests_only", action="store_true", + help="Fix up doctests only") + parser.add_option("-f", "--fix", action="append", default=[], + help="Each FIX specifies a transformation; default: all") + parser.add_option("-j", "--processes", action="store", default=1, + type="int", help="Run 2to3 concurrently") + parser.add_option("-x", "--nofix", action="append", default=[], + help="Prevent a transformation from being run") + parser.add_option("-l", "--list-fixes", action="store_true", + help="List available transformations") + parser.add_option("-p", "--print-function", action="store_true", + help="Modify the grammar so that print() is a function") + parser.add_option("-e", "--exec-function", action="store_true", + help="Modify the grammar so that exec() is a function") + parser.add_option("-v", "--verbose", action="store_true", + help="More verbose logging") + parser.add_option("--no-diffs", action="store_true", + help="Don't show diffs of the refactoring") + parser.add_option("-w", "--write", action="store_true", + help="Write back modified files") + parser.add_option("-n", "--nobackups", action="store_true", default=False, + help="Don't write backups for modified files") + parser.add_option("-o", "--output-dir", action="store", type="str", + default="", help="Put output files in this directory " + "instead of overwriting the input files. Requires -n.") + parser.add_option("-W", "--write-unchanged-files", action="store_true", + help="Also write files even if no changes were required" + " (useful with --output-dir); implies -w.") + parser.add_option("--add-suffix", action="store", type="str", default="", + help="Append this string to all output filenames." + " Requires -n if non-empty. " + "ex: --add-suffix='3' will generate .py3 files.") + + # Parse command line arguments + refactor_stdin = False + flags = {} + options, args = parser.parse_args(args) + if options.write_unchanged_files: + flags["write_unchanged_files"] = True + if not options.write: + warn("--write-unchanged-files/-W implies -w.") + options.write = True + # If we allowed these, the original files would be renamed to backup names + # but not replaced. 
+ if options.output_dir and not options.nobackups: + parser.error("Can't use --output-dir/-o without -n.") + if options.add_suffix and not options.nobackups: + parser.error("Can't use --add-suffix without -n.") + + if not options.write and options.no_diffs: + warn("not writing files and not printing diffs; that's not very useful") + if not options.write and options.nobackups: + parser.error("Can't use -n without -w") + if options.list_fixes: + print("Available transformations for the -f/--fix option:") + for fixname in refactor.get_all_fix_names(fixer_pkg): + print(fixname) + if not args: + return 0 + if not args: + print("At least one file or directory argument required.", file=sys.stderr) + print("Use --help to show usage.", file=sys.stderr) + return 2 + if "-" in args: + refactor_stdin = True + if options.write: + print("Can't write to stdin.", file=sys.stderr) + return 2 + if options.print_function: + flags["print_function"] = True + + if options.exec_function: + flags["exec_function"] = True + + # Set up logging handler + level = logging.DEBUG if options.verbose else logging.INFO + logging.basicConfig(format='%(name)s: %(message)s', level=level) + logger = logging.getLogger('lib2to3.main') + + # Initialize the refactoring tool + avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg)) + unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix) + explicit = set() + if options.fix: + all_present = False + for fix in options.fix: + if fix == "all": + all_present = True + else: + explicit.add(fixer_pkg + ".fix_" + fix) + requested = avail_fixes.union(explicit) if all_present else explicit + else: + requested = avail_fixes.union(explicit) + fixer_names = requested.difference(unwanted_fixes) + input_base_dir = os.path.commonprefix(args) + if (input_base_dir and not input_base_dir.endswith(os.sep) + and not os.path.isdir(input_base_dir)): + # One or more similar names were passed, their directory is the base. + # os.path.commonprefix() is ignorant of path elements, this corrects + # for that weird API. + input_base_dir = os.path.dirname(input_base_dir) + if options.output_dir: + input_base_dir = input_base_dir.rstrip(os.sep) + logger.info('Output in %r will mirror the input directory %r layout.', + options.output_dir, input_base_dir) + rt = StdoutRefactoringTool( + sorted(fixer_names), flags, sorted(explicit), + options.nobackups, not options.no_diffs, + input_base_dir=input_base_dir, + output_dir=options.output_dir, + append_suffix=options.add_suffix) + + # Refactor all files and directories passed as arguments + if not rt.errors: + if refactor_stdin: + rt.refactor_stdin() + else: + try: + rt.refactor(args, options.write, options.doctests_only, + options.processes) + except refactor.MultiprocessingUnsupported: + assert options.processes > 1 + print("Sorry, -j isn't supported on this platform.", + file=sys.stderr) + return 1 + rt.summarize() + + # Return error status (0 if rt.errors is zero) + return int(bool(rt.errors)) diff --git a/llava/lib/python3.10/lib2to3/patcomp.py b/llava/lib/python3.10/lib2to3/patcomp.py new file mode 100644 index 0000000000000000000000000000000000000000..f57f4954b26ce7f59c44ce7cd67bf966fd594cd5 --- /dev/null +++ b/llava/lib/python3.10/lib2to3/patcomp.py @@ -0,0 +1,204 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Pattern compiler. + +The grammar is taken from PatternGrammar.txt. + +The compiler compiles a pattern to a pytree.*Pattern instance. 
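A minimal example of the compiler in use (assuming the standard driver helpers; Driver.parse_string and pytree.convert are outside this patch):

    from lib2to3 import patcomp, pygram, pytree
    from lib2to3.pgen2 import driver

    pat = patcomp.compile_pattern("power< 'len' trailer< '(' any ')' > >")
    d = driver.Driver(pygram.python_grammar_no_print_statement,
                      convert=pytree.convert)
    tree = d.parse_string("n = len(data)\n")
    for node in tree.pre_order():
        results = {}
        if pat.match(node, results):
            print("matched:", node)  # the len(data) call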
+""" + +__author__ = "Guido van Rossum " + +# Python imports +import io + +# Fairly local imports +from .pgen2 import driver, literals, token, tokenize, parse, grammar + +# Really local imports +from . import pytree +from . import pygram + + +class PatternSyntaxError(Exception): + pass + + +def tokenize_wrapper(input): + """Tokenizes a string suppressing significant whitespace.""" + skip = {token.NEWLINE, token.INDENT, token.DEDENT} + tokens = tokenize.generate_tokens(io.StringIO(input).readline) + for quintuple in tokens: + type, value, start, end, line_text = quintuple + if type not in skip: + yield quintuple + + +class PatternCompiler(object): + + def __init__(self, grammar_file=None): + """Initializer. + + Takes an optional alternative filename for the pattern grammar. + """ + if grammar_file is None: + self.grammar = pygram.pattern_grammar + self.syms = pygram.pattern_symbols + else: + self.grammar = driver.load_grammar(grammar_file) + self.syms = pygram.Symbols(self.grammar) + self.pygrammar = pygram.python_grammar + self.pysyms = pygram.python_symbols + self.driver = driver.Driver(self.grammar, convert=pattern_convert) + + def compile_pattern(self, input, debug=False, with_tree=False): + """Compiles a pattern string to a nested pytree.*Pattern object.""" + tokens = tokenize_wrapper(input) + try: + root = self.driver.parse_tokens(tokens, debug=debug) + except parse.ParseError as e: + raise PatternSyntaxError(str(e)) from None + if with_tree: + return self.compile_node(root), root + else: + return self.compile_node(root) + + def compile_node(self, node): + """Compiles a node, recursively. + + This is one big switch on the node type. + """ + # XXX Optimize certain Wildcard-containing-Wildcard patterns + # that can be merged + if node.type == self.syms.Matcher: + node = node.children[0] # Avoid unneeded recursion + + if node.type == self.syms.Alternatives: + # Skip the odd children since they are just '|' tokens + alts = [self.compile_node(ch) for ch in node.children[::2]] + if len(alts) == 1: + return alts[0] + p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1) + return p.optimize() + + if node.type == self.syms.Alternative: + units = [self.compile_node(ch) for ch in node.children] + if len(units) == 1: + return units[0] + p = pytree.WildcardPattern([units], min=1, max=1) + return p.optimize() + + if node.type == self.syms.NegatedUnit: + pattern = self.compile_basic(node.children[1:]) + p = pytree.NegatedPattern(pattern) + return p.optimize() + + assert node.type == self.syms.Unit + + name = None + nodes = node.children + if len(nodes) >= 3 and nodes[1].type == token.EQUAL: + name = nodes[0].value + nodes = nodes[2:] + repeat = None + if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater: + repeat = nodes[-1] + nodes = nodes[:-1] + + # Now we've reduced it to: STRING | NAME [Details] | (...) | [...] 
+ pattern = self.compile_basic(nodes, repeat) + + if repeat is not None: + assert repeat.type == self.syms.Repeater + children = repeat.children + child = children[0] + if child.type == token.STAR: + min = 0 + max = pytree.HUGE + elif child.type == token.PLUS: + min = 1 + max = pytree.HUGE + elif child.type == token.LBRACE: + assert children[-1].type == token.RBRACE + assert len(children) in (3, 5) + min = max = self.get_int(children[1]) + if len(children) == 5: + max = self.get_int(children[3]) + else: + assert False + if min != 1 or max != 1: + pattern = pattern.optimize() + pattern = pytree.WildcardPattern([[pattern]], min=min, max=max) + + if name is not None: + pattern.name = name + return pattern.optimize() + + def compile_basic(self, nodes, repeat=None): + # Compile STRING | NAME [Details] | (...) | [...] + assert len(nodes) >= 1 + node = nodes[0] + if node.type == token.STRING: + value = str(literals.evalString(node.value)) + return pytree.LeafPattern(_type_of_literal(value), value) + elif node.type == token.NAME: + value = node.value + if value.isupper(): + if value not in TOKEN_MAP: + raise PatternSyntaxError("Invalid token: %r" % value) + if nodes[1:]: + raise PatternSyntaxError("Can't have details for token") + return pytree.LeafPattern(TOKEN_MAP[value]) + else: + if value == "any": + type = None + elif not value.startswith("_"): + type = getattr(self.pysyms, value, None) + if type is None: + raise PatternSyntaxError("Invalid symbol: %r" % value) + if nodes[1:]: # Details present + content = [self.compile_node(nodes[1].children[1])] + else: + content = None + return pytree.NodePattern(type, content) + elif node.value == "(": + return self.compile_node(nodes[1]) + elif node.value == "[": + assert repeat is None + subpattern = self.compile_node(nodes[1]) + return pytree.WildcardPattern([[subpattern]], min=0, max=1) + assert False, node + + def get_int(self, node): + assert node.type == token.NUMBER + return int(node.value) + + +# Map named tokens to the type value for a LeafPattern +TOKEN_MAP = {"NAME": token.NAME, + "STRING": token.STRING, + "NUMBER": token.NUMBER, + "TOKEN": None} + + +def _type_of_literal(value): + if value[0].isalpha(): + return token.NAME + elif value in grammar.opmap: + return grammar.opmap[value] + else: + return None + + +def pattern_convert(grammar, raw_node_info): + """Converts raw node information to a Node or Leaf instance.""" + type, value, context, children = raw_node_info + if children or type in grammar.number2symbol: + return pytree.Node(type, children, context=context) + else: + return pytree.Leaf(type, value, context=context) + + +def compile_pattern(pattern): + return PatternCompiler().compile_pattern(pattern) diff --git a/llava/lib/python3.10/lib2to3/pygram.py b/llava/lib/python3.10/lib2to3/pygram.py new file mode 100644 index 0000000000000000000000000000000000000000..24d9db9217f131adf50f6eca7d571ce69cbcbc3e --- /dev/null +++ b/llava/lib/python3.10/lib2to3/pygram.py @@ -0,0 +1,43 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +"""Export the Python grammar and symbols.""" + +# Python imports +import os + +# Local imports +from .pgen2 import token +from .pgen2 import driver +from . import pytree + +# The grammar file +_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt") +_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), + "PatternGrammar.txt") + + +class Symbols(object): + + def __init__(self, grammar): + """Initializer. 
+ + Creates an attribute for each grammar symbol (nonterminal), + whose value is the symbol's type (an int >= 256). + """ + for name, symbol in grammar.symbol2number.items(): + setattr(self, name, symbol) + + +python_grammar = driver.load_packaged_grammar("lib2to3", _GRAMMAR_FILE) + +python_symbols = Symbols(python_grammar) + +python_grammar_no_print_statement = python_grammar.copy() +del python_grammar_no_print_statement.keywords["print"] + +python_grammar_no_print_and_exec_statement = python_grammar_no_print_statement.copy() +del python_grammar_no_print_and_exec_statement.keywords["exec"] + +pattern_grammar = driver.load_packaged_grammar("lib2to3", _PATTERN_GRAMMAR_FILE) +pattern_symbols = Symbols(pattern_grammar) diff --git a/llava/lib/python3.10/lib2to3/pytree.py b/llava/lib/python3.10/lib2to3/pytree.py new file mode 100644 index 0000000000000000000000000000000000000000..729023df0284e15ad8d7ac9f30cf7ed8d42db272 --- /dev/null +++ b/llava/lib/python3.10/lib2to3/pytree.py @@ -0,0 +1,853 @@ +# Copyright 2006 Google, Inc. All Rights Reserved. +# Licensed to PSF under a Contributor Agreement. + +""" +Python parse tree definitions. + +This is a very concrete parse tree; we need to keep every token and +even the comments and whitespace between tokens. + +There's also a pattern matching implementation here. +""" + +__author__ = "Guido van Rossum " + +import sys +from io import StringIO + +HUGE = 0x7FFFFFFF # maximum repeat count, default max + +_type_reprs = {} +def type_repr(type_num): + global _type_reprs + if not _type_reprs: + from .pygram import python_symbols + # printing tokens is possible but not as useful + # from .pgen2 import token // token.__dict__.items(): + for name, val in python_symbols.__dict__.items(): + if type(val) == int: _type_reprs[val] = name + return _type_reprs.setdefault(type_num, type_num) + +class Base(object): + + """ + Abstract base class for Node and Leaf. + + This provides some default functionality and boilerplate using the + template pattern. + + A node may be a subnode of at most one parent. + """ + + # Default values for instance variables + type = None # int: token number (< 256) or symbol number (>= 256) + parent = None # Parent node pointer, or None + children = () # Tuple of subnodes + was_changed = False + was_checked = False + + def __new__(cls, *args, **kwds): + """Constructor that prevents Base from being instantiated.""" + assert cls is not Base, "Cannot instantiate Base" + return object.__new__(cls) + + def __eq__(self, other): + """ + Compare two nodes for equality. + + This calls the method _eq(). + """ + if self.__class__ is not other.__class__: + return NotImplemented + return self._eq(other) + + __hash__ = None # For Py3 compatibility. + + def _eq(self, other): + """ + Compare two nodes for equality. + + This is called by __eq__ and __ne__. It is only called if the two nodes + have the same type. This must be implemented by the concrete subclass. + Nodes should be considered equal if they have the same structure, + ignoring the prefix string and other context information. + """ + raise NotImplementedError + + def clone(self): + """ + Return a cloned (deep) copy of self. + + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def post_order(self): + """ + Return a post-order iterator for the tree. + + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def pre_order(self): + """ + Return a pre-order iterator for the tree. 
+ + This must be implemented by the concrete subclass. + """ + raise NotImplementedError + + def replace(self, new): + """Replace this node with a new one in the parent.""" + assert self.parent is not None, str(self) + assert new is not None + if not isinstance(new, list): + new = [new] + l_children = [] + found = False + for ch in self.parent.children: + if ch is self: + assert not found, (self.parent.children, self, new) + if new is not None: + l_children.extend(new) + found = True + else: + l_children.append(ch) + assert found, (self.children, self, new) + self.parent.changed() + self.parent.children = l_children + for x in new: + x.parent = self.parent + self.parent = None + + def get_lineno(self): + """Return the line number which generated the invocant node.""" + node = self + while not isinstance(node, Leaf): + if not node.children: + return + node = node.children[0] + return node.lineno + + def changed(self): + if self.parent: + self.parent.changed() + self.was_changed = True + + def remove(self): + """ + Remove the node from the tree. Returns the position of the node in its + parent's children before it was removed. + """ + if self.parent: + for i, node in enumerate(self.parent.children): + if node is self: + self.parent.changed() + del self.parent.children[i] + self.parent = None + return i + + @property + def next_sibling(self): + """ + The node immediately following the invocant in their parent's children + list. If the invocant does not have a next sibling, it is None + """ + if self.parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + try: + return self.parent.children[i+1] + except IndexError: + return None + + @property + def prev_sibling(self): + """ + The node immediately preceding the invocant in their parent's children + list. If the invocant does not have a previous sibling, it is None. + """ + if self.parent is None: + return None + + # Can't use index(); we need to test by identity + for i, child in enumerate(self.parent.children): + if child is self: + if i == 0: + return None + return self.parent.children[i-1] + + def leaves(self): + for child in self.children: + yield from child.leaves() + + def depth(self): + if self.parent is None: + return 0 + return 1 + self.parent.depth() + + def get_suffix(self): + """ + Return the string immediately following the invocant node. This is + effectively equivalent to node.next_sibling.prefix + """ + next_sib = self.next_sibling + if next_sib is None: + return "" + return next_sib.prefix + + if sys.version_info < (3, 0): + def __str__(self): + return str(self).encode("ascii") + +class Node(Base): + + """Concrete implementation for interior nodes.""" + + def __init__(self,type, children, + context=None, + prefix=None, + fixers_applied=None): + """ + Initializer. + + Takes a type constant (a symbol number >= 256), a sequence of + child nodes, and an optional context keyword argument. + + As a side effect, the parent pointers of the children are updated. 
+ """ + assert type >= 256, type + self.type = type + self.children = list(children) + for ch in self.children: + assert ch.parent is None, repr(ch) + ch.parent = self + if prefix is not None: + self.prefix = prefix + if fixers_applied: + self.fixers_applied = fixers_applied[:] + else: + self.fixers_applied = None + + def __repr__(self): + """Return a canonical string representation.""" + return "%s(%s, %r)" % (self.__class__.__name__, + type_repr(self.type), + self.children) + + def __unicode__(self): + """ + Return a pretty string representation. + + This reproduces the input source exactly. + """ + return "".join(map(str, self.children)) + + if sys.version_info > (3, 0): + __str__ = __unicode__ + + def _eq(self, other): + """Compare two nodes for equality.""" + return (self.type, self.children) == (other.type, other.children) + + def clone(self): + """Return a cloned (deep) copy of self.""" + return Node(self.type, [ch.clone() for ch in self.children], + fixers_applied=self.fixers_applied) + + def post_order(self): + """Return a post-order iterator for the tree.""" + for child in self.children: + yield from child.post_order() + yield self + + def pre_order(self): + """Return a pre-order iterator for the tree.""" + yield self + for child in self.children: + yield from child.pre_order() + + @property + def prefix(self): + """ + The whitespace and comments preceding this node in the input. + """ + if not self.children: + return "" + return self.children[0].prefix + + @prefix.setter + def prefix(self, prefix): + if self.children: + self.children[0].prefix = prefix + + def set_child(self, i, child): + """ + Equivalent to 'node.children[i] = child'. This method also sets the + child's parent attribute appropriately. + """ + child.parent = self + self.children[i].parent = None + self.children[i] = child + self.changed() + + def insert_child(self, i, child): + """ + Equivalent to 'node.children.insert(i, child)'. This method also sets + the child's parent attribute appropriately. + """ + child.parent = self + self.children.insert(i, child) + self.changed() + + def append_child(self, child): + """ + Equivalent to 'node.children.append(child)'. This method also sets the + child's parent attribute appropriately. + """ + child.parent = self + self.children.append(child) + self.changed() + + +class Leaf(Base): + + """Concrete implementation for leaf nodes.""" + + # Default values for instance variables + _prefix = "" # Whitespace and comments preceding this token in the input + lineno = 0 # Line where this token starts in the input + column = 0 # Column where this token tarts in the input + + def __init__(self, type, value, + context=None, + prefix=None, + fixers_applied=[]): + """ + Initializer. + + Takes a type constant (a token number < 256), a string value, and an + optional context keyword argument. + """ + assert 0 <= type < 256, type + if context is not None: + self._prefix, (self.lineno, self.column) = context + self.type = type + self.value = value + if prefix is not None: + self._prefix = prefix + self.fixers_applied = fixers_applied[:] + + def __repr__(self): + """Return a canonical string representation.""" + return "%s(%r, %r)" % (self.__class__.__name__, + self.type, + self.value) + + def __unicode__(self): + """ + Return a pretty string representation. + + This reproduces the input source exactly. 
+ """ + return self.prefix + str(self.value) + + if sys.version_info > (3, 0): + __str__ = __unicode__ + + def _eq(self, other): + """Compare two nodes for equality.""" + return (self.type, self.value) == (other.type, other.value) + + def clone(self): + """Return a cloned (deep) copy of self.""" + return Leaf(self.type, self.value, + (self.prefix, (self.lineno, self.column)), + fixers_applied=self.fixers_applied) + + def leaves(self): + yield self + + def post_order(self): + """Return a post-order iterator for the tree.""" + yield self + + def pre_order(self): + """Return a pre-order iterator for the tree.""" + yield self + + @property + def prefix(self): + """ + The whitespace and comments preceding this token in the input. + """ + return self._prefix + + @prefix.setter + def prefix(self, prefix): + self.changed() + self._prefix = prefix + +def convert(gr, raw_node): + """ + Convert raw node information to a Node or Leaf instance. + + This is passed to the parser driver which calls it whenever a reduction of a + grammar rule produces a new complete node, so that the tree is build + strictly bottom-up. + """ + type, value, context, children = raw_node + if children or type in gr.number2symbol: + # If there's exactly one child, return that child instead of + # creating a new node. + if len(children) == 1: + return children[0] + return Node(type, children, context=context) + else: + return Leaf(type, value, context=context) + + +class BasePattern(object): + + """ + A pattern is a tree matching pattern. + + It looks for a specific node type (token or symbol), and + optionally for a specific content. + + This is an abstract base class. There are three concrete + subclasses: + + - LeafPattern matches a single leaf node; + - NodePattern matches a single node (usually non-leaf); + - WildcardPattern matches a sequence of nodes of variable length. + """ + + # Defaults for instance variables + type = None # Node type (token if < 256, symbol if >= 256) + content = None # Optional content matching pattern + name = None # Optional name used to store match in results dict + + def __new__(cls, *args, **kwds): + """Constructor that prevents BasePattern from being instantiated.""" + assert cls is not BasePattern, "Cannot instantiate BasePattern" + return object.__new__(cls) + + def __repr__(self): + args = [type_repr(self.type), self.content, self.name] + while args and args[-1] is None: + del args[-1] + return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args))) + + def optimize(self): + """ + A subclass can define this as a hook for optimizations. + + Returns either self or another node with the same effect. + """ + return self + + def match(self, node, results=None): + """ + Does this pattern exactly match a node? + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + Default implementation for non-wildcard patterns. + """ + if self.type is not None and node.type != self.type: + return False + if self.content is not None: + r = None + if results is not None: + r = {} + if not self._submatch(node, r): + return False + if r: + results.update(r) + if results is not None and self.name: + results[self.name] = node + return True + + def match_seq(self, nodes, results=None): + """ + Does this pattern exactly match a sequence of nodes? + + Default implementation for non-wildcard patterns. 
+ """ + if len(nodes) != 1: + return False + return self.match(nodes[0], results) + + def generate_matches(self, nodes): + """ + Generator yielding all matches for this pattern. + + Default implementation for non-wildcard patterns. + """ + r = {} + if nodes and self.match(nodes[0], r): + yield 1, r + + +class LeafPattern(BasePattern): + + def __init__(self, type=None, content=None, name=None): + """ + Initializer. Takes optional type, content, and name. + + The type, if given must be a token type (< 256). If not given, + this matches any *leaf* node; the content may still be required. + + The content, if given, must be a string. + + If a name is given, the matching node is stored in the results + dict under that key. + """ + if type is not None: + assert 0 <= type < 256, type + if content is not None: + assert isinstance(content, str), repr(content) + self.type = type + self.content = content + self.name = name + + def match(self, node, results=None): + """Override match() to insist on a leaf node.""" + if not isinstance(node, Leaf): + return False + return BasePattern.match(self, node, results) + + def _submatch(self, node, results=None): + """ + Match the pattern's content to the node's children. + + This assumes the node type matches and self.content is not None. + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + When returning False, the results dict may still be updated. + """ + return self.content == node.value + + +class NodePattern(BasePattern): + + wildcards = False + + def __init__(self, type=None, content=None, name=None): + """ + Initializer. Takes optional type, content, and name. + + The type, if given, must be a symbol type (>= 256). If the + type is None this matches *any* single node (leaf or not), + except if content is not None, in which it only matches + non-leaf nodes that also match the content pattern. + + The content, if not None, must be a sequence of Patterns that + must match the node's children exactly. If the content is + given, the type must not be None. + + If a name is given, the matching node is stored in the results + dict under that key. + """ + if type is not None: + assert type >= 256, type + if content is not None: + assert not isinstance(content, str), repr(content) + content = list(content) + for i, item in enumerate(content): + assert isinstance(item, BasePattern), (i, item) + if isinstance(item, WildcardPattern): + self.wildcards = True + self.type = type + self.content = content + self.name = name + + def _submatch(self, node, results=None): + """ + Match the pattern's content to the node's children. + + This assumes the node type matches and self.content is not None. + + Returns True if it matches, False if not. + + If results is not None, it must be a dict which will be + updated with the nodes matching named subpatterns. + + When returning False, the results dict may still be updated. + """ + if self.wildcards: + for c, r in generate_matches(self.content, node.children): + if c == len(node.children): + if results is not None: + results.update(r) + return True + return False + if len(self.content) != len(node.children): + return False + for subpattern, child in zip(self.content, node.children): + if not subpattern.match(child, results): + return False + return True + + +class WildcardPattern(BasePattern): + + """ + A wildcard pattern can match zero or more nodes. 
+ + This has all the flexibility needed to implement patterns like: + + .* .+ .? .{m,n} + (a b c | d e | f) + (...)* (...)+ (...)? (...){m,n} + + except it always uses non-greedy matching. + """ + + def __init__(self, content=None, min=0, max=HUGE, name=None): + """ + Initializer. + + Args: + content: optional sequence of subsequences of patterns; + if absent, matches one node; + if present, each subsequence is an alternative [*] + min: optional minimum number of times to match, default 0 + max: optional maximum number of times to match, default HUGE + name: optional name assigned to this match + + [*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is + equivalent to (a b c | d e | f g h); if content is None, + this is equivalent to '.' in regular expression terms. + The min and max parameters work as follows: + min=0, max=maxint: .* + min=1, max=maxint: .+ + min=0, max=1: .? + min=1, max=1: . + If content is not None, replace the dot with the parenthesized + list of alternatives, e.g. (a b c | d e | f g h)* + """ + assert 0 <= min <= max <= HUGE, (min, max) + if content is not None: + content = tuple(map(tuple, content)) # Protect against alterations + # Check sanity of alternatives + assert len(content), repr(content) # Can't have zero alternatives + for alt in content: + assert len(alt), repr(alt) # Can have empty alternatives + self.content = content + self.min = min + self.max = max + self.name = name + + def optimize(self): + """Optimize certain stacked wildcard patterns.""" + subpattern = None + if (self.content is not None and + len(self.content) == 1 and len(self.content[0]) == 1): + subpattern = self.content[0][0] + if self.min == 1 and self.max == 1: + if self.content is None: + return NodePattern(name=self.name) + if subpattern is not None and self.name == subpattern.name: + return subpattern.optimize() + if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and + subpattern.min <= 1 and self.name == subpattern.name): + return WildcardPattern(subpattern.content, + self.min*subpattern.min, + self.max*subpattern.max, + subpattern.name) + return self + + def match(self, node, results=None): + """Does this pattern exactly match a node?""" + return self.match_seq([node], results) + + def match_seq(self, nodes, results=None): + """Does this pattern exactly match a sequence of nodes?""" + for c, r in self.generate_matches(nodes): + if c == len(nodes): + if results is not None: + results.update(r) + if self.name: + results[self.name] = list(nodes) + return True + return False + + def generate_matches(self, nodes): + """ + Generator yielding matches for a sequence of nodes. + + Args: + nodes: sequence of nodes + + Yields: + (count, results) tuples where: + count: the match comprises nodes[:count]; + results: dict containing named submatches. + """ + if self.content is None: + # Shortcut for special case (see __init__.__doc__) + for count in range(self.min, 1 + min(len(nodes), self.max)): + r = {} + if self.name: + r[self.name] = nodes[:count] + yield count, r + elif self.name == "bare_name": + yield self._bare_name_matches(nodes) + else: + # The reason for this is that hitting the recursion limit usually + # results in some ugly messages about how RuntimeErrors are being + # ignored. We only have to do this on CPython, though, because other + # implementations don't have this nasty bug in the first place. 
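# A minimal, self-contained sketch of the fallback idiom used just below:
# try the recursive strategy first and switch to an iterative one when
# CPython's recursion limit is hit (RecursionError is a subclass of
# RuntimeError, which is why the surrounding code catches RuntimeError).
# deep_count/robust_count are illustrative names, not lib2to3 internals.
def deep_count(n):
    # Deliberately recursive; raises RecursionError for large n.
    return 0 if n == 0 else 1 + deep_count(n - 1)

def robust_count(n):
    try:
        return deep_count(n)
    except RecursionError:
        return sum(1 for _ in range(n))   # stack-safe iterative fallback

assert robust_count(100000) == 100000     # large n exercises the fallback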
+ if hasattr(sys, "getrefcount"): + save_stderr = sys.stderr + sys.stderr = StringIO() + try: + for count, r in self._recursive_matches(nodes, 0): + if self.name: + r[self.name] = nodes[:count] + yield count, r + except RuntimeError: + # Fall back to the iterative pattern matching scheme if the + # recursive scheme hits the recursion limit (RecursionError). + for count, r in self._iterative_matches(nodes): + if self.name: + r[self.name] = nodes[:count] + yield count, r + finally: + if hasattr(sys, "getrefcount"): + sys.stderr = save_stderr + + def _iterative_matches(self, nodes): + """Helper to iteratively yield the matches.""" + nodelen = len(nodes) + if 0 >= self.min: + yield 0, {} + + results = [] + # generate matches that use just one alt from self.content + for alt in self.content: + for c, r in generate_matches(alt, nodes): + yield c, r + results.append((c, r)) + + # for each match, iterate down the nodes + while results: + new_results = [] + for c0, r0 in results: + # stop if the entire set of nodes has been matched + if c0 < nodelen and c0 <= self.max: + for alt in self.content: + for c1, r1 in generate_matches(alt, nodes[c0:]): + if c1 > 0: + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r + new_results.append((c0 + c1, r)) + results = new_results + + def _bare_name_matches(self, nodes): + """Special optimized matcher for bare_name.""" + count = 0 + r = {} + done = False + max = len(nodes) + while not done and count < max: + done = True + for leaf in self.content: + if leaf[0].match(nodes[count], r): + count += 1 + done = False + break + r[self.name] = nodes[:count] + return count, r + + def _recursive_matches(self, nodes, count): + """Helper to recursively yield the matches.""" + assert self.content is not None + if count >= self.min: + yield 0, {} + if count < self.max: + for alt in self.content: + for c0, r0 in generate_matches(alt, nodes): + for c1, r1 in self._recursive_matches(nodes[c0:], count+1): + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r + + +class NegatedPattern(BasePattern): + + def __init__(self, content=None): + """ + Initializer. + + The argument is either a pattern or None. If it is None, this + only matches an empty sequence (effectively '$' in regex + lingo). If it is not None, this matches whenever the argument + pattern doesn't have any matches. + """ + if content is not None: + assert isinstance(content, BasePattern), repr(content) + self.content = content + + def match(self, node): + # We never match a node in its entirety + return False + + def match_seq(self, nodes): + # We only match an empty sequence of nodes in its entirety + return len(nodes) == 0 + + def generate_matches(self, nodes): + if self.content is None: + # Return a match if there is an empty sequence + if len(nodes) == 0: + yield 0, {} + else: + # Return a match if the argument pattern has no matches + for c, r in self.content.generate_matches(nodes): + return + yield 0, {} + + +def generate_matches(patterns, nodes): + """ + Generator yielding matches for a sequence of patterns and nodes. + + Args: + patterns: a sequence of patterns + nodes: a sequence of nodes + + Yields: + (count, results) tuples where: + count: the entire sequence of patterns matches nodes[:count]; + results: dict containing named submatches. 
+ """ + if not patterns: + yield 0, {} + else: + p, rest = patterns[0], patterns[1:] + for c0, r0 in p.generate_matches(nodes): + if not rest: + yield c0, r0 + else: + for c1, r1 in generate_matches(rest, nodes[c0:]): + r = {} + r.update(r0) + r.update(r1) + yield c0 + c1, r diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/__init__.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3afb227d27837d4ba552e197ac92728ce216450f Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/connection.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/connection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d66419c9751d3e7029aa75ef6783e906be376fed Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/connection.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/context.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/context.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..827f7c33d29511435ce643491fab0d4f4d299a23 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/context.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/forkserver.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/forkserver.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e2b51b0702ca555fed15ceaa1094c1a3e734ed34 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/forkserver.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/heap.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/heap.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..126e7df85bf7544d1429df400adb916ecf276251 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/heap.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/managers.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/managers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db4079072ee1d1e1b0aa84c92dfaa687b0014b0d Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/managers.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/pool.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/pool.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..33601e294177d4da423cea290d3844b372309537 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/pool.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/popen_fork.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/popen_fork.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a67f5ee39776bc86a7fa51b1b2e9fa43b58f5531 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/popen_fork.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/popen_forkserver.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/popen_forkserver.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..2c70f0c2eb6152ca67262795b81487b94dc28398 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/popen_forkserver.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/popen_spawn_posix.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/popen_spawn_posix.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68323dbb601e36ba06467dfe4439638b84e25fb0 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/popen_spawn_posix.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/popen_spawn_win32.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/popen_spawn_win32.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7e38646523cabef75f992558619cbc86aa34788f Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/popen_spawn_win32.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/process.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/process.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..418416c0447308e18371ae62a8b430ef04a0da63 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/process.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/queues.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/queues.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b1d66715b3e82eb054160d17d631a9451d855530 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/queues.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/reduction.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/reduction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..060c9ce60397f7a09b12a874b416f16d74ca798a Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/reduction.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/resource_sharer.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/resource_sharer.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9456cfbd9cc06470a6070428283d379b4f74a15c Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/resource_sharer.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/resource_tracker.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/resource_tracker.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b3a424afd5062508a9a790306375972d5ac43d29 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/resource_tracker.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/shared_memory.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/shared_memory.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6f357bab3f29384e65f5bcbf5548bdcdc470f364 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/shared_memory.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/sharedctypes.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/sharedctypes.cpython-310.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..ad51c76a5c907028405ca0f5cea9f8a2ba55527f Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/sharedctypes.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/spawn.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/spawn.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e1e2885cd40a86be4222bf26e1f642b1213e5d56 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/spawn.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/synchronize.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/synchronize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a9ab2c477ada866618b51c0ff8a614fcb9ce0b15 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/synchronize.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/__pycache__/util.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/__pycache__/util.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..fb81f02a5febd322f9cf6d6426b0850cf506ab64 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/__pycache__/util.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/dummy/__init__.py b/llava/lib/python3.10/multiprocessing/dummy/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6a1468609e347b3a0b9281e5c9e6ec311fcb37e5 --- /dev/null +++ b/llava/lib/python3.10/multiprocessing/dummy/__init__.py @@ -0,0 +1,126 @@ +# +# Support for the API of the multiprocessing package using threads +# +# multiprocessing/dummy/__init__.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
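# A hedged usage sketch of the thread-backed API this module provides:
# multiprocessing.dummy mirrors the multiprocessing API but backs each
# "process" with a thread, which suits I/O-bound work. word_length is an
# illustrative name, not part of the module.
from multiprocessing.dummy import Pool as _ThreadPool

def word_length(word):
    return len(word)                      # stand-in for an I/O-bound task

if __name__ == '__main__':
    with _ThreadPool(4) as pool:          # four worker threads, not processes
        print(pool.map(word_length, ['alpha', 'beta', 'gamma']))  # [5, 4, 5]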
+# + +__all__ = [ + 'Process', 'current_process', 'active_children', 'freeze_support', + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', + 'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue' + ] + +# +# Imports +# + +import threading +import sys +import weakref +import array + +from .connection import Pipe +from threading import Lock, RLock, Semaphore, BoundedSemaphore +from threading import Event, Condition, Barrier +from queue import Queue + +# +# +# + +class DummyProcess(threading.Thread): + + def __init__(self, group=None, target=None, name=None, args=(), kwargs={}): + threading.Thread.__init__(self, group, target, name, args, kwargs) + self._pid = None + self._children = weakref.WeakKeyDictionary() + self._start_called = False + self._parent = current_process() + + def start(self): + if self._parent is not current_process(): + raise RuntimeError( + "Parent is {0!r} but current_process is {1!r}".format( + self._parent, current_process())) + self._start_called = True + if hasattr(self._parent, '_children'): + self._parent._children[self] = None + threading.Thread.start(self) + + @property + def exitcode(self): + if self._start_called and not self.is_alive(): + return 0 + else: + return None + +# +# +# + +Process = DummyProcess +current_process = threading.current_thread +current_process()._children = weakref.WeakKeyDictionary() + +def active_children(): + children = current_process()._children + for p in list(children): + if not p.is_alive(): + children.pop(p, None) + return list(children) + +def freeze_support(): + pass + +# +# +# + +class Namespace(object): + def __init__(self, /, **kwds): + self.__dict__.update(kwds) + def __repr__(self): + items = list(self.__dict__.items()) + temp = [] + for name, value in items: + if not name.startswith('_'): + temp.append('%s=%r' % (name, value)) + temp.sort() + return '%s(%s)' % (self.__class__.__name__, ', '.join(temp)) + +dict = dict +list = list + +def Array(typecode, sequence, lock=True): + return array.array(typecode, sequence) + +class Value(object): + def __init__(self, typecode, value, lock=True): + self._typecode = typecode + self._value = value + + @property + def value(self): + return self._value + + @value.setter + def value(self, value): + self._value = value + + def __repr__(self): + return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value) + +def Manager(): + return sys.modules[__name__] + +def shutdown(): + pass + +def Pool(processes=None, initializer=None, initargs=()): + from ..pool import ThreadPool + return ThreadPool(processes, initializer, initargs) + +JoinableQueue = Queue diff --git a/llava/lib/python3.10/multiprocessing/dummy/__pycache__/__init__.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/dummy/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9839598a2bf9f1e53a4655eea2ebf0c3e37b5ac0 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/dummy/__pycache__/__init__.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/dummy/__pycache__/connection.cpython-310.pyc b/llava/lib/python3.10/multiprocessing/dummy/__pycache__/connection.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..526bf54053fd0b4e3e82bea79f703ad06508ab63 Binary files /dev/null and b/llava/lib/python3.10/multiprocessing/dummy/__pycache__/connection.cpython-310.pyc differ diff --git a/llava/lib/python3.10/multiprocessing/dummy/connection.py 
b/llava/lib/python3.10/multiprocessing/dummy/connection.py new file mode 100644 index 0000000000000000000000000000000000000000..f0ce320fcf514083f3a6477e87abf40e9719285a --- /dev/null +++ b/llava/lib/python3.10/multiprocessing/dummy/connection.py @@ -0,0 +1,75 @@ +# +# Analogue of `multiprocessing.connection` which uses queues instead of sockets +# +# multiprocessing/dummy/connection.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ 'Client', 'Listener', 'Pipe' ] + +from queue import Queue + + +families = [None] + + +class Listener(object): + + def __init__(self, address=None, family=None, backlog=1): + self._backlog_queue = Queue(backlog) + + def accept(self): + return Connection(*self._backlog_queue.get()) + + def close(self): + self._backlog_queue = None + + @property + def address(self): + return self._backlog_queue + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() + + +def Client(address): + _in, _out = Queue(), Queue() + address.put((_out, _in)) + return Connection(_in, _out) + + +def Pipe(duplex=True): + a, b = Queue(), Queue() + return Connection(a, b), Connection(b, a) + + +class Connection(object): + + def __init__(self, _in, _out): + self._out = _out + self._in = _in + self.send = self.send_bytes = _out.put + self.recv = self.recv_bytes = _in.get + + def poll(self, timeout=0.0): + if self._in.qsize() > 0: + return True + if timeout <= 0.0: + return False + with self._in.not_empty: + self._in.not_empty.wait(timeout) + return self._in.qsize() > 0 + + def close(self): + pass + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_value, exc_tb): + self.close() diff --git a/llava/lib/python3.10/multiprocessing/heap.py b/llava/lib/python3.10/multiprocessing/heap.py new file mode 100644 index 0000000000000000000000000000000000000000..6217dfe12689b379f2dad6f1e4bc3bbf6af8f60a --- /dev/null +++ b/llava/lib/python3.10/multiprocessing/heap.py @@ -0,0 +1,337 @@ +# +# Module which supports allocation of memory from an mmap +# +# multiprocessing/heap.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import bisect +from collections import defaultdict +import mmap +import os +import sys +import tempfile +import threading + +from .context import reduction, assert_spawning +from . import util + +__all__ = ['BufferWrapper'] + +# +# Inheritable class which wraps an mmap, and from which blocks can be allocated +# + +if sys.platform == 'win32': + + import _winapi + + class Arena(object): + """ + A shared memory area backed by anonymous memory (Windows). + """ + + _rand = tempfile._RandomNameSequence() + + def __init__(self, size): + self.size = size + for i in range(100): + name = 'pym-%d-%s' % (os.getpid(), next(self._rand)) + buf = mmap.mmap(-1, size, tagname=name) + if _winapi.GetLastError() == 0: + break + # We have reopened a preexisting mmap. + buf.close() + else: + raise FileExistsError('Cannot find name for new mmap') + self.name = name + self.buffer = buf + self._state = (self.size, self.name) + + def __getstate__(self): + assert_spawning(self) + return self._state + + def __setstate__(self, state): + self.size, self.name = self._state = state + # Reopen existing mmap + self.buffer = mmap.mmap(-1, self.size, tagname=self.name) + # XXX Temporarily preventing buildbot failures while determining + # XXX the correct long-term fix. 
See issue 23060 + #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS + +else: + + class Arena(object): + """ + A shared memory area backed by a temporary file (POSIX). + """ + + if sys.platform == 'linux': + _dir_candidates = ['/dev/shm'] + else: + _dir_candidates = [] + + def __init__(self, size, fd=-1): + self.size = size + self.fd = fd + if fd == -1: + # Arena is created anew (if fd != -1, it means we're coming + # from rebuild_arena() below) + self.fd, name = tempfile.mkstemp( + prefix='pym-%d-'%os.getpid(), + dir=self._choose_dir(size)) + os.unlink(name) + util.Finalize(self, os.close, (self.fd,)) + os.ftruncate(self.fd, size) + self.buffer = mmap.mmap(self.fd, self.size) + + def _choose_dir(self, size): + # Choose a non-storage backed directory if possible, + # to improve performance + for d in self._dir_candidates: + st = os.statvfs(d) + if st.f_bavail * st.f_frsize >= size: # enough free space? + return d + return util.get_temp_dir() + + def reduce_arena(a): + if a.fd == -1: + raise ValueError('Arena is unpicklable because ' + 'forking was enabled when it was created') + return rebuild_arena, (a.size, reduction.DupFd(a.fd)) + + def rebuild_arena(size, dupfd): + return Arena(size, dupfd.detach()) + + reduction.register(Arena, reduce_arena) + +# +# Class allowing allocation of chunks of memory from arenas +# + +class Heap(object): + + # Minimum malloc() alignment + _alignment = 8 + + _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB + _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2 + + def __init__(self, size=mmap.PAGESIZE): + self._lastpid = os.getpid() + self._lock = threading.Lock() + # Current arena allocation size + self._size = size + # A sorted list of available block sizes in arenas + self._lengths = [] + + # Free block management: + # - map each block size to a list of `(Arena, start, stop)` blocks + self._len_to_seq = {} + # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block + # starting at that offset + self._start_to_block = {} + # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block + # ending at that offset + self._stop_to_block = {} + + # Map arenas to their `(Arena, start, stop)` blocks in use + self._allocated_blocks = defaultdict(set) + self._arenas = [] + + # List of pending blocks to free - see comment in free() below + self._pending_free_blocks = [] + + # Statistics + self._n_mallocs = 0 + self._n_frees = 0 + + @staticmethod + def _roundup(n, alignment): + # alignment must be a power of 2 + mask = alignment - 1 + return (n + mask) & ~mask + + def _new_arena(self, size): + # Create a new arena with at least the given *size* + length = self._roundup(max(self._size, size), mmap.PAGESIZE) + # We carve larger and larger arenas, for efficiency, until we + # reach a large-ish size (roughly L3 cache-sized) + if self._size < self._DOUBLE_ARENA_SIZE_UNTIL: + self._size *= 2 + util.info('allocating a new mmap of length %d', length) + arena = Arena(length) + self._arenas.append(arena) + return (arena, 0, length) + + def _discard_arena(self, arena): + # Possibly delete the given (unused) arena + length = arena.size + # Reusing an existing arena is faster than creating a new one, so + # we only reclaim space if it's large enough. 
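# A minimal sketch (illustrative names only, not Heap internals) of the
# best-fit lookup that _malloc performs further below: keep the free-block
# lengths sorted and bisect to the first length that satisfies the request.
import bisect as _bisect

_free_lengths = [4096, 8192, 32768]            # sorted free-block sizes
_request = 5000
_i = _bisect.bisect_left(_free_lengths, _request)
if _i == len(_free_lengths):
    pass                                       # nothing fits: carve a new arena
else:
    _best = _free_lengths[_i]                  # reuse the 8192-byte block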
+ if length < self._DISCARD_FREE_SPACE_LARGER_THAN: + return + blocks = self._allocated_blocks.pop(arena) + assert not blocks + del self._start_to_block[(arena, 0)] + del self._stop_to_block[(arena, length)] + self._arenas.remove(arena) + seq = self._len_to_seq[length] + seq.remove((arena, 0, length)) + if not seq: + del self._len_to_seq[length] + self._lengths.remove(length) + + def _malloc(self, size): + # returns a large enough block -- it might be much larger + i = bisect.bisect_left(self._lengths, size) + if i == len(self._lengths): + return self._new_arena(size) + else: + length = self._lengths[i] + seq = self._len_to_seq[length] + block = seq.pop() + if not seq: + del self._len_to_seq[length], self._lengths[i] + + (arena, start, stop) = block + del self._start_to_block[(arena, start)] + del self._stop_to_block[(arena, stop)] + return block + + def _add_free_block(self, block): + # make block available and try to merge with its neighbours in the arena + (arena, start, stop) = block + + try: + prev_block = self._stop_to_block[(arena, start)] + except KeyError: + pass + else: + start, _ = self._absorb(prev_block) + + try: + next_block = self._start_to_block[(arena, stop)] + except KeyError: + pass + else: + _, stop = self._absorb(next_block) + + block = (arena, start, stop) + length = stop - start + + try: + self._len_to_seq[length].append(block) + except KeyError: + self._len_to_seq[length] = [block] + bisect.insort(self._lengths, length) + + self._start_to_block[(arena, start)] = block + self._stop_to_block[(arena, stop)] = block + + def _absorb(self, block): + # deregister this block so it can be merged with a neighbour + (arena, start, stop) = block + del self._start_to_block[(arena, start)] + del self._stop_to_block[(arena, stop)] + + length = stop - start + seq = self._len_to_seq[length] + seq.remove(block) + if not seq: + del self._len_to_seq[length] + self._lengths.remove(length) + + return start, stop + + def _remove_allocated_block(self, block): + arena, start, stop = block + blocks = self._allocated_blocks[arena] + blocks.remove((start, stop)) + if not blocks: + # Arena is entirely free, discard it from this process + self._discard_arena(arena) + + def _free_pending_blocks(self): + # Free all the blocks in the pending list - called with the lock held. + while True: + try: + block = self._pending_free_blocks.pop() + except IndexError: + break + self._add_free_block(block) + self._remove_allocated_block(block) + + def free(self, block): + # free a block returned by malloc() + # Since free() can be called asynchronously by the GC, it could happen + # that it's called while self._lock is held: in that case, + # self._lock.acquire() would deadlock (issue #12352). To avoid that, a + # trylock is used instead, and if the lock can't be acquired + # immediately, the block is added to a list of blocks to be freed + # synchronously sometimes later from malloc() or free(), by calling + # _free_pending_blocks() (appending and retrieving from a list is not + # strictly thread-safe but under CPython it's atomic thanks to the GIL). 
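# A self-contained sketch of the trylock-or-defer idiom described in the
# comment above; _sketch_lock/_sketch_pending/sketch_free are illustrative
# names, not Heap internals.
import threading as _threading

_sketch_lock = _threading.Lock()
_sketch_pending = []

def sketch_free(block):
    if not _sketch_lock.acquire(False):   # trylock: never block (GC-safe)
        _sketch_pending.append(block)     # defer; drained later under the lock
        return
    try:
        while _sketch_pending:            # drain earlier deferrals first
            _sketch_pending.pop()
        # ... then release `block` itself ...
    finally:
        _sketch_lock.release()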
+ if os.getpid() != self._lastpid: + raise ValueError( + "My pid ({0:n}) is not last pid {1:n}".format( + os.getpid(),self._lastpid)) + if not self._lock.acquire(False): + # can't acquire the lock right now, add the block to the list of + # pending blocks to free + self._pending_free_blocks.append(block) + else: + # we hold the lock + try: + self._n_frees += 1 + self._free_pending_blocks() + self._add_free_block(block) + self._remove_allocated_block(block) + finally: + self._lock.release() + + def malloc(self, size): + # return a block of right size (possibly rounded up) + if size < 0: + raise ValueError("Size {0:n} out of range".format(size)) + if sys.maxsize <= size: + raise OverflowError("Size {0:n} too large".format(size)) + if os.getpid() != self._lastpid: + self.__init__() # reinitialize after fork + with self._lock: + self._n_mallocs += 1 + # allow pending blocks to be marked available + self._free_pending_blocks() + size = self._roundup(max(size, 1), self._alignment) + (arena, start, stop) = self._malloc(size) + real_stop = start + size + if real_stop < stop: + # if the returned block is larger than necessary, mark + # the remainder available + self._add_free_block((arena, real_stop, stop)) + self._allocated_blocks[arena].add((start, real_stop)) + return (arena, start, real_stop) + +# +# Class wrapping a block allocated out of a Heap -- can be inherited by child process +# + +class BufferWrapper(object): + + _heap = Heap() + + def __init__(self, size): + if size < 0: + raise ValueError("Size {0:n} out of range".format(size)) + if sys.maxsize <= size: + raise OverflowError("Size {0:n} too large".format(size)) + block = BufferWrapper._heap.malloc(size) + self._state = (block, size) + util.Finalize(self, BufferWrapper._heap.free, args=(block,)) + + def create_memoryview(self): + (arena, start, stop), size = self._state + return memoryview(arena.buffer)[start:start+size] diff --git a/llava/lib/python3.10/multiprocessing/pool.py b/llava/lib/python3.10/multiprocessing/pool.py new file mode 100644 index 0000000000000000000000000000000000000000..961d7e5991847af1ea8034a5a37d9159cf4614ae --- /dev/null +++ b/llava/lib/python3.10/multiprocessing/pool.py @@ -0,0 +1,957 @@ +# +# Module providing the `Pool` class for managing a process pool +# +# multiprocessing/pool.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['Pool', 'ThreadPool'] + +# +# Imports +# + +import collections +import itertools +import os +import queue +import threading +import time +import traceback +import types +import warnings + +# If threading is available then ThreadPool should be provided. Therefore +# we avoid top-level imports which are liable to fail on some systems. +from . import util +from . 
import get_context, TimeoutError +from .connection import wait + +# +# Constants representing the state of a pool +# + +INIT = "INIT" +RUN = "RUN" +CLOSE = "CLOSE" +TERMINATE = "TERMINATE" + +# +# Miscellaneous +# + +job_counter = itertools.count() + +def mapstar(args): + return list(map(*args)) + +def starmapstar(args): + return list(itertools.starmap(args[0], args[1])) + +# +# Hack to embed stringification of remote traceback in local traceback +# + +class RemoteTraceback(Exception): + def __init__(self, tb): + self.tb = tb + def __str__(self): + return self.tb + +class ExceptionWithTraceback: + def __init__(self, exc, tb): + tb = traceback.format_exception(type(exc), exc, tb) + tb = ''.join(tb) + self.exc = exc + self.tb = '\n"""\n%s"""' % tb + def __reduce__(self): + return rebuild_exc, (self.exc, self.tb) + +def rebuild_exc(exc, tb): + exc.__cause__ = RemoteTraceback(tb) + return exc + +# +# Code run by worker processes +# + +class MaybeEncodingError(Exception): + """Wraps possible unpickleable errors, so they can be + safely sent through the socket.""" + + def __init__(self, exc, value): + self.exc = repr(exc) + self.value = repr(value) + super(MaybeEncodingError, self).__init__(self.exc, self.value) + + def __str__(self): + return "Error sending result: '%s'. Reason: '%s'" % (self.value, + self.exc) + + def __repr__(self): + return "<%s: %s>" % (self.__class__.__name__, self) + + +def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None, + wrap_exception=False): + if (maxtasks is not None) and not (isinstance(maxtasks, int) + and maxtasks >= 1): + raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks)) + put = outqueue.put + get = inqueue.get + if hasattr(inqueue, '_writer'): + inqueue._writer.close() + outqueue._reader.close() + + if initializer is not None: + initializer(*initargs) + + completed = 0 + while maxtasks is None or (maxtasks and completed < maxtasks): + try: + task = get() + except (EOFError, OSError): + util.debug('worker got EOFError or OSError -- exiting') + break + + if task is None: + util.debug('worker got sentinel -- exiting') + break + + job, i, func, args, kwds = task + try: + result = (True, func(*args, **kwds)) + except Exception as e: + if wrap_exception and func is not _helper_reraises_exception: + e = ExceptionWithTraceback(e, e.__traceback__) + result = (False, e) + try: + put((job, i, result)) + except Exception as e: + wrapped = MaybeEncodingError(e, result[1]) + util.debug("Possible encoding error while sending result: %s" % ( + wrapped)) + put((job, i, (False, wrapped))) + + task = job = result = func = args = kwds = None + completed += 1 + util.debug('worker exiting after %d tasks' % completed) + +def _helper_reraises_exception(ex): + 'Pickle-able helper function for use by _guarded_task_generation.' + raise ex + +# +# Class representing a process pool +# + +class _PoolCache(dict): + """ + Class that implements a cache for the Pool class that will notify + the pool management threads every time the cache is emptied. The + notification is done by the use of a queue that is provided when + instantiating the cache. + """ + def __init__(self, /, *args, notifier=None, **kwds): + self.notifier = notifier + super().__init__(*args, **kwds) + + def __delitem__(self, item): + super().__delitem__(item) + + # Notify that the cache is empty. This is important because the + # pool keeps maintaining workers until the cache gets drained. 
This + # eliminates a race condition in which a task is finished after the + # the pool's _handle_workers method has enter another iteration of the + # loop. In this situation, the only event that can wake up the pool + # is the cache to be emptied (no more tasks available). + if not self: + self.notifier.put(None) + +class Pool(object): + ''' + Class which supports an async version of applying functions to arguments. + ''' + _wrap_exception = True + + @staticmethod + def Process(ctx, *args, **kwds): + return ctx.Process(*args, **kwds) + + def __init__(self, processes=None, initializer=None, initargs=(), + maxtasksperchild=None, context=None): + # Attributes initialized early to make sure that they exist in + # __del__() if __init__() raises an exception + self._pool = [] + self._state = INIT + + self._ctx = context or get_context() + self._setup_queues() + self._taskqueue = queue.SimpleQueue() + # The _change_notifier queue exist to wake up self._handle_workers() + # when the cache (self._cache) is empty or when there is a change in + # the _state variable of the thread that runs _handle_workers. + self._change_notifier = self._ctx.SimpleQueue() + self._cache = _PoolCache(notifier=self._change_notifier) + self._maxtasksperchild = maxtasksperchild + self._initializer = initializer + self._initargs = initargs + + if processes is None: + processes = os.cpu_count() or 1 + if processes < 1: + raise ValueError("Number of processes must be at least 1") + if maxtasksperchild is not None: + if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0: + raise ValueError("maxtasksperchild must be a positive int or None") + + if initializer is not None and not callable(initializer): + raise TypeError('initializer must be a callable') + + self._processes = processes + try: + self._repopulate_pool() + except Exception: + for p in self._pool: + if p.exitcode is None: + p.terminate() + for p in self._pool: + p.join() + raise + + sentinels = self._get_sentinels() + + self._worker_handler = threading.Thread( + target=Pool._handle_workers, + args=(self._cache, self._taskqueue, self._ctx, self.Process, + self._processes, self._pool, self._inqueue, self._outqueue, + self._initializer, self._initargs, self._maxtasksperchild, + self._wrap_exception, sentinels, self._change_notifier) + ) + self._worker_handler.daemon = True + self._worker_handler._state = RUN + self._worker_handler.start() + + + self._task_handler = threading.Thread( + target=Pool._handle_tasks, + args=(self._taskqueue, self._quick_put, self._outqueue, + self._pool, self._cache) + ) + self._task_handler.daemon = True + self._task_handler._state = RUN + self._task_handler.start() + + self._result_handler = threading.Thread( + target=Pool._handle_results, + args=(self._outqueue, self._quick_get, self._cache) + ) + self._result_handler.daemon = True + self._result_handler._state = RUN + self._result_handler.start() + + self._terminate = util.Finalize( + self, self._terminate_pool, + args=(self._taskqueue, self._inqueue, self._outqueue, self._pool, + self._change_notifier, self._worker_handler, self._task_handler, + self._result_handler, self._cache), + exitpriority=15 + ) + self._state = RUN + + # Copy globals as function locals to make sure that they are available + # during Python shutdown when the Pool is destroyed. 
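# A minimal sketch of the default-argument idiom __del__ uses just below:
# module globals may already be cleared during interpreter shutdown, so the
# values __del__ needs are captured as parameter defaults at definition
# time. Resource/STATE_RUN are illustrative names, not pool internals.
import warnings as _warnings

STATE_RUN = 'RUN'

class Resource:
    state = STATE_RUN
    def __del__(self, _warn=_warnings.warn, RUN=STATE_RUN):
        # _warn and RUN stay reachable even if the module dict is gone.
        if self.state == RUN:
            _warn(f'unclosed resource {self!r}', ResourceWarning, source=self)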
+ def __del__(self, _warn=warnings.warn, RUN=RUN): + if self._state == RUN: + _warn(f"unclosed running multiprocessing pool {self!r}", + ResourceWarning, source=self) + if getattr(self, '_change_notifier', None) is not None: + self._change_notifier.put(None) + + def __repr__(self): + cls = self.__class__ + return (f'<{cls.__module__}.{cls.__qualname__} ' + f'state={self._state} ' + f'pool_size={len(self._pool)}>') + + def _get_sentinels(self): + task_queue_sentinels = [self._outqueue._reader] + self_notifier_sentinels = [self._change_notifier._reader] + return [*task_queue_sentinels, *self_notifier_sentinels] + + @staticmethod + def _get_worker_sentinels(workers): + return [worker.sentinel for worker in + workers if hasattr(worker, "sentinel")] + + @staticmethod + def _join_exited_workers(pool): + """Cleanup after any worker processes which have exited due to reaching + their specified lifetime. Returns True if any workers were cleaned up. + """ + cleaned = False + for i in reversed(range(len(pool))): + worker = pool[i] + if worker.exitcode is not None: + # worker exited + util.debug('cleaning up worker %d' % i) + worker.join() + cleaned = True + del pool[i] + return cleaned + + def _repopulate_pool(self): + return self._repopulate_pool_static(self._ctx, self.Process, + self._processes, + self._pool, self._inqueue, + self._outqueue, self._initializer, + self._initargs, + self._maxtasksperchild, + self._wrap_exception) + + @staticmethod + def _repopulate_pool_static(ctx, Process, processes, pool, inqueue, + outqueue, initializer, initargs, + maxtasksperchild, wrap_exception): + """Bring the number of pool processes up to the specified number, + for use after reaping workers which have exited. + """ + for i in range(processes - len(pool)): + w = Process(ctx, target=worker, + args=(inqueue, outqueue, + initializer, + initargs, maxtasksperchild, + wrap_exception)) + w.name = w.name.replace('Process', 'PoolWorker') + w.daemon = True + w.start() + pool.append(w) + util.debug('added worker') + + @staticmethod + def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue, + initializer, initargs, maxtasksperchild, + wrap_exception): + """Clean up any exited workers and start replacements for them. + """ + if Pool._join_exited_workers(pool): + Pool._repopulate_pool_static(ctx, Process, processes, pool, + inqueue, outqueue, initializer, + initargs, maxtasksperchild, + wrap_exception) + + def _setup_queues(self): + self._inqueue = self._ctx.SimpleQueue() + self._outqueue = self._ctx.SimpleQueue() + self._quick_put = self._inqueue._writer.send + self._quick_get = self._outqueue._reader.recv + + def _check_running(self): + if self._state != RUN: + raise ValueError("Pool not running") + + def apply(self, func, args=(), kwds={}): + ''' + Equivalent of `func(*args, **kwds)`. + Pool must be running. + ''' + return self.apply_async(func, args, kwds).get() + + def map(self, func, iterable, chunksize=None): + ''' + Apply `func` to each element in `iterable`, collecting the results + in a list that is returned. + ''' + return self._map_async(func, iterable, mapstar, chunksize).get() + + def starmap(self, func, iterable, chunksize=None): + ''' + Like `map()` method but the elements of the `iterable` are expected to + be iterables as well and will be unpacked as arguments. Hence + `func` and (a, b) becomes func(a, b). 
+ ''' + return self._map_async(func, iterable, starmapstar, chunksize).get() + + def starmap_async(self, func, iterable, chunksize=None, callback=None, + error_callback=None): + ''' + Asynchronous version of `starmap()` method. + ''' + return self._map_async(func, iterable, starmapstar, chunksize, + callback, error_callback) + + def _guarded_task_generation(self, result_job, func, iterable): + '''Provides a generator of tasks for imap and imap_unordered with + appropriate handling for iterables which throw exceptions during + iteration.''' + try: + i = -1 + for i, x in enumerate(iterable): + yield (result_job, i, func, (x,), {}) + except Exception as e: + yield (result_job, i+1, _helper_reraises_exception, (e,), {}) + + def imap(self, func, iterable, chunksize=1): + ''' + Equivalent of `map()` -- can be MUCH slower than `Pool.map()`. + ''' + self._check_running() + if chunksize == 1: + result = IMapIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, func, iterable), + result._set_length + )) + return result + else: + if chunksize < 1: + raise ValueError( + "Chunksize must be 1+, not {0:n}".format( + chunksize)) + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = IMapIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, + mapstar, + task_batches), + result._set_length + )) + return (item for chunk in result for item in chunk) + + def imap_unordered(self, func, iterable, chunksize=1): + ''' + Like `imap()` method but ordering of results is arbitrary. + ''' + self._check_running() + if chunksize == 1: + result = IMapUnorderedIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, func, iterable), + result._set_length + )) + return result + else: + if chunksize < 1: + raise ValueError( + "Chunksize must be 1+, not {0!r}".format(chunksize)) + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = IMapUnorderedIterator(self) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, + mapstar, + task_batches), + result._set_length + )) + return (item for chunk in result for item in chunk) + + def apply_async(self, func, args=(), kwds={}, callback=None, + error_callback=None): + ''' + Asynchronous version of `apply()` method. + ''' + self._check_running() + result = ApplyResult(self, callback, error_callback) + self._taskqueue.put(([(result._job, 0, func, args, kwds)], None)) + return result + + def map_async(self, func, iterable, chunksize=None, callback=None, + error_callback=None): + ''' + Asynchronous version of `map()` method. + ''' + return self._map_async(func, iterable, mapstar, chunksize, callback, + error_callback) + + def _map_async(self, func, iterable, mapper, chunksize=None, callback=None, + error_callback=None): + ''' + Helper function to implement map, starmap and their async counterparts. 
+ ''' + self._check_running() + if not hasattr(iterable, '__len__'): + iterable = list(iterable) + + if chunksize is None: + chunksize, extra = divmod(len(iterable), len(self._pool) * 4) + if extra: + chunksize += 1 + if len(iterable) == 0: + chunksize = 0 + + task_batches = Pool._get_tasks(func, iterable, chunksize) + result = MapResult(self, chunksize, len(iterable), callback, + error_callback=error_callback) + self._taskqueue.put( + ( + self._guarded_task_generation(result._job, + mapper, + task_batches), + None + ) + ) + return result + + @staticmethod + def _wait_for_updates(sentinels, change_notifier, timeout=None): + wait(sentinels, timeout=timeout) + while not change_notifier.empty(): + change_notifier.get() + + @classmethod + def _handle_workers(cls, cache, taskqueue, ctx, Process, processes, + pool, inqueue, outqueue, initializer, initargs, + maxtasksperchild, wrap_exception, sentinels, + change_notifier): + thread = threading.current_thread() + + # Keep maintaining workers until the cache gets drained, unless the pool + # is terminated. + while thread._state == RUN or (cache and thread._state != TERMINATE): + cls._maintain_pool(ctx, Process, processes, pool, inqueue, + outqueue, initializer, initargs, + maxtasksperchild, wrap_exception) + + current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels] + + cls._wait_for_updates(current_sentinels, change_notifier) + # send sentinel to stop workers + taskqueue.put(None) + util.debug('worker handler exiting') + + @staticmethod + def _handle_tasks(taskqueue, put, outqueue, pool, cache): + thread = threading.current_thread() + + for taskseq, set_length in iter(taskqueue.get, None): + task = None + try: + # iterating taskseq cannot fail + for task in taskseq: + if thread._state != RUN: + util.debug('task handler found thread._state != RUN') + break + try: + put(task) + except Exception as e: + job, idx = task[:2] + try: + cache[job]._set(idx, (False, e)) + except KeyError: + pass + else: + if set_length: + util.debug('doing set_length()') + idx = task[1] if task else -1 + set_length(idx + 1) + continue + break + finally: + task = taskseq = job = None + else: + util.debug('task handler got sentinel') + + try: + # tell result handler to finish when cache is empty + util.debug('task handler sending sentinel to result handler') + outqueue.put(None) + + # tell workers there is no more work + util.debug('task handler sending sentinel to workers') + for p in pool: + put(None) + except OSError: + util.debug('task handler got OSError when sending sentinels') + + util.debug('task handler exiting') + + @staticmethod + def _handle_results(outqueue, get, cache): + thread = threading.current_thread() + + while 1: + try: + task = get() + except (OSError, EOFError): + util.debug('result handler got EOFError/OSError -- exiting') + return + + if thread._state != RUN: + assert thread._state == TERMINATE, "Thread not in TERMINATE" + util.debug('result handler found thread._state=TERMINATE') + break + + if task is None: + util.debug('result handler got sentinel') + break + + job, i, obj = task + try: + cache[job]._set(i, obj) + except KeyError: + pass + task = job = obj = None + + while cache and thread._state != TERMINATE: + try: + task = get() + except (OSError, EOFError): + util.debug('result handler got EOFError/OSError -- exiting') + return + + if task is None: + util.debug('result handler ignoring extra sentinel') + continue + job, i, obj = task + try: + cache[job]._set(i, obj) + except KeyError: + pass + task = job = obj = None + + if 
hasattr(outqueue, '_reader'): + util.debug('ensuring that outqueue is not full') + # If we don't make room available in outqueue then + # attempts to add the sentinel (None) to outqueue may + # block. There is guaranteed to be no more than 2 sentinels. + try: + for i in range(10): + if not outqueue._reader.poll(): + break + get() + except (OSError, EOFError): + pass + + util.debug('result handler exiting: len(cache)=%s, thread._state=%s', + len(cache), thread._state) + + @staticmethod + def _get_tasks(func, it, size): + it = iter(it) + while 1: + x = tuple(itertools.islice(it, size)) + if not x: + return + yield (func, x) + + def __reduce__(self): + raise NotImplementedError( + 'pool objects cannot be passed between processes or pickled' + ) + + def close(self): + util.debug('closing pool') + if self._state == RUN: + self._state = CLOSE + self._worker_handler._state = CLOSE + self._change_notifier.put(None) + + def terminate(self): + util.debug('terminating pool') + self._state = TERMINATE + self._terminate() + + def join(self): + util.debug('joining pool') + if self._state == RUN: + raise ValueError("Pool is still running") + elif self._state not in (CLOSE, TERMINATE): + raise ValueError("In unknown state") + self._worker_handler.join() + self._task_handler.join() + self._result_handler.join() + for p in self._pool: + p.join() + + @staticmethod + def _help_stuff_finish(inqueue, task_handler, size): + # task_handler may be blocked trying to put items on inqueue + util.debug('removing tasks from inqueue until task handler finished') + inqueue._rlock.acquire() + while task_handler.is_alive() and inqueue._reader.poll(): + inqueue._reader.recv() + time.sleep(0) + + @classmethod + def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier, + worker_handler, task_handler, result_handler, cache): + # this is guaranteed to only be called once + util.debug('finalizing pool') + + # Notify that the worker_handler state has been changed so the + # _handle_workers loop can be unblocked (and exited) in order to + # send the finalization sentinel all the workers. + worker_handler._state = TERMINATE + change_notifier.put(None) + + task_handler._state = TERMINATE + + util.debug('helping task handler/workers to finish') + cls._help_stuff_finish(inqueue, task_handler, len(pool)) + + if (not result_handler.is_alive()) and (len(cache) != 0): + raise AssertionError( + "Cannot have cache with result_hander not alive") + + result_handler._state = TERMINATE + change_notifier.put(None) + outqueue.put(None) # sentinel + + # We must wait for the worker handler to exit before terminating + # workers because we don't want workers to be restarted behind our back. + util.debug('joining worker handler') + if threading.current_thread() is not worker_handler: + worker_handler.join() + + # Terminate workers which haven't already finished. 
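+        # A worker whose exitcode is still None has not finished yet.
+        # ThreadPool workers (multiprocessing.dummy) have no terminate()
+        # method, hence the hasattr() check on the first pool entry.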
+ if pool and hasattr(pool[0], 'terminate'): + util.debug('terminating workers') + for p in pool: + if p.exitcode is None: + p.terminate() + + util.debug('joining task handler') + if threading.current_thread() is not task_handler: + task_handler.join() + + util.debug('joining result handler') + if threading.current_thread() is not result_handler: + result_handler.join() + + if pool and hasattr(pool[0], 'terminate'): + util.debug('joining pool workers') + for p in pool: + if p.is_alive(): + # worker has not yet exited + util.debug('cleaning up worker %d' % p.pid) + p.join() + + def __enter__(self): + self._check_running() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + self.terminate() + +# +# Class whose instances are returned by `Pool.apply_async()` +# + +class ApplyResult(object): + + def __init__(self, pool, callback, error_callback): + self._pool = pool + self._event = threading.Event() + self._job = next(job_counter) + self._cache = pool._cache + self._callback = callback + self._error_callback = error_callback + self._cache[self._job] = self + + def ready(self): + return self._event.is_set() + + def successful(self): + if not self.ready(): + raise ValueError("{0!r} not ready".format(self)) + return self._success + + def wait(self, timeout=None): + self._event.wait(timeout) + + def get(self, timeout=None): + self.wait(timeout) + if not self.ready(): + raise TimeoutError + if self._success: + return self._value + else: + raise self._value + + def _set(self, i, obj): + self._success, self._value = obj + if self._callback and self._success: + self._callback(self._value) + if self._error_callback and not self._success: + self._error_callback(self._value) + self._event.set() + del self._cache[self._job] + self._pool = None + + __class_getitem__ = classmethod(types.GenericAlias) + +AsyncResult = ApplyResult # create alias -- see #17805 + +# +# Class whose instances are returned by `Pool.map_async()` +# + +class MapResult(ApplyResult): + + def __init__(self, pool, chunksize, length, callback, error_callback): + ApplyResult.__init__(self, pool, callback, + error_callback=error_callback) + self._success = True + self._value = [None] * length + self._chunksize = chunksize + if chunksize <= 0: + self._number_left = 0 + self._event.set() + del self._cache[self._job] + else: + self._number_left = length//chunksize + bool(length % chunksize) + + def _set(self, i, success_result): + self._number_left -= 1 + success, result = success_result + if success and self._success: + self._value[i*self._chunksize:(i+1)*self._chunksize] = result + if self._number_left == 0: + if self._callback: + self._callback(self._value) + del self._cache[self._job] + self._event.set() + self._pool = None + else: + if not success and self._success: + # only store first exception + self._success = False + self._value = result + if self._number_left == 0: + # only consider the result ready once all jobs are done + if self._error_callback: + self._error_callback(self._value) + del self._cache[self._job] + self._event.set() + self._pool = None + +# +# Class whose instances are returned by `Pool.imap()` +# + +class IMapIterator(object): + + def __init__(self, pool): + self._pool = pool + self._cond = threading.Condition(threading.Lock()) + self._job = next(job_counter) + self._cache = pool._cache + self._items = collections.deque() + self._index = 0 + self._length = None + self._unsorted = {} + self._cache[self._job] = self + + def __iter__(self): + return self + + def next(self, timeout=None): + with 
self._cond: + try: + item = self._items.popleft() + except IndexError: + if self._index == self._length: + self._pool = None + raise StopIteration from None + self._cond.wait(timeout) + try: + item = self._items.popleft() + except IndexError: + if self._index == self._length: + self._pool = None + raise StopIteration from None + raise TimeoutError from None + + success, value = item + if success: + return value + raise value + + __next__ = next # XXX + + def _set(self, i, obj): + with self._cond: + if self._index == i: + self._items.append(obj) + self._index += 1 + while self._index in self._unsorted: + obj = self._unsorted.pop(self._index) + self._items.append(obj) + self._index += 1 + self._cond.notify() + else: + self._unsorted[i] = obj + + if self._index == self._length: + del self._cache[self._job] + self._pool = None + + def _set_length(self, length): + with self._cond: + self._length = length + if self._index == self._length: + self._cond.notify() + del self._cache[self._job] + self._pool = None + +# +# Class whose instances are returned by `Pool.imap_unordered()` +# + +class IMapUnorderedIterator(IMapIterator): + + def _set(self, i, obj): + with self._cond: + self._items.append(obj) + self._index += 1 + self._cond.notify() + if self._index == self._length: + del self._cache[self._job] + self._pool = None + +# +# +# + +class ThreadPool(Pool): + _wrap_exception = False + + @staticmethod + def Process(ctx, *args, **kwds): + from .dummy import Process + return Process(*args, **kwds) + + def __init__(self, processes=None, initializer=None, initargs=()): + Pool.__init__(self, processes, initializer, initargs) + + def _setup_queues(self): + self._inqueue = queue.SimpleQueue() + self._outqueue = queue.SimpleQueue() + self._quick_put = self._inqueue.put + self._quick_get = self._outqueue.get + + def _get_sentinels(self): + return [self._change_notifier._reader] + + @staticmethod + def _get_worker_sentinels(workers): + return [] + + @staticmethod + def _help_stuff_finish(inqueue, task_handler, size): + # drain inqueue, and put sentinels at its head to make workers finish + try: + while True: + inqueue.get(block=False) + except queue.Empty: + pass + for i in range(size): + inqueue.put(None) + + def _wait_for_updates(self, sentinels, change_notifier, timeout): + time.sleep(timeout) diff --git a/llava/lib/python3.10/multiprocessing/popen_fork.py b/llava/lib/python3.10/multiprocessing/popen_fork.py new file mode 100644 index 0000000000000000000000000000000000000000..625981cf47627cab9d03cae4d3c6d8224a9375cc --- /dev/null +++ b/llava/lib/python3.10/multiprocessing/popen_fork.py @@ -0,0 +1,83 @@ +import os +import signal + +from . import util + +__all__ = ['Popen'] + +# +# Start child process using fork +# + +class Popen(object): + method = 'fork' + + def __init__(self, process_obj): + util._flush_std_streams() + self.returncode = None + self.finalizer = None + self._launch(process_obj) + + def duplicate_for_child(self, fd): + return fd + + def poll(self, flag=os.WNOHANG): + if self.returncode is None: + try: + pid, sts = os.waitpid(self.pid, flag) + except OSError: + # Child process not yet created. 
See #1731717 + # e.errno == errno.ECHILD == 10 + return None + if pid == self.pid: + self.returncode = os.waitstatus_to_exitcode(sts) + return self.returncode + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is not None: + from multiprocessing.connection import wait + if not wait([self.sentinel], timeout): + return None + # This shouldn't block if wait() returned successfully. + return self.poll(os.WNOHANG if timeout == 0.0 else 0) + return self.returncode + + def _send_signal(self, sig): + if self.returncode is None: + try: + os.kill(self.pid, sig) + except ProcessLookupError: + pass + except OSError: + if self.wait(timeout=0.1) is None: + raise + + def terminate(self): + self._send_signal(signal.SIGTERM) + + def kill(self): + self._send_signal(signal.SIGKILL) + + def _launch(self, process_obj): + code = 1 + parent_r, child_w = os.pipe() + child_r, parent_w = os.pipe() + self.pid = os.fork() + if self.pid == 0: + try: + os.close(parent_r) + os.close(parent_w) + code = process_obj._bootstrap(parent_sentinel=child_r) + finally: + os._exit(code) + else: + os.close(child_w) + os.close(child_r) + self.finalizer = util.Finalize(self, util.close_fds, + (parent_r, parent_w,)) + self.sentinel = parent_r + + def close(self): + if self.finalizer is not None: + self.finalizer() diff --git a/llava/lib/python3.10/multiprocessing/popen_spawn_win32.py b/llava/lib/python3.10/multiprocessing/popen_spawn_win32.py new file mode 100644 index 0000000000000000000000000000000000000000..9c4098d0fa4f1e6e3ec94ecc8e596dd3857d741f --- /dev/null +++ b/llava/lib/python3.10/multiprocessing/popen_spawn_win32.py @@ -0,0 +1,131 @@ +import os +import msvcrt +import signal +import sys +import _winapi + +from .context import reduction, get_spawning_popen, set_spawning_popen +from . import spawn +from . import util + +__all__ = ['Popen'] + +# +# +# + +TERMINATE = 0x10000 +WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False)) +WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") + + +def _path_eq(p1, p2): + return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2) + +WINENV = not _path_eq(sys.executable, sys._base_executable) + + +def _close_handles(*handles): + for handle in handles: + _winapi.CloseHandle(handle) + + +# +# We define a Popen class similar to the one from subprocess, but +# whose constructor takes a process object as its argument. +# + +class Popen(object): + ''' + Start a subprocess to run the code of a process object + ''' + method = 'spawn' + + def __init__(self, process_obj): + prep_data = spawn.get_preparation_data(process_obj._name) + + # read end of pipe will be duplicated by the child process + # -- see spawn_main() in spawn.py. + # + # bpo-33929: Previously, the read end of pipe was "stolen" by the child + # process, but it leaked a handle if the child process had been + # terminated before it could steal the handle from the parent process. + rhandle, whandle = _winapi.CreatePipe(None, 0) + wfd = msvcrt.open_osfhandle(whandle, 0) + cmd = spawn.get_command_line(parent_pid=os.getpid(), + pipe_handle=rhandle) + cmd = ' '.join('"%s"' % x for x in cmd) + + python_exe = spawn.get_executable() + + # bpo-35797: When running in a venv, we bypass the redirect + # executor and launch our base Python. 
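+        # (__PYVENV_LAUNCHER__ is set below so the spawned base
+        # interpreter can still locate the venv that launched it.)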
+ if WINENV and _path_eq(python_exe, sys.executable): + python_exe = sys._base_executable + env = os.environ.copy() + env["__PYVENV_LAUNCHER__"] = sys.executable + else: + env = None + + with open(wfd, 'wb', closefd=True) as to_child: + # start process + try: + hp, ht, pid, tid = _winapi.CreateProcess( + python_exe, cmd, + None, None, False, 0, env, None, None) + _winapi.CloseHandle(ht) + except: + _winapi.CloseHandle(rhandle) + raise + + # set attributes of self + self.pid = pid + self.returncode = None + self._handle = hp + self.sentinel = int(hp) + self.finalizer = util.Finalize(self, _close_handles, + (self.sentinel, int(rhandle))) + + # send information to child + set_spawning_popen(self) + try: + reduction.dump(prep_data, to_child) + reduction.dump(process_obj, to_child) + finally: + set_spawning_popen(None) + + def duplicate_for_child(self, handle): + assert self is get_spawning_popen() + return reduction.duplicate(handle, self.sentinel) + + def wait(self, timeout=None): + if self.returncode is None: + if timeout is None: + msecs = _winapi.INFINITE + else: + msecs = max(0, int(timeout * 1000 + 0.5)) + + res = _winapi.WaitForSingleObject(int(self._handle), msecs) + if res == _winapi.WAIT_OBJECT_0: + code = _winapi.GetExitCodeProcess(self._handle) + if code == TERMINATE: + code = -signal.SIGTERM + self.returncode = code + + return self.returncode + + def poll(self): + return self.wait(timeout=0) + + def terminate(self): + if self.returncode is None: + try: + _winapi.TerminateProcess(int(self._handle), TERMINATE) + except OSError: + if self.wait(timeout=1.0) is None: + raise + + kill = terminate + + def close(self): + self.finalizer() diff --git a/llava/lib/python3.10/multiprocessing/process.py b/llava/lib/python3.10/multiprocessing/process.py new file mode 100644 index 0000000000000000000000000000000000000000..2d5372e6d947c849eedef02b7885acfd25930e12 --- /dev/null +++ b/llava/lib/python3.10/multiprocessing/process.py @@ -0,0 +1,438 @@ +# +# Module providing the `Process` class which emulates `threading.Thread` +# +# multiprocessing/process.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. 
+# + +__all__ = ['BaseProcess', 'current_process', 'active_children', + 'parent_process'] + +# +# Imports +# + +import os +import sys +import signal +import itertools +import threading +from _weakrefset import WeakSet + +# +# +# + +try: + ORIGINAL_DIR = os.path.abspath(os.getcwd()) +except OSError: + ORIGINAL_DIR = None + +# +# Public functions +# + +def current_process(): + ''' + Return process object representing the current process + ''' + return _current_process + +def active_children(): + ''' + Return list of process objects corresponding to live child processes + ''' + _cleanup() + return list(_children) + + +def parent_process(): + ''' + Return process object representing the parent process + ''' + return _parent_process + +# +# +# + +def _cleanup(): + # check for processes which have finished + for p in list(_children): + if p._popen.poll() is not None: + _children.discard(p) + +# +# The `Process` class +# + +class BaseProcess(object): + ''' + Process objects represent activity that is run in a separate process + + The class is analogous to `threading.Thread` + ''' + def _Popen(self): + raise NotImplementedError + + def __init__(self, group=None, target=None, name=None, args=(), kwargs={}, + *, daemon=None): + assert group is None, 'group argument must be None for now' + count = next(_process_counter) + self._identity = _current_process._identity + (count,) + self._config = _current_process._config.copy() + self._parent_pid = os.getpid() + self._parent_name = _current_process.name + self._popen = None + self._closed = False + self._target = target + self._args = tuple(args) + self._kwargs = dict(kwargs) + self._name = name or type(self).__name__ + '-' + \ + ':'.join(str(i) for i in self._identity) + if daemon is not None: + self.daemon = daemon + _dangling.add(self) + + def _check_closed(self): + if self._closed: + raise ValueError("process object is closed") + + def run(self): + ''' + Method to be run in sub-process; can be overridden in sub-class + ''' + if self._target: + self._target(*self._args, **self._kwargs) + + def start(self): + ''' + Start child process + ''' + self._check_closed() + assert self._popen is None, 'cannot start a process twice' + assert self._parent_pid == os.getpid(), \ + 'can only start a process object created by current process' + assert not _current_process._config.get('daemon'), \ + 'daemonic processes are not allowed to have children' + _cleanup() + self._popen = self._Popen(self) + self._sentinel = self._popen.sentinel + # Avoid a refcycle if the target function holds an indirect + # reference to the process object (see bpo-30775) + del self._target, self._args, self._kwargs + _children.add(self) + + def terminate(self): + ''' + Terminate process; sends SIGTERM signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.terminate() + + def kill(self): + ''' + Terminate process; sends SIGKILL signal or uses TerminateProcess() + ''' + self._check_closed() + self._popen.kill() + + def join(self, timeout=None): + ''' + Wait until child process terminates + ''' + self._check_closed() + assert self._parent_pid == os.getpid(), 'can only join a child process' + assert self._popen is not None, 'can only join a started process' + res = self._popen.wait(timeout) + if res is not None: + _children.discard(self) + + def is_alive(self): + ''' + Return whether process is alive + ''' + self._check_closed() + if self is _current_process: + return True + assert self._parent_pid == os.getpid(), 'can only test a child process' + + if self._popen is 
None: + return False + + returncode = self._popen.poll() + if returncode is None: + return True + else: + _children.discard(self) + return False + + def close(self): + ''' + Close the Process object. + + This method releases resources held by the Process object. It is + an error to call this method if the child process is still running. + ''' + if self._popen is not None: + if self._popen.poll() is None: + raise ValueError("Cannot close a process while it is still running. " + "You should first call join() or terminate().") + self._popen.close() + self._popen = None + del self._sentinel + _children.discard(self) + self._closed = True + + @property + def name(self): + return self._name + + @name.setter + def name(self, name): + assert isinstance(name, str), 'name must be a string' + self._name = name + + @property + def daemon(self): + ''' + Return whether process is a daemon + ''' + return self._config.get('daemon', False) + + @daemon.setter + def daemon(self, daemonic): + ''' + Set whether process is a daemon + ''' + assert self._popen is None, 'process has already started' + self._config['daemon'] = daemonic + + @property + def authkey(self): + return self._config['authkey'] + + @authkey.setter + def authkey(self, authkey): + ''' + Set authorization key of process + ''' + self._config['authkey'] = AuthenticationString(authkey) + + @property + def exitcode(self): + ''' + Return exit code of process or `None` if it has yet to stop + ''' + self._check_closed() + if self._popen is None: + return self._popen + return self._popen.poll() + + @property + def ident(self): + ''' + Return identifier (PID) of process or `None` if it has yet to start + ''' + self._check_closed() + if self is _current_process: + return os.getpid() + else: + return self._popen and self._popen.pid + + pid = ident + + @property + def sentinel(self): + ''' + Return a file descriptor (Unix) or handle (Windows) suitable for + waiting for process termination. + ''' + self._check_closed() + try: + return self._sentinel + except AttributeError: + raise ValueError("process not started") from None + + def __repr__(self): + exitcode = None + if self is _current_process: + status = 'started' + elif self._closed: + status = 'closed' + elif self._parent_pid != os.getpid(): + status = 'unknown' + elif self._popen is None: + status = 'initial' + else: + exitcode = self._popen.poll() + if exitcode is not None: + status = 'stopped' + else: + status = 'started' + + info = [type(self).__name__, 'name=%r' % self._name] + if self._popen is not None: + info.append('pid=%s' % self._popen.pid) + info.append('parent=%s' % self._parent_pid) + info.append(status) + if exitcode is not None: + exitcode = _exitcode_to_name.get(exitcode, exitcode) + info.append('exitcode=%s' % exitcode) + if self.daemon: + info.append('daemon') + return '<%s>' % ' '.join(info) + + ## + + def _bootstrap(self, parent_sentinel=None): + from . 
import util, context + global _current_process, _parent_process, _process_counter, _children + + try: + if self._start_method is not None: + context._force_start_method(self._start_method) + _process_counter = itertools.count(1) + _children = set() + util._close_stdin() + old_process = _current_process + _current_process = self + _parent_process = _ParentProcess( + self._parent_name, self._parent_pid, parent_sentinel) + if threading._HAVE_THREAD_NATIVE_ID: + threading.main_thread()._set_native_id() + try: + self._after_fork() + finally: + # delay finalization of the old process object until after + # _run_after_forkers() is executed + del old_process + util.info('child process calling self.run()') + try: + self.run() + exitcode = 0 + finally: + util._exit_function() + except SystemExit as e: + if e.code is None: + exitcode = 0 + elif isinstance(e.code, int): + exitcode = e.code + else: + sys.stderr.write(str(e.code) + '\n') + exitcode = 1 + except: + exitcode = 1 + import traceback + sys.stderr.write('Process %s:\n' % self.name) + traceback.print_exc() + finally: + threading._shutdown() + util.info('process exiting with exitcode %d' % exitcode) + util._flush_std_streams() + + return exitcode + + @staticmethod + def _after_fork(): + from . import util + util._finalizer_registry.clear() + util._run_after_forkers() + + +# +# We subclass bytes to avoid accidental transmission of auth keys over network +# + +class AuthenticationString(bytes): + def __reduce__(self): + from .context import get_spawning_popen + if get_spawning_popen() is None: + raise TypeError( + 'Pickling an AuthenticationString object is ' + 'disallowed for security reasons' + ) + return AuthenticationString, (bytes(self),) + + +# +# Create object representing the parent process +# + +class _ParentProcess(BaseProcess): + + def __init__(self, name, pid, sentinel): + self._identity = () + self._name = name + self._pid = pid + self._parent_pid = None + self._popen = None + self._closed = False + self._sentinel = sentinel + self._config = {} + + def is_alive(self): + from multiprocessing.connection import wait + return not wait([self._sentinel], timeout=0) + + @property + def ident(self): + return self._pid + + def join(self, timeout=None): + ''' + Wait until parent process terminates + ''' + from multiprocessing.connection import wait + wait([self._sentinel], timeout=timeout) + + pid = ident + +# +# Create object representing the main process +# + +class _MainProcess(BaseProcess): + + def __init__(self): + self._identity = () + self._name = 'MainProcess' + self._parent_pid = None + self._popen = None + self._closed = False + self._config = {'authkey': AuthenticationString(os.urandom(32)), + 'semprefix': '/mp'} + # Note that some versions of FreeBSD only allow named + # semaphores to have names of up to 14 characters. Therefore + # we choose a short prefix. + # + # On MacOSX in a sandbox it may be necessary to use a + # different prefix -- see #19478. + # + # Everything in self._config will be inherited by descendant + # processes. 
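+        #
+        # In particular the authkey (32 random bytes from os.urandom
+        # above) is inherited by children and used to authenticate
+        # multiprocessing connections.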
+ + def close(self): + pass + + +_parent_process = None +_current_process = _MainProcess() +_process_counter = itertools.count(1) +_children = set() +del _MainProcess + +# +# Give names to some return codes +# + +_exitcode_to_name = {} + +for name, signum in list(signal.__dict__.items()): + if name[:3]=='SIG' and '_' not in name: + _exitcode_to_name[-signum] = f'-{name}' + +# For debug and leak testing +_dangling = WeakSet() diff --git a/llava/lib/python3.10/multiprocessing/queues.py b/llava/lib/python3.10/multiprocessing/queues.py new file mode 100644 index 0000000000000000000000000000000000000000..f37f114a968871ba367ac33d5f0b109da8869834 --- /dev/null +++ b/llava/lib/python3.10/multiprocessing/queues.py @@ -0,0 +1,379 @@ +# +# Module implementing queues +# +# multiprocessing/queues.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue'] + +import sys +import os +import threading +import collections +import time +import types +import weakref +import errno + +from queue import Empty, Full + +import _multiprocessing + +from . import connection +from . import context +_ForkingPickler = context.reduction.ForkingPickler + +from .util import debug, info, Finalize, register_after_fork, is_exiting + +# +# Queue type using a pipe, buffer and thread +# + +class Queue(object): + + def __init__(self, maxsize=0, *, ctx): + if maxsize <= 0: + # Can raise ImportError (see issues #3770 and #23400) + from .synchronize import SEM_VALUE_MAX as maxsize + self._maxsize = maxsize + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = ctx.Lock() + self._opid = os.getpid() + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = ctx.Lock() + self._sem = ctx.BoundedSemaphore(maxsize) + # For use by concurrent.futures + self._ignore_epipe = False + self._reset() + + if sys.platform != 'win32': + register_after_fork(self, Queue._after_fork) + + def __getstate__(self): + context.assert_spawning(self) + return (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) + + def __setstate__(self, state): + (self._ignore_epipe, self._maxsize, self._reader, self._writer, + self._rlock, self._wlock, self._sem, self._opid) = state + self._reset() + + def _after_fork(self): + debug('Queue._after_fork()') + self._reset(after_fork=True) + + def _reset(self, after_fork=False): + if after_fork: + self._notempty._at_fork_reinit() + else: + self._notempty = threading.Condition(threading.Lock()) + self._buffer = collections.deque() + self._thread = None + self._jointhread = None + self._joincancelled = False + self._closed = False + self._close = None + self._send_bytes = self._writer.send_bytes + self._recv_bytes = self._reader.recv_bytes + self._poll = self._reader.poll + + def put(self, obj, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._notempty.notify() + + def get(self, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if block and timeout is None: + with self._rlock: + res = self._recv_bytes() + self._sem.release() + else: + if block: + deadline = time.monotonic() + timeout + if not self._rlock.acquire(block, timeout): + raise Empty + try: + if block: + timeout = deadline - 
time.monotonic() + if not self._poll(timeout): + raise Empty + elif not self._poll(): + raise Empty + res = self._recv_bytes() + self._sem.release() + finally: + self._rlock.release() + # unserialize the data after having released the lock + return _ForkingPickler.loads(res) + + def qsize(self): + # Raises NotImplementedError on Mac OSX because of broken sem_getvalue() + return self._maxsize - self._sem._semlock._get_value() + + def empty(self): + return not self._poll() + + def full(self): + return self._sem._semlock._is_zero() + + def get_nowait(self): + return self.get(False) + + def put_nowait(self, obj): + return self.put(obj, False) + + def close(self): + self._closed = True + close = self._close + if close: + self._close = None + close() + + def join_thread(self): + debug('Queue.join_thread()') + assert self._closed, "Queue {0!r} not closed".format(self) + if self._jointhread: + self._jointhread() + + def cancel_join_thread(self): + debug('Queue.cancel_join_thread()') + self._joincancelled = True + try: + self._jointhread.cancel() + except AttributeError: + pass + + def _start_thread(self): + debug('Queue._start_thread()') + + # Start thread which transfers data from buffer to pipe + self._buffer.clear() + self._thread = threading.Thread( + target=Queue._feed, + args=(self._buffer, self._notempty, self._send_bytes, + self._wlock, self._reader.close, self._writer.close, + self._ignore_epipe, self._on_queue_feeder_error, + self._sem), + name='QueueFeederThread' + ) + self._thread.daemon = True + + debug('doing self._thread.start()') + self._thread.start() + debug('... done self._thread.start()') + + if not self._joincancelled: + self._jointhread = Finalize( + self._thread, Queue._finalize_join, + [weakref.ref(self._thread)], + exitpriority=-5 + ) + + # Send sentinel to the thread queue object when garbage collected + self._close = Finalize( + self, Queue._finalize_close, + [self._buffer, self._notempty], + exitpriority=10 + ) + + @staticmethod + def _finalize_join(twr): + debug('joining queue thread') + thread = twr() + if thread is not None: + thread.join() + debug('... queue thread joined') + else: + debug('... queue thread already dead') + + @staticmethod + def _finalize_close(buffer, notempty): + debug('telling queue thread to quit') + with notempty: + buffer.append(_sentinel) + notempty.notify() + + @staticmethod + def _feed(buffer, notempty, send_bytes, writelock, reader_close, + writer_close, ignore_epipe, onerror, queue_sem): + debug('starting thread to feed data to pipe') + nacquire = notempty.acquire + nrelease = notempty.release + nwait = notempty.wait + bpopleft = buffer.popleft + sentinel = _sentinel + if sys.platform != 'win32': + wacquire = writelock.acquire + wrelease = writelock.release + else: + wacquire = None + + while 1: + try: + nacquire() + try: + if not buffer: + nwait() + finally: + nrelease() + try: + while 1: + obj = bpopleft() + if obj is sentinel: + debug('feeder thread got sentinel -- exiting') + reader_close() + writer_close() + return + + # serialize the data before acquiring the lock + obj = _ForkingPickler.dumps(obj) + if wacquire is None: + send_bytes(obj) + else: + wacquire() + try: + send_bytes(obj) + finally: + wrelease() + except IndexError: + pass + except Exception as e: + if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE: + return + # Since this runs in a daemon thread the resources it uses + # may be become unusable while the process is cleaning up. + # We ignore errors which happen after the process has + # started to cleanup. 
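+                # is_exiting() (from .util) reports whether interpreter
+                # shutdown has begun; past that point send errors are
+                # expected, so log them instead of re-raising.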
+ if is_exiting(): + info('error in queue thread: %s', e) + return + else: + # Since the object has not been sent in the queue, we need + # to decrease the size of the queue. The error acts as + # if the object had been silently removed from the queue + # and this step is necessary to have a properly working + # queue. + queue_sem.release() + onerror(e, obj) + + @staticmethod + def _on_queue_feeder_error(e, obj): + """ + Private API hook called when feeding data in the background thread + raises an exception. For overriding by concurrent.futures. + """ + import traceback + traceback.print_exc() + + +_sentinel = object() + +# +# A queue type which also supports join() and task_done() methods +# +# Note that if you do not call task_done() for each finished task then +# eventually the counter's semaphore may overflow causing Bad Things +# to happen. +# + +class JoinableQueue(Queue): + + def __init__(self, maxsize=0, *, ctx): + Queue.__init__(self, maxsize, ctx=ctx) + self._unfinished_tasks = ctx.Semaphore(0) + self._cond = ctx.Condition() + + def __getstate__(self): + return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks) + + def __setstate__(self, state): + Queue.__setstate__(self, state[:-2]) + self._cond, self._unfinished_tasks = state[-2:] + + def put(self, obj, block=True, timeout=None): + if self._closed: + raise ValueError(f"Queue {self!r} is closed") + if not self._sem.acquire(block, timeout): + raise Full + + with self._notempty, self._cond: + if self._thread is None: + self._start_thread() + self._buffer.append(obj) + self._unfinished_tasks.release() + self._notempty.notify() + + def task_done(self): + with self._cond: + if not self._unfinished_tasks.acquire(False): + raise ValueError('task_done() called too many times') + if self._unfinished_tasks._semlock._is_zero(): + self._cond.notify_all() + + def join(self): + with self._cond: + if not self._unfinished_tasks._semlock._is_zero(): + self._cond.wait() + +# +# Simplified Queue type -- really just a locked pipe +# + +class SimpleQueue(object): + + def __init__(self, *, ctx): + self._reader, self._writer = connection.Pipe(duplex=False) + self._rlock = ctx.Lock() + self._poll = self._reader.poll + if sys.platform == 'win32': + self._wlock = None + else: + self._wlock = ctx.Lock() + + def close(self): + self._reader.close() + self._writer.close() + + def empty(self): + return not self._poll() + + def __getstate__(self): + context.assert_spawning(self) + return (self._reader, self._writer, self._rlock, self._wlock) + + def __setstate__(self, state): + (self._reader, self._writer, self._rlock, self._wlock) = state + self._poll = self._reader.poll + + def get(self): + with self._rlock: + res = self._reader.recv_bytes() + # unserialize the data after having released the lock + return _ForkingPickler.loads(res) + + def put(self, obj): + # serialize the data before acquiring the lock + obj = _ForkingPickler.dumps(obj) + if self._wlock is None: + # writes to a message oriented win32 pipe are atomic + self._writer.send_bytes(obj) + else: + with self._wlock: + self._writer.send_bytes(obj) + + __class_getitem__ = classmethod(types.GenericAlias) diff --git a/llava/lib/python3.10/multiprocessing/resource_tracker.py b/llava/lib/python3.10/multiprocessing/resource_tracker.py new file mode 100644 index 0000000000000000000000000000000000000000..ea369507297f86ca97ff289712da340a3b51fdf3 --- /dev/null +++ b/llava/lib/python3.10/multiprocessing/resource_tracker.py @@ -0,0 +1,239 @@ 
+############################################################################### +# Server process to keep track of unlinked resources (like shared memory +# segments, semaphores etc.) and clean them. +# +# On Unix we run a server process which keeps track of unlinked +# resources. The server ignores SIGINT and SIGTERM and reads from a +# pipe. Every other process of the program has a copy of the writable +# end of the pipe, so we get EOF when all other processes have exited. +# Then the server process unlinks any remaining resource names. +# +# This is important because there may be system limits for such resources: for +# instance, the system only supports a limited number of named semaphores, and +# shared-memory segments live in the RAM. If a python process leaks such a +# resource, this resource will not be removed till the next reboot. Without +# this resource tracker process, "killall python" would probably leave unlinked +# resources. + +import os +import signal +import sys +import threading +import warnings + +from . import spawn +from . import util + +__all__ = ['ensure_running', 'register', 'unregister'] + +_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask') +_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM) + +_CLEANUP_FUNCS = { + 'noop': lambda: None, +} + +if os.name == 'posix': + import _multiprocessing + import _posixshmem + + # Use sem_unlink() to clean up named semaphores. + # + # sem_unlink() may be missing if the Python build process detected the + # absence of POSIX named semaphores. In that case, no named semaphores were + # ever opened, so no cleanup would be necessary. + if hasattr(_multiprocessing, 'sem_unlink'): + _CLEANUP_FUNCS.update({ + 'semaphore': _multiprocessing.sem_unlink, + }) + _CLEANUP_FUNCS.update({ + 'shared_memory': _posixshmem.shm_unlink, + }) + + +class ResourceTracker(object): + + def __init__(self): + self._lock = threading.Lock() + self._fd = None + self._pid = None + + def _stop(self): + with self._lock: + if self._fd is None: + # not running + return + + # closing the "alive" file descriptor stops main() + os.close(self._fd) + self._fd = None + + os.waitpid(self._pid, 0) + self._pid = None + + def getfd(self): + self.ensure_running() + return self._fd + + def ensure_running(self): + '''Make sure that resource tracker process is running. + + This can be run from any process. Usually a child process will use + the resource created by its parent.''' + with self._lock: + if self._fd is not None: + # resource tracker was launched before, is it still running? + if self._check_alive(): + # => still alive + return + # => dead, launch it again + os.close(self._fd) + + # Clean-up to avoid dangling processes. + try: + # _pid can be None if this process is a child from another + # python process, which has started the resource_tracker. + if self._pid is not None: + os.waitpid(self._pid, 0) + except ChildProcessError: + # The resource_tracker has already been terminated. + pass + self._fd = None + self._pid = None + + warnings.warn('resource_tracker: process died unexpectedly, ' + 'relaunching. 
Some resources might leak.') + + fds_to_pass = [] + try: + fds_to_pass.append(sys.stderr.fileno()) + except Exception: + pass + cmd = 'from multiprocessing.resource_tracker import main;main(%d)' + r, w = os.pipe() + try: + fds_to_pass.append(r) + # process will out live us, so no need to wait on pid + exe = spawn.get_executable() + args = [exe] + util._args_from_interpreter_flags() + args += ['-c', cmd % r] + # bpo-33613: Register a signal mask that will block the signals. + # This signal mask will be inherited by the child that is going + # to be spawned and will protect the child from a race condition + # that can make the child die before it registers signal handlers + # for SIGINT and SIGTERM. The mask is unregistered after spawning + # the child. + try: + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS) + pid = util.spawnv_passfds(exe, args, fds_to_pass) + finally: + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) + except: + os.close(w) + raise + else: + self._fd = w + self._pid = pid + finally: + os.close(r) + + def _check_alive(self): + '''Check that the pipe has not been closed by sending a probe.''' + try: + # We cannot use send here as it calls ensure_running, creating + # a cycle. + os.write(self._fd, b'PROBE:0:noop\n') + except OSError: + return False + else: + return True + + def register(self, name, rtype): + '''Register name of resource with resource tracker.''' + self._send('REGISTER', name, rtype) + + def unregister(self, name, rtype): + '''Unregister name of resource with resource tracker.''' + self._send('UNREGISTER', name, rtype) + + def _send(self, cmd, name, rtype): + self.ensure_running() + msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii') + if len(msg) > 512: + # posix guarantees that writes to a pipe of less than PIPE_BUF + # bytes are atomic, and that PIPE_BUF >= 512 + raise ValueError('msg too long') + nbytes = os.write(self._fd, msg) + assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format( + nbytes, len(msg)) + + +_resource_tracker = ResourceTracker() +ensure_running = _resource_tracker.ensure_running +register = _resource_tracker.register +unregister = _resource_tracker.unregister +getfd = _resource_tracker.getfd + +def main(fd): + '''Run resource tracker.''' + # protect the process from ^C and "killall python" etc + signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGTERM, signal.SIG_IGN) + if _HAVE_SIGMASK: + signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS) + + for f in (sys.stdin, sys.stdout): + try: + f.close() + except Exception: + pass + + cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()} + try: + # keep track of registered/unregistered resources + with open(fd, 'rb') as f: + for line in f: + try: + cmd, name, rtype = line.strip().decode('ascii').split(':') + cleanup_func = _CLEANUP_FUNCS.get(rtype, None) + if cleanup_func is None: + raise ValueError( + f'Cannot register {name} for automatic cleanup: ' + f'unknown resource type {rtype}') + + if cmd == 'REGISTER': + cache[rtype].add(name) + elif cmd == 'UNREGISTER': + cache[rtype].remove(name) + elif cmd == 'PROBE': + pass + else: + raise RuntimeError('unrecognized command %r' % cmd) + except Exception: + try: + sys.excepthook(*sys.exc_info()) + except: + pass + finally: + # all processes have terminated; cleanup any remaining resources + for rtype, rtype_cache in cache.items(): + if rtype_cache: + try: + warnings.warn('resource_tracker: There appear to be %d ' + 'leaked %s 
objects to clean up at shutdown' % + (len(rtype_cache), rtype)) + except Exception: + pass + for name in rtype_cache: + # For some reason the process which created and registered this + # resource has failed to unregister it. Presumably it has + # died. We therefore unlink it. + try: + try: + _CLEANUP_FUNCS[rtype](name) + except Exception as e: + warnings.warn('resource_tracker: %r: %s' % (name, e)) + finally: + pass diff --git a/llava/lib/python3.10/multiprocessing/sharedctypes.py b/llava/lib/python3.10/multiprocessing/sharedctypes.py new file mode 100644 index 0000000000000000000000000000000000000000..6071707027bea46dccfc4d92fb3195060faf07ce --- /dev/null +++ b/llava/lib/python3.10/multiprocessing/sharedctypes.py @@ -0,0 +1,240 @@ +# +# Module which supports allocation of ctypes objects from shared memory +# +# multiprocessing/sharedctypes.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import ctypes +import weakref + +from . import heap +from . import get_context + +from .context import reduction, assert_spawning +_ForkingPickler = reduction.ForkingPickler + +__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized'] + +# +# +# + +typecode_to_type = { + 'c': ctypes.c_char, 'u': ctypes.c_wchar, + 'b': ctypes.c_byte, 'B': ctypes.c_ubyte, + 'h': ctypes.c_short, 'H': ctypes.c_ushort, + 'i': ctypes.c_int, 'I': ctypes.c_uint, + 'l': ctypes.c_long, 'L': ctypes.c_ulong, + 'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong, + 'f': ctypes.c_float, 'd': ctypes.c_double + } + +# +# +# + +def _new_value(type_): + size = ctypes.sizeof(type_) + wrapper = heap.BufferWrapper(size) + return rebuild_ctype(type_, wrapper, None) + +def RawValue(typecode_or_type, *args): + ''' + Returns a ctypes object allocated from shared memory + ''' + type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) + obj = _new_value(type_) + ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) + obj.__init__(*args) + return obj + +def RawArray(typecode_or_type, size_or_initializer): + ''' + Returns a ctypes array allocated from shared memory + ''' + type_ = typecode_to_type.get(typecode_or_type, typecode_or_type) + if isinstance(size_or_initializer, int): + type_ = type_ * size_or_initializer + obj = _new_value(type_) + ctypes.memset(ctypes.addressof(obj), 0, ctypes.sizeof(obj)) + return obj + else: + type_ = type_ * len(size_or_initializer) + result = _new_value(type_) + result.__init__(*size_or_initializer) + return result + +def Value(typecode_or_type, *args, lock=True, ctx=None): + ''' + Return a synchronization wrapper for a Value + ''' + obj = RawValue(typecode_or_type, *args) + if lock is False: + return obj + if lock in (True, None): + ctx = ctx or get_context() + lock = ctx.RLock() + if not hasattr(lock, 'acquire'): + raise AttributeError("%r has no method 'acquire'" % lock) + return synchronized(obj, lock, ctx=ctx) + +def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None): + ''' + Return a synchronization wrapper for a RawArray + ''' + obj = RawArray(typecode_or_type, size_or_initializer) + if lock is False: + return obj + if lock in (True, None): + ctx = ctx or get_context() + lock = ctx.RLock() + if not hasattr(lock, 'acquire'): + raise AttributeError("%r has no method 'acquire'" % lock) + return synchronized(obj, lock, ctx=ctx) + +def copy(obj): + new_obj = _new_value(type(obj)) + ctypes.pointer(new_obj)[0] = obj + return new_obj + +def synchronized(obj, lock=None, ctx=None): + assert not isinstance(obj, 
SynchronizedBase), 'object already synchronized' + ctx = ctx or get_context() + + if isinstance(obj, ctypes._SimpleCData): + return Synchronized(obj, lock, ctx) + elif isinstance(obj, ctypes.Array): + if obj._type_ is ctypes.c_char: + return SynchronizedString(obj, lock, ctx) + return SynchronizedArray(obj, lock, ctx) + else: + cls = type(obj) + try: + scls = class_cache[cls] + except KeyError: + names = [field[0] for field in cls._fields_] + d = {name: make_property(name) for name in names} + classname = 'Synchronized' + cls.__name__ + scls = class_cache[cls] = type(classname, (SynchronizedBase,), d) + return scls(obj, lock, ctx) + +# +# Functions for pickling/unpickling +# + +def reduce_ctype(obj): + assert_spawning(obj) + if isinstance(obj, ctypes.Array): + return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_) + else: + return rebuild_ctype, (type(obj), obj._wrapper, None) + +def rebuild_ctype(type_, wrapper, length): + if length is not None: + type_ = type_ * length + _ForkingPickler.register(type_, reduce_ctype) + buf = wrapper.create_memoryview() + obj = type_.from_buffer(buf) + obj._wrapper = wrapper + return obj + +# +# Function to create properties +# + +def make_property(name): + try: + return prop_cache[name] + except KeyError: + d = {} + exec(template % ((name,)*7), d) + prop_cache[name] = d[name] + return d[name] + +template = ''' +def get%s(self): + self.acquire() + try: + return self._obj.%s + finally: + self.release() +def set%s(self, value): + self.acquire() + try: + self._obj.%s = value + finally: + self.release() +%s = property(get%s, set%s) +''' + +prop_cache = {} +class_cache = weakref.WeakKeyDictionary() + +# +# Synchronized wrappers +# + +class SynchronizedBase(object): + + def __init__(self, obj, lock=None, ctx=None): + self._obj = obj + if lock: + self._lock = lock + else: + ctx = ctx or get_context(force=True) + self._lock = ctx.RLock() + self.acquire = self._lock.acquire + self.release = self._lock.release + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + def __reduce__(self): + assert_spawning(self) + return synchronized, (self._obj, self._lock) + + def get_obj(self): + return self._obj + + def get_lock(self): + return self._lock + + def __repr__(self): + return '<%s wrapper for %s>' % (type(self).__name__, self._obj) + + +class Synchronized(SynchronizedBase): + value = make_property('value') + + +class SynchronizedArray(SynchronizedBase): + + def __len__(self): + return len(self._obj) + + def __getitem__(self, i): + with self: + return self._obj[i] + + def __setitem__(self, i, value): + with self: + self._obj[i] = value + + def __getslice__(self, start, stop): + with self: + return self._obj[start:stop] + + def __setslice__(self, start, stop, values): + with self: + self._obj[start:stop] = values + + +class SynchronizedString(SynchronizedArray): + value = make_property('value') + raw = make_property('raw') diff --git a/llava/lib/python3.10/multiprocessing/spawn.py b/llava/lib/python3.10/multiprocessing/spawn.py new file mode 100644 index 0000000000000000000000000000000000000000..7cc129e2610761f3868523030ea4fa60ec9d844b --- /dev/null +++ b/llava/lib/python3.10/multiprocessing/spawn.py @@ -0,0 +1,297 @@ +# +# Code used to start processes when using the spawn or forkserver +# start methods. +# +# multiprocessing/spawn.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import os +import sys +import runpy +import types + +from . 
import get_start_method, set_start_method +from . import process +from .context import reduction +from . import util + +__all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable', + 'get_preparation_data', 'get_command_line', 'import_main_path'] + +# +# _python_exe is the assumed path to the python executable. +# People embedding Python want to modify it. +# + +if sys.platform != 'win32': + WINEXE = False + WINSERVICE = False +else: + WINEXE = getattr(sys, 'frozen', False) + WINSERVICE = sys.executable.lower().endswith("pythonservice.exe") + +if WINSERVICE: + _python_exe = os.path.join(sys.exec_prefix, 'python.exe') +else: + _python_exe = sys.executable + +def set_executable(exe): + global _python_exe + _python_exe = exe + +def get_executable(): + return _python_exe + +# +# +# + +def is_forking(argv): + ''' + Return whether commandline indicates we are forking + ''' + if len(argv) >= 2 and argv[1] == '--multiprocessing-fork': + return True + else: + return False + + +def freeze_support(): + ''' + Run code for process object if this in not the main process + ''' + if is_forking(sys.argv): + kwds = {} + for arg in sys.argv[2:]: + name, value = arg.split('=') + if value == 'None': + kwds[name] = None + else: + kwds[name] = int(value) + spawn_main(**kwds) + sys.exit() + + +def get_command_line(**kwds): + ''' + Returns prefix of command line used for spawning a child process + ''' + if getattr(sys, 'frozen', False): + return ([sys.executable, '--multiprocessing-fork'] + + ['%s=%r' % item for item in kwds.items()]) + else: + prog = 'from multiprocessing.spawn import spawn_main; spawn_main(%s)' + prog %= ', '.join('%s=%r' % item for item in kwds.items()) + opts = util._args_from_interpreter_flags() + return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork'] + + +def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None): + ''' + Run code specified by data received over pipe + ''' + assert is_forking(sys.argv), "Not forking" + if sys.platform == 'win32': + import msvcrt + import _winapi + + if parent_pid is not None: + source_process = _winapi.OpenProcess( + _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE, + False, parent_pid) + else: + source_process = None + new_handle = reduction.duplicate(pipe_handle, + source_process=source_process) + fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY) + parent_sentinel = source_process + else: + from . import resource_tracker + resource_tracker._resource_tracker._fd = tracker_fd + fd = pipe_handle + parent_sentinel = os.dup(pipe_handle) + exitcode = _main(fd, parent_sentinel) + sys.exit(exitcode) + + +def _main(fd, parent_sentinel): + with os.fdopen(fd, 'rb', closefd=True) as from_parent: + process.current_process()._inheriting = True + try: + preparation_data = reduction.pickle.load(from_parent) + prepare(preparation_data) + self = reduction.pickle.load(from_parent) + finally: + del process.current_process()._inheriting + return self._bootstrap(parent_sentinel) + + +def _check_not_importing_main(): + if getattr(process.current_process(), '_inheriting', False): + raise RuntimeError(''' + An attempt has been made to start a new process before the + current process has finished its bootstrapping phase. + + This probably means that you are not using fork to start your + child processes and you have forgotten to use the proper idiom + in the main module: + + if __name__ == '__main__': + freeze_support() + ... 
+ + The "freeze_support()" line can be omitted if the program + is not going to be frozen to produce an executable.''') + + +def get_preparation_data(name): + ''' + Return info about parent needed by child to unpickle process object + ''' + _check_not_importing_main() + d = dict( + log_to_stderr=util._log_to_stderr, + authkey=process.current_process().authkey, + ) + + if util._logger is not None: + d['log_level'] = util._logger.getEffectiveLevel() + + sys_path=sys.path.copy() + try: + i = sys_path.index('') + except ValueError: + pass + else: + sys_path[i] = process.ORIGINAL_DIR + + d.update( + name=name, + sys_path=sys_path, + sys_argv=sys.argv, + orig_dir=process.ORIGINAL_DIR, + dir=os.getcwd(), + start_method=get_start_method(), + ) + + # Figure out whether to initialise main in the subprocess as a module + # or through direct execution (or to leave it alone entirely) + main_module = sys.modules['__main__'] + main_mod_name = getattr(main_module.__spec__, "name", None) + if main_mod_name is not None: + d['init_main_from_name'] = main_mod_name + elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE): + main_path = getattr(main_module, '__file__', None) + if main_path is not None: + if (not os.path.isabs(main_path) and + process.ORIGINAL_DIR is not None): + main_path = os.path.join(process.ORIGINAL_DIR, main_path) + d['init_main_from_path'] = os.path.normpath(main_path) + + return d + +# +# Prepare current process +# + +old_main_modules = [] + +def prepare(data): + ''' + Try to get current process ready to unpickle process object + ''' + if 'name' in data: + process.current_process().name = data['name'] + + if 'authkey' in data: + process.current_process().authkey = data['authkey'] + + if 'log_to_stderr' in data and data['log_to_stderr']: + util.log_to_stderr() + + if 'log_level' in data: + util.get_logger().setLevel(data['log_level']) + + if 'sys_path' in data: + sys.path = data['sys_path'] + + if 'sys_argv' in data: + sys.argv = data['sys_argv'] + + if 'dir' in data: + os.chdir(data['dir']) + + if 'orig_dir' in data: + process.ORIGINAL_DIR = data['orig_dir'] + + if 'start_method' in data: + set_start_method(data['start_method'], force=True) + + if 'init_main_from_name' in data: + _fixup_main_from_name(data['init_main_from_name']) + elif 'init_main_from_path' in data: + _fixup_main_from_path(data['init_main_from_path']) + +# Multiprocessing module helpers to fix up the main module in +# spawned subprocesses +def _fixup_main_from_name(mod_name): + # __main__.py files for packages, directories, zip archives, etc, run + # their "main only" code unconditionally, so we don't even try to + # populate anything in __main__, nor do we make any changes to + # __main__ attributes + current_main = sys.modules['__main__'] + if mod_name == "__main__" or mod_name.endswith(".__main__"): + return + + # If this process was forked, __main__ may already be populated + if getattr(current_main.__spec__, "name", None) == mod_name: + return + + # Otherwise, __main__ may contain some non-main code where we need to + # support unpickling it properly. 
We rerun it as __mp_main__ and make + # the normal __main__ an alias to that + old_main_modules.append(current_main) + main_module = types.ModuleType("__mp_main__") + main_content = runpy.run_module(mod_name, + run_name="__mp_main__", + alter_sys=True) + main_module.__dict__.update(main_content) + sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module + + +def _fixup_main_from_path(main_path): + # If this process was forked, __main__ may already be populated + current_main = sys.modules['__main__'] + + # Unfortunately, the main ipython launch script historically had no + # "if __name__ == '__main__'" guard, so we work around that + # by treating it like a __main__.py file + # See https://github.com/ipython/ipython/issues/4698 + main_name = os.path.splitext(os.path.basename(main_path))[0] + if main_name == 'ipython': + return + + # Otherwise, if __file__ already has the setting we expect, + # there's nothing more to do + if getattr(current_main, '__file__', None) == main_path: + return + + # If the parent process has sent a path through rather than a module + # name we assume it is an executable script that may contain + # non-main code that needs to be executed + old_main_modules.append(current_main) + main_module = types.ModuleType("__mp_main__") + main_content = runpy.run_path(main_path, + run_name="__mp_main__") + main_module.__dict__.update(main_content) + sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module + + +def import_main_path(main_path): + ''' + Set sys.modules['__main__'] to module at main_path + ''' + _fixup_main_from_path(main_path) diff --git a/llava/lib/python3.10/multiprocessing/synchronize.py b/llava/lib/python3.10/multiprocessing/synchronize.py new file mode 100644 index 0000000000000000000000000000000000000000..d0be48f1fd7a8f14b935f74d89c168d831b84dbc --- /dev/null +++ b/llava/lib/python3.10/multiprocessing/synchronize.py @@ -0,0 +1,394 @@ +# +# Module implementing synchronization primitives +# +# multiprocessing/synchronize.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +__all__ = [ + 'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event' + ] + +import threading +import sys +import tempfile +import _multiprocessing +import time + +from . import context +from . import process +from . import util + +# Try to import the mp.synchronize module cleanly, if it fails +# raise ImportError for platforms lacking a working sem_open implementation. 
+# See issue 3770 +try: + from _multiprocessing import SemLock, sem_unlink +except (ImportError): + raise ImportError("This platform lacks a functioning sem_open" + + " implementation, therefore, the required" + + " synchronization primitives needed will not" + + " function, see issue 3770.") + +# +# Constants +# + +RECURSIVE_MUTEX, SEMAPHORE = list(range(2)) +SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX + +# +# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock` +# + +class SemLock(object): + + _rand = tempfile._RandomNameSequence() + + def __init__(self, kind, value, maxvalue, *, ctx): + if ctx is None: + ctx = context._default_context.get_context() + name = ctx.get_start_method() + unlink_now = sys.platform == 'win32' or name == 'fork' + for i in range(100): + try: + sl = self._semlock = _multiprocessing.SemLock( + kind, value, maxvalue, self._make_name(), + unlink_now) + except FileExistsError: + pass + else: + break + else: + raise FileExistsError('cannot find name for semaphore') + + util.debug('created semlock with handle %s' % sl.handle) + self._make_methods() + + if sys.platform != 'win32': + def _after_fork(obj): + obj._semlock._after_fork() + util.register_after_fork(self, _after_fork) + + if self._semlock.name is not None: + # We only get here if we are on Unix with forking + # disabled. When the object is garbage collected or the + # process shuts down we unlink the semaphore name + from .resource_tracker import register + register(self._semlock.name, "semaphore") + util.Finalize(self, SemLock._cleanup, (self._semlock.name,), + exitpriority=0) + + @staticmethod + def _cleanup(name): + from .resource_tracker import unregister + sem_unlink(name) + unregister(name, "semaphore") + + def _make_methods(self): + self.acquire = self._semlock.acquire + self.release = self._semlock.release + + def __enter__(self): + return self._semlock.__enter__() + + def __exit__(self, *args): + return self._semlock.__exit__(*args) + + def __getstate__(self): + context.assert_spawning(self) + sl = self._semlock + if sys.platform == 'win32': + h = context.get_spawning_popen().duplicate_for_child(sl.handle) + else: + h = sl.handle + return (h, sl.kind, sl.maxvalue, sl.name) + + def __setstate__(self, state): + self._semlock = _multiprocessing.SemLock._rebuild(*state) + util.debug('recreated blocker with handle %r' % state[0]) + self._make_methods() + + @staticmethod + def _make_name(): + return '%s-%s' % (process.current_process()._config['semprefix'], + next(SemLock._rand)) + +# +# Semaphore +# + +class Semaphore(SemLock): + + def __init__(self, value=1, *, ctx): + SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx) + + def get_value(self): + return self._semlock._get_value() + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = 'unknown' + return '<%s(value=%s)>' % (self.__class__.__name__, value) + +# +# Bounded semaphore +# + +class BoundedSemaphore(Semaphore): + + def __init__(self, value=1, *, ctx): + SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx) + + def __repr__(self): + try: + value = self._semlock._get_value() + except Exception: + value = 'unknown' + return '<%s(value=%s, maxvalue=%s)>' % \ + (self.__class__.__name__, value, self._semlock.maxvalue) + +# +# Non-recursive lock +# + +class Lock(SemLock): + + def __init__(self, *, ctx): + SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if 
threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + elif self._semlock._get_value() == 1: + name = 'None' + elif self._semlock._count() > 0: + name = 'SomeOtherThread' + else: + name = 'SomeOtherProcess' + except Exception: + name = 'unknown' + return '<%s(owner=%s)>' % (self.__class__.__name__, name) + +# +# Recursive lock +# + +class RLock(SemLock): + + def __init__(self, *, ctx): + SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx) + + def __repr__(self): + try: + if self._semlock._is_mine(): + name = process.current_process().name + if threading.current_thread().name != 'MainThread': + name += '|' + threading.current_thread().name + count = self._semlock._count() + elif self._semlock._get_value() == 1: + name, count = 'None', 0 + elif self._semlock._count() > 0: + name, count = 'SomeOtherThread', 'nonzero' + else: + name, count = 'SomeOtherProcess', 'nonzero' + except Exception: + name, count = 'unknown', 'unknown' + return '<%s(%s, %s)>' % (self.__class__.__name__, name, count) + +# +# Condition variable +# + +class Condition(object): + + def __init__(self, lock=None, *, ctx): + self._lock = lock or ctx.RLock() + self._sleeping_count = ctx.Semaphore(0) + self._woken_count = ctx.Semaphore(0) + self._wait_semaphore = ctx.Semaphore(0) + self._make_methods() + + def __getstate__(self): + context.assert_spawning(self) + return (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) + + def __setstate__(self, state): + (self._lock, self._sleeping_count, + self._woken_count, self._wait_semaphore) = state + self._make_methods() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + def _make_methods(self): + self.acquire = self._lock.acquire + self.release = self._lock.release + + def __repr__(self): + try: + num_waiters = (self._sleeping_count._semlock._get_value() - + self._woken_count._semlock._get_value()) + except Exception: + num_waiters = 'unknown' + return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters) + + def wait(self, timeout=None): + assert self._lock._semlock._is_mine(), \ + 'must acquire() condition before using wait()' + + # indicate that this thread is going to sleep + self._sleeping_count.release() + + # release lock + count = self._lock._semlock._count() + for i in range(count): + self._lock.release() + + try: + # wait for notification or timeout + return self._wait_semaphore.acquire(True, timeout) + finally: + # indicate that this thread has woken + self._woken_count.release() + + # reacquire lock + for i in range(count): + self._lock.acquire() + + def notify(self, n=1): + assert self._lock._semlock._is_mine(), 'lock is not owned' + assert not self._wait_semaphore.acquire( + False), ('notify: Should not have been able to acquire ' + + '_wait_semaphore') + + # to take account of timeouts since last notify*() we subtract + # woken_count from sleeping_count and rezero woken_count + while self._woken_count.acquire(False): + res = self._sleeping_count.acquire(False) + assert res, ('notify: Bug in sleeping_count.acquire' + + '- res should not be False') + + sleepers = 0 + while sleepers < n and self._sleeping_count.acquire(False): + self._wait_semaphore.release() # wake up one sleeper + sleepers += 1 + + if sleepers: + for i in range(sleepers): + self._woken_count.acquire() # wait for a sleeper to wake + + # rezero wait_semaphore in case some timeouts just happened + while self._wait_semaphore.acquire(False): + pass 
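[Editor's note — usage sketch, not part of the vendored file.] The Condition being defined here is assembled from one lock and three semaphores, but from user code it behaves like threading.Condition across process boundaries. Everything below is standard multiprocessing API:

import multiprocessing as mp

def consumer(cond, q):
    with cond:                    # acquire the underlying (R)Lock
        while q.empty():          # always re-test the predicate after waking
            cond.wait()           # releases the lock while sleeping
        print('consumed', q.get())

if __name__ == '__main__':
    cond = mp.Condition()
    q = mp.Queue()
    p = mp.Process(target=consumer, args=(cond, q))
    p.start()
    with cond:
        q.put(42)
        cond.notify()             # wakes one sleeper via _wait_semaphore
    p.join()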
+ + def notify_all(self): + self.notify(n=sys.maxsize) + + def wait_for(self, predicate, timeout=None): + result = predicate() + if result: + return result + if timeout is not None: + endtime = time.monotonic() + timeout + else: + endtime = None + waittime = None + while not result: + if endtime is not None: + waittime = endtime - time.monotonic() + if waittime <= 0: + break + self.wait(waittime) + result = predicate() + return result + +# +# Event +# + +class Event(object): + + def __init__(self, *, ctx): + self._cond = ctx.Condition(ctx.Lock()) + self._flag = ctx.Semaphore(0) + + def is_set(self): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + return True + return False + + def set(self): + with self._cond: + self._flag.acquire(False) + self._flag.release() + self._cond.notify_all() + + def clear(self): + with self._cond: + self._flag.acquire(False) + + def wait(self, timeout=None): + with self._cond: + if self._flag.acquire(False): + self._flag.release() + else: + self._cond.wait(timeout) + + if self._flag.acquire(False): + self._flag.release() + return True + return False + +# +# Barrier +# + +class Barrier(threading.Barrier): + + def __init__(self, parties, action=None, timeout=None, *, ctx): + import struct + from .heap import BufferWrapper + wrapper = BufferWrapper(struct.calcsize('i') * 2) + cond = ctx.Condition() + self.__setstate__((parties, action, timeout, cond, wrapper)) + self._state = 0 + self._count = 0 + + def __setstate__(self, state): + (self._parties, self._action, self._timeout, + self._cond, self._wrapper) = state + self._array = self._wrapper.create_memoryview().cast('i') + + def __getstate__(self): + return (self._parties, self._action, self._timeout, + self._cond, self._wrapper) + + @property + def _state(self): + return self._array[0] + + @_state.setter + def _state(self, value): + self._array[0] = value + + @property + def _count(self): + return self._array[1] + + @_count.setter + def _count(self, value): + self._array[1] = value diff --git a/llava/lib/python3.10/multiprocessing/util.py b/llava/lib/python3.10/multiprocessing/util.py new file mode 100644 index 0000000000000000000000000000000000000000..9e07a4e93e597e853fab59f355419fa5ac282fb2 --- /dev/null +++ b/llava/lib/python3.10/multiprocessing/util.py @@ -0,0 +1,489 @@ +# +# Module providing various facilities to other parts of the package +# +# multiprocessing/util.py +# +# Copyright (c) 2006-2008, R Oudkerk +# Licensed to PSF under a Contributor Agreement. +# + +import os +import itertools +import sys +import weakref +import atexit +import threading # we want threading to install it's + # cleanup function before multiprocessing does +from subprocess import _args_from_interpreter_flags + +from . 
import process + +__all__ = [ + 'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger', + 'log_to_stderr', 'get_temp_dir', 'register_after_fork', + 'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal', + 'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING', + ] + +# +# Logging +# + +NOTSET = 0 +SUBDEBUG = 5 +DEBUG = 10 +INFO = 20 +SUBWARNING = 25 + +LOGGER_NAME = 'multiprocessing' +DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s' + +_logger = None +_log_to_stderr = False + +def sub_debug(msg, *args): + if _logger: + _logger.log(SUBDEBUG, msg, *args) + +def debug(msg, *args): + if _logger: + _logger.log(DEBUG, msg, *args) + +def info(msg, *args): + if _logger: + _logger.log(INFO, msg, *args) + +def sub_warning(msg, *args): + if _logger: + _logger.log(SUBWARNING, msg, *args) + +def get_logger(): + ''' + Returns logger used by multiprocessing + ''' + global _logger + import logging + + logging._acquireLock() + try: + if not _logger: + + _logger = logging.getLogger(LOGGER_NAME) + _logger.propagate = 0 + + # XXX multiprocessing should cleanup before logging + if hasattr(atexit, 'unregister'): + atexit.unregister(_exit_function) + atexit.register(_exit_function) + else: + atexit._exithandlers.remove((_exit_function, (), {})) + atexit._exithandlers.append((_exit_function, (), {})) + + finally: + logging._releaseLock() + + return _logger + +def log_to_stderr(level=None): + ''' + Turn on logging and add a handler which prints to stderr + ''' + global _log_to_stderr + import logging + + logger = get_logger() + formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT) + handler = logging.StreamHandler() + handler.setFormatter(formatter) + logger.addHandler(handler) + + if level: + logger.setLevel(level) + _log_to_stderr = True + return _logger + + +# Abstract socket support + +def _platform_supports_abstract_sockets(): + if sys.platform == "linux": + return True + if hasattr(sys, 'getandroidapilevel'): + return True + return False + + +def is_abstract_socket_namespace(address): + if not address: + return False + if isinstance(address, bytes): + return address[0] == 0 + elif isinstance(address, str): + return address[0] == "\0" + raise TypeError(f'address type of {address!r} unrecognized') + + +abstract_sockets_supported = _platform_supports_abstract_sockets() + +# +# Function returning a temp directory which will be removed on exit +# + +def _remove_temp_dir(rmtree, tempdir): + rmtree(tempdir) + + current_process = process.current_process() + # current_process() can be None if the finalizer is called + # late during Python finalization + if current_process is not None: + current_process._config['tempdir'] = None + +def get_temp_dir(): + # get name of a temp directory which will be automatically cleaned up + tempdir = process.current_process()._config.get('tempdir') + if tempdir is None: + import shutil, tempfile + tempdir = tempfile.mkdtemp(prefix='pymp-') + info('created temp directory %s', tempdir) + # keep a strong reference to shutil.rmtree(), since the finalizer + # can be called late during Python shutdown + Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir), + exitpriority=-100) + process.current_process()._config['tempdir'] = tempdir + return tempdir + +# +# Support for reinitialization of objects when bootstrapping a child process +# + +_afterfork_registry = weakref.WeakValueDictionary() +_afterfork_counter = itertools.count() + +def _run_after_forkers(): + items = list(_afterfork_registry.items()) + items.sort() + for (index, ident, func), obj in 
items: + try: + func(obj) + except Exception as e: + info('after forker raised exception %s', e) + +def register_after_fork(obj, func): + _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj + +# +# Finalization using weakrefs +# + +_finalizer_registry = {} +_finalizer_counter = itertools.count() + + +class Finalize(object): + ''' + Class which supports object finalization using weakrefs + ''' + def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None): + if (exitpriority is not None) and not isinstance(exitpriority,int): + raise TypeError( + "Exitpriority ({0!r}) must be None or int, not {1!s}".format( + exitpriority, type(exitpriority))) + + if obj is not None: + self._weakref = weakref.ref(obj, self) + elif exitpriority is None: + raise ValueError("Without object, exitpriority cannot be None") + + self._callback = callback + self._args = args + self._kwargs = kwargs or {} + self._key = (exitpriority, next(_finalizer_counter)) + self._pid = os.getpid() + + _finalizer_registry[self._key] = self + + def __call__(self, wr=None, + # Need to bind these locally because the globals can have + # been cleared at shutdown + _finalizer_registry=_finalizer_registry, + sub_debug=sub_debug, getpid=os.getpid): + ''' + Run the callback unless it has already been called or cancelled + ''' + try: + del _finalizer_registry[self._key] + except KeyError: + sub_debug('finalizer no longer registered') + else: + if self._pid != getpid(): + sub_debug('finalizer ignored because different process') + res = None + else: + sub_debug('finalizer calling %s with args %s and kwargs %s', + self._callback, self._args, self._kwargs) + res = self._callback(*self._args, **self._kwargs) + self._weakref = self._callback = self._args = \ + self._kwargs = self._key = None + return res + + def cancel(self): + ''' + Cancel finalization of the object + ''' + try: + del _finalizer_registry[self._key] + except KeyError: + pass + else: + self._weakref = self._callback = self._args = \ + self._kwargs = self._key = None + + def still_active(self): + ''' + Return whether this finalizer is still waiting to invoke callback + ''' + return self._key in _finalizer_registry + + def __repr__(self): + try: + obj = self._weakref() + except (AttributeError, TypeError): + obj = None + + if obj is None: + return '<%s object, dead>' % self.__class__.__name__ + + x = '<%s object, callback=%s' % ( + self.__class__.__name__, + getattr(self._callback, '__name__', self._callback)) + if self._args: + x += ', args=' + str(self._args) + if self._kwargs: + x += ', kwargs=' + str(self._kwargs) + if self._key[0] is not None: + x += ', exitpriority=' + str(self._key[0]) + return x + '>' + + +def _run_finalizers(minpriority=None): + ''' + Run all finalizers whose exit priority is not None and at least minpriority + + Finalizers with highest priority are called first; finalizers with + the same priority will be called in reverse order of creation. + ''' + if _finalizer_registry is None: + # This function may be called after this module's globals are + # destroyed. See the _exit_function function in this module for more + # notes. + return + + if minpriority is None: + f = lambda p : p[0] is not None + else: + f = lambda p : p[0] is not None and p[0] >= minpriority + + # Careful: _finalizer_registry may be mutated while this function + # is running (either by a GC run or by another thread). + + # list(_finalizer_registry) should be atomic, while + # list(_finalizer_registry.items()) is not. 
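[Editor's note — usage sketch, not part of the vendored file.] The Finalize class above is how the package itself schedules cleanup. It is importable as multiprocessing.util.Finalize but is not a documented public API; the callback runs when the tracked object is garbage collected, or at interpreter shutdown ordered by exitpriority (higher runs first):

from multiprocessing import util

class Resource:
    def __init__(self, name):
        self.name = name
        # exitpriority >= 0 also makes the _run_finalizers(0) pass in
        # _exit_function() run this callback at process shutdown
        util.Finalize(self, print, args=('closing', name), exitpriority=10)

r = Resource('demo')
del r    # the weakref fires and the callback runs immediately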
+ keys = [key for key in list(_finalizer_registry) if f(key)] + keys.sort(reverse=True) + + for key in keys: + finalizer = _finalizer_registry.get(key) + # key may have been removed from the registry + if finalizer is not None: + sub_debug('calling %s', finalizer) + try: + finalizer() + except Exception: + import traceback + traceback.print_exc() + + if minpriority is None: + _finalizer_registry.clear() + +# +# Clean up on exit +# + +def is_exiting(): + ''' + Returns true if the process is shutting down + ''' + return _exiting or _exiting is None + +_exiting = False + +def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers, + active_children=process.active_children, + current_process=process.current_process): + # We hold on to references to functions in the arglist due to the + # situation described below, where this function is called after this + # module's globals are destroyed. + + global _exiting + + if not _exiting: + _exiting = True + + info('process shutting down') + debug('running all "atexit" finalizers with priority >= 0') + _run_finalizers(0) + + if current_process() is not None: + # We check if the current process is None here because if + # it's None, any call to ``active_children()`` will raise + # an AttributeError (active_children winds up trying to + # get attributes from util._current_process). One + # situation where this can happen is if someone has + # manipulated sys.modules, causing this module to be + # garbage collected. The destructor for the module type + # then replaces all values in the module dict with None. + # For instance, after setuptools runs a test it replaces + # sys.modules with a copy created earlier. See issues + # #9775 and #15881. Also related: #4106, #9205, and + # #9207. + + for p in active_children(): + if p.daemon: + info('calling terminate() for daemon %s', p.name) + p._popen.terminate() + + for p in active_children(): + info('calling join() for process %s', p.name) + p.join() + + debug('running the remaining "atexit" finalizers') + _run_finalizers() + +atexit.register(_exit_function) + +# +# Some fork aware types +# + +class ForkAwareThreadLock(object): + def __init__(self): + self._lock = threading.Lock() + self.acquire = self._lock.acquire + self.release = self._lock.release + register_after_fork(self, ForkAwareThreadLock._at_fork_reinit) + + def _at_fork_reinit(self): + self._lock._at_fork_reinit() + + def __enter__(self): + return self._lock.__enter__() + + def __exit__(self, *args): + return self._lock.__exit__(*args) + + +class ForkAwareLocal(threading.local): + def __init__(self): + register_after_fork(self, lambda obj : obj.__dict__.clear()) + def __reduce__(self): + return type(self), () + +# +# Close fds except those specified +# + +try: + MAXFD = os.sysconf("SC_OPEN_MAX") +except Exception: + MAXFD = 256 + +def close_all_fds_except(fds): + fds = list(fds) + [-1, MAXFD] + fds.sort() + assert fds[-1] == MAXFD, 'fd too large' + for i in range(len(fds) - 1): + os.closerange(fds[i]+1, fds[i+1]) +# +# Close sys.stdin and replace stdin with os.devnull +# + +def _close_stdin(): + if sys.stdin is None: + return + + try: + sys.stdin.close() + except (OSError, ValueError): + pass + + try: + fd = os.open(os.devnull, os.O_RDONLY) + try: + sys.stdin = open(fd, encoding="utf-8", closefd=False) + except: + os.close(fd) + raise + except (OSError, ValueError): + pass + +# +# Flush standard streams, if any +# + +def _flush_std_streams(): + try: + sys.stdout.flush() + except (AttributeError, ValueError): + pass + try: + 
sys.stderr.flush() + except (AttributeError, ValueError): + pass + +# +# Start a program with only specified fds kept open +# + +def spawnv_passfds(path, args, passfds): + import _posixsubprocess + passfds = tuple(sorted(map(int, passfds))) + errpipe_read, errpipe_write = os.pipe() + try: + return _posixsubprocess.fork_exec( + args, [os.fsencode(path)], True, passfds, None, None, + -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write, + False, False, None, None, None, -1, None) + finally: + os.close(errpipe_read) + os.close(errpipe_write) + + +def close_fds(*fds): + """Close each file descriptor given as an argument""" + for fd in fds: + os.close(fd) + + +def _cleanup_tests(): + """Cleanup multiprocessing resources when multiprocessing tests + completed.""" + + from test import support + + # cleanup multiprocessing + process._cleanup() + + # Stop the ForkServer process if it's running + from multiprocessing import forkserver + forkserver._forkserver._stop() + + # Stop the ResourceTracker process if it's running + from multiprocessing import resource_tracker + resource_tracker._resource_tracker._stop() + + # bpo-37421: Explicitly call _run_finalizers() to remove immediately + # temporary directories created by multiprocessing.util.get_temp_dir(). + _run_finalizers() + support.gc_collect() + + support.reap_children() diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..74e4c342c6a77a46821240232fd18db56ae6b6e2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_cpu_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
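[Editor's note — usage sketch, not part of the diff.] The at::cpu::_ctc_loss overloads this header declares are the CPU kernels behind the public CTC loss; from Python the same computation is reached through torch.nn.functional.ctc_loss:

import torch
import torch.nn.functional as F

T, N, C, S = 50, 4, 20, 10          # time steps, batch, classes, target length
log_probs = torch.randn(T, N, C).log_softmax(2)
targets = torch.randint(1, C, (N, S), dtype=torch.long)   # class 0 is the blank
input_lengths = torch.full((N,), T, dtype=torch.long)
target_lengths = torch.full((N,), S, dtype=torch.long)

loss = F.ctc_loss(log_probs, targets, input_lengths, target_lengths,
                  blank=0, zero_infinity=False)
print(loss.item())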
+#include
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false);
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, bool zero_infinity=false);
+
+} // namespace cpu
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sub.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sub.h
new file mode 100644
index 0000000000000000000000000000000000000000..a231c416add126a7b00c2b136fd48c340259234d
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sub.h
@@ -0,0 +1,82 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_foreach_sub.Scalar(Tensor[] self, Scalar scalar) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_sub(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_sub_Scalar::call(self, scalar);
+}
+
+// aten::_foreach_sub_.Scalar(Tensor(a!)[] self, Scalar scalar) -> ()
+inline void _foreach_sub_(at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_sub__Scalar::call(self, scalar);
+}
+
+// aten::_foreach_sub.List(Tensor[] self, Tensor[] other, *, Scalar alpha=1) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_sub(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
+    return at::_ops::_foreach_sub_List::call(self, other, alpha);
+}
+
+// aten::_foreach_sub_.List(Tensor(a!)[] self, Tensor[] other, *, Scalar alpha=1) -> ()
+inline void _foreach_sub_(at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
+    return at::_ops::_foreach_sub__List::call(self, other, alpha);
+}
+
+// aten::_foreach_sub.ScalarList(Tensor[] self, Scalar[] scalars) -> Tensor[]
+inline ::std::vector<at::Tensor> _foreach_sub(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_sub_ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_sub_.ScalarList(Tensor(a!)[] self, Scalar[] scalars) -> ()
+inline void _foreach_sub_(at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_sub__ScalarList::call(self, scalars);
+}
+
+// aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_sub_out(at::TensorList out, at::TensorList self, const at::Scalar & scalar) {
+    return at::_ops::_foreach_sub_Scalar_out::call(self, scalar, out);
+}
+// aten::_foreach_sub.Scalar_out(Tensor[] self, Scalar scalar, *, Tensor(a!)[] out) -> ()
+inline void _foreach_sub_outf(at::TensorList self, const at::Scalar & scalar, at::TensorList out) {
+    return at::_ops::_foreach_sub_Scalar_out::call(self, scalar, out);
+}
+
+// aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
+inline void _foreach_sub_out(at::TensorList out, at::TensorList self, at::TensorList other, const at::Scalar & alpha=1) {
+    return at::_ops::_foreach_sub_List_out::call(self, other, alpha, out);
+}
+// aten::_foreach_sub.List_out(Tensor[] self, Tensor[] other, *, Scalar alpha=1, Tensor(a!)[] out) -> ()
+inline void _foreach_sub_outf(at::TensorList self, at::TensorList other, const at::Scalar & alpha, at::TensorList out) {
+    return at::_ops::_foreach_sub_List_out::call(self, other, alpha, out);
+}
+
+// aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_sub_out(at::TensorList out, at::TensorList self, at::ArrayRef<at::Scalar> scalars) {
+    return at::_ops::_foreach_sub_ScalarList_out::call(self, scalars, out);
+}
+// aten::_foreach_sub.ScalarList_out(Tensor[] self, Scalar[] scalars, *, Tensor(a!)[] out) -> ()
+inline void _foreach_sub_outf(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out) {
+    return at::_ops::_foreach_sub_ScalarList_out::call(self, scalars, out);
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_backward_compositeimplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_backward_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a03572ce2dc1567ec8a6d2b36881b4f9ec3ad7d2
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_backward_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> _grid_sampler_2d_cpu_fallback_backward(const at::Tensor & grad_output, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..196c36ef32f978ad0c0be18e9f114607e6feab45
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_native.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+namespace at {
+namespace native {
+struct TORCH_API structured_softmax_cpu_out : public at::meta::structured__softmax {
+void impl(const at::Tensor & self, int64_t dim, bool half_to_float, const at::Tensor & out);
+};
+struct TORCH_API structured_softmax_cuda_out : public at::meta::structured__softmax {
+void impl(const at::Tensor & self, int64_t dim, bool half_to_float, const at::Tensor & out);
+};
+TORCH_API at::Tensor softmax_nested(const at::Tensor & self, int64_t dim, bool half_to_float);
+TORCH_API at::Tensor mkldnn_softmax(const at::Tensor & self, int64_t dim, bool half_to_float);
+} // namespace native
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe.h
new file mode 100644
index 0000000000000000000000000000000000000000..ae366aadf5e55253e75edc4b2ae545aece7eb719
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_bsc_tensor_unsafe.h
@@ -0,0 +1,34 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+
+
+#include
+
+namespace at {
+
+
+// aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}) {
+    return at::_ops::_sparse_bsc_tensor_unsafe::call(ccol_indices, row_indices, values, size, c10::optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
+}
+// aten::_sparse_bsc_tensor_unsafe(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
+inline at::Tensor _sparse_bsc_tensor_unsafe(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, ::std::optional<at::ScalarType> dtype, ::std::optional<at::Layout> layout, ::std::optional<at::Device> device, ::std::optional<bool> pin_memory) {
+    return at::_ops::_sparse_bsc_tensor_unsafe::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory);
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..7f9dcd4e1ed0ddf5157fc17f67554ef0cf9a6d69
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_test_autograd_multiple_dispatch_view_copy_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include
+#include
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
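[Editor's note — usage sketch, not part of the diff.] _sparse_bsc_tensor_unsafe above skips invariant checking; the validated Python-side constructor is torch.sparse_bsc_tensor (available in PyTorch >= 1.13). A 4x4 matrix stored as two 2x2 blocks in block-compressed-column form:

import torch

ccol_indices = torch.tensor([0, 1, 2])   # block-column pointers
row_indices = torch.tensor([0, 1])       # block-row of each stored block
values = torch.tensor([[[1., 2.],
                        [3., 4.]],
                       [[5., 6.],
                        [7., 8.]]])      # shape (nnz_blocks, 2, 2)

t = torch.sparse_bsc_tensor(ccol_indices, row_indices, values, size=(4, 4))
print(t.to_dense())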
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API _test_autograd_multiple_dispatch_view_copy { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_autograd_multiple_dispatch_view_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_autograd_multiple_dispatch_view_copy(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API _test_autograd_multiple_dispatch_view_copy_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_test_autograd_multiple_dispatch_view_copy") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_test_autograd_multiple_dispatch_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_backward_cuda_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cd1d4e8077a4084f8989d4b7c45e281f7b55170c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_gru_cell_backward_cuda_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
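[Editor's note — usage sketch, not part of the diff.] Each Operator.h struct above records the op's schema string and exposes static call/redispatch entry points; Python reaches the same entries through the torch.ops.aten namespace. Using a stable operator as a stand-in:

import torch

a = torch.tensor([4, 6])
b = torch.tensor([6, 9])
out = torch.ops.aten.lcm(a, b)           # the default ("") overload
buf = torch.empty_like(a)
torch.ops.aten.lcm.out(a, b, out=buf)    # the "out" overload writes in place
print(out, buf)                          # tensor([12, 18]) both times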
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple _thnn_fused_gru_cell_backward(const at::Tensor & grad_hy, const at::Tensor & workspace, bool has_bias); + +} // namespace cuda +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/acos_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/acos_native.h new file mode 100644 index 0000000000000000000000000000000000000000..1dbf39f8a4565bc4fa43caace5661a3631f34614 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/acos_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_acos_out : public at::meta::structured_acos { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_compositeexplicitautogradnonfunctional_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..278a318677bf49f8a7a2a20ff58a1ab9b49d234b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/atanh_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/atanh_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..af40c03df0c03e1157ca29f8ca87aad1d4e9527b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/atanh_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
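[Editor's note — usage sketch, not part of the diff.] The addmm declared above computes beta * self + alpha * (mat1 @ mat2); the declaration's defaults are beta=1 and alpha=1:

import torch

bias = torch.ones(2, 2)
mat1 = torch.randn(2, 3)
mat2 = torch.randn(3, 2)

out = torch.addmm(bias, mat1, mat2, beta=0.5, alpha=2.0)
assert torch.allclose(out, 0.5 * bias + 2.0 * (mat1 @ mat2))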
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor atanh(const at::Tensor & self); +TORCH_API at::Tensor & atanh_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & atanh_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & atanh_(at::Tensor & self); + +} // namespace cpu +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_min_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_min_native.h new file mode 100644 index 0000000000000000000000000000000000000000..7ec34c6ce542d4adb4d9da5dc92a10a217e64edf --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/clamp_min_native.h @@ -0,0 +1,26 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_clamp_min_out : public at::meta::structured_clamp_min { +void impl(const at::Tensor & self, const at::Scalar & min, const at::Tensor & out); +}; +struct TORCH_API structured_clamp_min_Tensor_out : public at::meta::structured_clamp_min_Tensor { +void impl(const at::Tensor & self, const at::Tensor & min, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_backward_compositeimplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_backward_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..702bfc6b5cf7e44fdbae0cbcb1520f0f0dc15f32 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_backward_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor cumprod_backward(const at::Tensor & grad, const at::Tensor & input, int64_t dim, const at::Tensor & output); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumulative_trapezoid_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumulative_trapezoid_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..b5b05288aef27f70e8f136858a1b721251da8b47 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/cumulative_trapezoid_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
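[Editor's note — usage sketch, not part of the diff.] The paired *_out/*_outf declarations in these headers are the same out= overload with two C++ argument orders (result first vs. result last); Python exposes a single out= keyword. Taking atanh from the header above:

import torch

x = torch.tensor([0.0, 0.5, -0.5])
buf = torch.empty(3)
torch.atanh(x, out=buf)                  # same kernel as atanh_out / atanh_outf
assert torch.allclose(buf, torch.atanh(x))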
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API cumulative_trapezoid_x { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumulative_trapezoid") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "x") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor") + static at::Tensor call(const at::Tensor & y, const at::Tensor & x, int64_t dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Tensor & x, int64_t dim); +}; + +struct TORCH_API cumulative_trapezoid_dx { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::cumulative_trapezoid") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "dx") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor") + static at::Tensor call(const at::Tensor & y, const at::Scalar & dx, int64_t dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & y, const at::Scalar & dx, int64_t dim); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/flip.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/flip.h new file mode 100644 index 0000000000000000000000000000000000000000..ccd5f1cfb2f526088c980fe54ffe94a1d35305aa --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/flip.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::flip(Tensor self, int[] dims) -> Tensor +inline at::Tensor flip(const at::Tensor & self, at::IntArrayRef dims) { + return at::_ops::flip::call(self, dims); +} + +// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & flip_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims) { + return at::_ops::flip_out::call(self, dims, out); +} +// aten::flip.out(Tensor self, int[] dims, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & flip_outf(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out) { + return at::_ops::flip_out::call(self, dims, out); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..7823e43b9a610546b1dee85303e896cacd481d69 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/geometric_compositeexplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. 
+// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor geometric(const at::Tensor & self, double p, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor & geometric_out(at::Tensor & out, const at::Tensor & self, double p, ::std::optional generator=::std::nullopt); +TORCH_API at::Tensor & geometric_outf(const at::Tensor & self, double p, ::std::optional generator, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_ops.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..ad560922ed827d4eeac753bd790fefe2f8b7ce94 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/lcm_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API lcm_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lcm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lcm.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API lcm { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lcm") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lcm(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API lcm_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::lcm_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "lcm_(Tensor(a!) 
self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..3a338f8a1d239b094a8dc264f26d0d1b1c5c07e1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_solve_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_linalg_ldl_solve : public at::impl::MetaBase { + + + void meta(const at::Tensor & LD, const at::Tensor & pivots, const at::Tensor & B, bool hermitian); +}; + +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f111b9e02b26d573b3299fccc7ffc4fa6f0a2cbe --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/logit_backward_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor logit_backward(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps=::std::nullopt); +TORCH_API at::Tensor & logit_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps=::std::nullopt); +TORCH_API at::Tensor & logit_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, ::std::optional eps, at::Tensor & grad_input); + +} // namespace meta +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..69891ac818f712ba34e2599885ce2c5c6a9891c2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/multilabel_margin_loss_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & multilabel_margin_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) { + return at::_ops::multilabel_margin_loss_backward_grad_input::call(grad_output, self, target, reduction, is_target, grad_input); +} +// aten::multilabel_margin_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & multilabel_margin_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target, at::Tensor & grad_input) { + return at::_ops::multilabel_margin_loss_backward_grad_input::call(grad_output, self, target, reduction, is_target, grad_input); +} + +// aten::multilabel_margin_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction, Tensor is_target) -> Tensor +inline at::Tensor multilabel_margin_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, const at::Tensor & is_target) { + return at::_ops::multilabel_margin_loss_backward::call(grad_output, self, target, reduction, is_target); +} + +} diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..2a144c8454ef80f54b0d9d1fb1e315c461ca76b8 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/permute_copy_compositeexplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
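[Editor's note — usage sketch, not part of the diff.] The backward declared above is invoked by autograd for the public forward, torch.nn.functional.multilabel_margin_loss; targets are per-row class indices padded with -1:

import torch
import torch.nn.functional as F

x = torch.randn(3, 5, requires_grad=True)
y = torch.tensor([[1, 2, -1, -1, -1],
                  [0, 3, 4, -1, -1],
                  [2, -1, -1, -1, -1]])

loss = F.multilabel_margin_loss(x, y)
loss.backward()           # dispatches to multilabel_margin_loss_backward
print(x.grad.shape)       # torch.Size([3, 5])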
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & permute_copy_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dims); +TORCH_API at::Tensor & permute_copy_outf(const at::Tensor & self, at::IntArrayRef dims, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/select_backward_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/select_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..bc25fcc1a11a033f7f51608e1ef7c240801b79d1 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/select_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & select_backward_out_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index, at::Tensor & out); +TORCH_API at::Tensor select_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef input_sizes, int64_t dim, c10::SymInt index); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/select_copy_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/select_copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9179e7e3e4599013de49c723c8838e9d9a72e85c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/select_copy_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & select_copy_int_out_symint(const at::Tensor & self, int64_t dim, c10::SymInt index, at::Tensor & out); +TORCH_API at::Tensor select_copy_sparse_csr(const at::Tensor & self, int64_t dim, int64_t index); +TORCH_API at::Tensor select_copy_symint(const at::Tensor & self, int64_t dim, c10::SymInt index); +} // namespace native +} // namespace at diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..51a6e04e8de70701100d2f47cf33926d5e8e85cc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward_cpu_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
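[Editor's note — usage sketch, not part of the diff.] select returns a view of self, while the select_copy variants declared above materialize the result; the symint overloads accept symbolic sizes when tracing. torch.select_copy is public in PyTorch >= 1.13:

import torch

x = torch.arange(12).reshape(3, 4)
view = x.select(0, 1)                  # shares storage with x
copy = torch.select_copy(x, 0, 1)      # owns its storage
x[1, 0] = 99
print(view[0].item(), copy[0].item())  # 99 vs. 4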
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..51a6e04e8de70701100d2f47cf33926d5e8e85cc
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/slow_conv3d_forward_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor slow_conv3d_forward(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding);
+TORCH_API at::Tensor slow_conv3d_forward_symint(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & slow_conv3d_forward_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding);
+TORCH_API at::Tensor & slow_conv3d_forward_outf(const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & output);
+TORCH_API at::Tensor & slow_conv3d_forward_symint_out(at::Tensor & output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding);
+TORCH_API at::Tensor & slow_conv3d_forward_symint_outf(const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, const ::std::optional<at::Tensor> & bias, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & output);
+
+} // namespace cpu
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_he_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_he_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..476663801cc3b3e1ffbf6c462394c7c385aba126
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_hermite_polynomial_he_native.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/special_hermite_polynomial_he_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_special_hermite_polynomial_he_out : public at::meta::structured_special_hermite_polynomial_he {
+void impl(const at::Tensor & x, const at::Tensor & n, const at::Tensor & out);
+};
+TORCH_API at::Tensor special_hermite_polynomial_he(const at::Scalar & x, const at::Tensor & n);
+TORCH_API at::Tensor & special_hermite_polynomial_he_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
+TORCH_API at::Tensor special_hermite_polynomial_he(const at::Tensor & x, const at::Scalar & n);
+TORCH_API at::Tensor & special_hermite_polynomial_he_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
+} // namespace native
+} // namespace at
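The `structured_special_hermite_polynomial_he_out` struct above is structured-kernel glue: shape and dtype checks live in the `at::meta::` base class, the math in `impl`, and callers go through the public wrappers. A minimal sketch (not part of the diff), with illustrative values:

    #include <ATen/ATen.h>

    void hermite_sketch() {
      at::Tensor x = at::linspace(-1.0, 1.0, 5);
      at::Tensor n = at::full({5}, 2.0);

      // Probabilists' Hermite polynomial He_n(x), elementwise; He_2(x) = x^2 - 1.
      at::Tensor y = at::special_hermite_polynomial_he(x, n);

      // The Scalar overloads declared above broadcast the scalar side.
      at::Tensor y2 = at::special_hermite_polynomial_he(x, at::Scalar(2.0));
    }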
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_cpu_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..36049fb8c569d819488536ece367000a01480527
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_i1e_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor special_i1e(const at::Tensor & self);
+TORCH_API at::Tensor & special_i1e_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & special_i1e_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_psi.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_psi.h
new file mode 100644
index 0000000000000000000000000000000000000000..35ecd7f1dc24882da09d9e2dff9577a7d93d91d3
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/special_psi.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/special_psi_ops.h>
+
+namespace at {
+
+
+// aten::special_psi(Tensor self) -> Tensor
+inline at::Tensor special_psi(const at::Tensor & self) {
+    return at::_ops::special_psi::call(self);
+}
+
+// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_psi_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::special_psi_out::call(self, out);
+}
+// aten::special_psi.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & special_psi_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::special_psi_out::call(self, out);
+}
+
+}
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/to_native.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/to_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..68479efde74b8a6f35ac6dd2a12d6f8fe15780ea
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/to_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor to(const at::Tensor & self, ::std::optional<at::ScalarType> dtype={}, ::std::optional<at::Layout> layout={}, ::std::optional<at::Device> device={}, ::std::optional<bool> pin_memory={}, bool non_blocking=false, bool copy=false, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+TORCH_API at::Tensor to(const at::Tensor & self, at::Device device, at::ScalarType dtype, bool non_blocking=false, bool copy=false, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+TORCH_API at::Tensor to(const at::Tensor & self, at::ScalarType dtype, bool non_blocking=false, bool copy=false, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+TORCH_API at::Tensor to(const at::Tensor & self, const at::Tensor & other, bool non_blocking=false, bool copy=false, ::std::optional<at::MemoryFormat> memory_format=::std::nullopt);
+} // namespace native
+} // namespace at
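The four `native::to` overloads above back the public `at::Tensor::to` API: by dtype/layout/device options, by device plus dtype, by dtype alone, or by matching another tensor's options. A minimal sketch (not part of the diff), with illustrative tensors:

    #include <ATen/ATen.h>

    void to_sketch() {
      at::Tensor t = at::randn({2, 2});

      at::Tensor a = t.to(at::kDouble);                       // dtype only
      at::Tensor b = t.to(at::Device(at::kCPU), at::kFloat);  // device + dtype
      at::Tensor c = t.to(a);                                 // match another tensor's dtype/device
    }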
diff --git a/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..9054bc9e2eeb9cbabe83cd443455ad272907d533
--- /dev/null
+++ b/parrot/lib/python3.10/site-packages/torch/include/ATen/ops/view_as_real_copy_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & view_as_real_copy_out(at::Tensor & out, const at::Tensor & self);
+TORCH_API at::Tensor & view_as_real_copy_outf(const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
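`view_as_real_copy` is the non-aliasing counterpart of `view_as_real`: it materializes a complex tensor as a real tensor with a trailing dimension of 2 rather than returning a view. A minimal sketch (not part of the diff) exercising the out-variant declared above through its public `at::` wrapper:

    #include <ATen/ATen.h>

    void view_as_real_copy_sketch() {
      at::Tensor z = at::randn({3}, at::kComplexFloat);

      at::Tensor r = at::view_as_real_copy(z);   // shape {3, 2}: real and imaginary parts
      at::Tensor out = at::empty({3, 2});
      at::view_as_real_copy_out(out, z);         // copies into a preallocated buffer
    }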