Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- llava/lib/python3.10/ensurepip/_bundled/pip-23.0.1-py3-none-any.whl +3 -0
- llava/lib/python3.10/lib2to3/PatternGrammar.txt +28 -0
- llava/lib/python3.10/lib2to3/__init__.py +8 -0
- llava/lib/python3.10/lib2to3/__main__.py +4 -0
- llava/lib/python3.10/lib2to3/btm_matcher.py +163 -0
- llava/lib/python3.10/lib2to3/btm_utils.py +281 -0
- llava/lib/python3.10/lib2to3/fixer_base.py +186 -0
- llava/lib/python3.10/lib2to3/fixer_util.py +453 -0
- llava/lib/python3.10/lib2to3/main.py +273 -0
- llava/lib/python3.10/lib2to3/patcomp.py +204 -0
- llava/lib/python3.10/lib2to3/pygram.py +43 -0
- llava/lib/python3.10/lib2to3/pytree.py +853 -0
- llava/lib/python3.10/multiprocessing/__pycache__/__init__.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/connection.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/context.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/forkserver.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/heap.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/managers.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/pool.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/popen_fork.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/popen_forkserver.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/popen_spawn_posix.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/popen_spawn_win32.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/process.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/queues.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/reduction.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/resource_sharer.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/resource_tracker.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/shared_memory.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/sharedctypes.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/spawn.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/synchronize.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/__pycache__/util.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/dummy/__init__.py +126 -0
- llava/lib/python3.10/multiprocessing/dummy/__pycache__/__init__.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/dummy/__pycache__/connection.cpython-310.pyc +0 -0
- llava/lib/python3.10/multiprocessing/dummy/connection.py +75 -0
- llava/lib/python3.10/multiprocessing/heap.py +337 -0
- llava/lib/python3.10/multiprocessing/pool.py +957 -0
- llava/lib/python3.10/multiprocessing/popen_fork.py +83 -0
- llava/lib/python3.10/multiprocessing/popen_spawn_win32.py +131 -0
- llava/lib/python3.10/multiprocessing/process.py +438 -0
- llava/lib/python3.10/multiprocessing/queues.py +379 -0
- llava/lib/python3.10/multiprocessing/resource_tracker.py +239 -0
- llava/lib/python3.10/multiprocessing/sharedctypes.py +240 -0
- llava/lib/python3.10/multiprocessing/spawn.py +297 -0
- llava/lib/python3.10/multiprocessing/synchronize.py +394 -0
- llava/lib/python3.10/multiprocessing/util.py +489 -0
- parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_cpu_dispatch.h +24 -0
.gitattributes
CHANGED
|
@@ -434,3 +434,4 @@ openflamingo/bin/python filter=lfs diff=lfs merge=lfs -text
|
|
| 434 |
llava/lib/libtcl8.6.so filter=lfs diff=lfs merge=lfs -text
|
| 435 |
llava/lib/python3.10/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
|
| 436 |
llava/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 434 |
llava/lib/libtcl8.6.so filter=lfs diff=lfs merge=lfs -text
|
| 435 |
llava/lib/python3.10/ensurepip/_bundled/setuptools-65.5.0-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
|
| 436 |
llava/lib/python3.10/tkinter/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 437 |
+
llava/lib/python3.10/ensurepip/_bundled/pip-23.0.1-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
|
llava/lib/python3.10/ensurepip/_bundled/pip-23.0.1-py3-none-any.whl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:236bcb61156d76c4b8a05821b988c7b8c35bf0da28a4b614e8d6ab5212c25c6f
|
| 3 |
+
size 2055563
|
llava/lib/python3.10/lib2to3/PatternGrammar.txt
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2006 Google, Inc. All Rights Reserved.
|
| 2 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 3 |
+
|
| 4 |
+
# A grammar to describe tree matching patterns.
|
| 5 |
+
# Not shown here:
|
| 6 |
+
# - 'TOKEN' stands for any token (leaf node)
|
| 7 |
+
# - 'any' stands for any node (leaf or interior)
|
| 8 |
+
# With 'any' we can still specify the sub-structure.
|
| 9 |
+
|
| 10 |
+
# The start symbol is 'Matcher'.
|
| 11 |
+
|
| 12 |
+
Matcher: Alternatives ENDMARKER
|
| 13 |
+
|
| 14 |
+
Alternatives: Alternative ('|' Alternative)*
|
| 15 |
+
|
| 16 |
+
Alternative: (Unit | NegatedUnit)+
|
| 17 |
+
|
| 18 |
+
Unit: [NAME '='] ( STRING [Repeater]
|
| 19 |
+
| NAME [Details] [Repeater]
|
| 20 |
+
| '(' Alternatives ')' [Repeater]
|
| 21 |
+
| '[' Alternatives ']'
|
| 22 |
+
)
|
| 23 |
+
|
| 24 |
+
NegatedUnit: 'not' (STRING | NAME [Details] | '(' Alternatives ')')
|
| 25 |
+
|
| 26 |
+
Repeater: '*' | '+' | '{' NUMBER [',' NUMBER] '}'
|
| 27 |
+
|
| 28 |
+
Details: '<' Alternatives '>'
|
llava/lib/python3.10/lib2to3/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import warnings
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
warnings.warn(
|
| 5 |
+
"lib2to3 package is deprecated and may not be able to parse Python 3.10+",
|
| 6 |
+
PendingDeprecationWarning,
|
| 7 |
+
stacklevel=2,
|
| 8 |
+
)
|
llava/lib/python3.10/lib2to3/__main__.py
ADDED
|
@@ -0,0 +1,4 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
from .main import main
|
| 3 |
+
|
| 4 |
+
sys.exit(main("lib2to3.fixes"))
|
llava/lib/python3.10/lib2to3/btm_matcher.py
ADDED
|
@@ -0,0 +1,163 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""A bottom-up tree matching algorithm implementation meant to speed
|
| 2 |
+
up 2to3's matching process. After the tree patterns are reduced to
|
| 3 |
+
their rarest linear path, a linear Aho-Corasick automaton is
|
| 4 |
+
created. The linear automaton traverses the linear paths from the
|
| 5 |
+
leaves to the root of the AST and returns a set of nodes for further
|
| 6 |
+
matching. This reduces significantly the number of candidate nodes."""
|
| 7 |
+
|
| 8 |
+
__author__ = "George Boutsioukis <gboutsioukis@gmail.com>"
|
| 9 |
+
|
| 10 |
+
import logging
|
| 11 |
+
import itertools
|
| 12 |
+
from collections import defaultdict
|
| 13 |
+
|
| 14 |
+
from . import pytree
|
| 15 |
+
from .btm_utils import reduce_tree
|
| 16 |
+
|
| 17 |
+
class BMNode(object):
|
| 18 |
+
"""Class for a node of the Aho-Corasick automaton used in matching"""
|
| 19 |
+
count = itertools.count()
|
| 20 |
+
def __init__(self):
|
| 21 |
+
self.transition_table = {}
|
| 22 |
+
self.fixers = []
|
| 23 |
+
self.id = next(BMNode.count)
|
| 24 |
+
self.content = ''
|
| 25 |
+
|
| 26 |
+
class BottomMatcher(object):
|
| 27 |
+
"""The main matcher class. After instantiating the patterns should
|
| 28 |
+
be added using the add_fixer method"""
|
| 29 |
+
|
| 30 |
+
def __init__(self):
|
| 31 |
+
self.match = set()
|
| 32 |
+
self.root = BMNode()
|
| 33 |
+
self.nodes = [self.root]
|
| 34 |
+
self.fixers = []
|
| 35 |
+
self.logger = logging.getLogger("RefactoringTool")
|
| 36 |
+
|
| 37 |
+
def add_fixer(self, fixer):
|
| 38 |
+
"""Reduces a fixer's pattern tree to a linear path and adds it
|
| 39 |
+
to the matcher(a common Aho-Corasick automaton). The fixer is
|
| 40 |
+
appended on the matching states and called when they are
|
| 41 |
+
reached"""
|
| 42 |
+
self.fixers.append(fixer)
|
| 43 |
+
tree = reduce_tree(fixer.pattern_tree)
|
| 44 |
+
linear = tree.get_linear_subpattern()
|
| 45 |
+
match_nodes = self.add(linear, start=self.root)
|
| 46 |
+
for match_node in match_nodes:
|
| 47 |
+
match_node.fixers.append(fixer)
|
| 48 |
+
|
| 49 |
+
def add(self, pattern, start):
|
| 50 |
+
"Recursively adds a linear pattern to the AC automaton"
|
| 51 |
+
#print("adding pattern", pattern, "to", start)
|
| 52 |
+
if not pattern:
|
| 53 |
+
#print("empty pattern")
|
| 54 |
+
return [start]
|
| 55 |
+
if isinstance(pattern[0], tuple):
|
| 56 |
+
#alternatives
|
| 57 |
+
#print("alternatives")
|
| 58 |
+
match_nodes = []
|
| 59 |
+
for alternative in pattern[0]:
|
| 60 |
+
#add all alternatives, and add the rest of the pattern
|
| 61 |
+
#to each end node
|
| 62 |
+
end_nodes = self.add(alternative, start=start)
|
| 63 |
+
for end in end_nodes:
|
| 64 |
+
match_nodes.extend(self.add(pattern[1:], end))
|
| 65 |
+
return match_nodes
|
| 66 |
+
else:
|
| 67 |
+
#single token
|
| 68 |
+
#not last
|
| 69 |
+
if pattern[0] not in start.transition_table:
|
| 70 |
+
#transition did not exist, create new
|
| 71 |
+
next_node = BMNode()
|
| 72 |
+
start.transition_table[pattern[0]] = next_node
|
| 73 |
+
else:
|
| 74 |
+
#transition exists already, follow
|
| 75 |
+
next_node = start.transition_table[pattern[0]]
|
| 76 |
+
|
| 77 |
+
if pattern[1:]:
|
| 78 |
+
end_nodes = self.add(pattern[1:], start=next_node)
|
| 79 |
+
else:
|
| 80 |
+
end_nodes = [next_node]
|
| 81 |
+
return end_nodes
|
| 82 |
+
|
| 83 |
+
def run(self, leaves):
|
| 84 |
+
"""The main interface with the bottom matcher. The tree is
|
| 85 |
+
traversed from the bottom using the constructed
|
| 86 |
+
automaton. Nodes are only checked once as the tree is
|
| 87 |
+
retraversed. When the automaton fails, we give it one more
|
| 88 |
+
shot(in case the above tree matches as a whole with the
|
| 89 |
+
rejected leaf), then we break for the next leaf. There is the
|
| 90 |
+
special case of multiple arguments(see code comments) where we
|
| 91 |
+
recheck the nodes
|
| 92 |
+
|
| 93 |
+
Args:
|
| 94 |
+
The leaves of the AST tree to be matched
|
| 95 |
+
|
| 96 |
+
Returns:
|
| 97 |
+
A dictionary of node matches with fixers as the keys
|
| 98 |
+
"""
|
| 99 |
+
current_ac_node = self.root
|
| 100 |
+
results = defaultdict(list)
|
| 101 |
+
for leaf in leaves:
|
| 102 |
+
current_ast_node = leaf
|
| 103 |
+
while current_ast_node:
|
| 104 |
+
current_ast_node.was_checked = True
|
| 105 |
+
for child in current_ast_node.children:
|
| 106 |
+
# multiple statements, recheck
|
| 107 |
+
if isinstance(child, pytree.Leaf) and child.value == ";":
|
| 108 |
+
current_ast_node.was_checked = False
|
| 109 |
+
break
|
| 110 |
+
if current_ast_node.type == 1:
|
| 111 |
+
#name
|
| 112 |
+
node_token = current_ast_node.value
|
| 113 |
+
else:
|
| 114 |
+
node_token = current_ast_node.type
|
| 115 |
+
|
| 116 |
+
if node_token in current_ac_node.transition_table:
|
| 117 |
+
#token matches
|
| 118 |
+
current_ac_node = current_ac_node.transition_table[node_token]
|
| 119 |
+
for fixer in current_ac_node.fixers:
|
| 120 |
+
results[fixer].append(current_ast_node)
|
| 121 |
+
else:
|
| 122 |
+
#matching failed, reset automaton
|
| 123 |
+
current_ac_node = self.root
|
| 124 |
+
if (current_ast_node.parent is not None
|
| 125 |
+
and current_ast_node.parent.was_checked):
|
| 126 |
+
#the rest of the tree upwards has been checked, next leaf
|
| 127 |
+
break
|
| 128 |
+
|
| 129 |
+
#recheck the rejected node once from the root
|
| 130 |
+
if node_token in current_ac_node.transition_table:
|
| 131 |
+
#token matches
|
| 132 |
+
current_ac_node = current_ac_node.transition_table[node_token]
|
| 133 |
+
for fixer in current_ac_node.fixers:
|
| 134 |
+
results[fixer].append(current_ast_node)
|
| 135 |
+
|
| 136 |
+
current_ast_node = current_ast_node.parent
|
| 137 |
+
return results
|
| 138 |
+
|
| 139 |
+
def print_ac(self):
|
| 140 |
+
"Prints a graphviz diagram of the BM automaton(for debugging)"
|
| 141 |
+
print("digraph g{")
|
| 142 |
+
def print_node(node):
|
| 143 |
+
for subnode_key in node.transition_table.keys():
|
| 144 |
+
subnode = node.transition_table[subnode_key]
|
| 145 |
+
print("%d -> %d [label=%s] //%s" %
|
| 146 |
+
(node.id, subnode.id, type_repr(subnode_key), str(subnode.fixers)))
|
| 147 |
+
if subnode_key == 1:
|
| 148 |
+
print(subnode.content)
|
| 149 |
+
print_node(subnode)
|
| 150 |
+
print_node(self.root)
|
| 151 |
+
print("}")
|
| 152 |
+
|
| 153 |
+
# taken from pytree.py for debugging; only used by print_ac
|
| 154 |
+
_type_reprs = {}
|
| 155 |
+
def type_repr(type_num):
|
| 156 |
+
global _type_reprs
|
| 157 |
+
if not _type_reprs:
|
| 158 |
+
from .pygram import python_symbols
|
| 159 |
+
# printing tokens is possible but not as useful
|
| 160 |
+
# from .pgen2 import token // token.__dict__.items():
|
| 161 |
+
for name, val in python_symbols.__dict__.items():
|
| 162 |
+
if type(val) == int: _type_reprs[val] = name
|
| 163 |
+
return _type_reprs.setdefault(type_num, type_num)
|
llava/lib/python3.10/lib2to3/btm_utils.py
ADDED
|
@@ -0,0 +1,281 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"Utility functions used by the btm_matcher module"
|
| 2 |
+
|
| 3 |
+
from . import pytree
|
| 4 |
+
from .pgen2 import grammar, token
|
| 5 |
+
from .pygram import pattern_symbols, python_symbols
|
| 6 |
+
|
| 7 |
+
syms = pattern_symbols
|
| 8 |
+
pysyms = python_symbols
|
| 9 |
+
tokens = grammar.opmap
|
| 10 |
+
token_labels = token
|
| 11 |
+
|
| 12 |
+
TYPE_ANY = -1
|
| 13 |
+
TYPE_ALTERNATIVES = -2
|
| 14 |
+
TYPE_GROUP = -3
|
| 15 |
+
|
| 16 |
+
class MinNode(object):
|
| 17 |
+
"""This class serves as an intermediate representation of the
|
| 18 |
+
pattern tree during the conversion to sets of leaf-to-root
|
| 19 |
+
subpatterns"""
|
| 20 |
+
|
| 21 |
+
def __init__(self, type=None, name=None):
|
| 22 |
+
self.type = type
|
| 23 |
+
self.name = name
|
| 24 |
+
self.children = []
|
| 25 |
+
self.leaf = False
|
| 26 |
+
self.parent = None
|
| 27 |
+
self.alternatives = []
|
| 28 |
+
self.group = []
|
| 29 |
+
|
| 30 |
+
def __repr__(self):
|
| 31 |
+
return str(self.type) + ' ' + str(self.name)
|
| 32 |
+
|
| 33 |
+
def leaf_to_root(self):
|
| 34 |
+
"""Internal method. Returns a characteristic path of the
|
| 35 |
+
pattern tree. This method must be run for all leaves until the
|
| 36 |
+
linear subpatterns are merged into a single"""
|
| 37 |
+
node = self
|
| 38 |
+
subp = []
|
| 39 |
+
while node:
|
| 40 |
+
if node.type == TYPE_ALTERNATIVES:
|
| 41 |
+
node.alternatives.append(subp)
|
| 42 |
+
if len(node.alternatives) == len(node.children):
|
| 43 |
+
#last alternative
|
| 44 |
+
subp = [tuple(node.alternatives)]
|
| 45 |
+
node.alternatives = []
|
| 46 |
+
node = node.parent
|
| 47 |
+
continue
|
| 48 |
+
else:
|
| 49 |
+
node = node.parent
|
| 50 |
+
subp = None
|
| 51 |
+
break
|
| 52 |
+
|
| 53 |
+
if node.type == TYPE_GROUP:
|
| 54 |
+
node.group.append(subp)
|
| 55 |
+
#probably should check the number of leaves
|
| 56 |
+
if len(node.group) == len(node.children):
|
| 57 |
+
subp = get_characteristic_subpattern(node.group)
|
| 58 |
+
node.group = []
|
| 59 |
+
node = node.parent
|
| 60 |
+
continue
|
| 61 |
+
else:
|
| 62 |
+
node = node.parent
|
| 63 |
+
subp = None
|
| 64 |
+
break
|
| 65 |
+
|
| 66 |
+
if node.type == token_labels.NAME and node.name:
|
| 67 |
+
#in case of type=name, use the name instead
|
| 68 |
+
subp.append(node.name)
|
| 69 |
+
else:
|
| 70 |
+
subp.append(node.type)
|
| 71 |
+
|
| 72 |
+
node = node.parent
|
| 73 |
+
return subp
|
| 74 |
+
|
| 75 |
+
def get_linear_subpattern(self):
|
| 76 |
+
"""Drives the leaf_to_root method. The reason that
|
| 77 |
+
leaf_to_root must be run multiple times is because we need to
|
| 78 |
+
reject 'group' matches; for example the alternative form
|
| 79 |
+
(a | b c) creates a group [b c] that needs to be matched. Since
|
| 80 |
+
matching multiple linear patterns overcomes the automaton's
|
| 81 |
+
capabilities, leaf_to_root merges each group into a single
|
| 82 |
+
choice based on 'characteristic'ity,
|
| 83 |
+
|
| 84 |
+
i.e. (a|b c) -> (a|b) if b more characteristic than c
|
| 85 |
+
|
| 86 |
+
Returns: The most 'characteristic'(as defined by
|
| 87 |
+
get_characteristic_subpattern) path for the compiled pattern
|
| 88 |
+
tree.
|
| 89 |
+
"""
|
| 90 |
+
|
| 91 |
+
for l in self.leaves():
|
| 92 |
+
subp = l.leaf_to_root()
|
| 93 |
+
if subp:
|
| 94 |
+
return subp
|
| 95 |
+
|
| 96 |
+
def leaves(self):
|
| 97 |
+
"Generator that returns the leaves of the tree"
|
| 98 |
+
for child in self.children:
|
| 99 |
+
yield from child.leaves()
|
| 100 |
+
if not self.children:
|
| 101 |
+
yield self
|
| 102 |
+
|
| 103 |
+
def reduce_tree(node, parent=None):
|
| 104 |
+
"""
|
| 105 |
+
Internal function. Reduces a compiled pattern tree to an
|
| 106 |
+
intermediate representation suitable for feeding the
|
| 107 |
+
automaton. This also trims off any optional pattern elements(like
|
| 108 |
+
[a], a*).
|
| 109 |
+
"""
|
| 110 |
+
|
| 111 |
+
new_node = None
|
| 112 |
+
#switch on the node type
|
| 113 |
+
if node.type == syms.Matcher:
|
| 114 |
+
#skip
|
| 115 |
+
node = node.children[0]
|
| 116 |
+
|
| 117 |
+
if node.type == syms.Alternatives :
|
| 118 |
+
#2 cases
|
| 119 |
+
if len(node.children) <= 2:
|
| 120 |
+
#just a single 'Alternative', skip this node
|
| 121 |
+
new_node = reduce_tree(node.children[0], parent)
|
| 122 |
+
else:
|
| 123 |
+
#real alternatives
|
| 124 |
+
new_node = MinNode(type=TYPE_ALTERNATIVES)
|
| 125 |
+
#skip odd children('|' tokens)
|
| 126 |
+
for child in node.children:
|
| 127 |
+
if node.children.index(child)%2:
|
| 128 |
+
continue
|
| 129 |
+
reduced = reduce_tree(child, new_node)
|
| 130 |
+
if reduced is not None:
|
| 131 |
+
new_node.children.append(reduced)
|
| 132 |
+
elif node.type == syms.Alternative:
|
| 133 |
+
if len(node.children) > 1:
|
| 134 |
+
|
| 135 |
+
new_node = MinNode(type=TYPE_GROUP)
|
| 136 |
+
for child in node.children:
|
| 137 |
+
reduced = reduce_tree(child, new_node)
|
| 138 |
+
if reduced:
|
| 139 |
+
new_node.children.append(reduced)
|
| 140 |
+
if not new_node.children:
|
| 141 |
+
# delete the group if all of the children were reduced to None
|
| 142 |
+
new_node = None
|
| 143 |
+
|
| 144 |
+
else:
|
| 145 |
+
new_node = reduce_tree(node.children[0], parent)
|
| 146 |
+
|
| 147 |
+
elif node.type == syms.Unit:
|
| 148 |
+
if (isinstance(node.children[0], pytree.Leaf) and
|
| 149 |
+
node.children[0].value == '('):
|
| 150 |
+
#skip parentheses
|
| 151 |
+
return reduce_tree(node.children[1], parent)
|
| 152 |
+
if ((isinstance(node.children[0], pytree.Leaf) and
|
| 153 |
+
node.children[0].value == '[')
|
| 154 |
+
or
|
| 155 |
+
(len(node.children)>1 and
|
| 156 |
+
hasattr(node.children[1], "value") and
|
| 157 |
+
node.children[1].value == '[')):
|
| 158 |
+
#skip whole unit if its optional
|
| 159 |
+
return None
|
| 160 |
+
|
| 161 |
+
leaf = True
|
| 162 |
+
details_node = None
|
| 163 |
+
alternatives_node = None
|
| 164 |
+
has_repeater = False
|
| 165 |
+
repeater_node = None
|
| 166 |
+
has_variable_name = False
|
| 167 |
+
|
| 168 |
+
for child in node.children:
|
| 169 |
+
if child.type == syms.Details:
|
| 170 |
+
leaf = False
|
| 171 |
+
details_node = child
|
| 172 |
+
elif child.type == syms.Repeater:
|
| 173 |
+
has_repeater = True
|
| 174 |
+
repeater_node = child
|
| 175 |
+
elif child.type == syms.Alternatives:
|
| 176 |
+
alternatives_node = child
|
| 177 |
+
if hasattr(child, 'value') and child.value == '=': # variable name
|
| 178 |
+
has_variable_name = True
|
| 179 |
+
|
| 180 |
+
#skip variable name
|
| 181 |
+
if has_variable_name:
|
| 182 |
+
#skip variable name, '='
|
| 183 |
+
name_leaf = node.children[2]
|
| 184 |
+
if hasattr(name_leaf, 'value') and name_leaf.value == '(':
|
| 185 |
+
# skip parenthesis
|
| 186 |
+
name_leaf = node.children[3]
|
| 187 |
+
else:
|
| 188 |
+
name_leaf = node.children[0]
|
| 189 |
+
|
| 190 |
+
#set node type
|
| 191 |
+
if name_leaf.type == token_labels.NAME:
|
| 192 |
+
#(python) non-name or wildcard
|
| 193 |
+
if name_leaf.value == 'any':
|
| 194 |
+
new_node = MinNode(type=TYPE_ANY)
|
| 195 |
+
else:
|
| 196 |
+
if hasattr(token_labels, name_leaf.value):
|
| 197 |
+
new_node = MinNode(type=getattr(token_labels, name_leaf.value))
|
| 198 |
+
else:
|
| 199 |
+
new_node = MinNode(type=getattr(pysyms, name_leaf.value))
|
| 200 |
+
|
| 201 |
+
elif name_leaf.type == token_labels.STRING:
|
| 202 |
+
#(python) name or character; remove the apostrophes from
|
| 203 |
+
#the string value
|
| 204 |
+
name = name_leaf.value.strip("'")
|
| 205 |
+
if name in tokens:
|
| 206 |
+
new_node = MinNode(type=tokens[name])
|
| 207 |
+
else:
|
| 208 |
+
new_node = MinNode(type=token_labels.NAME, name=name)
|
| 209 |
+
elif name_leaf.type == syms.Alternatives:
|
| 210 |
+
new_node = reduce_tree(alternatives_node, parent)
|
| 211 |
+
|
| 212 |
+
#handle repeaters
|
| 213 |
+
if has_repeater:
|
| 214 |
+
if repeater_node.children[0].value == '*':
|
| 215 |
+
#reduce to None
|
| 216 |
+
new_node = None
|
| 217 |
+
elif repeater_node.children[0].value == '+':
|
| 218 |
+
#reduce to a single occurrence i.e. do nothing
|
| 219 |
+
pass
|
| 220 |
+
else:
|
| 221 |
+
#TODO: handle {min, max} repeaters
|
| 222 |
+
raise NotImplementedError
|
| 223 |
+
pass
|
| 224 |
+
|
| 225 |
+
#add children
|
| 226 |
+
if details_node and new_node is not None:
|
| 227 |
+
for child in details_node.children[1:-1]:
|
| 228 |
+
#skip '<', '>' markers
|
| 229 |
+
reduced = reduce_tree(child, new_node)
|
| 230 |
+
if reduced is not None:
|
| 231 |
+
new_node.children.append(reduced)
|
| 232 |
+
if new_node:
|
| 233 |
+
new_node.parent = parent
|
| 234 |
+
return new_node
|
| 235 |
+
|
| 236 |
+
|
| 237 |
+
def get_characteristic_subpattern(subpatterns):
|
| 238 |
+
"""Picks the most characteristic from a list of linear patterns
|
| 239 |
+
Current order used is:
|
| 240 |
+
names > common_names > common_chars
|
| 241 |
+
"""
|
| 242 |
+
if not isinstance(subpatterns, list):
|
| 243 |
+
return subpatterns
|
| 244 |
+
if len(subpatterns)==1:
|
| 245 |
+
return subpatterns[0]
|
| 246 |
+
|
| 247 |
+
# first pick out the ones containing variable names
|
| 248 |
+
subpatterns_with_names = []
|
| 249 |
+
subpatterns_with_common_names = []
|
| 250 |
+
common_names = ['in', 'for', 'if' , 'not', 'None']
|
| 251 |
+
subpatterns_with_common_chars = []
|
| 252 |
+
common_chars = "[]().,:"
|
| 253 |
+
for subpattern in subpatterns:
|
| 254 |
+
if any(rec_test(subpattern, lambda x: type(x) is str)):
|
| 255 |
+
if any(rec_test(subpattern,
|
| 256 |
+
lambda x: isinstance(x, str) and x in common_chars)):
|
| 257 |
+
subpatterns_with_common_chars.append(subpattern)
|
| 258 |
+
elif any(rec_test(subpattern,
|
| 259 |
+
lambda x: isinstance(x, str) and x in common_names)):
|
| 260 |
+
subpatterns_with_common_names.append(subpattern)
|
| 261 |
+
|
| 262 |
+
else:
|
| 263 |
+
subpatterns_with_names.append(subpattern)
|
| 264 |
+
|
| 265 |
+
if subpatterns_with_names:
|
| 266 |
+
subpatterns = subpatterns_with_names
|
| 267 |
+
elif subpatterns_with_common_names:
|
| 268 |
+
subpatterns = subpatterns_with_common_names
|
| 269 |
+
elif subpatterns_with_common_chars:
|
| 270 |
+
subpatterns = subpatterns_with_common_chars
|
| 271 |
+
# of the remaining subpatterns pick out the longest one
|
| 272 |
+
return max(subpatterns, key=len)
|
| 273 |
+
|
| 274 |
+
def rec_test(sequence, test_func):
|
| 275 |
+
"""Tests test_func on all items of sequence and items of included
|
| 276 |
+
sub-iterables"""
|
| 277 |
+
for x in sequence:
|
| 278 |
+
if isinstance(x, (list, tuple)):
|
| 279 |
+
yield from rec_test(x, test_func)
|
| 280 |
+
else:
|
| 281 |
+
yield test_func(x)
|
llava/lib/python3.10/lib2to3/fixer_base.py
ADDED
|
@@ -0,0 +1,186 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2006 Google, Inc. All Rights Reserved.
|
| 2 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 3 |
+
|
| 4 |
+
"""Base class for fixers (optional, but recommended)."""
|
| 5 |
+
|
| 6 |
+
# Python imports
|
| 7 |
+
import itertools
|
| 8 |
+
|
| 9 |
+
# Local imports
|
| 10 |
+
from .patcomp import PatternCompiler
|
| 11 |
+
from . import pygram
|
| 12 |
+
from .fixer_util import does_tree_import
|
| 13 |
+
|
| 14 |
+
class BaseFix(object):
|
| 15 |
+
|
| 16 |
+
"""Optional base class for fixers.
|
| 17 |
+
|
| 18 |
+
The subclass name must be FixFooBar where FooBar is the result of
|
| 19 |
+
removing underscores and capitalizing the words of the fix name.
|
| 20 |
+
For example, the class name for a fixer named 'has_key' should be
|
| 21 |
+
FixHasKey.
|
| 22 |
+
"""
|
| 23 |
+
|
| 24 |
+
PATTERN = None # Most subclasses should override with a string literal
|
| 25 |
+
pattern = None # Compiled pattern, set by compile_pattern()
|
| 26 |
+
pattern_tree = None # Tree representation of the pattern
|
| 27 |
+
options = None # Options object passed to initializer
|
| 28 |
+
filename = None # The filename (set by set_filename)
|
| 29 |
+
numbers = itertools.count(1) # For new_name()
|
| 30 |
+
used_names = set() # A set of all used NAMEs
|
| 31 |
+
order = "post" # Does the fixer prefer pre- or post-order traversal
|
| 32 |
+
explicit = False # Is this ignored by refactor.py -f all?
|
| 33 |
+
run_order = 5 # Fixers will be sorted by run order before execution
|
| 34 |
+
# Lower numbers will be run first.
|
| 35 |
+
_accept_type = None # [Advanced and not public] This tells RefactoringTool
|
| 36 |
+
# which node type to accept when there's not a pattern.
|
| 37 |
+
|
| 38 |
+
keep_line_order = False # For the bottom matcher: match with the
|
| 39 |
+
# original line order
|
| 40 |
+
BM_compatible = False # Compatibility with the bottom matching
|
| 41 |
+
# module; every fixer should set this
|
| 42 |
+
# manually
|
| 43 |
+
|
| 44 |
+
# Shortcut for access to Python grammar symbols
|
| 45 |
+
syms = pygram.python_symbols
|
| 46 |
+
|
| 47 |
+
def __init__(self, options, log):
|
| 48 |
+
"""Initializer. Subclass may override.
|
| 49 |
+
|
| 50 |
+
Args:
|
| 51 |
+
options: a dict containing the options passed to RefactoringTool
|
| 52 |
+
that could be used to customize the fixer through the command line.
|
| 53 |
+
log: a list to append warnings and other messages to.
|
| 54 |
+
"""
|
| 55 |
+
self.options = options
|
| 56 |
+
self.log = log
|
| 57 |
+
self.compile_pattern()
|
| 58 |
+
|
| 59 |
+
def compile_pattern(self):
|
| 60 |
+
"""Compiles self.PATTERN into self.pattern.
|
| 61 |
+
|
| 62 |
+
Subclass may override if it doesn't want to use
|
| 63 |
+
self.{pattern,PATTERN} in .match().
|
| 64 |
+
"""
|
| 65 |
+
if self.PATTERN is not None:
|
| 66 |
+
PC = PatternCompiler()
|
| 67 |
+
self.pattern, self.pattern_tree = PC.compile_pattern(self.PATTERN,
|
| 68 |
+
with_tree=True)
|
| 69 |
+
|
| 70 |
+
def set_filename(self, filename):
|
| 71 |
+
"""Set the filename.
|
| 72 |
+
|
| 73 |
+
The main refactoring tool should call this.
|
| 74 |
+
"""
|
| 75 |
+
self.filename = filename
|
| 76 |
+
|
| 77 |
+
def match(self, node):
|
| 78 |
+
"""Returns match for a given parse tree node.
|
| 79 |
+
|
| 80 |
+
Should return a true or false object (not necessarily a bool).
|
| 81 |
+
It may return a non-empty dict of matching sub-nodes as
|
| 82 |
+
returned by a matching pattern.
|
| 83 |
+
|
| 84 |
+
Subclass may override.
|
| 85 |
+
"""
|
| 86 |
+
results = {"node": node}
|
| 87 |
+
return self.pattern.match(node, results) and results
|
| 88 |
+
|
| 89 |
+
def transform(self, node, results):
|
| 90 |
+
"""Returns the transformation for a given parse tree node.
|
| 91 |
+
|
| 92 |
+
Args:
|
| 93 |
+
node: the root of the parse tree that matched the fixer.
|
| 94 |
+
results: a dict mapping symbolic names to part of the match.
|
| 95 |
+
|
| 96 |
+
Returns:
|
| 97 |
+
None, or a node that is a modified copy of the
|
| 98 |
+
argument node. The node argument may also be modified in-place to
|
| 99 |
+
effect the same change.
|
| 100 |
+
|
| 101 |
+
Subclass *must* override.
|
| 102 |
+
"""
|
| 103 |
+
raise NotImplementedError()
|
| 104 |
+
|
| 105 |
+
def new_name(self, template="xxx_todo_changeme"):
|
| 106 |
+
"""Return a string suitable for use as an identifier
|
| 107 |
+
|
| 108 |
+
The new name is guaranteed not to conflict with other identifiers.
|
| 109 |
+
"""
|
| 110 |
+
name = template
|
| 111 |
+
while name in self.used_names:
|
| 112 |
+
name = template + str(next(self.numbers))
|
| 113 |
+
self.used_names.add(name)
|
| 114 |
+
return name
|
| 115 |
+
|
| 116 |
+
def log_message(self, message):
|
| 117 |
+
if self.first_log:
|
| 118 |
+
self.first_log = False
|
| 119 |
+
self.log.append("### In file %s ###" % self.filename)
|
| 120 |
+
self.log.append(message)
|
| 121 |
+
|
| 122 |
+
def cannot_convert(self, node, reason=None):
|
| 123 |
+
"""Warn the user that a given chunk of code is not valid Python 3,
|
| 124 |
+
but that it cannot be converted automatically.
|
| 125 |
+
|
| 126 |
+
First argument is the top-level node for the code in question.
|
| 127 |
+
Optional second argument is why it can't be converted.
|
| 128 |
+
"""
|
| 129 |
+
lineno = node.get_lineno()
|
| 130 |
+
for_output = node.clone()
|
| 131 |
+
for_output.prefix = ""
|
| 132 |
+
msg = "Line %d: could not convert: %s"
|
| 133 |
+
self.log_message(msg % (lineno, for_output))
|
| 134 |
+
if reason:
|
| 135 |
+
self.log_message(reason)
|
| 136 |
+
|
| 137 |
+
def warning(self, node, reason):
|
| 138 |
+
"""Used for warning the user about possible uncertainty in the
|
| 139 |
+
translation.
|
| 140 |
+
|
| 141 |
+
First argument is the top-level node for the code in question.
|
| 142 |
+
Optional second argument is why it can't be converted.
|
| 143 |
+
"""
|
| 144 |
+
lineno = node.get_lineno()
|
| 145 |
+
self.log_message("Line %d: %s" % (lineno, reason))
|
| 146 |
+
|
| 147 |
+
def start_tree(self, tree, filename):
|
| 148 |
+
"""Some fixers need to maintain tree-wide state.
|
| 149 |
+
This method is called once, at the start of tree fix-up.
|
| 150 |
+
|
| 151 |
+
tree - the root node of the tree to be processed.
|
| 152 |
+
filename - the name of the file the tree came from.
|
| 153 |
+
"""
|
| 154 |
+
self.used_names = tree.used_names
|
| 155 |
+
self.set_filename(filename)
|
| 156 |
+
self.numbers = itertools.count(1)
|
| 157 |
+
self.first_log = True
|
| 158 |
+
|
| 159 |
+
def finish_tree(self, tree, filename):
|
| 160 |
+
"""Some fixers need to maintain tree-wide state.
|
| 161 |
+
This method is called once, at the conclusion of tree fix-up.
|
| 162 |
+
|
| 163 |
+
tree - the root node of the tree to be processed.
|
| 164 |
+
filename - the name of the file the tree came from.
|
| 165 |
+
"""
|
| 166 |
+
pass
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
class ConditionalFix(BaseFix):
|
| 170 |
+
""" Base class for fixers which not execute if an import is found. """
|
| 171 |
+
|
| 172 |
+
# This is the name of the import which, if found, will cause the test to be skipped
|
| 173 |
+
skip_on = None
|
| 174 |
+
|
| 175 |
+
def start_tree(self, *args):
|
| 176 |
+
super(ConditionalFix, self).start_tree(*args)
|
| 177 |
+
self._should_skip = None
|
| 178 |
+
|
| 179 |
+
def should_skip(self, node):
|
| 180 |
+
if self._should_skip is not None:
|
| 181 |
+
return self._should_skip
|
| 182 |
+
pkg = self.skip_on.split(".")
|
| 183 |
+
name = pkg[-1]
|
| 184 |
+
pkg = ".".join(pkg[:-1])
|
| 185 |
+
self._should_skip = does_tree_import(pkg, name, node)
|
| 186 |
+
return self._should_skip
|
llava/lib/python3.10/lib2to3/fixer_util.py
ADDED
|
@@ -0,0 +1,453 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Utility functions, node construction macros, etc."""
|
| 2 |
+
# Author: Collin Winter
|
| 3 |
+
|
| 4 |
+
# Local imports
|
| 5 |
+
from .pgen2 import token
|
| 6 |
+
from .pytree import Leaf, Node
|
| 7 |
+
from .pygram import python_symbols as syms
|
| 8 |
+
from . import patcomp
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
###########################################################
|
| 12 |
+
### Common node-construction "macros"
|
| 13 |
+
###########################################################
|
| 14 |
+
|
| 15 |
+
def KeywordArg(keyword, value):
|
| 16 |
+
return Node(syms.argument,
|
| 17 |
+
[keyword, Leaf(token.EQUAL, "="), value])
|
| 18 |
+
|
| 19 |
+
def LParen():
|
| 20 |
+
return Leaf(token.LPAR, "(")
|
| 21 |
+
|
| 22 |
+
def RParen():
|
| 23 |
+
return Leaf(token.RPAR, ")")
|
| 24 |
+
|
| 25 |
+
def Assign(target, source):
|
| 26 |
+
"""Build an assignment statement"""
|
| 27 |
+
if not isinstance(target, list):
|
| 28 |
+
target = [target]
|
| 29 |
+
if not isinstance(source, list):
|
| 30 |
+
source.prefix = " "
|
| 31 |
+
source = [source]
|
| 32 |
+
|
| 33 |
+
return Node(syms.atom,
|
| 34 |
+
target + [Leaf(token.EQUAL, "=", prefix=" ")] + source)
|
| 35 |
+
|
| 36 |
+
def Name(name, prefix=None):
|
| 37 |
+
"""Return a NAME leaf"""
|
| 38 |
+
return Leaf(token.NAME, name, prefix=prefix)
|
| 39 |
+
|
| 40 |
+
def Attr(obj, attr):
|
| 41 |
+
"""A node tuple for obj.attr"""
|
| 42 |
+
return [obj, Node(syms.trailer, [Dot(), attr])]
|
| 43 |
+
|
| 44 |
+
def Comma():
|
| 45 |
+
"""A comma leaf"""
|
| 46 |
+
return Leaf(token.COMMA, ",")
|
| 47 |
+
|
| 48 |
+
def Dot():
|
| 49 |
+
"""A period (.) leaf"""
|
| 50 |
+
return Leaf(token.DOT, ".")
|
| 51 |
+
|
| 52 |
+
def ArgList(args, lparen=LParen(), rparen=RParen()):
|
| 53 |
+
"""A parenthesised argument list, used by Call()"""
|
| 54 |
+
node = Node(syms.trailer, [lparen.clone(), rparen.clone()])
|
| 55 |
+
if args:
|
| 56 |
+
node.insert_child(1, Node(syms.arglist, args))
|
| 57 |
+
return node
|
| 58 |
+
|
| 59 |
+
def Call(func_name, args=None, prefix=None):
|
| 60 |
+
"""A function call"""
|
| 61 |
+
node = Node(syms.power, [func_name, ArgList(args)])
|
| 62 |
+
if prefix is not None:
|
| 63 |
+
node.prefix = prefix
|
| 64 |
+
return node
|
| 65 |
+
|
| 66 |
+
def Newline():
|
| 67 |
+
"""A newline literal"""
|
| 68 |
+
return Leaf(token.NEWLINE, "\n")
|
| 69 |
+
|
| 70 |
+
def BlankLine():
|
| 71 |
+
"""A blank line"""
|
| 72 |
+
return Leaf(token.NEWLINE, "")
|
| 73 |
+
|
| 74 |
+
def Number(n, prefix=None):
|
| 75 |
+
return Leaf(token.NUMBER, n, prefix=prefix)
|
| 76 |
+
|
| 77 |
+
def Subscript(index_node):
|
| 78 |
+
"""A numeric or string subscript"""
|
| 79 |
+
return Node(syms.trailer, [Leaf(token.LBRACE, "["),
|
| 80 |
+
index_node,
|
| 81 |
+
Leaf(token.RBRACE, "]")])
|
| 82 |
+
|
| 83 |
+
def String(string, prefix=None):
|
| 84 |
+
"""A string leaf"""
|
| 85 |
+
return Leaf(token.STRING, string, prefix=prefix)
|
| 86 |
+
|
| 87 |
+
def ListComp(xp, fp, it, test=None):
|
| 88 |
+
"""A list comprehension of the form [xp for fp in it if test].
|
| 89 |
+
|
| 90 |
+
If test is None, the "if test" part is omitted.
|
| 91 |
+
"""
|
| 92 |
+
xp.prefix = ""
|
| 93 |
+
fp.prefix = " "
|
| 94 |
+
it.prefix = " "
|
| 95 |
+
for_leaf = Leaf(token.NAME, "for")
|
| 96 |
+
for_leaf.prefix = " "
|
| 97 |
+
in_leaf = Leaf(token.NAME, "in")
|
| 98 |
+
in_leaf.prefix = " "
|
| 99 |
+
inner_args = [for_leaf, fp, in_leaf, it]
|
| 100 |
+
if test:
|
| 101 |
+
test.prefix = " "
|
| 102 |
+
if_leaf = Leaf(token.NAME, "if")
|
| 103 |
+
if_leaf.prefix = " "
|
| 104 |
+
inner_args.append(Node(syms.comp_if, [if_leaf, test]))
|
| 105 |
+
inner = Node(syms.listmaker, [xp, Node(syms.comp_for, inner_args)])
|
| 106 |
+
return Node(syms.atom,
|
| 107 |
+
[Leaf(token.LBRACE, "["),
|
| 108 |
+
inner,
|
| 109 |
+
Leaf(token.RBRACE, "]")])
|
| 110 |
+
|
| 111 |
+
def FromImport(package_name, name_leafs):
|
| 112 |
+
""" Return an import statement in the form:
|
| 113 |
+
from package import name_leafs"""
|
| 114 |
+
# XXX: May not handle dotted imports properly (eg, package_name='foo.bar')
|
| 115 |
+
#assert package_name == '.' or '.' not in package_name, "FromImport has "\
|
| 116 |
+
# "not been tested with dotted package names -- use at your own "\
|
| 117 |
+
# "peril!"
|
| 118 |
+
|
| 119 |
+
for leaf in name_leafs:
|
| 120 |
+
# Pull the leaves out of their old tree
|
| 121 |
+
leaf.remove()
|
| 122 |
+
|
| 123 |
+
children = [Leaf(token.NAME, "from"),
|
| 124 |
+
Leaf(token.NAME, package_name, prefix=" "),
|
| 125 |
+
Leaf(token.NAME, "import", prefix=" "),
|
| 126 |
+
Node(syms.import_as_names, name_leafs)]
|
| 127 |
+
imp = Node(syms.import_from, children)
|
| 128 |
+
return imp
|
| 129 |
+
|
| 130 |
+
def ImportAndCall(node, results, names):
|
| 131 |
+
"""Returns an import statement and calls a method
|
| 132 |
+
of the module:
|
| 133 |
+
|
| 134 |
+
import module
|
| 135 |
+
module.name()"""
|
| 136 |
+
obj = results["obj"].clone()
|
| 137 |
+
if obj.type == syms.arglist:
|
| 138 |
+
newarglist = obj.clone()
|
| 139 |
+
else:
|
| 140 |
+
newarglist = Node(syms.arglist, [obj.clone()])
|
| 141 |
+
after = results["after"]
|
| 142 |
+
if after:
|
| 143 |
+
after = [n.clone() for n in after]
|
| 144 |
+
new = Node(syms.power,
|
| 145 |
+
Attr(Name(names[0]), Name(names[1])) +
|
| 146 |
+
[Node(syms.trailer,
|
| 147 |
+
[results["lpar"].clone(),
|
| 148 |
+
newarglist,
|
| 149 |
+
results["rpar"].clone()])] + after)
|
| 150 |
+
new.prefix = node.prefix
|
| 151 |
+
return new
|
| 152 |
+
|
| 153 |
+
|
| 154 |
+
###########################################################
|
| 155 |
+
### Determine whether a node represents a given literal
|
| 156 |
+
###########################################################
|
| 157 |
+
|
| 158 |
+
def is_tuple(node):
|
| 159 |
+
"""Does the node represent a tuple literal?"""
|
| 160 |
+
if isinstance(node, Node) and node.children == [LParen(), RParen()]:
|
| 161 |
+
return True
|
| 162 |
+
return (isinstance(node, Node)
|
| 163 |
+
and len(node.children) == 3
|
| 164 |
+
and isinstance(node.children[0], Leaf)
|
| 165 |
+
and isinstance(node.children[1], Node)
|
| 166 |
+
and isinstance(node.children[2], Leaf)
|
| 167 |
+
and node.children[0].value == "("
|
| 168 |
+
and node.children[2].value == ")")
|
| 169 |
+
|
| 170 |
+
def is_list(node):
|
| 171 |
+
"""Does the node represent a list literal?"""
|
| 172 |
+
return (isinstance(node, Node)
|
| 173 |
+
and len(node.children) > 1
|
| 174 |
+
and isinstance(node.children[0], Leaf)
|
| 175 |
+
and isinstance(node.children[-1], Leaf)
|
| 176 |
+
and node.children[0].value == "["
|
| 177 |
+
and node.children[-1].value == "]")
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
###########################################################
|
| 181 |
+
### Misc
|
| 182 |
+
###########################################################
|
| 183 |
+
|
| 184 |
+
def parenthesize(node):
|
| 185 |
+
return Node(syms.atom, [LParen(), node, RParen()])
|
| 186 |
+
|
| 187 |
+
|
| 188 |
+
consuming_calls = {"sorted", "list", "set", "any", "all", "tuple", "sum",
|
| 189 |
+
"min", "max", "enumerate"}
|
| 190 |
+
|
| 191 |
+
def attr_chain(obj, attr):
|
| 192 |
+
"""Follow an attribute chain.
|
| 193 |
+
|
| 194 |
+
If you have a chain of objects where a.foo -> b, b.foo-> c, etc,
|
| 195 |
+
use this to iterate over all objects in the chain. Iteration is
|
| 196 |
+
terminated by getattr(x, attr) is None.
|
| 197 |
+
|
| 198 |
+
Args:
|
| 199 |
+
obj: the starting object
|
| 200 |
+
attr: the name of the chaining attribute
|
| 201 |
+
|
| 202 |
+
Yields:
|
| 203 |
+
Each successive object in the chain.
|
| 204 |
+
"""
|
| 205 |
+
next = getattr(obj, attr)
|
| 206 |
+
while next:
|
| 207 |
+
yield next
|
| 208 |
+
next = getattr(next, attr)
|
| 209 |
+
|
| 210 |
+
p0 = """for_stmt< 'for' any 'in' node=any ':' any* >
|
| 211 |
+
| comp_for< 'for' any 'in' node=any any* >
|
| 212 |
+
"""
|
| 213 |
+
p1 = """
|
| 214 |
+
power<
|
| 215 |
+
( 'iter' | 'list' | 'tuple' | 'sorted' | 'set' | 'sum' |
|
| 216 |
+
'any' | 'all' | 'enumerate' | (any* trailer< '.' 'join' >) )
|
| 217 |
+
trailer< '(' node=any ')' >
|
| 218 |
+
any*
|
| 219 |
+
>
|
| 220 |
+
"""
|
| 221 |
+
p2 = """
|
| 222 |
+
power<
|
| 223 |
+
( 'sorted' | 'enumerate' )
|
| 224 |
+
trailer< '(' arglist<node=any any*> ')' >
|
| 225 |
+
any*
|
| 226 |
+
>
|
| 227 |
+
"""
|
| 228 |
+
pats_built = False
|
| 229 |
+
def in_special_context(node):
|
| 230 |
+
""" Returns true if node is in an environment where all that is required
|
| 231 |
+
of it is being iterable (ie, it doesn't matter if it returns a list
|
| 232 |
+
or an iterator).
|
| 233 |
+
See test_map_nochange in test_fixers.py for some examples and tests.
|
| 234 |
+
"""
|
| 235 |
+
global p0, p1, p2, pats_built
|
| 236 |
+
if not pats_built:
|
| 237 |
+
p0 = patcomp.compile_pattern(p0)
|
| 238 |
+
p1 = patcomp.compile_pattern(p1)
|
| 239 |
+
p2 = patcomp.compile_pattern(p2)
|
| 240 |
+
pats_built = True
|
| 241 |
+
patterns = [p0, p1, p2]
|
| 242 |
+
for pattern, parent in zip(patterns, attr_chain(node, "parent")):
|
| 243 |
+
results = {}
|
| 244 |
+
if pattern.match(parent, results) and results["node"] is node:
|
| 245 |
+
return True
|
| 246 |
+
return False
|
| 247 |
+
|
| 248 |
+
def is_probably_builtin(node):
|
| 249 |
+
"""
|
| 250 |
+
Check that something isn't an attribute or function name etc.
|
| 251 |
+
"""
|
| 252 |
+
prev = node.prev_sibling
|
| 253 |
+
if prev is not None and prev.type == token.DOT:
|
| 254 |
+
# Attribute lookup.
|
| 255 |
+
return False
|
| 256 |
+
parent = node.parent
|
| 257 |
+
if parent.type in (syms.funcdef, syms.classdef):
|
| 258 |
+
return False
|
| 259 |
+
if parent.type == syms.expr_stmt and parent.children[0] is node:
|
| 260 |
+
# Assignment.
|
| 261 |
+
return False
|
| 262 |
+
if parent.type == syms.parameters or \
|
| 263 |
+
(parent.type == syms.typedargslist and (
|
| 264 |
+
(prev is not None and prev.type == token.COMMA) or
|
| 265 |
+
parent.children[0] is node
|
| 266 |
+
)):
|
| 267 |
+
# The name of an argument.
|
| 268 |
+
return False
|
| 269 |
+
return True
|
| 270 |
+
|
| 271 |
+
def find_indentation(node):
|
| 272 |
+
"""Find the indentation of *node*."""
|
| 273 |
+
while node is not None:
|
| 274 |
+
if node.type == syms.suite and len(node.children) > 2:
|
| 275 |
+
indent = node.children[1]
|
| 276 |
+
if indent.type == token.INDENT:
|
| 277 |
+
return indent.value
|
| 278 |
+
node = node.parent
|
| 279 |
+
return ""
|
| 280 |
+
|
| 281 |
+
###########################################################
|
| 282 |
+
### The following functions are to find bindings in a suite
|
| 283 |
+
###########################################################
|
| 284 |
+
|
| 285 |
+
def make_suite(node):
|
| 286 |
+
if node.type == syms.suite:
|
| 287 |
+
return node
|
| 288 |
+
node = node.clone()
|
| 289 |
+
parent, node.parent = node.parent, None
|
| 290 |
+
suite = Node(syms.suite, [node])
|
| 291 |
+
suite.parent = parent
|
| 292 |
+
return suite
|
| 293 |
+
|
| 294 |
+
def find_root(node):
|
| 295 |
+
"""Find the top level namespace."""
|
| 296 |
+
# Scamper up to the top level namespace
|
| 297 |
+
while node.type != syms.file_input:
|
| 298 |
+
node = node.parent
|
| 299 |
+
if not node:
|
| 300 |
+
raise ValueError("root found before file_input node was found.")
|
| 301 |
+
return node
|
| 302 |
+
|
| 303 |
+
def does_tree_import(package, name, node):
|
| 304 |
+
""" Returns true if name is imported from package at the
|
| 305 |
+
top level of the tree which node belongs to.
|
| 306 |
+
To cover the case of an import like 'import foo', use
|
| 307 |
+
None for the package and 'foo' for the name. """
|
| 308 |
+
binding = find_binding(name, find_root(node), package)
|
| 309 |
+
return bool(binding)
|
| 310 |
+
|
| 311 |
+
def is_import(node):
|
| 312 |
+
"""Returns true if the node is an import statement."""
|
| 313 |
+
return node.type in (syms.import_name, syms.import_from)
|
| 314 |
+
|
| 315 |
+
def touch_import(package, name, node):
|
| 316 |
+
""" Works like `does_tree_import` but adds an import statement
|
| 317 |
+
if it was not imported. """
|
| 318 |
+
def is_import_stmt(node):
|
| 319 |
+
return (node.type == syms.simple_stmt and node.children and
|
| 320 |
+
is_import(node.children[0]))
|
| 321 |
+
|
| 322 |
+
root = find_root(node)
|
| 323 |
+
|
| 324 |
+
if does_tree_import(package, name, root):
|
| 325 |
+
return
|
| 326 |
+
|
| 327 |
+
# figure out where to insert the new import. First try to find
|
| 328 |
+
# the first import and then skip to the last one.
|
| 329 |
+
insert_pos = offset = 0
|
| 330 |
+
for idx, node in enumerate(root.children):
|
| 331 |
+
if not is_import_stmt(node):
|
| 332 |
+
continue
|
| 333 |
+
for offset, node2 in enumerate(root.children[idx:]):
|
| 334 |
+
if not is_import_stmt(node2):
|
| 335 |
+
break
|
| 336 |
+
insert_pos = idx + offset
|
| 337 |
+
break
|
| 338 |
+
|
| 339 |
+
# if there are no imports where we can insert, find the docstring.
|
| 340 |
+
# if that also fails, we stick to the beginning of the file
|
| 341 |
+
if insert_pos == 0:
|
| 342 |
+
for idx, node in enumerate(root.children):
|
| 343 |
+
if (node.type == syms.simple_stmt and node.children and
|
| 344 |
+
node.children[0].type == token.STRING):
|
| 345 |
+
insert_pos = idx + 1
|
| 346 |
+
break
|
| 347 |
+
|
| 348 |
+
if package is None:
|
| 349 |
+
import_ = Node(syms.import_name, [
|
| 350 |
+
Leaf(token.NAME, "import"),
|
| 351 |
+
Leaf(token.NAME, name, prefix=" ")
|
| 352 |
+
])
|
| 353 |
+
else:
|
| 354 |
+
import_ = FromImport(package, [Leaf(token.NAME, name, prefix=" ")])
|
| 355 |
+
|
| 356 |
+
children = [import_, Newline()]
|
| 357 |
+
root.insert_child(insert_pos, Node(syms.simple_stmt, children))
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
_def_syms = {syms.classdef, syms.funcdef}
|
| 361 |
+
def find_binding(name, node, package=None):
|
| 362 |
+
""" Returns the node which binds variable name, otherwise None.
|
| 363 |
+
If optional argument package is supplied, only imports will
|
| 364 |
+
be returned.
|
| 365 |
+
See test cases for examples."""
|
| 366 |
+
for child in node.children:
|
| 367 |
+
ret = None
|
| 368 |
+
if child.type == syms.for_stmt:
|
| 369 |
+
if _find(name, child.children[1]):
|
| 370 |
+
return child
|
| 371 |
+
n = find_binding(name, make_suite(child.children[-1]), package)
|
| 372 |
+
if n: ret = n
|
| 373 |
+
elif child.type in (syms.if_stmt, syms.while_stmt):
|
| 374 |
+
n = find_binding(name, make_suite(child.children[-1]), package)
|
| 375 |
+
if n: ret = n
|
| 376 |
+
elif child.type == syms.try_stmt:
|
| 377 |
+
n = find_binding(name, make_suite(child.children[2]), package)
|
| 378 |
+
if n:
|
| 379 |
+
ret = n
|
| 380 |
+
else:
|
| 381 |
+
for i, kid in enumerate(child.children[3:]):
|
| 382 |
+
if kid.type == token.COLON and kid.value == ":":
|
| 383 |
+
# i+3 is the colon, i+4 is the suite
|
| 384 |
+
n = find_binding(name, make_suite(child.children[i+4]), package)
|
| 385 |
+
if n: ret = n
|
| 386 |
+
elif child.type in _def_syms and child.children[1].value == name:
|
| 387 |
+
ret = child
|
| 388 |
+
elif _is_import_binding(child, name, package):
|
| 389 |
+
ret = child
|
| 390 |
+
elif child.type == syms.simple_stmt:
|
| 391 |
+
ret = find_binding(name, child, package)
|
| 392 |
+
elif child.type == syms.expr_stmt:
|
| 393 |
+
if _find(name, child.children[0]):
|
| 394 |
+
ret = child
|
| 395 |
+
|
| 396 |
+
if ret:
|
| 397 |
+
if not package:
|
| 398 |
+
return ret
|
| 399 |
+
if is_import(ret):
|
| 400 |
+
return ret
|
| 401 |
+
return None
|
| 402 |
+
|
| 403 |
+
_block_syms = {syms.funcdef, syms.classdef, syms.trailer}
|
| 404 |
+
def _find(name, node):
|
| 405 |
+
nodes = [node]
|
| 406 |
+
while nodes:
|
| 407 |
+
node = nodes.pop()
|
| 408 |
+
if node.type > 256 and node.type not in _block_syms:
|
| 409 |
+
nodes.extend(node.children)
|
| 410 |
+
elif node.type == token.NAME and node.value == name:
|
| 411 |
+
return node
|
| 412 |
+
return None
|
| 413 |
+
|
| 414 |
+
def _is_import_binding(node, name, package=None):
|
| 415 |
+
""" Will return node if node will import name, or node
|
| 416 |
+
will import * from package. None is returned otherwise.
|
| 417 |
+
See test cases for examples. """
|
| 418 |
+
|
| 419 |
+
if node.type == syms.import_name and not package:
|
| 420 |
+
imp = node.children[1]
|
| 421 |
+
if imp.type == syms.dotted_as_names:
|
| 422 |
+
for child in imp.children:
|
| 423 |
+
if child.type == syms.dotted_as_name:
|
| 424 |
+
if child.children[2].value == name:
|
| 425 |
+
return node
|
| 426 |
+
elif child.type == token.NAME and child.value == name:
|
| 427 |
+
return node
|
| 428 |
+
elif imp.type == syms.dotted_as_name:
|
| 429 |
+
last = imp.children[-1]
|
| 430 |
+
if last.type == token.NAME and last.value == name:
|
| 431 |
+
return node
|
| 432 |
+
elif imp.type == token.NAME and imp.value == name:
|
| 433 |
+
return node
|
| 434 |
+
elif node.type == syms.import_from:
|
| 435 |
+
# str(...) is used to make life easier here, because
|
| 436 |
+
# from a.b import parses to ['import', ['a', '.', 'b'], ...]
|
| 437 |
+
if package and str(node.children[1]).strip() != package:
|
| 438 |
+
return None
|
| 439 |
+
n = node.children[3]
|
| 440 |
+
if package and _find("as", n):
|
| 441 |
+
# See test_from_import_as for explanation
|
| 442 |
+
return None
|
| 443 |
+
elif n.type == syms.import_as_names and _find(name, n):
|
| 444 |
+
return node
|
| 445 |
+
elif n.type == syms.import_as_name:
|
| 446 |
+
child = n.children[2]
|
| 447 |
+
if child.type == token.NAME and child.value == name:
|
| 448 |
+
return node
|
| 449 |
+
elif n.type == token.NAME and n.value == name:
|
| 450 |
+
return node
|
| 451 |
+
elif package and n.type == token.STAR:
|
| 452 |
+
return node
|
| 453 |
+
return None
|
llava/lib/python3.10/lib2to3/main.py
ADDED
|
@@ -0,0 +1,273 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Main program for 2to3.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
from __future__ import with_statement, print_function
|
| 6 |
+
|
| 7 |
+
import sys
|
| 8 |
+
import os
|
| 9 |
+
import difflib
|
| 10 |
+
import logging
|
| 11 |
+
import shutil
|
| 12 |
+
import optparse
|
| 13 |
+
|
| 14 |
+
from . import refactor
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def diff_texts(a, b, filename):
|
| 18 |
+
"""Return a unified diff of two strings."""
|
| 19 |
+
a = a.splitlines()
|
| 20 |
+
b = b.splitlines()
|
| 21 |
+
return difflib.unified_diff(a, b, filename, filename,
|
| 22 |
+
"(original)", "(refactored)",
|
| 23 |
+
lineterm="")
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class StdoutRefactoringTool(refactor.MultiprocessRefactoringTool):
|
| 27 |
+
"""
|
| 28 |
+
A refactoring tool that can avoid overwriting its input files.
|
| 29 |
+
Prints output to stdout.
|
| 30 |
+
|
| 31 |
+
Output files can optionally be written to a different directory and or
|
| 32 |
+
have an extra file suffix appended to their name for use in situations
|
| 33 |
+
where you do not want to replace the input files.
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
def __init__(self, fixers, options, explicit, nobackups, show_diffs,
|
| 37 |
+
input_base_dir='', output_dir='', append_suffix=''):
|
| 38 |
+
"""
|
| 39 |
+
Args:
|
| 40 |
+
fixers: A list of fixers to import.
|
| 41 |
+
options: A dict with RefactoringTool configuration.
|
| 42 |
+
explicit: A list of fixers to run even if they are explicit.
|
| 43 |
+
nobackups: If true no backup '.bak' files will be created for those
|
| 44 |
+
files that are being refactored.
|
| 45 |
+
show_diffs: Should diffs of the refactoring be printed to stdout?
|
| 46 |
+
input_base_dir: The base directory for all input files. This class
|
| 47 |
+
will strip this path prefix off of filenames before substituting
|
| 48 |
+
it with output_dir. Only meaningful if output_dir is supplied.
|
| 49 |
+
All files processed by refactor() must start with this path.
|
| 50 |
+
output_dir: If supplied, all converted files will be written into
|
| 51 |
+
this directory tree instead of input_base_dir.
|
| 52 |
+
append_suffix: If supplied, all files output by this tool will have
|
| 53 |
+
this appended to their filename. Useful for changing .py to
|
| 54 |
+
.py3 for example by passing append_suffix='3'.
|
| 55 |
+
"""
|
| 56 |
+
self.nobackups = nobackups
|
| 57 |
+
self.show_diffs = show_diffs
|
| 58 |
+
if input_base_dir and not input_base_dir.endswith(os.sep):
|
| 59 |
+
input_base_dir += os.sep
|
| 60 |
+
self._input_base_dir = input_base_dir
|
| 61 |
+
self._output_dir = output_dir
|
| 62 |
+
self._append_suffix = append_suffix
|
| 63 |
+
super(StdoutRefactoringTool, self).__init__(fixers, options, explicit)
|
| 64 |
+
|
| 65 |
+
def log_error(self, msg, *args, **kwargs):
|
| 66 |
+
self.errors.append((msg, args, kwargs))
|
| 67 |
+
self.logger.error(msg, *args, **kwargs)
|
| 68 |
+
|
| 69 |
+
def write_file(self, new_text, filename, old_text, encoding):
|
| 70 |
+
orig_filename = filename
|
| 71 |
+
if self._output_dir:
|
| 72 |
+
if filename.startswith(self._input_base_dir):
|
| 73 |
+
filename = os.path.join(self._output_dir,
|
| 74 |
+
filename[len(self._input_base_dir):])
|
| 75 |
+
else:
|
| 76 |
+
raise ValueError('filename %s does not start with the '
|
| 77 |
+
'input_base_dir %s' % (
|
| 78 |
+
filename, self._input_base_dir))
|
| 79 |
+
if self._append_suffix:
|
| 80 |
+
filename += self._append_suffix
|
| 81 |
+
if orig_filename != filename:
|
| 82 |
+
output_dir = os.path.dirname(filename)
|
| 83 |
+
if not os.path.isdir(output_dir) and output_dir:
|
| 84 |
+
os.makedirs(output_dir)
|
| 85 |
+
self.log_message('Writing converted %s to %s.', orig_filename,
|
| 86 |
+
filename)
|
| 87 |
+
if not self.nobackups:
|
| 88 |
+
# Make backup
|
| 89 |
+
backup = filename + ".bak"
|
| 90 |
+
if os.path.lexists(backup):
|
| 91 |
+
try:
|
| 92 |
+
os.remove(backup)
|
| 93 |
+
except OSError:
|
| 94 |
+
self.log_message("Can't remove backup %s", backup)
|
| 95 |
+
try:
|
| 96 |
+
os.rename(filename, backup)
|
| 97 |
+
except OSError:
|
| 98 |
+
self.log_message("Can't rename %s to %s", filename, backup)
|
| 99 |
+
# Actually write the new file
|
| 100 |
+
write = super(StdoutRefactoringTool, self).write_file
|
| 101 |
+
write(new_text, filename, old_text, encoding)
|
| 102 |
+
if not self.nobackups:
|
| 103 |
+
shutil.copymode(backup, filename)
|
| 104 |
+
if orig_filename != filename:
|
| 105 |
+
# Preserve the file mode in the new output directory.
|
| 106 |
+
shutil.copymode(orig_filename, filename)
|
| 107 |
+
|
| 108 |
+
def print_output(self, old, new, filename, equal):
|
| 109 |
+
if equal:
|
| 110 |
+
self.log_message("No changes to %s", filename)
|
| 111 |
+
else:
|
| 112 |
+
self.log_message("Refactored %s", filename)
|
| 113 |
+
if self.show_diffs:
|
| 114 |
+
diff_lines = diff_texts(old, new, filename)
|
| 115 |
+
try:
|
| 116 |
+
if self.output_lock is not None:
|
| 117 |
+
with self.output_lock:
|
| 118 |
+
for line in diff_lines:
|
| 119 |
+
print(line)
|
| 120 |
+
sys.stdout.flush()
|
| 121 |
+
else:
|
| 122 |
+
for line in diff_lines:
|
| 123 |
+
print(line)
|
| 124 |
+
except UnicodeEncodeError:
|
| 125 |
+
warn("couldn't encode %s's diff for your terminal" %
|
| 126 |
+
(filename,))
|
| 127 |
+
return
|
| 128 |
+
|
| 129 |
+
def warn(msg):
|
| 130 |
+
print("WARNING: %s" % (msg,), file=sys.stderr)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def main(fixer_pkg, args=None):
|
| 134 |
+
"""Main program.
|
| 135 |
+
|
| 136 |
+
Args:
|
| 137 |
+
fixer_pkg: the name of a package where the fixers are located.
|
| 138 |
+
args: optional; a list of command line arguments. If omitted,
|
| 139 |
+
sys.argv[1:] is used.
|
| 140 |
+
|
| 141 |
+
Returns a suggested exit status (0, 1, 2).
|
| 142 |
+
"""
|
| 143 |
+
# Set up option parser
|
| 144 |
+
parser = optparse.OptionParser(usage="2to3 [options] file|dir ...")
|
| 145 |
+
parser.add_option("-d", "--doctests_only", action="store_true",
|
| 146 |
+
help="Fix up doctests only")
|
| 147 |
+
parser.add_option("-f", "--fix", action="append", default=[],
|
| 148 |
+
help="Each FIX specifies a transformation; default: all")
|
| 149 |
+
parser.add_option("-j", "--processes", action="store", default=1,
|
| 150 |
+
type="int", help="Run 2to3 concurrently")
|
| 151 |
+
parser.add_option("-x", "--nofix", action="append", default=[],
|
| 152 |
+
help="Prevent a transformation from being run")
|
| 153 |
+
parser.add_option("-l", "--list-fixes", action="store_true",
|
| 154 |
+
help="List available transformations")
|
| 155 |
+
parser.add_option("-p", "--print-function", action="store_true",
|
| 156 |
+
help="Modify the grammar so that print() is a function")
|
| 157 |
+
parser.add_option("-e", "--exec-function", action="store_true",
|
| 158 |
+
help="Modify the grammar so that exec() is a function")
|
| 159 |
+
parser.add_option("-v", "--verbose", action="store_true",
|
| 160 |
+
help="More verbose logging")
|
| 161 |
+
parser.add_option("--no-diffs", action="store_true",
|
| 162 |
+
help="Don't show diffs of the refactoring")
|
| 163 |
+
parser.add_option("-w", "--write", action="store_true",
|
| 164 |
+
help="Write back modified files")
|
| 165 |
+
parser.add_option("-n", "--nobackups", action="store_true", default=False,
|
| 166 |
+
help="Don't write backups for modified files")
|
| 167 |
+
parser.add_option("-o", "--output-dir", action="store", type="str",
|
| 168 |
+
default="", help="Put output files in this directory "
|
| 169 |
+
"instead of overwriting the input files. Requires -n.")
|
| 170 |
+
parser.add_option("-W", "--write-unchanged-files", action="store_true",
|
| 171 |
+
help="Also write files even if no changes were required"
|
| 172 |
+
" (useful with --output-dir); implies -w.")
|
| 173 |
+
parser.add_option("--add-suffix", action="store", type="str", default="",
|
| 174 |
+
help="Append this string to all output filenames."
|
| 175 |
+
" Requires -n if non-empty. "
|
| 176 |
+
"ex: --add-suffix='3' will generate .py3 files.")
|
| 177 |
+
|
| 178 |
+
# Parse command line arguments
|
| 179 |
+
refactor_stdin = False
|
| 180 |
+
flags = {}
|
| 181 |
+
options, args = parser.parse_args(args)
|
| 182 |
+
if options.write_unchanged_files:
|
| 183 |
+
flags["write_unchanged_files"] = True
|
| 184 |
+
if not options.write:
|
| 185 |
+
warn("--write-unchanged-files/-W implies -w.")
|
| 186 |
+
options.write = True
|
| 187 |
+
# If we allowed these, the original files would be renamed to backup names
|
| 188 |
+
# but not replaced.
|
| 189 |
+
if options.output_dir and not options.nobackups:
|
| 190 |
+
parser.error("Can't use --output-dir/-o without -n.")
|
| 191 |
+
if options.add_suffix and not options.nobackups:
|
| 192 |
+
parser.error("Can't use --add-suffix without -n.")
|
| 193 |
+
|
| 194 |
+
if not options.write and options.no_diffs:
|
| 195 |
+
warn("not writing files and not printing diffs; that's not very useful")
|
| 196 |
+
if not options.write and options.nobackups:
|
| 197 |
+
parser.error("Can't use -n without -w")
|
| 198 |
+
if options.list_fixes:
|
| 199 |
+
print("Available transformations for the -f/--fix option:")
|
| 200 |
+
for fixname in refactor.get_all_fix_names(fixer_pkg):
|
| 201 |
+
print(fixname)
|
| 202 |
+
if not args:
|
| 203 |
+
return 0
|
| 204 |
+
if not args:
|
| 205 |
+
print("At least one file or directory argument required.", file=sys.stderr)
|
| 206 |
+
print("Use --help to show usage.", file=sys.stderr)
|
| 207 |
+
return 2
|
| 208 |
+
if "-" in args:
|
| 209 |
+
refactor_stdin = True
|
| 210 |
+
if options.write:
|
| 211 |
+
print("Can't write to stdin.", file=sys.stderr)
|
| 212 |
+
return 2
|
| 213 |
+
if options.print_function:
|
| 214 |
+
flags["print_function"] = True
|
| 215 |
+
|
| 216 |
+
if options.exec_function:
|
| 217 |
+
flags["exec_function"] = True
|
| 218 |
+
|
| 219 |
+
# Set up logging handler
|
| 220 |
+
level = logging.DEBUG if options.verbose else logging.INFO
|
| 221 |
+
logging.basicConfig(format='%(name)s: %(message)s', level=level)
|
| 222 |
+
logger = logging.getLogger('lib2to3.main')
|
| 223 |
+
|
| 224 |
+
# Initialize the refactoring tool
|
| 225 |
+
avail_fixes = set(refactor.get_fixers_from_package(fixer_pkg))
|
| 226 |
+
unwanted_fixes = set(fixer_pkg + ".fix_" + fix for fix in options.nofix)
|
| 227 |
+
explicit = set()
|
| 228 |
+
if options.fix:
|
| 229 |
+
all_present = False
|
| 230 |
+
for fix in options.fix:
|
| 231 |
+
if fix == "all":
|
| 232 |
+
all_present = True
|
| 233 |
+
else:
|
| 234 |
+
explicit.add(fixer_pkg + ".fix_" + fix)
|
| 235 |
+
requested = avail_fixes.union(explicit) if all_present else explicit
|
| 236 |
+
else:
|
| 237 |
+
requested = avail_fixes.union(explicit)
|
| 238 |
+
fixer_names = requested.difference(unwanted_fixes)
|
| 239 |
+
input_base_dir = os.path.commonprefix(args)
|
| 240 |
+
if (input_base_dir and not input_base_dir.endswith(os.sep)
|
| 241 |
+
and not os.path.isdir(input_base_dir)):
|
| 242 |
+
# One or more similar names were passed, their directory is the base.
|
| 243 |
+
# os.path.commonprefix() is ignorant of path elements, this corrects
|
| 244 |
+
# for that weird API.
|
| 245 |
+
input_base_dir = os.path.dirname(input_base_dir)
|
| 246 |
+
if options.output_dir:
|
| 247 |
+
input_base_dir = input_base_dir.rstrip(os.sep)
|
| 248 |
+
logger.info('Output in %r will mirror the input directory %r layout.',
|
| 249 |
+
options.output_dir, input_base_dir)
|
| 250 |
+
rt = StdoutRefactoringTool(
|
| 251 |
+
sorted(fixer_names), flags, sorted(explicit),
|
| 252 |
+
options.nobackups, not options.no_diffs,
|
| 253 |
+
input_base_dir=input_base_dir,
|
| 254 |
+
output_dir=options.output_dir,
|
| 255 |
+
append_suffix=options.add_suffix)
|
| 256 |
+
|
| 257 |
+
# Refactor all files and directories passed as arguments
|
| 258 |
+
if not rt.errors:
|
| 259 |
+
if refactor_stdin:
|
| 260 |
+
rt.refactor_stdin()
|
| 261 |
+
else:
|
| 262 |
+
try:
|
| 263 |
+
rt.refactor(args, options.write, options.doctests_only,
|
| 264 |
+
options.processes)
|
| 265 |
+
except refactor.MultiprocessingUnsupported:
|
| 266 |
+
assert options.processes > 1
|
| 267 |
+
print("Sorry, -j isn't supported on this platform.",
|
| 268 |
+
file=sys.stderr)
|
| 269 |
+
return 1
|
| 270 |
+
rt.summarize()
|
| 271 |
+
|
| 272 |
+
# Return error status (0 if rt.errors is zero)
|
| 273 |
+
return int(bool(rt.errors))
|
llava/lib/python3.10/lib2to3/patcomp.py
ADDED
|
@@ -0,0 +1,204 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2006 Google, Inc. All Rights Reserved.
|
| 2 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 3 |
+
|
| 4 |
+
"""Pattern compiler.
|
| 5 |
+
|
| 6 |
+
The grammar is taken from PatternGrammar.txt.
|
| 7 |
+
|
| 8 |
+
The compiler compiles a pattern to a pytree.*Pattern instance.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
__author__ = "Guido van Rossum <guido@python.org>"
|
| 12 |
+
|
| 13 |
+
# Python imports
|
| 14 |
+
import io
|
| 15 |
+
|
| 16 |
+
# Fairly local imports
|
| 17 |
+
from .pgen2 import driver, literals, token, tokenize, parse, grammar
|
| 18 |
+
|
| 19 |
+
# Really local imports
|
| 20 |
+
from . import pytree
|
| 21 |
+
from . import pygram
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
class PatternSyntaxError(Exception):
|
| 25 |
+
pass
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def tokenize_wrapper(input):
|
| 29 |
+
"""Tokenizes a string suppressing significant whitespace."""
|
| 30 |
+
skip = {token.NEWLINE, token.INDENT, token.DEDENT}
|
| 31 |
+
tokens = tokenize.generate_tokens(io.StringIO(input).readline)
|
| 32 |
+
for quintuple in tokens:
|
| 33 |
+
type, value, start, end, line_text = quintuple
|
| 34 |
+
if type not in skip:
|
| 35 |
+
yield quintuple
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
class PatternCompiler(object):
|
| 39 |
+
|
| 40 |
+
def __init__(self, grammar_file=None):
|
| 41 |
+
"""Initializer.
|
| 42 |
+
|
| 43 |
+
Takes an optional alternative filename for the pattern grammar.
|
| 44 |
+
"""
|
| 45 |
+
if grammar_file is None:
|
| 46 |
+
self.grammar = pygram.pattern_grammar
|
| 47 |
+
self.syms = pygram.pattern_symbols
|
| 48 |
+
else:
|
| 49 |
+
self.grammar = driver.load_grammar(grammar_file)
|
| 50 |
+
self.syms = pygram.Symbols(self.grammar)
|
| 51 |
+
self.pygrammar = pygram.python_grammar
|
| 52 |
+
self.pysyms = pygram.python_symbols
|
| 53 |
+
self.driver = driver.Driver(self.grammar, convert=pattern_convert)
|
| 54 |
+
|
| 55 |
+
def compile_pattern(self, input, debug=False, with_tree=False):
|
| 56 |
+
"""Compiles a pattern string to a nested pytree.*Pattern object."""
|
| 57 |
+
tokens = tokenize_wrapper(input)
|
| 58 |
+
try:
|
| 59 |
+
root = self.driver.parse_tokens(tokens, debug=debug)
|
| 60 |
+
except parse.ParseError as e:
|
| 61 |
+
raise PatternSyntaxError(str(e)) from None
|
| 62 |
+
if with_tree:
|
| 63 |
+
return self.compile_node(root), root
|
| 64 |
+
else:
|
| 65 |
+
return self.compile_node(root)
|
| 66 |
+
|
| 67 |
+
def compile_node(self, node):
|
| 68 |
+
"""Compiles a node, recursively.
|
| 69 |
+
|
| 70 |
+
This is one big switch on the node type.
|
| 71 |
+
"""
|
| 72 |
+
# XXX Optimize certain Wildcard-containing-Wildcard patterns
|
| 73 |
+
# that can be merged
|
| 74 |
+
if node.type == self.syms.Matcher:
|
| 75 |
+
node = node.children[0] # Avoid unneeded recursion
|
| 76 |
+
|
| 77 |
+
if node.type == self.syms.Alternatives:
|
| 78 |
+
# Skip the odd children since they are just '|' tokens
|
| 79 |
+
alts = [self.compile_node(ch) for ch in node.children[::2]]
|
| 80 |
+
if len(alts) == 1:
|
| 81 |
+
return alts[0]
|
| 82 |
+
p = pytree.WildcardPattern([[a] for a in alts], min=1, max=1)
|
| 83 |
+
return p.optimize()
|
| 84 |
+
|
| 85 |
+
if node.type == self.syms.Alternative:
|
| 86 |
+
units = [self.compile_node(ch) for ch in node.children]
|
| 87 |
+
if len(units) == 1:
|
| 88 |
+
return units[0]
|
| 89 |
+
p = pytree.WildcardPattern([units], min=1, max=1)
|
| 90 |
+
return p.optimize()
|
| 91 |
+
|
| 92 |
+
if node.type == self.syms.NegatedUnit:
|
| 93 |
+
pattern = self.compile_basic(node.children[1:])
|
| 94 |
+
p = pytree.NegatedPattern(pattern)
|
| 95 |
+
return p.optimize()
|
| 96 |
+
|
| 97 |
+
assert node.type == self.syms.Unit
|
| 98 |
+
|
| 99 |
+
name = None
|
| 100 |
+
nodes = node.children
|
| 101 |
+
if len(nodes) >= 3 and nodes[1].type == token.EQUAL:
|
| 102 |
+
name = nodes[0].value
|
| 103 |
+
nodes = nodes[2:]
|
| 104 |
+
repeat = None
|
| 105 |
+
if len(nodes) >= 2 and nodes[-1].type == self.syms.Repeater:
|
| 106 |
+
repeat = nodes[-1]
|
| 107 |
+
nodes = nodes[:-1]
|
| 108 |
+
|
| 109 |
+
# Now we've reduced it to: STRING | NAME [Details] | (...) | [...]
|
| 110 |
+
pattern = self.compile_basic(nodes, repeat)
|
| 111 |
+
|
| 112 |
+
if repeat is not None:
|
| 113 |
+
assert repeat.type == self.syms.Repeater
|
| 114 |
+
children = repeat.children
|
| 115 |
+
child = children[0]
|
| 116 |
+
if child.type == token.STAR:
|
| 117 |
+
min = 0
|
| 118 |
+
max = pytree.HUGE
|
| 119 |
+
elif child.type == token.PLUS:
|
| 120 |
+
min = 1
|
| 121 |
+
max = pytree.HUGE
|
| 122 |
+
elif child.type == token.LBRACE:
|
| 123 |
+
assert children[-1].type == token.RBRACE
|
| 124 |
+
assert len(children) in (3, 5)
|
| 125 |
+
min = max = self.get_int(children[1])
|
| 126 |
+
if len(children) == 5:
|
| 127 |
+
max = self.get_int(children[3])
|
| 128 |
+
else:
|
| 129 |
+
assert False
|
| 130 |
+
if min != 1 or max != 1:
|
| 131 |
+
pattern = pattern.optimize()
|
| 132 |
+
pattern = pytree.WildcardPattern([[pattern]], min=min, max=max)
|
| 133 |
+
|
| 134 |
+
if name is not None:
|
| 135 |
+
pattern.name = name
|
| 136 |
+
return pattern.optimize()
|
| 137 |
+
|
| 138 |
+
def compile_basic(self, nodes, repeat=None):
|
| 139 |
+
# Compile STRING | NAME [Details] | (...) | [...]
|
| 140 |
+
assert len(nodes) >= 1
|
| 141 |
+
node = nodes[0]
|
| 142 |
+
if node.type == token.STRING:
|
| 143 |
+
value = str(literals.evalString(node.value))
|
| 144 |
+
return pytree.LeafPattern(_type_of_literal(value), value)
|
| 145 |
+
elif node.type == token.NAME:
|
| 146 |
+
value = node.value
|
| 147 |
+
if value.isupper():
|
| 148 |
+
if value not in TOKEN_MAP:
|
| 149 |
+
raise PatternSyntaxError("Invalid token: %r" % value)
|
| 150 |
+
if nodes[1:]:
|
| 151 |
+
raise PatternSyntaxError("Can't have details for token")
|
| 152 |
+
return pytree.LeafPattern(TOKEN_MAP[value])
|
| 153 |
+
else:
|
| 154 |
+
if value == "any":
|
| 155 |
+
type = None
|
| 156 |
+
elif not value.startswith("_"):
|
| 157 |
+
type = getattr(self.pysyms, value, None)
|
| 158 |
+
if type is None:
|
| 159 |
+
raise PatternSyntaxError("Invalid symbol: %r" % value)
|
| 160 |
+
if nodes[1:]: # Details present
|
| 161 |
+
content = [self.compile_node(nodes[1].children[1])]
|
| 162 |
+
else:
|
| 163 |
+
content = None
|
| 164 |
+
return pytree.NodePattern(type, content)
|
| 165 |
+
elif node.value == "(":
|
| 166 |
+
return self.compile_node(nodes[1])
|
| 167 |
+
elif node.value == "[":
|
| 168 |
+
assert repeat is None
|
| 169 |
+
subpattern = self.compile_node(nodes[1])
|
| 170 |
+
return pytree.WildcardPattern([[subpattern]], min=0, max=1)
|
| 171 |
+
assert False, node
|
| 172 |
+
|
| 173 |
+
def get_int(self, node):
|
| 174 |
+
assert node.type == token.NUMBER
|
| 175 |
+
return int(node.value)
|
| 176 |
+
|
| 177 |
+
|
| 178 |
+
# Map named tokens to the type value for a LeafPattern
|
| 179 |
+
TOKEN_MAP = {"NAME": token.NAME,
|
| 180 |
+
"STRING": token.STRING,
|
| 181 |
+
"NUMBER": token.NUMBER,
|
| 182 |
+
"TOKEN": None}
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def _type_of_literal(value):
|
| 186 |
+
if value[0].isalpha():
|
| 187 |
+
return token.NAME
|
| 188 |
+
elif value in grammar.opmap:
|
| 189 |
+
return grammar.opmap[value]
|
| 190 |
+
else:
|
| 191 |
+
return None
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def pattern_convert(grammar, raw_node_info):
|
| 195 |
+
"""Converts raw node information to a Node or Leaf instance."""
|
| 196 |
+
type, value, context, children = raw_node_info
|
| 197 |
+
if children or type in grammar.number2symbol:
|
| 198 |
+
return pytree.Node(type, children, context=context)
|
| 199 |
+
else:
|
| 200 |
+
return pytree.Leaf(type, value, context=context)
|
| 201 |
+
|
| 202 |
+
|
| 203 |
+
def compile_pattern(pattern):
|
| 204 |
+
return PatternCompiler().compile_pattern(pattern)
|
llava/lib/python3.10/lib2to3/pygram.py
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2006 Google, Inc. All Rights Reserved.
|
| 2 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 3 |
+
|
| 4 |
+
"""Export the Python grammar and symbols."""
|
| 5 |
+
|
| 6 |
+
# Python imports
|
| 7 |
+
import os
|
| 8 |
+
|
| 9 |
+
# Local imports
|
| 10 |
+
from .pgen2 import token
|
| 11 |
+
from .pgen2 import driver
|
| 12 |
+
from . import pytree
|
| 13 |
+
|
| 14 |
+
# The grammar file
|
| 15 |
+
_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__), "Grammar.txt")
|
| 16 |
+
_PATTERN_GRAMMAR_FILE = os.path.join(os.path.dirname(__file__),
|
| 17 |
+
"PatternGrammar.txt")
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
class Symbols(object):
|
| 21 |
+
|
| 22 |
+
def __init__(self, grammar):
|
| 23 |
+
"""Initializer.
|
| 24 |
+
|
| 25 |
+
Creates an attribute for each grammar symbol (nonterminal),
|
| 26 |
+
whose value is the symbol's type (an int >= 256).
|
| 27 |
+
"""
|
| 28 |
+
for name, symbol in grammar.symbol2number.items():
|
| 29 |
+
setattr(self, name, symbol)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
python_grammar = driver.load_packaged_grammar("lib2to3", _GRAMMAR_FILE)
|
| 33 |
+
|
| 34 |
+
python_symbols = Symbols(python_grammar)
|
| 35 |
+
|
| 36 |
+
python_grammar_no_print_statement = python_grammar.copy()
|
| 37 |
+
del python_grammar_no_print_statement.keywords["print"]
|
| 38 |
+
|
| 39 |
+
python_grammar_no_print_and_exec_statement = python_grammar_no_print_statement.copy()
|
| 40 |
+
del python_grammar_no_print_and_exec_statement.keywords["exec"]
|
| 41 |
+
|
| 42 |
+
pattern_grammar = driver.load_packaged_grammar("lib2to3", _PATTERN_GRAMMAR_FILE)
|
| 43 |
+
pattern_symbols = Symbols(pattern_grammar)
|
llava/lib/python3.10/lib2to3/pytree.py
ADDED
|
@@ -0,0 +1,853 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Copyright 2006 Google, Inc. All Rights Reserved.
|
| 2 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
Python parse tree definitions.
|
| 6 |
+
|
| 7 |
+
This is a very concrete parse tree; we need to keep every token and
|
| 8 |
+
even the comments and whitespace between tokens.
|
| 9 |
+
|
| 10 |
+
There's also a pattern matching implementation here.
|
| 11 |
+
"""
|
| 12 |
+
|
| 13 |
+
__author__ = "Guido van Rossum <guido@python.org>"
|
| 14 |
+
|
| 15 |
+
import sys
|
| 16 |
+
from io import StringIO
|
| 17 |
+
|
| 18 |
+
HUGE = 0x7FFFFFFF # maximum repeat count, default max
|
| 19 |
+
|
| 20 |
+
_type_reprs = {}
|
| 21 |
+
def type_repr(type_num):
|
| 22 |
+
global _type_reprs
|
| 23 |
+
if not _type_reprs:
|
| 24 |
+
from .pygram import python_symbols
|
| 25 |
+
# printing tokens is possible but not as useful
|
| 26 |
+
# from .pgen2 import token // token.__dict__.items():
|
| 27 |
+
for name, val in python_symbols.__dict__.items():
|
| 28 |
+
if type(val) == int: _type_reprs[val] = name
|
| 29 |
+
return _type_reprs.setdefault(type_num, type_num)
|
| 30 |
+
|
| 31 |
+
class Base(object):
|
| 32 |
+
|
| 33 |
+
"""
|
| 34 |
+
Abstract base class for Node and Leaf.
|
| 35 |
+
|
| 36 |
+
This provides some default functionality and boilerplate using the
|
| 37 |
+
template pattern.
|
| 38 |
+
|
| 39 |
+
A node may be a subnode of at most one parent.
|
| 40 |
+
"""
|
| 41 |
+
|
| 42 |
+
# Default values for instance variables
|
| 43 |
+
type = None # int: token number (< 256) or symbol number (>= 256)
|
| 44 |
+
parent = None # Parent node pointer, or None
|
| 45 |
+
children = () # Tuple of subnodes
|
| 46 |
+
was_changed = False
|
| 47 |
+
was_checked = False
|
| 48 |
+
|
| 49 |
+
def __new__(cls, *args, **kwds):
|
| 50 |
+
"""Constructor that prevents Base from being instantiated."""
|
| 51 |
+
assert cls is not Base, "Cannot instantiate Base"
|
| 52 |
+
return object.__new__(cls)
|
| 53 |
+
|
| 54 |
+
def __eq__(self, other):
|
| 55 |
+
"""
|
| 56 |
+
Compare two nodes for equality.
|
| 57 |
+
|
| 58 |
+
This calls the method _eq().
|
| 59 |
+
"""
|
| 60 |
+
if self.__class__ is not other.__class__:
|
| 61 |
+
return NotImplemented
|
| 62 |
+
return self._eq(other)
|
| 63 |
+
|
| 64 |
+
__hash__ = None # For Py3 compatibility.
|
| 65 |
+
|
| 66 |
+
def _eq(self, other):
|
| 67 |
+
"""
|
| 68 |
+
Compare two nodes for equality.
|
| 69 |
+
|
| 70 |
+
This is called by __eq__ and __ne__. It is only called if the two nodes
|
| 71 |
+
have the same type. This must be implemented by the concrete subclass.
|
| 72 |
+
Nodes should be considered equal if they have the same structure,
|
| 73 |
+
ignoring the prefix string and other context information.
|
| 74 |
+
"""
|
| 75 |
+
raise NotImplementedError
|
| 76 |
+
|
| 77 |
+
def clone(self):
|
| 78 |
+
"""
|
| 79 |
+
Return a cloned (deep) copy of self.
|
| 80 |
+
|
| 81 |
+
This must be implemented by the concrete subclass.
|
| 82 |
+
"""
|
| 83 |
+
raise NotImplementedError
|
| 84 |
+
|
| 85 |
+
def post_order(self):
|
| 86 |
+
"""
|
| 87 |
+
Return a post-order iterator for the tree.
|
| 88 |
+
|
| 89 |
+
This must be implemented by the concrete subclass.
|
| 90 |
+
"""
|
| 91 |
+
raise NotImplementedError
|
| 92 |
+
|
| 93 |
+
def pre_order(self):
|
| 94 |
+
"""
|
| 95 |
+
Return a pre-order iterator for the tree.
|
| 96 |
+
|
| 97 |
+
This must be implemented by the concrete subclass.
|
| 98 |
+
"""
|
| 99 |
+
raise NotImplementedError
|
| 100 |
+
|
| 101 |
+
def replace(self, new):
|
| 102 |
+
"""Replace this node with a new one in the parent."""
|
| 103 |
+
assert self.parent is not None, str(self)
|
| 104 |
+
assert new is not None
|
| 105 |
+
if not isinstance(new, list):
|
| 106 |
+
new = [new]
|
| 107 |
+
l_children = []
|
| 108 |
+
found = False
|
| 109 |
+
for ch in self.parent.children:
|
| 110 |
+
if ch is self:
|
| 111 |
+
assert not found, (self.parent.children, self, new)
|
| 112 |
+
if new is not None:
|
| 113 |
+
l_children.extend(new)
|
| 114 |
+
found = True
|
| 115 |
+
else:
|
| 116 |
+
l_children.append(ch)
|
| 117 |
+
assert found, (self.children, self, new)
|
| 118 |
+
self.parent.changed()
|
| 119 |
+
self.parent.children = l_children
|
| 120 |
+
for x in new:
|
| 121 |
+
x.parent = self.parent
|
| 122 |
+
self.parent = None
|
| 123 |
+
|
| 124 |
+
def get_lineno(self):
|
| 125 |
+
"""Return the line number which generated the invocant node."""
|
| 126 |
+
node = self
|
| 127 |
+
while not isinstance(node, Leaf):
|
| 128 |
+
if not node.children:
|
| 129 |
+
return
|
| 130 |
+
node = node.children[0]
|
| 131 |
+
return node.lineno
|
| 132 |
+
|
| 133 |
+
def changed(self):
|
| 134 |
+
if self.parent:
|
| 135 |
+
self.parent.changed()
|
| 136 |
+
self.was_changed = True
|
| 137 |
+
|
| 138 |
+
def remove(self):
|
| 139 |
+
"""
|
| 140 |
+
Remove the node from the tree. Returns the position of the node in its
|
| 141 |
+
parent's children before it was removed.
|
| 142 |
+
"""
|
| 143 |
+
if self.parent:
|
| 144 |
+
for i, node in enumerate(self.parent.children):
|
| 145 |
+
if node is self:
|
| 146 |
+
self.parent.changed()
|
| 147 |
+
del self.parent.children[i]
|
| 148 |
+
self.parent = None
|
| 149 |
+
return i
|
| 150 |
+
|
| 151 |
+
@property
|
| 152 |
+
def next_sibling(self):
|
| 153 |
+
"""
|
| 154 |
+
The node immediately following the invocant in their parent's children
|
| 155 |
+
list. If the invocant does not have a next sibling, it is None
|
| 156 |
+
"""
|
| 157 |
+
if self.parent is None:
|
| 158 |
+
return None
|
| 159 |
+
|
| 160 |
+
# Can't use index(); we need to test by identity
|
| 161 |
+
for i, child in enumerate(self.parent.children):
|
| 162 |
+
if child is self:
|
| 163 |
+
try:
|
| 164 |
+
return self.parent.children[i+1]
|
| 165 |
+
except IndexError:
|
| 166 |
+
return None
|
| 167 |
+
|
| 168 |
+
@property
|
| 169 |
+
def prev_sibling(self):
|
| 170 |
+
"""
|
| 171 |
+
The node immediately preceding the invocant in their parent's children
|
| 172 |
+
list. If the invocant does not have a previous sibling, it is None.
|
| 173 |
+
"""
|
| 174 |
+
if self.parent is None:
|
| 175 |
+
return None
|
| 176 |
+
|
| 177 |
+
# Can't use index(); we need to test by identity
|
| 178 |
+
for i, child in enumerate(self.parent.children):
|
| 179 |
+
if child is self:
|
| 180 |
+
if i == 0:
|
| 181 |
+
return None
|
| 182 |
+
return self.parent.children[i-1]
|
| 183 |
+
|
| 184 |
+
def leaves(self):
|
| 185 |
+
for child in self.children:
|
| 186 |
+
yield from child.leaves()
|
| 187 |
+
|
| 188 |
+
def depth(self):
|
| 189 |
+
if self.parent is None:
|
| 190 |
+
return 0
|
| 191 |
+
return 1 + self.parent.depth()
|
| 192 |
+
|
| 193 |
+
def get_suffix(self):
|
| 194 |
+
"""
|
| 195 |
+
Return the string immediately following the invocant node. This is
|
| 196 |
+
effectively equivalent to node.next_sibling.prefix
|
| 197 |
+
"""
|
| 198 |
+
next_sib = self.next_sibling
|
| 199 |
+
if next_sib is None:
|
| 200 |
+
return ""
|
| 201 |
+
return next_sib.prefix
|
| 202 |
+
|
| 203 |
+
if sys.version_info < (3, 0):
|
| 204 |
+
def __str__(self):
|
| 205 |
+
return str(self).encode("ascii")
|
| 206 |
+
|
| 207 |
+
class Node(Base):
|
| 208 |
+
|
| 209 |
+
"""Concrete implementation for interior nodes."""
|
| 210 |
+
|
| 211 |
+
def __init__(self,type, children,
|
| 212 |
+
context=None,
|
| 213 |
+
prefix=None,
|
| 214 |
+
fixers_applied=None):
|
| 215 |
+
"""
|
| 216 |
+
Initializer.
|
| 217 |
+
|
| 218 |
+
Takes a type constant (a symbol number >= 256), a sequence of
|
| 219 |
+
child nodes, and an optional context keyword argument.
|
| 220 |
+
|
| 221 |
+
As a side effect, the parent pointers of the children are updated.
|
| 222 |
+
"""
|
| 223 |
+
assert type >= 256, type
|
| 224 |
+
self.type = type
|
| 225 |
+
self.children = list(children)
|
| 226 |
+
for ch in self.children:
|
| 227 |
+
assert ch.parent is None, repr(ch)
|
| 228 |
+
ch.parent = self
|
| 229 |
+
if prefix is not None:
|
| 230 |
+
self.prefix = prefix
|
| 231 |
+
if fixers_applied:
|
| 232 |
+
self.fixers_applied = fixers_applied[:]
|
| 233 |
+
else:
|
| 234 |
+
self.fixers_applied = None
|
| 235 |
+
|
| 236 |
+
def __repr__(self):
|
| 237 |
+
"""Return a canonical string representation."""
|
| 238 |
+
return "%s(%s, %r)" % (self.__class__.__name__,
|
| 239 |
+
type_repr(self.type),
|
| 240 |
+
self.children)
|
| 241 |
+
|
| 242 |
+
def __unicode__(self):
|
| 243 |
+
"""
|
| 244 |
+
Return a pretty string representation.
|
| 245 |
+
|
| 246 |
+
This reproduces the input source exactly.
|
| 247 |
+
"""
|
| 248 |
+
return "".join(map(str, self.children))
|
| 249 |
+
|
| 250 |
+
if sys.version_info > (3, 0):
|
| 251 |
+
__str__ = __unicode__
|
| 252 |
+
|
| 253 |
+
def _eq(self, other):
|
| 254 |
+
"""Compare two nodes for equality."""
|
| 255 |
+
return (self.type, self.children) == (other.type, other.children)
|
| 256 |
+
|
| 257 |
+
def clone(self):
|
| 258 |
+
"""Return a cloned (deep) copy of self."""
|
| 259 |
+
return Node(self.type, [ch.clone() for ch in self.children],
|
| 260 |
+
fixers_applied=self.fixers_applied)
|
| 261 |
+
|
| 262 |
+
def post_order(self):
|
| 263 |
+
"""Return a post-order iterator for the tree."""
|
| 264 |
+
for child in self.children:
|
| 265 |
+
yield from child.post_order()
|
| 266 |
+
yield self
|
| 267 |
+
|
| 268 |
+
def pre_order(self):
|
| 269 |
+
"""Return a pre-order iterator for the tree."""
|
| 270 |
+
yield self
|
| 271 |
+
for child in self.children:
|
| 272 |
+
yield from child.pre_order()
|
| 273 |
+
|
| 274 |
+
@property
|
| 275 |
+
def prefix(self):
|
| 276 |
+
"""
|
| 277 |
+
The whitespace and comments preceding this node in the input.
|
| 278 |
+
"""
|
| 279 |
+
if not self.children:
|
| 280 |
+
return ""
|
| 281 |
+
return self.children[0].prefix
|
| 282 |
+
|
| 283 |
+
@prefix.setter
|
| 284 |
+
def prefix(self, prefix):
|
| 285 |
+
if self.children:
|
| 286 |
+
self.children[0].prefix = prefix
|
| 287 |
+
|
| 288 |
+
def set_child(self, i, child):
|
| 289 |
+
"""
|
| 290 |
+
Equivalent to 'node.children[i] = child'. This method also sets the
|
| 291 |
+
child's parent attribute appropriately.
|
| 292 |
+
"""
|
| 293 |
+
child.parent = self
|
| 294 |
+
self.children[i].parent = None
|
| 295 |
+
self.children[i] = child
|
| 296 |
+
self.changed()
|
| 297 |
+
|
| 298 |
+
def insert_child(self, i, child):
|
| 299 |
+
"""
|
| 300 |
+
Equivalent to 'node.children.insert(i, child)'. This method also sets
|
| 301 |
+
the child's parent attribute appropriately.
|
| 302 |
+
"""
|
| 303 |
+
child.parent = self
|
| 304 |
+
self.children.insert(i, child)
|
| 305 |
+
self.changed()
|
| 306 |
+
|
| 307 |
+
def append_child(self, child):
|
| 308 |
+
"""
|
| 309 |
+
Equivalent to 'node.children.append(child)'. This method also sets the
|
| 310 |
+
child's parent attribute appropriately.
|
| 311 |
+
"""
|
| 312 |
+
child.parent = self
|
| 313 |
+
self.children.append(child)
|
| 314 |
+
self.changed()
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
class Leaf(Base):
|
| 318 |
+
|
| 319 |
+
"""Concrete implementation for leaf nodes."""
|
| 320 |
+
|
| 321 |
+
# Default values for instance variables
|
| 322 |
+
_prefix = "" # Whitespace and comments preceding this token in the input
|
| 323 |
+
lineno = 0 # Line where this token starts in the input
|
| 324 |
+
column = 0 # Column where this token tarts in the input
|
| 325 |
+
|
| 326 |
+
def __init__(self, type, value,
|
| 327 |
+
context=None,
|
| 328 |
+
prefix=None,
|
| 329 |
+
fixers_applied=[]):
|
| 330 |
+
"""
|
| 331 |
+
Initializer.
|
| 332 |
+
|
| 333 |
+
Takes a type constant (a token number < 256), a string value, and an
|
| 334 |
+
optional context keyword argument.
|
| 335 |
+
"""
|
| 336 |
+
assert 0 <= type < 256, type
|
| 337 |
+
if context is not None:
|
| 338 |
+
self._prefix, (self.lineno, self.column) = context
|
| 339 |
+
self.type = type
|
| 340 |
+
self.value = value
|
| 341 |
+
if prefix is not None:
|
| 342 |
+
self._prefix = prefix
|
| 343 |
+
self.fixers_applied = fixers_applied[:]
|
| 344 |
+
|
| 345 |
+
def __repr__(self):
|
| 346 |
+
"""Return a canonical string representation."""
|
| 347 |
+
return "%s(%r, %r)" % (self.__class__.__name__,
|
| 348 |
+
self.type,
|
| 349 |
+
self.value)
|
| 350 |
+
|
| 351 |
+
def __unicode__(self):
|
| 352 |
+
"""
|
| 353 |
+
Return a pretty string representation.
|
| 354 |
+
|
| 355 |
+
This reproduces the input source exactly.
|
| 356 |
+
"""
|
| 357 |
+
return self.prefix + str(self.value)
|
| 358 |
+
|
| 359 |
+
if sys.version_info > (3, 0):
|
| 360 |
+
__str__ = __unicode__
|
| 361 |
+
|
| 362 |
+
def _eq(self, other):
|
| 363 |
+
"""Compare two nodes for equality."""
|
| 364 |
+
return (self.type, self.value) == (other.type, other.value)
|
| 365 |
+
|
| 366 |
+
def clone(self):
|
| 367 |
+
"""Return a cloned (deep) copy of self."""
|
| 368 |
+
return Leaf(self.type, self.value,
|
| 369 |
+
(self.prefix, (self.lineno, self.column)),
|
| 370 |
+
fixers_applied=self.fixers_applied)
|
| 371 |
+
|
| 372 |
+
def leaves(self):
|
| 373 |
+
yield self
|
| 374 |
+
|
| 375 |
+
def post_order(self):
|
| 376 |
+
"""Return a post-order iterator for the tree."""
|
| 377 |
+
yield self
|
| 378 |
+
|
| 379 |
+
def pre_order(self):
|
| 380 |
+
"""Return a pre-order iterator for the tree."""
|
| 381 |
+
yield self
|
| 382 |
+
|
| 383 |
+
@property
|
| 384 |
+
def prefix(self):
|
| 385 |
+
"""
|
| 386 |
+
The whitespace and comments preceding this token in the input.
|
| 387 |
+
"""
|
| 388 |
+
return self._prefix
|
| 389 |
+
|
| 390 |
+
@prefix.setter
|
| 391 |
+
def prefix(self, prefix):
|
| 392 |
+
self.changed()
|
| 393 |
+
self._prefix = prefix
|
| 394 |
+
|
| 395 |
+
def convert(gr, raw_node):
|
| 396 |
+
"""
|
| 397 |
+
Convert raw node information to a Node or Leaf instance.
|
| 398 |
+
|
| 399 |
+
This is passed to the parser driver which calls it whenever a reduction of a
|
| 400 |
+
grammar rule produces a new complete node, so that the tree is build
|
| 401 |
+
strictly bottom-up.
|
| 402 |
+
"""
|
| 403 |
+
type, value, context, children = raw_node
|
| 404 |
+
if children or type in gr.number2symbol:
|
| 405 |
+
# If there's exactly one child, return that child instead of
|
| 406 |
+
# creating a new node.
|
| 407 |
+
if len(children) == 1:
|
| 408 |
+
return children[0]
|
| 409 |
+
return Node(type, children, context=context)
|
| 410 |
+
else:
|
| 411 |
+
return Leaf(type, value, context=context)
|
| 412 |
+
|
| 413 |
+
|
| 414 |
+
class BasePattern(object):
|
| 415 |
+
|
| 416 |
+
"""
|
| 417 |
+
A pattern is a tree matching pattern.
|
| 418 |
+
|
| 419 |
+
It looks for a specific node type (token or symbol), and
|
| 420 |
+
optionally for a specific content.
|
| 421 |
+
|
| 422 |
+
This is an abstract base class. There are three concrete
|
| 423 |
+
subclasses:
|
| 424 |
+
|
| 425 |
+
- LeafPattern matches a single leaf node;
|
| 426 |
+
- NodePattern matches a single node (usually non-leaf);
|
| 427 |
+
- WildcardPattern matches a sequence of nodes of variable length.
|
| 428 |
+
"""
|
| 429 |
+
|
| 430 |
+
# Defaults for instance variables
|
| 431 |
+
type = None # Node type (token if < 256, symbol if >= 256)
|
| 432 |
+
content = None # Optional content matching pattern
|
| 433 |
+
name = None # Optional name used to store match in results dict
|
| 434 |
+
|
| 435 |
+
def __new__(cls, *args, **kwds):
|
| 436 |
+
"""Constructor that prevents BasePattern from being instantiated."""
|
| 437 |
+
assert cls is not BasePattern, "Cannot instantiate BasePattern"
|
| 438 |
+
return object.__new__(cls)
|
| 439 |
+
|
| 440 |
+
def __repr__(self):
|
| 441 |
+
args = [type_repr(self.type), self.content, self.name]
|
| 442 |
+
while args and args[-1] is None:
|
| 443 |
+
del args[-1]
|
| 444 |
+
return "%s(%s)" % (self.__class__.__name__, ", ".join(map(repr, args)))
|
| 445 |
+
|
| 446 |
+
def optimize(self):
|
| 447 |
+
"""
|
| 448 |
+
A subclass can define this as a hook for optimizations.
|
| 449 |
+
|
| 450 |
+
Returns either self or another node with the same effect.
|
| 451 |
+
"""
|
| 452 |
+
return self
|
| 453 |
+
|
| 454 |
+
def match(self, node, results=None):
|
| 455 |
+
"""
|
| 456 |
+
Does this pattern exactly match a node?
|
| 457 |
+
|
| 458 |
+
Returns True if it matches, False if not.
|
| 459 |
+
|
| 460 |
+
If results is not None, it must be a dict which will be
|
| 461 |
+
updated with the nodes matching named subpatterns.
|
| 462 |
+
|
| 463 |
+
Default implementation for non-wildcard patterns.
|
| 464 |
+
"""
|
| 465 |
+
if self.type is not None and node.type != self.type:
|
| 466 |
+
return False
|
| 467 |
+
if self.content is not None:
|
| 468 |
+
r = None
|
| 469 |
+
if results is not None:
|
| 470 |
+
r = {}
|
| 471 |
+
if not self._submatch(node, r):
|
| 472 |
+
return False
|
| 473 |
+
if r:
|
| 474 |
+
results.update(r)
|
| 475 |
+
if results is not None and self.name:
|
| 476 |
+
results[self.name] = node
|
| 477 |
+
return True
|
| 478 |
+
|
| 479 |
+
def match_seq(self, nodes, results=None):
|
| 480 |
+
"""
|
| 481 |
+
Does this pattern exactly match a sequence of nodes?
|
| 482 |
+
|
| 483 |
+
Default implementation for non-wildcard patterns.
|
| 484 |
+
"""
|
| 485 |
+
if len(nodes) != 1:
|
| 486 |
+
return False
|
| 487 |
+
return self.match(nodes[0], results)
|
| 488 |
+
|
| 489 |
+
def generate_matches(self, nodes):
|
| 490 |
+
"""
|
| 491 |
+
Generator yielding all matches for this pattern.
|
| 492 |
+
|
| 493 |
+
Default implementation for non-wildcard patterns.
|
| 494 |
+
"""
|
| 495 |
+
r = {}
|
| 496 |
+
if nodes and self.match(nodes[0], r):
|
| 497 |
+
yield 1, r
|
| 498 |
+
|
| 499 |
+
|
| 500 |
+
class LeafPattern(BasePattern):
|
| 501 |
+
|
| 502 |
+
def __init__(self, type=None, content=None, name=None):
|
| 503 |
+
"""
|
| 504 |
+
Initializer. Takes optional type, content, and name.
|
| 505 |
+
|
| 506 |
+
The type, if given must be a token type (< 256). If not given,
|
| 507 |
+
this matches any *leaf* node; the content may still be required.
|
| 508 |
+
|
| 509 |
+
The content, if given, must be a string.
|
| 510 |
+
|
| 511 |
+
If a name is given, the matching node is stored in the results
|
| 512 |
+
dict under that key.
|
| 513 |
+
"""
|
| 514 |
+
if type is not None:
|
| 515 |
+
assert 0 <= type < 256, type
|
| 516 |
+
if content is not None:
|
| 517 |
+
assert isinstance(content, str), repr(content)
|
| 518 |
+
self.type = type
|
| 519 |
+
self.content = content
|
| 520 |
+
self.name = name
|
| 521 |
+
|
| 522 |
+
def match(self, node, results=None):
|
| 523 |
+
"""Override match() to insist on a leaf node."""
|
| 524 |
+
if not isinstance(node, Leaf):
|
| 525 |
+
return False
|
| 526 |
+
return BasePattern.match(self, node, results)
|
| 527 |
+
|
| 528 |
+
def _submatch(self, node, results=None):
|
| 529 |
+
"""
|
| 530 |
+
Match the pattern's content to the node's children.
|
| 531 |
+
|
| 532 |
+
This assumes the node type matches and self.content is not None.
|
| 533 |
+
|
| 534 |
+
Returns True if it matches, False if not.
|
| 535 |
+
|
| 536 |
+
If results is not None, it must be a dict which will be
|
| 537 |
+
updated with the nodes matching named subpatterns.
|
| 538 |
+
|
| 539 |
+
When returning False, the results dict may still be updated.
|
| 540 |
+
"""
|
| 541 |
+
return self.content == node.value
|
| 542 |
+
|
| 543 |
+
|
| 544 |
+
class NodePattern(BasePattern):
|
| 545 |
+
|
| 546 |
+
wildcards = False
|
| 547 |
+
|
| 548 |
+
def __init__(self, type=None, content=None, name=None):
|
| 549 |
+
"""
|
| 550 |
+
Initializer. Takes optional type, content, and name.
|
| 551 |
+
|
| 552 |
+
The type, if given, must be a symbol type (>= 256). If the
|
| 553 |
+
type is None this matches *any* single node (leaf or not),
|
| 554 |
+
except if content is not None, in which it only matches
|
| 555 |
+
non-leaf nodes that also match the content pattern.
|
| 556 |
+
|
| 557 |
+
The content, if not None, must be a sequence of Patterns that
|
| 558 |
+
must match the node's children exactly. If the content is
|
| 559 |
+
given, the type must not be None.
|
| 560 |
+
|
| 561 |
+
If a name is given, the matching node is stored in the results
|
| 562 |
+
dict under that key.
|
| 563 |
+
"""
|
| 564 |
+
if type is not None:
|
| 565 |
+
assert type >= 256, type
|
| 566 |
+
if content is not None:
|
| 567 |
+
assert not isinstance(content, str), repr(content)
|
| 568 |
+
content = list(content)
|
| 569 |
+
for i, item in enumerate(content):
|
| 570 |
+
assert isinstance(item, BasePattern), (i, item)
|
| 571 |
+
if isinstance(item, WildcardPattern):
|
| 572 |
+
self.wildcards = True
|
| 573 |
+
self.type = type
|
| 574 |
+
self.content = content
|
| 575 |
+
self.name = name
|
| 576 |
+
|
| 577 |
+
def _submatch(self, node, results=None):
|
| 578 |
+
"""
|
| 579 |
+
Match the pattern's content to the node's children.
|
| 580 |
+
|
| 581 |
+
This assumes the node type matches and self.content is not None.
|
| 582 |
+
|
| 583 |
+
Returns True if it matches, False if not.
|
| 584 |
+
|
| 585 |
+
If results is not None, it must be a dict which will be
|
| 586 |
+
updated with the nodes matching named subpatterns.
|
| 587 |
+
|
| 588 |
+
When returning False, the results dict may still be updated.
|
| 589 |
+
"""
|
| 590 |
+
if self.wildcards:
|
| 591 |
+
for c, r in generate_matches(self.content, node.children):
|
| 592 |
+
if c == len(node.children):
|
| 593 |
+
if results is not None:
|
| 594 |
+
results.update(r)
|
| 595 |
+
return True
|
| 596 |
+
return False
|
| 597 |
+
if len(self.content) != len(node.children):
|
| 598 |
+
return False
|
| 599 |
+
for subpattern, child in zip(self.content, node.children):
|
| 600 |
+
if not subpattern.match(child, results):
|
| 601 |
+
return False
|
| 602 |
+
return True
|
| 603 |
+
|
| 604 |
+
|
| 605 |
+
class WildcardPattern(BasePattern):
|
| 606 |
+
|
| 607 |
+
"""
|
| 608 |
+
A wildcard pattern can match zero or more nodes.
|
| 609 |
+
|
| 610 |
+
This has all the flexibility needed to implement patterns like:
|
| 611 |
+
|
| 612 |
+
.* .+ .? .{m,n}
|
| 613 |
+
(a b c | d e | f)
|
| 614 |
+
(...)* (...)+ (...)? (...){m,n}
|
| 615 |
+
|
| 616 |
+
except it always uses non-greedy matching.
|
| 617 |
+
"""
|
| 618 |
+
|
| 619 |
+
def __init__(self, content=None, min=0, max=HUGE, name=None):
|
| 620 |
+
"""
|
| 621 |
+
Initializer.
|
| 622 |
+
|
| 623 |
+
Args:
|
| 624 |
+
content: optional sequence of subsequences of patterns;
|
| 625 |
+
if absent, matches one node;
|
| 626 |
+
if present, each subsequence is an alternative [*]
|
| 627 |
+
min: optional minimum number of times to match, default 0
|
| 628 |
+
max: optional maximum number of times to match, default HUGE
|
| 629 |
+
name: optional name assigned to this match
|
| 630 |
+
|
| 631 |
+
[*] Thus, if content is [[a, b, c], [d, e], [f, g, h]] this is
|
| 632 |
+
equivalent to (a b c | d e | f g h); if content is None,
|
| 633 |
+
this is equivalent to '.' in regular expression terms.
|
| 634 |
+
The min and max parameters work as follows:
|
| 635 |
+
min=0, max=maxint: .*
|
| 636 |
+
min=1, max=maxint: .+
|
| 637 |
+
min=0, max=1: .?
|
| 638 |
+
min=1, max=1: .
|
| 639 |
+
If content is not None, replace the dot with the parenthesized
|
| 640 |
+
list of alternatives, e.g. (a b c | d e | f g h)*
|
| 641 |
+
"""
|
| 642 |
+
assert 0 <= min <= max <= HUGE, (min, max)
|
| 643 |
+
if content is not None:
|
| 644 |
+
content = tuple(map(tuple, content)) # Protect against alterations
|
| 645 |
+
# Check sanity of alternatives
|
| 646 |
+
assert len(content), repr(content) # Can't have zero alternatives
|
| 647 |
+
for alt in content:
|
| 648 |
+
assert len(alt), repr(alt) # Can have empty alternatives
|
| 649 |
+
self.content = content
|
| 650 |
+
self.min = min
|
| 651 |
+
self.max = max
|
| 652 |
+
self.name = name
|
| 653 |
+
|
| 654 |
+
def optimize(self):
|
| 655 |
+
"""Optimize certain stacked wildcard patterns."""
|
| 656 |
+
subpattern = None
|
| 657 |
+
if (self.content is not None and
|
| 658 |
+
len(self.content) == 1 and len(self.content[0]) == 1):
|
| 659 |
+
subpattern = self.content[0][0]
|
| 660 |
+
if self.min == 1 and self.max == 1:
|
| 661 |
+
if self.content is None:
|
| 662 |
+
return NodePattern(name=self.name)
|
| 663 |
+
if subpattern is not None and self.name == subpattern.name:
|
| 664 |
+
return subpattern.optimize()
|
| 665 |
+
if (self.min <= 1 and isinstance(subpattern, WildcardPattern) and
|
| 666 |
+
subpattern.min <= 1 and self.name == subpattern.name):
|
| 667 |
+
return WildcardPattern(subpattern.content,
|
| 668 |
+
self.min*subpattern.min,
|
| 669 |
+
self.max*subpattern.max,
|
| 670 |
+
subpattern.name)
|
| 671 |
+
return self
|
| 672 |
+
|
| 673 |
+
def match(self, node, results=None):
|
| 674 |
+
"""Does this pattern exactly match a node?"""
|
| 675 |
+
return self.match_seq([node], results)
|
| 676 |
+
|
| 677 |
+
def match_seq(self, nodes, results=None):
|
| 678 |
+
"""Does this pattern exactly match a sequence of nodes?"""
|
| 679 |
+
for c, r in self.generate_matches(nodes):
|
| 680 |
+
if c == len(nodes):
|
| 681 |
+
if results is not None:
|
| 682 |
+
results.update(r)
|
| 683 |
+
if self.name:
|
| 684 |
+
results[self.name] = list(nodes)
|
| 685 |
+
return True
|
| 686 |
+
return False
|
| 687 |
+
|
| 688 |
+
def generate_matches(self, nodes):
|
| 689 |
+
"""
|
| 690 |
+
Generator yielding matches for a sequence of nodes.
|
| 691 |
+
|
| 692 |
+
Args:
|
| 693 |
+
nodes: sequence of nodes
|
| 694 |
+
|
| 695 |
+
Yields:
|
| 696 |
+
(count, results) tuples where:
|
| 697 |
+
count: the match comprises nodes[:count];
|
| 698 |
+
results: dict containing named submatches.
|
| 699 |
+
"""
|
| 700 |
+
if self.content is None:
|
| 701 |
+
# Shortcut for special case (see __init__.__doc__)
|
| 702 |
+
for count in range(self.min, 1 + min(len(nodes), self.max)):
|
| 703 |
+
r = {}
|
| 704 |
+
if self.name:
|
| 705 |
+
r[self.name] = nodes[:count]
|
| 706 |
+
yield count, r
|
| 707 |
+
elif self.name == "bare_name":
|
| 708 |
+
yield self._bare_name_matches(nodes)
|
| 709 |
+
else:
|
| 710 |
+
# The reason for this is that hitting the recursion limit usually
|
| 711 |
+
# results in some ugly messages about how RuntimeErrors are being
|
| 712 |
+
# ignored. We only have to do this on CPython, though, because other
|
| 713 |
+
# implementations don't have this nasty bug in the first place.
|
| 714 |
+
if hasattr(sys, "getrefcount"):
|
| 715 |
+
save_stderr = sys.stderr
|
| 716 |
+
sys.stderr = StringIO()
|
| 717 |
+
try:
|
| 718 |
+
for count, r in self._recursive_matches(nodes, 0):
|
| 719 |
+
if self.name:
|
| 720 |
+
r[self.name] = nodes[:count]
|
| 721 |
+
yield count, r
|
| 722 |
+
except RuntimeError:
|
| 723 |
+
# Fall back to the iterative pattern matching scheme if the
|
| 724 |
+
# recursive scheme hits the recursion limit (RecursionError).
|
| 725 |
+
for count, r in self._iterative_matches(nodes):
|
| 726 |
+
if self.name:
|
| 727 |
+
r[self.name] = nodes[:count]
|
| 728 |
+
yield count, r
|
| 729 |
+
finally:
|
| 730 |
+
if hasattr(sys, "getrefcount"):
|
| 731 |
+
sys.stderr = save_stderr
|
| 732 |
+
|
| 733 |
+
def _iterative_matches(self, nodes):
|
| 734 |
+
"""Helper to iteratively yield the matches."""
|
| 735 |
+
nodelen = len(nodes)
|
| 736 |
+
if 0 >= self.min:
|
| 737 |
+
yield 0, {}
|
| 738 |
+
|
| 739 |
+
results = []
|
| 740 |
+
# generate matches that use just one alt from self.content
|
| 741 |
+
for alt in self.content:
|
| 742 |
+
for c, r in generate_matches(alt, nodes):
|
| 743 |
+
yield c, r
|
| 744 |
+
results.append((c, r))
|
| 745 |
+
|
| 746 |
+
# for each match, iterate down the nodes
|
| 747 |
+
while results:
|
| 748 |
+
new_results = []
|
| 749 |
+
for c0, r0 in results:
|
| 750 |
+
# stop if the entire set of nodes has been matched
|
| 751 |
+
if c0 < nodelen and c0 <= self.max:
|
| 752 |
+
for alt in self.content:
|
| 753 |
+
for c1, r1 in generate_matches(alt, nodes[c0:]):
|
| 754 |
+
if c1 > 0:
|
| 755 |
+
r = {}
|
| 756 |
+
r.update(r0)
|
| 757 |
+
r.update(r1)
|
| 758 |
+
yield c0 + c1, r
|
| 759 |
+
new_results.append((c0 + c1, r))
|
| 760 |
+
results = new_results
|
| 761 |
+
|
| 762 |
+
def _bare_name_matches(self, nodes):
|
| 763 |
+
"""Special optimized matcher for bare_name."""
|
| 764 |
+
count = 0
|
| 765 |
+
r = {}
|
| 766 |
+
done = False
|
| 767 |
+
max = len(nodes)
|
| 768 |
+
while not done and count < max:
|
| 769 |
+
done = True
|
| 770 |
+
for leaf in self.content:
|
| 771 |
+
if leaf[0].match(nodes[count], r):
|
| 772 |
+
count += 1
|
| 773 |
+
done = False
|
| 774 |
+
break
|
| 775 |
+
r[self.name] = nodes[:count]
|
| 776 |
+
return count, r
|
| 777 |
+
|
| 778 |
+
def _recursive_matches(self, nodes, count):
|
| 779 |
+
"""Helper to recursively yield the matches."""
|
| 780 |
+
assert self.content is not None
|
| 781 |
+
if count >= self.min:
|
| 782 |
+
yield 0, {}
|
| 783 |
+
if count < self.max:
|
| 784 |
+
for alt in self.content:
|
| 785 |
+
for c0, r0 in generate_matches(alt, nodes):
|
| 786 |
+
for c1, r1 in self._recursive_matches(nodes[c0:], count+1):
|
| 787 |
+
r = {}
|
| 788 |
+
r.update(r0)
|
| 789 |
+
r.update(r1)
|
| 790 |
+
yield c0 + c1, r
|
| 791 |
+
|
| 792 |
+
|
| 793 |
+
class NegatedPattern(BasePattern):
|
| 794 |
+
|
| 795 |
+
def __init__(self, content=None):
|
| 796 |
+
"""
|
| 797 |
+
Initializer.
|
| 798 |
+
|
| 799 |
+
The argument is either a pattern or None. If it is None, this
|
| 800 |
+
only matches an empty sequence (effectively '$' in regex
|
| 801 |
+
lingo). If it is not None, this matches whenever the argument
|
| 802 |
+
pattern doesn't have any matches.
|
| 803 |
+
"""
|
| 804 |
+
if content is not None:
|
| 805 |
+
assert isinstance(content, BasePattern), repr(content)
|
| 806 |
+
self.content = content
|
| 807 |
+
|
| 808 |
+
def match(self, node):
|
| 809 |
+
# We never match a node in its entirety
|
| 810 |
+
return False
|
| 811 |
+
|
| 812 |
+
def match_seq(self, nodes):
|
| 813 |
+
# We only match an empty sequence of nodes in its entirety
|
| 814 |
+
return len(nodes) == 0
|
| 815 |
+
|
| 816 |
+
def generate_matches(self, nodes):
|
| 817 |
+
if self.content is None:
|
| 818 |
+
# Return a match if there is an empty sequence
|
| 819 |
+
if len(nodes) == 0:
|
| 820 |
+
yield 0, {}
|
| 821 |
+
else:
|
| 822 |
+
# Return a match if the argument pattern has no matches
|
| 823 |
+
for c, r in self.content.generate_matches(nodes):
|
| 824 |
+
return
|
| 825 |
+
yield 0, {}
|
| 826 |
+
|
| 827 |
+
|
| 828 |
+
def generate_matches(patterns, nodes):
|
| 829 |
+
"""
|
| 830 |
+
Generator yielding matches for a sequence of patterns and nodes.
|
| 831 |
+
|
| 832 |
+
Args:
|
| 833 |
+
patterns: a sequence of patterns
|
| 834 |
+
nodes: a sequence of nodes
|
| 835 |
+
|
| 836 |
+
Yields:
|
| 837 |
+
(count, results) tuples where:
|
| 838 |
+
count: the entire sequence of patterns matches nodes[:count];
|
| 839 |
+
results: dict containing named submatches.
|
| 840 |
+
"""
|
| 841 |
+
if not patterns:
|
| 842 |
+
yield 0, {}
|
| 843 |
+
else:
|
| 844 |
+
p, rest = patterns[0], patterns[1:]
|
| 845 |
+
for c0, r0 in p.generate_matches(nodes):
|
| 846 |
+
if not rest:
|
| 847 |
+
yield c0, r0
|
| 848 |
+
else:
|
| 849 |
+
for c1, r1 in generate_matches(rest, nodes[c0:]):
|
| 850 |
+
r = {}
|
| 851 |
+
r.update(r0)
|
| 852 |
+
r.update(r1)
|
| 853 |
+
yield c0 + c1, r
|
llava/lib/python3.10/multiprocessing/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (664 Bytes). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/connection.cpython-310.pyc
ADDED
|
Binary file (25.6 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/context.cpython-310.pyc
ADDED
|
Binary file (13.1 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/forkserver.cpython-310.pyc
ADDED
|
Binary file (8.42 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/heap.cpython-310.pyc
ADDED
|
Binary file (7.92 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/managers.cpython-310.pyc
ADDED
|
Binary file (40.8 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/pool.cpython-310.pyc
ADDED
|
Binary file (25.5 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/popen_fork.cpython-310.pyc
ADDED
|
Binary file (2.52 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/popen_forkserver.cpython-310.pyc
ADDED
|
Binary file (2.46 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/popen_spawn_posix.cpython-310.pyc
ADDED
|
Binary file (2.34 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/popen_spawn_win32.cpython-310.pyc
ADDED
|
Binary file (3.73 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/process.cpython-310.pyc
ADDED
|
Binary file (11.5 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/queues.cpython-310.pyc
ADDED
|
Binary file (10.5 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/reduction.cpython-310.pyc
ADDED
|
Binary file (8.29 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/resource_sharer.cpython-310.pyc
ADDED
|
Binary file (5.56 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/resource_tracker.cpython-310.pyc
ADDED
|
Binary file (5.74 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/shared_memory.cpython-310.pyc
ADDED
|
Binary file (14.7 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/sharedctypes.cpython-310.pyc
ADDED
|
Binary file (7.1 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/spawn.cpython-310.pyc
ADDED
|
Binary file (6.99 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/synchronize.cpython-310.pyc
ADDED
|
Binary file (11.3 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/__pycache__/util.cpython-310.pyc
ADDED
|
Binary file (11.5 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/dummy/__init__.py
ADDED
|
@@ -0,0 +1,126 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Support for the API of the multiprocessing package using threads
|
| 3 |
+
#
|
| 4 |
+
# multiprocessing/dummy/__init__.py
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 2006-2008, R Oudkerk
|
| 7 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 8 |
+
#
|
| 9 |
+
|
| 10 |
+
__all__ = [
|
| 11 |
+
'Process', 'current_process', 'active_children', 'freeze_support',
|
| 12 |
+
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
|
| 13 |
+
'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
|
| 14 |
+
]
|
| 15 |
+
|
| 16 |
+
#
|
| 17 |
+
# Imports
|
| 18 |
+
#
|
| 19 |
+
|
| 20 |
+
import threading
|
| 21 |
+
import sys
|
| 22 |
+
import weakref
|
| 23 |
+
import array
|
| 24 |
+
|
| 25 |
+
from .connection import Pipe
|
| 26 |
+
from threading import Lock, RLock, Semaphore, BoundedSemaphore
|
| 27 |
+
from threading import Event, Condition, Barrier
|
| 28 |
+
from queue import Queue
|
| 29 |
+
|
| 30 |
+
#
|
| 31 |
+
#
|
| 32 |
+
#
|
| 33 |
+
|
| 34 |
+
class DummyProcess(threading.Thread):
|
| 35 |
+
|
| 36 |
+
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
|
| 37 |
+
threading.Thread.__init__(self, group, target, name, args, kwargs)
|
| 38 |
+
self._pid = None
|
| 39 |
+
self._children = weakref.WeakKeyDictionary()
|
| 40 |
+
self._start_called = False
|
| 41 |
+
self._parent = current_process()
|
| 42 |
+
|
| 43 |
+
def start(self):
|
| 44 |
+
if self._parent is not current_process():
|
| 45 |
+
raise RuntimeError(
|
| 46 |
+
"Parent is {0!r} but current_process is {1!r}".format(
|
| 47 |
+
self._parent, current_process()))
|
| 48 |
+
self._start_called = True
|
| 49 |
+
if hasattr(self._parent, '_children'):
|
| 50 |
+
self._parent._children[self] = None
|
| 51 |
+
threading.Thread.start(self)
|
| 52 |
+
|
| 53 |
+
@property
|
| 54 |
+
def exitcode(self):
|
| 55 |
+
if self._start_called and not self.is_alive():
|
| 56 |
+
return 0
|
| 57 |
+
else:
|
| 58 |
+
return None
|
| 59 |
+
|
| 60 |
+
#
|
| 61 |
+
#
|
| 62 |
+
#
|
| 63 |
+
|
| 64 |
+
Process = DummyProcess
|
| 65 |
+
current_process = threading.current_thread
|
| 66 |
+
current_process()._children = weakref.WeakKeyDictionary()
|
| 67 |
+
|
| 68 |
+
def active_children():
|
| 69 |
+
children = current_process()._children
|
| 70 |
+
for p in list(children):
|
| 71 |
+
if not p.is_alive():
|
| 72 |
+
children.pop(p, None)
|
| 73 |
+
return list(children)
|
| 74 |
+
|
| 75 |
+
def freeze_support():
|
| 76 |
+
pass
|
| 77 |
+
|
| 78 |
+
#
|
| 79 |
+
#
|
| 80 |
+
#
|
| 81 |
+
|
| 82 |
+
class Namespace(object):
|
| 83 |
+
def __init__(self, /, **kwds):
|
| 84 |
+
self.__dict__.update(kwds)
|
| 85 |
+
def __repr__(self):
|
| 86 |
+
items = list(self.__dict__.items())
|
| 87 |
+
temp = []
|
| 88 |
+
for name, value in items:
|
| 89 |
+
if not name.startswith('_'):
|
| 90 |
+
temp.append('%s=%r' % (name, value))
|
| 91 |
+
temp.sort()
|
| 92 |
+
return '%s(%s)' % (self.__class__.__name__, ', '.join(temp))
|
| 93 |
+
|
| 94 |
+
dict = dict
|
| 95 |
+
list = list
|
| 96 |
+
|
| 97 |
+
def Array(typecode, sequence, lock=True):
|
| 98 |
+
return array.array(typecode, sequence)
|
| 99 |
+
|
| 100 |
+
class Value(object):
|
| 101 |
+
def __init__(self, typecode, value, lock=True):
|
| 102 |
+
self._typecode = typecode
|
| 103 |
+
self._value = value
|
| 104 |
+
|
| 105 |
+
@property
|
| 106 |
+
def value(self):
|
| 107 |
+
return self._value
|
| 108 |
+
|
| 109 |
+
@value.setter
|
| 110 |
+
def value(self, value):
|
| 111 |
+
self._value = value
|
| 112 |
+
|
| 113 |
+
def __repr__(self):
|
| 114 |
+
return '<%s(%r, %r)>'%(type(self).__name__,self._typecode,self._value)
|
| 115 |
+
|
| 116 |
+
def Manager():
|
| 117 |
+
return sys.modules[__name__]
|
| 118 |
+
|
| 119 |
+
def shutdown():
|
| 120 |
+
pass
|
| 121 |
+
|
| 122 |
+
def Pool(processes=None, initializer=None, initargs=()):
|
| 123 |
+
from ..pool import ThreadPool
|
| 124 |
+
return ThreadPool(processes, initializer, initargs)
|
| 125 |
+
|
| 126 |
+
JoinableQueue = Queue
|
llava/lib/python3.10/multiprocessing/dummy/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (4.12 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/dummy/__pycache__/connection.cpython-310.pyc
ADDED
|
Binary file (2.82 kB). View file
|
|
|
llava/lib/python3.10/multiprocessing/dummy/connection.py
ADDED
|
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
|
| 3 |
+
#
|
| 4 |
+
# multiprocessing/dummy/connection.py
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 2006-2008, R Oudkerk
|
| 7 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 8 |
+
#
|
| 9 |
+
|
| 10 |
+
__all__ = [ 'Client', 'Listener', 'Pipe' ]
|
| 11 |
+
|
| 12 |
+
from queue import Queue
|
| 13 |
+
|
| 14 |
+
|
| 15 |
+
families = [None]
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class Listener(object):
|
| 19 |
+
|
| 20 |
+
def __init__(self, address=None, family=None, backlog=1):
|
| 21 |
+
self._backlog_queue = Queue(backlog)
|
| 22 |
+
|
| 23 |
+
def accept(self):
|
| 24 |
+
return Connection(*self._backlog_queue.get())
|
| 25 |
+
|
| 26 |
+
def close(self):
|
| 27 |
+
self._backlog_queue = None
|
| 28 |
+
|
| 29 |
+
@property
|
| 30 |
+
def address(self):
|
| 31 |
+
return self._backlog_queue
|
| 32 |
+
|
| 33 |
+
def __enter__(self):
|
| 34 |
+
return self
|
| 35 |
+
|
| 36 |
+
def __exit__(self, exc_type, exc_value, exc_tb):
|
| 37 |
+
self.close()
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def Client(address):
|
| 41 |
+
_in, _out = Queue(), Queue()
|
| 42 |
+
address.put((_out, _in))
|
| 43 |
+
return Connection(_in, _out)
|
| 44 |
+
|
| 45 |
+
|
| 46 |
+
def Pipe(duplex=True):
|
| 47 |
+
a, b = Queue(), Queue()
|
| 48 |
+
return Connection(a, b), Connection(b, a)
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
class Connection(object):
|
| 52 |
+
|
| 53 |
+
def __init__(self, _in, _out):
|
| 54 |
+
self._out = _out
|
| 55 |
+
self._in = _in
|
| 56 |
+
self.send = self.send_bytes = _out.put
|
| 57 |
+
self.recv = self.recv_bytes = _in.get
|
| 58 |
+
|
| 59 |
+
def poll(self, timeout=0.0):
|
| 60 |
+
if self._in.qsize() > 0:
|
| 61 |
+
return True
|
| 62 |
+
if timeout <= 0.0:
|
| 63 |
+
return False
|
| 64 |
+
with self._in.not_empty:
|
| 65 |
+
self._in.not_empty.wait(timeout)
|
| 66 |
+
return self._in.qsize() > 0
|
| 67 |
+
|
| 68 |
+
def close(self):
|
| 69 |
+
pass
|
| 70 |
+
|
| 71 |
+
def __enter__(self):
|
| 72 |
+
return self
|
| 73 |
+
|
| 74 |
+
def __exit__(self, exc_type, exc_value, exc_tb):
|
| 75 |
+
self.close()
|
llava/lib/python3.10/multiprocessing/heap.py
ADDED
|
@@ -0,0 +1,337 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Module which supports allocation of memory from an mmap
|
| 3 |
+
#
|
| 4 |
+
# multiprocessing/heap.py
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 2006-2008, R Oudkerk
|
| 7 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 8 |
+
#
|
| 9 |
+
|
| 10 |
+
import bisect
|
| 11 |
+
from collections import defaultdict
|
| 12 |
+
import mmap
|
| 13 |
+
import os
|
| 14 |
+
import sys
|
| 15 |
+
import tempfile
|
| 16 |
+
import threading
|
| 17 |
+
|
| 18 |
+
from .context import reduction, assert_spawning
|
| 19 |
+
from . import util
|
| 20 |
+
|
| 21 |
+
__all__ = ['BufferWrapper']
|
| 22 |
+
|
| 23 |
+
#
|
| 24 |
+
# Inheritable class which wraps an mmap, and from which blocks can be allocated
|
| 25 |
+
#
|
| 26 |
+
|
| 27 |
+
if sys.platform == 'win32':
|
| 28 |
+
|
| 29 |
+
import _winapi
|
| 30 |
+
|
| 31 |
+
class Arena(object):
|
| 32 |
+
"""
|
| 33 |
+
A shared memory area backed by anonymous memory (Windows).
|
| 34 |
+
"""
|
| 35 |
+
|
| 36 |
+
_rand = tempfile._RandomNameSequence()
|
| 37 |
+
|
| 38 |
+
def __init__(self, size):
|
| 39 |
+
self.size = size
|
| 40 |
+
for i in range(100):
|
| 41 |
+
name = 'pym-%d-%s' % (os.getpid(), next(self._rand))
|
| 42 |
+
buf = mmap.mmap(-1, size, tagname=name)
|
| 43 |
+
if _winapi.GetLastError() == 0:
|
| 44 |
+
break
|
| 45 |
+
# We have reopened a preexisting mmap.
|
| 46 |
+
buf.close()
|
| 47 |
+
else:
|
| 48 |
+
raise FileExistsError('Cannot find name for new mmap')
|
| 49 |
+
self.name = name
|
| 50 |
+
self.buffer = buf
|
| 51 |
+
self._state = (self.size, self.name)
|
| 52 |
+
|
| 53 |
+
def __getstate__(self):
|
| 54 |
+
assert_spawning(self)
|
| 55 |
+
return self._state
|
| 56 |
+
|
| 57 |
+
def __setstate__(self, state):
|
| 58 |
+
self.size, self.name = self._state = state
|
| 59 |
+
# Reopen existing mmap
|
| 60 |
+
self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
|
| 61 |
+
# XXX Temporarily preventing buildbot failures while determining
|
| 62 |
+
# XXX the correct long-term fix. See issue 23060
|
| 63 |
+
#assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS
|
| 64 |
+
|
| 65 |
+
else:
|
| 66 |
+
|
| 67 |
+
class Arena(object):
|
| 68 |
+
"""
|
| 69 |
+
A shared memory area backed by a temporary file (POSIX).
|
| 70 |
+
"""
|
| 71 |
+
|
| 72 |
+
if sys.platform == 'linux':
|
| 73 |
+
_dir_candidates = ['/dev/shm']
|
| 74 |
+
else:
|
| 75 |
+
_dir_candidates = []
|
| 76 |
+
|
| 77 |
+
def __init__(self, size, fd=-1):
|
| 78 |
+
self.size = size
|
| 79 |
+
self.fd = fd
|
| 80 |
+
if fd == -1:
|
| 81 |
+
# Arena is created anew (if fd != -1, it means we're coming
|
| 82 |
+
# from rebuild_arena() below)
|
| 83 |
+
self.fd, name = tempfile.mkstemp(
|
| 84 |
+
prefix='pym-%d-'%os.getpid(),
|
| 85 |
+
dir=self._choose_dir(size))
|
| 86 |
+
os.unlink(name)
|
| 87 |
+
util.Finalize(self, os.close, (self.fd,))
|
| 88 |
+
os.ftruncate(self.fd, size)
|
| 89 |
+
self.buffer = mmap.mmap(self.fd, self.size)
|
| 90 |
+
|
| 91 |
+
def _choose_dir(self, size):
|
| 92 |
+
# Choose a non-storage backed directory if possible,
|
| 93 |
+
# to improve performance
|
| 94 |
+
for d in self._dir_candidates:
|
| 95 |
+
st = os.statvfs(d)
|
| 96 |
+
if st.f_bavail * st.f_frsize >= size: # enough free space?
|
| 97 |
+
return d
|
| 98 |
+
return util.get_temp_dir()
|
| 99 |
+
|
| 100 |
+
def reduce_arena(a):
|
| 101 |
+
if a.fd == -1:
|
| 102 |
+
raise ValueError('Arena is unpicklable because '
|
| 103 |
+
'forking was enabled when it was created')
|
| 104 |
+
return rebuild_arena, (a.size, reduction.DupFd(a.fd))
|
| 105 |
+
|
| 106 |
+
def rebuild_arena(size, dupfd):
|
| 107 |
+
return Arena(size, dupfd.detach())
|
| 108 |
+
|
| 109 |
+
reduction.register(Arena, reduce_arena)
|
| 110 |
+
|
| 111 |
+
#
|
| 112 |
+
# Class allowing allocation of chunks of memory from arenas
|
| 113 |
+
#
|
| 114 |
+
|
| 115 |
+
class Heap(object):
|
| 116 |
+
|
| 117 |
+
# Minimum malloc() alignment
|
| 118 |
+
_alignment = 8
|
| 119 |
+
|
| 120 |
+
_DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2 # 4 MB
|
| 121 |
+
_DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2
|
| 122 |
+
|
| 123 |
+
def __init__(self, size=mmap.PAGESIZE):
|
| 124 |
+
self._lastpid = os.getpid()
|
| 125 |
+
self._lock = threading.Lock()
|
| 126 |
+
# Current arena allocation size
|
| 127 |
+
self._size = size
|
| 128 |
+
# A sorted list of available block sizes in arenas
|
| 129 |
+
self._lengths = []
|
| 130 |
+
|
| 131 |
+
# Free block management:
|
| 132 |
+
# - map each block size to a list of `(Arena, start, stop)` blocks
|
| 133 |
+
self._len_to_seq = {}
|
| 134 |
+
# - map `(Arena, start)` tuple to the `(Arena, start, stop)` block
|
| 135 |
+
# starting at that offset
|
| 136 |
+
self._start_to_block = {}
|
| 137 |
+
# - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block
|
| 138 |
+
# ending at that offset
|
| 139 |
+
self._stop_to_block = {}
|
| 140 |
+
|
| 141 |
+
# Map arenas to their `(Arena, start, stop)` blocks in use
|
| 142 |
+
self._allocated_blocks = defaultdict(set)
|
| 143 |
+
self._arenas = []
|
| 144 |
+
|
| 145 |
+
# List of pending blocks to free - see comment in free() below
|
| 146 |
+
self._pending_free_blocks = []
|
| 147 |
+
|
| 148 |
+
# Statistics
|
| 149 |
+
self._n_mallocs = 0
|
| 150 |
+
self._n_frees = 0
|
| 151 |
+
|
| 152 |
+
@staticmethod
|
| 153 |
+
def _roundup(n, alignment):
|
| 154 |
+
# alignment must be a power of 2
|
| 155 |
+
mask = alignment - 1
|
| 156 |
+
return (n + mask) & ~mask
|
| 157 |
+
|
| 158 |
+
def _new_arena(self, size):
|
| 159 |
+
# Create a new arena with at least the given *size*
|
| 160 |
+
length = self._roundup(max(self._size, size), mmap.PAGESIZE)
|
| 161 |
+
# We carve larger and larger arenas, for efficiency, until we
|
| 162 |
+
# reach a large-ish size (roughly L3 cache-sized)
|
| 163 |
+
if self._size < self._DOUBLE_ARENA_SIZE_UNTIL:
|
| 164 |
+
self._size *= 2
|
| 165 |
+
util.info('allocating a new mmap of length %d', length)
|
| 166 |
+
arena = Arena(length)
|
| 167 |
+
self._arenas.append(arena)
|
| 168 |
+
return (arena, 0, length)
|
| 169 |
+
|
| 170 |
+
def _discard_arena(self, arena):
|
| 171 |
+
# Possibly delete the given (unused) arena
|
| 172 |
+
length = arena.size
|
| 173 |
+
# Reusing an existing arena is faster than creating a new one, so
|
| 174 |
+
# we only reclaim space if it's large enough.
|
| 175 |
+
if length < self._DISCARD_FREE_SPACE_LARGER_THAN:
|
| 176 |
+
return
|
| 177 |
+
blocks = self._allocated_blocks.pop(arena)
|
| 178 |
+
assert not blocks
|
| 179 |
+
del self._start_to_block[(arena, 0)]
|
| 180 |
+
del self._stop_to_block[(arena, length)]
|
| 181 |
+
self._arenas.remove(arena)
|
| 182 |
+
seq = self._len_to_seq[length]
|
| 183 |
+
seq.remove((arena, 0, length))
|
| 184 |
+
if not seq:
|
| 185 |
+
del self._len_to_seq[length]
|
| 186 |
+
self._lengths.remove(length)
|
| 187 |
+
|
| 188 |
+
def _malloc(self, size):
|
| 189 |
+
# returns a large enough block -- it might be much larger
|
| 190 |
+
i = bisect.bisect_left(self._lengths, size)
|
| 191 |
+
if i == len(self._lengths):
|
| 192 |
+
return self._new_arena(size)
|
| 193 |
+
else:
|
| 194 |
+
length = self._lengths[i]
|
| 195 |
+
seq = self._len_to_seq[length]
|
| 196 |
+
block = seq.pop()
|
| 197 |
+
if not seq:
|
| 198 |
+
del self._len_to_seq[length], self._lengths[i]
|
| 199 |
+
|
| 200 |
+
(arena, start, stop) = block
|
| 201 |
+
del self._start_to_block[(arena, start)]
|
| 202 |
+
del self._stop_to_block[(arena, stop)]
|
| 203 |
+
return block
|
| 204 |
+
|
| 205 |
+
def _add_free_block(self, block):
|
| 206 |
+
# make block available and try to merge with its neighbours in the arena
|
| 207 |
+
(arena, start, stop) = block
|
| 208 |
+
|
| 209 |
+
try:
|
| 210 |
+
prev_block = self._stop_to_block[(arena, start)]
|
| 211 |
+
except KeyError:
|
| 212 |
+
pass
|
| 213 |
+
else:
|
| 214 |
+
start, _ = self._absorb(prev_block)
|
| 215 |
+
|
| 216 |
+
try:
|
| 217 |
+
next_block = self._start_to_block[(arena, stop)]
|
| 218 |
+
except KeyError:
|
| 219 |
+
pass
|
| 220 |
+
else:
|
| 221 |
+
_, stop = self._absorb(next_block)
|
| 222 |
+
|
| 223 |
+
block = (arena, start, stop)
|
| 224 |
+
length = stop - start
|
| 225 |
+
|
| 226 |
+
try:
|
| 227 |
+
self._len_to_seq[length].append(block)
|
| 228 |
+
except KeyError:
|
| 229 |
+
self._len_to_seq[length] = [block]
|
| 230 |
+
bisect.insort(self._lengths, length)
|
| 231 |
+
|
| 232 |
+
self._start_to_block[(arena, start)] = block
|
| 233 |
+
self._stop_to_block[(arena, stop)] = block
|
| 234 |
+
|
| 235 |
+
def _absorb(self, block):
|
| 236 |
+
# deregister this block so it can be merged with a neighbour
|
| 237 |
+
(arena, start, stop) = block
|
| 238 |
+
del self._start_to_block[(arena, start)]
|
| 239 |
+
del self._stop_to_block[(arena, stop)]
|
| 240 |
+
|
| 241 |
+
length = stop - start
|
| 242 |
+
seq = self._len_to_seq[length]
|
| 243 |
+
seq.remove(block)
|
| 244 |
+
if not seq:
|
| 245 |
+
del self._len_to_seq[length]
|
| 246 |
+
self._lengths.remove(length)
|
| 247 |
+
|
| 248 |
+
return start, stop
|
| 249 |
+
|
| 250 |
+
def _remove_allocated_block(self, block):
|
| 251 |
+
arena, start, stop = block
|
| 252 |
+
blocks = self._allocated_blocks[arena]
|
| 253 |
+
blocks.remove((start, stop))
|
| 254 |
+
if not blocks:
|
| 255 |
+
# Arena is entirely free, discard it from this process
|
| 256 |
+
self._discard_arena(arena)
|
| 257 |
+
|
| 258 |
+
def _free_pending_blocks(self):
|
| 259 |
+
# Free all the blocks in the pending list - called with the lock held.
|
| 260 |
+
while True:
|
| 261 |
+
try:
|
| 262 |
+
block = self._pending_free_blocks.pop()
|
| 263 |
+
except IndexError:
|
| 264 |
+
break
|
| 265 |
+
self._add_free_block(block)
|
| 266 |
+
self._remove_allocated_block(block)
|
| 267 |
+
|
| 268 |
+
def free(self, block):
|
| 269 |
+
# free a block returned by malloc()
|
| 270 |
+
# Since free() can be called asynchronously by the GC, it could happen
|
| 271 |
+
# that it's called while self._lock is held: in that case,
|
| 272 |
+
# self._lock.acquire() would deadlock (issue #12352). To avoid that, a
|
| 273 |
+
# trylock is used instead, and if the lock can't be acquired
|
| 274 |
+
# immediately, the block is added to a list of blocks to be freed
|
| 275 |
+
# synchronously sometimes later from malloc() or free(), by calling
|
| 276 |
+
# _free_pending_blocks() (appending and retrieving from a list is not
|
| 277 |
+
# strictly thread-safe but under CPython it's atomic thanks to the GIL).
|
| 278 |
+
if os.getpid() != self._lastpid:
|
| 279 |
+
raise ValueError(
|
| 280 |
+
"My pid ({0:n}) is not last pid {1:n}".format(
|
| 281 |
+
os.getpid(),self._lastpid))
|
| 282 |
+
if not self._lock.acquire(False):
|
| 283 |
+
# can't acquire the lock right now, add the block to the list of
|
| 284 |
+
# pending blocks to free
|
| 285 |
+
self._pending_free_blocks.append(block)
|
| 286 |
+
else:
|
| 287 |
+
# we hold the lock
|
| 288 |
+
try:
|
| 289 |
+
self._n_frees += 1
|
| 290 |
+
self._free_pending_blocks()
|
| 291 |
+
self._add_free_block(block)
|
| 292 |
+
self._remove_allocated_block(block)
|
| 293 |
+
finally:
|
| 294 |
+
self._lock.release()
|
| 295 |
+
|
| 296 |
+
def malloc(self, size):
|
| 297 |
+
# return a block of right size (possibly rounded up)
|
| 298 |
+
if size < 0:
|
| 299 |
+
raise ValueError("Size {0:n} out of range".format(size))
|
| 300 |
+
if sys.maxsize <= size:
|
| 301 |
+
raise OverflowError("Size {0:n} too large".format(size))
|
| 302 |
+
if os.getpid() != self._lastpid:
|
| 303 |
+
self.__init__() # reinitialize after fork
|
| 304 |
+
with self._lock:
|
| 305 |
+
self._n_mallocs += 1
|
| 306 |
+
# allow pending blocks to be marked available
|
| 307 |
+
self._free_pending_blocks()
|
| 308 |
+
size = self._roundup(max(size, 1), self._alignment)
|
| 309 |
+
(arena, start, stop) = self._malloc(size)
|
| 310 |
+
real_stop = start + size
|
| 311 |
+
if real_stop < stop:
|
| 312 |
+
# if the returned block is larger than necessary, mark
|
| 313 |
+
# the remainder available
|
| 314 |
+
self._add_free_block((arena, real_stop, stop))
|
| 315 |
+
self._allocated_blocks[arena].add((start, real_stop))
|
| 316 |
+
return (arena, start, real_stop)
|
| 317 |
+
|
| 318 |
+
#
|
| 319 |
+
# Class wrapping a block allocated out of a Heap -- can be inherited by child process
|
| 320 |
+
#
|
| 321 |
+
|
| 322 |
+
class BufferWrapper(object):
|
| 323 |
+
|
| 324 |
+
_heap = Heap()
|
| 325 |
+
|
| 326 |
+
def __init__(self, size):
|
| 327 |
+
if size < 0:
|
| 328 |
+
raise ValueError("Size {0:n} out of range".format(size))
|
| 329 |
+
if sys.maxsize <= size:
|
| 330 |
+
raise OverflowError("Size {0:n} too large".format(size))
|
| 331 |
+
block = BufferWrapper._heap.malloc(size)
|
| 332 |
+
self._state = (block, size)
|
| 333 |
+
util.Finalize(self, BufferWrapper._heap.free, args=(block,))
|
| 334 |
+
|
| 335 |
+
def create_memoryview(self):
|
| 336 |
+
(arena, start, stop), size = self._state
|
| 337 |
+
return memoryview(arena.buffer)[start:start+size]
|
llava/lib/python3.10/multiprocessing/pool.py
ADDED
|
@@ -0,0 +1,957 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Module providing the `Pool` class for managing a process pool
|
| 3 |
+
#
|
| 4 |
+
# multiprocessing/pool.py
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 2006-2008, R Oudkerk
|
| 7 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 8 |
+
#
|
| 9 |
+
|
| 10 |
+
__all__ = ['Pool', 'ThreadPool']
|
| 11 |
+
|
| 12 |
+
#
|
| 13 |
+
# Imports
|
| 14 |
+
#
|
| 15 |
+
|
| 16 |
+
import collections
|
| 17 |
+
import itertools
|
| 18 |
+
import os
|
| 19 |
+
import queue
|
| 20 |
+
import threading
|
| 21 |
+
import time
|
| 22 |
+
import traceback
|
| 23 |
+
import types
|
| 24 |
+
import warnings
|
| 25 |
+
|
| 26 |
+
# If threading is available then ThreadPool should be provided. Therefore
|
| 27 |
+
# we avoid top-level imports which are liable to fail on some systems.
|
| 28 |
+
from . import util
|
| 29 |
+
from . import get_context, TimeoutError
|
| 30 |
+
from .connection import wait
|
| 31 |
+
|
| 32 |
+
#
|
| 33 |
+
# Constants representing the state of a pool
|
| 34 |
+
#
|
| 35 |
+
|
| 36 |
+
INIT = "INIT"
|
| 37 |
+
RUN = "RUN"
|
| 38 |
+
CLOSE = "CLOSE"
|
| 39 |
+
TERMINATE = "TERMINATE"
|
| 40 |
+
|
| 41 |
+
#
|
| 42 |
+
# Miscellaneous
|
| 43 |
+
#
|
| 44 |
+
|
| 45 |
+
job_counter = itertools.count()
|
| 46 |
+
|
| 47 |
+
def mapstar(args):
|
| 48 |
+
return list(map(*args))
|
| 49 |
+
|
| 50 |
+
def starmapstar(args):
|
| 51 |
+
return list(itertools.starmap(args[0], args[1]))
|
| 52 |
+
|
| 53 |
+
#
|
| 54 |
+
# Hack to embed stringification of remote traceback in local traceback
|
| 55 |
+
#
|
| 56 |
+
|
| 57 |
+
class RemoteTraceback(Exception):
|
| 58 |
+
def __init__(self, tb):
|
| 59 |
+
self.tb = tb
|
| 60 |
+
def __str__(self):
|
| 61 |
+
return self.tb
|
| 62 |
+
|
| 63 |
+
class ExceptionWithTraceback:
|
| 64 |
+
def __init__(self, exc, tb):
|
| 65 |
+
tb = traceback.format_exception(type(exc), exc, tb)
|
| 66 |
+
tb = ''.join(tb)
|
| 67 |
+
self.exc = exc
|
| 68 |
+
self.tb = '\n"""\n%s"""' % tb
|
| 69 |
+
def __reduce__(self):
|
| 70 |
+
return rebuild_exc, (self.exc, self.tb)
|
| 71 |
+
|
| 72 |
+
def rebuild_exc(exc, tb):
|
| 73 |
+
exc.__cause__ = RemoteTraceback(tb)
|
| 74 |
+
return exc
|
| 75 |
+
|
| 76 |
+
#
|
| 77 |
+
# Code run by worker processes
|
| 78 |
+
#
|
| 79 |
+
|
| 80 |
+
class MaybeEncodingError(Exception):
|
| 81 |
+
"""Wraps possible unpickleable errors, so they can be
|
| 82 |
+
safely sent through the socket."""
|
| 83 |
+
|
| 84 |
+
def __init__(self, exc, value):
|
| 85 |
+
self.exc = repr(exc)
|
| 86 |
+
self.value = repr(value)
|
| 87 |
+
super(MaybeEncodingError, self).__init__(self.exc, self.value)
|
| 88 |
+
|
| 89 |
+
def __str__(self):
|
| 90 |
+
return "Error sending result: '%s'. Reason: '%s'" % (self.value,
|
| 91 |
+
self.exc)
|
| 92 |
+
|
| 93 |
+
def __repr__(self):
|
| 94 |
+
return "<%s: %s>" % (self.__class__.__name__, self)
|
| 95 |
+
|
| 96 |
+
|
| 97 |
+
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
|
| 98 |
+
wrap_exception=False):
|
| 99 |
+
if (maxtasks is not None) and not (isinstance(maxtasks, int)
|
| 100 |
+
and maxtasks >= 1):
|
| 101 |
+
raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks))
|
| 102 |
+
put = outqueue.put
|
| 103 |
+
get = inqueue.get
|
| 104 |
+
if hasattr(inqueue, '_writer'):
|
| 105 |
+
inqueue._writer.close()
|
| 106 |
+
outqueue._reader.close()
|
| 107 |
+
|
| 108 |
+
if initializer is not None:
|
| 109 |
+
initializer(*initargs)
|
| 110 |
+
|
| 111 |
+
completed = 0
|
| 112 |
+
while maxtasks is None or (maxtasks and completed < maxtasks):
|
| 113 |
+
try:
|
| 114 |
+
task = get()
|
| 115 |
+
except (EOFError, OSError):
|
| 116 |
+
util.debug('worker got EOFError or OSError -- exiting')
|
| 117 |
+
break
|
| 118 |
+
|
| 119 |
+
if task is None:
|
| 120 |
+
util.debug('worker got sentinel -- exiting')
|
| 121 |
+
break
|
| 122 |
+
|
| 123 |
+
job, i, func, args, kwds = task
|
| 124 |
+
try:
|
| 125 |
+
result = (True, func(*args, **kwds))
|
| 126 |
+
except Exception as e:
|
| 127 |
+
if wrap_exception and func is not _helper_reraises_exception:
|
| 128 |
+
e = ExceptionWithTraceback(e, e.__traceback__)
|
| 129 |
+
result = (False, e)
|
| 130 |
+
try:
|
| 131 |
+
put((job, i, result))
|
| 132 |
+
except Exception as e:
|
| 133 |
+
wrapped = MaybeEncodingError(e, result[1])
|
| 134 |
+
util.debug("Possible encoding error while sending result: %s" % (
|
| 135 |
+
wrapped))
|
| 136 |
+
put((job, i, (False, wrapped)))
|
| 137 |
+
|
| 138 |
+
task = job = result = func = args = kwds = None
|
| 139 |
+
completed += 1
|
| 140 |
+
util.debug('worker exiting after %d tasks' % completed)
|
| 141 |
+
|
| 142 |
+
def _helper_reraises_exception(ex):
|
| 143 |
+
'Pickle-able helper function for use by _guarded_task_generation.'
|
| 144 |
+
raise ex
|
| 145 |
+
|
| 146 |
+
#
|
| 147 |
+
# Class representing a process pool
|
| 148 |
+
#
|
| 149 |
+
|
| 150 |
+
class _PoolCache(dict):
|
| 151 |
+
"""
|
| 152 |
+
Class that implements a cache for the Pool class that will notify
|
| 153 |
+
the pool management threads every time the cache is emptied. The
|
| 154 |
+
notification is done by the use of a queue that is provided when
|
| 155 |
+
instantiating the cache.
|
| 156 |
+
"""
|
| 157 |
+
def __init__(self, /, *args, notifier=None, **kwds):
|
| 158 |
+
self.notifier = notifier
|
| 159 |
+
super().__init__(*args, **kwds)
|
| 160 |
+
|
| 161 |
+
def __delitem__(self, item):
|
| 162 |
+
super().__delitem__(item)
|
| 163 |
+
|
| 164 |
+
# Notify that the cache is empty. This is important because the
|
| 165 |
+
# pool keeps maintaining workers until the cache gets drained. This
|
| 166 |
+
# eliminates a race condition in which a task is finished after the
|
| 167 |
+
# the pool's _handle_workers method has enter another iteration of the
|
| 168 |
+
# loop. In this situation, the only event that can wake up the pool
|
| 169 |
+
# is the cache to be emptied (no more tasks available).
|
| 170 |
+
if not self:
|
| 171 |
+
self.notifier.put(None)
|
| 172 |
+
|
| 173 |
+
class Pool(object):
|
| 174 |
+
'''
|
| 175 |
+
Class which supports an async version of applying functions to arguments.
|
| 176 |
+
'''
|
| 177 |
+
_wrap_exception = True
|
| 178 |
+
|
| 179 |
+
@staticmethod
|
| 180 |
+
def Process(ctx, *args, **kwds):
|
| 181 |
+
return ctx.Process(*args, **kwds)
|
| 182 |
+
|
| 183 |
+
def __init__(self, processes=None, initializer=None, initargs=(),
|
| 184 |
+
maxtasksperchild=None, context=None):
|
| 185 |
+
# Attributes initialized early to make sure that they exist in
|
| 186 |
+
# __del__() if __init__() raises an exception
|
| 187 |
+
self._pool = []
|
| 188 |
+
self._state = INIT
|
| 189 |
+
|
| 190 |
+
self._ctx = context or get_context()
|
| 191 |
+
self._setup_queues()
|
| 192 |
+
self._taskqueue = queue.SimpleQueue()
|
| 193 |
+
# The _change_notifier queue exist to wake up self._handle_workers()
|
| 194 |
+
# when the cache (self._cache) is empty or when there is a change in
|
| 195 |
+
# the _state variable of the thread that runs _handle_workers.
|
| 196 |
+
self._change_notifier = self._ctx.SimpleQueue()
|
| 197 |
+
self._cache = _PoolCache(notifier=self._change_notifier)
|
| 198 |
+
self._maxtasksperchild = maxtasksperchild
|
| 199 |
+
self._initializer = initializer
|
| 200 |
+
self._initargs = initargs
|
| 201 |
+
|
| 202 |
+
if processes is None:
|
| 203 |
+
processes = os.cpu_count() or 1
|
| 204 |
+
if processes < 1:
|
| 205 |
+
raise ValueError("Number of processes must be at least 1")
|
| 206 |
+
if maxtasksperchild is not None:
|
| 207 |
+
if not isinstance(maxtasksperchild, int) or maxtasksperchild <= 0:
|
| 208 |
+
raise ValueError("maxtasksperchild must be a positive int or None")
|
| 209 |
+
|
| 210 |
+
if initializer is not None and not callable(initializer):
|
| 211 |
+
raise TypeError('initializer must be a callable')
|
| 212 |
+
|
| 213 |
+
self._processes = processes
|
| 214 |
+
try:
|
| 215 |
+
self._repopulate_pool()
|
| 216 |
+
except Exception:
|
| 217 |
+
for p in self._pool:
|
| 218 |
+
if p.exitcode is None:
|
| 219 |
+
p.terminate()
|
| 220 |
+
for p in self._pool:
|
| 221 |
+
p.join()
|
| 222 |
+
raise
|
| 223 |
+
|
| 224 |
+
sentinels = self._get_sentinels()
|
| 225 |
+
|
| 226 |
+
self._worker_handler = threading.Thread(
|
| 227 |
+
target=Pool._handle_workers,
|
| 228 |
+
args=(self._cache, self._taskqueue, self._ctx, self.Process,
|
| 229 |
+
self._processes, self._pool, self._inqueue, self._outqueue,
|
| 230 |
+
self._initializer, self._initargs, self._maxtasksperchild,
|
| 231 |
+
self._wrap_exception, sentinels, self._change_notifier)
|
| 232 |
+
)
|
| 233 |
+
self._worker_handler.daemon = True
|
| 234 |
+
self._worker_handler._state = RUN
|
| 235 |
+
self._worker_handler.start()
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
self._task_handler = threading.Thread(
|
| 239 |
+
target=Pool._handle_tasks,
|
| 240 |
+
args=(self._taskqueue, self._quick_put, self._outqueue,
|
| 241 |
+
self._pool, self._cache)
|
| 242 |
+
)
|
| 243 |
+
self._task_handler.daemon = True
|
| 244 |
+
self._task_handler._state = RUN
|
| 245 |
+
self._task_handler.start()
|
| 246 |
+
|
| 247 |
+
self._result_handler = threading.Thread(
|
| 248 |
+
target=Pool._handle_results,
|
| 249 |
+
args=(self._outqueue, self._quick_get, self._cache)
|
| 250 |
+
)
|
| 251 |
+
self._result_handler.daemon = True
|
| 252 |
+
self._result_handler._state = RUN
|
| 253 |
+
self._result_handler.start()
|
| 254 |
+
|
| 255 |
+
self._terminate = util.Finalize(
|
| 256 |
+
self, self._terminate_pool,
|
| 257 |
+
args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
|
| 258 |
+
self._change_notifier, self._worker_handler, self._task_handler,
|
| 259 |
+
self._result_handler, self._cache),
|
| 260 |
+
exitpriority=15
|
| 261 |
+
)
|
| 262 |
+
self._state = RUN
|
| 263 |
+
|
| 264 |
+
# Copy globals as function locals to make sure that they are available
|
| 265 |
+
# during Python shutdown when the Pool is destroyed.
|
| 266 |
+
def __del__(self, _warn=warnings.warn, RUN=RUN):
|
| 267 |
+
if self._state == RUN:
|
| 268 |
+
_warn(f"unclosed running multiprocessing pool {self!r}",
|
| 269 |
+
ResourceWarning, source=self)
|
| 270 |
+
if getattr(self, '_change_notifier', None) is not None:
|
| 271 |
+
self._change_notifier.put(None)
|
| 272 |
+
|
| 273 |
+
def __repr__(self):
|
| 274 |
+
cls = self.__class__
|
| 275 |
+
return (f'<{cls.__module__}.{cls.__qualname__} '
|
| 276 |
+
f'state={self._state} '
|
| 277 |
+
f'pool_size={len(self._pool)}>')
|
| 278 |
+
|
| 279 |
+
def _get_sentinels(self):
|
| 280 |
+
task_queue_sentinels = [self._outqueue._reader]
|
| 281 |
+
self_notifier_sentinels = [self._change_notifier._reader]
|
| 282 |
+
return [*task_queue_sentinels, *self_notifier_sentinels]
|
| 283 |
+
|
| 284 |
+
@staticmethod
|
| 285 |
+
def _get_worker_sentinels(workers):
|
| 286 |
+
return [worker.sentinel for worker in
|
| 287 |
+
workers if hasattr(worker, "sentinel")]
|
| 288 |
+
|
| 289 |
+
@staticmethod
|
| 290 |
+
def _join_exited_workers(pool):
|
| 291 |
+
"""Cleanup after any worker processes which have exited due to reaching
|
| 292 |
+
their specified lifetime. Returns True if any workers were cleaned up.
|
| 293 |
+
"""
|
| 294 |
+
cleaned = False
|
| 295 |
+
for i in reversed(range(len(pool))):
|
| 296 |
+
worker = pool[i]
|
| 297 |
+
if worker.exitcode is not None:
|
| 298 |
+
# worker exited
|
| 299 |
+
util.debug('cleaning up worker %d' % i)
|
| 300 |
+
worker.join()
|
| 301 |
+
cleaned = True
|
| 302 |
+
del pool[i]
|
| 303 |
+
return cleaned
|
| 304 |
+
|
| 305 |
+
def _repopulate_pool(self):
|
| 306 |
+
return self._repopulate_pool_static(self._ctx, self.Process,
|
| 307 |
+
self._processes,
|
| 308 |
+
self._pool, self._inqueue,
|
| 309 |
+
self._outqueue, self._initializer,
|
| 310 |
+
self._initargs,
|
| 311 |
+
self._maxtasksperchild,
|
| 312 |
+
self._wrap_exception)
|
| 313 |
+
|
| 314 |
+
@staticmethod
|
| 315 |
+
def _repopulate_pool_static(ctx, Process, processes, pool, inqueue,
|
| 316 |
+
outqueue, initializer, initargs,
|
| 317 |
+
maxtasksperchild, wrap_exception):
|
| 318 |
+
"""Bring the number of pool processes up to the specified number,
|
| 319 |
+
for use after reaping workers which have exited.
|
| 320 |
+
"""
|
| 321 |
+
for i in range(processes - len(pool)):
|
| 322 |
+
w = Process(ctx, target=worker,
|
| 323 |
+
args=(inqueue, outqueue,
|
| 324 |
+
initializer,
|
| 325 |
+
initargs, maxtasksperchild,
|
| 326 |
+
wrap_exception))
|
| 327 |
+
w.name = w.name.replace('Process', 'PoolWorker')
|
| 328 |
+
w.daemon = True
|
| 329 |
+
w.start()
|
| 330 |
+
pool.append(w)
|
| 331 |
+
util.debug('added worker')
|
| 332 |
+
|
| 333 |
+
@staticmethod
|
| 334 |
+
def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue,
|
| 335 |
+
initializer, initargs, maxtasksperchild,
|
| 336 |
+
wrap_exception):
|
| 337 |
+
"""Clean up any exited workers and start replacements for them.
|
| 338 |
+
"""
|
| 339 |
+
if Pool._join_exited_workers(pool):
|
| 340 |
+
Pool._repopulate_pool_static(ctx, Process, processes, pool,
|
| 341 |
+
inqueue, outqueue, initializer,
|
| 342 |
+
initargs, maxtasksperchild,
|
| 343 |
+
wrap_exception)
|
| 344 |
+
|
| 345 |
+
def _setup_queues(self):
|
| 346 |
+
self._inqueue = self._ctx.SimpleQueue()
|
| 347 |
+
self._outqueue = self._ctx.SimpleQueue()
|
| 348 |
+
self._quick_put = self._inqueue._writer.send
|
| 349 |
+
self._quick_get = self._outqueue._reader.recv
|
| 350 |
+
|
| 351 |
+
def _check_running(self):
|
| 352 |
+
if self._state != RUN:
|
| 353 |
+
raise ValueError("Pool not running")
|
| 354 |
+
|
| 355 |
+
def apply(self, func, args=(), kwds={}):
|
| 356 |
+
'''
|
| 357 |
+
Equivalent of `func(*args, **kwds)`.
|
| 358 |
+
Pool must be running.
|
| 359 |
+
'''
|
| 360 |
+
return self.apply_async(func, args, kwds).get()
|
| 361 |
+
|
| 362 |
+
def map(self, func, iterable, chunksize=None):
|
| 363 |
+
'''
|
| 364 |
+
Apply `func` to each element in `iterable`, collecting the results
|
| 365 |
+
in a list that is returned.
|
| 366 |
+
'''
|
| 367 |
+
return self._map_async(func, iterable, mapstar, chunksize).get()
|
| 368 |
+
|
| 369 |
+
def starmap(self, func, iterable, chunksize=None):
|
| 370 |
+
'''
|
| 371 |
+
Like `map()` method but the elements of the `iterable` are expected to
|
| 372 |
+
be iterables as well and will be unpacked as arguments. Hence
|
| 373 |
+
`func` and (a, b) becomes func(a, b).
|
| 374 |
+
'''
|
| 375 |
+
return self._map_async(func, iterable, starmapstar, chunksize).get()
|
| 376 |
+
|
| 377 |
+
def starmap_async(self, func, iterable, chunksize=None, callback=None,
|
| 378 |
+
error_callback=None):
|
| 379 |
+
'''
|
| 380 |
+
Asynchronous version of `starmap()` method.
|
| 381 |
+
'''
|
| 382 |
+
return self._map_async(func, iterable, starmapstar, chunksize,
|
| 383 |
+
callback, error_callback)
|
| 384 |
+
|
| 385 |
+
def _guarded_task_generation(self, result_job, func, iterable):
|
| 386 |
+
'''Provides a generator of tasks for imap and imap_unordered with
|
| 387 |
+
appropriate handling for iterables which throw exceptions during
|
| 388 |
+
iteration.'''
|
| 389 |
+
try:
|
| 390 |
+
i = -1
|
| 391 |
+
for i, x in enumerate(iterable):
|
| 392 |
+
yield (result_job, i, func, (x,), {})
|
| 393 |
+
except Exception as e:
|
| 394 |
+
yield (result_job, i+1, _helper_reraises_exception, (e,), {})
|
| 395 |
+
|
| 396 |
+
def imap(self, func, iterable, chunksize=1):
|
| 397 |
+
'''
|
| 398 |
+
Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
|
| 399 |
+
'''
|
| 400 |
+
self._check_running()
|
| 401 |
+
if chunksize == 1:
|
| 402 |
+
result = IMapIterator(self)
|
| 403 |
+
self._taskqueue.put(
|
| 404 |
+
(
|
| 405 |
+
self._guarded_task_generation(result._job, func, iterable),
|
| 406 |
+
result._set_length
|
| 407 |
+
))
|
| 408 |
+
return result
|
| 409 |
+
else:
|
| 410 |
+
if chunksize < 1:
|
| 411 |
+
raise ValueError(
|
| 412 |
+
"Chunksize must be 1+, not {0:n}".format(
|
| 413 |
+
chunksize))
|
| 414 |
+
task_batches = Pool._get_tasks(func, iterable, chunksize)
|
| 415 |
+
result = IMapIterator(self)
|
| 416 |
+
self._taskqueue.put(
|
| 417 |
+
(
|
| 418 |
+
self._guarded_task_generation(result._job,
|
| 419 |
+
mapstar,
|
| 420 |
+
task_batches),
|
| 421 |
+
result._set_length
|
| 422 |
+
))
|
| 423 |
+
return (item for chunk in result for item in chunk)
|
| 424 |
+
|
| 425 |
+
def imap_unordered(self, func, iterable, chunksize=1):
|
| 426 |
+
'''
|
| 427 |
+
Like `imap()` method but ordering of results is arbitrary.
|
| 428 |
+
'''
|
| 429 |
+
self._check_running()
|
| 430 |
+
if chunksize == 1:
|
| 431 |
+
result = IMapUnorderedIterator(self)
|
| 432 |
+
self._taskqueue.put(
|
| 433 |
+
(
|
| 434 |
+
self._guarded_task_generation(result._job, func, iterable),
|
| 435 |
+
result._set_length
|
| 436 |
+
))
|
| 437 |
+
return result
|
| 438 |
+
else:
|
| 439 |
+
if chunksize < 1:
|
| 440 |
+
raise ValueError(
|
| 441 |
+
"Chunksize must be 1+, not {0!r}".format(chunksize))
|
| 442 |
+
task_batches = Pool._get_tasks(func, iterable, chunksize)
|
| 443 |
+
result = IMapUnorderedIterator(self)
|
| 444 |
+
self._taskqueue.put(
|
| 445 |
+
(
|
| 446 |
+
self._guarded_task_generation(result._job,
|
| 447 |
+
mapstar,
|
| 448 |
+
task_batches),
|
| 449 |
+
result._set_length
|
| 450 |
+
))
|
| 451 |
+
return (item for chunk in result for item in chunk)
|
| 452 |
+
|
| 453 |
+
def apply_async(self, func, args=(), kwds={}, callback=None,
|
| 454 |
+
error_callback=None):
|
| 455 |
+
'''
|
| 456 |
+
Asynchronous version of `apply()` method.
|
| 457 |
+
'''
|
| 458 |
+
self._check_running()
|
| 459 |
+
result = ApplyResult(self, callback, error_callback)
|
| 460 |
+
self._taskqueue.put(([(result._job, 0, func, args, kwds)], None))
|
| 461 |
+
return result
|
| 462 |
+
|
| 463 |
+
def map_async(self, func, iterable, chunksize=None, callback=None,
|
| 464 |
+
error_callback=None):
|
| 465 |
+
'''
|
| 466 |
+
Asynchronous version of `map()` method.
|
| 467 |
+
'''
|
| 468 |
+
return self._map_async(func, iterable, mapstar, chunksize, callback,
|
| 469 |
+
error_callback)
|
| 470 |
+
|
| 471 |
+
def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
|
| 472 |
+
error_callback=None):
|
| 473 |
+
'''
|
| 474 |
+
Helper function to implement map, starmap and their async counterparts.
|
| 475 |
+
'''
|
| 476 |
+
self._check_running()
|
| 477 |
+
if not hasattr(iterable, '__len__'):
|
| 478 |
+
iterable = list(iterable)
|
| 479 |
+
|
| 480 |
+
if chunksize is None:
|
| 481 |
+
chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
|
| 482 |
+
if extra:
|
| 483 |
+
chunksize += 1
|
| 484 |
+
if len(iterable) == 0:
|
| 485 |
+
chunksize = 0
|
| 486 |
+
|
| 487 |
+
task_batches = Pool._get_tasks(func, iterable, chunksize)
|
| 488 |
+
result = MapResult(self, chunksize, len(iterable), callback,
|
| 489 |
+
error_callback=error_callback)
|
| 490 |
+
self._taskqueue.put(
|
| 491 |
+
(
|
| 492 |
+
self._guarded_task_generation(result._job,
|
| 493 |
+
mapper,
|
| 494 |
+
task_batches),
|
| 495 |
+
None
|
| 496 |
+
)
|
| 497 |
+
)
|
| 498 |
+
return result
|
| 499 |
+
|
| 500 |
+
@staticmethod
|
| 501 |
+
def _wait_for_updates(sentinels, change_notifier, timeout=None):
|
| 502 |
+
wait(sentinels, timeout=timeout)
|
| 503 |
+
while not change_notifier.empty():
|
| 504 |
+
change_notifier.get()
|
| 505 |
+
|
| 506 |
+
@classmethod
|
| 507 |
+
def _handle_workers(cls, cache, taskqueue, ctx, Process, processes,
|
| 508 |
+
pool, inqueue, outqueue, initializer, initargs,
|
| 509 |
+
maxtasksperchild, wrap_exception, sentinels,
|
| 510 |
+
change_notifier):
|
| 511 |
+
thread = threading.current_thread()
|
| 512 |
+
|
| 513 |
+
# Keep maintaining workers until the cache gets drained, unless the pool
|
| 514 |
+
# is terminated.
|
| 515 |
+
while thread._state == RUN or (cache and thread._state != TERMINATE):
|
| 516 |
+
cls._maintain_pool(ctx, Process, processes, pool, inqueue,
|
| 517 |
+
outqueue, initializer, initargs,
|
| 518 |
+
maxtasksperchild, wrap_exception)
|
| 519 |
+
|
| 520 |
+
current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels]
|
| 521 |
+
|
| 522 |
+
cls._wait_for_updates(current_sentinels, change_notifier)
|
| 523 |
+
# send sentinel to stop workers
|
| 524 |
+
taskqueue.put(None)
|
| 525 |
+
util.debug('worker handler exiting')
|
| 526 |
+
|
| 527 |
+
@staticmethod
|
| 528 |
+
def _handle_tasks(taskqueue, put, outqueue, pool, cache):
|
| 529 |
+
thread = threading.current_thread()
|
| 530 |
+
|
| 531 |
+
for taskseq, set_length in iter(taskqueue.get, None):
|
| 532 |
+
task = None
|
| 533 |
+
try:
|
| 534 |
+
# iterating taskseq cannot fail
|
| 535 |
+
for task in taskseq:
|
| 536 |
+
if thread._state != RUN:
|
| 537 |
+
util.debug('task handler found thread._state != RUN')
|
| 538 |
+
break
|
| 539 |
+
try:
|
| 540 |
+
put(task)
|
| 541 |
+
except Exception as e:
|
| 542 |
+
job, idx = task[:2]
|
| 543 |
+
try:
|
| 544 |
+
cache[job]._set(idx, (False, e))
|
| 545 |
+
except KeyError:
|
| 546 |
+
pass
|
| 547 |
+
else:
|
| 548 |
+
if set_length:
|
| 549 |
+
util.debug('doing set_length()')
|
| 550 |
+
idx = task[1] if task else -1
|
| 551 |
+
set_length(idx + 1)
|
| 552 |
+
continue
|
| 553 |
+
break
|
| 554 |
+
finally:
|
| 555 |
+
task = taskseq = job = None
|
| 556 |
+
else:
|
| 557 |
+
util.debug('task handler got sentinel')
|
| 558 |
+
|
| 559 |
+
try:
|
| 560 |
+
# tell result handler to finish when cache is empty
|
| 561 |
+
util.debug('task handler sending sentinel to result handler')
|
| 562 |
+
outqueue.put(None)
|
| 563 |
+
|
| 564 |
+
# tell workers there is no more work
|
| 565 |
+
util.debug('task handler sending sentinel to workers')
|
| 566 |
+
for p in pool:
|
| 567 |
+
put(None)
|
| 568 |
+
except OSError:
|
| 569 |
+
util.debug('task handler got OSError when sending sentinels')
|
| 570 |
+
|
| 571 |
+
util.debug('task handler exiting')
|
| 572 |
+
|
| 573 |
+
@staticmethod
|
| 574 |
+
def _handle_results(outqueue, get, cache):
|
| 575 |
+
thread = threading.current_thread()
|
| 576 |
+
|
| 577 |
+
while 1:
|
| 578 |
+
try:
|
| 579 |
+
task = get()
|
| 580 |
+
except (OSError, EOFError):
|
| 581 |
+
util.debug('result handler got EOFError/OSError -- exiting')
|
| 582 |
+
return
|
| 583 |
+
|
| 584 |
+
if thread._state != RUN:
|
| 585 |
+
assert thread._state == TERMINATE, "Thread not in TERMINATE"
|
| 586 |
+
util.debug('result handler found thread._state=TERMINATE')
|
| 587 |
+
break
|
| 588 |
+
|
| 589 |
+
if task is None:
|
| 590 |
+
util.debug('result handler got sentinel')
|
| 591 |
+
break
|
| 592 |
+
|
| 593 |
+
job, i, obj = task
|
| 594 |
+
try:
|
| 595 |
+
cache[job]._set(i, obj)
|
| 596 |
+
except KeyError:
|
| 597 |
+
pass
|
| 598 |
+
task = job = obj = None
|
| 599 |
+
|
| 600 |
+
while cache and thread._state != TERMINATE:
|
| 601 |
+
try:
|
| 602 |
+
task = get()
|
| 603 |
+
except (OSError, EOFError):
|
| 604 |
+
util.debug('result handler got EOFError/OSError -- exiting')
|
| 605 |
+
return
|
| 606 |
+
|
| 607 |
+
if task is None:
|
| 608 |
+
util.debug('result handler ignoring extra sentinel')
|
| 609 |
+
continue
|
| 610 |
+
job, i, obj = task
|
| 611 |
+
try:
|
| 612 |
+
cache[job]._set(i, obj)
|
| 613 |
+
except KeyError:
|
| 614 |
+
pass
|
| 615 |
+
task = job = obj = None
|
| 616 |
+
|
| 617 |
+
if hasattr(outqueue, '_reader'):
|
| 618 |
+
util.debug('ensuring that outqueue is not full')
|
| 619 |
+
# If we don't make room available in outqueue then
|
| 620 |
+
# attempts to add the sentinel (None) to outqueue may
|
| 621 |
+
# block. There is guaranteed to be no more than 2 sentinels.
|
| 622 |
+
try:
|
| 623 |
+
for i in range(10):
|
| 624 |
+
if not outqueue._reader.poll():
|
| 625 |
+
break
|
| 626 |
+
get()
|
| 627 |
+
except (OSError, EOFError):
|
| 628 |
+
pass
|
| 629 |
+
|
| 630 |
+
util.debug('result handler exiting: len(cache)=%s, thread._state=%s',
|
| 631 |
+
len(cache), thread._state)
|
| 632 |
+
|
| 633 |
+
@staticmethod
|
| 634 |
+
def _get_tasks(func, it, size):
|
| 635 |
+
it = iter(it)
|
| 636 |
+
while 1:
|
| 637 |
+
x = tuple(itertools.islice(it, size))
|
| 638 |
+
if not x:
|
| 639 |
+
return
|
| 640 |
+
yield (func, x)
|
| 641 |
+
|
| 642 |
+
def __reduce__(self):
|
| 643 |
+
raise NotImplementedError(
|
| 644 |
+
'pool objects cannot be passed between processes or pickled'
|
| 645 |
+
)
|
| 646 |
+
|
| 647 |
+
def close(self):
|
| 648 |
+
util.debug('closing pool')
|
| 649 |
+
if self._state == RUN:
|
| 650 |
+
self._state = CLOSE
|
| 651 |
+
self._worker_handler._state = CLOSE
|
| 652 |
+
self._change_notifier.put(None)
|
| 653 |
+
|
| 654 |
+
def terminate(self):
|
| 655 |
+
util.debug('terminating pool')
|
| 656 |
+
self._state = TERMINATE
|
| 657 |
+
self._terminate()
|
| 658 |
+
|
| 659 |
+
def join(self):
|
| 660 |
+
util.debug('joining pool')
|
| 661 |
+
if self._state == RUN:
|
| 662 |
+
raise ValueError("Pool is still running")
|
| 663 |
+
elif self._state not in (CLOSE, TERMINATE):
|
| 664 |
+
raise ValueError("In unknown state")
|
| 665 |
+
self._worker_handler.join()
|
| 666 |
+
self._task_handler.join()
|
| 667 |
+
self._result_handler.join()
|
| 668 |
+
for p in self._pool:
|
| 669 |
+
p.join()
|
| 670 |
+
|
| 671 |
+
@staticmethod
|
| 672 |
+
def _help_stuff_finish(inqueue, task_handler, size):
|
| 673 |
+
# task_handler may be blocked trying to put items on inqueue
|
| 674 |
+
util.debug('removing tasks from inqueue until task handler finished')
|
| 675 |
+
inqueue._rlock.acquire()
|
| 676 |
+
while task_handler.is_alive() and inqueue._reader.poll():
|
| 677 |
+
inqueue._reader.recv()
|
| 678 |
+
time.sleep(0)
|
| 679 |
+
|
| 680 |
+
@classmethod
|
| 681 |
+
def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier,
|
| 682 |
+
worker_handler, task_handler, result_handler, cache):
|
| 683 |
+
# this is guaranteed to only be called once
|
| 684 |
+
util.debug('finalizing pool')
|
| 685 |
+
|
| 686 |
+
# Notify that the worker_handler state has been changed so the
|
| 687 |
+
# _handle_workers loop can be unblocked (and exited) in order to
|
| 688 |
+
# send the finalization sentinel all the workers.
|
| 689 |
+
worker_handler._state = TERMINATE
|
| 690 |
+
change_notifier.put(None)
|
| 691 |
+
|
| 692 |
+
task_handler._state = TERMINATE
|
| 693 |
+
|
| 694 |
+
util.debug('helping task handler/workers to finish')
|
| 695 |
+
cls._help_stuff_finish(inqueue, task_handler, len(pool))
|
| 696 |
+
|
| 697 |
+
if (not result_handler.is_alive()) and (len(cache) != 0):
|
| 698 |
+
raise AssertionError(
|
| 699 |
+
"Cannot have cache with result_hander not alive")
|
| 700 |
+
|
| 701 |
+
result_handler._state = TERMINATE
|
| 702 |
+
change_notifier.put(None)
|
| 703 |
+
outqueue.put(None) # sentinel
|
| 704 |
+
|
| 705 |
+
# We must wait for the worker handler to exit before terminating
|
| 706 |
+
# workers because we don't want workers to be restarted behind our back.
|
| 707 |
+
util.debug('joining worker handler')
|
| 708 |
+
if threading.current_thread() is not worker_handler:
|
| 709 |
+
worker_handler.join()
|
| 710 |
+
|
| 711 |
+
# Terminate workers which haven't already finished.
|
| 712 |
+
if pool and hasattr(pool[0], 'terminate'):
|
| 713 |
+
util.debug('terminating workers')
|
| 714 |
+
for p in pool:
|
| 715 |
+
if p.exitcode is None:
|
| 716 |
+
p.terminate()
|
| 717 |
+
|
| 718 |
+
util.debug('joining task handler')
|
| 719 |
+
if threading.current_thread() is not task_handler:
|
| 720 |
+
task_handler.join()
|
| 721 |
+
|
| 722 |
+
util.debug('joining result handler')
|
| 723 |
+
if threading.current_thread() is not result_handler:
|
| 724 |
+
result_handler.join()
|
| 725 |
+
|
| 726 |
+
if pool and hasattr(pool[0], 'terminate'):
|
| 727 |
+
util.debug('joining pool workers')
|
| 728 |
+
for p in pool:
|
| 729 |
+
if p.is_alive():
|
| 730 |
+
# worker has not yet exited
|
| 731 |
+
util.debug('cleaning up worker %d' % p.pid)
|
| 732 |
+
p.join()
|
| 733 |
+
|
| 734 |
+
def __enter__(self):
|
| 735 |
+
self._check_running()
|
| 736 |
+
return self
|
| 737 |
+
|
| 738 |
+
def __exit__(self, exc_type, exc_val, exc_tb):
|
| 739 |
+
self.terminate()
|
| 740 |
+
|
| 741 |
+
#
|
| 742 |
+
# Class whose instances are returned by `Pool.apply_async()`
|
| 743 |
+
#
|
| 744 |
+
|
| 745 |
+
class ApplyResult(object):
|
| 746 |
+
|
| 747 |
+
def __init__(self, pool, callback, error_callback):
|
| 748 |
+
self._pool = pool
|
| 749 |
+
self._event = threading.Event()
|
| 750 |
+
self._job = next(job_counter)
|
| 751 |
+
self._cache = pool._cache
|
| 752 |
+
self._callback = callback
|
| 753 |
+
self._error_callback = error_callback
|
| 754 |
+
self._cache[self._job] = self
|
| 755 |
+
|
| 756 |
+
def ready(self):
|
| 757 |
+
return self._event.is_set()
|
| 758 |
+
|
| 759 |
+
def successful(self):
|
| 760 |
+
if not self.ready():
|
| 761 |
+
raise ValueError("{0!r} not ready".format(self))
|
| 762 |
+
return self._success
|
| 763 |
+
|
| 764 |
+
def wait(self, timeout=None):
|
| 765 |
+
self._event.wait(timeout)
|
| 766 |
+
|
| 767 |
+
def get(self, timeout=None):
|
| 768 |
+
self.wait(timeout)
|
| 769 |
+
if not self.ready():
|
| 770 |
+
raise TimeoutError
|
| 771 |
+
if self._success:
|
| 772 |
+
return self._value
|
| 773 |
+
else:
|
| 774 |
+
raise self._value
|
| 775 |
+
|
| 776 |
+
def _set(self, i, obj):
|
| 777 |
+
self._success, self._value = obj
|
| 778 |
+
if self._callback and self._success:
|
| 779 |
+
self._callback(self._value)
|
| 780 |
+
if self._error_callback and not self._success:
|
| 781 |
+
self._error_callback(self._value)
|
| 782 |
+
self._event.set()
|
| 783 |
+
del self._cache[self._job]
|
| 784 |
+
self._pool = None
|
| 785 |
+
|
| 786 |
+
__class_getitem__ = classmethod(types.GenericAlias)
|
| 787 |
+
|
| 788 |
+
AsyncResult = ApplyResult # create alias -- see #17805
|
| 789 |
+
|
| 790 |
+
#
|
| 791 |
+
# Class whose instances are returned by `Pool.map_async()`
|
| 792 |
+
#
|
| 793 |
+
|
| 794 |
+
class MapResult(ApplyResult):
|
| 795 |
+
|
| 796 |
+
def __init__(self, pool, chunksize, length, callback, error_callback):
|
| 797 |
+
ApplyResult.__init__(self, pool, callback,
|
| 798 |
+
error_callback=error_callback)
|
| 799 |
+
self._success = True
|
| 800 |
+
self._value = [None] * length
|
| 801 |
+
self._chunksize = chunksize
|
| 802 |
+
if chunksize <= 0:
|
| 803 |
+
self._number_left = 0
|
| 804 |
+
self._event.set()
|
| 805 |
+
del self._cache[self._job]
|
| 806 |
+
else:
|
| 807 |
+
self._number_left = length//chunksize + bool(length % chunksize)
|
| 808 |
+
|
| 809 |
+
def _set(self, i, success_result):
|
| 810 |
+
self._number_left -= 1
|
| 811 |
+
success, result = success_result
|
| 812 |
+
if success and self._success:
|
| 813 |
+
self._value[i*self._chunksize:(i+1)*self._chunksize] = result
|
| 814 |
+
if self._number_left == 0:
|
| 815 |
+
if self._callback:
|
| 816 |
+
self._callback(self._value)
|
| 817 |
+
del self._cache[self._job]
|
| 818 |
+
self._event.set()
|
| 819 |
+
self._pool = None
|
| 820 |
+
else:
|
| 821 |
+
if not success and self._success:
|
| 822 |
+
# only store first exception
|
| 823 |
+
self._success = False
|
| 824 |
+
self._value = result
|
| 825 |
+
if self._number_left == 0:
|
| 826 |
+
# only consider the result ready once all jobs are done
|
| 827 |
+
if self._error_callback:
|
| 828 |
+
self._error_callback(self._value)
|
| 829 |
+
del self._cache[self._job]
|
| 830 |
+
self._event.set()
|
| 831 |
+
self._pool = None
|
| 832 |
+
|
| 833 |
+
#
|
| 834 |
+
# Class whose instances are returned by `Pool.imap()`
|
| 835 |
+
#
|
| 836 |
+
|
| 837 |
+
class IMapIterator(object):
|
| 838 |
+
|
| 839 |
+
def __init__(self, pool):
|
| 840 |
+
self._pool = pool
|
| 841 |
+
self._cond = threading.Condition(threading.Lock())
|
| 842 |
+
self._job = next(job_counter)
|
| 843 |
+
self._cache = pool._cache
|
| 844 |
+
self._items = collections.deque()
|
| 845 |
+
self._index = 0
|
| 846 |
+
self._length = None
|
| 847 |
+
self._unsorted = {}
|
| 848 |
+
self._cache[self._job] = self
|
| 849 |
+
|
| 850 |
+
def __iter__(self):
|
| 851 |
+
return self
|
| 852 |
+
|
| 853 |
+
def next(self, timeout=None):
|
| 854 |
+
with self._cond:
|
| 855 |
+
try:
|
| 856 |
+
item = self._items.popleft()
|
| 857 |
+
except IndexError:
|
| 858 |
+
if self._index == self._length:
|
| 859 |
+
self._pool = None
|
| 860 |
+
raise StopIteration from None
|
| 861 |
+
self._cond.wait(timeout)
|
| 862 |
+
try:
|
| 863 |
+
item = self._items.popleft()
|
| 864 |
+
except IndexError:
|
| 865 |
+
if self._index == self._length:
|
| 866 |
+
self._pool = None
|
| 867 |
+
raise StopIteration from None
|
| 868 |
+
raise TimeoutError from None
|
| 869 |
+
|
| 870 |
+
success, value = item
|
| 871 |
+
if success:
|
| 872 |
+
return value
|
| 873 |
+
raise value
|
| 874 |
+
|
| 875 |
+
__next__ = next # XXX
|
| 876 |
+
|
| 877 |
+
def _set(self, i, obj):
|
| 878 |
+
with self._cond:
|
| 879 |
+
if self._index == i:
|
| 880 |
+
self._items.append(obj)
|
| 881 |
+
self._index += 1
|
| 882 |
+
while self._index in self._unsorted:
|
| 883 |
+
obj = self._unsorted.pop(self._index)
|
| 884 |
+
self._items.append(obj)
|
| 885 |
+
self._index += 1
|
| 886 |
+
self._cond.notify()
|
| 887 |
+
else:
|
| 888 |
+
self._unsorted[i] = obj
|
| 889 |
+
|
| 890 |
+
if self._index == self._length:
|
| 891 |
+
del self._cache[self._job]
|
| 892 |
+
self._pool = None
|
| 893 |
+
|
| 894 |
+
def _set_length(self, length):
|
| 895 |
+
with self._cond:
|
| 896 |
+
self._length = length
|
| 897 |
+
if self._index == self._length:
|
| 898 |
+
self._cond.notify()
|
| 899 |
+
del self._cache[self._job]
|
| 900 |
+
self._pool = None
|
| 901 |
+
|
| 902 |
+
#
|
| 903 |
+
# Class whose instances are returned by `Pool.imap_unordered()`
|
| 904 |
+
#
|
| 905 |
+
|
| 906 |
+
class IMapUnorderedIterator(IMapIterator):
|
| 907 |
+
|
| 908 |
+
def _set(self, i, obj):
|
| 909 |
+
with self._cond:
|
| 910 |
+
self._items.append(obj)
|
| 911 |
+
self._index += 1
|
| 912 |
+
self._cond.notify()
|
| 913 |
+
if self._index == self._length:
|
| 914 |
+
del self._cache[self._job]
|
| 915 |
+
self._pool = None
|
| 916 |
+
|
| 917 |
+
#
|
| 918 |
+
#
|
| 919 |
+
#
|
| 920 |
+
|
| 921 |
+
class ThreadPool(Pool):
|
| 922 |
+
_wrap_exception = False
|
| 923 |
+
|
| 924 |
+
@staticmethod
|
| 925 |
+
def Process(ctx, *args, **kwds):
|
| 926 |
+
from .dummy import Process
|
| 927 |
+
return Process(*args, **kwds)
|
| 928 |
+
|
| 929 |
+
def __init__(self, processes=None, initializer=None, initargs=()):
|
| 930 |
+
Pool.__init__(self, processes, initializer, initargs)
|
| 931 |
+
|
| 932 |
+
def _setup_queues(self):
|
| 933 |
+
self._inqueue = queue.SimpleQueue()
|
| 934 |
+
self._outqueue = queue.SimpleQueue()
|
| 935 |
+
self._quick_put = self._inqueue.put
|
| 936 |
+
self._quick_get = self._outqueue.get
|
| 937 |
+
|
| 938 |
+
def _get_sentinels(self):
|
| 939 |
+
return [self._change_notifier._reader]
|
| 940 |
+
|
| 941 |
+
@staticmethod
|
| 942 |
+
def _get_worker_sentinels(workers):
|
| 943 |
+
return []
|
| 944 |
+
|
| 945 |
+
@staticmethod
|
| 946 |
+
def _help_stuff_finish(inqueue, task_handler, size):
|
| 947 |
+
# drain inqueue, and put sentinels at its head to make workers finish
|
| 948 |
+
try:
|
| 949 |
+
while True:
|
| 950 |
+
inqueue.get(block=False)
|
| 951 |
+
except queue.Empty:
|
| 952 |
+
pass
|
| 953 |
+
for i in range(size):
|
| 954 |
+
inqueue.put(None)
|
| 955 |
+
|
| 956 |
+
def _wait_for_updates(self, sentinels, change_notifier, timeout):
|
| 957 |
+
time.sleep(timeout)
|
llava/lib/python3.10/multiprocessing/popen_fork.py
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import signal
|
| 3 |
+
|
| 4 |
+
from . import util
|
| 5 |
+
|
| 6 |
+
__all__ = ['Popen']
|
| 7 |
+
|
| 8 |
+
#
|
| 9 |
+
# Start child process using fork
|
| 10 |
+
#
|
| 11 |
+
|
| 12 |
+
class Popen(object):
|
| 13 |
+
method = 'fork'
|
| 14 |
+
|
| 15 |
+
def __init__(self, process_obj):
|
| 16 |
+
util._flush_std_streams()
|
| 17 |
+
self.returncode = None
|
| 18 |
+
self.finalizer = None
|
| 19 |
+
self._launch(process_obj)
|
| 20 |
+
|
| 21 |
+
def duplicate_for_child(self, fd):
|
| 22 |
+
return fd
|
| 23 |
+
|
| 24 |
+
def poll(self, flag=os.WNOHANG):
|
| 25 |
+
if self.returncode is None:
|
| 26 |
+
try:
|
| 27 |
+
pid, sts = os.waitpid(self.pid, flag)
|
| 28 |
+
except OSError:
|
| 29 |
+
# Child process not yet created. See #1731717
|
| 30 |
+
# e.errno == errno.ECHILD == 10
|
| 31 |
+
return None
|
| 32 |
+
if pid == self.pid:
|
| 33 |
+
self.returncode = os.waitstatus_to_exitcode(sts)
|
| 34 |
+
return self.returncode
|
| 35 |
+
|
| 36 |
+
def wait(self, timeout=None):
|
| 37 |
+
if self.returncode is None:
|
| 38 |
+
if timeout is not None:
|
| 39 |
+
from multiprocessing.connection import wait
|
| 40 |
+
if not wait([self.sentinel], timeout):
|
| 41 |
+
return None
|
| 42 |
+
# This shouldn't block if wait() returned successfully.
|
| 43 |
+
return self.poll(os.WNOHANG if timeout == 0.0 else 0)
|
| 44 |
+
return self.returncode
|
| 45 |
+
|
| 46 |
+
def _send_signal(self, sig):
|
| 47 |
+
if self.returncode is None:
|
| 48 |
+
try:
|
| 49 |
+
os.kill(self.pid, sig)
|
| 50 |
+
except ProcessLookupError:
|
| 51 |
+
pass
|
| 52 |
+
except OSError:
|
| 53 |
+
if self.wait(timeout=0.1) is None:
|
| 54 |
+
raise
|
| 55 |
+
|
| 56 |
+
def terminate(self):
|
| 57 |
+
self._send_signal(signal.SIGTERM)
|
| 58 |
+
|
| 59 |
+
def kill(self):
|
| 60 |
+
self._send_signal(signal.SIGKILL)
|
| 61 |
+
|
| 62 |
+
def _launch(self, process_obj):
|
| 63 |
+
code = 1
|
| 64 |
+
parent_r, child_w = os.pipe()
|
| 65 |
+
child_r, parent_w = os.pipe()
|
| 66 |
+
self.pid = os.fork()
|
| 67 |
+
if self.pid == 0:
|
| 68 |
+
try:
|
| 69 |
+
os.close(parent_r)
|
| 70 |
+
os.close(parent_w)
|
| 71 |
+
code = process_obj._bootstrap(parent_sentinel=child_r)
|
| 72 |
+
finally:
|
| 73 |
+
os._exit(code)
|
| 74 |
+
else:
|
| 75 |
+
os.close(child_w)
|
| 76 |
+
os.close(child_r)
|
| 77 |
+
self.finalizer = util.Finalize(self, util.close_fds,
|
| 78 |
+
(parent_r, parent_w,))
|
| 79 |
+
self.sentinel = parent_r
|
| 80 |
+
|
| 81 |
+
def close(self):
|
| 82 |
+
if self.finalizer is not None:
|
| 83 |
+
self.finalizer()
|
llava/lib/python3.10/multiprocessing/popen_spawn_win32.py
ADDED
|
@@ -0,0 +1,131 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import msvcrt
|
| 3 |
+
import signal
|
| 4 |
+
import sys
|
| 5 |
+
import _winapi
|
| 6 |
+
|
| 7 |
+
from .context import reduction, get_spawning_popen, set_spawning_popen
|
| 8 |
+
from . import spawn
|
| 9 |
+
from . import util
|
| 10 |
+
|
| 11 |
+
__all__ = ['Popen']
|
| 12 |
+
|
| 13 |
+
#
|
| 14 |
+
#
|
| 15 |
+
#
|
| 16 |
+
|
| 17 |
+
TERMINATE = 0x10000
|
| 18 |
+
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
|
| 19 |
+
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def _path_eq(p1, p2):
|
| 23 |
+
return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)
|
| 24 |
+
|
| 25 |
+
WINENV = not _path_eq(sys.executable, sys._base_executable)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def _close_handles(*handles):
|
| 29 |
+
for handle in handles:
|
| 30 |
+
_winapi.CloseHandle(handle)
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
#
|
| 34 |
+
# We define a Popen class similar to the one from subprocess, but
|
| 35 |
+
# whose constructor takes a process object as its argument.
|
| 36 |
+
#
|
| 37 |
+
|
| 38 |
+
class Popen(object):
    '''
    Start a subprocess to run the code of a process object
    '''
    # Start-method tag used by the context machinery to select this class.
    method = 'spawn'

    def __init__(self, process_obj):
        # Pickle-able snapshot of interpreter state the child needs before
        # it can unpickle process_obj (built by spawn.get_preparation_data).
        prep_data = spawn.get_preparation_data(process_obj._name)

        # read end of pipe will be duplicated by the child process
        # -- see spawn_main() in spawn.py.
        #
        # bpo-33929: Previously, the read end of pipe was "stolen" by the child
        # process, but it leaked a handle if the child process had been
        # terminated before it could steal the handle from the parent process.
        rhandle, whandle = _winapi.CreatePipe(None, 0)
        # Wrap the write handle in a C-runtime fd so the ordinary file API
        # (open() below) can be used to write to the pipe.
        wfd = msvcrt.open_osfhandle(whandle, 0)
        cmd = spawn.get_command_line(parent_pid=os.getpid(),
                                     pipe_handle=rhandle)
        # Quote each argument; CreateProcess takes the command line as one
        # string.
        cmd = ' '.join('"%s"' % x for x in cmd)

        python_exe = spawn.get_executable()

        # bpo-35797: When running in a venv, we bypass the redirect
        # executor and launch our base Python.
        if WINENV and _path_eq(python_exe, sys.executable):
            python_exe = sys._base_executable
            env = os.environ.copy()
            env["__PYVENV_LAUNCHER__"] = sys.executable
        else:
            env = None

        with open(wfd, 'wb', closefd=True) as to_child:
            # start process
            try:
                hp, ht, pid, tid = _winapi.CreateProcess(
                    python_exe, cmd,
                    None, None, False, 0, env, None, None)
                # The primary-thread handle is never used; close it at once.
                _winapi.CloseHandle(ht)
            except:
                # The child never started, so nobody will duplicate rhandle;
                # close it here to avoid leaking the read end.
                _winapi.CloseHandle(rhandle)
                raise

            # set attributes of self
            self.pid = pid
            self.returncode = None
            self._handle = hp
            self.sentinel = int(hp)
            # Close the process handle and our copy of the pipe's read end
            # when this Popen object is finalized (or close() is called).
            self.finalizer = util.Finalize(self, _close_handles,
                                           (self.sentinel, int(rhandle)))

            # send information to child
            set_spawning_popen(self)
            try:
                reduction.dump(prep_data, to_child)
                reduction.dump(process_obj, to_child)
            finally:
                set_spawning_popen(None)

    def duplicate_for_child(self, handle):
        # Called while pickling for the child: duplicate `handle` into the
        # child process, which is identified by our process handle/sentinel.
        assert self is get_spawning_popen()
        return reduction.duplicate(handle, self.sentinel)

    def wait(self, timeout=None):
        '''
        Wait up to `timeout` seconds (forever if None) for the child to
        exit; return its exit code, or None if it is still running.
        '''
        if self.returncode is None:
            if timeout is None:
                msecs = _winapi.INFINITE
            else:
                # Convert seconds to whole milliseconds, rounding to nearest.
                msecs = max(0, int(timeout * 1000 + 0.5))

            res = _winapi.WaitForSingleObject(int(self._handle), msecs)
            if res == _winapi.WAIT_OBJECT_0:
                code = _winapi.GetExitCodeProcess(self._handle)
                if code == TERMINATE:
                    # The child was killed by our terminate(); report it the
                    # same way Unix reports a SIGTERM death.
                    code = -signal.SIGTERM
                self.returncode = code

        return self.returncode

    def poll(self):
        # Non-blocking probe for the exit code.
        return self.wait(timeout=0)

    def terminate(self):
        '''
        Forcibly end the child process with the distinctive TERMINATE code.
        '''
        if self.returncode is None:
            try:
                _winapi.TerminateProcess(int(self._handle), TERMINATE)
            except OSError:
                # TerminateProcess can fail if the child died in the
                # meantime; only re-raise if it still appears to be running
                # after a short grace wait.
                if self.wait(timeout=1.0) is None:
                    raise

    # On Windows there is no stronger kill than TerminateProcess.
    kill = terminate

    def close(self):
        # Run the finalizer now, closing the process and pipe handles.
        self.finalizer()
|
llava/lib/python3.10/multiprocessing/process.py
ADDED
|
@@ -0,0 +1,438 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Module providing the `Process` class which emulates `threading.Thread`
|
| 3 |
+
#
|
| 4 |
+
# multiprocessing/process.py
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 2006-2008, R Oudkerk
|
| 7 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 8 |
+
#
|
| 9 |
+
|
| 10 |
+
__all__ = ['BaseProcess', 'current_process', 'active_children',
|
| 11 |
+
'parent_process']
|
| 12 |
+
|
| 13 |
+
#
|
| 14 |
+
# Imports
|
| 15 |
+
#
|
| 16 |
+
|
| 17 |
+
import os
|
| 18 |
+
import sys
|
| 19 |
+
import signal
|
| 20 |
+
import itertools
|
| 21 |
+
import threading
|
| 22 |
+
from _weakrefset import WeakSet
|
| 23 |
+
|
| 24 |
+
#
|
| 25 |
+
#
|
| 26 |
+
#
|
| 27 |
+
|
| 28 |
+
try:
|
| 29 |
+
ORIGINAL_DIR = os.path.abspath(os.getcwd())
|
| 30 |
+
except OSError:
|
| 31 |
+
ORIGINAL_DIR = None
|
| 32 |
+
|
| 33 |
+
#
|
| 34 |
+
# Public functions
|
| 35 |
+
#
|
| 36 |
+
|
| 37 |
+
def current_process():
    '''
    Return process object representing the current process
    '''
    # _MainProcess singleton in the parent; rebound to the child's own
    # process object in BaseProcess._bootstrap().
    return _current_process
|
| 42 |
+
|
| 43 |
+
def active_children():
    '''
    Return list of process objects corresponding to live child processes
    '''
    # Reap already-finished children first so the snapshot is current.
    _cleanup()
    return [child for child in _children]
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
def parent_process():
    '''
    Return process object representing the parent process
    '''
    # None in the main process; set to a _ParentProcess proxy in children
    # (see BaseProcess._bootstrap).
    return _parent_process
|
| 56 |
+
|
| 57 |
+
#
|
| 58 |
+
#
|
| 59 |
+
#
|
| 60 |
+
|
| 61 |
+
def _cleanup():
    # Drop every child whose process has terminated (poll() returns a
    # non-None exit code once the child is done).
    finished = [child for child in list(_children)
                if child._popen.poll() is not None]
    for child in finished:
        _children.discard(child)
|
| 66 |
+
|
| 67 |
+
#
|
| 68 |
+
# The `Process` class
|
| 69 |
+
#
|
| 70 |
+
|
| 71 |
+
class BaseProcess(object):
    '''
    Process objects represent activity that is run in a separate process

    The class is analogous to `threading.Thread`
    '''
    def _Popen(self):
        # Concrete start-method contexts override this with the Popen class
        # that actually launches the child.
        raise NotImplementedError

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={},
                 *, daemon=None):
        assert group is None, 'group argument must be None for now'
        # Identity is the chain of per-parent child counters, e.g. (1, 2)
        # for the second child of the first child of the main process.
        count = next(_process_counter)
        self._identity = _current_process._identity + (count,)
        # Children inherit a copy of the parent's config (authkey, daemon
        # flag, semprefix, ...).
        self._config = _current_process._config.copy()
        self._parent_pid = os.getpid()
        self._parent_name = _current_process.name
        self._popen = None
        self._closed = False
        self._target = target
        # Copy args/kwargs so later caller mutation (and the mutable {}
        # default) cannot affect this process object.
        self._args = tuple(args)
        self._kwargs = dict(kwargs)
        self._name = name or type(self).__name__ + '-' + \
                     ':'.join(str(i) for i in self._identity)
        if daemon is not None:
            self.daemon = daemon
        _dangling.add(self)

    def _check_closed(self):
        # Guard used by most public methods: a closed process object may
        # not be used any further.
        if self._closed:
            raise ValueError("process object is closed")

    def run(self):
        '''
        Method to be run in sub-process; can be overridden in sub-class
        '''
        if self._target:
            self._target(*self._args, **self._kwargs)

    def start(self):
        '''
        Start child process
        '''
        self._check_closed()
        assert self._popen is None, 'cannot start a process twice'
        assert self._parent_pid == os.getpid(), \
               'can only start a process object created by current process'
        assert not _current_process._config.get('daemon'), \
               'daemonic processes are not allowed to have children'
        _cleanup()
        self._popen = self._Popen(self)
        self._sentinel = self._popen.sentinel
        # Avoid a refcycle if the target function holds an indirect
        # reference to the process object (see bpo-30775)
        del self._target, self._args, self._kwargs
        _children.add(self)

    def terminate(self):
        '''
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
        '''
        self._check_closed()
        self._popen.terminate()

    def kill(self):
        '''
        Terminate process; sends SIGKILL signal or uses TerminateProcess()
        '''
        self._check_closed()
        self._popen.kill()

    def join(self, timeout=None):
        '''
        Wait until child process terminates
        '''
        self._check_closed()
        assert self._parent_pid == os.getpid(), 'can only join a child process'
        assert self._popen is not None, 'can only join a started process'
        res = self._popen.wait(timeout)
        # A non-None result means the child has exited; stop tracking it.
        if res is not None:
            _children.discard(self)

    def is_alive(self):
        '''
        Return whether process is alive
        '''
        self._check_closed()
        if self is _current_process:
            return True
        assert self._parent_pid == os.getpid(), 'can only test a child process'

        if self._popen is None:
            # Never started.
            return False

        returncode = self._popen.poll()
        if returncode is None:
            return True
        else:
            _children.discard(self)
            return False

    def close(self):
        '''
        Close the Process object.

        This method releases resources held by the Process object.  It is
        an error to call this method if the child process is still running.
        '''
        if self._popen is not None:
            if self._popen.poll() is None:
                raise ValueError("Cannot close a process while it is still running. "
                                 "You should first call join() or terminate().")
            self._popen.close()
            self._popen = None
            del self._sentinel
            _children.discard(self)
        self._closed = True

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        assert isinstance(name, str), 'name must be a string'
        self._name = name

    @property
    def daemon(self):
        '''
        Return whether process is a daemon
        '''
        return self._config.get('daemon', False)

    @daemon.setter
    def daemon(self, daemonic):
        '''
        Set whether process is a daemon
        '''
        assert self._popen is None, 'process has already started'
        self._config['daemon'] = daemonic

    @property
    def authkey(self):
        return self._config['authkey']

    @authkey.setter
    def authkey(self, authkey):
        '''
        Set authorization key of process
        '''
        self._config['authkey'] = AuthenticationString(authkey)

    @property
    def exitcode(self):
        '''
        Return exit code of process or `None` if it has yet to stop
        '''
        self._check_closed()
        if self._popen is None:
            # self._popen is None here, i.e. the process never started.
            return self._popen
        return self._popen.poll()

    @property
    def ident(self):
        '''
        Return identifier (PID) of process or `None` if it has yet to start
        '''
        self._check_closed()
        if self is _current_process:
            return os.getpid()
        else:
            # None if not started, else the child's pid.
            return self._popen and self._popen.pid

    pid = ident

    @property
    def sentinel(self):
        '''
        Return a file descriptor (Unix) or handle (Windows) suitable for
        waiting for process termination.
        '''
        self._check_closed()
        try:
            return self._sentinel
        except AttributeError:
            # _sentinel is only assigned in start().
            raise ValueError("process not started") from None

    def __repr__(self):
        exitcode = None
        if self is _current_process:
            status = 'started'
        elif self._closed:
            status = 'closed'
        elif self._parent_pid != os.getpid():
            status = 'unknown'
        elif self._popen is None:
            status = 'initial'
        else:
            exitcode = self._popen.poll()
            if exitcode is not None:
                status = 'stopped'
            else:
                status = 'started'

        info = [type(self).__name__, 'name=%r' % self._name]
        if self._popen is not None:
            info.append('pid=%s' % self._popen.pid)
        info.append('parent=%s' % self._parent_pid)
        info.append(status)
        if exitcode is not None:
            # Show symbolic signal names (e.g. -SIGTERM) where known.
            exitcode = _exitcode_to_name.get(exitcode, exitcode)
            info.append('exitcode=%s' % exitcode)
        if self.daemon:
            info.append('daemon')
        return '<%s>' % ' '.join(info)

    ##

    def _bootstrap(self, parent_sentinel=None):
        # Runs inside the freshly started child: rebinds this module's
        # globals for the new process, runs self.run(), and returns the
        # exit code the child should exit with.
        from . import util, context
        global _current_process, _parent_process, _process_counter, _children

        try:
            if self._start_method is not None:
                context._force_start_method(self._start_method)
            # Fresh bookkeeping: the child has no children of its own yet.
            _process_counter = itertools.count(1)
            _children = set()
            util._close_stdin()
            old_process = _current_process
            _current_process = self
            _parent_process = _ParentProcess(
                self._parent_name, self._parent_pid, parent_sentinel)
            if threading._HAVE_THREAD_NATIVE_ID:
                threading.main_thread()._set_native_id()
            try:
                self._after_fork()
            finally:
                # delay finalization of the old process object until after
                # _run_after_forkers() is executed
                del old_process
            util.info('child process calling self.run()')
            try:
                self.run()
                exitcode = 0
            finally:
                util._exit_function()
        except SystemExit as e:
            if e.code is None:
                exitcode = 0
            elif isinstance(e.code, int):
                exitcode = e.code
            else:
                # Non-integer exit codes are printed, and 1 is returned.
                sys.stderr.write(str(e.code) + '\n')
                exitcode = 1
        except:
            exitcode = 1
            import traceback
            sys.stderr.write('Process %s:\n' % self.name)
            traceback.print_exc()
        finally:
            threading._shutdown()
            util.info('process exiting with exitcode %d' % exitcode)
            util._flush_std_streams()

        return exitcode

    @staticmethod
    def _after_fork():
        # Finalizers registered in the parent must not run in the child.
        from . import util
        util._finalizer_registry.clear()
        util._run_after_forkers()
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
#
|
| 346 |
+
# We subclass bytes to avoid accidental transmission of auth keys over network
|
| 347 |
+
#
|
| 348 |
+
|
| 349 |
+
class AuthenticationString(bytes):
    """Bytes subclass for auth keys that refuses ordinary pickling.

    Serialization is only permitted while a Popen is spawning a child,
    so keys cannot accidentally be sent over a network connection.
    """
    def __reduce__(self):
        from .context import get_spawning_popen
        if get_spawning_popen() is not None:
            # A child process is being spawned: allow the key through.
            return AuthenticationString, (bytes(self),)
        raise TypeError(
            'Pickling an AuthenticationString object is '
            'disallowed for security reasons'
        )
|
| 358 |
+
|
| 359 |
+
|
| 360 |
+
#
|
| 361 |
+
# Create object representing the parent process
|
| 362 |
+
#
|
| 363 |
+
|
| 364 |
+
class _ParentProcess(BaseProcess):
    """Lightweight proxy describing this process's parent process."""

    def __init__(self, name, pid, sentinel):
        # A parent proxy is never started/closed by us, so most of the
        # BaseProcess state is inert.
        self._name = name
        self._pid = pid
        self._sentinel = sentinel
        self._identity = ()
        self._parent_pid = None
        self._popen = None
        self._closed = False
        self._config = {}

    def is_alive(self):
        from multiprocessing.connection import wait
        # The parent is alive while its sentinel has not become ready.
        ready = wait([self._sentinel], timeout=0)
        return not ready

    @property
    def ident(self):
        return self._pid

    def join(self, timeout=None):
        '''
        Wait until parent process terminates
        '''
        from multiprocessing.connection import wait
        wait([self._sentinel], timeout=timeout)

    pid = ident
|
| 392 |
+
|
| 393 |
+
#
|
| 394 |
+
# Create object representing the main process
|
| 395 |
+
#
|
| 396 |
+
|
| 397 |
+
class _MainProcess(BaseProcess):

    def __init__(self):
        # The main process has no parent and was not launched via a Popen,
        # so identity is empty and popen/parent fields stay None.
        self._identity = ()
        self._name = 'MainProcess'
        self._parent_pid = None
        self._popen = None
        self._closed = False
        self._config = {'authkey': AuthenticationString(os.urandom(32)),
                        'semprefix': '/mp'}
        # Note that some versions of FreeBSD only allow named
        # semaphores to have names of up to 14 characters.  Therefore
        # we choose a short prefix.
        #
        # On MacOSX in a sandbox it may be necessary to use a
        # different prefix -- see #19478.
        #
        # Everything in self._config will be inherited by descendant
        # processes.

    def close(self):
        # The main process holds no Popen resources; closing is a no-op.
        pass
|
| 419 |
+
|
| 420 |
+
|
| 421 |
+
# Module-level process bookkeeping.  In each child these are rebound by
# BaseProcess._bootstrap(); in the main process they hold the defaults
# below.
_parent_process = None
_current_process = _MainProcess()
_process_counter = itertools.count(1)
_children = set()
# Only the singleton instance above is needed; drop the class name.
del _MainProcess
|
| 426 |
+
|
| 427 |
+
#
|
| 428 |
+
# Give names to some return codes
|
| 429 |
+
#
|
| 430 |
+
|
| 431 |
+
_exitcode_to_name = {}
|
| 432 |
+
|
| 433 |
+
for name, signum in list(signal.__dict__.items()):
|
| 434 |
+
if name[:3]=='SIG' and '_' not in name:
|
| 435 |
+
_exitcode_to_name[-signum] = f'-{name}'
|
| 436 |
+
|
| 437 |
+
# For debug and leak testing
|
| 438 |
+
_dangling = WeakSet()
|
llava/lib/python3.10/multiprocessing/queues.py
ADDED
|
@@ -0,0 +1,379 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Module implementing queues
|
| 3 |
+
#
|
| 4 |
+
# multiprocessing/queues.py
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 2006-2008, R Oudkerk
|
| 7 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 8 |
+
#
|
| 9 |
+
|
| 10 |
+
__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']
|
| 11 |
+
|
| 12 |
+
import sys
|
| 13 |
+
import os
|
| 14 |
+
import threading
|
| 15 |
+
import collections
|
| 16 |
+
import time
|
| 17 |
+
import types
|
| 18 |
+
import weakref
|
| 19 |
+
import errno
|
| 20 |
+
|
| 21 |
+
from queue import Empty, Full
|
| 22 |
+
|
| 23 |
+
import _multiprocessing
|
| 24 |
+
|
| 25 |
+
from . import connection
|
| 26 |
+
from . import context
|
| 27 |
+
_ForkingPickler = context.reduction.ForkingPickler
|
| 28 |
+
|
| 29 |
+
from .util import debug, info, Finalize, register_after_fork, is_exiting
|
| 30 |
+
|
| 31 |
+
#
|
| 32 |
+
# Queue type using a pipe, buffer and thread
|
| 33 |
+
#
|
| 34 |
+
|
| 35 |
+
class Queue(object):
|
| 36 |
+
|
| 37 |
+
def __init__(self, maxsize=0, *, ctx):
|
| 38 |
+
if maxsize <= 0:
|
| 39 |
+
# Can raise ImportError (see issues #3770 and #23400)
|
| 40 |
+
from .synchronize import SEM_VALUE_MAX as maxsize
|
| 41 |
+
self._maxsize = maxsize
|
| 42 |
+
self._reader, self._writer = connection.Pipe(duplex=False)
|
| 43 |
+
self._rlock = ctx.Lock()
|
| 44 |
+
self._opid = os.getpid()
|
| 45 |
+
if sys.platform == 'win32':
|
| 46 |
+
self._wlock = None
|
| 47 |
+
else:
|
| 48 |
+
self._wlock = ctx.Lock()
|
| 49 |
+
self._sem = ctx.BoundedSemaphore(maxsize)
|
| 50 |
+
# For use by concurrent.futures
|
| 51 |
+
self._ignore_epipe = False
|
| 52 |
+
self._reset()
|
| 53 |
+
|
| 54 |
+
if sys.platform != 'win32':
|
| 55 |
+
register_after_fork(self, Queue._after_fork)
|
| 56 |
+
|
| 57 |
+
def __getstate__(self):
|
| 58 |
+
context.assert_spawning(self)
|
| 59 |
+
return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
|
| 60 |
+
self._rlock, self._wlock, self._sem, self._opid)
|
| 61 |
+
|
| 62 |
+
def __setstate__(self, state):
|
| 63 |
+
(self._ignore_epipe, self._maxsize, self._reader, self._writer,
|
| 64 |
+
self._rlock, self._wlock, self._sem, self._opid) = state
|
| 65 |
+
self._reset()
|
| 66 |
+
|
| 67 |
+
def _after_fork(self):
|
| 68 |
+
debug('Queue._after_fork()')
|
| 69 |
+
self._reset(after_fork=True)
|
| 70 |
+
|
| 71 |
+
def _reset(self, after_fork=False):
|
| 72 |
+
if after_fork:
|
| 73 |
+
self._notempty._at_fork_reinit()
|
| 74 |
+
else:
|
| 75 |
+
self._notempty = threading.Condition(threading.Lock())
|
| 76 |
+
self._buffer = collections.deque()
|
| 77 |
+
self._thread = None
|
| 78 |
+
self._jointhread = None
|
| 79 |
+
self._joincancelled = False
|
| 80 |
+
self._closed = False
|
| 81 |
+
self._close = None
|
| 82 |
+
self._send_bytes = self._writer.send_bytes
|
| 83 |
+
self._recv_bytes = self._reader.recv_bytes
|
| 84 |
+
self._poll = self._reader.poll
|
| 85 |
+
|
| 86 |
+
def put(self, obj, block=True, timeout=None):
|
| 87 |
+
if self._closed:
|
| 88 |
+
raise ValueError(f"Queue {self!r} is closed")
|
| 89 |
+
if not self._sem.acquire(block, timeout):
|
| 90 |
+
raise Full
|
| 91 |
+
|
| 92 |
+
with self._notempty:
|
| 93 |
+
if self._thread is None:
|
| 94 |
+
self._start_thread()
|
| 95 |
+
self._buffer.append(obj)
|
| 96 |
+
self._notempty.notify()
|
| 97 |
+
|
| 98 |
+
def get(self, block=True, timeout=None):
|
| 99 |
+
if self._closed:
|
| 100 |
+
raise ValueError(f"Queue {self!r} is closed")
|
| 101 |
+
if block and timeout is None:
|
| 102 |
+
with self._rlock:
|
| 103 |
+
res = self._recv_bytes()
|
| 104 |
+
self._sem.release()
|
| 105 |
+
else:
|
| 106 |
+
if block:
|
| 107 |
+
deadline = time.monotonic() + timeout
|
| 108 |
+
if not self._rlock.acquire(block, timeout):
|
| 109 |
+
raise Empty
|
| 110 |
+
try:
|
| 111 |
+
if block:
|
| 112 |
+
timeout = deadline - time.monotonic()
|
| 113 |
+
if not self._poll(timeout):
|
| 114 |
+
raise Empty
|
| 115 |
+
elif not self._poll():
|
| 116 |
+
raise Empty
|
| 117 |
+
res = self._recv_bytes()
|
| 118 |
+
self._sem.release()
|
| 119 |
+
finally:
|
| 120 |
+
self._rlock.release()
|
| 121 |
+
# unserialize the data after having released the lock
|
| 122 |
+
return _ForkingPickler.loads(res)
|
| 123 |
+
|
| 124 |
+
def qsize(self):
|
| 125 |
+
# Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
|
| 126 |
+
return self._maxsize - self._sem._semlock._get_value()
|
| 127 |
+
|
| 128 |
+
def empty(self):
|
| 129 |
+
return not self._poll()
|
| 130 |
+
|
| 131 |
+
def full(self):
|
| 132 |
+
return self._sem._semlock._is_zero()
|
| 133 |
+
|
| 134 |
+
def get_nowait(self):
|
| 135 |
+
return self.get(False)
|
| 136 |
+
|
| 137 |
+
def put_nowait(self, obj):
|
| 138 |
+
return self.put(obj, False)
|
| 139 |
+
|
| 140 |
+
def close(self):
|
| 141 |
+
self._closed = True
|
| 142 |
+
close = self._close
|
| 143 |
+
if close:
|
| 144 |
+
self._close = None
|
| 145 |
+
close()
|
| 146 |
+
|
| 147 |
+
def join_thread(self):
|
| 148 |
+
debug('Queue.join_thread()')
|
| 149 |
+
assert self._closed, "Queue {0!r} not closed".format(self)
|
| 150 |
+
if self._jointhread:
|
| 151 |
+
self._jointhread()
|
| 152 |
+
|
| 153 |
+
def cancel_join_thread(self):
|
| 154 |
+
debug('Queue.cancel_join_thread()')
|
| 155 |
+
self._joincancelled = True
|
| 156 |
+
try:
|
| 157 |
+
self._jointhread.cancel()
|
| 158 |
+
except AttributeError:
|
| 159 |
+
pass
|
| 160 |
+
|
| 161 |
+
def _start_thread(self):
|
| 162 |
+
debug('Queue._start_thread()')
|
| 163 |
+
|
| 164 |
+
# Start thread which transfers data from buffer to pipe
|
| 165 |
+
self._buffer.clear()
|
| 166 |
+
self._thread = threading.Thread(
|
| 167 |
+
target=Queue._feed,
|
| 168 |
+
args=(self._buffer, self._notempty, self._send_bytes,
|
| 169 |
+
self._wlock, self._reader.close, self._writer.close,
|
| 170 |
+
self._ignore_epipe, self._on_queue_feeder_error,
|
| 171 |
+
self._sem),
|
| 172 |
+
name='QueueFeederThread'
|
| 173 |
+
)
|
| 174 |
+
self._thread.daemon = True
|
| 175 |
+
|
| 176 |
+
debug('doing self._thread.start()')
|
| 177 |
+
self._thread.start()
|
| 178 |
+
debug('... done self._thread.start()')
|
| 179 |
+
|
| 180 |
+
if not self._joincancelled:
|
| 181 |
+
self._jointhread = Finalize(
|
| 182 |
+
self._thread, Queue._finalize_join,
|
| 183 |
+
[weakref.ref(self._thread)],
|
| 184 |
+
exitpriority=-5
|
| 185 |
+
)
|
| 186 |
+
|
| 187 |
+
# Send sentinel to the thread queue object when garbage collected
|
| 188 |
+
self._close = Finalize(
|
| 189 |
+
self, Queue._finalize_close,
|
| 190 |
+
[self._buffer, self._notempty],
|
| 191 |
+
exitpriority=10
|
| 192 |
+
)
|
| 193 |
+
|
| 194 |
+
@staticmethod
|
| 195 |
+
def _finalize_join(twr):
|
| 196 |
+
debug('joining queue thread')
|
| 197 |
+
thread = twr()
|
| 198 |
+
if thread is not None:
|
| 199 |
+
thread.join()
|
| 200 |
+
debug('... queue thread joined')
|
| 201 |
+
else:
|
| 202 |
+
debug('... queue thread already dead')
|
| 203 |
+
|
| 204 |
+
@staticmethod
|
| 205 |
+
def _finalize_close(buffer, notempty):
|
| 206 |
+
debug('telling queue thread to quit')
|
| 207 |
+
with notempty:
|
| 208 |
+
buffer.append(_sentinel)
|
| 209 |
+
notempty.notify()
|
| 210 |
+
|
| 211 |
+
@staticmethod
|
| 212 |
+
def _feed(buffer, notempty, send_bytes, writelock, reader_close,
|
| 213 |
+
writer_close, ignore_epipe, onerror, queue_sem):
|
| 214 |
+
debug('starting thread to feed data to pipe')
|
| 215 |
+
nacquire = notempty.acquire
|
| 216 |
+
nrelease = notempty.release
|
| 217 |
+
nwait = notempty.wait
|
| 218 |
+
bpopleft = buffer.popleft
|
| 219 |
+
sentinel = _sentinel
|
| 220 |
+
if sys.platform != 'win32':
|
| 221 |
+
wacquire = writelock.acquire
|
| 222 |
+
wrelease = writelock.release
|
| 223 |
+
else:
|
| 224 |
+
wacquire = None
|
| 225 |
+
|
| 226 |
+
while 1:
|
| 227 |
+
try:
|
| 228 |
+
nacquire()
|
| 229 |
+
try:
|
| 230 |
+
if not buffer:
|
| 231 |
+
nwait()
|
| 232 |
+
finally:
|
| 233 |
+
nrelease()
|
| 234 |
+
try:
|
| 235 |
+
while 1:
|
| 236 |
+
obj = bpopleft()
|
| 237 |
+
if obj is sentinel:
|
| 238 |
+
debug('feeder thread got sentinel -- exiting')
|
| 239 |
+
reader_close()
|
| 240 |
+
writer_close()
|
| 241 |
+
return
|
| 242 |
+
|
| 243 |
+
# serialize the data before acquiring the lock
|
| 244 |
+
obj = _ForkingPickler.dumps(obj)
|
| 245 |
+
if wacquire is None:
|
| 246 |
+
send_bytes(obj)
|
| 247 |
+
else:
|
| 248 |
+
wacquire()
|
| 249 |
+
try:
|
| 250 |
+
send_bytes(obj)
|
| 251 |
+
finally:
|
| 252 |
+
wrelease()
|
| 253 |
+
except IndexError:
|
| 254 |
+
pass
|
| 255 |
+
except Exception as e:
|
| 256 |
+
if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
|
| 257 |
+
return
|
| 258 |
+
# Since this runs in a daemon thread the resources it uses
|
| 259 |
+
# may be become unusable while the process is cleaning up.
|
| 260 |
+
# We ignore errors which happen after the process has
|
| 261 |
+
# started to cleanup.
|
| 262 |
+
if is_exiting():
|
| 263 |
+
info('error in queue thread: %s', e)
|
| 264 |
+
return
|
| 265 |
+
else:
|
| 266 |
+
# Since the object has not been sent in the queue, we need
|
| 267 |
+
# to decrease the size of the queue. The error acts as
|
| 268 |
+
# if the object had been silently removed from the queue
|
| 269 |
+
# and this step is necessary to have a properly working
|
| 270 |
+
# queue.
|
| 271 |
+
queue_sem.release()
|
| 272 |
+
onerror(e, obj)
|
| 273 |
+
|
| 274 |
+
    @staticmethod
    def _on_queue_feeder_error(e, obj):
        """
        Private API hook called when feeding data in the background thread
        raises an exception.  For overriding by concurrent.futures.

        ``e`` is the exception raised while serialising/sending ``obj``;
        the default behaviour is to print the traceback and drop the item.
        """
        # Local import: keeps module import light; only needed on error paths.
        import traceback
        traceback.print_exc()
|
| 282 |
+
|
| 283 |
+
|
| 284 |
+
_sentinel = object()
|
| 285 |
+
|
| 286 |
+
#
|
| 287 |
+
# A queue type which also supports join() and task_done() methods
|
| 288 |
+
#
|
| 289 |
+
# Note that if you do not call task_done() for each finished task then
|
| 290 |
+
# eventually the counter's semaphore may overflow causing Bad Things
|
| 291 |
+
# to happen.
|
| 292 |
+
#
|
| 293 |
+
|
| 294 |
+
class JoinableQueue(Queue):
    """A Queue subclass that also supports join() and task_done().

    Note that if task_done() is not called once per finished item, the
    counter's semaphore may eventually overflow, causing Bad Things.
    """

    def __init__(self, maxsize=0, *, ctx):
        Queue.__init__(self, maxsize, ctx=ctx)
        # Counts items that were put() but not yet marked via task_done().
        self._unfinished_tasks = ctx.Semaphore(0)
        # notify_all()'d when the unfinished count drops to zero.
        self._cond = ctx.Condition()

    def __getstate__(self):
        # Extend Queue's pickling state with the extra sync primitives.
        return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)

    def __setstate__(self, state):
        Queue.__setstate__(self, state[:-2])
        self._cond, self._unfinished_tasks = state[-2:]

    def put(self, obj, block=True, timeout=None):
        # Same as Queue.put(), but also bumps the unfinished-task counter.
        if self._closed:
            raise ValueError(f"Queue {self!r} is closed")
        if not self._sem.acquire(block, timeout):
            raise Full

        # Hold both locks so the buffer append and the counter increment
        # appear atomic to task_done()/join().
        with self._notempty, self._cond:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._unfinished_tasks.release()
            self._notempty.notify()

    def task_done(self):
        # Mark one previously-put item as processed; wake joiners when the
        # outstanding count reaches zero.
        with self._cond:
            if not self._unfinished_tasks.acquire(False):
                raise ValueError('task_done() called too many times')
            if self._unfinished_tasks._semlock._is_zero():
                self._cond.notify_all()

    def join(self):
        # Block until every item put on the queue has been task_done()'d.
        with self._cond:
            if not self._unfinished_tasks._semlock._is_zero():
                self._cond.wait()
|
| 332 |
+
|
| 333 |
+
#
|
| 334 |
+
# Simplified Queue type -- really just a locked pipe
|
| 335 |
+
#
|
| 336 |
+
|
| 337 |
+
class SimpleQueue(object):
    """Simplified queue type -- really just a locked pipe."""

    def __init__(self, *, ctx):
        self._reader, self._writer = connection.Pipe(duplex=False)
        # Readers must serialise recv_bytes() calls on every platform.
        self._rlock = ctx.Lock()
        self._poll = self._reader.poll
        if sys.platform == 'win32':
            # Message-oriented win32 pipe writes are atomic: no write lock.
            self._wlock = None
        else:
            self._wlock = ctx.Lock()

    def close(self):
        self._reader.close()
        self._writer.close()

    def empty(self):
        # NOTE(review): only a snapshot -- another process may put/get
        # immediately afterwards.
        return not self._poll()

    def __getstate__(self):
        # Only picklable while spawning a child process.
        context.assert_spawning(self)
        return (self._reader, self._writer, self._rlock, self._wlock)

    def __setstate__(self, state):
        (self._reader, self._writer, self._rlock, self._wlock) = state
        # Re-derive the bound method dropped during pickling.
        self._poll = self._reader.poll

    def get(self):
        with self._rlock:
            res = self._reader.recv_bytes()
        # unserialize the data after having released the lock
        return _ForkingPickler.loads(res)

    def put(self, obj):
        # serialize the data before acquiring the lock
        obj = _ForkingPickler.dumps(obj)
        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self._writer.send_bytes(obj)
        else:
            with self._wlock:
                self._writer.send_bytes(obj)

    # Allow SimpleQueue[T] in annotations (PEP 585 style).
    __class_getitem__ = classmethod(types.GenericAlias)
|
llava/lib/python3.10/multiprocessing/resource_tracker.py
ADDED
|
@@ -0,0 +1,239 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
###############################################################################
|
| 2 |
+
# Server process to keep track of unlinked resources (like shared memory
|
| 3 |
+
# segments, semaphores etc.) and clean them.
|
| 4 |
+
#
|
| 5 |
+
# On Unix we run a server process which keeps track of unlinked
|
| 6 |
+
# resources. The server ignores SIGINT and SIGTERM and reads from a
|
| 7 |
+
# pipe. Every other process of the program has a copy of the writable
|
| 8 |
+
# end of the pipe, so we get EOF when all other processes have exited.
|
| 9 |
+
# Then the server process unlinks any remaining resource names.
|
| 10 |
+
#
|
| 11 |
+
# This is important because there may be system limits for such resources: for
|
| 12 |
+
# instance, the system only supports a limited number of named semaphores, and
|
| 13 |
+
# shared-memory segments live in the RAM. If a python process leaks such a
|
| 14 |
+
# resource, this resource will not be removed till the next reboot. Without
|
| 15 |
+
# this resource tracker process, "killall python" would probably leave unlinked
|
| 16 |
+
# resources.
|
| 17 |
+
|
| 18 |
+
import os
|
| 19 |
+
import signal
|
| 20 |
+
import sys
|
| 21 |
+
import threading
|
| 22 |
+
import warnings
|
| 23 |
+
|
| 24 |
+
from . import spawn
|
| 25 |
+
from . import util
|
| 26 |
+
|
| 27 |
+
__all__ = ['ensure_running', 'register', 'unregister']
|
| 28 |
+
|
| 29 |
+
_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask')
|
| 30 |
+
_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)
|
| 31 |
+
|
| 32 |
+
_CLEANUP_FUNCS = {
|
| 33 |
+
'noop': lambda: None,
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
if os.name == 'posix':
|
| 37 |
+
import _multiprocessing
|
| 38 |
+
import _posixshmem
|
| 39 |
+
|
| 40 |
+
# Use sem_unlink() to clean up named semaphores.
|
| 41 |
+
#
|
| 42 |
+
# sem_unlink() may be missing if the Python build process detected the
|
| 43 |
+
# absence of POSIX named semaphores. In that case, no named semaphores were
|
| 44 |
+
# ever opened, so no cleanup would be necessary.
|
| 45 |
+
if hasattr(_multiprocessing, 'sem_unlink'):
|
| 46 |
+
_CLEANUP_FUNCS.update({
|
| 47 |
+
'semaphore': _multiprocessing.sem_unlink,
|
| 48 |
+
})
|
| 49 |
+
_CLEANUP_FUNCS.update({
|
| 50 |
+
'shared_memory': _posixshmem.shm_unlink,
|
| 51 |
+
})
|
| 52 |
+
|
| 53 |
+
|
| 54 |
+
class ResourceTracker(object):
    """Owner-side handle for the resource tracker server process.

    Holds the write end of a pipe to a background process that unlinks
    leaked named resources (semaphores, shared memory) at shutdown.
    """

    def __init__(self):
        self._lock = threading.Lock()
        # Write end of the pipe to the tracker; None while not running.
        self._fd = None
        # Pid of the tracker we spawned; None if another process spawned it.
        self._pid = None

    def _stop(self):
        # Shut the tracker down (mainly for tests); no-op if not running.
        with self._lock:
            if self._fd is None:
                # not running
                return

            # closing the "alive" file descriptor stops main()
            os.close(self._fd)
            self._fd = None

            os.waitpid(self._pid, 0)
            self._pid = None

    def getfd(self):
        self.ensure_running()
        return self._fd

    def ensure_running(self):
        '''Make sure that resource tracker process is running.

        This can be run from any process.  Usually a child process will use
        the resource created by its parent.'''
        with self._lock:
            if self._fd is not None:
                # resource tracker was launched before, is it still running?
                if self._check_alive():
                    # => still alive
                    return
                # => dead, launch it again
                os.close(self._fd)

                # Clean-up to avoid dangling processes.
                try:
                    # _pid can be None if this process is a child from another
                    # python process, which has started the resource_tracker.
                    if self._pid is not None:
                        os.waitpid(self._pid, 0)
                except ChildProcessError:
                    # The resource_tracker has already been terminated.
                    pass
                self._fd = None
                self._pid = None

                warnings.warn('resource_tracker: process died unexpectedly, '
                              'relaunching. Some resources might leak.')

            fds_to_pass = []
            try:
                fds_to_pass.append(sys.stderr.fileno())
            except Exception:
                pass
            cmd = 'from multiprocessing.resource_tracker import main;main(%d)'
            r, w = os.pipe()
            try:
                fds_to_pass.append(r)
                # process will out live us, so no need to wait on pid
                exe = spawn.get_executable()
                args = [exe] + util._args_from_interpreter_flags()
                args += ['-c', cmd % r]
                # bpo-33613: Register a signal mask that will block the signals.
                # This signal mask will be inherited by the child that is going
                # to be spawned and will protect the child from a race condition
                # that can make the child die before it registers signal handlers
                # for SIGINT and SIGTERM. The mask is unregistered after spawning
                # the child.
                try:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS)
                    pid = util.spawnv_passfds(exe, args, fds_to_pass)
                finally:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
            except:
                os.close(w)
                raise
            else:
                self._fd = w
                self._pid = pid
            finally:
                # The child owns the read end now; drop our copy.
                os.close(r)

    def _check_alive(self):
        '''Check that the pipe has not been closed by sending a probe.'''
        try:
            # We cannot use send here as it calls ensure_running, creating
            # a cycle.
            os.write(self._fd, b'PROBE:0:noop\n')
        except OSError:
            return False
        else:
            return True

    def register(self, name, rtype):
        '''Register name of resource with resource tracker.'''
        self._send('REGISTER', name, rtype)

    def unregister(self, name, rtype):
        '''Unregister name of resource with resource tracker.'''
        self._send('UNREGISTER', name, rtype)

    def _send(self, cmd, name, rtype):
        self.ensure_running()
        msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii')
        if len(msg) > 512:
            # posix guarantees that writes to a pipe of less than PIPE_BUF
            # bytes are atomic, and that PIPE_BUF >= 512
            raise ValueError('msg too long')
        nbytes = os.write(self._fd, msg)
        assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format(
            nbytes, len(msg))
|
| 171 |
+
|
| 172 |
+
|
| 173 |
+
_resource_tracker = ResourceTracker()
|
| 174 |
+
ensure_running = _resource_tracker.ensure_running
|
| 175 |
+
register = _resource_tracker.register
|
| 176 |
+
unregister = _resource_tracker.unregister
|
| 177 |
+
getfd = _resource_tracker.getfd
|
| 178 |
+
|
| 179 |
+
def main(fd):
    '''Run resource tracker.'''
    # protect the process from ^C and "killall python" etc
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    if _HAVE_SIGMASK:
        # Undo the mask the parent installed around the spawn (bpo-33613).
        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)

    # stdin/stdout are useless here; stderr is kept for warnings.
    for f in (sys.stdin, sys.stdout):
        try:
            f.close()
        except Exception:
            pass

    # Per-type sets of names registered but not yet unregistered.
    cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()}
    try:
        # keep track of registered/unregistered resources
        with open(fd, 'rb') as f:
            for line in f:
                try:
                    cmd, name, rtype = line.strip().decode('ascii').split(':')
                    cleanup_func = _CLEANUP_FUNCS.get(rtype, None)
                    if cleanup_func is None:
                        raise ValueError(
                            f'Cannot register {name} for automatic cleanup: '
                            f'unknown resource type {rtype}')

                    if cmd == 'REGISTER':
                        cache[rtype].add(name)
                    elif cmd == 'UNREGISTER':
                        cache[rtype].remove(name)
                    elif cmd == 'PROBE':
                        # liveness check from _check_alive(); nothing to do
                        pass
                    else:
                        raise RuntimeError('unrecognized command %r' % cmd)
                except Exception:
                    # Never let one bad message kill the tracker: report the
                    # error and keep serving the pipe.
                    try:
                        sys.excepthook(*sys.exc_info())
                    except:
                        pass
    finally:
        # all processes have terminated; cleanup any remaining resources
        for rtype, rtype_cache in cache.items():
            if rtype_cache:
                try:
                    warnings.warn('resource_tracker: There appear to be %d '
                                  'leaked %s objects to clean up at shutdown' %
                                  (len(rtype_cache), rtype))
                except Exception:
                    pass
            for name in rtype_cache:
                # For some reason the process which created and registered this
                # resource has failed to unregister it. Presumably it has
                # died. We therefore unlink it.
                try:
                    try:
                        _CLEANUP_FUNCS[rtype](name)
                    except Exception as e:
                        warnings.warn('resource_tracker: %r: %s' % (name, e))
                finally:
                    pass
|
llava/lib/python3.10/multiprocessing/sharedctypes.py
ADDED
|
@@ -0,0 +1,240 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Module which supports allocation of ctypes objects from shared memory
|
| 3 |
+
#
|
| 4 |
+
# multiprocessing/sharedctypes.py
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 2006-2008, R Oudkerk
|
| 7 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 8 |
+
#
|
| 9 |
+
|
| 10 |
+
import ctypes
|
| 11 |
+
import weakref
|
| 12 |
+
|
| 13 |
+
from . import heap
|
| 14 |
+
from . import get_context
|
| 15 |
+
|
| 16 |
+
from .context import reduction, assert_spawning
|
| 17 |
+
_ForkingPickler = reduction.ForkingPickler
|
| 18 |
+
|
| 19 |
+
__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']
|
| 20 |
+
|
| 21 |
+
#
|
| 22 |
+
#
|
| 23 |
+
#
|
| 24 |
+
|
| 25 |
+
typecode_to_type = {
|
| 26 |
+
'c': ctypes.c_char, 'u': ctypes.c_wchar,
|
| 27 |
+
'b': ctypes.c_byte, 'B': ctypes.c_ubyte,
|
| 28 |
+
'h': ctypes.c_short, 'H': ctypes.c_ushort,
|
| 29 |
+
'i': ctypes.c_int, 'I': ctypes.c_uint,
|
| 30 |
+
'l': ctypes.c_long, 'L': ctypes.c_ulong,
|
| 31 |
+
'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong,
|
| 32 |
+
'f': ctypes.c_float, 'd': ctypes.c_double
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
#
|
| 36 |
+
#
|
| 37 |
+
#
|
| 38 |
+
|
| 39 |
+
def _new_value(type_):
    """Allocate a fresh ctypes instance of ``type_`` backed by shared memory."""
    wrapper = heap.BufferWrapper(ctypes.sizeof(type_))
    return rebuild_ctype(type_, wrapper, None)
|
| 43 |
+
|
| 44 |
+
def RawValue(typecode_or_type, *args):
    """Return a ctypes object allocated from shared memory.

    ``typecode_or_type`` is either an array-module typecode (e.g. 'i') or a
    ctypes type; ``*args`` are forwarded to the ctypes initializer.
    """
    ctype = typecode_to_type.get(typecode_or_type, typecode_or_type)
    value = _new_value(ctype)
    # Zero the shared buffer before running the ctypes initializer.
    ctypes.memset(ctypes.addressof(value), 0, ctypes.sizeof(value))
    value.__init__(*args)
    return value
|
| 53 |
+
|
| 54 |
+
def RawArray(typecode_or_type, size_or_initializer):
    """Return a ctypes array allocated from shared memory.

    ``size_or_initializer`` is either an int length (array is zero-filled)
    or a sequence used to size and populate the array.
    """
    ctype = typecode_to_type.get(typecode_or_type, typecode_or_type)
    if isinstance(size_or_initializer, int):
        arr = _new_value(ctype * size_or_initializer)
        ctypes.memset(ctypes.addressof(arr), 0, ctypes.sizeof(arr))
        return arr
    arr = _new_value(ctype * len(size_or_initializer))
    arr.__init__(*size_or_initializer)
    return arr
|
| 69 |
+
|
| 70 |
+
def Value(typecode_or_type, *args, lock=True, ctx=None):
    '''
    Return a synchronization wrapper for a Value
    '''
    obj = RawValue(typecode_or_type, *args)
    # lock=False: caller wants the raw, unsynchronised object.
    if lock is False:
        return obj
    # lock=True/None: create a fresh re-entrant lock in the given context.
    if lock in (True, None):
        ctx = ctx or get_context()
        lock = ctx.RLock()
    # Otherwise the caller supplied their own lock-like object.
    if not hasattr(lock, 'acquire'):
        raise AttributeError("%r has no method 'acquire'" % lock)
    return synchronized(obj, lock, ctx=ctx)
|
| 83 |
+
|
| 84 |
+
def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None):
    '''
    Return a synchronization wrapper for a RawArray
    '''
    obj = RawArray(typecode_or_type, size_or_initializer)
    # lock=False: caller wants the raw, unsynchronised array.
    if lock is False:
        return obj
    # lock=True/None: create a fresh re-entrant lock in the given context.
    if lock in (True, None):
        ctx = ctx or get_context()
        lock = ctx.RLock()
    # Otherwise the caller supplied their own lock-like object.
    if not hasattr(lock, 'acquire'):
        raise AttributeError("%r has no method 'acquire'" % lock)
    return synchronized(obj, lock, ctx=ctx)
|
| 97 |
+
|
| 98 |
+
def copy(obj):
    """Return a shared-memory duplicate of the ctypes object ``obj``."""
    duplicate = _new_value(type(obj))
    # Assigning through a pointer copies obj's raw bytes into the new buffer.
    ctypes.pointer(duplicate)[0] = obj
    return duplicate
|
| 102 |
+
|
| 103 |
+
def synchronized(obj, lock=None, ctx=None):
    """Wrap a shared ctypes object in a process-safe synchronised wrapper."""
    assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
    ctx = ctx or get_context()

    if isinstance(obj, ctypes._SimpleCData):
        return Synchronized(obj, lock, ctx)
    elif isinstance(obj, ctypes.Array):
        if obj._type_ is ctypes.c_char:
            # char arrays additionally expose .value and .raw
            return SynchronizedString(obj, lock, ctx)
        return SynchronizedArray(obj, lock, ctx)
    else:
        # Arbitrary ctypes Structure: build (and cache, per structure class)
        # a wrapper class with one synchronised property per field.
        cls = type(obj)
        try:
            scls = class_cache[cls]
        except KeyError:
            names = [field[0] for field in cls._fields_]
            d = {name: make_property(name) for name in names}
            classname = 'Synchronized' + cls.__name__
            scls = class_cache[cls] = type(classname, (SynchronizedBase,), d)
        return scls(obj, lock, ctx)
|
| 123 |
+
|
| 124 |
+
#
|
| 125 |
+
# Functions for pickling/unpickling
|
| 126 |
+
#
|
| 127 |
+
|
| 128 |
+
def reduce_ctype(obj):
    """Pickle helper: reduce a shared ctypes object for process spawning."""
    assert_spawning(obj)
    if isinstance(obj, ctypes.Array):
        return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
    return rebuild_ctype, (type(obj), obj._wrapper, None)
|
| 134 |
+
|
| 135 |
+
def rebuild_ctype(type_, wrapper, length):
    """Recreate a ctypes object of ``type_`` over ``wrapper``'s shared buffer.

    ``length`` is not None for arrays (the element type is passed in).
    """
    if length is not None:
        type_ = type_ * length
    # Ensure instances of this (possibly freshly built array) type stay
    # picklable when spawning children.
    _ForkingPickler.register(type_, reduce_ctype)
    buf = wrapper.create_memoryview()
    obj = type_.from_buffer(buf)
    # Keep the buffer wrapper alive as long as the ctypes object exists.
    obj._wrapper = wrapper
    return obj
|
| 143 |
+
|
| 144 |
+
#
|
| 145 |
+
# Function to create properties
|
| 146 |
+
#
|
| 147 |
+
|
| 148 |
+
def make_property(name):
    """Return (and cache) a property that accesses ``self._obj.<name>``
    while holding the wrapper's lock."""
    try:
        return prop_cache[name]
    except KeyError:
        namespace = {}
        # template defines get<name>/set<name> and the property itself.
        exec(template % ((name,) * 7), namespace)
        prop_cache[name] = namespace[name]
        return namespace[name]
|
| 156 |
+
|
| 157 |
+
template = '''
|
| 158 |
+
def get%s(self):
|
| 159 |
+
self.acquire()
|
| 160 |
+
try:
|
| 161 |
+
return self._obj.%s
|
| 162 |
+
finally:
|
| 163 |
+
self.release()
|
| 164 |
+
def set%s(self, value):
|
| 165 |
+
self.acquire()
|
| 166 |
+
try:
|
| 167 |
+
self._obj.%s = value
|
| 168 |
+
finally:
|
| 169 |
+
self.release()
|
| 170 |
+
%s = property(get%s, set%s)
|
| 171 |
+
'''
|
| 172 |
+
|
| 173 |
+
prop_cache = {}
|
| 174 |
+
class_cache = weakref.WeakKeyDictionary()
|
| 175 |
+
|
| 176 |
+
#
|
| 177 |
+
# Synchronized wrappers
|
| 178 |
+
#
|
| 179 |
+
|
| 180 |
+
class SynchronizedBase(object):
    """Base wrapper pairing a shared ctypes object with a process lock."""

    def __init__(self, obj, lock=None, ctx=None):
        self._obj = obj
        if lock:
            self._lock = lock
        else:
            ctx = ctx or get_context(force=True)
            self._lock = ctx.RLock()
        # Expose the lock's methods directly on the wrapper.
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __enter__(self):
        # The wrapper itself acts as a context manager for its lock.
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def __reduce__(self):
        # Only picklable while spawning a child process.
        assert_spawning(self)
        return synchronized, (self._obj, self._lock)

    def get_obj(self):
        # Return the wrapped (unsynchronised) ctypes object.
        return self._obj

    def get_lock(self):
        return self._lock

    def __repr__(self):
        return '<%s wrapper for %s>' % (type(self).__name__, self._obj)
|
| 210 |
+
|
| 211 |
+
|
| 212 |
+
class Synchronized(SynchronizedBase):
    # Scalar wrapper: .value reads/writes the wrapped object under the lock.
    value = make_property('value')
|
| 214 |
+
|
| 215 |
+
|
| 216 |
+
class SynchronizedArray(SynchronizedBase):
    """Array wrapper: element access happens under the lock."""

    def __len__(self):
        return len(self._obj)

    def __getitem__(self, i):
        with self:
            return self._obj[i]

    def __setitem__(self, i, value):
        with self:
            self._obj[i] = value

    def __getslice__(self, start, stop):
        # NOTE(review): __getslice__/__setslice__ are Python 2 relics; Python 3
        # slicing goes through __getitem__/__setitem__ with a slice object.
        # Kept for API compatibility with direct callers.
        with self:
            return self._obj[start:stop]

    def __setslice__(self, start, stop, values):
        with self:
            self._obj[start:stop] = values
|
| 236 |
+
|
| 237 |
+
|
| 238 |
+
class SynchronizedString(SynchronizedArray):
    # c_char arrays additionally expose .value and .raw as synchronised
    # properties.
    value = make_property('value')
    raw = make_property('raw')
|
llava/lib/python3.10/multiprocessing/spawn.py
ADDED
|
@@ -0,0 +1,297 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Code used to start processes when using the spawn or forkserver
|
| 3 |
+
# start methods.
|
| 4 |
+
#
|
| 5 |
+
# multiprocessing/spawn.py
|
| 6 |
+
#
|
| 7 |
+
# Copyright (c) 2006-2008, R Oudkerk
|
| 8 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 9 |
+
#
|
| 10 |
+
|
| 11 |
+
import os
|
| 12 |
+
import sys
|
| 13 |
+
import runpy
|
| 14 |
+
import types
|
| 15 |
+
|
| 16 |
+
from . import get_start_method, set_start_method
|
| 17 |
+
from . import process
|
| 18 |
+
from .context import reduction
|
| 19 |
+
from . import util
|
| 20 |
+
|
| 21 |
+
__all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable',
|
| 22 |
+
'get_preparation_data', 'get_command_line', 'import_main_path']
|
| 23 |
+
|
| 24 |
+
#
|
| 25 |
+
# _python_exe is the assumed path to the python executable.
|
| 26 |
+
# People embedding Python want to modify it.
|
| 27 |
+
#
|
| 28 |
+
|
| 29 |
+
if sys.platform != 'win32':
|
| 30 |
+
WINEXE = False
|
| 31 |
+
WINSERVICE = False
|
| 32 |
+
else:
|
| 33 |
+
WINEXE = getattr(sys, 'frozen', False)
|
| 34 |
+
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")
|
| 35 |
+
|
| 36 |
+
if WINSERVICE:
|
| 37 |
+
_python_exe = os.path.join(sys.exec_prefix, 'python.exe')
|
| 38 |
+
else:
|
| 39 |
+
_python_exe = sys.executable
|
| 40 |
+
|
| 41 |
+
def set_executable(exe):
    # Override the python executable used to spawn child processes
    # (useful for embedders and frozen applications).
    global _python_exe
    _python_exe = exe

def get_executable():
    # Return the python executable used to spawn child processes.
    return _python_exe
|
| 47 |
+
|
| 48 |
+
#
|
| 49 |
+
#
|
| 50 |
+
#
|
| 51 |
+
|
| 52 |
+
def is_forking(argv):
    """Return whether the command line indicates a multiprocessing fork."""
    return len(argv) >= 2 and argv[1] == '--multiprocessing-fork'
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def freeze_support():
    '''
    Run code for process object if this is not the main process
    '''
    if is_forking(sys.argv):
        # Parse "name=value" pairs from the command line into kwargs for
        # spawn_main(); values are either the literal None or integers
        # (handles/fds/pids).
        kwds = {}
        for arg in sys.argv[2:]:
            name, value = arg.split('=')
            if value == 'None':
                kwds[name] = None
            else:
                kwds[name] = int(value)
        spawn_main(**kwds)
        # spawn_main() normally exits itself; make sure we never fall
        # through into the frozen application's own code.
        sys.exit()
|
| 76 |
+
|
| 77 |
+
|
| 78 |
+
def get_command_line(**kwds):
    '''
    Returns prefix of command line used for spawning a child process
    '''
    if getattr(sys, 'frozen', False):
        # Frozen executables re-run themselves; freeze_support() in the
        # child parses the name=value arguments.
        return ([sys.executable, '--multiprocessing-fork'] +
                ['%s=%r' % item for item in kwds.items()])
    else:
        # Regular interpreter: run spawn_main() via -c, forwarding any
        # relevant interpreter flags.
        prog = 'from multiprocessing.spawn import spawn_main; spawn_main(%s)'
        prog %= ', '.join('%s=%r' % item for item in kwds.items())
        opts = util._args_from_interpreter_flags()
        return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork']
|
| 90 |
+
|
| 91 |
+
|
| 92 |
+
def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None):
    '''
    Run code specified by data received over pipe
    '''
    assert is_forking(sys.argv), "Not forking"
    if sys.platform == 'win32':
        import msvcrt
        import _winapi

        if parent_pid is not None:
            # Duplicate the pipe handle out of the parent process; the parent
            # process handle doubles as the liveness sentinel below.
            source_process = _winapi.OpenProcess(
                _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE,
                False, parent_pid)
        else:
            source_process = None
        new_handle = reduction.duplicate(pipe_handle,
                                         source_process=source_process)
        fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
        parent_sentinel = source_process
    else:
        from . import resource_tracker
        # Adopt the parent's already-running resource tracker.
        resource_tracker._resource_tracker._fd = tracker_fd
        fd = pipe_handle
        # Duplicate so closing the fdopen()ed fd in _main() does not also
        # close the sentinel used to detect parent exit.
        parent_sentinel = os.dup(pipe_handle)
    exitcode = _main(fd, parent_sentinel)
    sys.exit(exitcode)
|
| 118 |
+
|
| 119 |
+
|
| 120 |
+
def _main(fd, parent_sentinel):
    # Unpickle preparation data and the Process object from ``fd``, prepare
    # this interpreter to match the parent, then run the process; returns
    # the process exit code.
    with os.fdopen(fd, 'rb', closefd=True) as from_parent:
        # Flag consulted by _check_not_importing_main() while the child
        # re-imports the parent's main module.
        process.current_process()._inheriting = True
        try:
            preparation_data = reduction.pickle.load(from_parent)
            prepare(preparation_data)
            self = reduction.pickle.load(from_parent)
        finally:
            del process.current_process()._inheriting
    return self._bootstrap(parent_sentinel)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
+
def _check_not_importing_main():
    # Raise if called while a spawned child is importing the main module --
    # that means the main module starts new processes at import time, which
    # would recurse forever.
    if getattr(process.current_process(), '_inheriting', False):
        raise RuntimeError('''
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.''')
|
| 148 |
+
|
| 149 |
+
|
| 150 |
+
def get_preparation_data(name):
    '''
    Return info about parent needed by child to unpickle process object
    '''
    # Refuse to run while this process is itself still bootstrapping.
    _check_not_importing_main()
    d = dict(
        log_to_stderr=util._log_to_stderr,
        authkey=process.current_process().authkey,
        )

    if util._logger is not None:
        d['log_level'] = util._logger.getEffectiveLevel()

    # Copy sys.path, replacing a '' (current-directory) entry with the
    # parent's original start directory so the child resolves imports
    # the same way even after a chdir.
    sys_path=sys.path.copy()
    try:
        i = sys_path.index('')
    except ValueError:
        pass
    else:
        sys_path[i] = process.ORIGINAL_DIR

    d.update(
        name=name,
        sys_path=sys_path,
        sys_argv=sys.argv,
        orig_dir=process.ORIGINAL_DIR,
        dir=os.getcwd(),
        start_method=get_start_method(),
        )

    # Figure out whether to initialise main in the subprocess as a module
    # or through direct execution (or to leave it alone entirely)
    main_module = sys.modules['__main__']
    main_mod_name = getattr(main_module.__spec__, "name", None)
    if main_mod_name is not None:
        d['init_main_from_name'] = main_mod_name
    elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
        main_path = getattr(main_module, '__file__', None)
        if main_path is not None:
            # Make a relative __main__ path absolute against the parent's
            # original working directory before sending it to the child.
            if (not os.path.isabs(main_path) and
                        process.ORIGINAL_DIR is not None):
                main_path = os.path.join(process.ORIGINAL_DIR, main_path)
            d['init_main_from_path'] = os.path.normpath(main_path)

    return d
|
| 195 |
+
|
| 196 |
+
#
|
| 197 |
+
# Prepare current process
|
| 198 |
+
#
|
| 199 |
+
|
| 200 |
+
old_main_modules = []
|
| 201 |
+
|
| 202 |
+
def prepare(data):
    '''
    Try to get current process ready to unpickle process object
    '''
    # Each key is optional; apply whatever the parent sent.  The order
    # matters: names/keys first, then logging, then path/argv/cwd, and
    # finally the (potentially import-triggering) __main__ fixup.
    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process().authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'start_method' in data:
        set_start_method(data['start_method'], force=True)

    # Re-create the parent's __main__ module, by module name if possible,
    # otherwise by executing its file path.
    if 'init_main_from_name' in data:
        _fixup_main_from_name(data['init_main_from_name'])
    elif 'init_main_from_path' in data:
        _fixup_main_from_path(data['init_main_from_path'])
|
| 237 |
+
|
| 238 |
+
# Multiprocessing module helpers to fix up the main module in
|
| 239 |
+
# spawned subprocesses
|
| 240 |
+
def _fixup_main_from_name(mod_name):
    # Re-create the parent's __main__ module in this child process,
    # given its module name.
    # __main__.py files for packages, directories, zip archives, etc, run
    # their "main only" code unconditionally, so we don't even try to
    # populate anything in __main__, nor do we make any changes to
    # __main__ attributes
    current_main = sys.modules['__main__']
    if mod_name == "__main__" or mod_name.endswith(".__main__"):
        return

    # If this process was forked, __main__ may already be populated
    if getattr(current_main.__spec__, "name", None) == mod_name:
        return

    # Otherwise, __main__ may contain some non-main code where we need to
    # support unpickling it properly. We rerun it as __mp_main__ and make
    # the normal __main__ an alias to that
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_module(mod_name,
                                    run_name="__mp_main__",
                                    alter_sys=True)
    main_module.__dict__.update(main_content)
    sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
|
| 263 |
+
|
| 264 |
+
|
| 265 |
+
def _fixup_main_from_path(main_path):
    # Re-create the parent's __main__ module in this child process,
    # given the path of the script that was executed.
    # If this process was forked, __main__ may already be populated
    current_main = sys.modules['__main__']

    # Unfortunately, the main ipython launch script historically had no
    # "if __name__ == '__main__'" guard, so we work around that
    # by treating it like a __main__.py file
    # See https://github.com/ipython/ipython/issues/4698
    main_name = os.path.splitext(os.path.basename(main_path))[0]
    if main_name == 'ipython':
        return

    # Otherwise, if __file__ already has the setting we expect,
    # there's nothing more to do
    if getattr(current_main, '__file__', None) == main_path:
        return

    # If the parent process has sent a path through rather than a module
    # name we assume it is an executable script that may contain
    # non-main code that needs to be executed
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_path(main_path,
                                  run_name="__mp_main__")
    main_module.__dict__.update(main_content)
    sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module
|
| 291 |
+
|
| 292 |
+
|
| 293 |
+
def import_main_path(main_path):
    '''
    Set sys.modules['__main__'] to module at main_path
    '''
    # Delegates to _fixup_main_from_path, which executes the file as
    # __mp_main__ and aliases __main__ to it.
    _fixup_main_from_path(main_path)
|
llava/lib/python3.10/multiprocessing/synchronize.py
ADDED
|
@@ -0,0 +1,394 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Module implementing synchronization primitives
|
| 3 |
+
#
|
| 4 |
+
# multiprocessing/synchronize.py
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 2006-2008, R Oudkerk
|
| 7 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 8 |
+
#
|
| 9 |
+
|
| 10 |
+
__all__ = [
|
| 11 |
+
'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
|
| 12 |
+
]
|
| 13 |
+
|
| 14 |
+
import threading
|
| 15 |
+
import sys
|
| 16 |
+
import tempfile
|
| 17 |
+
import _multiprocessing
|
| 18 |
+
import time
|
| 19 |
+
|
| 20 |
+
from . import context
|
| 21 |
+
from . import process
|
| 22 |
+
from . import util
|
| 23 |
+
|
| 24 |
+
# Try to import the mp.synchronize module cleanly, if it fails
|
| 25 |
+
# raise ImportError for platforms lacking a working sem_open implementation.
|
| 26 |
+
# See issue 3770
|
| 27 |
+
try:
|
| 28 |
+
from _multiprocessing import SemLock, sem_unlink
|
| 29 |
+
except (ImportError):
|
| 30 |
+
raise ImportError("This platform lacks a functioning sem_open" +
|
| 31 |
+
" implementation, therefore, the required" +
|
| 32 |
+
" synchronization primitives needed will not" +
|
| 33 |
+
" function, see issue 3770.")
|
| 34 |
+
|
| 35 |
+
#
|
| 36 |
+
# Constants
|
| 37 |
+
#
|
| 38 |
+
|
| 39 |
+
RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
|
| 40 |
+
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX
|
| 41 |
+
|
| 42 |
+
#
|
| 43 |
+
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
|
| 44 |
+
#
|
| 45 |
+
|
| 46 |
+
class SemLock(object):
    # Wrapper around `_multiprocessing.SemLock` adding random naming,
    # pickling support (for spawn) and resource-tracker registration.

    # Shared generator of unique, random name suffixes.
    _rand = tempfile._RandomNameSequence()

    def __init__(self, kind, value, maxvalue, *, ctx):
        if ctx is None:
            ctx = context._default_context.get_context()
        name = ctx.get_start_method()
        # With fork (or on Windows) the handle is inherited directly, so
        # the named semaphore can be unlinked immediately after creation.
        unlink_now = sys.platform == 'win32' or name == 'fork'
        # Retry a bounded number of times in case a random name collides
        # with an existing semaphore.
        for i in range(100):
            try:
                sl = self._semlock = _multiprocessing.SemLock(
                    kind, value, maxvalue, self._make_name(),
                    unlink_now)
            except FileExistsError:
                pass
            else:
                break
        else:
            raise FileExistsError('cannot find name for semaphore')

        util.debug('created semlock with handle %s' % sl.handle)
        self._make_methods()

        if sys.platform != 'win32':
            def _after_fork(obj):
                obj._semlock._after_fork()
            util.register_after_fork(self, _after_fork)

        if self._semlock.name is not None:
            # We only get here if we are on Unix with forking
            # disabled. When the object is garbage collected or the
            # process shuts down we unlink the semaphore name
            from .resource_tracker import register
            register(self._semlock.name, "semaphore")
            util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
                          exitpriority=0)

    @staticmethod
    def _cleanup(name):
        # Unlink the named semaphore and drop it from the resource tracker.
        from .resource_tracker import unregister
        sem_unlink(name)
        unregister(name, "semaphore")

    def _make_methods(self):
        # Re-bind acquire/release to the underlying C object; called after
        # __init__ and again after unpickling.
        self.acquire = self._semlock.acquire
        self.release = self._semlock.release

    def __enter__(self):
        return self._semlock.__enter__()

    def __exit__(self, *args):
        return self._semlock.__exit__(*args)

    def __getstate__(self):
        # Only picklable while a child process is being spawned.
        context.assert_spawning(self)
        sl = self._semlock
        if sys.platform == 'win32':
            # Duplicate the Windows handle into the child process.
            h = context.get_spawning_popen().duplicate_for_child(sl.handle)
        else:
            h = sl.handle
        return (h, sl.kind, sl.maxvalue, sl.name)

    def __setstate__(self, state):
        self._semlock = _multiprocessing.SemLock._rebuild(*state)
        util.debug('recreated blocker with handle %r' % state[0])
        self._make_methods()

    @staticmethod
    def _make_name():
        # Process-specific prefix plus a random suffix.
        return '%s-%s' % (process.current_process()._config['semprefix'],
                          next(SemLock._rand))
|
| 118 |
+
|
| 119 |
+
#
|
| 120 |
+
# Semaphore
|
| 121 |
+
#
|
| 122 |
+
|
| 123 |
+
class Semaphore(SemLock):
    # Counting semaphore with an effectively unbounded maximum value.

    def __init__(self, value=1, *, ctx):
        SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx)

    def get_value(self):
        # Current value of the underlying semaphore.
        return self._semlock._get_value()

    def __repr__(self):
        try:
            value = self._semlock._get_value()
        except Exception:
            # e.g. not supported on this platform, or object not yet set up
            value = 'unknown'
        return '<%s(value=%s)>' % (self.__class__.__name__, value)
|
| 137 |
+
|
| 138 |
+
#
|
| 139 |
+
# Bounded semaphore
|
| 140 |
+
#
|
| 141 |
+
|
| 142 |
+
class BoundedSemaphore(Semaphore):
    # Semaphore whose value may never exceed its initial value.

    def __init__(self, value=1, *, ctx):
        # maxvalue == initial value makes over-release an error.
        SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx)

    def __repr__(self):
        try:
            value = self._semlock._get_value()
        except Exception:
            value = 'unknown'
        return '<%s(value=%s, maxvalue=%s)>' % \
               (self.__class__.__name__, value, self._semlock.maxvalue)
|
| 154 |
+
|
| 155 |
+
#
|
| 156 |
+
# Non-recursive lock
|
| 157 |
+
#
|
| 158 |
+
|
| 159 |
+
class Lock(SemLock):
    # Non-recursive lock: a semaphore with initial value 1 and maxvalue 1.

    def __init__(self, *, ctx):
        SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx)

    def __repr__(self):
        # Best-effort description of the current owner, for debugging only.
        try:
            if self._semlock._is_mine():
                name = process.current_process().name
                if threading.current_thread().name != 'MainThread':
                    name += '|' + threading.current_thread().name
            elif self._semlock._get_value() == 1:
                name = 'None'
            elif self._semlock._count() > 0:
                name = 'SomeOtherThread'
            else:
                name = 'SomeOtherProcess'
        except Exception:
            name = 'unknown'
        return '<%s(owner=%s)>' % (self.__class__.__name__, name)
|
| 179 |
+
|
| 180 |
+
#
|
| 181 |
+
# Recursive lock
|
| 182 |
+
#
|
| 183 |
+
|
| 184 |
+
class RLock(SemLock):
    # Recursive lock: may be acquired multiple times by the same
    # process/thread, and must be released as many times.

    def __init__(self, *, ctx):
        SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx)

    def __repr__(self):
        # Best-effort owner and recursion count, for debugging only.
        try:
            if self._semlock._is_mine():
                name = process.current_process().name
                if threading.current_thread().name != 'MainThread':
                    name += '|' + threading.current_thread().name
                count = self._semlock._count()
            elif self._semlock._get_value() == 1:
                name, count = 'None', 0
            elif self._semlock._count() > 0:
                name, count = 'SomeOtherThread', 'nonzero'
            else:
                name, count = 'SomeOtherProcess', 'nonzero'
        except Exception:
            name, count = 'unknown', 'unknown'
        return '<%s(%s, %s)>' % (self.__class__.__name__, name, count)
|
| 205 |
+
|
| 206 |
+
#
|
| 207 |
+
# Condition variable
|
| 208 |
+
#
|
| 209 |
+
|
| 210 |
+
class Condition(object):
    # Condition variable built from a lock plus three counting semaphores:
    #   _sleeping_count  - number of waiters that have gone to sleep
    #   _woken_count     - number of waiters that have woken up
    #   _wait_semaphore  - released once per waiter to be woken
    # The exact acquire/release ordering below is load-bearing; do not
    # reorder statements.

    def __init__(self, lock=None, *, ctx):
        self._lock = lock or ctx.RLock()
        self._sleeping_count = ctx.Semaphore(0)
        self._woken_count = ctx.Semaphore(0)
        self._wait_semaphore = ctx.Semaphore(0)
        self._make_methods()

    def __getstate__(self):
        # Only picklable while a child process is being spawned.
        context.assert_spawning(self)
        return (self._lock, self._sleeping_count,
                self._woken_count, self._wait_semaphore)

    def __setstate__(self, state):
        (self._lock, self._sleeping_count,
         self._woken_count, self._wait_semaphore) = state
        self._make_methods()

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def _make_methods(self):
        # Delegate acquire/release to the underlying lock.
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __repr__(self):
        try:
            # Waiters currently asleep = slept - woken.
            num_waiters = (self._sleeping_count._semlock._get_value() -
                           self._woken_count._semlock._get_value())
        except Exception:
            num_waiters = 'unknown'
        return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters)

    def wait(self, timeout=None):
        assert self._lock._semlock._is_mine(), \
               'must acquire() condition before using wait()'

        # indicate that this thread is going to sleep
        self._sleeping_count.release()

        # release lock (completely, remembering the recursion depth so it
        # can be restored afterwards)
        count = self._lock._semlock._count()
        for i in range(count):
            self._lock.release()

        try:
            # wait for notification or timeout
            return self._wait_semaphore.acquire(True, timeout)
        finally:
            # indicate that this thread has woken
            self._woken_count.release()

            # reacquire lock
            for i in range(count):
                self._lock.acquire()

    def notify(self, n=1):
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(
            False), ('notify: Should not have been able to acquire '
                     + '_wait_semaphore')

        # to take account of timeouts since last notify*() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res, ('notify: Bug in sleeping_count.acquire'
                         + '- res should not be False')

        sleepers = 0
        while sleepers < n and self._sleeping_count.acquire(False):
            self._wait_semaphore.release()        # wake up one sleeper
            sleepers += 1

        if sleepers:
            for i in range(sleepers):
                self._woken_count.acquire()       # wait for a sleeper to wake

            # rezero wait_semaphore in case some timeouts just happened
            while self._wait_semaphore.acquire(False):
                pass

    def notify_all(self):
        self.notify(n=sys.maxsize)

    def wait_for(self, predicate, timeout=None):
        # Repeatedly wait until predicate() is true or the timeout expires;
        # returns the last value of predicate().
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = time.monotonic() + timeout
        else:
            endtime = None
            waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - time.monotonic()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result
|
| 316 |
+
|
| 317 |
+
#
|
| 318 |
+
# Event
|
| 319 |
+
#
|
| 320 |
+
|
| 321 |
+
class Event(object):
    # Event implemented with a condition plus a semaphore used as the
    # flag: value 1 means "set", value 0 means "clear".

    def __init__(self, *, ctx):
        self._cond = ctx.Condition(ctx.Lock())
        self._flag = ctx.Semaphore(0)

    def is_set(self):
        with self._cond:
            # Non-blocking acquire probes the flag; release restores it.
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

    def set(self):
        with self._cond:
            # Drain-then-release leaves the flag at exactly 1.
            self._flag.acquire(False)
            self._flag.release()
            self._cond.notify_all()

    def clear(self):
        with self._cond:
            # Non-blocking acquire drops the flag to 0 (no-op if already 0).
            self._flag.acquire(False)

    def wait(self, timeout=None):
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            # Re-check the flag: True only if the event is set now
            # (a timeout leaves it clear).
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False
|
| 355 |
+
|
| 356 |
+
#
|
| 357 |
+
# Barrier
|
| 358 |
+
#
|
| 359 |
+
|
| 360 |
+
class Barrier(threading.Barrier):
    # Barrier whose _state and _count live in shared memory (two C ints in
    # a heap BufferWrapper) so every process observes the same barrier
    # state; the property pairs below redirect threading.Barrier's
    # attribute accesses into that shared array.

    def __init__(self, parties, action=None, timeout=None, *, ctx):
        import struct
        from .heap import BufferWrapper
        wrapper = BufferWrapper(struct.calcsize('i') * 2)
        cond = ctx.Condition()
        # Deliberately bypass threading.Barrier.__init__ and initialise
        # via our own __setstate__.
        self.__setstate__((parties, action, timeout, cond, wrapper))
        self._state = 0
        self._count = 0

    def __setstate__(self, state):
        (self._parties, self._action, self._timeout,
         self._cond, self._wrapper) = state
        # View the shared buffer as an array of C ints.
        self._array = self._wrapper.create_memoryview().cast('i')

    def __getstate__(self):
        return (self._parties, self._action, self._timeout,
                self._cond, self._wrapper)

    @property
    def _state(self):
        return self._array[0]

    @_state.setter
    def _state(self, value):
        self._array[0] = value

    @property
    def _count(self):
        return self._array[1]

    @_count.setter
    def _count(self, value):
        self._array[1] = value
|
llava/lib/python3.10/multiprocessing/util.py
ADDED
|
@@ -0,0 +1,489 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#
|
| 2 |
+
# Module providing various facilities to other parts of the package
|
| 3 |
+
#
|
| 4 |
+
# multiprocessing/util.py
|
| 5 |
+
#
|
| 6 |
+
# Copyright (c) 2006-2008, R Oudkerk
|
| 7 |
+
# Licensed to PSF under a Contributor Agreement.
|
| 8 |
+
#
|
| 9 |
+
|
| 10 |
+
import os
|
| 11 |
+
import itertools
|
| 12 |
+
import sys
|
| 13 |
+
import weakref
|
| 14 |
+
import atexit
|
| 15 |
+
import threading # we want threading to install it's
|
| 16 |
+
# cleanup function before multiprocessing does
|
| 17 |
+
from subprocess import _args_from_interpreter_flags
|
| 18 |
+
|
| 19 |
+
from . import process
|
| 20 |
+
|
| 21 |
+
__all__ = [
|
| 22 |
+
'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
|
| 23 |
+
'log_to_stderr', 'get_temp_dir', 'register_after_fork',
|
| 24 |
+
'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
|
| 25 |
+
'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING',
|
| 26 |
+
]
|
| 27 |
+
|
| 28 |
+
#
|
| 29 |
+
# Logging
|
| 30 |
+
#
|
| 31 |
+
|
| 32 |
+
NOTSET = 0
|
| 33 |
+
SUBDEBUG = 5
|
| 34 |
+
DEBUG = 10
|
| 35 |
+
INFO = 20
|
| 36 |
+
SUBWARNING = 25
|
| 37 |
+
|
| 38 |
+
LOGGER_NAME = 'multiprocessing'
|
| 39 |
+
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'
|
| 40 |
+
|
| 41 |
+
_logger = None
|
| 42 |
+
_log_to_stderr = False
|
| 43 |
+
|
| 44 |
+
def sub_debug(msg, *args):
    # Log at SUBDEBUG level; no-op until get_logger() has created _logger.
    if _logger:
        _logger.log(SUBDEBUG, msg, *args)
|
| 47 |
+
|
| 48 |
+
def debug(msg, *args):
    # Log at DEBUG level; no-op until get_logger() has created _logger.
    if _logger:
        _logger.log(DEBUG, msg, *args)
|
| 51 |
+
|
| 52 |
+
def info(msg, *args):
    # Log at INFO level; no-op until get_logger() has created _logger.
    if _logger:
        _logger.log(INFO, msg, *args)
|
| 55 |
+
|
| 56 |
+
def sub_warning(msg, *args):
    # Log at SUBWARNING level; no-op until get_logger() has created _logger.
    if _logger:
        _logger.log(SUBWARNING, msg, *args)
|
| 59 |
+
|
| 60 |
+
def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging

    # Uses logging's private module lock to create the logger exactly once.
    logging._acquireLock()
    try:
        if not _logger:

            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0

            # XXX multiprocessing should cleanup before logging
            # Re-register _exit_function so it sits at the end of the
            # atexit list relative to logging's own shutdown hook.
            if hasattr(atexit, 'unregister'):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))

    finally:
        logging._releaseLock()

    return _logger
|
| 86 |
+
|
| 87 |
+
def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    global _log_to_stderr
    import logging

    logger = get_logger()
    formatter = logging.Formatter(DEFAULT_LOGGING_FORMAT)
    handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    if level:
        logger.setLevel(level)
    _log_to_stderr = True
    # Returns the module-global _logger (the same object `logger` refers to
    # after get_logger() above).
    return _logger
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
# Abstract socket support
|
| 107 |
+
|
| 108 |
+
def _platform_supports_abstract_sockets():
|
| 109 |
+
if sys.platform == "linux":
|
| 110 |
+
return True
|
| 111 |
+
if hasattr(sys, 'getandroidapilevel'):
|
| 112 |
+
return True
|
| 113 |
+
return False
|
| 114 |
+
|
| 115 |
+
|
| 116 |
+
def is_abstract_socket_namespace(address):
    """Return True if *address* names a socket in the Linux abstract
    namespace, i.e. it begins with a NUL byte/character.

    A falsy address is never abstract.  Raises TypeError for anything
    other than bytes or str.
    """
    if not address:
        return False
    if isinstance(address, str):
        return address.startswith("\0")
    if isinstance(address, bytes):
        return address.startswith(b"\0")
    raise TypeError(f'address type of {address!r} unrecognized')
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
abstract_sockets_supported = _platform_supports_abstract_sockets()
|
| 127 |
+
|
| 128 |
+
#
|
| 129 |
+
# Function returning a temp directory which will be removed on exit
|
| 130 |
+
#
|
| 131 |
+
|
| 132 |
+
def _remove_temp_dir(rmtree, tempdir):
    # Finalizer callback: delete the process temp directory and clear the
    # cached path so get_temp_dir() would create a fresh one.
    # rmtree is passed in explicitly to keep a strong reference to it
    # during interpreter shutdown (see get_temp_dir()).
    rmtree(tempdir)

    current_process = process.current_process()
    # current_process() can be None if the finalizer is called
    # late during Python finalization
    if current_process is not None:
        current_process._config['tempdir'] = None
|
| 140 |
+
|
| 141 |
+
def get_temp_dir():
    # get name of a temp directory which will be automatically cleaned up
    # (created lazily, cached in the process config, removed at exit)
    tempdir = process.current_process()._config.get('tempdir')
    if tempdir is None:
        import shutil, tempfile
        tempdir = tempfile.mkdtemp(prefix='pymp-')
        info('created temp directory %s', tempdir)
        # keep a strong reference to shutil.rmtree(), since the finalizer
        # can be called late during Python shutdown
        Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir),
                 exitpriority=-100)
        process.current_process()._config['tempdir'] = tempdir
    return tempdir
|
| 154 |
+
|
| 155 |
+
#
|
| 156 |
+
# Support for reinitialization of objects when bootstrapping a child process
|
| 157 |
+
#
|
| 158 |
+
|
| 159 |
+
_afterfork_registry = weakref.WeakValueDictionary()
|
| 160 |
+
_afterfork_counter = itertools.count()
|
| 161 |
+
|
| 162 |
+
def _run_after_forkers():
    # Invoke every callback registered via register_after_fork(), in
    # registration order (the tuple keys sort on the monotonically
    # increasing counter first).  Exceptions are logged, not propagated.
    items = list(_afterfork_registry.items())
    items.sort()
    for (index, ident, func), obj in items:
        try:
            func(obj)
        except Exception as e:
            info('after forker raised exception %s', e)
|
| 170 |
+
|
| 171 |
+
def register_after_fork(obj, func):
    # Arrange for func(obj) to be called in a child after os.fork().
    # The registry holds obj weakly, so the entry disappears once obj is
    # garbage collected.
    _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj
|
| 173 |
+
|
| 174 |
+
#
|
| 175 |
+
# Finalization using weakrefs
|
| 176 |
+
#
|
| 177 |
+
|
| 178 |
+
_finalizer_registry = {}
|
| 179 |
+
_finalizer_counter = itertools.count()
|
| 180 |
+
|
| 181 |
+
|
| 182 |
+
class Finalize(object):
    '''
    Class which supports object finalization using weakrefs
    '''
    def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
        # A non-None exitpriority also makes the finalizer run at interpreter
        # exit (see _run_finalizers); it must be an int so keys sort cleanly.
        if (exitpriority is not None) and not isinstance(exitpriority,int):
            raise TypeError(
                "Exitpriority ({0!r}) must be None or int, not {1!s}".format(
                    exitpriority, type(exitpriority)))

        if obj is not None:
            # Registering self as the weakref callback makes this finalizer
            # fire automatically when obj is garbage collected.
            self._weakref = weakref.ref(obj, self)
        elif exitpriority is None:
            # With neither an object nor an exit priority the finalizer could
            # never be triggered.
            raise ValueError("Without object, exitpriority cannot be None")

        self._callback = callback
        self._args = args
        self._kwargs = kwargs or {}
        # (exitpriority, sequence number): sorting keys in reverse yields
        # highest priority first, newest first within the same priority.
        self._key = (exitpriority, next(_finalizer_counter))
        self._pid = os.getpid()

        _finalizer_registry[self._key] = self

    def __call__(self, wr=None,
                 # Need to bind these locally because the globals can have
                 # been cleared at shutdown
                 _finalizer_registry=_finalizer_registry,
                 sub_debug=sub_debug, getpid=os.getpid):
        '''
        Run the callback unless it has already been called or cancelled
        '''
        try:
            # Deleting the key first gives the call-at-most-once guarantee:
            # a second invocation lands in the KeyError branch below.
            del _finalizer_registry[self._key]
        except KeyError:
            sub_debug('finalizer no longer registered')
        else:
            if self._pid != getpid():
                # Finalizer was inherited across a fork; the resource belongs
                # to the original process, so do nothing here.
                sub_debug('finalizer ignored because different process')
                res = None
            else:
                sub_debug('finalizer calling %s with args %s and kwargs %s',
                          self._callback, self._args, self._kwargs)
                res = self._callback(*self._args, **self._kwargs)
            # Drop all references so the callback and its arguments can be
            # collected promptly.
            self._weakref = self._callback = self._args = \
                            self._kwargs = self._key = None
            return res

    def cancel(self):
        '''
        Cancel finalization of the object
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            # Already run or already cancelled: nothing to do.
            pass
        else:
            self._weakref = self._callback = self._args = \
                            self._kwargs = self._key = None

    def still_active(self):
        '''
        Return whether this finalizer is still waiting to invoke callback
        '''
        return self._key in _finalizer_registry

    def __repr__(self):
        try:
            obj = self._weakref()
        except (AttributeError, TypeError):
            # No weakref was ever created, or the finalizer already ran and
            # cleared its attributes.
            obj = None

        if obj is None:
            return '<%s object, dead>' % self.__class__.__name__

        x = '<%s object, callback=%s' % (
            self.__class__.__name__,
            getattr(self._callback, '__name__', self._callback))
        if self._args:
            x += ', args=' + str(self._args)
        if self._kwargs:
            x += ', kwargs=' + str(self._kwargs)
        if self._key[0] is not None:
            x += ', exitpriority=' + str(self._key[0])
        return x + '>'
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def _run_finalizers(minpriority=None):
    '''
    Run all finalizers whose exit priority is not None and at least minpriority

    Finalizers with highest priority are called first; finalizers with
    the same priority will be called in reverse order of creation.
    '''
    if _finalizer_registry is None:
        # This function may be called after this module's globals are
        # destroyed.  See the _exit_function function in this module for more
        # notes.
        return

    def _wanted(key):
        # Select keys with a real exit priority, optionally >= minpriority.
        priority = key[0]
        if priority is None:
            return False
        return minpriority is None or priority >= minpriority

    # Careful: _finalizer_registry may be mutated while this function is
    # running (either by a GC run or by another thread).  Snapshot the keys
    # first: list(_finalizer_registry) is atomic, while iterating items()
    # would not be.
    selected = sorted((k for k in list(_finalizer_registry) if _wanted(k)),
                      reverse=True)

    for key in selected:
        entry = _finalizer_registry.get(key)
        if entry is None:
            # key was removed from the registry in the meantime
            continue
        sub_debug('calling %s', entry)
        try:
            entry()
        except Exception:
            import traceback
            traceback.print_exc()

    if minpriority is None:
        _finalizer_registry.clear()
|
| 307 |
+
|
| 308 |
+
#
|
| 309 |
+
# Clean up on exit
|
| 310 |
+
#
|
| 311 |
+
|
| 312 |
+
def is_exiting():
    '''
    Returns true if the process is shutting down
    '''
    # _exiting becomes None once module globals have been torn down, which
    # also counts as "shutting down".
    flag = _exiting
    return bool(flag) or flag is None

# Set to True by _exit_function(); replaced by None at interpreter teardown.
_exiting = False
|
| 319 |
+
|
| 320 |
+
def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
                   active_children=process.active_children,
                   current_process=process.current_process):
    # We hold on to references to functions in the arglist due to the
    # situation described below, where this function is called after this
    # module's globals are destroyed.

    global _exiting

    if not _exiting:
        _exiting = True

        info('process shutting down')
        debug('running all "atexit" finalizers with priority >= 0')
        # First pass: only finalizers registered with a non-negative exit
        # priority; daemon children are still alive at this point.
        _run_finalizers(0)

        if current_process() is not None:
            # We check if the current process is None here because if
            # it's None, any call to ``active_children()`` will raise
            # an AttributeError (active_children winds up trying to
            # get attributes from util._current_process).  One
            # situation where this can happen is if someone has
            # manipulated sys.modules, causing this module to be
            # garbage collected.  The destructor for the module type
            # then replaces all values in the module dict with None.
            # For instance, after setuptools runs a test it replaces
            # sys.modules with a copy created earlier.  See issues
            # #9775 and #15881.  Also related: #4106, #9205, and
            # #9207.

            for p in active_children():
                if p.daemon:
                    info('calling terminate() for daemon %s', p.name)
                    p._popen.terminate()

            for p in active_children():
                info('calling join() for process %s', p.name)
                p.join()

        debug('running the remaining "atexit" finalizers')
        # Second pass: everything left in the registry, regardless of
        # priority.
        _run_finalizers()
|
| 361 |
+
|
| 362 |
+
# Ensure _exit_function runs during normal interpreter shutdown.
atexit.register(_exit_function)
|
| 363 |
+
|
| 364 |
+
#
|
| 365 |
+
# Some fork aware types
|
| 366 |
+
#
|
| 367 |
+
|
| 368 |
+
class ForkAwareThreadLock(object):
    '''A threading.Lock wrapper that reinitializes its lock after a fork,
    so a lock held by another thread in the parent cannot deadlock the
    child.'''

    def __init__(self):
        lock = threading.Lock()
        self._lock = lock
        # Expose the underlying lock's methods directly.
        self.acquire = lock.acquire
        self.release = lock.release
        register_after_fork(self, ForkAwareThreadLock._at_fork_reinit)

    def _at_fork_reinit(self):
        # Reset the lock to an unlocked state in the forked child.
        self._lock._at_fork_reinit()

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *exc_info):
        return self._lock.__exit__(*exc_info)
|
| 383 |
+
|
| 384 |
+
|
| 385 |
+
class ForkAwareLocal(threading.local):
    '''Thread-local storage whose contents are wiped in a forked child.'''

    def __init__(self):
        def _clear(target):
            # Drop all attributes stored on this instance.
            target.__dict__.clear()
        register_after_fork(self, _clear)

    def __reduce__(self):
        # Pickle as a fresh, empty instance of the same type.
        return type(self), ()
|
| 390 |
+
|
| 391 |
+
#
|
| 392 |
+
# Close fds except those specified
|
| 393 |
+
#
|
| 394 |
+
|
| 395 |
+
try:
    # Upper bound (exclusive) on file descriptor numbers, used by
    # close_all_fds_except().
    MAXFD = os.sysconf("SC_OPEN_MAX")
except Exception:
    # sysconf may be unavailable (non-POSIX) or may fail; fall back to a
    # conservative default.
    MAXFD = 256
|
| 399 |
+
|
| 400 |
+
def close_all_fds_except(fds):
    '''Close every file descriptor below MAXFD except those listed in fds.'''
    # Sentinels -1 and MAXFD bracket the kept descriptors so each gap
    # between consecutive kept fds can be closed with one closerange() call.
    keep = sorted(list(fds) + [-1, MAXFD])
    assert keep[-1] == MAXFD, 'fd too large'
    for lower, upper in zip(keep, keep[1:]):
        os.closerange(lower + 1, upper)
|
| 406 |
+
#
|
| 407 |
+
# Close sys.stdin and replace stdin with os.devnull
|
| 408 |
+
#
|
| 409 |
+
|
| 410 |
+
def _close_stdin():
    '''Close sys.stdin and rebind it to a read-only os.devnull stream.'''
    if sys.stdin is None:
        # Nothing to close (e.g. running without a console).
        return

    try:
        sys.stdin.close()
    except (OSError, ValueError):
        # Best effort: stdin may already be closed or detached.
        pass

    try:
        devnull_fd = os.open(os.devnull, os.O_RDONLY)
        try:
            # closefd=False: the raw fd stays open for the stream's lifetime.
            sys.stdin = open(devnull_fd, encoding="utf-8", closefd=False)
        except:
            os.close(devnull_fd)
            raise
    except (OSError, ValueError):
        # If devnull cannot be opened, leave stdin closed.
        pass
|
| 428 |
+
|
| 429 |
+
#
|
| 430 |
+
# Flush standard streams, if any
|
| 431 |
+
#
|
| 432 |
+
|
| 433 |
+
def _flush_std_streams():
|
| 434 |
+
try:
|
| 435 |
+
sys.stdout.flush()
|
| 436 |
+
except (AttributeError, ValueError):
|
| 437 |
+
pass
|
| 438 |
+
try:
|
| 439 |
+
sys.stderr.flush()
|
| 440 |
+
except (AttributeError, ValueError):
|
| 441 |
+
pass
|
| 442 |
+
|
| 443 |
+
#
|
| 444 |
+
# Start a program with only specified fds kept open
|
| 445 |
+
#
|
| 446 |
+
|
| 447 |
+
def spawnv_passfds(path, args, passfds):
    # Fork and exec *path* with argv *args*, keeping only the descriptors in
    # *passfds* open in the child; returns the child's pid.
    import _posixsubprocess
    # fork_exec requires a sorted tuple of ints for the kept descriptors.
    passfds = tuple(sorted(map(int, passfds)))
    # The error pipe lets the child report an exec failure to the parent.
    errpipe_read, errpipe_write = os.pipe()
    try:
        # NOTE(review): this positional argument list matches the private
        # _posixsubprocess.fork_exec signature of this CPython version; it
        # changes between releases, so do not reorder.
        return _posixsubprocess.fork_exec(
            args, [os.fsencode(path)], True, passfds, None, None,
            -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,
            False, False, None, None, None, -1, None)
    finally:
        os.close(errpipe_read)
        os.close(errpipe_write)
|
| 459 |
+
|
| 460 |
+
|
| 461 |
+
def close_fds(*fds):
    """Close each file descriptor given as an argument"""
    for descriptor in fds:
        os.close(descriptor)
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
def _cleanup_tests():
    """Cleanup multiprocessing resources when multiprocessing tests
    completed."""

    from test import support

    # cleanup multiprocessing
    process._cleanup()

    # Stop the ForkServer process if it's running
    from multiprocessing import forkserver
    forkserver._forkserver._stop()

    # Stop the ResourceTracker process if it's running
    from multiprocessing import resource_tracker
    resource_tracker._resource_tracker._stop()

    # bpo-37421: Explicitly call _run_finalizers() to remove immediately
    # temporary directories created by multiprocessing.util.get_temp_dir().
    _run_finalizers()
    # Force a collection pass so weakref-driven cleanup fires before reaping.
    support.gc_collect()

    support.reap_children()
|
parrot/lib/python3.10/site-packages/torch/include/ATen/ops/_ctc_loss_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
// @generated by torchgen/gen.py from DispatchKeyFunction.h

// NB: The implementing C++ file is RegisterDispatchKey.cpp

// The only #includes we need are for custom classes that have defaults in the C++ API
#include <c10/core/MemoryFormat.h>
#include <c10/core/Scalar.h>
#include <ATen/core/Reduction.h>

// Forward declarations of any types needed in the operator signatures.
// We can't directly include these classes because it will cause circular include dependencies.
// This file is included by TensorBody.h, which defines the Tensor class.
#include <ATen/core/ATen_fwd.h>

namespace at {

namespace cpu {

// CPU-dispatch declarations for _ctc_loss; overload with sequence lengths
// given as integer arrays.
TORCH_API ::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, at::IntArrayRef input_lengths, at::IntArrayRef target_lengths, int64_t blank=0, bool zero_infinity=false);
// Overload taking the sequence lengths as Tensors instead.
TORCH_API ::std::tuple<at::Tensor,at::Tensor> _ctc_loss(const at::Tensor & log_probs, const at::Tensor & targets, const at::Tensor & input_lengths, const at::Tensor & target_lengths, int64_t blank=0, bool zero_infinity=false);

} // namespace cpu
} // namespace at
|