Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +2 -0
- wemm/lib/python3.10/site-packages/lit/BooleanExpression.py +350 -0
- wemm/lib/python3.10/site-packages/lit/LitTestCase.py +65 -0
- wemm/lib/python3.10/site-packages/lit/ProgressBar.py +326 -0
- wemm/lib/python3.10/site-packages/lit/ShCommands.py +113 -0
- wemm/lib/python3.10/site-packages/lit/ShUtil.py +272 -0
- wemm/lib/python3.10/site-packages/lit/TestRunner.py +2278 -0
- wemm/lib/python3.10/site-packages/lit/TestTimes.py +40 -0
- wemm/lib/python3.10/site-packages/lit/TestingConfig.py +267 -0
- wemm/lib/python3.10/site-packages/lit/__init__.py +8 -0
- wemm/lib/python3.10/site-packages/lit/builtin_commands/__init__.py +0 -0
- wemm/lib/python3.10/site-packages/lit/builtin_commands/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/lit/builtin_commands/__pycache__/cat.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/lit/builtin_commands/cat.py +71 -0
- wemm/lib/python3.10/site-packages/lit/builtin_commands/diff.py +307 -0
- wemm/lib/python3.10/site-packages/lit/cl_arguments.py +376 -0
- wemm/lib/python3.10/site-packages/lit/discovery.py +289 -0
- wemm/lib/python3.10/site-packages/lit/display.py +169 -0
- wemm/lib/python3.10/site-packages/lit/main.py +357 -0
- wemm/lib/python3.10/site-packages/lit/reports.py +280 -0
- wemm/lib/python3.10/site-packages/lit/util.py +550 -0
- wemm/lib/python3.10/site-packages/lit/worker.py +94 -0
- wemm/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL +5 -0
- wemm/lib/python3.10/site-packages/requests/__init__.py +184 -0
- wemm/lib/python3.10/site-packages/requests/__pycache__/_internal_utils.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/requests/__pycache__/help.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/requests/__pycache__/status_codes.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/requests/__version__.py +14 -0
- wemm/lib/python3.10/site-packages/requests/_internal_utils.py +50 -0
- wemm/lib/python3.10/site-packages/requests/adapters.py +719 -0
- wemm/lib/python3.10/site-packages/requests/api.py +157 -0
- wemm/lib/python3.10/site-packages/requests/auth.py +314 -0
- wemm/lib/python3.10/site-packages/requests/compat.py +94 -0
- wemm/lib/python3.10/site-packages/requests/cookies.py +561 -0
- wemm/lib/python3.10/site-packages/requests/exceptions.py +151 -0
- wemm/lib/python3.10/site-packages/requests/help.py +134 -0
- wemm/lib/python3.10/site-packages/requests/hooks.py +33 -0
- wemm/lib/python3.10/site-packages/requests/models.py +1037 -0
- wemm/lib/python3.10/site-packages/requests/packages.py +23 -0
- wemm/lib/python3.10/site-packages/requests/sessions.py +831 -0
- wemm/lib/python3.10/site-packages/requests/status_codes.py +128 -0
- wemm/lib/python3.10/site-packages/requests/structures.py +99 -0
- wemm/lib/python3.10/site-packages/requests/utils.py +1096 -0
- wemm/lib/python3.10/site-packages/safetensors/__init__.py +9 -0
- wemm/lib/python3.10/site-packages/safetensors/__init__.pyi +149 -0
- wemm/lib/python3.10/site-packages/safetensors/__pycache__/__init__.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/safetensors/__pycache__/flax.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/safetensors/__pycache__/mlx.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/safetensors/__pycache__/numpy.cpython-310.pyc +0 -0
- wemm/lib/python3.10/site-packages/safetensors/__pycache__/paddle.cpython-310.pyc +0 -0
.gitattributes
CHANGED
|
@@ -189,3 +189,5 @@ wemm/lib/python3.10/site-packages/triton/third_party/cuda/lib/libdevice.10.bc fi
|
|
| 189 |
wemm/lib/python3.10/site-packages/pillow.libs/libbrotlicommon-5b2eba61.so.1.1.0 filter=lfs diff=lfs merge=lfs -text
|
| 190 |
wemm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/modeling_deta.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 191 |
wemm/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/setuptools-75.3.0-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
| 189 |
wemm/lib/python3.10/site-packages/pillow.libs/libbrotlicommon-5b2eba61.so.1.1.0 filter=lfs diff=lfs merge=lfs -text
|
| 190 |
wemm/lib/python3.10/site-packages/transformers/models/deta/__pycache__/modeling_deta.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
| 191 |
wemm/lib/python3.10/site-packages/virtualenv/seed/wheels/embed/setuptools-75.3.0-py3-none-any.whl filter=lfs diff=lfs merge=lfs -text
|
| 192 |
+
wemm/lib/python3.10/site-packages/safetensors/_safetensors_rust.abi3.so filter=lfs diff=lfs merge=lfs -text
|
| 193 |
+
wemm/lib/python3.10/site-packages/transformers/models/speecht5/__pycache__/modeling_speecht5.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
|
wemm/lib/python3.10/site-packages/lit/BooleanExpression.py
ADDED
|
@@ -0,0 +1,350 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import re
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
class BooleanExpression:
    # A simple evaluator of boolean expressions.
    #
    # Grammar:
    #   expr         :: or_expr
    #   or_expr      :: and_expr ('||' and_expr)*
    #   and_expr     :: not_expr ('&&' not_expr)*
    #   not_expr     :: '!' not_expr
    #                   '(' or_expr ')'
    #                   match_expr
    #   match_expr   :: braced_regex
    #                   identifier
    #                   braced_regex match_expr
    #                   identifier match_expr
    #   identifier   :: [-+=._a-zA-Z0-9]+
    #   braced_regex :: '{{' python_regex '}}'

    @staticmethod
    def evaluate(string, variables):
        """Evaluate `string` as a boolean expression over `variables`.

        Identifiers present in `variables` are true, and so is the
        literal 'true'; a `{{regex}}` term is true when it fully matches
        any variable.  All other identifiers are false.  Returns True or
        False; raises ValueError on a syntax error, with the offending
        expression appended to the message.
        """
        try:
            return BooleanExpression(string, set(variables)).parseAll()
        except ValueError as e:
            raise ValueError(str(e) + ("\nin expression: %r" % string))

    #####

    def __init__(self, string, variables):
        self.tokens = BooleanExpression.tokenize(string)
        self.variables = variables
        self.variables.add("true")
        self.value = None
        self.token = None

    # Singleton end-of-expression marker.
    END = object()

    # Tokenization pattern: one token (parenthesis, operator, or a
    # match-expression made of identifier chars and {{...}} regexes),
    # then the untokenized remainder.
    Pattern = re.compile(
        r"\A\s*([()]|&&|\|\||!|(?:[-+=._a-zA-Z0-9]+|\{\{.+?\}\})+)\s*(.*)\Z"
    )

    @staticmethod
    def tokenize(string):
        """Yield the tokens of `string`, finishing with the END sentinel."""
        remainder = string
        while True:
            m = BooleanExpression.Pattern.match(remainder)
            if m is None:
                # The pattern never matches the empty string, so an
                # exhausted input is the normal termination case.
                if remainder:
                    raise ValueError("couldn't parse text: %r" % remainder)
                yield BooleanExpression.END
                return
            yield m.group(1)
            remainder = m.group(2)

    def quote(self, token):
        """Render `token` for use in an error message."""
        return "<end of expression>" if token is BooleanExpression.END else repr(token)

    def accept(self, t):
        """Consume the current token and return True iff it equals `t`."""
        if self.token != t:
            return False
        self.token = next(self.tokens)
        return True

    def expect(self, t):
        """Consume the current token, raising ValueError unless it is `t`."""
        if self.token != t:
            raise ValueError(
                "expected: %s\nhave: %s" % (self.quote(t), self.quote(self.token))
            )
        # Never advance past the END sentinel.
        if self.token is not BooleanExpression.END:
            self.token = next(self.tokens)

    @staticmethod
    def isMatchExpression(token):
        """True if `token` is a match expression rather than punctuation."""
        if token is BooleanExpression.END:
            return False
        return token not in ("&&", "||", "!", "(", ")")

    def parseMATCH(self):
        """Evaluate the current match-expression token against the variables."""
        pieces = []
        for part in re.split(r"(\{\{.+?\}\})", self.token):
            if not part:
                continue
            if part.startswith("{{"):
                assert part.endswith("}}")
                # Embed the user-supplied regex as a non-capturing group.
                pieces.append("(?:%s)" % part[2:-2])
            else:
                pieces.append(re.escape(part))
        pattern = re.compile("".join(pieces))
        self.value = any(pattern.fullmatch(v) for v in self.variables)
        self.token = next(self.tokens)

    def parseNOT(self):
        if self.accept("!"):
            self.parseNOT()
            self.value = not self.value
            return
        if self.accept("("):
            self.parseOR()
            self.expect(")")
            return
        if not BooleanExpression.isMatchExpression(self.token):
            raise ValueError(
                "expected: '!', '(', '{{', or identifier\nhave: %s"
                % self.quote(self.token)
            )
        self.parseMATCH()

    def parseAND(self):
        self.parseNOT()
        while self.accept("&&"):
            lhs = self.value
            self.parseNOT()
            # Technically the wrong associativity, but it does not
            # matter for this limited expression grammar.
            self.value = lhs and self.value

    def parseOR(self):
        self.parseAND()
        while self.accept("||"):
            lhs = self.value
            self.parseAND()
            # Technically the wrong associativity, but it does not
            # matter for this limited expression grammar.
            self.value = lhs or self.value

    def parseAll(self):
        """Parse and evaluate the whole token stream; return the result."""
        self.token = next(self.tokens)
        self.parseOR()
        self.expect(BooleanExpression.END)
        return self.value
|
| 155 |
+
|
| 156 |
+
|
| 157 |
+
#######
|
| 158 |
+
# Tests
|
| 159 |
+
|
| 160 |
+
import unittest
|
| 161 |
+
|
| 162 |
+
|
| 163 |
+
class TestBooleanExpression(unittest.TestCase):
    """Unit tests for BooleanExpression.evaluate.

    Covers identifier/regex matching, the boolean operators, and the
    exact ValueError messages produced for malformed expressions.
    """

    def test_variables(self):
        # Plain identifiers and {{regex}} fragments evaluated against a
        # fixed variable set; mixed identifier/regex tokens form one
        # concatenated match expression.
        variables = {"its-true", "false-lol-true", "under_score", "e=quals", "d1g1ts"}
        self.assertTrue(BooleanExpression.evaluate("true", variables))
        self.assertTrue(BooleanExpression.evaluate("its-true", variables))
        self.assertTrue(BooleanExpression.evaluate("false-lol-true", variables))
        self.assertTrue(BooleanExpression.evaluate("under_score", variables))
        self.assertTrue(BooleanExpression.evaluate("e=quals", variables))
        self.assertTrue(BooleanExpression.evaluate("d1g1ts", variables))
        self.assertTrue(BooleanExpression.evaluate("{{its.+}}", variables))
        self.assertTrue(BooleanExpression.evaluate("{{false-[lo]+-true}}", variables))
        self.assertTrue(
            BooleanExpression.evaluate("{{(true|false)-lol-(true|false)}}", variables)
        )
        self.assertTrue(BooleanExpression.evaluate("d1g{{[0-9]}}ts", variables))
        self.assertTrue(BooleanExpression.evaluate("d1g{{[0-9]}}t{{[a-z]}}", variables))
        self.assertTrue(
            BooleanExpression.evaluate("{{d}}1g{{[0-9]}}t{{[a-z]}}", variables)
        )
        self.assertTrue(BooleanExpression.evaluate("d1{{(g|1)+}}ts", variables))

        # Names absent from the variable set (and case mismatches, and
        # regexes requiring extra trailing characters) are false.
        self.assertFalse(BooleanExpression.evaluate("false", variables))
        self.assertFalse(BooleanExpression.evaluate("True", variables))
        self.assertFalse(BooleanExpression.evaluate("true-ish", variables))
        self.assertFalse(BooleanExpression.evaluate("not_true", variables))
        self.assertFalse(BooleanExpression.evaluate("tru", variables))
        self.assertFalse(BooleanExpression.evaluate("{{its-true.+}}", variables))

    def test_matching(self):
        # Realistic lit feature expressions mixing identifiers,
        # {{regex}} terms, and boolean operators.
        expr1 = "linux && (target={{aarch64-.+}} || target={{x86_64-.+}})"
        self.assertTrue(
            BooleanExpression.evaluate(
                expr1, {"linux", "target=x86_64-unknown-linux-gnu"}
            )
        )
        self.assertFalse(
            BooleanExpression.evaluate(
                expr1, {"linux", "target=i386-unknown-linux-gnu"}
            )
        )

        expr2 = "use_system_cxx_lib && target={{.+}}-apple-macosx10.{{9|10|11|12}} && !no-exceptions"
        self.assertTrue(
            BooleanExpression.evaluate(
                expr2, {"use_system_cxx_lib", "target=arm64-apple-macosx10.12"}
            )
        )
        self.assertFalse(
            BooleanExpression.evaluate(
                expr2,
                {
                    "use_system_cxx_lib",
                    "target=arm64-apple-macosx10.12",
                    "no-exceptions",
                },
            )
        )
        self.assertFalse(
            BooleanExpression.evaluate(
                expr2, {"use_system_cxx_lib", "target=arm64-apple-macosx10.15"}
            )
        )

    def test_operators(self):
        # Truth tables for ||, &&, and !, plus grouping/precedence
        # ('&&' binds tighter than '||').
        self.assertTrue(BooleanExpression.evaluate("true || true", {}))
        self.assertTrue(BooleanExpression.evaluate("true || false", {}))
        self.assertTrue(BooleanExpression.evaluate("false || true", {}))
        self.assertFalse(BooleanExpression.evaluate("false || false", {}))

        self.assertTrue(BooleanExpression.evaluate("true && true", {}))
        self.assertFalse(BooleanExpression.evaluate("true && false", {}))
        self.assertFalse(BooleanExpression.evaluate("false && true", {}))
        self.assertFalse(BooleanExpression.evaluate("false && false", {}))

        self.assertFalse(BooleanExpression.evaluate("!true", {}))
        self.assertTrue(BooleanExpression.evaluate("!false", {}))

        self.assertTrue(BooleanExpression.evaluate(" ((!((false) )) ) ", {}))
        self.assertTrue(BooleanExpression.evaluate("true && (true && (true))", {}))
        self.assertTrue(BooleanExpression.evaluate("!false && !false && !! !false", {}))
        self.assertTrue(BooleanExpression.evaluate("false && false || true", {}))
        self.assertTrue(BooleanExpression.evaluate("(false && false) || true", {}))
        self.assertFalse(BooleanExpression.evaluate("false && (false || true)", {}))

    # Evaluate boolean expression `expr`.
    # Fail if it does not throw a ValueError containing the text `error`.
    def checkException(self, expr, error):
        # NOTE(review): self.fail() inside the try raises the unittest
        # failureException, which the `except BaseException` clause below
        # re-reports as "wrong exception" -- the message is misleading in
        # that case, but the test still fails as intended.
        try:
            BooleanExpression.evaluate(expr, {})
            self.fail("expression %r didn't cause an exception" % expr)
        except ValueError as e:
            if -1 == str(e).find(error):
                self.fail(
                    (
                        "expression %r caused the wrong ValueError\n"
                        + "actual error was:\n%s\n"
                        + "expected error was:\n%s\n"
                    )
                    % (expr, e, error)
                )
        except BaseException as e:
            self.fail(
                (
                    "expression %r caused the wrong exception; actual "
                    + "exception was: \n%r"
                )
                % (expr, e)
            )

    def test_errors(self):
        # Each malformed expression must produce a ValueError whose
        # message pinpoints the unexpected token and echoes the input.
        self.checkException(
            "ba#d", "couldn't parse text: '#d'\n" + "in expression: 'ba#d'"
        )

        self.checkException(
            "true and true",
            "expected: <end of expression>\n"
            + "have: 'and'\n"
            + "in expression: 'true and true'",
        )

        self.checkException(
            "|| true",
            "expected: '!', '(', '{{', or identifier\n"
            + "have: '||'\n"
            + "in expression: '|| true'",
        )

        self.checkException(
            "true &&",
            "expected: '!', '(', '{{', or identifier\n"
            + "have: <end of expression>\n"
            + "in expression: 'true &&'",
        )

        self.checkException(
            "",
            "expected: '!', '(', '{{', or identifier\n"
            + "have: <end of expression>\n"
            + "in expression: ''",
        )

        self.checkException("*", "couldn't parse text: '*'\n" + "in expression: '*'")

        self.checkException(
            "no wait stop",
            "expected: <end of expression>\n"
            + "have: 'wait'\n"
            + "in expression: 'no wait stop'",
        )

        self.checkException(
            "no-$-please",
            "couldn't parse text: '$-please'\n" + "in expression: 'no-$-please'",
        )

        self.checkException(
            "(((true && true) || true)",
            "expected: ')'\n"
            + "have: <end of expression>\n"
            + "in expression: '(((true && true) || true)'",
        )

        self.checkException(
            "true (true)",
            "expected: <end of expression>\n"
            + "have: '('\n"
            + "in expression: 'true (true)'",
        )

        self.checkException(
            "( )",
            "expected: '!', '(', '{{', or identifier\n"
            + "have: ')'\n"
            + "in expression: '( )'",
        )

        self.checkException(
            "abc{{def", "couldn't parse text: '{{def'\n" + "in expression: 'abc{{def'"
        )

        self.checkException(
            "{{}}", "couldn't parse text: '{{}}'\n" + "in expression: '{{}}'"
        )
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
if __name__ == "__main__":
    # Allow running this module directly to execute the embedded tests.
    unittest.main()
|
wemm/lib/python3.10/site-packages/lit/LitTestCase.py
ADDED
|
@@ -0,0 +1,65 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import unittest
|
| 2 |
+
|
| 3 |
+
import lit.discovery
|
| 4 |
+
import lit.LitConfig
|
| 5 |
+
import lit.worker
|
| 6 |
+
|
| 7 |
+
"""
|
| 8 |
+
TestCase adaptor for providing a Python 'unittest' compatible interface to 'lit'
|
| 9 |
+
tests.
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class UnresolvedError(RuntimeError):
    """Raised when a wrapped lit test reports an UNRESOLVED result."""
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class LitTestCase(unittest.TestCase):
    """Adapt a single discovered lit test to the `unittest` protocol.

    Wraps one lit test object plus the shared lit configuration; running
    the case executes the test via the lit worker and translates the
    result code into unittest success/failure/error.
    """

    def __init__(self, test, lit_config):
        unittest.TestCase.__init__(self)
        self._test = test  # the lit test object to execute
        self._lit_config = lit_config  # shared lit configuration

    def id(self):
        # Report the lit test's full (suite-qualified) name as the id.
        return self._test.getFullName()

    def shortDescription(self):
        return self._test.getFullName()

    def runTest(self):
        # Run the test.
        result = lit.worker._execute(self._test, self._lit_config)

        # Adapt the result to unittest.
        # NOTE(review): `lit.Test` is not imported by this module; this
        # attribute access relies on another import (e.g. lit.discovery)
        # having loaded the submodule -- TODO confirm.
        if result.code is lit.Test.UNRESOLVED:
            raise UnresolvedError(result.output)
        elif result.code.isFailure:
            self.fail(result.output)
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def load_test_suite(inputs):
    """Discover lit tests under `inputs` and return a unittest suite.

    `inputs` is a list of paths to lit test suites or individual tests.
    A default LitConfig is constructed (no valgrind, smart order, no
    parameters) and discovery is delegated to lit.discovery.
    """
    import platform

    windows = platform.system() == "Windows"

    # Create the global config object.
    lit_config = lit.LitConfig.LitConfig(
        progname="lit",
        path=[],
        quiet=False,
        useValgrind=False,
        valgrindLeakCheck=False,
        valgrindArgs=[],
        noExecute=False,
        debug=False,
        isWindows=windows,
        order="smart",
        params={},
    )

    # Perform test discovery.
    tests = lit.discovery.find_tests_for_inputs(lit_config, inputs)
    test_adaptors = [LitTestCase(t, lit_config) for t in tests]

    # Return a unittest test suite which just runs the tests in order.
    return unittest.TestSuite(test_adaptors)
|
wemm/lib/python3.10/site-packages/lit/ProgressBar.py
ADDED
|
@@ -0,0 +1,326 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python
|
| 2 |
+
|
| 3 |
+
# Source: http://code.activestate.com/recipes/475116/, with
|
| 4 |
+
# modifications by Daniel Dunbar.
|
| 5 |
+
|
| 6 |
+
import sys, re, time
|
| 7 |
+
|
| 8 |
+
|
| 9 |
+
def to_bytes(str):
    """Return *str* encoded as UTF-8 binary data."""
    encoded = str.encode("utf-8")
    return encoded
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class TerminalController:
    """
    A class that can be used to portably generate formatted output to
    a terminal.

    `TerminalController` defines a set of instance variables whose
    values are initialized to the control sequence necessary to
    perform a given action.  These can be simply included in normal
    output to the terminal:

        >>> term = TerminalController()
        >>> print('This is '+term.GREEN+'green'+term.NORMAL)

    Alternatively, the `render()` method can be used, which replaces
    '${action}' with the string required to perform 'action':

        >>> term = TerminalController()
        >>> print(term.render('This is ${GREEN}green${NORMAL}'))

    If the terminal doesn't support a given action, then the value of
    the corresponding instance variable will be set to ''.  As a
    result, the above code will still work on terminals that do not
    support color, except that their output will not be colored.
    Also, this means that you can test whether the terminal supports a
    given action by simply testing the truth value of the
    corresponding instance variable:

        >>> term = TerminalController()
        >>> if term.CLEAR_SCREEN:
        ...     print('This terminal supports clearing the screen.')

    Finally, if the width and height of the terminal are known, then
    they will be stored in the `COLS` and `LINES` attributes.
    """

    # Cursor movement:
    BOL = ""  #: Move the cursor to the beginning of the line
    UP = ""  #: Move the cursor up one line
    DOWN = ""  #: Move the cursor down one line
    LEFT = ""  #: Move the cursor left one char
    RIGHT = ""  #: Move the cursor right one char

    # Deletion:
    CLEAR_SCREEN = ""  #: Clear the screen and move to home position
    CLEAR_EOL = ""  #: Clear to the end of the line.
    CLEAR_BOL = ""  #: Clear to the beginning of the line.
    CLEAR_EOS = ""  #: Clear to the end of the screen

    # Output modes:
    BOLD = ""  #: Turn on bold mode
    BLINK = ""  #: Turn on blink mode
    DIM = ""  #: Turn on half-bright mode
    REVERSE = ""  #: Turn on reverse-video mode
    NORMAL = ""  #: Turn off all modes

    # Cursor display:
    HIDE_CURSOR = ""  #: Make the cursor invisible
    SHOW_CURSOR = ""  #: Make the cursor visible

    # Terminal size:
    COLS = None  #: Width of the terminal (None for unknown)
    LINES = None  #: Height of the terminal (None for unknown)

    # Foreground colors:
    BLACK = BLUE = GREEN = CYAN = RED = MAGENTA = YELLOW = WHITE = ""

    # Background colors:
    BG_BLACK = BG_BLUE = BG_GREEN = BG_CYAN = ""
    BG_RED = BG_MAGENTA = BG_YELLOW = BG_WHITE = ""

    # ATTRIBUTE=terminfo_capability pairs resolved in __init__.
    # NOTE(review): 'cinvis' looks like a typo for the standard terminfo
    # capability name 'civis' (cursor invisible) -- TODO confirm; as
    # written, HIDE_CURSOR would resolve to '' on most terminals.
    _STRING_CAPABILITIES = """
    BOL=cr UP=cuu1 DOWN=cud1 LEFT=cub1 RIGHT=cuf1
    CLEAR_SCREEN=clear CLEAR_EOL=el CLEAR_BOL=el1 CLEAR_EOS=ed BOLD=bold
    BLINK=blink DIM=dim REVERSE=rev UNDERLINE=smul NORMAL=sgr0
    HIDE_CURSOR=cinvis SHOW_CURSOR=cnorm""".split()
    _COLORS = """BLACK BLUE GREEN CYAN RED MAGENTA YELLOW WHITE""".split()
    _ANSICOLORS = "BLACK RED GREEN YELLOW BLUE MAGENTA CYAN WHITE".split()

    def __init__(self, term_stream=sys.stdout):
        """
        Create a `TerminalController` and initialize its attributes
        with appropriate values for the current terminal.
        `term_stream` is the stream that will be used for terminal
        output; if this stream is not a tty, then the terminal is
        assumed to be a dumb terminal (i.e., have no capabilities).
        """
        # Curses isn't available on all platforms; on any failure we fall
        # back to the class-level empty-string capabilities.
        try:
            import curses
        except:
            return

        # If the stream isn't a tty, then assume it has no capabilities.
        if not term_stream.isatty():
            return

        # Check the terminal type. If we fail, then assume that the
        # terminal has no capabilities.
        try:
            curses.setupterm()
        except:
            return

        # Look up numeric capabilities.
        self.COLS = curses.tigetnum("cols")
        self.LINES = curses.tigetnum("lines")
        # 'xenl' is the "eat newline glitch" terminfo flag.
        self.XN = curses.tigetflag("xenl")

        # Look up string capabilities.
        for capability in self._STRING_CAPABILITIES:
            (attrib, cap_name) = capability.split("=")
            setattr(self, attrib, self._tigetstr(cap_name) or "")

        # Colors: prefer the legacy setf/setb capabilities, then let the
        # ANSI variants (setaf/setab) overwrite them when available.
        set_fg = self._tigetstr("setf")
        if set_fg:
            for i, color in zip(range(len(self._COLORS)), self._COLORS):
                setattr(self, color, self._tparm(set_fg, i))
        set_fg_ansi = self._tigetstr("setaf")
        if set_fg_ansi:
            for i, color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
                setattr(self, color, self._tparm(set_fg_ansi, i))
        set_bg = self._tigetstr("setb")
        if set_bg:
            for i, color in zip(range(len(self._COLORS)), self._COLORS):
                setattr(self, "BG_" + color, self._tparm(set_bg, i))
        set_bg_ansi = self._tigetstr("setab")
        if set_bg_ansi:
            for i, color in zip(range(len(self._ANSICOLORS)), self._ANSICOLORS):
                setattr(self, "BG_" + color, self._tparm(set_bg_ansi, i))

    def _tparm(self, arg, index):
        # Instantiate a parameterized capability string for `index`.
        import curses

        return curses.tparm(to_bytes(arg), index).decode("utf-8") or ""

    def _tigetstr(self, cap_name):
        # String capabilities can include "delays" of the form "$<2>".
        # For any modern terminal, we should be able to just ignore
        # these, so strip them out.
        import curses

        cap = curses.tigetstr(cap_name)
        if cap is None:
            cap = ""
        else:
            cap = cap.decode("utf-8")
        return re.sub(r"\$<\d+>[/*]?", "", cap)

    def render(self, template):
        """
        Replace each $-substitutions in the given template string with
        the corresponding terminal control string (if it's defined) or
        '' (if it's not).
        """
        return re.sub(r"\$\$|\${\w+}", self._render_sub, template)

    def _render_sub(self, match):
        # '$$' is an escaped literal '$'; '${NAME}' maps to the
        # attribute of the same name.
        s = match.group()
        if s == "$$":
            return s
        else:
            return getattr(self, s[2:-1])
|
| 177 |
+
|
| 178 |
+
|
| 179 |
+
#######################################################################
|
| 180 |
+
# Example use case: progress bar
|
| 181 |
+
#######################################################################
|
| 182 |
+
|
| 183 |
+
|
| 184 |
+
class SimpleProgressBar:
    """Progress display that needs no terminal capabilities.

    Emits one growing line of the form:
      'Header: 0.. 10.. 20.. ...'
    """

    def __init__(self, header):
        self.header = header
        # Slot (0..50) already drawn; None until the header is printed.
        self.atIndex = None

    def update(self, percent, message):
        """Advance the bar to *percent* (0.0-1.0); *message* is unused."""
        if self.atIndex is None:
            sys.stdout.write(self.header)
            self.atIndex = 0

        target = int(percent * 50)
        if target == self.atIndex:
            return

        # Each group of five slots renders as a 2-digit percent label
        # (slots 0-1), two dots (slots 2-3), and a trailing space (slot 4).
        for pos in range(self.atIndex, target):
            phase = pos % 5
            if phase == 0:
                sys.stdout.write("%2d" % (pos * 2))
            elif phase == 1:
                pass  # Second character of the label was already written.
            elif phase < 4:
                sys.stdout.write(".")
            else:
                sys.stdout.write(" ")
        sys.stdout.flush()
        self.atIndex = target

    def clear(self, interrupted):
        """Terminate the line (unless interrupted) and reset for reuse."""
        if self.atIndex is not None and not interrupted:
            sys.stdout.write("\n")
            sys.stdout.flush()
            self.atIndex = None
|
| 223 |
+
|
| 224 |
+
|
| 225 |
+
class ProgressBar:
    """
    A 3-line progress bar, which looks like::

      Header
       20% [===========----------------------------------]
         progress message

    The progress bar is colored, if the terminal supports color
    output; and adjusts to the width of the terminal.
    """

    # Templates rendered through term.render(); the ${NAME} placeholders
    # are terminal capability substitutions.
    BAR = "%s${%s}[${BOLD}%s%s${NORMAL}${%s}]${NORMAL}%s"
    HEADER = "${BOLD}${CYAN}%s${NORMAL}\n\n"

    def __init__(self, term, header, useETA=True):
        """Bind to *term* (a TerminalController-like object) and draw state.

        Raises ValueError when the terminal lacks the cursor-movement
        capabilities the 3-line display depends on.
        """
        self.term = term
        if not (self.term.CLEAR_EOL and self.term.UP and self.term.BOL):
            # Bug fix: corrected "dispaly" typo in the user-facing message.
            raise ValueError(
                "Terminal isn't capable enough -- you "
                "should use a simpler progress display."
            )
        self.BOL = self.term.BOL  # BoL from col#79
        self.XNL = "\n"  # Newline from col#79
        if self.term.COLS:
            self.width = self.term.COLS
            if not self.term.XN:
                self.BOL = self.term.UP + self.term.BOL
                self.XNL = ""  # Cursor must be fed to the next line
        else:
            # No width reported; fall back to a conservative default.
            self.width = 75
        self.barColor = "GREEN"
        self.header = self.term.render(self.HEADER % header.center(self.width))
        self.cleared = 1  # true if we haven't drawn the bar yet
        self.useETA = useETA
        if self.useETA:
            self.startTime = time.time()
        # self.update(0, '')

    def update(self, percent, message):
        """Redraw the bar at *percent* (0.0-1.0) with *message* below it."""
        if self.cleared:
            sys.stdout.write(self.header)
            self.cleared = 0
        prefix = "%3d%% " % (percent * 100,)
        suffix = ""
        if self.useETA:
            # Only estimate once some measurable progress/time has accrued.
            elapsed = time.time() - self.startTime
            if percent > 0.0001 and elapsed > 1:
                total = elapsed / percent
                eta = total - elapsed
                h = eta // 3600.0
                m = (eta // 60) % 60
                s = eta % 60
                suffix = " ETA: %02d:%02d:%02d" % (h, m, s)
        barWidth = self.width - len(prefix) - len(suffix) - 2
        n = int(barWidth * percent)
        # Pad short messages to full width; truncate long ones from the left.
        if len(message) < self.width:
            message = message + " " * (self.width - len(message))
        else:
            message = "... " + message[-(self.width - 4) :]
        bc = self.barColor
        bar = self.BAR % (prefix, bc, "=" * n, "-" * (barWidth - n), bc, suffix)
        bar = self.term.render(bar)
        sys.stdout.write(
            self.BOL
            + self.term.UP
            + self.term.CLEAR_EOL
            + bar
            + self.XNL
            + self.term.CLEAR_EOL
            + message
        )
        if not self.term.XN:
            sys.stdout.flush()

    def clear(self, interrupted):
        """Erase the 3-line display; *interrupted* handles the ^C echo line."""
        if not self.cleared:
            sys.stdout.write(
                self.BOL
                + self.term.CLEAR_EOL
                + self.term.UP
                + self.term.CLEAR_EOL
                + self.term.UP
                + self.term.CLEAR_EOL
            )
            if interrupted:  # ^C creates extra line. Gobble it up!
                sys.stdout.write(self.term.UP + self.term.CLEAR_EOL)
                sys.stdout.write("^C")
            sys.stdout.flush()
            self.cleared = 1
|
| 315 |
+
|
| 316 |
+
|
| 317 |
+
def test():
    """Manual smoke test: animate one full sweep of the progress bar."""
    controller = TerminalController()
    bar = ProgressBar(controller, "Tests")
    for step in range(101):
        bar.update(step / 100.0, str(step))
        time.sleep(0.3)
|
| 323 |
+
|
| 324 |
+
|
| 325 |
+
# Manual demo entry point: running this module directly animates a
# sample progress bar on the current terminal.
if __name__ == "__main__":
    test()
|
wemm/lib/python3.10/site-packages/lit/ShCommands.py
ADDED
|
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class Command:
    """One shell command: an argv list plus (operator, target) redirects."""

    def __init__(self, args, redirects):
        self.args = list(args)
        self.redirects = list(redirects)

    def __repr__(self):
        return "Command(%r, %r)" % (self.args, self.redirects)

    def __eq__(self, other):
        return isinstance(other, Command) and (self.args, self.redirects) == (
            other.args,
            other.redirects,
        )

    def toShell(self, file):
        """Write a shell-syntax rendering of this command to *file*.

        Raises NotImplementedError for arguments that cannot be quoted
        with simple single- or double-quoting.
        """
        for arg in self.args:
            # Prefer single quotes; fall back to double quotes only when
            # the argument itself contains a single quote.
            if "'" not in arg:
                quoted = "'%s'" % arg
            elif '"' not in arg and "$" not in arg:
                quoted = '"%s"' % arg
            else:
                raise NotImplementedError("Unable to quote %r" % arg)
            file.write(quoted)

            # For debugging / validation: re-lex the quoted form and check
            # it round-trips to the original argument.
            import ShUtil

            dequoted = list(ShUtil.ShLexer(quoted).lex())
            if dequoted != [arg]:
                raise NotImplementedError("Unable to quote %r" % arg)

        for r in self.redirects:
            # 1-tuple ops are plain ('>',); 2-tuples carry an fd prefix
            # as in ('>', 2) for '2>'.
            if len(r[0]) == 1:
                file.write("%s '%s'" % (r[0][0], r[1]))
            else:
                file.write("%s%s '%s'" % (r[0][1], r[0][0], r[1]))
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class GlobItem:
    """A shell word containing unquoted glob characters, expanded lazily."""

    def __init__(self, pattern):
        self.pattern = pattern

    def __repr__(self):
        return self.pattern

    def __eq__(self, other):
        # Bug fix: this previously tested isinstance(other, Command), so
        # two identical GlobItems never compared equal (and comparing
        # against a Command raised AttributeError on .pattern).
        if not isinstance(other, GlobItem):
            return False

        return self.pattern == other.pattern

    def resolve(self, cwd):
        """Expand the pattern relative to *cwd*.

        Returns the list of matching paths, or [pattern] when nothing
        matches (mirroring POSIX shells, which leave an unmatched glob
        word as-is).
        """
        import glob
        import os

        if os.path.isabs(self.pattern):
            abspath = self.pattern
        else:
            abspath = os.path.join(cwd, self.pattern)
        results = glob.glob(abspath)
        return [self.pattern] if len(results) == 0 else results
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
class Pipeline:
    """A '|'-joined sequence of Commands, optionally negated with '!'."""

    def __init__(self, commands, negate=False, pipe_err=False):
        self.commands = commands
        self.negate = negate      # leading '!' inverts the exit status
        self.pipe_err = pipe_err  # stderr is piped as well ('|&' / pipefail)

    def __repr__(self):
        return "Pipeline(%r, %r, %r)" % (self.commands, self.negate, self.pipe_err)

    def __eq__(self, other):
        if not isinstance(other, Pipeline):
            return False

        # Bug fix: the third slot previously compared self.pipe_err with
        # itself, so pipelines differing only in pipe_err compared equal.
        return (self.commands, self.negate, self.pipe_err) == (
            other.commands,
            other.negate,
            other.pipe_err,
        )

    def toShell(self, file, pipefail=False):
        """Write a shell-syntax rendering of the pipeline to *file*."""
        if pipefail != self.pipe_err:
            raise ValueError('Inconsistent "pipefail" attribute!')
        if self.negate:
            file.write("! ")
        for cmd in self.commands:
            cmd.toShell(file)
            if cmd is not self.commands[-1]:
                file.write("|\n ")
|
| 92 |
+
|
| 93 |
+
|
| 94 |
+
class Seq:
    """Two pipelines joined by a sequencing operator: ';', '&', '||', '&&'."""

    def __init__(self, lhs, op, rhs):
        assert op in (";", "&", "||", "&&")
        self.op = op
        self.lhs = lhs
        self.rhs = rhs

    def __repr__(self):
        return "Seq(%r, %r, %r)" % (self.lhs, self.op, self.rhs)

    def __eq__(self, other):
        return isinstance(other, Seq) and (self.lhs, self.op, self.rhs) == (
            other.lhs,
            other.op,
            other.rhs,
        )

    def toShell(self, file, pipefail=False):
        """Render "lhs OP\\nrhs" to *file*, forwarding the pipefail flag."""
        self.lhs.toShell(file, pipefail)
        file.write(" %s\n" % self.op)
        self.rhs.toShell(file, pipefail)
|
wemm/lib/python3.10/site-packages/lit/ShUtil.py
ADDED
|
@@ -0,0 +1,272 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import absolute_import
|
| 2 |
+
import itertools
|
| 3 |
+
|
| 4 |
+
import lit.util
|
| 5 |
+
from lit.ShCommands import Command, GlobItem, Pipeline, Seq
|
| 6 |
+
|
| 7 |
+
|
| 8 |
+
class ShLexer:
    """Tokenizer for a simplified 'sh' command language.

    lex() yields a stream of tokens: plain str for ordinary words,
    GlobItem for words containing unquoted glob characters, 1-tuples
    like ('|',) for operators, and 2-tuples like ('>', 2) for
    fd-prefixed redirections such as '2>'.
    """

    def __init__(self, data, win32Escapes=False):
        self.data = data  # full input string
        self.pos = 0      # current scan position into data
        self.end = len(data)
        # With win32Escapes, '\\' is NOT an escape outside quotes, so
        # Windows path separators survive unmangled.
        self.win32Escapes = win32Escapes

    def eat(self):
        """Consume and return the character at the current position."""
        c = self.data[self.pos]
        self.pos += 1
        return c

    def look(self):
        """Return the next character without consuming it."""
        return self.data[self.pos]

    def maybe_eat(self, c):
        """
        maybe_eat(c) - Consume the character c if it is the next character,
        returning True if a character was consumed."""
        if self.data[self.pos] == c:
            self.pos += 1
            return True
        return False

    def lex_arg_fast(self, c):
        """Fast path for lexing one plain word; returns None when the word
        contains any special character so the caller falls back to
        lex_arg_slow."""
        # Get the leading whitespace free section.
        chunk = self.data[self.pos - 1 :].split(None, 1)[0]

        # If it has special characters, the fast path failed.
        if (
            "|" in chunk
            or "&" in chunk
            or "<" in chunk
            or ">" in chunk
            or "'" in chunk
            or '"' in chunk
            or ";" in chunk
            or "\\" in chunk
        ):
            return None

        self.pos = self.pos - 1 + len(chunk)
        return GlobItem(chunk) if "*" in chunk or "?" in chunk else chunk

    def lex_arg_slow(self, c):
        """General word lexer starting from already-eaten character *c*:
        handles quoting, escapes, globs, and fd-prefixed redirections."""
        if c in "'\"":
            str = self.lex_arg_quoted(c)
        else:
            str = c
        unquoted_glob_char = False
        quoted_glob_char = False
        while self.pos != self.end:
            c = self.look()
            if c.isspace() or c in "|&;":
                break
            elif c in "><":
                # This is an annoying case; we treat '2>' as a single token so
                # we don't have to track whitespace tokens.

                # If the parse string isn't an integer, do the usual thing.
                if not str.isdigit():
                    break

                # Otherwise, lex the operator and convert to a redirection
                # token.
                num = int(str)
                tok = self.lex_one_token()
                assert isinstance(tok, tuple) and len(tok) == 1
                return (tok[0], num)
            elif c == '"' or c == "'":
                self.eat()
                quoted_arg = self.lex_arg_quoted(c)
                if "*" in quoted_arg or "?" in quoted_arg:
                    quoted_glob_char = True
                str += quoted_arg
            elif not self.win32Escapes and c == "\\":
                # Outside of a string, '\\' escapes everything.
                self.eat()
                if self.pos == self.end:
                    lit.util.warning(
                        "escape at end of quoted argument in: %r" % self.data
                    )
                    return str
                str += self.eat()
            elif c in "*?":
                unquoted_glob_char = True
                str += self.eat()
            else:
                str += self.eat()
        # If a quote character is present, lex_arg_quoted will remove the quotes
        # and append the argument directly. This causes a problem when the
        # quoted portion contains a glob character, as the character will no
        # longer be treated literally. If glob characters occur *only* inside
        # of quotes, then we can handle this by not globbing at all, and if
        # glob characters occur *only* outside of quotes, we can still glob just
        # fine. But if a glob character occurs both inside and outside of
        # quotes this presents a problem. In practice this is such an obscure
        # edge case that it doesn't seem worth the added complexity to support.
        # By adding an assertion, it means some bot somewhere will catch this
        # and flag the user of a non-portable test (which could almost certainly
        # be re-written to work correctly without triggering this).
        assert not (quoted_glob_char and unquoted_glob_char)
        return GlobItem(str) if unquoted_glob_char else str

    def lex_arg_quoted(self, delim):
        """Lex the body of a quoted string (opening *delim* already eaten)
        and return its unquoted contents."""
        str = ""
        while self.pos != self.end:
            c = self.eat()
            if c == delim:
                return str
            elif c == "\\" and delim == '"':
                # Inside a '"' quoted string, '\\' only escapes the quote
                # character and backslash, otherwise it is preserved.
                if self.pos == self.end:
                    lit.util.warning(
                        "escape at end of quoted argument in: %r" % self.data
                    )
                    return str
                c = self.eat()
                if c == '"':  # escaped double quote
                    str += '"'
                elif c == "\\":
                    str += "\\"
                else:
                    # Any other escape is preserved verbatim, backslash and all.
                    str += "\\" + c
            else:
                str += c
        # Ran off the end of the input without a closing delimiter.
        lit.util.warning("missing quote character in %r" % self.data)
        return str

    def lex_arg_checked(self, c):
        """Debug helper: run both the fast and slow paths and verify that
        they produce the same token and final position."""
        pos = self.pos
        res = self.lex_arg_fast(c)
        end = self.pos

        self.pos = pos
        reference = self.lex_arg_slow(c)
        if res is not None:
            if res != reference:
                raise ValueError("Fast path failure: %r != %r" % (res, reference))
            if self.pos != end:
                raise ValueError("Fast path failure: %r != %r" % (self.pos, end))
        return reference

    def lex_arg(self, c):
        """Lex one argument word beginning with already-eaten char *c*."""
        return self.lex_arg_fast(c) or self.lex_arg_slow(c)

    def lex_one_token(self):
        """
        lex_one_token - Lex a single 'sh' token."""

        c = self.eat()
        if c == ";":
            return (c,)
        if c == "|":
            if self.maybe_eat("|"):
                return ("||",)
            return (c,)
        if c == "&":
            if self.maybe_eat("&"):
                return ("&&",)
            if self.maybe_eat(">"):
                return ("&>",)
            return (c,)
        if c == ">":
            if self.maybe_eat("&"):
                return (">&",)
            if self.maybe_eat(">"):
                return (">>",)
            return (c,)
        if c == "<":
            if self.maybe_eat("&"):
                return ("<&",)
            if self.maybe_eat(">"):
                return ("<<",)
            return (c,)

        # Anything else starts an argument word.
        return self.lex_arg(c)

    def lex(self):
        """Generate all tokens in the input, skipping whitespace."""
        while self.pos != self.end:
            if self.look().isspace():
                self.eat()
            else:
                yield self.lex_one_token()
|
| 193 |
+
|
| 194 |
+
|
| 195 |
+
###
|
| 196 |
+
|
| 197 |
+
|
| 198 |
+
class ShParser:
    """Recursive-descent parser over ShLexer tokens, producing
    Command / Pipeline / Seq trees for a shell command string."""

    def __init__(self, data, win32Escapes=False, pipefail=False):
        self.data = data
        self.pipefail = pipefail
        # Lazy token stream; look() pushes peeked tokens back via chain.
        self.tokens = ShLexer(data, win32Escapes=win32Escapes).lex()

    def lex(self):
        """Pop the next token, or None at end of input."""
        return next(self.tokens, None)

    def look(self):
        """Peek at the next token without consuming it."""
        token = self.lex()
        if token is not None:
            self.tokens = itertools.chain([token], self.tokens)
        return token

    def parse_command(self):
        """Parse one command: a leading word, then arguments/redirections."""
        token = self.lex()
        if not token:
            raise ValueError("empty command!")
        if isinstance(token, tuple):
            raise ValueError("syntax error near unexpected token %r" % token[0])

        args = [token]
        redirects = []
        while True:
            token = self.look()

            if token is None:  # end of input
                break

            # Plain argument word (possibly a glob): consume and continue.
            if isinstance(token, (str, GlobItem)):
                args.append(self.lex())
                continue

            # Remaining tokens are tuples: either command terminators ...
            assert isinstance(token, tuple)
            if token[0] in ("|", ";", "&", "||", "&&"):
                break

            # ... or redirection operators, which take a filename operand.
            op = self.lex()
            target = self.lex()
            if not target:
                raise ValueError("syntax error near token %r" % op[0])
            redirects.append((op, target))

        return Command(args, redirects)

    def parse_pipeline(self):
        """Parse a '|'-joined chain of commands into a Pipeline."""
        negate = False

        commands = [self.parse_command()]
        while self.look() == ("|",):
            self.lex()
            commands.append(self.parse_command())
        return Pipeline(commands, negate, self.pipefail)

    def parse(self):
        """Parse the full input into a left-associated Seq tree."""
        lhs = self.parse_pipeline()

        while self.look():
            operator = self.lex()
            assert isinstance(operator, tuple) and len(operator) == 1

            if not self.look():
                raise ValueError("missing argument to operator %r" % operator[0])

            # FIXME: Operator precedence!!
            lhs = Seq(lhs, operator[0], self.parse_pipeline())

        return lhs
|
wemm/lib/python3.10/site-packages/lit/TestRunner.py
ADDED
|
@@ -0,0 +1,2278 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import absolute_import
|
| 2 |
+
import errno
|
| 3 |
+
import io
|
| 4 |
+
import itertools
|
| 5 |
+
import getopt
|
| 6 |
+
import os, signal, subprocess, sys
|
| 7 |
+
import re
|
| 8 |
+
import stat
|
| 9 |
+
import pathlib
|
| 10 |
+
import platform
|
| 11 |
+
import shlex
|
| 12 |
+
import shutil
|
| 13 |
+
import tempfile
|
| 14 |
+
import threading
|
| 15 |
+
import typing
|
| 16 |
+
from typing import Optional, Tuple
|
| 17 |
+
|
| 18 |
+
import io
|
| 19 |
+
|
| 20 |
+
try:
|
| 21 |
+
from StringIO import StringIO
|
| 22 |
+
except ImportError:
|
| 23 |
+
from io import StringIO
|
| 24 |
+
|
| 25 |
+
from lit.ShCommands import GlobItem, Command
|
| 26 |
+
import lit.ShUtil as ShUtil
|
| 27 |
+
import lit.Test as Test
|
| 28 |
+
import lit.util
|
| 29 |
+
from lit.util import to_bytes, to_string, to_unicode
|
| 30 |
+
from lit.BooleanExpression import BooleanExpression
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
class InternalShellError(Exception):
    """Error raised when lit's internal shell cannot execute a command.

    Carries the offending command object together with a human-readable
    description of what went wrong.
    """

    def __init__(self, command, message):
        # The command object that failed (shape defined by ShCommands).
        self.command = command
        # Human-readable description of the failure.
        self.message = message
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class ScriptFatal(Exception):
    """
    A script had a fatal error such that there's no point in retrying. The
    message has not been emitted on stdout or stderr but is instead included in
    this exception.
    """

    def __init__(self, message):
        # Delegate to Exception so str(exc) yields the message.
        super().__init__(message)
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
# Platform flag used throughout this module to select Windows-specific paths.
kIsWindows = platform.system() == "Windows"

# Don't use close_fds on Windows.
kUseCloseFDs = not kIsWindows

# Use temporary files to replace /dev/null on Windows.
kAvoidDevNull = kIsWindows
# Redirect-target spelling that the code compares against (see processRedirects).
kDevNull = "/dev/null"

# A regex that matches %dbg(ARG), which lit inserts at the beginning of each
# run command pipeline such that ARG specifies the pipeline's source line
# number. lit later expands each %dbg(ARG) to a command that behaves as a null
# command in the target shell so that the line number is seen in lit's verbose
# mode.
#
# This regex captures ARG. ARG must not contain a right parenthesis, which
# terminates %dbg. ARG must not contain quotes, in which ARG might be enclosed
# during expansion.
#
# COMMAND that follows %dbg(ARG) is also captured. COMMAND can be
# empty as a result of conditional substitution.
kPdbgRegex = "%dbg\\(([^)'\"]*)\\)((?:.|\\n)*)"
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def buildPdbgCommand(msg, cmd):
    """Return *cmd* prefixed with a ``%dbg(msg)`` marker.

    Sanity-checks that the produced string still matches ``kPdbgRegex``,
    i.e. that *msg* contains no right parenthesis or quote characters.
    """
    marked = f"%dbg({msg}) {cmd}"
    assert re.fullmatch(
        kPdbgRegex, marked
    ), f"kPdbgRegex expected to match actual %dbg usage: {marked}"
    return marked
|
| 80 |
+
|
| 81 |
+
|
| 82 |
+
class ShellEnvironment(object):

    """Mutable shell environment containing things like CWD and env vars.

    Environment variables are not implemented, but cwd tracking is. In addition,
    we maintain a dir stack for pushd/popd.
    """

    def __init__(self, cwd, env):
        # Current working directory for the simulated shell.
        self.cwd = cwd
        # Private copy so mutations never leak back to the caller's mapping.
        self.env = dict(env)
        # Saved directories for pushd/popd.
        self.dirStack = []

    def change_dir(self, newdir):
        """Set cwd to *newdir*, resolving relative paths against the current cwd."""
        if os.path.isabs(newdir):
            self.cwd = newdir
            return
        joined = os.path.join(self.cwd, newdir)
        self.cwd = lit.util.abs_path_preserve_drive(joined)
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
class TimeoutHelper(object):
    """
    Object used to helper manage enforcing a timeout in
    _executeShCmd(). It is passed through recursive calls
    to collect processes that have been executed so that when
    the timeout happens they can be killed.
    """

    def __init__(self, timeout):
        # Timeout in seconds; a value <= 0 disables the helper entirely
        # (active() returns False and every method becomes a no-op).
        self.timeout = timeout
        # Processes registered via addProcess() that may need killing.
        self._procs = []
        self._timeoutReached = False
        # True once _kill() has swept _procs at least once.
        self._doneKillPass = False
        # This lock will be used to protect concurrent access
        # to _procs and _doneKillPass
        self._lock = None
        self._timer = None

    def cancel(self):
        # Stop the pending timer, if any; safe to call when inactive.
        if not self.active():
            return
        self._timer.cancel()

    def active(self):
        # The helper only does work when a positive timeout was requested.
        return self.timeout > 0

    def addProcess(self, proc):
        """Register *proc* so it can be killed if the timeout fires."""
        if not self.active():
            return
        needToRunKill = False
        with self._lock:
            self._procs.append(proc)
            # Avoid re-entering the lock by finding out if kill needs to be run
            # again here but call it if necessary once we have left the lock.
            # We could use a reentrant lock here instead but this code seems
            # clearer to me.
            needToRunKill = self._doneKillPass

        # The initial call to _kill() from the timer thread already happened so
        # we need to call it again from this thread, otherwise this process
        # will be left to run even though the timeout was already hit
        if needToRunKill:
            assert self.timeoutReached()
            self._kill()

    def startTimer(self):
        """Arm the timeout; only valid when a positive timeout was given."""
        if not self.active():
            return

        # Do some late initialisation that's only needed
        # if there is a timeout set
        self._lock = threading.Lock()
        self._timer = threading.Timer(self.timeout, self._handleTimeoutReached)
        self._timer.start()

    def _handleTimeoutReached(self):
        # Runs on the threading.Timer thread when the deadline expires.
        self._timeoutReached = True
        self._kill()

    def timeoutReached(self):
        # Note: read without the lock; a plain bool flag set once.
        return self._timeoutReached

    def _kill(self):
        """
        This method may be called multiple times as we might get unlucky
        and be in the middle of creating a new process in _executeShCmd()
        which won't yet be in ``self._procs``. By locking here and in
        addProcess() we should be able to kill processes launched after
        the initial call to _kill()
        """
        with self._lock:
            for p in self._procs:
                lit.util.killProcessAndChildren(p.pid)
            # Empty the list and note that we've done a pass over the list
            self._procs = []  # Python2 doesn't have list.clear()
            self._doneKillPass = True
|
| 178 |
+
|
| 179 |
+
|
| 180 |
+
class ShellCommandResult(object):
    """Captures the result of an individual command.

    Attributes mirror the constructor parameters: the command object, its
    captured stdout/stderr text, the integer exit code, whether the command
    was interrupted by a timeout, and the list of files the command opened
    for redirection.
    """

    def __init__(
        self, command, stdout, stderr, exitCode, timeoutReached, outputFiles=None
    ):
        self.command = command
        self.stdout = stdout
        self.stderr = stderr
        self.exitCode = exitCode
        self.timeoutReached = timeoutReached
        # Use a None sentinel instead of a mutable default argument, and take
        # a defensive copy so callers cannot mutate our list afterwards.
        self.outputFiles = [] if outputFiles is None else list(outputFiles)
|
| 192 |
+
|
| 193 |
+
|
| 194 |
+
def executeShCmd(cmd, shenv, results, timeout=0):
    """
    Wrapper around _executeShCmd that handles
    timeout
    """
    # A helper is created even when no timeout is requested so that the
    # recursive implementation never needs ``is None`` checks.
    helper = TimeoutHelper(timeout)
    if timeout > 0:
        helper.startTimer()

    finalExitCode = _executeShCmd(cmd, shenv, results, helper)
    helper.cancel()

    timeoutInfo = (
        "Reached timeout of {} seconds".format(timeout)
        if helper.timeoutReached()
        else None
    )
    return (finalExitCode, timeoutInfo)
|
| 211 |
+
|
| 212 |
+
|
| 213 |
+
def expand_glob(arg, cwd):
    """Resolve *arg* against *cwd* when it is a GlobItem; otherwise wrap it."""
    if not isinstance(arg, GlobItem):
        return [arg]
    # Sort for deterministic ordering of glob matches.
    return sorted(arg.resolve(cwd))
|
| 217 |
+
|
| 218 |
+
|
| 219 |
+
def expand_glob_expressions(args, cwd):
    """Expand every glob in *args* except args[0], the command name."""
    expanded = [args[0]]
    for item in args[1:]:
        expanded += expand_glob(item, cwd)
    return expanded
|
| 224 |
+
|
| 225 |
+
|
| 226 |
+
def quote_windows_command(seq):
    r"""
    Reimplement Python's private subprocess.list2cmdline for MSys compatibility

    Based on CPython implementation here:
      https://hg.python.org/cpython/file/849826a900d2/Lib/subprocess.py#l422

    Some core util distributions (MSys) don't tokenize command line arguments
    the same way that MSVC CRT does. Lit rolls its own quoting logic similar to
    the stock CPython logic to paper over these quoting and tokenization rule
    differences.

    We use the same algorithm from MSDN as CPython
    (http://msdn.microsoft.com/en-us/library/17w5ykft.aspx), but we treat more
    characters as needing quoting, such as double quotes themselves, and square
    brackets.

    For MSys based tools, this is very brittle though, because quoting an
    argument makes the MSys based tool unescape backslashes where it shouldn't
    (e.g. "a\b\\c\\\\d" becomes "a\b\c\\d" where it should stay as it was,
    according to regular win32 command line parsing rules).
    """
    out = []
    quote_needed = False
    for token in seq:
        # Backslashes are buffered until we know whether the following
        # character is a double quote (which forces doubling them).
        pending_backslashes = []

        # Separate arguments with a single space.
        if out:
            out.append(" ")

        # Unlike the upstream list2cmdline, quotes, brackets and semicolons
        # also force quoting; an empty argument must be quoted too.
        quote_needed = (
            not token
            or (" " in token)
            or ("\t" in token)
            or ('"' in token)
            or ("[" in token)
            or (";" in token)
        )
        if quote_needed:
            out.append('"')

        for ch in token:
            if ch == "\\":
                # Defer: doubling depends on what follows.
                pending_backslashes.append(ch)
            elif ch == '"':
                # Backslashes preceding a quote must be doubled, and the
                # quote itself escaped.
                out.append("\\" * len(pending_backslashes) * 2)
                pending_backslashes = []
                out.append('\\"')
            else:
                # Ordinary character: flush any buffered backslashes as-is.
                if pending_backslashes:
                    out.extend(pending_backslashes)
                    pending_backslashes = []
                out.append(ch)

        # Emit trailing backslashes.
        if pending_backslashes:
            out.extend(pending_backslashes)

        # When quoting, trailing backslashes are doubled (emitted a second
        # time) so the closing quote is not escaped.
        if quote_needed:
            out.extend(pending_backslashes)
            out.append('"')

    return "".join(out)
|
| 294 |
+
|
| 295 |
+
|
| 296 |
+
# args are from 'export' or 'env' command.
|
| 297 |
+
# Skips the command, and parses its arguments.
|
| 298 |
+
# Modifies env accordingly.
|
| 299 |
+
# Returns copy of args without the command or its arguments.
|
| 300 |
+
def updateEnv(env, args):
    """Apply 'export'/'env'-style arguments to *env*.

    ``args[0]`` is the command name and is skipped.  Each subsequent
    argument is either ``-u`` (unset the following variable), a variable
    name to unset, or a ``KEY=VALUE`` assignment.  Parsing stops at the
    first argument with no ``=``; that argument and everything after it
    is returned as the remaining command.
    """
    # Index into args of the first argument not consumed here.  Defaults
    # past-the-end so that an all-assignments list returns [].
    arg_idx_next = len(args)
    unset_next_env_var = False
    for arg_idx, arg in enumerate(args[1:]):
        # Support for the -u flag (unsetting) for env command
        # e.g., env -u FOO -u BAR will remove both FOO and BAR
        # from the environment.
        if arg == "-u":
            unset_next_env_var = True
            continue
        if unset_next_env_var:
            unset_next_env_var = False
            if arg in env.env:
                del env.env[arg]
            continue

        # Partition the string into KEY=VALUE.
        key, eq, val = arg.partition("=")
        # Stop if there was no equals.
        # NOTE: arg_idx enumerates args[1:], so args[arg_idx + 1] is the
        # current (unconsumed) argument.
        if eq == "":
            arg_idx_next = arg_idx + 1
            break
        env.env[key] = val
    return args[arg_idx_next:]
|
| 324 |
+
|
| 325 |
+
|
| 326 |
+
def executeBuiltinCd(cmd, shenv):
    """executeBuiltinCd - Change the current directory."""
    if len(cmd.args) == 2:
        # Update the cwd in the parent environment.  The cd builtin always
        # succeeds: a nonexistent directory only surfaces later, when a
        # Popen call tries to use the cwd.
        shenv.change_dir(cmd.args[1])
        return ShellCommandResult(cmd, "", "", 0, False)
    raise InternalShellError(cmd, "'cd' supports only one argument")
|
| 335 |
+
|
| 336 |
+
|
| 337 |
+
def executeBuiltinPushd(cmd, shenv):
    """executeBuiltinPushd - Change the current dir and save the old."""
    if len(cmd.args) == 2:
        # Remember where we were so a later popd can return here.
        shenv.dirStack.append(shenv.cwd)
        shenv.change_dir(cmd.args[1])
        return ShellCommandResult(cmd, "", "", 0, False)
    raise InternalShellError(cmd, "'pushd' supports only one argument")
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
def executeBuiltinPopd(cmd, shenv):
    """executeBuiltinPopd - Restore a previously saved working directory."""
    # popd takes no operands; cmd.args[0] is the command name itself.
    if len(cmd.args) != 1:
        raise InternalShellError(cmd, "'popd' does not support arguments")
    if len(shenv.dirStack) == 0:
        raise InternalShellError(cmd, "popd: directory stack empty")
    shenv.cwd = shenv.dirStack.pop()
    return ShellCommandResult(cmd, "", "", 0, False)
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
def executeBuiltinExport(cmd, shenv):
    """executeBuiltinExport - Set an environment variable.

    Expects exactly one KEY=VALUE operand; delegates the actual parsing and
    environment mutation to updateEnv().
    """
    if len(cmd.args) != 2:
        # BUGFIX: InternalShellError takes (command, message); the command
        # argument was previously omitted, so this error path raised a
        # TypeError instead of the intended shell error.
        raise InternalShellError(cmd, "'export' supports only one argument")
    updateEnv(shenv, cmd.args)
    return ShellCommandResult(cmd, "", "", 0, False)
|
| 362 |
+
|
| 363 |
+
|
| 364 |
+
def executeBuiltinEcho(cmd, shenv):
    """Interpret a redirected echo or @echo command"""
    opened_files = []
    # Resolve any redirects on the echo command; only stdout redirection is
    # meaningful for echo, so reject stdin/stderr redirects below.
    stdin, stdout, stderr = processRedirects(cmd, subprocess.PIPE, shenv, opened_files)
    if stdin != subprocess.PIPE or stderr != subprocess.PIPE:
        raise InternalShellError(
            cmd, f"stdin and stderr redirects not supported for {cmd.args[0]}"
        )

    # Some tests have un-redirected echo commands to help debug test failures.
    # Buffer our output and return it to the caller.
    is_redirected = True
    # Identity by default; replaced with a str->bytes converter when stdout
    # is reopened in binary mode on Windows.
    encode = lambda x: x
    if stdout == subprocess.PIPE:
        is_redirected = False
        stdout = StringIO()
    elif kIsWindows:
        # Reopen stdout in binary mode to avoid CRLF translation. The versions
        # of echo we are replacing on Windows all emit plain LF, and the LLVM
        # tests now depend on this.
        # When we open as binary, however, this also means that we have to write
        # 'bytes' objects to stdout instead of 'str' objects.
        encode = lit.util.to_bytes
        stdout = open(stdout.name, stdout.mode + "b")
        opened_files.append((None, None, stdout, None))

    # Implement echo flags. We only support -e and -n, and not yet in
    # combination. We have to ignore unknown flags, because `echo "-D FOO"`
    # prints the dash.
    args = cmd.args[1:]
    interpret_escapes = False
    write_newline = True
    while len(args) >= 1 and args[0] in ("-e", "-n"):
        flag = args[0]
        args = args[1:]
        if flag == "-e":
            interpret_escapes = True
        elif flag == "-n":
            write_newline = False

    def maybeUnescape(arg):
        # Apply backslash-escape interpretation only when -e was given.
        if not interpret_escapes:
            return arg

        arg = lit.util.to_bytes(arg)
        codec = "string_escape" if sys.version_info < (3, 0) else "unicode_escape"
        return arg.decode(codec)

    if args:
        # Arguments are space-separated, with no trailing space.
        for arg in args[:-1]:
            stdout.write(encode(maybeUnescape(arg)))
            stdout.write(encode(" "))
        stdout.write(encode(maybeUnescape(args[-1])))
    if write_newline:
        stdout.write(encode("\n"))

    # Close every file processRedirects (or the Windows reopen above) opened.
    for (name, mode, f, path) in opened_files:
        f.close()

    # When redirected, the output went to a file; otherwise return the
    # buffered text so the caller can display it.
    output = "" if is_redirected else stdout.getvalue()
    return ShellCommandResult(cmd, output, "", 0, False)
|
| 425 |
+
|
| 426 |
+
|
| 427 |
+
def executeBuiltinMkdir(cmd, cmd_shenv):
    """executeBuiltinMkdir - Create new directories."""
    # Expand globs first, then drop the command name itself.
    args = expand_glob_expressions(cmd.args, cmd_shenv.cwd)[1:]
    try:
        opts, args = getopt.gnu_getopt(args, "p")
    except getopt.GetoptError as err:
        raise InternalShellError(cmd, "Unsupported: 'mkdir': %s" % str(err))

    make_parents = False
    for opt, _unused in opts:
        if opt == "-p":
            make_parents = True
        else:
            assert False, "unhandled option"

    if not args:
        raise InternalShellError(cmd, "Error: 'mkdir' is missing an operand")

    stderr = StringIO()
    exitCode = 0
    for dir_arg in args:
        # Normalize encodings: unicode on Windows, bytes elsewhere.
        base = to_unicode(cmd_shenv.cwd) if kIsWindows else to_bytes(cmd_shenv.cwd)
        target = to_unicode(dir_arg) if kIsWindows else to_bytes(dir_arg)
        if not os.path.isabs(target):
            target = lit.util.abs_path_preserve_drive(os.path.join(base, target))
        if make_parents:
            # mkdir -p: create intermediate directories, ignore existing.
            lit.util.mkdir_p(target)
        else:
            try:
                lit.util.mkdir(target)
            except OSError as err:
                stderr.write("Error: 'mkdir' command failed, %s\n" % str(err))
                exitCode = 1
    return ShellCommandResult(cmd, "", stderr.getvalue(), exitCode, False)
|
| 462 |
+
|
| 463 |
+
|
| 464 |
+
def executeBuiltinRm(cmd, cmd_shenv):
    """executeBuiltinRm - Removes (deletes) files or directories."""
    # Expand globs, then drop the command name.
    args = expand_glob_expressions(cmd.args, cmd_shenv.cwd)[1:]
    try:
        opts, args = getopt.gnu_getopt(args, "frR", ["--recursive"])
    except getopt.GetoptError as err:
        raise InternalShellError(cmd, "Unsupported: 'rm': %s" % str(err))

    force = False
    recursive = False
    for o, a in opts:
        if o == "-f":
            force = True
        elif o in ("-r", "-R", "--recursive"):
            recursive = True
        else:
            assert False, "unhandled option"

    if len(args) == 0:
        raise InternalShellError(cmd, "Error: 'rm' is missing an operand")

    def on_rm_error(func, path, exc_info):
        # path contains the path of the file that couldn't be removed
        # let's just assume that it's read-only and remove it.
        os.chmod(path, stat.S_IMODE(os.stat(path).st_mode) | stat.S_IWRITE)
        os.remove(path)

    stderr = StringIO()
    exitCode = 0
    for path in args:
        cwd = cmd_shenv.cwd
        # Normalize encodings: unicode on Windows, bytes elsewhere.
        path = to_unicode(path) if kIsWindows else to_bytes(path)
        cwd = to_unicode(cwd) if kIsWindows else to_bytes(cwd)
        if not os.path.isabs(path):
            path = lit.util.abs_path_preserve_drive(os.path.join(cwd, path))
        # rm -f silently skips nonexistent operands.
        if force and not os.path.exists(path):
            continue
        try:
            if os.path.isdir(path):
                # Without -r this is an error, but note the directory is
                # still removed below (matching the original behavior).
                if not recursive:
                    stderr.write("Error: %s is a directory\n" % path)
                    exitCode = 1
                if platform.system() == "Windows":
                    # NOTE: use ctypes to access `SHFileOperationsW` on Windows to
                    # use the NT style path to get access to long file paths which
                    # cannot be removed otherwise.
                    from ctypes.wintypes import BOOL, HWND, LPCWSTR, UINT, WORD
                    from ctypes import addressof, byref, c_void_p, create_unicode_buffer
                    from ctypes import Structure
                    from ctypes import windll, WinError, POINTER

                    class SHFILEOPSTRUCTW(Structure):
                        _fields_ = [
                            ("hWnd", HWND),
                            ("wFunc", UINT),
                            ("pFrom", LPCWSTR),
                            ("pTo", LPCWSTR),
                            ("fFlags", WORD),
                            ("fAnyOperationsAborted", BOOL),
                            ("hNameMappings", c_void_p),
                            ("lpszProgressTitle", LPCWSTR),
                        ]

                    FO_MOVE, FO_COPY, FO_DELETE, FO_RENAME = range(1, 5)

                    FOF_SILENT = 4
                    FOF_NOCONFIRMATION = 16
                    FOF_NOCONFIRMMKDIR = 512
                    FOF_NOERRORUI = 1024

                    FOF_NO_UI = (
                        FOF_SILENT
                        | FOF_NOCONFIRMATION
                        | FOF_NOERRORUI
                        | FOF_NOCONFIRMMKDIR
                    )

                    SHFileOperationW = windll.shell32.SHFileOperationW
                    SHFileOperationW.argtypes = [POINTER(SHFILEOPSTRUCTW)]

                    path = os.path.abspath(path)

                    # pFrom must be double-NUL terminated (it is a list of
                    # paths); hence the +2 and the two explicit terminators.
                    pFrom = create_unicode_buffer(path, len(path) + 2)
                    pFrom[len(path)] = pFrom[len(path) + 1] = "\0"
                    operation = SHFILEOPSTRUCTW(
                        wFunc=UINT(FO_DELETE),
                        pFrom=LPCWSTR(addressof(pFrom)),
                        fFlags=FOF_NO_UI,
                    )
                    result = SHFileOperationW(byref(operation))
                    if result:
                        raise WinError(result)
                else:
                    # With -f, retry read-only entries via on_rm_error.
                    shutil.rmtree(path, onerror=on_rm_error if force else None)
            else:
                # Plain file: with -f, make it writable first if needed.
                if force and not os.access(path, os.W_OK):
                    os.chmod(path, stat.S_IMODE(os.stat(path).st_mode) | stat.S_IWRITE)
                os.remove(path)
        except OSError as err:
            stderr.write("Error: 'rm' command failed, %s" % str(err))
            exitCode = 1
    return ShellCommandResult(cmd, "", stderr.getvalue(), exitCode, False)
|
| 566 |
+
|
| 567 |
+
|
| 568 |
+
def executeBuiltinColon(cmd, cmd_shenv):
    """executeBuiltinColon - Discard arguments and exit with status 0."""
    # Shell ':' no-op builtin: arguments (cmd.args[1:]) are deliberately
    # ignored and success is always reported.
    return ShellCommandResult(cmd, "", "", 0, False)
|
| 571 |
+
|
| 572 |
+
|
| 573 |
+
def processRedirects(cmd, stdin_source, cmd_shenv, opened_files):
    """Return the standard fds for cmd after applying redirects

    Returns the three standard file descriptors for the new child process. Each
    fd may be an open, writable file object or a sentinel value from the
    subprocess module.

    Files opened here are appended to *opened_files* as
    (filename, mode, file-object, resolved-path) tuples so the caller can
    close them later.
    """

    # Apply the redirections, we use (N,) as a sentinel to indicate stdin,
    # stdout, stderr for N equal to 0, 1, or 2 respectively. Redirects to or
    # from a file are represented with a list [file, mode, file-object]
    # where file-object is initially None.
    redirects = [(0,), (1,), (2,)]
    for (op, filename) in cmd.redirects:
        if op == (">", 2):
            redirects[2] = [filename, "w", None]
        elif op == (">>", 2):
            redirects[2] = [filename, "a", None]
        elif op == (">&", 2) and filename in "012":
            # 2>&N: alias stderr to the SAME list object as fd N, so both
            # end up sharing one open file below.
            redirects[2] = redirects[int(filename)]
        elif op == (">&",) or op == ("&>",):
            redirects[1] = redirects[2] = [filename, "w", None]
        elif op == (">",):
            redirects[1] = [filename, "w", None]
        elif op == (">>",):
            redirects[1] = [filename, "a", None]
        elif op == ("<",):
            redirects[0] = [filename, "r", None]
        else:
            raise InternalShellError(
                cmd, "Unsupported redirect: %r" % ((op, filename),)
            )

    # Open file descriptors in a second pass.
    std_fds = [None, None, None]
    for (index, r) in enumerate(redirects):
        # Handle the sentinel values for defaults up front.
        if isinstance(r, tuple):
            if r == (0,):
                fd = stdin_source
            elif r == (1,):
                if index == 0:
                    raise InternalShellError(cmd, "Unsupported redirect for stdin")
                elif index == 1:
                    fd = subprocess.PIPE
                else:
                    # 2>&1 with default stdout: merge stderr into stdout.
                    fd = subprocess.STDOUT
            elif r == (2,):
                if index != 2:
                    raise InternalShellError(cmd, "Unsupported redirect on stdout")
                fd = subprocess.PIPE
            else:
                raise InternalShellError(cmd, "Bad redirect")
            std_fds[index] = fd
            continue

        (filename, mode, fd) = r

        # Check if we already have an open fd. This can happen if stdout and
        # stderr go to the same place.
        if fd is not None:
            std_fds[index] = fd
            continue

        redir_filename = None
        name = expand_glob(filename, cmd_shenv.cwd)
        if len(name) != 1:
            raise InternalShellError(
                cmd, "Unsupported: glob in " "redirect expanded to multiple files"
            )
        name = name[0]
        if kAvoidDevNull and name == kDevNull:
            # Windows has no /dev/null; substitute a throwaway temp file.
            fd = tempfile.TemporaryFile(mode=mode)
        elif kIsWindows and name == "/dev/tty":
            # Simulate /dev/tty on Windows.
            # "CON" is a special filename for the console.
            fd = open("CON", mode)
        else:
            # Make sure relative paths are relative to the cwd.
            redir_filename = os.path.join(cmd_shenv.cwd, name)
            redir_filename = (
                to_unicode(redir_filename) if kIsWindows else to_bytes(redir_filename)
            )
            fd = open(redir_filename, mode)
        # Workaround a Win32 and/or subprocess bug when appending.
        #
        # FIXME: Actually, this is probably an instance of PR6753.
        if mode == "a":
            fd.seek(0, 2)
        # Mutate the underlying redirect list so that we can redirect stdout
        # and stderr to the same place without opening the file twice.
        r[2] = fd
        opened_files.append((filename, mode, fd) + (redir_filename,))
        std_fds[index] = fd

    return std_fds
|
| 669 |
+
|
| 670 |
+
|
| 671 |
+
def _executeShCmd(cmd, shenv, results, timeoutHelper):
    """Recursively execute a parsed internal-shell command tree.

    cmd is either a ShUtil.Seq (';', '&', '||', '&&') or a ShUtil.Pipeline.
    Each executed command appends a ShellCommandResult to `results`.
    Returns the exit code of the command tree, or None if the timeout was
    reached before (or while) executing it.
    """
    if timeoutHelper.timeoutReached():
        # Prevent further recursion if the timeout has been hit
        # as we should try avoid launching more processes.
        return None

    if isinstance(cmd, ShUtil.Seq):
        if cmd.op == ";":
            # As in sh, the exit status of the lhs of ';' is discarded; only
            # the rhs determines the sequence's status.
            res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
            return _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)

        if cmd.op == "&":
            raise InternalShellError(cmd, "unsupported shell operator: '&'")

        if cmd.op == "||":
            res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
            if res != 0:
                res = _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
            return res

        if cmd.op == "&&":
            res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
            # A None result means the timeout fired during the lhs; stop here.
            if res is None:
                return res

            if res == 0:
                res = _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
            return res

        raise ValueError("Unknown shell command: %r" % cmd.op)
    assert isinstance(cmd, ShUtil.Pipeline)

    procs = []
    # Per-process count of plain negations ('not' without --crash, or '!')
    # stripped from the command line; applied to the exit code afterwards.
    proc_not_counts = []
    default_stdin = subprocess.PIPE
    stderrTempFiles = []
    opened_files = []
    named_temp_files = []
    # Commands implemented as helper scripts run in a child interpreter.
    builtin_commands = set(["cat", "diff"])
    builtin_commands_dir = os.path.join(
        os.path.dirname(os.path.abspath(__file__)), "builtin_commands"
    )
    # Commands implemented directly in this process.
    inproc_builtins = {
        "cd": executeBuiltinCd,
        "export": executeBuiltinExport,
        "echo": executeBuiltinEcho,
        "@echo": executeBuiltinEcho,
        "mkdir": executeBuiltinMkdir,
        "popd": executeBuiltinPopd,
        "pushd": executeBuiltinPushd,
        "rm": executeBuiltinRm,
        ":": executeBuiltinColon,
    }
    # To avoid deadlock, we use a single stderr stream for piped
    # output. This is null until we have seen some output using
    # stderr.
    for i, j in enumerate(cmd.commands):
        # Reference the global environment by default.
        cmd_shenv = shenv
        args = list(j.args)
        not_args = []
        not_count = 0
        not_crash = False
        # Peel off any leading 'env' / 'not' / '!' prefix commands.
        while True:
            if args[0] == "env":
                # Create a copy of the global environment and modify it for
                # this one command. There might be multiple envs in a pipeline,
                # and there might be multiple envs in a command (usually when
                # one comes from a substitution):
                # env FOO=1 llc < %s | env BAR=2 llvm-mc | FileCheck %s
                # env FOO=1 %{another_env_plus_cmd} | FileCheck %s
                if cmd_shenv is shenv:
                    cmd_shenv = ShellEnvironment(shenv.cwd, shenv.env)
                args = updateEnv(cmd_shenv, args)
                if not args:
                    raise InternalShellError(j, "Error: 'env' requires a" " subcommand")
            elif args[0] == "not":
                not_args.append(args.pop(0))
                not_count += 1
                if args and args[0] == "--crash":
                    not_args.append(args.pop(0))
                    not_crash = True
                if not args:
                    raise InternalShellError(j, "Error: 'not' requires a" " subcommand")
            elif args[0] == "!":
                not_args.append(args.pop(0))
                not_count += 1
                if not args:
                    raise InternalShellError(j, "Error: '!' requires a" " subcommand")
            else:
                break

        # Handle in-process builtins.
        #
        # Handle "echo" as a builtin if it is not part of a pipeline. This
        # greatly speeds up tests that construct input files by repeatedly
        # echo-appending to a file.
        # FIXME: Standardize on the builtin echo implementation. We can use a
        # temporary file to sidestep blocking pipe write issues.
        inproc_builtin = inproc_builtins.get(args[0], None)
        if inproc_builtin and (args[0] != "echo" or len(cmd.commands) == 1):
            # env calling an in-process builtin is useless, so we take the safe
            # approach of complaining.
            if not cmd_shenv is shenv:
                raise InternalShellError(
                    j, "Error: 'env' cannot call '{}'".format(args[0])
                )
            if not_crash:
                raise InternalShellError(
                    j, "Error: 'not --crash' cannot call" " '{}'".format(args[0])
                )
            if len(cmd.commands) != 1:
                raise InternalShellError(
                    j,
                    "Unsupported: '{}' cannot be part" " of a pipeline".format(args[0]),
                )
            result = inproc_builtin(Command(args, j.redirects), cmd_shenv)
            # An odd number of negations inverts the builtin's exit code.
            if not_count % 2:
                result.exitCode = int(not result.exitCode)
            result.command.args = j.args
            results.append(result)
            return result.exitCode

        # Resolve any out-of-process builtin command before adding back 'not'
        # commands.
        if args[0] in builtin_commands:
            args.insert(0, sys.executable)
            cmd_shenv.env["PYTHONPATH"] = os.path.dirname(os.path.abspath(__file__))
            args[1] = os.path.join(builtin_commands_dir, args[1] + ".py")

        # We had to search through the 'not' commands to find all the 'env'
        # commands and any other in-process builtin command. We don't want to
        # reimplement 'not' and its '--crash' here, so just push all 'not'
        # commands back to be called as external commands. Because this
        # approach effectively moves all 'env' commands up front, it relies on
        # the assumptions that (1) environment variables are not intended to be
        # relevant to 'not' commands and (2) the 'env' command should always
        # blindly pass along the status it receives from any command it calls.

        # For plain negations, either 'not' without '--crash', or the shell
        # operator '!', leave them out from the command to execute and
        # invert the result code afterwards.
        if not_crash:
            args = not_args + args
            not_count = 0
        else:
            not_args = []

        stdin, stdout, stderr = processRedirects(
            j, default_stdin, cmd_shenv, opened_files
        )

        # If stderr wants to come from stdout, but stdout isn't a pipe, then put
        # stderr on a pipe and treat it as stdout.
        if stderr == subprocess.STDOUT and stdout != subprocess.PIPE:
            stderr = subprocess.PIPE
            stderrIsStdout = True
        else:
            stderrIsStdout = False

            # Don't allow stderr on a PIPE except for the last
            # process, this could deadlock.
            #
            # FIXME: This is slow, but so is deadlock.
            if stderr == subprocess.PIPE and j != cmd.commands[-1]:
                stderr = tempfile.TemporaryFile(mode="w+b")
                stderrTempFiles.append((i, stderr))

        # Resolve the executable path ourselves.
        executable = None
        # For paths relative to cwd, use the cwd of the shell environment.
        if args[0].startswith("."):
            exe_in_cwd = os.path.join(cmd_shenv.cwd, args[0])
            if os.path.isfile(exe_in_cwd):
                executable = exe_in_cwd
        if not executable:
            executable = lit.util.which(args[0], cmd_shenv.env["PATH"])
        if not executable:
            raise InternalShellError(j, "%r: command not found" % args[0])

        # Replace uses of /dev/null with temporary files.
        if kAvoidDevNull:
            # In Python 2.x, basestring is the base class for all string (including unicode)
            # In Python 3.x, basestring no longer exist and str is always unicode
            try:
                str_type = basestring
            except NameError:
                str_type = str
            # NOTE: this loop shadows the outer pipeline index `i`; that is
            # benign here because `i` is not read again before the outer loop
            # reassigns it on the next iteration.
            for i, arg in enumerate(args):
                if isinstance(arg, str_type) and kDevNull in arg:
                    f = tempfile.NamedTemporaryFile(delete=False)
                    f.close()
                    named_temp_files.append(f.name)
                    args[i] = arg.replace(kDevNull, f.name)

        # Expand all glob expressions
        args = expand_glob_expressions(args, cmd_shenv.cwd)

        # On Windows, do our own command line quoting for better compatibility
        # with some core utility distributions.
        if kIsWindows:
            args = quote_windows_command(args)

        try:
            procs.append(
                subprocess.Popen(
                    args,
                    cwd=cmd_shenv.cwd,
                    executable=executable,
                    stdin=stdin,
                    stdout=stdout,
                    stderr=stderr,
                    env=cmd_shenv.env,
                    close_fds=kUseCloseFDs,
                    universal_newlines=True,
                    errors="replace",
                )
            )
            proc_not_counts.append(not_count)
            # Let the helper know about this process
            timeoutHelper.addProcess(procs[-1])
        except OSError as e:
            raise InternalShellError(
                j, "Could not create process ({}) due to {}".format(executable, e)
            )

        # Immediately close stdin for any process taking stdin from us.
        if stdin == subprocess.PIPE:
            procs[-1].stdin.close()
            procs[-1].stdin = None

        # Update the current stdin source.
        if stdout == subprocess.PIPE:
            default_stdin = procs[-1].stdout
        elif stderrIsStdout:
            default_stdin = procs[-1].stderr
        else:
            default_stdin = subprocess.PIPE

    # Explicitly close any redirected files. We need to do this now because we
    # need to release any handles we may have on the temporary files (important
    # on Win32, for example). Since we have already spawned the subprocess, our
    # handles have already been transferred so we do not need them anymore.
    for (name, mode, f, path) in opened_files:
        f.close()

    # FIXME: There is probably still deadlock potential here. Yawn.
    procData = [None] * len(procs)
    procData[-1] = procs[-1].communicate()

    for i in range(len(procs) - 1):
        if procs[i].stdout is not None:
            out = procs[i].stdout.read()
        else:
            out = ""
        if procs[i].stderr is not None:
            err = procs[i].stderr.read()
        else:
            err = ""
        procData[i] = (out, err)

    # Read stderr out of the temp files.
    for i, f in stderrTempFiles:
        f.seek(0, 0)
        procData[i] = (procData[i][0], f.read())
        f.close()

    exitCode = None
    for i, (out, err) in enumerate(procData):
        res = procs[i].wait()
        # Detect Ctrl-C in subprocess.
        if res == -signal.SIGINT:
            raise KeyboardInterrupt
        # Apply the deferred plain negations stripped earlier.
        if proc_not_counts[i] % 2:
            res = 1 if res == 0 else 0
        elif proc_not_counts[i] > 1:
            res = 1 if res != 0 else 0

        # Ensure the resulting output is always of string type.
        try:
            if out is None:
                out = ""
            else:
                out = to_string(out.decode("utf-8", errors="replace"))
        except:
            out = str(out)
        try:
            if err is None:
                err = ""
            else:
                err = to_string(err.decode("utf-8", errors="replace"))
        except:
            err = str(err)

        # Gather the redirected output files for failed commands.
        output_files = []
        if res != 0:
            for (name, mode, f, path) in sorted(opened_files):
                if path is not None and mode in ("w", "a"):
                    try:
                        with open(path, "rb") as f:
                            data = f.read()
                    except:
                        data = None
                    if data is not None:
                        output_files.append((name, path, data))

        results.append(
            ShellCommandResult(
                cmd.commands[i],
                out,
                err,
                res,
                timeoutHelper.timeoutReached(),
                output_files,
            )
        )
        if cmd.pipe_err:
            # Take the last failing exit code from the pipeline.
            if not exitCode or res != 0:
                exitCode = res
        else:
            exitCode = res

    # Remove any named temporary files we created.
    for f in named_temp_files:
        try:
            os.remove(f)
        except OSError:
            pass

    if cmd.negate:
        exitCode = not exitCode

    return exitCode
|
| 1006 |
+
|
| 1007 |
+
|
| 1008 |
+
def formatOutput(title, data, limit=None):
    """Render *data* inside an ASCII banner box titled *title*.

    Returns the empty string when *data* is blank. When *limit* is given
    and *data* exceeds it, the data is truncated and the closing rule
    notes the truncation.
    """
    if not data.strip():
        return ""
    truncated = limit is not None and len(data) > limit
    if truncated:
        data = data[:limit] + "\n...\n"
    msg = "data was truncated" if truncated else ""
    ndashes = 30
    # fmt: off
    header = "# .---" + title + "-" * (ndashes - 4 - len(title)) + "\n"
    body = "\n".join("# | " + line for line in data.splitlines()) + "\n"
    footer = "# `---" + msg + "-" * (ndashes - 4 - len(msg)) + "\n"
    # fmt: on
    return header + body + footer
|
| 1023 |
+
|
| 1024 |
+
|
| 1025 |
+
# Always either returns the tuple (out, err, exitCode, timeoutInfo) or raises a
|
| 1026 |
+
# ScriptFatal exception.
|
| 1027 |
+
#
|
| 1028 |
+
# If debug is True (the normal lit behavior), err is empty, and out contains an
|
| 1029 |
+
# execution trace, including stdout and stderr shown per command executed.
|
| 1030 |
+
#
|
| 1031 |
+
# If debug is False (set by some custom lit test formats that call this
|
| 1032 |
+
# function), out contains only stdout from the script, err contains only stderr
|
| 1033 |
+
# from the script, and there is no execution trace.
|
| 1034 |
+
def executeScriptInternal(
    test, litConfig, tmpBase, commands, cwd, debug=True
) -> Tuple[str, str, int, Optional[str]]:
    """Run *commands* through lit's internal shell.

    Parses each command line (honoring any '%dbg(...)' prefix), joins them
    with '&&', and executes the resulting tree via executeShCmd. Returns
    (out, err, exitCode, timeoutInfo); raises ScriptFatal on a shell parse
    error. With debug=True, `out` is an execution trace that embeds each
    command's stdout/stderr; with debug=False, `out`/`err` are the raw
    script output.
    """
    cmds = []
    for i, ln in enumerate(commands):
        # Within lit, we try to always add '%dbg(...)' to command lines in order
        # to maximize debuggability. However, custom lit test formats might not
        # always add it, so add a generic debug message in that case.
        match = re.fullmatch(kPdbgRegex, ln)
        if match:
            dbg = match.group(1)
            command = match.group(2)
        else:
            dbg = "command line"
            command = ln
        if debug:
            # Prepend an '@echo' so the trace records which line is running.
            ln = f"@echo '# {dbg}' "
            if command:
                ln += f"&& @echo {shlex.quote(command.lstrip())} && {command}"
            else:
                ln += "has no command after substitutions"
        else:
            ln = command
        try:
            cmds.append(
                ShUtil.ShParser(ln, litConfig.isWindows, test.config.pipefail).parse()
            )
        except:
            raise ScriptFatal(
                f"shell parser error on {dbg}: {command.lstrip()}\n"
            ) from None

    # Chain all parsed commands with '&&' so a failure stops the script.
    cmd = cmds[0]
    for c in cmds[1:]:
        cmd = ShUtil.Seq(cmd, "&&", c)

    results = []
    timeoutInfo = None
    try:
        shenv = ShellEnvironment(cwd, test.config.environment)
        exitCode, timeoutInfo = executeShCmd(
            cmd, shenv, results, timeout=litConfig.maxIndividualTestTime
        )
    except InternalShellError:
        e = sys.exc_info()[1]
        exitCode = 127
        results.append(ShellCommandResult(e.command, "", e.message, exitCode, False))

    out = err = ""
    for i, result in enumerate(results):
        if not debug:
            out += result.stdout
            err += result.stderr
            continue

        # The purpose of an "@echo" command is merely to add a debugging message
        # directly to lit's output. It is used internally by lit's internal
        # shell and is not currently documented for use in lit tests. However,
        # if someone misuses it (e.g., both "echo" and "@echo" complain about
        # stdin redirection), produce the normal execution trace to facilitate
        # debugging.
        if (
            result.command.args[0] == "@echo"
            and result.exitCode == 0
            and not result.stderr
            and not result.outputFiles
            and not result.timeoutReached
        ):
            out += result.stdout
            continue

        # Write the command line that was run. Properly quote it. Leading
        # "!" commands should not be quoted as that would indicate they are not
        # the builtins.
        out += "# executed command: "
        nLeadingBangs = next(
            (i for i, cmd in enumerate(result.command.args) if cmd != "!"),
            len(result.command.args),
        )
        out += "! " * nLeadingBangs
        out += " ".join(
            shlex.quote(str(s))
            for i, s in enumerate(result.command.args)
            if i >= nLeadingBangs
        )
        out += "\n"

        # If nothing interesting happened, move on.
        if (
            litConfig.maxIndividualTestTime == 0
            and result.exitCode == 0
            and not result.stdout.strip()
            and not result.stderr.strip()
        ):
            continue

        # Otherwise, something failed or was printed, show it.

        # Add the command output, if redirected.
        for (name, path, data) in result.outputFiles:
            data = to_string(data.decode("utf-8", errors="replace"))
            out += formatOutput(f"redirected output from '{name}'", data, limit=1024)
        if result.stdout.strip():
            out += formatOutput("command stdout", result.stdout)
        if result.stderr.strip():
            out += formatOutput("command stderr", result.stderr)
        if not result.stdout.strip() and not result.stderr.strip():
            out += "# note: command had no output on stdout or stderr\n"

        # Show the error conditions:
        if result.exitCode != 0:
            # On Windows, a negative exit code indicates a signal, and those are
            # easier to recognize or look up if we print them in hex.
            if litConfig.isWindows and (result.exitCode < 0 or result.exitCode > 255):
                codeStr = hex(int(result.exitCode & 0xFFFFFFFF)).rstrip("L")
            else:
                codeStr = str(result.exitCode)
            out += "# error: command failed with exit status: %s\n" % (codeStr,)
        if litConfig.maxIndividualTestTime > 0 and result.timeoutReached:
            out += "# error: command reached timeout: %s\n" % (
                str(result.timeoutReached),
            )

    return out, err, exitCode, timeoutInfo
|
| 1158 |
+
|
| 1159 |
+
|
| 1160 |
+
def executeScript(test, litConfig, tmpBase, commands, cwd):
    """Run *commands* through an external shell (bash, /bin/sh, or cmd.exe).

    Writes the commands to a script file next to tmpBase and executes it,
    mutating `commands` in place to expand '%dbg(...)' prefixes into echo
    diagnostics. Returns (out, err, exitCode, timeoutInfo), where
    timeoutInfo is None unless the command timed out.
    """
    bashPath = litConfig.getBashPath()
    isWin32CMDEXE = litConfig.isWindows and not bashPath
    script = tmpBase + ".script"
    if isWin32CMDEXE:
        script += ".bat"

    # Write script file
    mode = "w"
    open_kwargs = {}
    if litConfig.isWindows and not isWin32CMDEXE:
        mode += "b"  # Avoid CRLFs when writing bash scripts.
    elif sys.version_info > (3, 0):
        open_kwargs["encoding"] = "utf-8"
    f = open(script, mode, **open_kwargs)
    if isWin32CMDEXE:
        for i, ln in enumerate(commands):
            match = re.fullmatch(kPdbgRegex, ln)
            if match:
                command = match.group(2)
                commands[i] = match.expand(
                    "echo '\\1' > nul && " if command else "echo '\\1' > nul"
                )
        f.write("@echo on\n")
        f.write("\n@if %ERRORLEVEL% NEQ 0 EXIT\n".join(commands))
    else:
        for i, ln in enumerate(commands):
            match = re.fullmatch(kPdbgRegex, ln)
            if match:
                dbg = match.group(1)
                command = match.group(2)
                # Echo the debugging diagnostic to stderr.
                #
                # For that echo command, use 'set' commands to suppress the
                # shell's execution trace, which would just add noise. Suppress
                # the shell's execution trace for the 'set' commands by
                # redirecting their stderr to /dev/null.
                if command:
                    msg = f"'{dbg}': {shlex.quote(command.lstrip())}"
                else:
                    msg = f"'{dbg}' has no command after substitutions"
                commands[i] = (
                    f"{{ set +x; }} 2>/dev/null && "
                    f"echo {msg} >&2 && "
                    f"{{ set -x; }} 2>/dev/null"
                )
                # Execute the command, if any.
                #
                # 'command' might be something like:
                #
                #   subcmd & PID=$!
                #
                # In that case, we need something like:
                #
                #   echo_dbg && { subcmd & PID=$!; }
                #
                # Without the '{ ...; }' enclosing the original 'command', '&'
                # would put all of 'echo_dbg && subcmd' in the background. This
                # would cause 'echo_dbg' to execute at the wrong time, and a
                # later kill of $PID would target the wrong process. We have
                # seen the latter manage to terminate the shell running lit.
                if command:
                    commands[i] += f" && {{ {command}; }}"
        if test.config.pipefail:
            f.write(b"set -o pipefail;" if mode == "wb" else "set -o pipefail;")
        f.write(b"set -x;" if mode == "wb" else "set -x;")
        if sys.version_info > (3, 0) and mode == "wb":
            f.write(bytes("{ " + "; } &&\n{ ".join(commands) + "; }", "utf-8"))
        else:
            f.write("{ " + "; } &&\n{ ".join(commands) + "; }")
    f.write(b"\n" if mode == "wb" else "\n")
    f.close()

    if isWin32CMDEXE:
        command = ["cmd", "/c", script]
    else:
        if bashPath:
            command = [bashPath, script]
        else:
            command = ["/bin/sh", script]
        if litConfig.useValgrind:
            # FIXME: Running valgrind on sh is overkill. We probably could just
            # run on clang with no real loss.
            command = litConfig.valgrindArgs + command

    try:
        out, err, exitCode = lit.util.executeCommand(
            command,
            cwd=cwd,
            env=test.config.environment,
            timeout=litConfig.maxIndividualTestTime,
        )
        return (out, err, exitCode, None)
    except lit.util.ExecuteCommandTimeoutException as e:
        return (e.out, e.err, e.exitCode, e.msg)
|
| 1255 |
+
|
| 1256 |
+
|
| 1257 |
+
def parseIntegratedTestScriptCommands(source_path, keywords):
    """
    parseIntegratedTestScriptCommands(source_path) -> commands

    Parse the commands in an integrated test script file into a list of
    (line_number, command_type, line).
    """

    # This code is carefully written to be dual compatible with Python 2.5+ and
    # Python 3 without requiring input files to always have valid codings. The
    # trick we use is to open the file in binary mode and use the regular
    # expression library to find the commands, with it scanning strings in
    # Python2 and bytes in Python3.
    #
    # Once we find a match, we do require each script line to be decodable to
    # UTF-8, so we convert the outputs to UTF-8 before returning. This way the
    # remaining code can work with "strings" agnostic of the executing Python
    # version.

    keywords_re = re.compile(
        to_bytes("(%s)(.*)\n" % ("|".join(re.escape(k) for k in keywords),))
    )

    # Use a context manager instead of try/finally so the handle is released
    # even if the caller abandons this generator; the whole file is read up
    # front, so the handle can be closed before iteration begins.
    with open(source_path, "rb") as f:
        # Read the entire file contents.
        data = f.read()

    # Ensure the data ends with a newline.
    if not data.endswith(to_bytes("\n")):
        data = data + to_bytes("\n")

    # Iterate over the matches.
    line_number = 1
    last_match_position = 0
    for match in keywords_re.finditer(data):
        # Compute the updated line number by counting the intervening
        # newlines.
        match_position = match.start()
        line_number += data.count(
            to_bytes("\n"), last_match_position, match_position
        )
        last_match_position = match_position

        # Convert the keyword and line to UTF-8 strings and yield the
        # command. Note that we take care to return regular strings in
        # Python 2, to avoid other code having to differentiate between the
        # str and unicode types.
        #
        # Opening the file in binary mode prevented Windows \r newline
        # characters from being converted to Unix \n newlines, so manually
        # strip those from the yielded lines.
        keyword, ln = match.groups()
        yield (
            line_number,
            to_string(keyword.decode("utf-8")),
            to_string(ln.decode("utf-8").rstrip("\r")),
        )
|
| 1317 |
+
|
| 1318 |
+
|
| 1319 |
+
def getTempPaths(test):
    """Return (tmpDir, tmpBase) for *test*.

    The temporary location is always relative to the test suite root, not
    the test source root: tmpDir is an 'Output' directory next to the
    test's exec path, and tmpBase is the exec basename inside it.
    """
    exec_dir, exec_base = os.path.split(test.getExecPath())
    tmp_dir = os.path.join(exec_dir, "Output")
    return tmp_dir, os.path.join(tmp_dir, exec_base)
|
| 1327 |
+
|
| 1328 |
+
|
| 1329 |
+
def colonNormalizePath(path):
    """Normalize an absolute *path* for use without a leading slash or a
    drive colon (e.g. inside colon-separated contexts).

    On Windows: backslashes become forward slashes and a drive prefix like
    'C:' loses its colon. Elsewhere: the path must be absolute, and the
    leading '/' is stripped.
    """
    if not kIsWindows:
        assert path[0] == "/"
        return path[1:]
    return re.sub(r"^(.):", r"\1", path.replace("\\", "/"))
|
| 1335 |
+
|
| 1336 |
+
|
| 1337 |
+
def getDefaultSubstitutions(test, tmpDir, tmpBase, normalize_slashes=False):
    """Build the default lit substitution list for *test*.

    Returns a list of (pattern, replacement) pairs covering the config's own
    substitutions plus the standard path substitutions (%s, %S, %p, %t, %T
    and their %/, %{...:real}, %{/...:regex_replacement}, and %: variants).
    NOTE: the order of this list is significant — substitutions are applied
    in order — so entries must not be reordered.
    """
    sourcepath = test.getSourcePath()
    sourcedir = os.path.dirname(sourcepath)

    # Normalize slashes, if requested.
    if normalize_slashes:
        sourcepath = sourcepath.replace("\\", "/")
        sourcedir = sourcedir.replace("\\", "/")
        tmpDir = tmpDir.replace("\\", "/")
        tmpBase = tmpBase.replace("\\", "/")

    substitutions = []
    substitutions.extend(test.config.substitutions)
    tmpName = tmpBase + ".tmp"
    baseName = os.path.basename(tmpBase)

    substitutions.append(("%{pathsep}", os.pathsep))
    substitutions.append(("%basename_t", baseName))

    substitutions.extend(
        [
            ("%{fs-src-root}", pathlib.Path(sourcedir).anchor),
            ("%{fs-tmp-root}", pathlib.Path(tmpBase).anchor),
            ("%{fs-sep}", os.path.sep),
        ]
    )

    # %/et: tmpName with each backslash escaped eightfold (the literal below
    # is 16 backslash characters, i.e. 8 escaped backslashes).
    substitutions.append(("%/et", tmpName.replace("\\", "\\\\\\\\\\\\\\\\")))

    def regex_escape(s):
        # Escape only the characters that are special in a s@@@ replacement.
        s = s.replace("@", r"\@")
        s = s.replace("&", r"\&")
        return s

    path_substitutions = [
        ("s", sourcepath), ("S", sourcedir), ("p", sourcedir),
        ("t", tmpName), ("T", tmpDir)
    ]
    for path_substitution in path_substitutions:
        letter = path_substitution[0]
        path = path_substitution[1]

        # Original path variant
        substitutions.append(("%" + letter, path))

        # Normalized path separator variant
        substitutions.append(("%/" + letter, path.replace("\\", "/")))

        # realpath variants
        # Windows paths with substitute drives are not expanded by default
        # as they are used to avoid MAX_PATH issues, but sometimes we do
        # need the fully expanded path.
        real_path = os.path.realpath(path)
        substitutions.append(("%{" + letter + ":real}", real_path))
        substitutions.append(("%{/" + letter + ":real}",
                              real_path.replace("\\", "/")))

        # "%{/[STpst]:regex_replacement}" should be normalized like
        # "%/[STpst]" but we're also in a regex replacement context
        # of a s@@@ regex.
        substitutions.append(
            ("%{/" + letter + ":regex_replacement}",
             regex_escape(path.replace("\\", "/"))))

        # "%:[STpst]" are normalized paths without colons and without
        # a leading slash.
        substitutions.append(("%:" + letter, colonNormalizePath(path)))

    return substitutions
|
| 1406 |
+
|
| 1407 |
+
|
| 1408 |
+
def _memoize(f):
|
| 1409 |
+
cache = {} # Intentionally unbounded, see applySubstitutions()
|
| 1410 |
+
|
| 1411 |
+
def memoized(x):
|
| 1412 |
+
if x not in cache:
|
| 1413 |
+
cache[x] = f(x)
|
| 1414 |
+
return cache[x]
|
| 1415 |
+
|
| 1416 |
+
return memoized
|
| 1417 |
+
|
| 1418 |
+
|
| 1419 |
+
# Cache compiled regex objects across calls; the cache is unbounded (see
# _memoize), which is acceptable because lit reuses a bounded set of
# substitution patterns.
@_memoize
def _caching_re_compile(r):
    # Compile pattern string `r` once; repeated calls return the cached object.
    return re.compile(r)
|
| 1422 |
+
|
| 1423 |
+
|
| 1424 |
+
class ExpandableScriptDirective(object):
    """
    Base interface for lit directives whose text must undergo lit substitution
    expansion when producing the shell script. This covers directives holding
    shell commands (e.g., 'RUN:') as well as directives that adjust the
    substitution list itself (e.g., 'DEFINE:').

    start_line_number: The directive's starting line number.
    end_line_number: The directive's ending line number, which is
      start_line_number if the directive has no line continuations.
    keyword: The keyword that specifies the directive. For example, 'RUN:'.
    """

    def __init__(self, start_line_number, end_line_number, keyword):
        # Inclusive line range the directive covers so far; the range grows
        # as continuation lines are absorbed.
        self.start_line_number = start_line_number
        self.end_line_number = end_line_number
        # The keyword that introduced this directive.
        self.keyword = keyword

    def add_continuation(self, line_number, keyword, line):
        """
        Try to absorb a continuation line into this directive.

        Returns True on success. Returns False, leaving the directive
        untouched, when the specified line is not a continuation of this
        directive (e.g., the previous line does not end in '\', or the
        keywords do not match).

        line_number: The line number of the continuation line.
        keyword: The keyword introducing the continuation line, e.g. 'RUN:'.
        line: The continuation line's content after the keyword.
        """
        assert False, "expected method to be called on derived class"

    def needs_continuation(self):
        """
        Report whether this directive still requires a continuation line.

        A trailing '\' is documented as a line continuation even when
        whitespace separates it from the newline: it looks like one, so it
        would be confusing if it did not behave as one.
        """
        assert False, "expected method to be called on derived class"

    def get_location(self):
        """
        Describe, as a phrase, the line or line range included so far by this
        directive and any of its continuations.
        """
        is_single_line = self.start_line_number == self.end_line_number
        if is_single_line:
            return f"at line {self.start_line_number}"
        return f"from line {self.start_line_number} to {self.end_line_number}"
|
| 1477 |
+
|
| 1478 |
+
|
| 1479 |
+
class CommandDirective(ExpandableScriptDirective):
    """
    A lit directive taking a shell command line. For example,
    'RUN: echo hello world'.

    command: The content accumulated so far from the directive and its
      continuation lines.
    """

    def __init__(self, start_line_number, end_line_number, keyword, line):
        super().__init__(start_line_number, end_line_number, keyword)
        # Trailing whitespace (including the newline) is dropped immediately
        # so that, when present, a continuation '\' is always the final
        # character of self.command.
        self.command = line.rstrip()

    def add_continuation(self, line_number, keyword, line):
        # Not a continuation of this directive: different keyword, or the
        # accumulated command does not end in '\'.
        if keyword != self.keyword or not self.needs_continuation():
            return False
        # Splice the continuation line in place of the trailing '\'.
        self.command = self.command[:-1] + line.rstrip()
        self.end_line_number = line_number
        return True

    def needs_continuation(self):
        # Trailing whitespace is stripped immediately when each line is added,
        # so '\' is never hidden here. Use endswith() rather than indexing so
        # an empty command (e.g., a blank 'RUN:' line) reports False instead
        # of raising IndexError, matching SubstDirective.needs_continuation().
        return self.command.endswith("\\")
|
| 1503 |
+
|
| 1504 |
+
|
| 1505 |
+
class SubstDirective(ExpandableScriptDirective):
    """
    A lit directive taking a substitution definition or redefinition. For
    example, 'DEFINE: %{name} = value'.

    new_subst: True if this directive defines a new substitution. False if it
      redefines an existing substitution.
    body: The unparsed content accumulated so far from the directive and its
      continuation lines.
    name: The substitution's name, or None if more continuation lines are still
      required.
    value: The substitution's value, or None if more continuation lines are
      still required.
    """

    def __init__(self, start_line_number, end_line_number, keyword, new_subst, line):
        super().__init__(start_line_number, end_line_number, keyword)
        self.new_subst = new_subst
        self.body = line
        self.name = None
        self.value = None
        # Parse immediately: name/value remain None if a continuation is
        # still pending (body ends in '\').
        self._parse_body()

    def add_continuation(self, line_number, keyword, line):
        # Not a continuation of this directive: different keyword, or the
        # body does not end in '\'.
        if keyword != self.keyword or not self.needs_continuation():
            return False
        if not line.strip():
            raise ValueError("Substitution's continuation is empty")
        # Append line. Replace the '\' and any adjacent whitespace with a
        # single space.
        self.body = self.body.rstrip()[:-1].rstrip() + " " + line.lstrip()
        self.end_line_number = line_number
        # Re-parse now that more content is available.
        self._parse_body()
        return True

    def needs_continuation(self):
        # [-1:] (not [-1]) so an empty body safely yields '' rather than
        # raising IndexError.
        return self.body.rstrip()[-1:] == "\\"

    def _parse_body(self):
        """
        If no more line continuations are required, parse all the directive's
        accumulated lines in order to identify the substitution's name and full
        value, and raise an exception if invalid.
        """
        if self.needs_continuation():
            return

        # Extract the left-hand side and value, and discard any whitespace
        # enclosing each.
        parts = self.body.split("=", 1)
        if len(parts) == 1:
            raise ValueError("Substitution's definition does not contain '='")
        self.name = parts[0].strip()
        self.value = parts[1].strip()

        # Check the substitution's name.
        #
        # Do not extend this to permit '.' or any sequence that's special in a
        # python pattern. We could escape that automatically for
        # DEFINE/REDEFINE directives in test files. However, lit configuration
        # file authors would still have to remember to escape them manually in
        # substitution names but not in values. Moreover, the manually chosen
        # and automatically chosen escape sequences would have to be consistent
        # (e.g., '\.' vs. '[.]') in order for REDEFINE to successfully redefine
        # a substitution previously defined by a lit configuration file. All
        # this seems too error prone and confusing to be worthwhile. If you
        # want your name to express structure, use ':' instead of '.'.
        #
        # Actually, '{' and '}' are special if they contain only digits possibly
        # separated by a comma. Requiring a leading letter avoids that.
        if not re.fullmatch(r"%{[_a-zA-Z][-_:0-9a-zA-Z]*}", self.name):
            raise ValueError(
                f"Substitution name '{self.name}' is malformed as it must "
                f"start with '%{{', it must end with '}}', and the rest must "
                f"start with a letter or underscore and contain only "
                f"alphanumeric characters, hyphens, underscores, and colons"
            )

    def adjust_substitutions(self, substitutions):
        """
        Modify the specified substitution list as specified by this directive.

        Raises ValueError when the definition/redefinition conflicts with the
        substitutions already present (duplicate, ambiguous, or missing
        pattern).
        """
        assert (
            not self.needs_continuation()
        ), "expected directive continuations to be parsed before applying"
        # The value is used as a regex replacement string later, so escape
        # backslashes to keep them literal.
        value_repl = self.value.replace("\\", "\\\\")
        # Any existing substitution whose *pattern* merely contains the name
        # would interfere, so search by containment, not equality.
        existing = [i for i, subst in enumerate(substitutions) if self.name in subst[0]]
        existing_res = "".join(
            "\nExisting pattern: " + substitutions[i][0] for i in existing
        )
        if self.new_subst:
            # DEFINE: the name must not collide with any existing pattern.
            if existing:
                raise ValueError(
                    f"Substitution whose pattern contains '{self.name}' is "
                    f"already defined before '{self.keyword}' directive "
                    f"{self.get_location()}"
                    f"{existing_res}"
                )
            # Insert at the front so the new substitution takes precedence.
            substitutions.insert(0, (self.name, value_repl))
            return
        # REDEFINE: exactly one existing substitution must match, and its
        # pattern must be exactly this name.
        if len(existing) > 1:
            raise ValueError(
                f"Multiple substitutions whose patterns contain '{self.name}' "
                f"are defined before '{self.keyword}' directive "
                f"{self.get_location()}"
                f"{existing_res}"
            )
        if not existing:
            raise ValueError(
                f"No substitution for '{self.name}' is defined before "
                f"'{self.keyword}' directive {self.get_location()}"
            )
        if substitutions[existing[0]][0] != self.name:
            raise ValueError(
                f"Existing substitution whose pattern contains '{self.name}' "
                f"does not have the pattern specified by '{self.keyword}' "
                f"directive {self.get_location()}\n"
                f"Expected pattern: {self.name}"
                f"{existing_res}"
            )
        substitutions[existing[0]] = (self.name, value_repl)
|
| 1626 |
+
|
| 1627 |
+
|
| 1628 |
+
def applySubstitutions(script, substitutions, conditions={}, recursion_limit=None):
    """
    Apply substitutions to the script. Allow full regular expression syntax.
    Replace each matching occurrence of regular expression pattern a with
    substitution b in line ln.

    If a substitution expands into another substitution, it is expanded
    recursively until the line has no more expandable substitutions. If
    the line can still be substituted after being substituted
    `recursion_limit` times, it is an error. If the `recursion_limit` is
    `None` (the default), no recursive substitution is performed at all.
    """
    # NOTE(review): `conditions={}` is a mutable default, but it is only read
    # (passed to BooleanExpression.evaluate), never mutated here.

    # We use #_MARKER_# to hide %% while we do the other substitutions.
    def escapePercents(ln):
        return _caching_re_compile("%%").sub("#_MARKER_#", ln)

    def unescapePercents(ln):
        return _caching_re_compile("#_MARKER_#").sub("%", ln)

    def substituteIfElse(ln):
        # early exit to avoid wasting time on lines without
        # conditional substitutions
        if ln.find("%if ") == -1:
            return ln

        def tryParseIfCond(ln):
            # space is important to not conflict with other (possible)
            # substitutions
            if not ln.startswith("%if "):
                return None, ln
            ln = ln[4:]

            # stop at '%{'
            match = _caching_re_compile("%{").search(ln)
            if not match:
                raise ValueError("'%{' is missing for %if substitution")
            cond = ln[: match.start()]

            # eat '%{' as well
            ln = ln[match.end() :]
            return cond, ln

        def tryParseElse(ln):
            match = _caching_re_compile(r"^\s*%else\s*(%{)?").search(ln)
            if not match:
                return False, ln
            if not match.group(1):
                raise ValueError("'%{' is missing for %else substitution")
            return True, ln[match.end() :]

        def tryParseEnd(ln):
            if ln.startswith("%}"):
                return True, ln[2:]
            return False, ln

        def parseText(ln, isNested):
            # parse everything until %if, or %} if we're parsing a
            # nested expression.
            match = _caching_re_compile(
                "(.*?)(?:%if|%})" if isNested else "(.*?)(?:%if)"
            ).search(ln)
            if not match:
                # there is no terminating pattern, so treat the whole
                # line as text
                return ln, ""
            text_end = match.end(1)
            return ln[:text_end], ln[text_end:]

        def parseRecursive(ln, isNested):
            # Consume ln, concatenating plain text and the taken branch of
            # each %if/%else; returns (expanded text, unconsumed remainder).
            result = ""
            while len(ln):
                if isNested:
                    found_end, _ = tryParseEnd(ln)
                    if found_end:
                        break

                # %if cond %{ branch_if %} %else %{ branch_else %}
                cond, ln = tryParseIfCond(ln)
                if cond:
                    branch_if, ln = parseRecursive(ln, isNested=True)
                    found_end, ln = tryParseEnd(ln)
                    if not found_end:
                        raise ValueError("'%}' is missing for %if substitution")

                    branch_else = ""
                    found_else, ln = tryParseElse(ln)
                    if found_else:
                        branch_else, ln = parseRecursive(ln, isNested=True)
                        found_end, ln = tryParseEnd(ln)
                        if not found_end:
                            raise ValueError("'%}' is missing for %else substitution")

                    if BooleanExpression.evaluate(cond, conditions):
                        result += branch_if
                    else:
                        result += branch_else
                    continue

                # The rest is handled as plain text.
                text, ln = parseText(ln, isNested)
                result += text

            return result, ln

        result, ln = parseRecursive(ln, isNested=False)
        assert len(ln) == 0
        return result

    def processLine(ln):
        # Apply substitutions
        ln = substituteIfElse(escapePercents(ln))
        for a, b in substitutions:
            if kIsWindows:
                b = b.replace("\\", "\\\\")
            # re.compile() has a built-in LRU cache with 512 entries. In some
            # test suites lit ends up thrashing that cache, which made e.g.
            # check-llvm run 50% slower. Use an explicit, unbounded cache
            # to prevent that from happening. Since lit is fairly
            # short-lived, since the set of substitutions is fairly small, and
            # since thrashing has such bad consequences, not bounding the cache
            # seems reasonable.
            ln = _caching_re_compile(a).sub(str(b), escapePercents(ln))

        # Strip the trailing newline and any extra whitespace.
        return ln.strip()

    def processLineToFixedPoint(ln):
        # Re-run processLine until the line stops changing or the recursion
        # budget is exhausted.
        assert isinstance(recursion_limit, int) and recursion_limit >= 0
        origLine = ln
        steps = 0
        processed = processLine(ln)
        while processed != ln and steps < recursion_limit:
            ln = processed
            processed = processLine(ln)
            steps += 1

        if processed != ln:
            raise ValueError(
                "Recursive substitution of '%s' did not complete "
                "in the provided recursion limit (%s)" % (origLine, recursion_limit)
            )

        return processed

    # Choose the single-pass or fixed-point expander once, up front.
    process = processLine if recursion_limit is None else processLineToFixedPoint
    output = []
    for directive in script:
        if isinstance(directive, SubstDirective):
            # DEFINE/REDEFINE mutate the substitution list in place; they
            # produce no output line themselves.
            directive.adjust_substitutions(substitutions)
        else:
            if isinstance(directive, CommandDirective):
                line = directive.command
            else:
                # Can come from preamble_commands.
                assert isinstance(directive, str)
                line = directive
            output.append(unescapePercents(process(line)))

    return output
|
| 1788 |
+
|
| 1789 |
+
|
| 1790 |
+
class ParserKind(object):
    """
    An enumeration of the styles of integrated test keywords/commands.

    TAG: A keyword taking no value. Ex 'END.'
    COMMAND: A keyword taking a list of shell commands. Ex 'RUN:'
    LIST: A keyword taking a comma-separated list of values.
    SPACE_LIST: A keyword taking a space-separated list of values.
    BOOLEAN_EXPR: A keyword taking a comma-separated list of
      boolean expressions. Ex 'XFAIL:'
    INTEGER: A keyword taking a single integer. Ex 'ALLOW_RETRIES:'
    CUSTOM: A keyword with custom parsing semantics.
    DEFINE: A keyword taking a new lit substitution definition. Ex
      'DEFINE: %{name}=value'
    REDEFINE: A keyword taking a lit substitution redefinition. Ex
      'REDEFINE: %{name}=value'
    """

    TAG = 0
    COMMAND = 1
    LIST = 2
    SPACE_LIST = 3
    BOOLEAN_EXPR = 4
    INTEGER = 5
    CUSTOM = 6
    DEFINE = 7
    REDEFINE = 8

    @staticmethod
    def allowedKeywordSuffixes(value):
        """Return the list of keyword-terminating characters valid for a kind."""
        suffixes = {
            ParserKind.TAG: ["."],
            ParserKind.COMMAND: [":"],
            ParserKind.LIST: [":"],
            ParserKind.SPACE_LIST: [":"],
            ParserKind.BOOLEAN_EXPR: [":"],
            ParserKind.INTEGER: [":"],
            ParserKind.CUSTOM: [":", "."],
            ParserKind.DEFINE: [":"],
            ParserKind.REDEFINE: [":"],
        }
        return suffixes[value]

    @staticmethod
    def str(value):
        """Return the human-readable name of a kind, for diagnostics."""
        names = {
            ParserKind.TAG: "TAG",
            ParserKind.COMMAND: "COMMAND",
            ParserKind.LIST: "LIST",
            ParserKind.SPACE_LIST: "SPACE_LIST",
            ParserKind.BOOLEAN_EXPR: "BOOLEAN_EXPR",
            ParserKind.INTEGER: "INTEGER",
            ParserKind.CUSTOM: "CUSTOM",
            ParserKind.DEFINE: "DEFINE",
            ParserKind.REDEFINE: "REDEFINE",
        }
        return names[value]
|
| 1846 |
+
|
| 1847 |
+
|
| 1848 |
+
class IntegratedTestKeywordParser(object):
    """A parser for LLVM/Clang style integrated test scripts.

    keyword: The keyword to parse for. It must end in either '.' or ':'.
    kind: A value of ParserKind.
    parser: A custom parser. This value may only be specified with
      ParserKind.CUSTOM.
    """

    def __init__(self, keyword, kind, parser=None, initial_value=None):
        # Validate that the keyword's terminator matches its kind
        # (e.g., COMMAND keywords must end in ':', TAG keywords in '.').
        allowedSuffixes = ParserKind.allowedKeywordSuffixes(kind)
        if len(keyword) == 0 or keyword[-1] not in allowedSuffixes:
            if len(allowedSuffixes) == 1:
                raise ValueError(
                    "Keyword '%s' of kind '%s' must end in '%s'"
                    % (keyword, ParserKind.str(kind), allowedSuffixes[0])
                )
            else:
                raise ValueError(
                    "Keyword '%s' of kind '%s' must end in "
                    " one of '%s'"
                    % (keyword, ParserKind.str(kind), " ".join(allowedSuffixes))
                )

        if parser is not None and kind != ParserKind.CUSTOM:
            raise ValueError(
                "custom parsers can only be specified with " "ParserKind.CUSTOM"
            )
        self.keyword = keyword
        self.kind = kind
        # (line_number, line) pairs seen so far, kept for diagnostics.
        self.parsed_lines = []
        # Accumulated parse result; threaded through self.parser on each line.
        self.value = initial_value
        self.parser = parser

        # Bind the handler for this kind. The lambdas capture self.keyword so
        # the shared class helpers can report which keyword they served.
        if kind == ParserKind.COMMAND:
            self.parser = lambda line_number, line, output: self._handleCommand(
                line_number, line, output, self.keyword
            )
        elif kind == ParserKind.LIST:
            self.parser = self._handleList
        elif kind == ParserKind.SPACE_LIST:
            self.parser = self._handleSpaceList
        elif kind == ParserKind.BOOLEAN_EXPR:
            self.parser = self._handleBooleanExpr
        elif kind == ParserKind.INTEGER:
            self.parser = self._handleSingleInteger
        elif kind == ParserKind.TAG:
            self.parser = self._handleTag
        elif kind == ParserKind.CUSTOM:
            if parser is None:
                raise ValueError("ParserKind.CUSTOM requires a custom parser")
            self.parser = parser
        elif kind == ParserKind.DEFINE:
            self.parser = lambda line_number, line, output: self._handleSubst(
                line_number, line, output, self.keyword, new_subst=True
            )
        elif kind == ParserKind.REDEFINE:
            self.parser = lambda line_number, line, output: self._handleSubst(
                line_number, line, output, self.keyword, new_subst=False
            )
        else:
            raise ValueError("Unknown kind '%s'" % kind)

    def parseLine(self, line_number, line):
        """Feed one keyword line into the parser, annotating any error with
        the keyword and line number."""
        try:
            self.parsed_lines += [(line_number, line)]
            self.value = self.parser(line_number, line, self.value)
        except ValueError as e:
            raise ValueError(
                str(e)
                + ("\nin %s directive on test line %d" % (self.keyword, line_number))
            )

    def getValue(self):
        """Return the accumulated parse result (type depends on kind)."""
        return self.value

    @staticmethod
    def _handleTag(line_number, line, output):
        """A helper for parsing TAG type keywords"""
        # True once a bare tag line (no trailing content) has been seen.
        return not line.strip() or output

    @staticmethod
    def _substituteLineNumbers(line_number, line):
        # Expand %(line) and %(line+N)/%(line-N) to concrete line numbers.
        line = re.sub(r"%\(line\)", str(line_number), line)

        def replace_line_number(match):
            if match.group(1) == "+":
                return str(line_number + int(match.group(2)))
            if match.group(1) == "-":
                return str(line_number - int(match.group(2)))

        return re.sub(r"%\(line *([\+-]) *(\d+)\)", replace_line_number, line)

    @classmethod
    def _handleCommand(cls, line_number, line, output, keyword):
        """A helper for parsing COMMAND type keywords"""
        # Substitute line number expressions.
        line = cls._substituteLineNumbers(line_number, line)

        # Collapse lines with trailing '\\', or add line with line number to
        # start a new pipeline.
        if not output or not output[-1].add_continuation(line_number, keyword, line):
            if output is None:
                output = []
            line = buildPdbgCommand(f"{keyword} at line {line_number}", line)
            output.append(CommandDirective(line_number, line_number, keyword, line))
        return output

    @staticmethod
    def _handleList(line_number, line, output):
        """A parser for LIST type keywords"""
        if output is None:
            output = []
        output.extend([s.strip() for s in line.split(",")])
        return output

    @staticmethod
    def _handleSpaceList(line_number, line, output):
        """A parser for SPACE_LIST type keywords"""
        if output is None:
            output = []
        output.extend([s.strip() for s in line.split(" ") if s.strip() != ""])
        return output

    @staticmethod
    def _handleSingleInteger(line_number, line, output):
        """A parser for INTEGER type keywords"""
        if output is None:
            output = []
        try:
            n = int(line)
        except ValueError:
            raise ValueError(
                "INTEGER parser requires the input to be an integer (got {})".format(
                    line
                )
            )
        output.append(n)
        return output

    @staticmethod
    def _handleBooleanExpr(line_number, line, output):
        """A parser for BOOLEAN_EXPR type keywords"""
        parts = [s.strip() for s in line.split(",") if s.strip() != ""]
        # A previous entry ending in '\' continues onto this line: splice the
        # first new part into it.
        if output and output[-1][-1] == "\\":
            output[-1] = output[-1][:-1] + parts[0]
            del parts[0]
        if output is None:
            output = []
        output.extend(parts)
        # Evaluate each expression to verify syntax.
        # We don't want any results, just the raised ValueError.
        for s in output:
            if s != "*" and not s.endswith("\\"):
                BooleanExpression.evaluate(s, [])
        return output

    @classmethod
    def _handleSubst(cls, line_number, line, output, keyword, new_subst):
        """A parser for DEFINE and REDEFINE type keywords"""
        line = cls._substituteLineNumbers(line_number, line)
        # Continuation of the previous DEFINE/REDEFINE, if any.
        if output and output[-1].add_continuation(line_number, keyword, line):
            return output
        if output is None:
            output = []
        output.append(
            SubstDirective(line_number, line_number, keyword, new_subst, line)
        )
        return output
|
| 2017 |
+
|
| 2018 |
+
|
| 2019 |
+
def _parseKeywords(sourcepath, additional_parsers=(), require_script=True):
    """_parseKeywords

    Scan an LLVM/Clang style integrated test script and extract all the lines
    pertaining to a special parser. This includes 'RUN', 'XFAIL', 'REQUIRES',
    'UNSUPPORTED', 'ALLOW_RETRIES', 'END', 'DEFINE', 'REDEFINE', as well as
    other specified custom parsers.

    Returns a dictionary mapping each keyword to its parser's value after
    parsing the test. Raises ValueError for malformed input (e.g., no 'RUN:'
    line when require_script is True, or unterminated line continuations).
    """
    # Install the built-in keyword parsers. RUN/DEFINE/REDEFINE share the
    # same `script` list so commands and substitution directives stay
    # interleaved in source order.
    script = []
    builtin_parsers = [
        IntegratedTestKeywordParser("RUN:", ParserKind.COMMAND, initial_value=script),
        IntegratedTestKeywordParser("XFAIL:", ParserKind.BOOLEAN_EXPR),
        IntegratedTestKeywordParser("REQUIRES:", ParserKind.BOOLEAN_EXPR),
        IntegratedTestKeywordParser("UNSUPPORTED:", ParserKind.BOOLEAN_EXPR),
        IntegratedTestKeywordParser("ALLOW_RETRIES:", ParserKind.INTEGER),
        IntegratedTestKeywordParser("END.", ParserKind.TAG),
        IntegratedTestKeywordParser("DEFINE:", ParserKind.DEFINE, initial_value=script),
        IntegratedTestKeywordParser(
            "REDEFINE:", ParserKind.REDEFINE, initial_value=script
        ),
    ]
    keyword_parsers = {p.keyword: p for p in builtin_parsers}

    # Install user-defined additional parsers.
    for parser in additional_parsers:
        if not isinstance(parser, IntegratedTestKeywordParser):
            raise ValueError(
                "Additional parser must be an instance of "
                "IntegratedTestKeywordParser"
            )
        if parser.keyword in keyword_parsers:
            raise ValueError("Parser for keyword '%s' already exists" % parser.keyword)
        keyword_parsers[parser.keyword] = parser

    # Collect the test lines from the script.
    for line_number, command_type, ln in parseIntegratedTestScriptCommands(
        sourcepath, keyword_parsers.keys()
    ):
        parser = keyword_parsers[command_type]
        parser.parseLine(line_number, ln)
        # A bare 'END.' line stops keyword scanning early.
        if command_type == "END." and parser.getValue() is True:
            break

    # Verify the script contains a run line.
    if require_script and not any(
        isinstance(directive, CommandDirective) for directive in script
    ):
        raise ValueError("Test has no 'RUN:' line")

    # Check for unterminated run or subst lines.
    #
    # If, after a line continuation for one kind of directive (e.g., 'RUN:',
    # 'DEFINE:', 'REDEFINE:') in script, the next directive in script is a
    # different kind, then the '\\' remains on the former, and we report it
    # here.
    for directive in script:
        if directive.needs_continuation():
            raise ValueError(
                f"Test has unterminated '{directive.keyword}' "
                f"directive (with '\\') "
                f"{directive.get_location()}"
            )

    # Check boolean expressions for unterminated lines.
    for key, kp in keyword_parsers.items():
        if kp.kind != ParserKind.BOOLEAN_EXPR:
            continue
        value = kp.getValue()
        if value and value[-1][-1] == "\\":
            raise ValueError(
                "Test has unterminated '{key}' lines (with '\\')".format(key=key)
            )

    # Make sure there's at most one ALLOW_RETRIES: line
    allowed_retries = keyword_parsers["ALLOW_RETRIES:"].getValue()
    if allowed_retries and len(allowed_retries) > 1:
        raise ValueError("Test has more than one ALLOW_RETRIES lines")

    return {p.keyword: p.getValue() for p in keyword_parsers.values()}
|
| 2103 |
+
|
| 2104 |
+
|
| 2105 |
+
def parseIntegratedTestScript(test, additional_parsers=(), require_script=True):
    """parseIntegratedTestScript - Scan an LLVM/Clang style integrated test
    script and extract the lines to 'RUN' as well as 'XFAIL', 'REQUIRES',
    'UNSUPPORTED' and 'ALLOW_RETRIES' information into the given test.

    If additional parsers are specified then the test is also scanned for the
    keywords they specify and all matches are passed to the custom parser.

    If 'require_script' is False an empty script
    may be returned. This can be used for test formats where the actual script
    is optional or ignored.

    Returns either the parsed script (a list of directives/strings) or a
    lit.Test.Result (UNRESOLVED on parse errors, UNSUPPORTED when feature
    constraints are not met).
    """
    # Parse the test sources and extract test properties
    try:
        parsed = _parseKeywords(
            test.getSourcePath(), additional_parsers, require_script
        )
    except ValueError as e:
        return lit.Test.Result(Test.UNRESOLVED, str(e))
    # RUN:, DEFINE:, and REDEFINE: all accumulate into the same list, so the
    # three entries must alias each other.
    script = parsed["RUN:"] or []
    assert parsed["DEFINE:"] == script
    assert parsed["REDEFINE:"] == script
    test.xfails += parsed["XFAIL:"] or []
    test.requires += parsed["REQUIRES:"] or []
    test.unsupported += parsed["UNSUPPORTED:"] or []
    if parsed["ALLOW_RETRIES:"]:
        # _parseKeywords guarantees at most one ALLOW_RETRIES: line.
        test.allowed_retries = parsed["ALLOW_RETRIES:"][0]

    # Enforce REQUIRES:
    missing_required_features = test.getMissingRequiredFeatures()
    if missing_required_features:
        msg = ", ".join(missing_required_features)
        return lit.Test.Result(
            Test.UNSUPPORTED,
            "Test requires the following unavailable " "features: %s" % msg,
        )

    # Enforce UNSUPPORTED:
    unsupported_features = test.getUnsupportedFeatures()
    if unsupported_features:
        msg = ", ".join(unsupported_features)
        return lit.Test.Result(
            Test.UNSUPPORTED,
            "Test does not support the following features " "and/or targets: %s" % msg,
        )

    # Enforce limit_to_features.
    if not test.isWithinFeatureLimits():
        msg = ", ".join(test.config.limit_to_features)
        return lit.Test.Result(
            Test.UNSUPPORTED,
            "Test does not require any of the features "
            "specified in limit_to_features: %s" % msg,
        )

    return script
|
| 2161 |
+
|
| 2162 |
+
|
| 2163 |
+
def _runShTest(test, litConfig, useExternalSh, script, tmpBase) -> lit.Test.Result:
    """Execute *script* for *test* and build a lit Test.Result.

    The script is run up to ``test.allowed_retries + 1`` times; a pass after
    a failed attempt is reported as FLAKYPASS.  Output from the final attempt
    (exit code, optional timeout info, stdout, stderr) is folded into the
    Result's log text.
    """

    # Always returns the tuple (out, err, exitCode, timeoutInfo, status).
    def runOnce(
        execdir,
    ) -> Tuple[str, str, int, Optional[str], Test.ResultCode]:
        # script is modified below (for litConfig.per_test_coverage, and for
        # %dbg expansions). runOnce can be called multiple times, but applying
        # the modifications multiple times can corrupt script, so always modify
        # a copy.
        scriptCopy = script[:]
        # Set unique LLVM_PROFILE_FILE for each run command
        if litConfig.per_test_coverage:
            # Extract the test case name from the test object, and remove the
            # file extension.
            test_case_name = test.path_in_suite[-1]
            test_case_name = test_case_name.rsplit(".", 1)[0]
            coverage_index = 0  # Counter for coverage file index
            for i, ln in enumerate(scriptCopy):
                # %dbg-wrapped lines are unwrapped, prefixed with the profile
                # export, then re-wrapped so the debug annotation survives.
                match = re.fullmatch(kPdbgRegex, ln)
                if match:
                    dbg = match.group(1)
                    command = match.group(2)
                else:
                    command = ln
                profile = f"{test_case_name}{coverage_index}.profraw"
                coverage_index += 1
                command = f"export LLVM_PROFILE_FILE={profile}; {command}"
                if match:
                    command = buildPdbgCommand(dbg, command)
                scriptCopy[i] = command

        try:
            if useExternalSh:
                res = executeScript(test, litConfig, tmpBase, scriptCopy, execdir)
            else:
                res = executeScriptInternal(
                    test, litConfig, tmpBase, scriptCopy, execdir
                )
        except ScriptFatal as e:
            # A fatal script error is not retryable: report it as UNRESOLVED
            # with the message rendered as '#'-prefixed log lines.
            out = f"# " + "\n# ".join(str(e).splitlines()) + "\n"
            return out, "", 1, None, Test.UNRESOLVED

        out, err, exitCode, timeoutInfo = res
        if exitCode == 0:
            status = Test.PASS
        else:
            if timeoutInfo is None:
                status = Test.FAIL
            else:
                status = Test.TIMEOUT
        return out, err, exitCode, timeoutInfo, status

    # Create the output directory if it does not already exist.
    lit.util.mkdir_p(os.path.dirname(tmpBase))

    # Re-run failed tests up to test.allowed_retries times.
    execdir = os.path.dirname(test.getExecPath())
    attempts = test.allowed_retries + 1
    for i in range(attempts):
        res = runOnce(execdir)
        out, err, exitCode, timeoutInfo, status = res
        if status != Test.FAIL:
            break

    # If we had to run the test more than once, count it as a flaky pass. These
    # will be printed separately in the test summary.
    if i > 0 and status == Test.PASS:
        status = Test.FLAKYPASS

    # Form the output log.
    output = f"Exit Code: {exitCode}\n"

    if timeoutInfo is not None:
        output += """Timeout: %s\n""" % (timeoutInfo,)
    output += "\n"

    # Append the outputs, if present.
    if out:
        output += """Command Output (stdout):\n--\n%s\n--\n""" % (out,)
    if err:
        output += """Command Output (stderr):\n--\n%s\n--\n""" % (err,)

    return lit.Test.Result(status, output)
|
| 2246 |
+
|
| 2247 |
+
|
| 2248 |
+
def executeShTest(
    test, litConfig, useExternalSh, extra_substitutions=None, preamble_commands=None
):
    """Run *test* as an integrated shell test.

    Parses the RUN lines out of the test source, applies substitutions, and
    delegates execution to _runShTest.  *preamble_commands* are prepended to
    the parsed script (wrapped as %dbg "preamble command line" entries);
    *extra_substitutions* are applied before the default ones.

    Returns a lit.Test.Result.
    """
    # Fix: the original used mutable default arguments ([]) which are shared
    # across calls; normalize None here instead.
    if extra_substitutions is None:
        extra_substitutions = []
    if preamble_commands is None:
        preamble_commands = []

    if test.config.unsupported:
        return lit.Test.Result(Test.UNSUPPORTED, "Test is unsupported")

    script = [buildPdbgCommand("preamble command line", ln) for ln in preamble_commands]

    # A preamble-only test need not have RUN lines of its own.
    parsed = parseIntegratedTestScript(test, require_script=not script)
    if isinstance(parsed, lit.Test.Result):
        # Parsing failed or enforced an UNSUPPORTED/UNRESOLVED verdict.
        return parsed
    script += parsed

    if litConfig.noExecute:
        return lit.Test.Result(Test.PASS)

    tmpDir, tmpBase = getTempPaths(test)
    substitutions = list(extra_substitutions)
    substitutions += getDefaultSubstitutions(
        test, tmpDir, tmpBase, normalize_slashes=useExternalSh
    )
    conditions = {feature: True for feature in test.config.available_features}
    script = applySubstitutions(
        script,
        substitutions,
        conditions,
        recursion_limit=test.config.recursiveExpansionLimit,
    )

    return _runShTest(test, litConfig, useExternalSh, script, tmpBase)
|
wemm/lib/python3.10/site-packages/lit/TestTimes.py
ADDED
|
@@ -0,0 +1,40 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def read_test_times(suite):
    """Load previously recorded per-test timings for *suite*.

    Looks for ``.lit_test_times.txt`` in the suite's exec root first, falling
    back to its source root.  Returns a dict mapping '/'-joined test paths to
    elapsed seconds (negative values mark tests that failed last run); an
    empty dict when no timing file exists.
    """
    recorded = {}
    times_path = os.path.join(suite.exec_root, ".lit_test_times.txt")
    if not os.path.exists(times_path):
        times_path = os.path.join(suite.source_root, ".lit_test_times.txt")
    if os.path.exists(times_path):
        with open(times_path, "r") as handle:
            for entry in handle:
                # Each line is "<seconds> <test path>"; the path may itself
                # contain spaces, hence maxsplit=1.
                duration, test_path = entry.split(maxsplit=1)
                recorded[test_path.strip("\n")] = float(duration)
    return recorded
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
def record_test_times(tests, lit_config):
    """Persist elapsed times for *tests* into per-suite timing files.

    For every suite touched, merges the new timings into any existing
    ``.lit_test_times.txt`` under the suite's exec root and rewrites it.
    Failures get a negated elapsed time so the next run can deprioritize
    them.  Write errors are reported via lit_config.warning and skipped.
    """
    times_by_suite = {}
    for t in tests:
        assert t.suite.test_times is None
        if t.result.elapsed is None:
            continue
        if t.suite.exec_root not in times_by_suite:
            # Seed with the previously recorded times so entries for tests
            # not run this time survive the rewrite.
            times_by_suite[t.suite.exec_root] = read_test_times(t.suite)
        time = -t.result.elapsed if t.isFailure() else t.result.elapsed
        # The "path" here is only used as a key into a dictionary. It is never
        # used as an actual path to a filesystem API, therefore we use '/' as
        # the canonical separator so that Unix and Windows machines can share
        # timing data.
        times_by_suite[t.suite.exec_root]["/".join(t.path_in_suite)] = time

    for s, value in times_by_suite.items():
        # Compute the path outside the try so the warning below can never hit
        # an unbound name; the original also used a bare "except:" which would
        # swallow KeyboardInterrupt/SystemExit.
        path = os.path.join(s, ".lit_test_times.txt")
        try:
            with open(path, "w") as time_file:
                for name, time in value.items():
                    time_file.write(("%e" % time) + " " + name + "\n")
        except Exception:
            # Best-effort: losing timing data must not fail the run.
            lit_config.warning("Could not save test time: " + path)
            continue
|
wemm/lib/python3.10/site-packages/lit/TestingConfig.py
ADDED
|
@@ -0,0 +1,267 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
|
| 4 |
+
|
| 5 |
+
class TestingConfig(object):
    """
    TestingConfig - Information on the tests inside a suite.

    A config object is normally created with fromdefaults() and then mutated
    by executing one or more lit config scripts against it via
    load_from_path().
    """

    @staticmethod
    def fromdefaults(litConfig):
        """
        fromdefaults(litConfig) -> TestingConfig

        Create a TestingConfig object with default values.
        """
        # Set the environment based on the command line arguments.
        environment = {
            "PATH": os.pathsep.join(litConfig.path + [os.environ.get("PATH", "")]),
            "LLVM_DISABLE_CRASH_REPORT": "1",
        }

        # Host environment variables that are forwarded into the test
        # environment when they are set to a non-empty value.
        pass_vars = [
            "LIBRARY_PATH",
            "LD_LIBRARY_PATH",
            "SYSTEMROOT",
            "TERM",
            "CLANG",
            "LLDB",
            "LD_PRELOAD",
            "LLVM_SYMBOLIZER_PATH",
            "LLVM_PROFILE_FILE",
            "ASAN_SYMBOLIZER_PATH",
            "HWASAN_SYMBOLIZER_PATH",
            "LSAN_SYMBOLIZER_PATH",
            "MSAN_SYMBOLIZER_PATH",
            "TSAN_SYMBOLIZER_PATH",
            "UBSAN_SYMBOLIZER_PATH",
            "ASAN_OPTIONS",
            "LSAN_OPTIONS",
            "HWASAN_OPTIONS",
            "MSAN_OPTIONS",
            "TSAN_OPTIONS",
            "UBSAN_OPTIONS",
            "ADB",
            "ADB_SERVER_SOCKET",
            "ANDROID_SERIAL",
            "SSH_AUTH_SOCK",
            "SANITIZER_IGNORE_CVE_2016_2143",
            "TMPDIR",
            "TMP",
            "TEMP",
            "TEMPDIR",
            "AVRLIT_BOARD",
            "AVRLIT_PORT",
            "FILECHECK_OPTS",
            "VCINSTALLDIR",
            "VCToolsinstallDir",
            "VSINSTALLDIR",
            "WindowsSdkDir",
            "WindowsSDKLibVersion",
            "SOURCE_DATE_EPOCH",
            "GTEST_FILTER",
            "DFLTCC",
        ]

        if sys.platform.startswith("aix"):
            pass_vars += ["LIBPATH"]
        elif sys.platform == "win32":
            pass_vars += [
                "COMSPEC",
                "INCLUDE",
                "LIB",
                "PATHEXT",
                "USERPROFILE",
            ]
            # NOTE(review): Python's actual switch is PYTHONUNBUFFERED; this
            # variable name looks like a typo but is kept for behavior parity.
            environment["PYTHONBUFFERED"] = "1"
            # Avoid Windows heuristics which try to detect potential installer
            # programs (which may need to run with elevated privileges) and ask
            # if the user wants to run them in that way. This heuristic may
            # match for executables containing the substrings "patch" (which is
            # a substring of "dispatch"), "update", "setup", etc. Set this
            # environment variable indicating that we want to execute them with
            # the current user.
            environment["__COMPAT_LAYER"] = "RunAsInvoker"

        for var in pass_vars:
            val = os.environ.get(var, "")
            # Check for empty string as some variables such as LD_PRELOAD cannot be empty
            # ('') for OS's such as OpenBSD.
            if val:
                environment[var] = val

        # Set the default available features based on the LitConfig.
        available_features = []
        if litConfig.useValgrind:
            available_features.append("valgrind")
            if litConfig.valgrindLeakCheck:
                available_features.append("vg_leak")

        return TestingConfig(
            None,
            name="<unnamed>",
            suffixes=set(),
            test_format=None,
            environment=environment,
            substitutions=[],
            unsupported=False,
            test_exec_root=None,
            test_source_root=None,
            excludes=[],
            available_features=available_features,
            pipefail=True,
            standalone_tests=False,
        )

    def load_from_path(self, path, litConfig):
        """
        load_from_path(path, litConfig)

        Load the configuration module at the provided path into the given config
        object.
        """

        # Load the config script data.  Fix: use a context manager so the file
        # handle is closed even when read() raises (the original leaked it),
        # and catch Exception rather than a bare except so Ctrl-C propagates.
        try:
            with open(path) as f:
                data = f.read()
        except Exception:
            litConfig.fatal("unable to load config file: %r" % (path,))

        # Execute the config script to initialize the object.
        cfg_globals = dict(globals())
        cfg_globals["config"] = self
        cfg_globals["lit_config"] = litConfig
        cfg_globals["__file__"] = path
        try:
            exec(compile(data, path, "exec"), cfg_globals, None)
            if litConfig.debug:
                litConfig.note("... loaded config %r" % path)
        except SystemExit:
            e = sys.exc_info()[1]
            # We allow normal system exit inside a config file to just
            # return control without error.
            if e.args:
                raise
        except Exception:
            import traceback

            litConfig.fatal(
                "unable to parse config file %r, traceback: %s"
                % (path, traceback.format_exc())
            )
        self.finish(litConfig)

    def __init__(
        self,
        parent,
        name,
        suffixes,
        test_format,
        environment,
        substitutions,
        unsupported,
        test_exec_root,
        test_source_root,
        excludes,
        available_features,
        pipefail,
        limit_to_features=[],
        is_early=False,
        parallelism_group=None,
        standalone_tests=False,
    ):
        self.parent = parent
        self.name = str(name)
        self.suffixes = set(suffixes)
        self.test_format = test_format
        self.environment = dict(environment)
        self.substitutions = list(substitutions)
        self.unsupported = unsupported
        self.test_exec_root = test_exec_root
        self.test_source_root = test_source_root
        self.excludes = set(excludes)
        self.available_features = set(available_features)
        self.pipefail = pipefail
        self.standalone_tests = standalone_tests
        # This list is used by TestRunner.py to restrict running only tests that
        # require one of the features in this list if this list is non-empty.
        # Configurations can set this list to restrict the set of tests to run.
        self.limit_to_features = set(limit_to_features)
        self.parallelism_group = parallelism_group
        self._recursiveExpansionLimit = None

    @property
    def recursiveExpansionLimit(self):
        # None means "no recursive substitution expansion".
        return self._recursiveExpansionLimit

    @recursiveExpansionLimit.setter
    def recursiveExpansionLimit(self, value):
        if value is not None and not isinstance(value, int):
            raise ValueError(
                "recursiveExpansionLimit must be either None or an integer (got <{}>)".format(
                    value
                )
            )
        if isinstance(value, int) and value < 0:
            raise ValueError(
                "recursiveExpansionLimit must be a non-negative integer (got <{}>)".format(
                    value
                )
            )
        self._recursiveExpansionLimit = value

    def finish(self, litConfig):
        """finish() - Finish this config object, after loading is complete."""

        # Re-coerce the mutable fields: config scripts may have assigned
        # arbitrary iterables to them.
        self.name = str(self.name)
        self.suffixes = set(self.suffixes)
        self.environment = dict(self.environment)
        self.substitutions = list(self.substitutions)
        if self.test_exec_root is not None:
            # FIXME: This should really only be suite in test suite config
            # files. Should we distinguish them?
            self.test_exec_root = str(self.test_exec_root)
        if self.test_source_root is not None:
            # FIXME: This should really only be suite in test suite config
            # files. Should we distinguish them?
            self.test_source_root = str(self.test_source_root)
        self.excludes = set(self.excludes)

    @property
    def root(self):
        """root attribute - The root configuration for the test suite."""
        if self.parent is None:
            return self
        else:
            return self.parent.root
|
| 241 |
+
|
| 242 |
+
|
| 243 |
+
class SubstituteCaptures:
    """
    Helper class to indicate that a substitution contains backreferences.

    Use it in lit.cfg to mark a substitution's replacement text as having
    back-references::

        config.substutions.append(('\b[^ ]*.cpp', SubstituteCaptures('\0.txt')))

    """

    def __init__(self, substitution):
        self.substitution = substitution

    def replace(self, pattern, replacement):
        # Deliberately ignore both arguments: the stored text is returned
        # verbatim so its backreferences survive substitution processing.
        return self.substitution

    def __str__(self):
        return self.substitution

    def __len__(self):
        return len(self.substitution)

    def __getitem__(self, item):
        # Delegate indexing/slicing to the wrapped string.
        return self.substitution[item]
|
wemm/lib/python3.10/site-packages/lit/__init__.py
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""'lit' Testing Tool"""
|
| 2 |
+
|
| 3 |
+
__author__ = "Daniel Dunbar"
|
| 4 |
+
__email__ = "daniel@minormatter.com"
|
| 5 |
+
__versioninfo__ = (18, 1, 8)
|
| 6 |
+
__version__ = ".".join(str(v) for v in __versioninfo__)
|
| 7 |
+
|
| 8 |
+
__all__ = []
|
wemm/lib/python3.10/site-packages/lit/builtin_commands/__init__.py
ADDED
|
File without changes
|
wemm/lib/python3.10/site-packages/lit/builtin_commands/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (170 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/lit/builtin_commands/__pycache__/cat.cpython-310.pyc
ADDED
|
Binary file (1.74 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/lit/builtin_commands/cat.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import getopt
|
| 2 |
+
import sys
|
| 3 |
+
|
| 4 |
+
try:
|
| 5 |
+
from StringIO import StringIO
|
| 6 |
+
except ImportError:
|
| 7 |
+
from io import StringIO
|
| 8 |
+
|
| 9 |
+
|
| 10 |
+
def convertToCaretAndMNotation(data):
    """Render *data* the way GNU ``cat -v`` does.

    Tab (9) and newline (10) pass through unchanged.  Bytes above 127 are
    shown with an ``M-`` prefix after dropping the high bit; control bytes
    become ``^X`` and DEL becomes ``^?``.  Accepts bytes-like input (or a
    str, which is encoded as UTF-8 first) and returns bytes.
    """
    newdata = StringIO()
    if isinstance(data, str):
        # Fix: bytearray(str) without an encoding raises TypeError on
        # Python 3; encode as UTF-8 so str input is handled.
        data = bytearray(data, "utf-8")

    for intval in data:
        if intval == 9 or intval == 10:
            newdata.write(chr(intval))
            continue
        if intval > 127:
            intval = intval - 128
            newdata.write("M-")
        if intval < 32:
            newdata.write("^")
            newdata.write(chr(intval + 64))
        elif intval == 127:
            newdata.write("^?")
        else:
            newdata.write(chr(intval))

    return newdata.getvalue().encode()
|
| 31 |
+
|
| 32 |
+
|
| 33 |
+
def main(argv):
    """Entry point for lit's builtin ``cat`` command.

    Concatenates the files named on the command line to stdout in binary
    mode.  Supports ``-v`` / ``--show-nonprinting`` to render control and
    high-bit bytes in caret/M- notation.  Exits with status 1 on bad options
    or an unreadable file.
    """
    arguments = argv[1:]
    short_options = "v"
    long_options = ["show-nonprinting"]
    show_nonprinting = False

    try:
        options, filenames = getopt.gnu_getopt(arguments, short_options, long_options)
    except getopt.GetoptError as err:
        sys.stderr.write("Unsupported: 'cat': %s\n" % str(err))
        sys.exit(1)

    for option, value in options:
        if option == "-v" or option == "--show-nonprinting":
            show_nonprinting = True

    # Prefer the raw byte buffer (Python 3); fall back to sys.stdout itself.
    writer = getattr(sys.stdout, "buffer", None)
    if writer is None:
        writer = sys.stdout
        if sys.platform == "win32":
            import os, msvcrt

            # Stop Windows from translating newlines on the text stream.
            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    for filename in filenames:
        try:
            fileToCat = open(filename, "rb")
            contents = fileToCat.read()
            if show_nonprinting:
                contents = convertToCaretAndMNotation(contents)
            writer.write(contents)
            sys.stdout.flush()
            fileToCat.close()
        except IOError as error:
            sys.stderr.write(str(error))
            sys.exit(1)


if __name__ == "__main__":
    main(sys.argv)
|
wemm/lib/python3.10/site-packages/lit/builtin_commands/diff.py
ADDED
|
@@ -0,0 +1,307 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import difflib
|
| 2 |
+
import functools
|
| 3 |
+
import getopt
|
| 4 |
+
import io
|
| 5 |
+
import locale
|
| 6 |
+
import os
|
| 7 |
+
import re
|
| 8 |
+
import sys
|
| 9 |
+
|
| 10 |
+
import util
|
| 11 |
+
from util import to_string
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
class DiffFlags:
    """Holder for the command-line options accepted by lit's builtin ``diff``."""

    def __init__(self):
        self.ignore_all_space = False  # -w
        self.ignore_space_change = False  # -b
        self.ignore_matching_lines = False  # set when -I is given
        self.ignore_matching_lines_regex = ""  # argument of -I
        self.unified_diff = False  # -u or -U<n>
        self.num_context_lines = 3  # argument of -U<n>
        self.recursive_diff = False  # -r
        self.strip_trailing_cr = False  # --strip-trailing-cr
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
def getDirTree(path, basedir=""):
|
| 27 |
+
# Tree is a tuple of form (dirname, child_trees).
|
| 28 |
+
# An empty dir has child_trees = [], a file has child_trees = None.
|
| 29 |
+
child_trees = []
|
| 30 |
+
for dirname, child_dirs, files in os.walk(os.path.join(basedir, path)):
|
| 31 |
+
for child_dir in child_dirs:
|
| 32 |
+
child_trees.append(getDirTree(child_dir, dirname))
|
| 33 |
+
for filename in files:
|
| 34 |
+
child_trees.append((filename, None))
|
| 35 |
+
return path, sorted(child_trees)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def compareTwoFiles(flags, filepaths):
    """Diff the two files named in *filepaths* ("-" means stdin).

    Reads both files as raw bytes, then tries a text comparison in the
    locale's preferred encoding, falls back to UTF-8, and finally to a
    binary comparison.  Returns the diff exit code (0 = identical).
    """
    filelines = []
    for file in filepaths:
        if file == "-":
            # Dup the fd so closing the binary wrapper does not close stdin.
            stdin_fileno = sys.stdin.fileno()
            with os.fdopen(os.dup(stdin_fileno), "rb") as stdin_bin:
                filelines.append(stdin_bin.readlines())
        else:
            with open(file, "rb") as file_bin:
                filelines.append(file_bin.readlines())

    try:
        return compareTwoTextFiles(
            flags, filepaths, filelines, locale.getpreferredencoding(False)
        )
    except UnicodeDecodeError:
        try:
            return compareTwoTextFiles(flags, filepaths, filelines, "utf-8")
        except Exception:
            # Fix: was a bare "except:", which would also swallow
            # KeyboardInterrupt/SystemExit.
            return compareTwoBinaryFiles(flags, filepaths, filelines)
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def compareTwoBinaryFiles(flags, filepaths, filelines):
    """Diff two files' raw byte lines, writing the diff to stdout.

    Returns 1 when any diff line was produced, 0 when the inputs match.
    *filelines* is a pair of lists of bytes lines as read from the files.
    """
    exitCode = 0
    if hasattr(difflib, "diff_bytes"):
        # python 3.5 or newer
        diffs = difflib.diff_bytes(
            difflib.unified_diff,
            filelines[0],
            filelines[1],
            filepaths[0].encode(),
            filepaths[1].encode(),
            n=flags.num_context_lines,
        )
        # Decode for printing; undecodable bytes are shown as \xNN escapes.
        diffs = [diff.decode(errors="backslashreplace") for diff in diffs]
    else:
        # python 2.7
        if flags.unified_diff:
            func = difflib.unified_diff
        else:
            func = difflib.context_diff
        diffs = func(
            filelines[0],
            filelines[1],
            filepaths[0],
            filepaths[1],
            n=flags.num_context_lines,
        )

    for diff in diffs:
        sys.stdout.write(to_string(diff))
        exitCode = 1
    return exitCode
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def compareTwoTextFiles(flags, filepaths, filelines_bin, encoding):
    """Diff two files' lines as text decoded with *encoding*.

    Applies the normalizations selected in *flags* (strip trailing CR,
    collapse/ignore whitespace, drop lines matching -I) before diffing.
    Writes the diff to stdout and returns 1 if any difference was found,
    else 0.  Raises UnicodeDecodeError when the bytes do not decode.
    """
    filelines = []
    for lines_bin in filelines_bin:
        lines = []
        for line_bin in lines_bin:
            line = line_bin.decode(encoding=encoding)
            lines.append(line)
        filelines.append(lines)

    exitCode = 0

    # Build the per-line normalization as a composition of small lambdas.
    def compose2(f, g):
        return lambda x: f(g(x))

    f = lambda x: x
    if flags.strip_trailing_cr:
        f = compose2(lambda line: line.replace("\r\n", "\n"), f)
    if flags.ignore_all_space or flags.ignore_space_change:
        # -w joins with "" (all whitespace ignored); -b joins with a single
        # space (runs of whitespace collapse).
        ignoreSpace = lambda line, separator: separator.join(line.split()) + "\n"
        ignoreAllSpaceOrSpaceChange = functools.partial(
            ignoreSpace, separator="" if flags.ignore_all_space else " "
        )
        f = compose2(ignoreAllSpaceOrSpaceChange, f)

    for idx, lines in enumerate(filelines):
        if flags.ignore_matching_lines:
            # Drop lines matching the -I regex before normalization.
            lines = filter(
                lambda x: not re.match(
                    r"{}".format(flags.ignore_matching_lines_regex), x
                ),
                lines,
            )
        filelines[idx] = [f(line) for line in lines]

    func = difflib.unified_diff if flags.unified_diff else difflib.context_diff
    for diff in func(
        filelines[0],
        filelines[1],
        filepaths[0],
        filepaths[1],
        n=flags.num_context_lines,
    ):
        sys.stdout.write(to_string(diff))
        exitCode = 1
    return exitCode
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def printDirVsFile(dir_path, file_path):
    """Report a directory-vs-regular-file mismatch in GNU diff's wording."""
    template = (
        "File %s is a directory while file %s is a regular file"
        if os.path.getsize(file_path)
        else "File %s is a directory while file %s is a regular empty file"
    )
    sys.stdout.write(template % (dir_path, file_path) + "\n")
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def printFileVsDir(file_path, dir_path):
    """Report a regular-file-vs-directory mismatch in GNU diff's wording."""
    template = (
        "File %s is a regular file while file %s is a directory"
        if os.path.getsize(file_path)
        else "File %s is a regular empty file while file %s is a directory"
    )
    sys.stdout.write(template % (file_path, dir_path) + "\n")
|
| 154 |
+
|
| 155 |
+
|
| 156 |
+
def printOnlyIn(basedir, path, name):
    """Report an entry that exists in only one of the compared trees."""
    location = os.path.join(basedir, path)
    sys.stdout.write("Only in %s: %s\n" % (location, name))
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def compareDirTrees(flags, dir_trees, base_paths=["", ""]):
    """Recursively diff two directory trees produced by getDirTree.

    Returns 0 when the trees match, 1 otherwise.  Mismatches (file vs. dir,
    entries present on only one side, differing file contents) are reported
    to stdout as they are found.
    NOTE(review): base_paths has a mutable default; it is only read here, so
    the shared default list is safe as written.
    """
    # Dirnames of the trees are not checked, it's caller's responsibility,
    # as top-level dirnames are always different. Base paths are important
    # for doing os.walk, but we don't put it into tree's dirname in order
    # to speed up string comparison below and while sorting in getDirTree.
    left_tree, right_tree = dir_trees[0], dir_trees[1]
    left_base, right_base = base_paths[0], base_paths[1]

    # Compare two files or report file vs. directory mismatch.
    if left_tree[1] is None and right_tree[1] is None:
        return compareTwoFiles(
            flags,
            [
                os.path.join(left_base, left_tree[0]),
                os.path.join(right_base, right_tree[0]),
            ],
        )

    if left_tree[1] is None and right_tree[1] is not None:
        printFileVsDir(
            os.path.join(left_base, left_tree[0]),
            os.path.join(right_base, right_tree[0]),
        )
        return 1

    if left_tree[1] is not None and right_tree[1] is None:
        printDirVsFile(
            os.path.join(left_base, left_tree[0]),
            os.path.join(right_base, right_tree[0]),
        )
        return 1

    # Compare two directories via recursive use of compareDirTrees.
    exitCode = 0
    left_names = [node[0] for node in left_tree[1]]
    right_names = [node[0] for node in right_tree[1]]
    l, r = 0, 0
    while l < len(left_names) and r < len(right_names):
        # Names are sorted in getDirTree, rely on that order.
        if left_names[l] < right_names[r]:
            exitCode = 1
            printOnlyIn(left_base, left_tree[0], left_names[l])
            l += 1
        elif left_names[l] > right_names[r]:
            exitCode = 1
            printOnlyIn(right_base, right_tree[0], right_names[r])
            r += 1
        else:
            # Same name on both sides: recurse, keeping any earlier failure.
            exitCode |= compareDirTrees(
                flags,
                [left_tree[1][l], right_tree[1][r]],
                [
                    os.path.join(left_base, left_tree[0]),
                    os.path.join(right_base, right_tree[0]),
                ],
            )
            l += 1
            r += 1

    # At least one of the trees has ended. Report names from the other tree.
    while l < len(left_names):
        exitCode = 1
        printOnlyIn(left_base, left_tree[0], left_names[l])
        l += 1
    while r < len(right_names):
        exitCode = 1
        printOnlyIn(right_base, right_tree[0], right_names[r])
        r += 1
    return exitCode
|
| 229 |
+
|
| 230 |
+
|
| 231 |
+
def main(argv):
    """Entry point for lit's builtin 'diff' command.

    Parses a subset of GNU diff options (-w, -b, -u, -U<n>, -I<regex>, -r,
    --strip-trailing-cr), compares the two operands (files, '-' for stdin,
    or directories when -r is given), and exits with the comparison status
    (0 = identical, 1 = different or error).
    """
    if sys.platform == "win32":
        if hasattr(sys.stdout, "buffer"):
            # python 3: force '\n' newlines so output matches POSIX diff.
            sys.stdout = io.TextIOWrapper(sys.stdout.buffer, newline="\n")
        else:
            # python 2.7
            import msvcrt

            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
    args = argv[1:]
    try:
        opts, args = getopt.gnu_getopt(args, "wbuI:U:r", ["strip-trailing-cr"])
    except getopt.GetoptError as err:
        sys.stderr.write("Unsupported: 'diff': %s\n" % str(err))
        sys.exit(1)

    flags = DiffFlags()
    # 'filelines' was previously allocated here as well but never used.
    filepaths, dir_trees = [], []
    for o, a in opts:
        if o == "-w":
            flags.ignore_all_space = True
        elif o == "-b":
            flags.ignore_space_change = True
        elif o == "-u":
            flags.unified_diff = True
        elif o.startswith("-U"):
            flags.unified_diff = True
            try:
                flags.num_context_lines = int(a)
                if flags.num_context_lines < 0:
                    # Negative context counts are invalid; funnel into the
                    # same error path as a non-integer argument.
                    # FIX: this previously raised the undefined name
                    # 'ValueException', which only worked because a bare
                    # 'except:' swallowed the resulting NameError.
                    raise ValueError
            except ValueError:
                sys.stderr.write("Error: invalid '-U' argument: {}\n".format(a))
                sys.exit(1)
        elif o == "-I":
            flags.ignore_matching_lines = True
            flags.ignore_matching_lines_regex = a
        elif o == "-r":
            flags.recursive_diff = True
        elif o == "--strip-trailing-cr":
            flags.strip_trailing_cr = True
        else:
            assert False, "unhandled option"

    if len(args) != 2:
        sys.stderr.write("Error: missing or extra operand\n")
        sys.exit(1)

    exitCode = 0
    try:
        for file in args:
            # Normalize relative operands; '-' (stdin) is left untouched.
            if file != "-" and not os.path.isabs(file):
                file = util.abs_path_preserve_drive(file)

            if flags.recursive_diff:
                if file == "-":
                    sys.stderr.write("Error: cannot recursively compare '-'\n")
                    sys.exit(1)
                dir_trees.append(getDirTree(file))
            else:
                filepaths.append(file)

        if not flags.recursive_diff:
            exitCode = compareTwoFiles(flags, filepaths)
        else:
            exitCode = compareDirTrees(flags, dir_trees)

    except IOError as err:
        sys.stderr.write("Error: 'diff' command failed, %s\n" % str(err))
        exitCode = 1

    sys.exit(exitCode)
|
| 304 |
+
|
| 305 |
+
|
| 306 |
+
# Allow the builtin diff to be run directly as a script.
if __name__ == "__main__":
    main(sys.argv)
|
wemm/lib/python3.10/site-packages/lit/cl_arguments.py
ADDED
|
@@ -0,0 +1,376 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import argparse
|
| 2 |
+
import enum
|
| 3 |
+
import os
|
| 4 |
+
import shlex
|
| 5 |
+
import sys
|
| 6 |
+
|
| 7 |
+
import lit.reports
|
| 8 |
+
import lit.util
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
# Supported scheduling orders for discovered tests. The enum values are the
# strings accepted by the --order command line flag.
@enum.unique
class TestOrder(enum.Enum):
    # Deterministic path-sorted order.
    LEXICAL = "lexical"
    # Shuffled order.
    RANDOM = "random"
    # Default: the scheduler decides (e.g. failing-first); the actual policy
    # is implemented by the consumer of opts.order, not here.
    SMART = "smart"
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def parse_args():
    """Parse lit's command line (argv plus the LIT_OPTS environment variable).

    Returns the argparse namespace, augmented with two synthesized
    attributes: ``shard`` (a (run, num) tuple or None) and ``reports``
    (the non-None report writers).
    """
    parser = argparse.ArgumentParser(prog="lit", fromfile_prefix_chars="@")
    parser.add_argument(
        "test_paths",
        nargs="+",
        metavar="TEST_PATH",
        help="File or path to include in the test suite",
    )

    parser.add_argument(
        "--version", action="version", version="%(prog)s " + lit.__version__
    )

    parser.add_argument(
        "-j",
        "--threads",
        "--workers",
        dest="workers",
        metavar="N",
        help="Number of workers used for testing",
        type=_positive_int,
        default=os.getenv("LIT_MAX_WORKERS", lit.util.usable_core_count()),
    )
    parser.add_argument(
        "--config-prefix",
        dest="configPrefix",
        metavar="NAME",
        help="Prefix for 'lit' config files",
    )
    parser.add_argument(
        "-D",
        "--param",
        dest="user_params",
        metavar="NAME=VAL",
        help="Add 'NAME' = 'VAL' to the user defined parameters",
        action="append",
        default=[],
    )

    format_group = parser.add_argument_group("Output Format")
    # FIXME: I find these names very confusing, although I like the
    # functionality.
    format_group.add_argument(
        "-q", "--quiet", help="Suppress no error output", action="store_true"
    )
    format_group.add_argument(
        "-s",
        "--succinct",
        help="Reduce amount of output."
        " Additionally, show a progress bar,"
        " unless --no-progress-bar is specified.",
        action="store_true",
    )
    format_group.add_argument(
        "-v",
        "--verbose",
        dest="showOutput",
        help="For failed tests, show all output. For example, each command is"
        " printed before it is executed, so the last printed command is the one"
        " that failed.",
        action="store_true",
    )
    format_group.add_argument(
        "-vv",
        "--echo-all-commands",
        dest="showOutput",
        help="Deprecated alias for -v.",
        action="store_true",
    )
    format_group.add_argument(
        "-a",
        "--show-all",
        dest="showAllOutput",
        help="Enable -v, but for all tests not just failed tests.",
        action="store_true",
    )
    format_group.add_argument(
        "-o",
        "--output",
        type=lit.reports.JsonReport,
        help="Write test results to the provided path",
        metavar="PATH",
    )
    format_group.add_argument(
        "--no-progress-bar",
        dest="useProgressBar",
        help="Do not use curses based progress bar",
        action="store_false",
    )

    # Note: this does not generate flags for user-defined result codes.
    success_codes = [c for c in lit.Test.ResultCode.all_codes() if not c.isFailure]
    for code in success_codes:
        format_group.add_argument(
            "--show-{}".format(code.name.lower()),
            dest="shown_codes",
            help="Show {} tests ({})".format(code.label.lower(), code.name),
            action="append_const",
            const=code,
            default=[],
        )

    execution_group = parser.add_argument_group("Test Execution")
    execution_group.add_argument(
        "--gtest-sharding",
        help="Enable sharding for GoogleTest format",
        action="store_true",
        default=True,
    )
    execution_group.add_argument(
        "--no-gtest-sharding",
        dest="gtest_sharding",
        help="Disable sharding for GoogleTest format",
        action="store_false",
    )
    execution_group.add_argument(
        "--path",
        help="Additional paths to add to testing environment",
        action="append",
        default=[],
        type=os.path.abspath,
    )
    execution_group.add_argument(
        "--vg", dest="useValgrind", help="Run tests under valgrind", action="store_true"
    )
    execution_group.add_argument(
        "--vg-leak",
        dest="valgrindLeakCheck",
        help="Check for memory leaks under valgrind",
        action="store_true",
    )
    execution_group.add_argument(
        "--vg-arg",
        dest="valgrindArgs",
        metavar="ARG",
        help="Specify an extra argument for valgrind",
        action="append",
        default=[],
    )
    execution_group.add_argument(
        "--time-tests",
        help="Track elapsed wall time for each test",
        action="store_true",
    )
    execution_group.add_argument(
        "--no-execute",
        dest="noExecute",
        help="Don't execute any tests (assume PASS)",
        action="store_true",
    )
    execution_group.add_argument(
        "--xunit-xml-output",
        type=lit.reports.XunitReport,
        help="Write XUnit-compatible XML test reports to the specified file",
    )
    execution_group.add_argument(
        "--resultdb-output",
        type=lit.reports.ResultDBReport,
        help="Write LuCI ResuldDB compatible JSON to the specified file",
    )
    execution_group.add_argument(
        "--time-trace-output",
        type=lit.reports.TimeTraceReport,
        help="Write Chrome tracing compatible JSON to the specified file",
    )
    execution_group.add_argument(
        "--timeout",
        dest="maxIndividualTestTime",
        help="Maximum time to spend running a single test (in seconds). "
        "0 means no time limit. [Default: 0]",
        type=_non_negative_int,
    )
    execution_group.add_argument(
        "--max-failures",
        help="Stop execution after the given number of failures.",
        type=_positive_int,
    )
    execution_group.add_argument(
        "--allow-empty-runs",
        help="Do not fail the run if all tests are filtered out",
        action="store_true",
    )
    execution_group.add_argument(
        "--per-test-coverage",
        dest="per_test_coverage",
        action="store_true",
        help="Enable individual test case coverage",
    )
    execution_group.add_argument(
        "--ignore-fail",
        dest="ignoreFail",
        action="store_true",
        help="Exit with status zero even if some tests fail",
    )

    selection_group = parser.add_argument_group("Test Selection")
    selection_group.add_argument(
        "--max-tests",
        metavar="N",
        help="Maximum number of tests to run",
        type=_positive_int,
    )
    selection_group.add_argument(
        "--max-time",
        dest="timeout",
        metavar="N",
        help="Maximum time to spend testing (in seconds)",
        type=_positive_int,
    )
    # NOTE(review): the default is the TestOrder enum member, but a value
    # supplied on the command line arrives as one of the plain strings in
    # 'choices'. Consumers of opts.order must therefore accept both — TODO
    # confirm against the code that reads opts.order.
    selection_group.add_argument(
        "--order",
        choices=[x.value for x in TestOrder],
        default=TestOrder.SMART,
        help="Test order to use (default: smart)",
    )
    selection_group.add_argument(
        "--shuffle",
        dest="order",
        help="Run tests in random order (DEPRECATED: use --order=random)",
        action="store_const",
        const=TestOrder.RANDOM,
    )
    selection_group.add_argument(
        "-i",
        "--incremental",
        help="Run failed tests first (DEPRECATED: use --order=smart)",
        action="store_true",
    )
    selection_group.add_argument(
        "--filter",
        metavar="REGEX",
        type=_case_insensitive_regex,
        help="Only run tests with paths matching the given regular expression",
        default=os.environ.get("LIT_FILTER", ".*"),
    )
    selection_group.add_argument(
        "--filter-out",
        metavar="REGEX",
        type=_case_insensitive_regex,
        help="Filter out tests with paths matching the given regular expression",
        default=os.environ.get("LIT_FILTER_OUT", "^$"),
    )
    selection_group.add_argument(
        "--xfail",
        metavar="LIST",
        type=_semicolon_list,
        help="XFAIL tests with paths in the semicolon separated list",
        default=os.environ.get("LIT_XFAIL", ""),
    )
    selection_group.add_argument(
        "--xfail-not",
        metavar="LIST",
        type=_semicolon_list,
        help="do not XFAIL tests with paths in the semicolon separated list",
        default=os.environ.get("LIT_XFAIL_NOT", ""),
    )
    selection_group.add_argument(
        "--num-shards",
        dest="numShards",
        metavar="M",
        help="Split testsuite into M pieces and only run one",
        type=_positive_int,
        default=os.environ.get("LIT_NUM_SHARDS"),
    )
    selection_group.add_argument(
        "--run-shard",
        dest="runShard",
        metavar="N",
        help="Run shard #N of the testsuite",
        type=_positive_int,
        default=os.environ.get("LIT_RUN_SHARD"),
    )

    debug_group = parser.add_argument_group("Debug and Experimental Options")
    debug_group.add_argument(
        "--debug", help="Enable debugging (for 'lit' development)", action="store_true"
    )
    debug_group.add_argument(
        "--show-suites",
        help="Show discovered test suites and exit",
        action="store_true",
    )
    debug_group.add_argument(
        "--show-tests", help="Show all discovered tests and exit", action="store_true"
    )
    debug_group.add_argument(
        "--show-used-features",
        help="Show all features used in the test suite (in XFAIL, UNSUPPORTED and REQUIRES) and exit",
        action="store_true",
    )

    # LIT is special: environment variables override command line arguments.
    # (LIT_OPTS is appended after argv, so with argparse "last one wins"
    # semantics the environment takes precedence.)
    env_args = shlex.split(os.environ.get("LIT_OPTS", ""))
    args = sys.argv[1:] + env_args
    opts = parser.parse_args(args)

    # Validate command line options
    if opts.incremental:
        print(
            "WARNING: --incremental is deprecated. Failing tests now always run first."
        )

    # --num-shards and --run-shard must be given together and be consistent;
    # fold them into a single (run, total) tuple for downstream code.
    if opts.numShards or opts.runShard:
        if not opts.numShards or not opts.runShard:
            parser.error("--num-shards and --run-shard must be used together")
        if opts.runShard > opts.numShards:
            parser.error("--run-shard must be between 1 and --num-shards (inclusive)")
        opts.shard = (opts.runShard, opts.numShards)
    else:
        opts.shard = None

    # NOTE(review): filter() returns a one-shot iterator; opts.reports can
    # only be iterated once. Fine if the caller consumes it exactly once —
    # TODO confirm.
    opts.reports = filter(
        None,
        [
            opts.output,
            opts.xunit_xml_output,
            opts.resultdb_output,
            opts.time_trace_output,
        ],
    )

    return opts
|
| 340 |
+
|
| 341 |
+
|
| 342 |
+
def _positive_int(arg):
    """Argparse type: an integer that must be strictly greater than zero."""
    return _int(arg, "positive", lambda value: value > 0)
|
| 344 |
+
|
| 345 |
+
|
| 346 |
+
def _non_negative_int(arg):
    """Argparse type: an integer that must be zero or greater."""
    return _int(arg, "non-negative", lambda value: value >= 0)
|
| 348 |
+
|
| 349 |
+
|
| 350 |
+
def _int(arg, kind, pred):
|
| 351 |
+
desc = "requires {} integer, but found '{}'"
|
| 352 |
+
try:
|
| 353 |
+
i = int(arg)
|
| 354 |
+
except ValueError:
|
| 355 |
+
raise _error(desc, kind, arg)
|
| 356 |
+
if not pred(i):
|
| 357 |
+
raise _error(desc, kind, arg)
|
| 358 |
+
return i
|
| 359 |
+
|
| 360 |
+
|
| 361 |
+
def _case_insensitive_regex(arg):
|
| 362 |
+
import re
|
| 363 |
+
|
| 364 |
+
try:
|
| 365 |
+
return re.compile(arg, re.IGNORECASE)
|
| 366 |
+
except re.error as reason:
|
| 367 |
+
raise _error("invalid regular expression: '{}', {}", arg, reason)
|
| 368 |
+
|
| 369 |
+
|
| 370 |
+
def _semicolon_list(arg):
|
| 371 |
+
return arg.split(";")
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
def _error(desc, *args):
|
| 375 |
+
msg = desc.format(*args)
|
| 376 |
+
return argparse.ArgumentTypeError(msg)
|
wemm/lib/python3.10/site-packages/lit/discovery.py
ADDED
|
@@ -0,0 +1,289 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Test discovery functions.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
import copy
|
| 6 |
+
import os
|
| 7 |
+
import sys
|
| 8 |
+
|
| 9 |
+
from lit.TestingConfig import TestingConfig
|
| 10 |
+
from lit import LitConfig, Test, util
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
def chooseConfigFileFromDir(dir, config_names):
    """Return the first existing config file in *dir* from *config_names*.

    The order of *config_names* establishes precedence; returns None when
    none of the candidates exist.
    """
    for candidate in (os.path.join(dir, name) for name in config_names):
        if os.path.exists(candidate):
            return candidate
    return None
|
| 19 |
+
|
| 20 |
+
|
| 21 |
+
def dirContainsTestSuite(path, lit_config):
    """Return the path of a suite config file found in *path*, or None.

    Site configs (lit_config.site_config_names) take precedence over
    ordinary configs (lit_config.config_names).
    """
    site_cfg = chooseConfigFileFromDir(path, lit_config.site_config_names)
    return site_cfg or chooseConfigFileFromDir(path, lit_config.config_names)
|
| 26 |
+
|
| 27 |
+
|
| 28 |
+
def getTestSuite(item, litConfig, cache):
    """getTestSuite(item, litConfig, cache) -> (suite, relative_path)

    Find the test suite containing @arg item.

    @retval (None, ...) - Indicates no test suite contains @arg item.
    @retval (suite, relative_path) - The suite that @arg item is in, and its
    relative path inside that suite.
    """

    def search1(path):
        # Check for a site config or a lit config.
        cfgpath = dirContainsTestSuite(path, litConfig)

        # If we didn't find a config file, keep looking up the directory
        # tree, accumulating the path components we walked past.
        if not cfgpath:
            parent, base = os.path.split(path)
            if parent == path:
                # Reached the filesystem root without finding a config.
                return (None, ())

            ts, relative = search(parent)
            return (ts, relative + (base,))

        # This is a private builtin parameter which can be used to perform
        # translation of configuration paths. Specifically, this parameter
        # can be set to a dictionary that the discovery process will consult
        # when it finds a configuration it is about to load. If the given
        # path is in the map, the value of that key is a path to the
        # configuration to load instead.
        config_map = litConfig.params.get("config_map")
        if config_map:
            cfgpath = util.abs_path_preserve_drive(cfgpath)
            target = config_map.get(os.path.normcase(cfgpath))
            if target:
                cfgpath = target

        # We found a test suite, create a new config for it and load it.
        if litConfig.debug:
            litConfig.note("loading suite config %r" % cfgpath)

        cfg = TestingConfig.fromdefaults(litConfig)
        cfg.load_from_path(cfgpath, litConfig)
        # Roots default to the config file's directory when the config does
        # not specify them.
        source_root = util.abs_path_preserve_drive(cfg.test_source_root or path)
        exec_root = util.abs_path_preserve_drive(cfg.test_exec_root or path)
        return Test.TestSuite(cfg.name, source_root, exec_root, cfg), ()

    def search(path):
        # Memoizing wrapper around search1, keyed on the absolute path so
        # different spellings of the same directory share one suite.
        real_path = util.abs_path_preserve_drive(path)
        res = cache.get(real_path)
        if res is None:
            cache[real_path] = res = search1(path)
        return res

    # Canonicalize the path.
    item = os.path.normpath(os.path.join(os.getcwd(), item))

    # Skip files and virtual components: walk up until we reach an existing
    # directory, remembering the trailing (possibly virtual) components.
    components = []
    while not os.path.isdir(item):
        parent, base = os.path.split(item)
        if parent == item:
            return (None, ())
        components.append(base)
        item = parent
    components.reverse()

    ts, relative = search(item)
    return ts, tuple(relative + tuple(components))
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def getLocalConfig(ts, path_in_suite, litConfig, cache):
    """Return the effective config for *path_in_suite* within suite *ts*.

    Configs cascade: each directory may refine its parent's config via a
    local config file. Results are memoized in *cache*, keyed by
    (suite, path_in_suite).
    """

    def load_uncached(path_in_suite):
        # Resolve the parent config (the suite config at the root).
        if path_in_suite:
            parent = lookup(path_in_suite[:-1])
        else:
            parent = ts.config

        # Look for a local configuration file in this directory.
        source_path = ts.getSourcePath(path_in_suite)
        cfgpath = chooseConfigFileFromDir(source_path, litConfig.local_config_names)

        # Without a local file, this directory simply inherits the parent.
        if not cfgpath:
            return parent

        # Clone the parent config and layer the local file on top of it.
        config = copy.deepcopy(parent)
        if litConfig.debug:
            litConfig.note("loading local config %r" % cfgpath)
        config.load_from_path(cfgpath, litConfig)
        return config

    def lookup(path_in_suite):
        # Memoized access; configs are never None, so membership suffices.
        key = (ts, path_in_suite)
        if key not in cache:
            cache[key] = load_uncached(path_in_suite)
        return cache[key]

    return lookup(path_in_suite)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def getTests(path, litConfig, testSuiteCache, localConfigCache):
    """Resolve *path* to its suite and yield the tests it contains.

    Returns (suite, tests_generator); returns ((), ()) with a warning when
    no suite contains the path.
    """
    suite, path_in_suite = getTestSuite(path, litConfig, testSuiteCache)
    if suite is None:
        litConfig.warning("unable to find test suite for %r" % path)
        return (), ()

    if litConfig.debug:
        litConfig.note(
            "resolved input %r to %r::%r" % (path, suite.name, path_in_suite)
        )

    tests = getTestsInSuite(
        suite,
        path_in_suite,
        litConfig,
        testSuiteCache,
        localConfigCache,
    )
    return suite, tests
|
| 150 |
+
|
| 151 |
+
|
| 152 |
+
def getTestsInSuite(
    ts, path_in_suite, litConfig, testSuiteCache, localConfigCache
):
    """Generator yielding the tests under *path_in_suite* in suite *ts*.

    Recurses into subdirectories, honoring local configs, standalone-test
    directories, and nested test suites discovered along the way.
    """
    # Check that the source path exists (errors here are reported by the
    # caller).
    source_path = ts.getSourcePath(path_in_suite)
    if not os.path.exists(source_path):
        return

    # Check if the user named a test directly.
    if not os.path.isdir(source_path):
        test_dir_in_suite = path_in_suite[:-1]
        lc = getLocalConfig(ts, test_dir_in_suite, litConfig, localConfigCache)

        # If we don't have a test format or if we are running standalone tests,
        # always "find" the test itself. Otherwise, we might find no tests at
        # all, which is considered an error but isn't an error with standalone
        # tests.
        tests = [Test.Test(ts, path_in_suite, lc)] if lc.test_format is None or lc.standalone_tests else \
            lc.test_format.getTestsForPath(ts, path_in_suite, litConfig, lc)

        for test in tests:
            yield test
        return

    # Otherwise we have a directory to search for tests, start by getting the
    # local configuration.
    lc = getLocalConfig(ts, path_in_suite, litConfig, localConfigCache)

    # Directory contains tests to be run standalone. Do not try to discover.
    if lc.standalone_tests:
        if lc.suffixes or lc.excludes:
            litConfig.warning(
                "standalone_tests set in LIT config but suffixes or excludes"
                " are also set"
            )
        return

    # Search for tests.
    if lc.test_format is not None:
        for res in lc.test_format.getTestsInDirectory(ts, path_in_suite, litConfig, lc):
            yield res

    # Search subdirectories.
    for filename in os.listdir(source_path):
        # FIXME: This doesn't belong here?
        if filename in ("Output", ".svn", ".git") or filename in lc.excludes:
            continue

        # Ignore non-directories.
        file_sourcepath = os.path.join(source_path, filename)
        if not os.path.isdir(file_sourcepath):
            continue

        # Check for nested test suites, first in the execpath in case there is a
        # site configuration and then in the source path.
        subpath = path_in_suite + (filename,)
        file_execpath = ts.getExecPath(subpath)
        if dirContainsTestSuite(file_execpath, litConfig):
            sub_ts, subpath_in_suite = getTestSuite(
                file_execpath, litConfig, testSuiteCache
            )
        elif dirContainsTestSuite(file_sourcepath, litConfig):
            sub_ts, subpath_in_suite = getTestSuite(
                file_sourcepath, litConfig, testSuiteCache
            )
        else:
            sub_ts = None

        # If the this directory recursively maps back to the current test suite,
        # disregard it (this can happen if the exec root is located inside the
        # current test suite, for example).
        if sub_ts is ts:
            continue

        # Otherwise, load from the nested test suite, if present.
        if sub_ts is not None:
            subiter = getTestsInSuite(
                sub_ts,
                subpath_in_suite,
                litConfig,
                testSuiteCache,
                localConfigCache,
            )
        else:
            subiter = getTestsInSuite(
                ts,
                subpath,
                litConfig,
                testSuiteCache,
                localConfigCache,
            )

        # Count yielded tests so empty nested suites can be reported.
        N = 0
        for res in subiter:
            N += 1
            yield res
        if sub_ts and not N:
            litConfig.warning("test suite %r contained no tests" % sub_ts.name)
|
| 251 |
+
|
| 252 |
+
|
| 253 |
+
def find_tests_for_inputs(lit_config, inputs):
    """
    find_tests_for_inputs(lit_config, inputs) -> [Test]

    Given a configuration object and a list of input specifiers, find all the
    tests to execute.
    """

    # Load the tests from the inputs.
    tests = []
    test_suite_cache = {}
    local_config_cache = {}
    # NOTE: loop variable renamed from 'input' to avoid shadowing the builtin.
    for path in inputs:
        prev = len(tests)
        tests.extend(
            getTests(
                path,
                lit_config,
                test_suite_cache,
                local_config_cache,
            )[1]
        )
        if prev == len(tests):
            lit_config.warning("input %r contained no tests" % path)

    # This data is no longer needed but keeping it around causes awful
    # performance problems while the test suites run.
    # (Iterate values directly; the cache keys are not needed here.)
    for suite in test_suite_cache.values():
        if suite[0]:
            suite[0].test_times = None

    # If there were any errors during test discovery, exit now.
    if lit_config.numErrors:
        sys.stderr.write("%d errors, exiting.\n" % lit_config.numErrors)
        sys.exit(2)

    return tests
|
wemm/lib/python3.10/site-packages/lit/display.py
ADDED
|
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys
|
| 2 |
+
|
| 3 |
+
|
| 4 |
+
def create_display(opts, tests, total_tests, workers):
    """Build the progress display appropriate for this run.

    Returns a NopDisplay when --quiet was given; otherwise a Display,
    optionally driven by a terminal-aware (or simple fallback) progress
    bar when succinct output with a progress bar was requested.
    """
    if opts.quiet:
        return NopDisplay()

    count = len(tests)
    suffix = "" if count == total_tests else " of %d" % total_tests
    banner = "-- Testing: %d%s tests, %d workers --" % (count, suffix, workers)

    bar = None
    if opts.succinct and opts.useProgressBar:
        import lit.ProgressBar

        try:
            term = lit.ProgressBar.TerminalController()
            bar = lit.ProgressBar.ProgressBar(term, banner)
            # The full-screen bar renders the banner itself.
            banner = None
        except ValueError:
            # Terminal is not capable enough; fall back to a simple bar.
            bar = lit.ProgressBar.SimpleProgressBar("Testing: ")

    return Display(opts, tests, banner, bar)
|
| 24 |
+
|
| 25 |
+
|
| 26 |
+
class ProgressPredictor(object):
    """Estimates overall run progress from recorded per-test timings.

    Tests with a known previous elapsed time contribute their exact
    historical duration; the remaining ("unpredictable") tests are
    assumed to take the running average of all timed tests.
    """

    def __init__(self, tests):
        self.completed = 0
        self.time_elapsed = 0.0
        self.predictable_tests_remaining = 0
        self.predictable_time_remaining = 0.0
        self.unpredictable_tests_remaining = 0

        for t in tests:
            if t.previous_elapsed:
                self.predictable_tests_remaining += 1
                self.predictable_time_remaining += t.previous_elapsed
            else:
                self.unpredictable_tests_remaining += 1

    def update(self, test):
        """Record one finished test; return estimated progress in [0, 1]."""
        self.completed += 1
        self.time_elapsed += test.result.elapsed

        if test.previous_elapsed:
            self.predictable_tests_remaining -= 1
            self.predictable_time_remaining -= test.previous_elapsed
        else:
            self.unpredictable_tests_remaining -= 1

        # NOTE: median would be more precise, but might be too slow.
        known_time = self.time_elapsed + self.predictable_time_remaining
        known_count = self.completed + self.predictable_tests_remaining
        average_test_time = known_time / known_count
        remaining_time = (
            self.predictable_time_remaining
            + average_test_time * self.unpredictable_tests_remaining
        )
        total_time = self.time_elapsed + remaining_time

        if total_time > 0:
            return self.time_elapsed / total_time
        return 0
|
| 66 |
+
|
| 67 |
+
|
| 68 |
+
class NopDisplay(object):
    """No-op display used when --quiet suppresses all progress output."""

    def print_header(self):
        """Intentionally does nothing."""

    def update(self, test):
        """Intentionally does nothing."""

    def clear(self, interrupted):
        """Intentionally does nothing."""
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
class Display(object):
    """Console display for test results, optionally with a progress bar."""

    def __init__(self, opts, tests, header, progress_bar):
        # header may be None when the progress bar renders its own banner.
        self.opts = opts
        self.num_tests = len(tests)
        self.header = header
        # Progress prediction is only needed when a bar is shown.
        self.progress_predictor = ProgressPredictor(tests) if progress_bar else None
        self.progress_bar = progress_bar
        self.completed = 0

    def print_header(self):
        """Print the run banner and draw an initial (0%) progress bar."""
        if self.header:
            print(self.header)
        if self.progress_bar:
            self.progress_bar.update(0.0, "")

    def update(self, test):
        """Handle one completed test: print its result and advance the bar."""
        self.completed += 1

        # Failures are always shown; otherwise verbosity options decide.
        show_result = (
            test.isFailure()
            or self.opts.showAllOutput
            or (not self.opts.quiet and not self.opts.succinct)
        )
        if show_result:
            if self.progress_bar:
                # Clear the bar so the result line isn't overdrawn.
                self.progress_bar.clear(interrupted=False)
            self.print_result(test)

        if self.progress_bar:
            if test.isFailure():
                self.progress_bar.barColor = "RED"
            percent = self.progress_predictor.update(test)
            self.progress_bar.update(percent, test.getFullName())

    def clear(self, interrupted):
        """Remove the progress bar from the terminal, if one is shown."""
        if self.progress_bar:
            self.progress_bar.clear(interrupted)

    def print_result(self, test):
        """Print one test's result line plus output, metrics and micro-tests."""
        # Show the test result line.
        test_name = test.getFullName()
        print(
            "%s: %s (%d of %d)"
            % (test.result.code.name, test_name, self.completed, self.num_tests)
        )

        # Show the test failure output, if requested.
        if (test.isFailure() and self.opts.showOutput) or self.opts.showAllOutput:
            if test.isFailure():
                print(
                    "%s TEST '%s' FAILED %s" % ("*" * 20, test.getFullName(), "*" * 20)
                )
            out = test.result.output
            # Encode/decode so that, when using Python 3.6.5 in Windows 10,
            # print(out) doesn't raise UnicodeEncodeError if out contains
            # special characters. However, Python 2 might try to decode
            # as part of the encode call if out is already encoded, so skip
            # encoding if it raises UnicodeDecodeError.
            if sys.stdout.encoding:
                try:
                    out = out.encode(encoding=sys.stdout.encoding, errors="replace")
                except UnicodeDecodeError:
                    pass
                # Python 2 can raise UnicodeDecodeError here too in cases
                # where the stdout encoding is ASCII. Ignore decode errors
                # in this case.
                out = out.decode(encoding=sys.stdout.encoding, errors="ignore")
            print(out)
            print("*" * 20)

        # Report test metrics, if present.
        if test.result.metrics:
            print("%s TEST '%s' RESULTS %s" % ("*" * 10, test.getFullName(), "*" * 10))
            items = sorted(test.result.metrics.items())
            for metric_name, value in items:
                print("%s: %s " % (metric_name, value.format()))
            print("*" * 10)

        # Report micro-tests, if present
        if test.result.microResults:
            items = sorted(test.result.microResults.items())
            for micro_test_name, micro_test in items:
                print("%s MICRO-TEST: %s" % ("*" * 3, micro_test_name))

                if micro_test.metrics:
                    sorted_metrics = sorted(micro_test.metrics.items())
                    for metric_name, value in sorted_metrics:
                        print("    %s: %s " % (metric_name, value.format()))

        # Ensure the output is flushed.
        sys.stdout.flush()
|
wemm/lib/python3.10/site-packages/lit/main.py
ADDED
|
@@ -0,0 +1,357 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
lit - LLVM Integrated Tester.
|
| 3 |
+
|
| 4 |
+
See lit.pod for more information.
|
| 5 |
+
"""
|
| 6 |
+
|
| 7 |
+
import itertools
|
| 8 |
+
import os
|
| 9 |
+
import platform
|
| 10 |
+
import sys
|
| 11 |
+
import time
|
| 12 |
+
|
| 13 |
+
import lit.cl_arguments
|
| 14 |
+
import lit.discovery
|
| 15 |
+
import lit.display
|
| 16 |
+
import lit.LitConfig
|
| 17 |
+
import lit.reports
|
| 18 |
+
import lit.run
|
| 19 |
+
import lit.Test
|
| 20 |
+
import lit.util
|
| 21 |
+
from lit.formats.googletest import GoogleTest
|
| 22 |
+
from lit.TestTimes import record_test_times
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def main(builtin_params={}):
    """Entry point: discover, filter, run, and report on tests.

    builtin_params are merged with --param values (command line wins) and
    passed into the LitConfig. Exits with status 2 on discovery/config
    errors, 1 on test failures (unless --ignore-fail), 0 otherwise.
    NOTE(review): the mutable default dict is only read (copied in
    create_params), never mutated, so it is safe here.
    """
    opts = lit.cl_arguments.parse_args()
    params = create_params(builtin_params, opts.user_params)
    is_windows = platform.system() == "Windows"

    lit_config = lit.LitConfig.LitConfig(
        progname=os.path.basename(sys.argv[0]),
        path=opts.path,
        quiet=opts.quiet,
        useValgrind=opts.useValgrind,
        valgrindLeakCheck=opts.valgrindLeakCheck,
        valgrindArgs=opts.valgrindArgs,
        noExecute=opts.noExecute,
        debug=opts.debug,
        isWindows=is_windows,
        order=opts.order,
        params=params,
        config_prefix=opts.configPrefix,
        per_test_coverage=opts.per_test_coverage,
        gtest_sharding=opts.gtest_sharding,
    )

    discovered_tests = lit.discovery.find_tests_for_inputs(
        lit_config, opts.test_paths
    )
    if not discovered_tests:
        sys.stderr.write("error: did not discover any tests for provided path(s)\n")
        sys.exit(2)

    # Informational modes: print what was discovered and exit successfully.
    if opts.show_suites or opts.show_tests:
        print_discovered(discovered_tests, opts.show_suites, opts.show_tests)
        sys.exit(0)

    if opts.show_used_features:
        features = set(
            itertools.chain.from_iterable(
                t.getUsedFeatures()
                for t in discovered_tests
                if t.gtest_json_file is None
            )
        )
        print(" ".join(sorted(features)))
        sys.exit(0)

    # Command line overrides configuration for maxIndividualTestTime.
    if opts.maxIndividualTestTime is not None:  # `not None` is important (default: 0)
        if opts.maxIndividualTestTime != lit_config.maxIndividualTestTime:
            lit_config.note(
                (
                    "The test suite configuration requested an individual"
                    " test timeout of {0} seconds but a timeout of {1} seconds was"
                    " requested on the command line. Forcing timeout to be {1}"
                    " seconds."
                ).format(lit_config.maxIndividualTestTime, opts.maxIndividualTestTime)
            )
            lit_config.maxIndividualTestTime = opts.maxIndividualTestTime

    determine_order(discovered_tests, opts.order)

    # Apply --filter / --filter-out regexes against full test names.
    selected_tests = [
        t
        for t in discovered_tests
        if opts.filter.search(t.getFullName())
        and not opts.filter_out.search(t.getFullName())
    ]

    if not selected_tests:
        sys.stderr.write(
            "error: filter did not match any tests "
            "(of %d discovered).  " % len(discovered_tests)
        )
        if opts.allow_empty_runs:
            sys.stderr.write(
                "Suppressing error because '--allow-empty-runs' " "was specified.\n"
            )
            sys.exit(0)
        else:
            sys.stderr.write("Use '--allow-empty-runs' to suppress this " "error.\n")
            sys.exit(2)

    # When running multiple shards, don't include skipped tests in the xunit
    # output since merging the files will result in duplicates.
    if opts.shard:
        (run, shards) = opts.shard
        selected_tests = filter_by_shard(selected_tests, run, shards, lit_config)
        if not selected_tests:
            sys.stderr.write(
                "warning: shard does not contain any tests. "
                "Consider decreasing the number of shards.\n"
            )
            sys.exit(0)

    selected_tests = selected_tests[: opts.max_tests]

    # Note: xfail marking scans all discovered tests, not just selected ones.
    mark_xfail(discovered_tests, opts)

    mark_excluded(discovered_tests, selected_tests)

    start = time.time()
    run_tests(selected_tests, lit_config, opts, len(discovered_tests))
    elapsed = time.time() - start

    record_test_times(selected_tests, lit_config)

    selected_tests, discovered_tests = GoogleTest.post_process_shard_results(
        selected_tests, discovered_tests
    )

    if opts.time_tests:
        print_histogram(discovered_tests)

    print_results(discovered_tests, elapsed, opts)

    tests_for_report = selected_tests if opts.shard else discovered_tests
    for report in opts.reports:
        report.write_results(tests_for_report, elapsed)

    if lit_config.numErrors:
        sys.stderr.write("\n%d error(s) in tests\n" % lit_config.numErrors)
        sys.exit(2)

    if lit_config.numWarnings:
        sys.stderr.write("\n%d warning(s) in tests\n" % lit_config.numWarnings)

    has_failure = any(t.isFailure() for t in discovered_tests)
    if has_failure:
        if opts.ignoreFail:
            sys.stderr.write(
                "\nExiting with status 0 instead of 1 because "
                "'--ignore-fail' was specified.\n"
            )
        else:
            sys.exit(1)
|
| 158 |
+
|
| 159 |
+
|
| 160 |
+
def create_params(builtin_params, user_params):
    """Merge builtin parameters with --param values from the command line.

    Each user parameter is either "name=value" or a bare "name" (which
    maps to the empty string). User parameters override builtin ones.
    """
    merged = dict(builtin_params)
    for raw in user_params:
        name, sep, value = raw.partition("=")
        merged[name] = value if sep else ""
    return merged
|
| 167 |
+
|
| 168 |
+
|
| 169 |
+
def print_discovered(tests, show_suites, show_tests):
    """Print the discovered suites and/or individual test names."""
    tests.sort(key=lit.reports.by_suite_and_test_path)

    if show_suites:
        print("-- Test Suites --")
        for suite, members in itertools.groupby(tests, lambda t: t.suite):
            count = sum(1 for _ in members)
            print("  %s - %d tests" % (suite.name, count))
            print("    Source Root: %s" % suite.source_root)
            print("    Exec Root  : %s" % suite.exec_root)
            features = " ".join(sorted(suite.config.available_features))
            print("    Available Features: %s" % features)
            subs = (
                "%s => %s" % (x, y) for (x, y) in sorted(suite.config.substitutions)
            )
            print("    Available Substitutions: %s" % "\n".ljust(30).join(subs))

    if show_tests:
        print("-- Available Tests --")
        for t in tests:
            print("  %s" % t.getFullName())
|
| 191 |
+
|
| 192 |
+
|
| 193 |
+
def determine_order(tests, order):
    """Reorder `tests` in place according to the requested --order policy."""
    from lit.cl_arguments import TestOrder

    policy = TestOrder(order)
    if policy == TestOrder.RANDOM:
        import random

        random.shuffle(tests)
    elif policy == TestOrder.LEXICAL:
        tests.sort(key=lambda t: t.getFullName())
    else:
        assert policy == TestOrder.SMART, "Unknown TestOrder value"
        # Smart order: previously-failing tests first, then longest-running
        # first, with ties broken lexically.
        tests.sort(
            key=lambda t: (not t.previous_failure, -t.previous_elapsed, t.getFullName())
        )
|
| 208 |
+
|
| 209 |
+
|
| 210 |
+
def filter_by_shard(tests, run, shards, lit_config):
    """Keep every shards-th test starting at index run-1 (run is 1-based)."""
    chosen_indices = range(run - 1, len(tests), shards)
    chosen = [tests[i] for i in chosen_indices]

    # For clarity, generate a preview of the first few test indices in the shard
    # to accompany the arithmetic expression.
    preview_len = 3
    preview = ", ".join(str(i + 1) for i in chosen_indices[:preview_len])
    if len(chosen_indices) > preview_len:
        preview += ", ..."
    lit_config.note(
        f"Selecting shard {run}/{shards} = "
        f"size {len(chosen)}/{len(tests)} = "
        f"tests #({shards}*k)+{run} = [{preview}]"
    )
    return chosen
|
| 227 |
+
|
| 228 |
+
|
| 229 |
+
def mark_xfail(selected_tests, opts):
    """Apply --xfail / --xfail-not markings given as file paths or full names."""
    for test in selected_tests:
        names = (os.sep.join(test.path_in_suite), test.getFullName())
        if any(n in opts.xfail for n in names):
            # "*" marks the test as unconditionally expected to fail.
            test.xfails += "*"
        if any(n in opts.xfail_not for n in names):
            test.xfail_not = True
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
def mark_excluded(discovered_tests, selected_tests):
    """Mark every discovered-but-unselected test with an EXCLUDED result."""
    selected = set(selected_tests)
    excluded_result = lit.Test.Result(lit.Test.EXCLUDED)
    for test in discovered_tests:
        if test not in selected:
            test.setResult(excluded_result)
|
| 244 |
+
|
| 245 |
+
|
| 246 |
+
def run_tests(tests, lit_config, opts, discovered_tests):
    """Execute `tests` with a worker pool, driving the progress display.

    `discovered_tests` is the total number of discovered tests (an int --
    main passes len(discovered_tests)), used only for the display header.
    Run-terminating exceptions (Ctrl-C, --max-failures, --timeout) are
    mapped onto a diagnostic written to stderr.
    """
    workers = min(len(tests), opts.workers)
    display = lit.display.create_display(opts, tests, discovered_tests, workers)

    # display.update is invoked by the run as each test completes.
    run = lit.run.Run(
        tests, lit_config, workers, display.update, opts.max_failures, opts.timeout
    )

    display.print_header()

    interrupted = False
    error = None
    try:
        execute_in_tmp_dir(run, lit_config)
    except KeyboardInterrupt:
        interrupted = True
        error = " interrupted by user"
    except lit.run.MaxFailuresError:
        error = "warning: reached maximum number of test failures"
    except lit.run.TimeoutError:
        error = "warning: reached timeout"

    display.clear(interrupted)
    if error:
        sys.stderr.write("%s, skipping remaining tests\n" % error)
|
| 271 |
+
|
| 272 |
+
|
| 273 |
+
def execute_in_tmp_dir(run, lit_config):
    """Run the tests inside a fresh scratch temp directory.

    The directory is exported through TMP/TMPDIR/TEMP/TEMPDIR (both in
    os.environ and in every test config's environment) so leaked temp
    files land in one place, and it is removed afterwards. Setting
    LIT_PRESERVES_TMP in the environment disables this behavior.
    """
    # Create a temp directory inside the normal temp directory so that we can
    # try to avoid temporary test file leaks. The user can avoid this behavior
    # by setting LIT_PRESERVES_TMP in the environment, so they can easily use
    # their own temp directory to monitor temporary file leaks or handle them at
    # the buildbot level.
    tmp_dir = None
    if "LIT_PRESERVES_TMP" not in os.environ:
        # Import shutil here (not inside the cleanup `try`) so a failed
        # import cannot be misreported as a deletion failure below.
        import shutil
        import tempfile

        # z/OS linker does not support '_' in paths, so use '-'.
        tmp_dir = tempfile.mkdtemp(prefix="lit-tmp-")
        tmp_dir_envs = {k: tmp_dir for k in ["TMP", "TMPDIR", "TEMP", "TEMPDIR"]}
        os.environ.update(tmp_dir_envs)
        for cfg in {t.config for t in run.tests}:
            cfg.environment.update(tmp_dir_envs)
    try:
        run.execute()
    finally:
        if tmp_dir:
            try:
                shutil.rmtree(tmp_dir)
            except Exception as e:
                # Best-effort cleanup; include the actual error (previously
                # discarded) so the failure cause is visible to the user.
                lit_config.warning(
                    "Failed to delete temp directory '%s' (%s), try upgrading "
                    "your version of Python to fix this" % (tmp_dir, e)
                )
|
| 302 |
+
|
| 303 |
+
|
| 304 |
+
def print_histogram(tests):
    """Print a histogram of runtimes for tests that recorded an elapsed time."""
    timed = [(t.getFullName(), t.result.elapsed) for t in tests if t.result.elapsed]
    if timed:
        lit.util.printHistogram(timed, title="Tests")
|
| 310 |
+
|
| 311 |
+
|
| 312 |
+
def print_results(tests, elapsed, opts):
    """Group tests by result code, print each group, then a summary table."""
    buckets = {code: [] for code in lit.Test.ResultCode.all_codes()}
    total_tests = len(tests)
    for t in tests:
        buckets[t.result.code].append(t)

    for code in lit.Test.ResultCode.all_codes():
        print_group(
            sorted(buckets[code], key=lambda t: t.getFullName()),
            code,
            opts.shown_codes,
        )

    print_summary(total_tests, buckets, opts.quiet, elapsed)
|
| 326 |
+
|
| 327 |
+
|
| 328 |
+
def print_group(tests, code, shown_codes):
    """Print the names of `tests` under a banner for `code`, when visible."""
    if not tests:
        return
    # Non-failure groups are only shown when explicitly requested.
    if not code.isFailure and code not in shown_codes:
        return
    print("*" * 20)
    print("{} Tests ({}):".format(code.label, len(tests)))
    for t in tests:
        print("    %s" % t.getFullName())
    sys.stdout.write("\n")
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
def print_summary(total_tests, tests_by_code, quiet, elapsed):
    """Print total timing plus an aligned per-result-code count table."""
    if not quiet:
        print("\nTesting Time: %.2fs" % elapsed)

    print("\nTotal Discovered Tests: %s" % (total_tests))
    # In quiet mode only failure codes are reported; empty groups are dropped.
    rows = []
    for code in lit.Test.ResultCode.all_codes():
        if quiet and not code.isFailure:
            continue
        n = len(tests_by_code[code])
        if n:
            rows.append((code.label, n))
    if not rows:
        return

    label_width = max(len(label) for label, _ in rows)
    count_width = max(len(str(n)) for _, n in rows)

    for label, n in rows:
        print(
            "  %s: %s (%.2f%%)"
            % (
                label.ljust(label_width),
                str(n).rjust(count_width),
                float(n) / total_tests * 100,
            )
        )
|
wemm/lib/python3.10/site-packages/lit/reports.py
ADDED
|
@@ -0,0 +1,280 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import base64
|
| 2 |
+
import datetime
|
| 3 |
+
import itertools
|
| 4 |
+
import json
|
| 5 |
+
|
| 6 |
+
from xml.sax.saxutils import quoteattr as quo
|
| 7 |
+
|
| 8 |
+
import lit.Test
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
def by_suite_and_test_path(test):
    """Sort key: (suite name, suite identity, path within the suite).

    Suite names are not necessarily unique, so the suite object's id is
    included to avoid interleaving tests from distinct suites that happen
    to share a name.
    """
    suite = test.suite
    return (suite.name, id(suite), test.path_in_suite)
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
class JsonReport(object):
    """Writes test results as a JSON document to `output_file`."""

    def __init__(self, output_file):
        self.output_file = output_file

    def write_results(self, tests, elapsed):
        """Serialize executed tests (EXCLUDED/SKIPPED are dropped) to JSON."""
        unexecuted_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED}
        tests = [t for t in tests if t.result.code not in unexecuted_codes]
        # Construct the data we will write.
        data = {}
        # Encode the current lit version as a schema version.
        data["__version__"] = lit.__versioninfo__
        data["elapsed"] = elapsed
        # FIXME: Record some information on the lit configuration used?
        # FIXME: Record information from the individual test suites?

        # Encode the tests.
        data["tests"] = tests_data = []
        for test in tests:
            test_data = {
                "name": test.getFullName(),
                "code": test.result.code.name,
                "output": test.result.output,
                "elapsed": test.result.elapsed,
            }

            # Add test metrics, if present.
            if test.result.metrics:
                test_data["metrics"] = metrics_data = {}
                for key, value in test.result.metrics.items():
                    metrics_data[key] = value.todata()

            # Report micro-tests separately, if present
            if test.result.microResults:
                for key, micro_test in test.result.microResults.items():
                    # Expand parent test name with micro test name
                    parent_name = test.getFullName()
                    micro_full_name = parent_name + ":" + key

                    micro_test_data = {
                        "name": micro_full_name,
                        "code": micro_test.code.name,
                        "output": micro_test.output,
                        "elapsed": micro_test.elapsed,
                    }
                    if micro_test.metrics:
                        micro_test_data["metrics"] = micro_metrics_data = {}
                        # NOTE: this loop reuses (shadows) `key` from the
                        # enclosing loop; harmless since micro_full_name was
                        # already computed above.
                        for key, value in micro_test.metrics.items():
                            micro_metrics_data[key] = value.todata()

                    # Micro-tests appear as their own top-level entries.
                    tests_data.append(micro_test_data)

            tests_data.append(test_data)

        with open(self.output_file, "w") as file:
            json.dump(data, file, indent=2, sort_keys=True)
            file.write("\n")
|
| 73 |
+
|
| 74 |
+
|
| 75 |
+
# Translation table mapping XML-1.0-forbidden control characters to None
# (i.e. deletion); tab (9), newline (10) and carriage return (13) are the
# only control characters XML 1.0 permits.
_invalid_xml_chars_dict = {
    code: None for code in range(32) if code not in (9, 10, 13)
}


def remove_invalid_xml_chars(s):
    """Strip control characters that may not appear in an XML 1.0 document.

    According to the XML 1.0 spec, control characters other than \\t, \\r,
    and \\n are not permitted anywhere in the document
    (https://www.w3.org/TR/xml/#charsets), so this removes them to keep the
    emitted report well-formed. Note: in XML 1.1 only \\0 is illegal
    (https://www.w3.org/TR/xml11/#charsets) but lit produces XML 1.0 output.
    """
    return s.translate(_invalid_xml_chars_dict)
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
class XunitReport(object):
    """Writes test results as JUnit/xunit-style XML to `output_file`."""

    def __init__(self, output_file):
        self.output_file = output_file
        # Codes reported as <skipped> rather than pass/fail.
        self.skipped_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED, lit.Test.UNSUPPORTED}

    def write_results(self, tests, elapsed):
        """Emit one <testsuite> element per lit suite, wrapped in <testsuites>."""
        tests.sort(key=by_suite_and_test_path)
        tests_by_suite = itertools.groupby(tests, lambda t: t.suite)

        with open(self.output_file, "w") as file:
            file.write('<?xml version="1.0" encoding="UTF-8"?>\n')
            file.write('<testsuites time="{time:.2f}">\n'.format(time=elapsed))
            for suite, test_iter in tests_by_suite:
                self._write_testsuite(file, suite, list(test_iter))
            file.write("</testsuites>\n")

    def _write_testsuite(self, file, suite, tests):
        """Write one <testsuite> element with its aggregate counts."""
        skipped = sum(1 for t in tests if t.result.code in self.skipped_codes)
        failures = sum(1 for t in tests if t.isFailure())

        # Dots in suite names would read as package separators in JUnit tools.
        name = suite.config.name.replace(".", "-")
        file.write(
            f'<testsuite name={quo(name)} tests="{len(tests)}" failures="{failures}" skipped="{skipped}">\n'
        )
        for test in tests:
            self._write_test(file, test, name)
        file.write("</testsuite>\n")

    def _write_test(self, file, test, suite_name):
        """Write one <testcase>, with failure output or skip reason if any."""
        path = "/".join(test.path_in_suite[:-1]).replace(".", "_")
        class_name = f"{suite_name}.{path or suite_name}"
        name = test.path_in_suite[-1]
        time = test.result.elapsed or 0.0
        file.write(
            f'<testcase classname={quo(class_name)} name={quo(name)} time="{time:.2f}"'
        )

        if test.isFailure():
            file.write(">\n  <failure><![CDATA[")
            # In the unlikely case that the output contains the CDATA
            # terminator we wrap it by creating a new CDATA block.
            output = test.result.output.replace("]]>", "]]]]><![CDATA[>")
            if isinstance(output, bytes):
                output = output.decode("utf-8", "ignore")

            # Failing test output sometimes contains control characters like
            # \x1b (e.g. if there was some -fcolor-diagnostics output) which are
            # not allowed inside XML files.
            # This causes problems with CI systems: for example, the Jenkins
            # JUnit XML will throw an exception when encountering those
            # characters and similar problems also occur with GitLab CI.
            output = remove_invalid_xml_chars(output)
            file.write(output)
            file.write("]]></failure>\n</testcase>\n")
        elif test.result.code in self.skipped_codes:
            reason = self._get_skip_reason(test)
            file.write(f">\n  <skipped message={quo(reason)}/>\n</testcase>\n")
        else:
            file.write("/>\n")

    def _get_skip_reason(self, test):
        """Return a human-readable explanation for a skipped-family result."""
        code = test.result.code
        if code == lit.Test.EXCLUDED:
            return "Test not selected (--filter, --max-tests)"
        if code == lit.Test.SKIPPED:
            return "User interrupt"

        assert code == lit.Test.UNSUPPORTED
        features = test.getMissingRequiredFeatures()
        if features:
            return "Missing required feature(s): " + ", ".join(features)
        return "Unsupported configuration"
|
| 163 |
+
|
| 164 |
+
|
| 165 |
+
def gen_resultdb_test_entry(
    test_name, start_time, elapsed_time, test_output, result_code, is_expected
):
    """Build a single ResultDB-format test-result dictionary.

    The test output is embedded as a base64-encoded artifact referenced from
    summary_html; the lit result code is mapped onto a ResultDB status.
    """
    entry = {
        "testId": test_name,
        "start_time": datetime.datetime.fromtimestamp(start_time).isoformat() + "Z",
        "duration": "%.9fs" % elapsed_time,
        "summary_html": '<p><text-artifact artifact-id="artifact-content-in-request"></p>',
        "artifacts": {
            "artifact-content-in-request": {
                "contents": base64.b64encode(test_output.encode("utf-8")).decode(
                    "utf-8"
                ),
            },
        },
        "expected": is_expected,
    }
    # Map the lit result code onto the ResultDB status vocabulary.
    if result_code in (lit.Test.PASS, lit.Test.XPASS, lit.Test.FLAKYPASS):
        entry["status"] = "PASS"
    elif result_code in (lit.Test.FAIL, lit.Test.XFAIL):
        entry["status"] = "FAIL"
    elif result_code in (lit.Test.UNSUPPORTED, lit.Test.SKIPPED, lit.Test.EXCLUDED):
        entry["status"] = "SKIP"
    elif result_code in (lit.Test.UNRESOLVED, lit.Test.TIMEOUT):
        entry["status"] = "ABORT"
    return entry
|
| 199 |
+
|
| 200 |
+
|
| 201 |
+
class ResultDBReport(object):
    """Writes test results as a ResultDB-style JSON report file."""

    def __init__(self, output_file):
        self.output_file = output_file

    def write_results(self, tests, elapsed):
        """Serialize executed *tests* (and their micro-results) to JSON."""
        unexecuted_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED}
        executed = [t for t in tests if t.result.code not in unexecuted_codes]

        tests_data = []
        for test in executed:
            tests_data.append(
                gen_resultdb_test_entry(
                    test_name=test.getFullName(),
                    start_time=test.result.start,
                    elapsed_time=test.result.elapsed,
                    test_output=test.result.output,
                    result_code=test.result.code,
                    is_expected=not test.result.code.isFailure,
                )
            )
            if test.result.microResults:
                parent_name = test.getFullName()
                for key, micro_test in test.result.microResults.items():
                    # Expand parent test name with micro test name.
                    micro_full_name = parent_name + ":" + key + "microres"
                    tests_data.append(
                        gen_resultdb_test_entry(
                            test_name=micro_full_name,
                            # Fall back to the parent's timing when the micro
                            # result did not record its own.
                            start_time=micro_test.start or test.result.start,
                            elapsed_time=micro_test.elapsed or test.result.elapsed,
                            test_output=micro_test.output,
                            result_code=micro_test.code,
                            is_expected=not micro_test.code.isFailure,
                        )
                    )

        data = {
            "__version__": lit.__versioninfo__,
            "elapsed": elapsed,
            "tests": tests_data,
        }
        with open(self.output_file, "w") as file:
            json.dump(data, file, indent=2, sort_keys=True)
            file.write("\n")
|
| 247 |
+
|
| 248 |
+
|
| 249 |
+
class TimeTraceReport(object):
    """Writes per-test timing as a Chrome trace-viewer JSON file."""

    def __init__(self, output_file):
        self.output_file = output_file
        # Tests with these codes never ran, so they produce no trace events.
        self.skipped_codes = {lit.Test.EXCLUDED, lit.Test.SKIPPED, lit.Test.UNSUPPORTED}

    def write_results(self, tests, elapsed):
        """Emit one trace event per executed test, relative to the first start."""
        # Find when first test started so we can make start times relative.
        first_start_time = min(t.result.start for t in tests)
        events = [
            self._get_test_event(test, first_start_time)
            for test in tests
            if test.result.code not in self.skipped_codes
        ]

        with open(self.output_file, "w") as time_trace_file:
            json.dump(
                {"traceEvents": events}, time_trace_file, indent=2, sort_keys=True
            )

    def _get_test_event(self, test, first_start_time):
        """Build one complete ('X') trace event; ts/dur are in microseconds."""
        elapsed_time = test.result.elapsed or 0.0
        start_time = test.result.start - first_start_time if test.result.start else 0.0
        return {
            "pid": test.result.pid or 0,
            "tid": 1,
            "ph": "X",
            "ts": int(start_time * 1000000.0),
            "dur": int(elapsed_time * 1000000.0),
            "name": test.getFullName(),
        }
|
wemm/lib/python3.10/site-packages/lit/util.py
ADDED
|
@@ -0,0 +1,550 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import print_function
|
| 2 |
+
|
| 3 |
+
import errno
|
| 4 |
+
import itertools
|
| 5 |
+
import math
|
| 6 |
+
import numbers
|
| 7 |
+
import os
|
| 8 |
+
import platform
|
| 9 |
+
import re
|
| 10 |
+
import signal
|
| 11 |
+
import subprocess
|
| 12 |
+
import sys
|
| 13 |
+
import threading
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def is_string(value):
    """Return True if *value* is a text string.

    On Python 2 this means ``basestring`` (str or unicode); on Python 3,
    where ``basestring`` does not exist, it means ``str``.
    """
    try:
        string_types = basestring  # Python 2 only.
    except NameError:
        string_types = str  # Python 3.
    return isinstance(value, string_types)
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
def pythonize_bool(value):
    """Coerce *value* into a bool.

    Accepts None (False), bools, numbers (non-zero is True), and the common
    textual spellings ("1"/"true"/"on"/"yes" and their negatives, case
    insensitively). Raises ValueError for anything unrecognized.
    """
    if value is None:
        return False
    if isinstance(value, bool):
        return value
    if isinstance(value, numbers.Number):
        return value != 0
    if is_string(value):
        lowered = value.lower()
        if lowered in ("1", "true", "on", "yes"):
            return True
        if lowered in ("", "0", "false", "off", "no"):
            return False
    raise ValueError('"{}" is not a valid boolean'.format(value))
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
def make_word_regex(word):
    """Wrap *word* in regex word-boundary anchors (``\\b``)."""
    return "".join((r"\b", word, r"\b"))
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def to_bytes(s):
    """Return the parameter as type 'bytes', possibly encoding it.

    In Python2, the 'bytes' type is the same as 'str'. In Python3, they
    are distinct; text is encoded as UTF-8.
    """
    # bytes (or Python 2 str) pass through unchanged; text is UTF-8 encoded.
    return s if isinstance(s, bytes) else s.encode("utf-8")
|
| 58 |
+
|
| 59 |
+
|
| 60 |
+
def to_string(b):
    """Return the parameter as type 'str', possibly encoding it.

    In Python2, the 'str' type is the same as 'bytes'. In Python3, the
    'str' type is (essentially) Python2's 'unicode' type, and 'bytes' is
    distinct.
    """
    if isinstance(b, str):
        # Python 2: covers both 'str' and 'bytes'.  Python 3: 'str' only.
        return b
    if isinstance(b, bytes):
        # Python 3 'bytes' (never reached on Python 2, handled above).
        try:
            return b.decode("utf-8")
        except UnicodeDecodeError:
            # Not valid UTF-8: fall back to the default repr-like encoding.
            return str(b)

    # The only remaining expected type is Python 2's 'unicode' (there is no
    # equivalent leftover type on Python 3); encode it to obtain a 'str'.
    encode = getattr(b, "encode", None)
    if encode is None:
        raise TypeError("not sure how to convert %s to %s" % (type(b), str))
    return encode("utf-8")
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
def to_unicode(s):
    """Return the parameter as a unicode-capable type, possibly decoding it.

    In Python2 that is the 'unicode' type; in Python3 it is 'str'.
    Bytes input is decoded as UTF-8; anything else passes through.
    """
    return s.decode("utf-8") if isinstance(s, bytes) else s
|
| 111 |
+
|
| 112 |
+
|
| 113 |
+
def usable_core_count():
    """Return the number of cores the current process can use, if supported.
    Otherwise, return the total number of cores (like `os.cpu_count()`).
    Default to 1 if undetermined.
    """
    affinity = getattr(os, "sched_getaffinity", None)
    if affinity is not None:
        n = len(affinity(0))
    else:
        n = os.cpu_count() or 1

    # On Windows with more than 60 processes, multiprocessing's call to
    # _winapi.WaitForMultipleObjects() prints an error and lit hangs.
    if platform.system() == "Windows":
        return min(n, 60)

    return n
|
| 130 |
+
|
| 131 |
+
def abs_path_preserve_drive(path):
    """Return the absolute path without resolving drive mappings on Windows."""
    if platform.system() != "Windows":
        # On UNIX, the current directory always has symbolic links resolved,
        # so any program accepting relative paths cannot preserve symbolic
        # links in paths and we should always use os.path.realpath.
        return os.path.realpath(path)
    # Windows has limitations on path length (MAX_PATH) that can be worked
    # around using substitute drives, which map a drive letter to a longer
    # path on another drive.  Since Python 3.8, os.path.realpath resolves
    # substitute drives, so we must not use it here; abspath leaves the
    # drive mapping intact.
    return os.path.abspath(path)
|
| 148 |
+
|
| 149 |
+
def mkdir(path):
    """Create directory *path* (single level), ignoring "already exists".

    On Windows the Win32 CreateDirectoryW API is called directly with an
    NT-style ``\\\\?\\`` path prefix, which avoids the MAX_PATH limit that
    plain ``os.mkdir`` would hit on long paths.
    """
    try:
        if platform.system() == "Windows":
            from ctypes import windll
            from ctypes import GetLastError, WinError

            path = os.path.abspath(path)
            # Make sure that the path uses backslashes here, in case
            # python would have happened to use forward slashes, as the
            # NT path format only supports backslashes.
            path = path.replace("/", "\\")
            NTPath = to_unicode(r"\\?\%s" % path)
            if not windll.kernel32.CreateDirectoryW(NTPath, None):
                # Translate the Win32 error into an OSError subclass so the
                # EEXIST handling below applies uniformly.
                raise WinError(GetLastError())
        else:
            os.mkdir(path)
    except OSError:
        e = sys.exc_info()[1]
        # ignore EEXIST, which may occur during a race condition
        if e.errno != errno.EEXIST:
            raise
|
| 170 |
+
|
| 171 |
+
|
| 172 |
+
def mkdir_p(path):
    """mkdir_p(path) - Make the "path" directory, if it does not exist; this
    will also make directories for any missing parent directories."""
    # Walk up collecting every missing ancestor, then create them root-first.
    pending = []
    while path and not os.path.exists(path):
        pending.append(path)
        parent = os.path.dirname(path)
        if parent == path:
            # Reached a filesystem root; stop climbing.
            break
        path = parent
    for directory in reversed(pending):
        mkdir(directory)
|
| 183 |
+
|
| 184 |
+
|
| 185 |
+
def listdir_files(dirname, suffixes=None, exclude_filenames=None):
    """Yields files in a directory.

    Filenames that are not excluded by the rules below are yielded one at a
    time, as basenames (i.e., without dirname).

    Files starting with '.' are always skipped, as are directories and any
    name listed in *exclude_filenames*.  If *suffixes* is given, only names
    ending with one of its members are yielded (an empty sequence therefore
    yields nothing; a single empty string yields everything).

    Args:
      dirname: a directory path.
      suffixes: (optional) a sequence of strings (set, list, etc.).
      exclude_filenames: (optional) a sequence of strings.

    Yields:
      Filenames as returned by os.listdir (generally, str).
    """
    excluded = set() if exclude_filenames is None else exclude_filenames
    allowed_suffixes = {""} if suffixes is None else suffixes
    for name in os.listdir(dirname):
        if name.startswith(".") or name in excluded:
            continue
        if os.path.isdir(os.path.join(dirname, name)):
            continue
        if not any(name.endswith(sfx) for sfx in allowed_suffixes):
            continue
        yield name
|
| 226 |
+
|
| 227 |
+
|
| 228 |
+
def which(command, paths=None):
    """which(command, [paths]) - Look up the given command in the paths string
    (or the PATH environment variable, if unspecified)."""

    if paths is None:
        paths = os.environ.get("PATH", "")

    # An absolute path to an existing file wins immediately.
    if os.path.isabs(command) and os.path.isfile(command):
        return os.path.normcase(os.path.normpath(command))

    if not paths:
        paths = os.defpath

    # On ';'-separated platforms (Windows) try each PATHEXT suffix too.
    # On Cygwin, 'PATHEXT' may exist but it should not be used.
    if os.pathsep == ";":
        extensions = os.environ.get("PATHEXT", "").split(";")
    else:
        extensions = [""]

    for directory in paths.split(os.pathsep):
        for ext in extensions:
            candidate = os.path.join(directory, command + ext)
            if os.path.exists(candidate) and not os.path.isdir(candidate):
                return os.path.normcase(os.path.abspath(candidate))

    return None
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
def checkToolsPath(dir, tools):
    """Return True iff every tool in *tools* exists inside directory *dir*."""
    return all(os.path.exists(os.path.join(dir, tool)) for tool in tools)
|
| 265 |
+
|
| 266 |
+
|
| 267 |
+
def whichTools(tools, paths):
    """Return the first directory in *paths* containing all *tools*, or None."""
    return next(
        (path for path in paths.split(os.pathsep) if checkToolsPath(path, tools)),
        None,
    )
|
| 272 |
+
|
| 273 |
+
|
| 274 |
+
def printHistogram(items, title="Items"):
    # Print a two-part timing report for (name, value) pairs: the 20 largest
    # entries individually, then an ASCII histogram of all values.
    # NOTE: sorts *items* in place by value (ascending).
    items.sort(key=lambda item: item[1])

    maxValue = max([v for _, v in items])

    # Select first "nice" bar height that produces more than 10 bars.
    power = int(math.ceil(math.log(maxValue, 10)))
    for inc in itertools.cycle((5, 2, 2.5, 1)):
        barH = inc * 10**power
        N = int(math.ceil(maxValue / barH))
        if N > 10:
            break
        elif inc == 1:
            # Exhausted this decade's increments; retry one power lower.
            power -= 1

    # Bucket names into N equal-width bins by value; clamp to the last bin
    # so the maximum value does not index out of range.
    histo = [set() for i in range(N)]
    for name, v in items:
        bin = min(int(N * v / maxValue), N - 1)
        histo[bin].add(name)

    barW = 40
    hr = "-" * (barW + 34)
    print("Slowest %s:" % title)
    print(hr)
    # The 20 largest entries, largest first.
    for name, value in reversed(items[-20:]):
        print("%.2fs: %s" % (value, name))
    print("\n%s Times:" % title)
    print(hr)
    # Column widths: pDigits integer digits, pfDigits fractional digits
    # (pad small ranges out to ~3 significant characters).
    pDigits = int(math.ceil(math.log(maxValue, 10)))
    pfDigits = max(0, 3 - pDigits)
    if pfDigits:
        pDigits += pfDigits + 1
    cDigits = int(math.ceil(math.log(len(items), 10)))
    print(
        "[%s] :: [%s] :: [%s]"
        % (
            "Range".center((pDigits + 1) * 2 + 3),
            "Percentage".center(barW),
            "Count".center(cDigits * 2 + 1),
        )
    )
    print(hr)
    # One row per bin, largest range first: half-open value range, a bar
    # proportional to the bin's share of items, and the item count.
    for i, row in reversed(list(enumerate(histo))):
        pct = float(len(row)) / len(items)
        w = int(barW * pct)
        print(
            "[%*.*fs,%*.*fs) :: [%s%s] :: [%*d/%*d]"
            % (
                pDigits,
                pfDigits,
                i * barH,
                pDigits,
                pfDigits,
                (i + 1) * barH,
                "*" * w,
                " " * (barW - w),
                cDigits,
                len(row),
                cDigits,
                len(items),
            )
        )
    print(hr)
|
| 337 |
+
|
| 338 |
+
|
| 339 |
+
class ExecuteCommandTimeoutException(Exception):
    """Raised by executeCommand() when the timeout is exceeded.

    Carries whatever output/exit code was captured before the process was
    killed.
    """

    def __init__(self, msg, out, err, exitCode):
        # Sanity-check argument types (stripped under -O, like any assert).
        assert isinstance(msg, str) and isinstance(out, str)
        assert isinstance(err, str) and isinstance(exitCode, int)
        self.msg = msg
        self.out = out
        self.err = err
        self.exitCode = exitCode
|
| 349 |
+
|
| 350 |
+
|
| 351 |
+
# Close extra file handles on UNIX (on Windows this cannot be done while
|
| 352 |
+
# also redirecting input).
|
| 353 |
+
kUseCloseFDs = not (platform.system() == "Windows")
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
def executeCommand(
    command, cwd=None, env=None, input=None, timeout=0, redirect_stderr=False
):
    """Execute command ``command`` (list of arguments or string) with.

    * working directory ``cwd`` (str), use None to use the current
      working directory
    * environment ``env`` (dict), use None for none
    * Input to the command ``input`` (str), use string to pass
      no input.
    * Max execution time ``timeout`` (int) seconds. Use 0 for no timeout.
    * ``redirect_stderr`` (bool), use True if redirect stderr to stdout

    Returns a tuple (out, err, exitCode) where
    * ``out`` (str) is the standard output of running the command
    * ``err`` (str) is the standard error of running the command
    * ``exitCode`` (int) is the exitCode of running the command

    If the timeout is hit an ``ExecuteCommandTimeoutException``
    is raised.

    """
    if input is not None:
        input = to_bytes(input)
    err_out = subprocess.STDOUT if redirect_stderr else subprocess.PIPE
    p = subprocess.Popen(
        command,
        cwd=cwd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=err_out,
        env=env,
        close_fds=kUseCloseFDs,
    )
    timerObject = None
    # FIXME: Because of the way nested function scopes work in Python 2.x we
    # need to use a reference to a mutable object rather than a plain
    # bool. In Python 3 we could use the "nonlocal" keyword but we need
    # to support Python 2 as well.
    hitTimeOut = [False]
    try:
        if timeout > 0:

            def killProcess():
                # We may be invoking a shell so we need to kill the
                # process and all its children.
                hitTimeOut[0] = True
                killProcessAndChildren(p.pid)

            timerObject = threading.Timer(timeout, killProcess)
            timerObject.start()

        out, err = p.communicate(input=input)
        exitCode = p.wait()
    finally:
        # Always cancel the watchdog timer, even if communicate() raised,
        # so a stray timer cannot fire later and kill an unrelated pid.
        # (Use "is not None" for the identity test, per PEP 8.)
        if timerObject is not None:
            timerObject.cancel()

    # Ensure the resulting output is always of string type.
    out = to_string(out)
    err = "" if redirect_stderr else to_string(err)

    if hitTimeOut[0]:
        raise ExecuteCommandTimeoutException(
            msg="Reached timeout of {} seconds".format(timeout),
            out=out,
            err=err,
            exitCode=exitCode,
        )

    # Detect Ctrl-C in subprocess.
    if exitCode == -signal.SIGINT:
        raise KeyboardInterrupt

    return out, err, exitCode
|
| 431 |
+
|
| 432 |
+
|
| 433 |
+
def isAIXTriple(target_triple):
    """Return True when *target_triple* targets AIX.

    e.g. ``powerpc64-ibm-aix``.
    """
    return "aix" in target_triple
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
def addAIXVersion(target_triple):
    """Add the AIX version to the given target triple,
    e.g. powerpc64-ibm-aix7.2.5.6
    """
    # `oslevel -s` reports the OS level; awk reformats its first three
    # '-'-separated fields into "<major.minor>.<n>.<n>" (e.g. "7.2.5.6" —
    # see docstring example). Assumes `oslevel`/`awk` exist, i.e. runs on AIX.
    os_cmd = "oslevel -s | awk -F\'-\' \'{printf \"%.1f.%d.%d\", $1/1000, $2, $3}\'"
    os_version = subprocess.run(os_cmd, capture_output=True, shell=True).stdout.decode()
    # Splice the version string directly after the "aix" component.
    return re.sub("aix", "aix" + os_version, target_triple)
|
| 447 |
+
|
| 448 |
+
|
| 449 |
+
def isMacOSTriple(target_triple):
    """Return True when *target_triple* targets macOS.

    e.g. ``x86_64-apple-darwin``, ``arm64-apple-macos``.
    """
    return any(os_name in target_triple for os_name in ("darwin", "macos"))
|
| 454 |
+
|
| 455 |
+
|
| 456 |
+
def usePlatformSdkOnDarwin(config, lit_config):
    """Set SDKROOT in the test environment from `xcrun` on macOS targets.

    No-op (silently) on non-macOS triples or when `xcrun` is unavailable
    or fails.
    """
    # On Darwin, support relocatable SDKs by providing Clang with a
    # default system root path.
    if isMacOSTriple(config.target_triple):
        try:
            # Ask Xcode's xcrun for the active macOS SDK path.
            cmd = subprocess.Popen(
                ["xcrun", "--show-sdk-path", "--sdk", "macosx"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            out, err = cmd.communicate()
            out = out.strip()
            res = cmd.wait()
        except OSError:
            # xcrun not installed/executable: mark failure via the sentinel.
            res = -1
        if res == 0 and out:
            sdk_path = out.decode()
            lit_config.note("using SDKROOT: %r" % sdk_path)
            config.environment["SDKROOT"] = sdk_path
|
| 475 |
+
|
| 476 |
+
|
| 477 |
+
def findPlatformSdkVersionOnMacOS(config, lit_config):
    """Return the macOS SDK version string reported by `xcrun`.

    Returns None when the target triple is not macOS, or when `xcrun` is
    missing or fails.
    """
    if isMacOSTriple(config.target_triple):
        try:
            cmd = subprocess.Popen(
                ["xcrun", "--show-sdk-version", "--sdk", "macosx"],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE,
            )
            out, err = cmd.communicate()
            out = out.strip()
            res = cmd.wait()
        except OSError:
            # xcrun not installed/executable: mark failure via the sentinel.
            res = -1
        if res == 0 and out:
            return out.decode()
    return None
|
| 493 |
+
|
| 494 |
+
|
| 495 |
+
def killProcessAndChildrenIsSupported():
    """
    Returns a tuple (<supported> , <error message>)
    where
    `<supported>` is True if `killProcessAndChildren()` is supported on
        the current host, returns False otherwise.
    `<error message>` is an empty string if `<supported>` is True,
        otherwise is contains a string describing why the function is
        not supported.
    """
    # AIX has its own shell-based implementation; psutil is not required.
    if platform.system() == "AIX":
        return (True, "")
    try:
        import psutil  # noqa: F401
    except ImportError:
        return (
            False,
            "Requires the Python psutil module but it could"
            " not be found. Try installing it via pip or via"
            " your operating system's package manager.",
        )
    return (True, "")
|
| 518 |
+
|
| 519 |
+
|
| 520 |
+
def killProcessAndChildren(pid):
    """This function kills a process with ``pid`` and all its running children
    (recursively). It is currently implemented using the psutil module on some
    platforms which provides a simple platform neutral implementation.

    TODO: Reimplement this without using psutil on all platforms so we can
    remove our dependency on it.

    """
    if platform.system() == "AIX":
        # Shell out to kill the pid and (presumably) its descendants listed
        # by `ps -L` — AIX-specific; psutil is not needed on this path.
        subprocess.call("kill -kill $(ps -o pid= -L{})".format(pid), shell=True)
    else:
        # Imported lazily: psutil is a third-party dependency that may be
        # missing (see killProcessAndChildrenIsSupported).
        import psutil

        try:
            psutilProc = psutil.Process(pid)
            # Handle the different psutil API versions
            try:
                # psutil >= 2.x
                children_iterator = psutilProc.children(recursive=True)
            except AttributeError:
                # psutil 1.x
                children_iterator = psutilProc.get_children(recursive=True)
            for child in children_iterator:
                try:
                    child.kill()
                except psutil.NoSuchProcess:
                    # Child exited on its own between enumeration and kill.
                    pass
            psutilProc.kill()
        except psutil.NoSuchProcess:
            # Target process already gone; nothing to do.
            pass
|
wemm/lib/python3.10/site-packages/lit/worker.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
The functions in this module are meant to run on a separate worker process.
|
| 3 |
+
Exception: in single process mode _execute is called directly.
|
| 4 |
+
|
| 5 |
+
For efficiency, we copy all data needed to execute all tests into each worker
|
| 6 |
+
and store it in global variables. This reduces the cost of each task.
|
| 7 |
+
"""
|
| 8 |
+
import contextlib
|
| 9 |
+
import os
|
| 10 |
+
import signal
|
| 11 |
+
import time
|
| 12 |
+
import traceback
|
| 13 |
+
|
| 14 |
+
import lit.Test
|
| 15 |
+
import lit.util
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
_lit_config = None
|
| 19 |
+
_parallelism_semaphores = None
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def initialize(lit_config, parallelism_semaphores):
    """Copy data shared by all test executions into worker processes"""
    # Stash shared state in module globals so each subsequent task only needs
    # to pickle the test itself.
    global _lit_config
    global _parallelism_semaphores
    _lit_config = lit_config
    _parallelism_semaphores = parallelism_semaphores

    # We use the following strategy for dealing with Ctrl+C/KeyboardInterrupt in
    # subprocesses created by the multiprocessing.Pool.
    # https://noswap.com/blog/python-multiprocessing-keyboardinterrupt
    signal.signal(signal.SIGINT, signal.SIG_IGN)
|
| 33 |
+
|
| 34 |
+
|
| 35 |
+
def execute(test):
    """Run one test in a multiprocessing.Pool

    Side effects in this function and functions it calls are not visible in the
    main lit process.

    Arguments and results of this function are pickled, so they should be cheap
    to copy.
    """
    # Serialize against other tests in the same parallelism group, if any.
    with _get_parallelism_semaphore(test):
        result = _execute(test, _lit_config)

    # Attach the result so the whole test object is pickled back to the parent.
    test.setResult(result)
    return test
|
| 49 |
+
|
| 50 |
+
|
| 51 |
+
# TODO(python3): replace with contextlib.nullcontext
@contextlib.contextmanager
def NopSemaphore():
    """A do-nothing context manager, used when a test has no semaphore."""
    yield
|
| 55 |
+
|
| 56 |
+
|
| 57 |
+
def _get_parallelism_semaphore(test):
    # Resolve the test's parallelism group (which may itself be a callable
    # taking the test) and return the matching semaphore, or a no-op context
    # manager when no semaphore is registered for that group.
    pg = test.config.parallelism_group
    if callable(pg):
        pg = pg(test)
    return _parallelism_semaphores.get(pg, NopSemaphore())
|
| 62 |
+
|
| 63 |
+
|
| 64 |
+
# Do not inline! Directly used by LitTestCase.py
def _execute(test, lit_config):
    """Run *test*, recording wall-clock timing and the worker pid on the result."""
    started_at = time.time()
    result = _execute_test_handle_errors(test, lit_config)
    result.elapsed = time.time() - started_at
    result.start = started_at
    result.pid = os.getpid()
    return result
|
| 72 |
+
|
| 73 |
+
|
| 74 |
+
def _execute_test_handle_errors(test, lit_config):
    """Run the test via its format, folding any exception into UNRESOLVED."""
    try:
        return _adapt_result(test.config.test_format.execute(test, lit_config))
    except:
        # Under --debug, propagate the exception so it is easy to diagnose.
        if lit_config.debug:
            raise
        message = "Exception during script execution:\n%s\n" % traceback.format_exc()
        return lit.Test.Result(lit.Test.UNRESOLVED, message)
|
| 85 |
+
|
| 86 |
+
|
| 87 |
+
# Support deprecated result from execute() which returned the result
# code and additional output as a tuple.
def _adapt_result(result):
    """Normalize an execute() return value into a ``lit.Test.Result``."""
    if not isinstance(result, lit.Test.Result):
        assert isinstance(result, tuple)
        code, output = result
        result = lit.Test.Result(code, output)
    return result
|
wemm/lib/python3.10/site-packages/nvidia_cusolver_cu12-11.4.5.107.dist-info/WHEEL
ADDED
|
@@ -0,0 +1,5 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
Wheel-Version: 1.0
|
| 2 |
+
Generator: bdist_wheel (0.37.1)
|
| 3 |
+
Root-Is-Purelib: true
|
| 4 |
+
Tag: py3-none-manylinux1_x86_64
|
| 5 |
+
|
wemm/lib/python3.10/site-packages/requests/__init__.py
ADDED
|
@@ -0,0 +1,184 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# __
|
| 2 |
+
# /__) _ _ _ _ _/ _
|
| 3 |
+
# / ( (- (/ (/ (- _) / _)
|
| 4 |
+
# /
|
| 5 |
+
|
| 6 |
+
"""
|
| 7 |
+
Requests HTTP Library
|
| 8 |
+
~~~~~~~~~~~~~~~~~~~~~
|
| 9 |
+
|
| 10 |
+
Requests is an HTTP library, written in Python, for human beings.
|
| 11 |
+
Basic GET usage:
|
| 12 |
+
|
| 13 |
+
>>> import requests
|
| 14 |
+
>>> r = requests.get('https://www.python.org')
|
| 15 |
+
>>> r.status_code
|
| 16 |
+
200
|
| 17 |
+
>>> b'Python is a programming language' in r.content
|
| 18 |
+
True
|
| 19 |
+
|
| 20 |
+
... or POST:
|
| 21 |
+
|
| 22 |
+
>>> payload = dict(key1='value1', key2='value2')
|
| 23 |
+
>>> r = requests.post('https://httpbin.org/post', data=payload)
|
| 24 |
+
>>> print(r.text)
|
| 25 |
+
{
|
| 26 |
+
...
|
| 27 |
+
"form": {
|
| 28 |
+
"key1": "value1",
|
| 29 |
+
"key2": "value2"
|
| 30 |
+
},
|
| 31 |
+
...
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
The other HTTP methods are supported - see `requests.api`. Full documentation
|
| 35 |
+
is at <https://requests.readthedocs.io>.
|
| 36 |
+
|
| 37 |
+
:copyright: (c) 2017 by Kenneth Reitz.
|
| 38 |
+
:license: Apache 2.0, see LICENSE for more details.
|
| 39 |
+
"""
|
| 40 |
+
|
| 41 |
+
import warnings
|
| 42 |
+
|
| 43 |
+
import urllib3
|
| 44 |
+
|
| 45 |
+
from .exceptions import RequestsDependencyWarning
|
| 46 |
+
|
| 47 |
+
try:
|
| 48 |
+
from charset_normalizer import __version__ as charset_normalizer_version
|
| 49 |
+
except ImportError:
|
| 50 |
+
charset_normalizer_version = None
|
| 51 |
+
|
| 52 |
+
try:
|
| 53 |
+
from chardet import __version__ as chardet_version
|
| 54 |
+
except ImportError:
|
| 55 |
+
chardet_version = None
|
| 56 |
+
|
| 57 |
+
|
| 58 |
+
def check_compatibility(urllib3_version, chardet_version, charset_normalizer_version):
    """Assert that the detected dependency versions are supported by requests.

    Raises ``AssertionError`` (or ``ValueError`` from int conversion) for an
    unsupported combination; warns with ``RequestsDependencyWarning`` when no
    character-detection package is available at all.
    """
    parts = urllib3_version.split(".")
    assert parts != ["dev"]  # Verify urllib3 isn't installed from git.

    # Sometimes, urllib3 only reports its version as 16.1.
    if len(parts) == 2:
        parts.append("0")

    # urllib3 must be >= 1.21.1.
    major, minor, patch = (int(piece) for piece in parts)
    assert major >= 1
    if major == 1:
        assert minor >= 21

    if chardet_version:
        # chardet must be >= 3.0.2 and < 6.0.0.
        cmajor, cminor, cpatch = (
            int(piece) for piece in chardet_version.split(".")[:3]
        )
        assert (3, 0, 2) <= (cmajor, cminor, cpatch) < (6, 0, 0)
    elif charset_normalizer_version:
        # charset_normalizer must be >= 2.0.0 and < 4.0.0.
        cmajor, cminor, cpatch = (
            int(piece) for piece in charset_normalizer_version.split(".")[:3]
        )
        assert (2, 0, 0) <= (cmajor, cminor, cpatch) < (4, 0, 0)
    else:
        warnings.warn(
            "Unable to find acceptable character detection dependency "
            "(chardet or charset_normalizer).",
            RequestsDependencyWarning,
        )
|
| 91 |
+
|
| 92 |
+
|
| 93 |
+
def _check_cryptography(cryptography_version):
|
| 94 |
+
# cryptography < 1.3.4
|
| 95 |
+
try:
|
| 96 |
+
cryptography_version = list(map(int, cryptography_version.split(".")))
|
| 97 |
+
except ValueError:
|
| 98 |
+
return
|
| 99 |
+
|
| 100 |
+
if cryptography_version < [1, 3, 4]:
|
| 101 |
+
warning = "Old version of cryptography ({}) may cause slowdown.".format(
|
| 102 |
+
cryptography_version
|
| 103 |
+
)
|
| 104 |
+
warnings.warn(warning, RequestsDependencyWarning)
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
# Check imported dependencies for compatibility.
try:
    check_compatibility(
        urllib3.__version__, chardet_version, charset_normalizer_version
    )
except (AssertionError, ValueError):
    # Incompatible versions are reported as a warning, not an import error,
    # so requests still imports in loosely-pinned environments.
    warnings.warn(
        "urllib3 ({}) or chardet ({})/charset_normalizer ({}) doesn't match a supported "
        "version!".format(
            urllib3.__version__, chardet_version, charset_normalizer_version
        ),
        RequestsDependencyWarning,
    )

# Attempt to enable urllib3's fallback for SNI support
# if the standard library doesn't support SNI or the
# 'ssl' library isn't available.
try:
    try:
        import ssl
    except ImportError:
        ssl = None

    if not getattr(ssl, "HAS_SNI", False):
        from urllib3.contrib import pyopenssl

        pyopenssl.inject_into_urllib3()

    # Check cryptography version
    from cryptography import __version__ as cryptography_version

    _check_cryptography(cryptography_version)
except ImportError:
    # Best effort: pyopenssl/cryptography are optional extras.
    pass

# urllib3's DependencyWarnings should be silenced.
from urllib3.exceptions import DependencyWarning

warnings.simplefilter("ignore", DependencyWarning)

# Set default logging handler to avoid "No handler found" warnings.
import logging
from logging import NullHandler

# Public API re-exports: everything below defines what `import requests`
# exposes at the top level.
from . import packages, utils
from .__version__ import (
    __author__,
    __author_email__,
    __build__,
    __cake__,
    __copyright__,
    __description__,
    __license__,
    __title__,
    __url__,
    __version__,
)
from .api import delete, get, head, options, patch, post, put, request
from .exceptions import (
    ConnectionError,
    ConnectTimeout,
    FileModeWarning,
    HTTPError,
    JSONDecodeError,
    ReadTimeout,
    RequestException,
    Timeout,
    TooManyRedirects,
    URLRequired,
)
from .models import PreparedRequest, Request, Response
from .sessions import Session, session
from .status_codes import codes

logging.getLogger(__name__).addHandler(NullHandler())

# FileModeWarnings go off per the default.
warnings.simplefilter("default", FileModeWarning, append=True)
|
wemm/lib/python3.10/site-packages/requests/__pycache__/_internal_utils.cpython-310.pyc
ADDED
|
Binary file (1.59 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/requests/__pycache__/help.cpython-310.pyc
ADDED
|
Binary file (2.82 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/requests/__pycache__/status_codes.cpython-310.pyc
ADDED
|
Binary file (4.71 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/requests/__version__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# .-. .-. .-. . . .-. .-. .-. .-.
# |( |- |.| | | |- `-. | `-.
# ' ' `-' `-`.`-' `-' `-' ' `-'

# Package metadata constants, re-exported by requests/__init__.py.
__title__ = "requests"
__description__ = "Python HTTP for Humans."
__url__ = "https://requests.readthedocs.io"
__version__ = "2.32.3"
# Hex-encoded version number: 0xMMmmpp (major, minor, patch).
__build__ = 0x023203
__author__ = "Kenneth Reitz"
__author_email__ = "me@kennethreitz.org"
__license__ = "Apache-2.0"
__copyright__ = "Copyright Kenneth Reitz"
# Sparkles + shortcake + sparkles: the project's easter-egg marker.
__cake__ = "\u2728 \U0001f370 \u2728"
|
wemm/lib/python3.10/site-packages/requests/_internal_utils.py
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
requests._internal_utils
|
| 3 |
+
~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Provides utility functions that are consumed internally by Requests
|
| 6 |
+
which depend on extremely few external helpers (such as compat)
|
| 7 |
+
"""
|
| 8 |
+
import re
|
| 9 |
+
|
| 10 |
+
from .compat import builtin_str
|
| 11 |
+
|
| 12 |
+
_VALID_HEADER_NAME_RE_BYTE = re.compile(rb"^[^:\s][^:\r\n]*$")
|
| 13 |
+
_VALID_HEADER_NAME_RE_STR = re.compile(r"^[^:\s][^:\r\n]*$")
|
| 14 |
+
_VALID_HEADER_VALUE_RE_BYTE = re.compile(rb"^\S[^\r\n]*$|^$")
|
| 15 |
+
_VALID_HEADER_VALUE_RE_STR = re.compile(r"^\S[^\r\n]*$|^$")
|
| 16 |
+
|
| 17 |
+
_HEADER_VALIDATORS_STR = (_VALID_HEADER_NAME_RE_STR, _VALID_HEADER_VALUE_RE_STR)
|
| 18 |
+
_HEADER_VALIDATORS_BYTE = (_VALID_HEADER_NAME_RE_BYTE, _VALID_HEADER_VALUE_RE_BYTE)
|
| 19 |
+
HEADER_VALIDATORS = {
|
| 20 |
+
bytes: _HEADER_VALIDATORS_BYTE,
|
| 21 |
+
str: _HEADER_VALIDATORS_STR,
|
| 22 |
+
}
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def to_native_string(string, encoding="ascii"):
    """Return *string* as a native ``str``.

    Byte strings are decoded with *encoding* (ASCII unless told otherwise);
    values that are already native strings are returned unchanged.
    """
    if isinstance(string, builtin_str):
        return string
    return string.decode(encoding)
|
| 36 |
+
|
| 37 |
+
|
| 38 |
+
def unicode_is_ascii(u_string):
    """Determine if unicode string only contains ASCII characters.

    :param str u_string: unicode string to check. Must be unicode
        and not Python 2 `str`.
    :rtype: bool
    """
    assert isinstance(u_string, str)
    try:
        u_string.encode("ascii")
    except UnicodeEncodeError:
        return False
    return True
|
wemm/lib/python3.10/site-packages/requests/adapters.py
ADDED
|
@@ -0,0 +1,719 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
requests.adapters
|
| 3 |
+
~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
This module contains the transport adapters that Requests uses to define
|
| 6 |
+
and maintain connections.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import os.path
|
| 10 |
+
import socket # noqa: F401
|
| 11 |
+
import typing
|
| 12 |
+
import warnings
|
| 13 |
+
|
| 14 |
+
from urllib3.exceptions import ClosedPoolError, ConnectTimeoutError
|
| 15 |
+
from urllib3.exceptions import HTTPError as _HTTPError
|
| 16 |
+
from urllib3.exceptions import InvalidHeader as _InvalidHeader
|
| 17 |
+
from urllib3.exceptions import (
|
| 18 |
+
LocationValueError,
|
| 19 |
+
MaxRetryError,
|
| 20 |
+
NewConnectionError,
|
| 21 |
+
ProtocolError,
|
| 22 |
+
)
|
| 23 |
+
from urllib3.exceptions import ProxyError as _ProxyError
|
| 24 |
+
from urllib3.exceptions import ReadTimeoutError, ResponseError
|
| 25 |
+
from urllib3.exceptions import SSLError as _SSLError
|
| 26 |
+
from urllib3.poolmanager import PoolManager, proxy_from_url
|
| 27 |
+
from urllib3.util import Timeout as TimeoutSauce
|
| 28 |
+
from urllib3.util import parse_url
|
| 29 |
+
from urllib3.util.retry import Retry
|
| 30 |
+
from urllib3.util.ssl_ import create_urllib3_context
|
| 31 |
+
|
| 32 |
+
from .auth import _basic_auth_str
|
| 33 |
+
from .compat import basestring, urlparse
|
| 34 |
+
from .cookies import extract_cookies_to_jar
|
| 35 |
+
from .exceptions import (
|
| 36 |
+
ConnectionError,
|
| 37 |
+
ConnectTimeout,
|
| 38 |
+
InvalidHeader,
|
| 39 |
+
InvalidProxyURL,
|
| 40 |
+
InvalidSchema,
|
| 41 |
+
InvalidURL,
|
| 42 |
+
ProxyError,
|
| 43 |
+
ReadTimeout,
|
| 44 |
+
RetryError,
|
| 45 |
+
SSLError,
|
| 46 |
+
)
|
| 47 |
+
from .models import Response
|
| 48 |
+
from .structures import CaseInsensitiveDict
|
| 49 |
+
from .utils import (
|
| 50 |
+
DEFAULT_CA_BUNDLE_PATH,
|
| 51 |
+
extract_zipped_paths,
|
| 52 |
+
get_auth_from_url,
|
| 53 |
+
get_encoding_from_headers,
|
| 54 |
+
prepend_scheme_if_needed,
|
| 55 |
+
select_proxy,
|
| 56 |
+
urldefragauth,
|
| 57 |
+
)
|
| 58 |
+
|
| 59 |
+
try:
    from urllib3.contrib.socks import SOCKSProxyManager
except ImportError:
    # SOCKS support is optional (needs the socks extra); fall back to a stub
    # that fails loudly only when a SOCKS proxy is actually used.
    def SOCKSProxyManager(*args, **kwargs):
        raise InvalidSchema("Missing dependencies for SOCKS support.")


if typing.TYPE_CHECKING:
    # Imported for type annotations only, avoiding a runtime import cycle.
    from .models import PreparedRequest
|
| 69 |
+
|
| 70 |
+
|
| 71 |
+
DEFAULT_POOLBLOCK = False  # whether connection pools block when exhausted
DEFAULT_POOLSIZE = 10  # default number of pools / connections per pool
DEFAULT_RETRIES = 0  # by default requests does not retry failed connections
DEFAULT_POOL_TIMEOUT = None


try:
    import ssl  # noqa: F401

    # Preload a default SSLContext with the CA bundle so standard HTTPS
    # requests can skip the slow load_verify_locations() call per connection.
    _preloaded_ssl_context = create_urllib3_context()
    _preloaded_ssl_context.load_verify_locations(
        extract_zipped_paths(DEFAULT_CA_BUNDLE_PATH)
    )
except ImportError:
    # Bypass default SSLContext creation when Python
    # interpreter isn't built with the ssl module.
    _preloaded_ssl_context = None
|
| 88 |
+
|
| 89 |
+
|
| 90 |
+
def _urllib3_request_context(
    request: "PreparedRequest",
    verify: "bool | str | None",
    client_cert: "typing.Tuple[str, str] | str | None",
    poolmanager: "PoolManager",
) -> "(typing.Dict[str, typing.Any], typing.Dict[str, typing.Any])":
    """Translate a request plus TLS options into the host parameters and
    pool keyword arguments urllib3 uses to select a connection.
    """
    pool_kwargs = {}
    parsed_request_url = urlparse(request.url)
    scheme = parsed_request_url.scheme.lower()
    port = parsed_request_url.port

    # Determine if we have and should use our default SSLContext
    # to optimize performance on standard requests.
    poolmanager_kwargs = getattr(poolmanager, "connection_pool_kw", {})
    should_use_default_ssl_context = (
        _preloaded_ssl_context is not None
        and not poolmanager_kwargs.get("ssl_context")
    )

    cert_reqs = "CERT_REQUIRED"
    if verify is False:
        cert_reqs = "CERT_NONE"
    elif verify is True and should_use_default_ssl_context:
        pool_kwargs["ssl_context"] = _preloaded_ssl_context
    elif isinstance(verify, str):
        bundle_key = "ca_certs" if not os.path.isdir(verify) else "ca_cert_dir"
        pool_kwargs[bundle_key] = verify
    pool_kwargs["cert_reqs"] = cert_reqs

    if client_cert is not None:
        if isinstance(client_cert, tuple) and len(client_cert) == 2:
            pool_kwargs["cert_file"], pool_kwargs["key_file"] = client_cert
        else:
            # According to our docs, we allow users to specify just the client
            # cert path
            pool_kwargs["cert_file"] = client_cert

    host_params = {
        "scheme": scheme,
        "host": parsed_request_url.hostname,
        "port": port,
    }
    return host_params, pool_kwargs
|
| 135 |
+
|
| 136 |
+
|
| 137 |
+
class BaseAdapter:
    """The Base Transport Adapter"""

    def __init__(self):
        super().__init__()

    def send(
        self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
    ):
        """Sends PreparedRequest object. Returns Response object.

        Subclasses must override this; the base implementation only defines
        the transport-adapter contract.

        :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
        :param stream: (optional) Whether to stream the request content.
        :param timeout: (optional) How long to wait for the server to send
            data before giving up, as a float, or a :ref:`(connect timeout,
            read timeout) <timeouts>` tuple.
        :type timeout: float or tuple
        :param verify: (optional) Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use
        :param cert: (optional) Any user-provided SSL certificate to be trusted.
        :param proxies: (optional) The proxies dictionary to apply to the request.
        """
        raise NotImplementedError

    def close(self):
        """Cleans up adapter specific items."""
        raise NotImplementedError
|
| 165 |
+
|
| 166 |
+
|
| 167 |
+
class HTTPAdapter(BaseAdapter):
|
| 168 |
+
"""The built-in HTTP Adapter for urllib3.
|
| 169 |
+
|
| 170 |
+
Provides a general-case interface for Requests sessions to contact HTTP and
|
| 171 |
+
HTTPS urls by implementing the Transport Adapter interface. This class will
|
| 172 |
+
usually be created by the :class:`Session <Session>` class under the
|
| 173 |
+
covers.
|
| 174 |
+
|
| 175 |
+
:param pool_connections: The number of urllib3 connection pools to cache.
|
| 176 |
+
:param pool_maxsize: The maximum number of connections to save in the pool.
|
| 177 |
+
:param max_retries: The maximum number of retries each connection
|
| 178 |
+
should attempt. Note, this applies only to failed DNS lookups, socket
|
| 179 |
+
connections and connection timeouts, never to requests where data has
|
| 180 |
+
made it to the server. By default, Requests does not retry failed
|
| 181 |
+
connections. If you need granular control over the conditions under
|
| 182 |
+
which we retry a request, import urllib3's ``Retry`` class and pass
|
| 183 |
+
that instead.
|
| 184 |
+
:param pool_block: Whether the connection pool should block for connections.
|
| 185 |
+
|
| 186 |
+
Usage::
|
| 187 |
+
|
| 188 |
+
>>> import requests
|
| 189 |
+
>>> s = requests.Session()
|
| 190 |
+
>>> a = requests.adapters.HTTPAdapter(max_retries=3)
|
| 191 |
+
>>> s.mount('http://', a)
|
| 192 |
+
"""
|
| 193 |
+
|
| 194 |
+
__attrs__ = [
|
| 195 |
+
"max_retries",
|
| 196 |
+
"config",
|
| 197 |
+
"_pool_connections",
|
| 198 |
+
"_pool_maxsize",
|
| 199 |
+
"_pool_block",
|
| 200 |
+
]
|
| 201 |
+
|
| 202 |
+
    def __init__(
        self,
        pool_connections=DEFAULT_POOLSIZE,
        pool_maxsize=DEFAULT_POOLSIZE,
        max_retries=DEFAULT_RETRIES,
        pool_block=DEFAULT_POOLBLOCK,
    ):
        # The default of 0 retries is special-cased so that read errors are
        # never retried (the request may already have reached the server).
        if max_retries == DEFAULT_RETRIES:
            self.max_retries = Retry(0, read=False)
        else:
            self.max_retries = Retry.from_int(max_retries)
        self.config = {}
        self.proxy_manager = {}  # per-proxy urllib3 manager cache

        super().__init__()

        # Stored so __getstate__/__setstate__ can round-trip the adapter.
        self._pool_connections = pool_connections
        self._pool_maxsize = pool_maxsize
        self._pool_block = pool_block

        self.init_poolmanager(pool_connections, pool_maxsize, block=pool_block)
|
| 223 |
+
|
| 224 |
+
def __getstate__(self):
|
| 225 |
+
return {attr: getattr(self, attr, None) for attr in self.__attrs__}
|
| 226 |
+
|
| 227 |
+
def __setstate__(self, state):
|
| 228 |
+
# Can't handle by adding 'proxy_manager' to self.__attrs__ because
|
| 229 |
+
# self.poolmanager uses a lambda function, which isn't pickleable.
|
| 230 |
+
self.proxy_manager = {}
|
| 231 |
+
self.config = {}
|
| 232 |
+
|
| 233 |
+
for attr, value in state.items():
|
| 234 |
+
setattr(self, attr, value)
|
| 235 |
+
|
| 236 |
+
self.init_poolmanager(
|
| 237 |
+
self._pool_connections, self._pool_maxsize, block=self._pool_block
|
| 238 |
+
)
|
| 239 |
+
|
| 240 |
+
def init_poolmanager(
|
| 241 |
+
self, connections, maxsize, block=DEFAULT_POOLBLOCK, **pool_kwargs
|
| 242 |
+
):
|
| 243 |
+
"""Initializes a urllib3 PoolManager.
|
| 244 |
+
|
| 245 |
+
This method should not be called from user code, and is only
|
| 246 |
+
exposed for use when subclassing the
|
| 247 |
+
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
|
| 248 |
+
|
| 249 |
+
:param connections: The number of urllib3 connection pools to cache.
|
| 250 |
+
:param maxsize: The maximum number of connections to save in the pool.
|
| 251 |
+
:param block: Block when no free connections are available.
|
| 252 |
+
:param pool_kwargs: Extra keyword arguments used to initialize the Pool Manager.
|
| 253 |
+
"""
|
| 254 |
+
# save these values for pickling
|
| 255 |
+
self._pool_connections = connections
|
| 256 |
+
self._pool_maxsize = maxsize
|
| 257 |
+
self._pool_block = block
|
| 258 |
+
|
| 259 |
+
self.poolmanager = PoolManager(
|
| 260 |
+
num_pools=connections,
|
| 261 |
+
maxsize=maxsize,
|
| 262 |
+
block=block,
|
| 263 |
+
**pool_kwargs,
|
| 264 |
+
)
|
| 265 |
+
|
| 266 |
+
def proxy_manager_for(self, proxy, **proxy_kwargs):
|
| 267 |
+
"""Return urllib3 ProxyManager for the given proxy.
|
| 268 |
+
|
| 269 |
+
This method should not be called from user code, and is only
|
| 270 |
+
exposed for use when subclassing the
|
| 271 |
+
:class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.
|
| 272 |
+
|
| 273 |
+
:param proxy: The proxy to return a urllib3 ProxyManager for.
|
| 274 |
+
:param proxy_kwargs: Extra keyword arguments used to configure the Proxy Manager.
|
| 275 |
+
:returns: ProxyManager
|
| 276 |
+
:rtype: urllib3.ProxyManager
|
| 277 |
+
"""
|
| 278 |
+
if proxy in self.proxy_manager:
|
| 279 |
+
manager = self.proxy_manager[proxy]
|
| 280 |
+
elif proxy.lower().startswith("socks"):
|
| 281 |
+
username, password = get_auth_from_url(proxy)
|
| 282 |
+
manager = self.proxy_manager[proxy] = SOCKSProxyManager(
|
| 283 |
+
proxy,
|
| 284 |
+
username=username,
|
| 285 |
+
password=password,
|
| 286 |
+
num_pools=self._pool_connections,
|
| 287 |
+
maxsize=self._pool_maxsize,
|
| 288 |
+
block=self._pool_block,
|
| 289 |
+
**proxy_kwargs,
|
| 290 |
+
)
|
| 291 |
+
else:
|
| 292 |
+
proxy_headers = self.proxy_headers(proxy)
|
| 293 |
+
manager = self.proxy_manager[proxy] = proxy_from_url(
|
| 294 |
+
proxy,
|
| 295 |
+
proxy_headers=proxy_headers,
|
| 296 |
+
num_pools=self._pool_connections,
|
| 297 |
+
maxsize=self._pool_maxsize,
|
| 298 |
+
block=self._pool_block,
|
| 299 |
+
**proxy_kwargs,
|
| 300 |
+
)
|
| 301 |
+
|
| 302 |
+
return manager
|
| 303 |
+
|
| 304 |
+
    def cert_verify(self, conn, url, verify, cert):
        """Verify a SSL certificate. This method should not be called from user
        code, and is only exposed for use when subclassing the
        :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

        :param conn: The urllib3 connection object associated with the cert.
        :param url: The requested URL.
        :param verify: Either a boolean, in which case it controls whether we verify
            the server's TLS certificate, or a string, in which case it must be a path
            to a CA bundle to use
        :param cert: The SSL certificate to verify.
        """
        if url.lower().startswith("https") and verify:
            conn.cert_reqs = "CERT_REQUIRED"

            # Only load the CA certificates if 'verify' is a string indicating the CA bundle to use.
            # Otherwise, if verify is a boolean, we don't load anything since
            # the connection will be using a context with the default certificates already loaded,
            # and this avoids a call to the slow load_verify_locations()
            if verify is not True:
                # `verify` must be a str with a path then
                cert_loc = verify

                if not os.path.exists(cert_loc):
                    raise OSError(
                        f"Could not find a suitable TLS CA certificate bundle, "
                        f"invalid path: {cert_loc}"
                    )

                # A file path is a CA bundle; a directory is a CA cert dir.
                if not os.path.isdir(cert_loc):
                    conn.ca_certs = cert_loc
                else:
                    conn.ca_cert_dir = cert_loc
        else:
            # http:// URL or verify disabled: clear any prior CA settings.
            conn.cert_reqs = "CERT_NONE"
            conn.ca_certs = None
            conn.ca_cert_dir = None

        # Client-side certificate handling: either a (cert, key) pair or a
        # single combined cert file path.
        if cert:
            if not isinstance(cert, basestring):
                conn.cert_file = cert[0]
                conn.key_file = cert[1]
            else:
                conn.cert_file = cert
                conn.key_file = None
            if conn.cert_file and not os.path.exists(conn.cert_file):
                raise OSError(
                    f"Could not find the TLS certificate file, "
                    f"invalid path: {conn.cert_file}"
                )
            if conn.key_file and not os.path.exists(conn.key_file):
                raise OSError(
                    f"Could not find the TLS key file, invalid path: {conn.key_file}"
                )
|
| 358 |
+
|
| 359 |
+
def build_response(self, req, resp):
    """Convert a raw urllib3 response into a :class:`Response <requests.Response>`.

    This should not be called from user code; it is exposed only for use
    when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param req: The :class:`PreparedRequest <PreparedRequest>` used to generate the response.
    :param resp: The urllib3 response object.
    :rtype: requests.Response
    """
    response = Response()

    # The raw object may lack a status attribute; fall back to None.
    response.status_code = getattr(resp, "status", None)

    # HTTP header names are case-insensitive, so normalize the mapping.
    response.headers = CaseInsensitiveDict(getattr(resp, "headers", {}))

    # Text encoding is derived from the (normalized) headers.
    response.encoding = get_encoding_from_headers(response.headers)
    response.raw = resp
    response.reason = response.raw.reason

    # URLs are always exposed as text on the Response.
    response.url = req.url.decode("utf-8") if isinstance(req.url, bytes) else req.url

    # Collect any Set-Cookie values into the response's cookie jar.
    extract_cookies_to_jar(response.cookies, req, resp)

    # Attach request/adapter context for downstream consumers.
    response.request = req
    response.connection = self

    return response
|
| 395 |
+
|
| 396 |
+
def build_connection_pool_key_attributes(self, request, verify, cert=None):
    """Build the PoolKey attributes used by urllib3 to return a connection.

    This looks at the PreparedRequest, the user-specified verify value,
    and the value of the cert parameter to determine what PoolKey values
    to use to select a connection from a given urllib3 Connection Pool.

    The SSL related pool key arguments are not consistently set. As of
    this writing, use the following to determine what keys may be in that
    dictionary:

    * If ``verify`` is ``True``, ``"ssl_context"`` will be set and will be the
      default Requests SSL Context
    * If ``verify`` is ``False``, ``"ssl_context"`` will not be set but
      ``"cert_reqs"`` will be set
    * If ``verify`` is a string, (i.e., it is a user-specified trust bundle)
      ``"ca_certs"`` will be set if the string is not a directory recognized
      by :py:func:`os.path.isdir`, otherwise ``"ca_certs_dir"`` will be
      set.
    * If ``"cert"`` is specified, ``"cert_file"`` will always be set. If
      ``"cert"`` is a tuple with a second item, ``"key_file"`` will also
      be present

    To override these settings, one may subclass this class, call this
    method and use the above logic to change parameters as desired. For
    example, if one wishes to use a custom :py:class:`ssl.SSLContext` one
    must both set ``"ssl_context"`` and based on what else they require,
    alter the other keys to ensure the desired behaviour.

    :param request:
        The PreparedRequest being sent over the connection.
    :type request:
        :class:`~requests.models.PreparedRequest`
    :param verify:
        Either a boolean, in which case it controls whether
        we verify the server's TLS certificate, or a string, in which case it
        must be a path to a CA bundle to use.
    :param cert:
        (optional) Any user-provided SSL certificate for client
        authentication (a.k.a., mTLS). This may be a string (i.e., just
        the path to a file which holds both certificate and key) or a
        tuple of length 2 with the certificate file path and key file
        path.
    :returns:
        A tuple of two dictionaries. The first is the "host parameters"
        portion of the Pool Key including scheme, hostname, and port. The
        second is a dictionary of SSLContext related parameters.
    """
    # Delegate the actual key construction to the module-level helper,
    # which also consults the adapter's pool manager configuration.
    return _urllib3_request_context(request, verify, cert, self.poolmanager)
|
| 445 |
+
|
| 446 |
+
def get_connection_with_tls_context(self, request, verify, proxies=None, cert=None):
    """Return a urllib3 connection pool for the given request and TLS settings.

    This should not be called from user code; it is exposed only for use
    when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param request:
        The :class:`PreparedRequest <PreparedRequest>` object to be sent
        over the connection.
    :param verify:
        Either a boolean, in which case it controls whether we verify the
        server's TLS certificate, or a string, in which case it must be a
        path to a CA bundle to use.
    :param proxies:
        (optional) The proxies dictionary to apply to the request.
    :param cert:
        (optional) Any user-provided SSL certificate to be used for client
        authentication (a.k.a., mTLS).
    :rtype:
        urllib3.ConnectionPool
    """
    proxy = select_proxy(request.url, proxies)
    try:
        host_params, pool_kwargs = self.build_connection_pool_key_attributes(
            request,
            verify,
            cert,
        )
    except ValueError as e:
        # Pool key construction rejects malformed URLs.
        raise InvalidURL(e, request=request)

    if not proxy:
        # Only scheme should be lower case
        return self.poolmanager.connection_from_host(
            **host_params, pool_kwargs=pool_kwargs
        )

    proxy = prepend_scheme_if_needed(proxy, "http")
    proxy_url = parse_url(proxy)
    if not proxy_url.host:
        raise InvalidProxyURL(
            "Please check proxy URL. It is malformed "
            "and could be missing the host."
        )
    proxy_manager = self.proxy_manager_for(proxy)
    return proxy_manager.connection_from_host(
        **host_params, pool_kwargs=pool_kwargs
    )
|
| 494 |
+
|
| 495 |
+
def get_connection(self, url, proxies=None):
    """DEPRECATED: Users should move to `get_connection_with_tls_context`
    for all subclasses of HTTPAdapter using Requests>=2.32.2.

    Returns a urllib3 connection for the given URL. This should not be
    called from user code, and is only exposed for use when subclassing the
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param url: The URL to connect to.
    :param proxies: (optional) A Requests-style dictionary of proxies used on this request.
    :rtype: urllib3.ConnectionPool
    """
    warnings.warn(
        (
            "`get_connection` has been deprecated in favor of "
            "`get_connection_with_tls_context`. Custom HTTPAdapter subclasses "
            "will need to migrate for Requests>=2.32.2. Please see "
            "https://github.com/psf/requests/pull/6710 for more details."
        ),
        DeprecationWarning,
    )
    proxy = select_proxy(url, proxies)

    if not proxy:
        # Only scheme should be lower case
        normalized = urlparse(url).geturl()
        return self.poolmanager.connection_from_url(normalized)

    proxy = prepend_scheme_if_needed(proxy, "http")
    proxy_parts = parse_url(proxy)
    if not proxy_parts.host:
        raise InvalidProxyURL(
            "Please check proxy URL. It is malformed "
            "and could be missing the host."
        )
    return self.proxy_manager_for(proxy).connection_from_url(url)
|
| 535 |
+
|
| 536 |
+
def close(self):
    """Dispose of any internal state.

    Currently this clears the PoolManager and every active ProxyManager,
    which in turn closes any pooled connections.
    """
    self.poolmanager.clear()
    for proxy_manager in self.proxy_manager.values():
        proxy_manager.clear()
|
| 545 |
+
|
| 546 |
+
def request_url(self, request, proxies):
    """Obtain the URL to use when making the final request.

    If the message is being sent through an HTTP proxy, the full URL has to
    be used. Otherwise, only the path portion of the URL should be used.

    This should not be called from user code; it is exposed only for use
    when subclassing the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs.
    :rtype: str
    """
    proxy = select_proxy(request.url, proxies)
    scheme = urlparse(request.url).scheme

    is_proxied_http_request = proxy and scheme != "https"
    # SOCKS proxies tunnel at the transport layer, so the path form is kept.
    using_socks_proxy = bool(proxy) and urlparse(proxy).scheme.lower().startswith(
        "socks"
    )

    url = request.path_url
    if url.startswith("//"):  # Don't confuse urllib3
        url = f"/{url.lstrip('/')}"

    if is_proxied_http_request and not using_socks_proxy:
        url = urldefragauth(request.url)

    return url
|
| 577 |
+
|
| 578 |
+
def add_headers(self, request, **kwargs):
    """Add any headers needed by the connection.

    As of v2.0 this is intentionally a no-op, left as a hook for users
    that subclass the :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    This should not be called from user code; it is exposed only for use
    when subclassing the
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param request: The :class:`PreparedRequest <PreparedRequest>` to add headers to.
    :param kwargs: The keyword arguments from the call to send().
    """
    # Deliberately empty: subclasses override to inject headers.
|
| 591 |
+
|
| 592 |
+
def proxy_headers(self, proxy):
    """Return the headers to add to any request sent through a proxy.

    This works with urllib3 magic to ensure the headers are correctly sent
    to the proxy, rather than in a tunnelled request when CONNECT is used.

    This should not be called from user code; it is exposed only for use
    when subclassing the
    :class:`HTTPAdapter <requests.adapters.HTTPAdapter>`.

    :param proxy: The url of the proxy being used for this request.
    :rtype: dict
    """
    username, password = get_auth_from_url(proxy)
    if not username:
        # No credentials embedded in the proxy URL: nothing to add.
        return {}
    return {"Proxy-Authorization": _basic_auth_str(username, password)}
|
| 612 |
+
|
| 613 |
+
def send(
    self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None
):
    """Sends PreparedRequest object. Returns Response object.

    :param request: The :class:`PreparedRequest <PreparedRequest>` being sent.
    :param stream: (optional) Whether to stream the request content.
    :param timeout: (optional) How long to wait for the server to send
        data before giving up, as a float, or a :ref:`(connect timeout,
        read timeout) <timeouts>` tuple.
    :type timeout: float or tuple or urllib3 Timeout object
    :param verify: (optional) Either a boolean, in which case it controls whether
        we verify the server's TLS certificate, or a string, in which case it
        must be a path to a CA bundle to use
    :param cert: (optional) Any user-provided SSL certificate to be trusted.
    :param proxies: (optional) The proxies dictionary to apply to the request.
    :rtype: requests.Response
    """

    # Resolve a pooled connection for this request, mapping urllib3's
    # URL-validation failure onto requests' InvalidURL.
    try:
        conn = self.get_connection_with_tls_context(
            request, verify, proxies=proxies, cert=cert
        )
    except LocationValueError as e:
        raise InvalidURL(e, request=request)

    self.cert_verify(conn, request.url, verify, cert)
    url = self.request_url(request, proxies)
    self.add_headers(
        request,
        stream=stream,
        timeout=timeout,
        verify=verify,
        cert=cert,
        proxies=proxies,
    )

    # Use chunked transfer-encoding only when a body exists but no
    # Content-Length header was supplied.
    chunked = not (request.body is None or "Content-Length" in request.headers)

    # Normalize the timeout argument into a urllib3 Timeout object.
    if isinstance(timeout, tuple):
        try:
            connect, read = timeout
            timeout = TimeoutSauce(connect=connect, read=read)
        except ValueError:
            raise ValueError(
                f"Invalid timeout {timeout}. Pass a (connect, read) timeout tuple, "
                f"or a single float to set both timeouts to the same value."
            )
    elif isinstance(timeout, TimeoutSauce):
        pass
    else:
        timeout = TimeoutSauce(connect=timeout, read=timeout)

    try:
        resp = conn.urlopen(
            method=request.method,
            url=url,
            body=request.body,
            headers=request.headers,
            redirect=False,
            assert_same_host=False,
            preload_content=False,
            decode_content=False,
            retries=self.max_retries,
            timeout=timeout,
            chunked=chunked,
        )

    except (ProtocolError, OSError) as err:
        raise ConnectionError(err, request=request)

    except MaxRetryError as e:
        # Translate urllib3 retry failures into the matching requests
        # exceptions; the order of these isinstance checks matters.
        if isinstance(e.reason, ConnectTimeoutError):
            # TODO: Remove this in 3.0.0: see #2811
            if not isinstance(e.reason, NewConnectionError):
                raise ConnectTimeout(e, request=request)

        if isinstance(e.reason, ResponseError):
            raise RetryError(e, request=request)

        if isinstance(e.reason, _ProxyError):
            raise ProxyError(e, request=request)

        if isinstance(e.reason, _SSLError):
            # This branch is for urllib3 v1.22 and later.
            raise SSLError(e, request=request)

        raise ConnectionError(e, request=request)

    except ClosedPoolError as e:
        raise ConnectionError(e, request=request)

    except _ProxyError as e:
        raise ProxyError(e)

    except (_SSLError, _HTTPError) as e:
        if isinstance(e, _SSLError):
            # This branch is for urllib3 versions earlier than v1.22
            raise SSLError(e, request=request)
        elif isinstance(e, ReadTimeoutError):
            raise ReadTimeout(e, request=request)
        elif isinstance(e, _InvalidHeader):
            raise InvalidHeader(e, request=request)
        else:
            raise

    return self.build_response(request, resp)
|
wemm/lib/python3.10/site-packages/requests/api.py
ADDED
|
@@ -0,0 +1,157 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
requests.api
|
| 3 |
+
~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
This module implements the Requests API.
|
| 6 |
+
|
| 7 |
+
:copyright: (c) 2012 by Kenneth Reitz.
|
| 8 |
+
:license: Apache2, see LICENSE for more details.
|
| 9 |
+
"""
|
| 10 |
+
|
| 11 |
+
from . import sessions
|
| 12 |
+
|
| 13 |
+
|
| 14 |
+
def request(method, url, **kwargs):
    """Construct and send a :class:`Request <Request>`.

    :param method: method for the new :class:`Request` object: ``GET``, ``OPTIONS``, ``HEAD``, ``POST``, ``PUT``, ``PATCH``, or ``DELETE``.
    :param url: URL for the new :class:`Request` object.
    :param params: (optional) Dictionary, list of tuples or bytes to send
        in the query string for the :class:`Request`.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
    :param headers: (optional) Dictionary of HTTP Headers to send with the :class:`Request`.
    :param cookies: (optional) Dict or CookieJar object to send with the :class:`Request`.
    :param files: (optional) Dictionary of ``'name': file-like-objects`` (or ``{'name': file-tuple}``) for multipart encoding upload.
        ``file-tuple`` can be a 2-tuple ``('filename', fileobj)``, 3-tuple ``('filename', fileobj, 'content_type')``
        or a 4-tuple ``('filename', fileobj, 'content_type', custom_headers)``, where ``'content_type'`` is a string
        defining the content type of the given file and ``custom_headers`` a dict-like object containing additional headers
        to add for the file.
    :param auth: (optional) Auth tuple to enable Basic/Digest/Custom HTTP Auth.
    :param timeout: (optional) How many seconds to wait for the server to send data
        before giving up, as a float, or a :ref:`(connect timeout, read
        timeout) <timeouts>` tuple.
    :type timeout: float or tuple
    :param allow_redirects: (optional) Boolean. Enable/disable GET/OPTIONS/POST/PUT/PATCH/DELETE/HEAD redirection. Defaults to ``True``.
    :type allow_redirects: bool
    :param proxies: (optional) Dictionary mapping protocol to the URL of the proxy.
    :param verify: (optional) Either a boolean, in which case it controls whether we verify
        the server's TLS certificate, or a string, in which case it must be a path
        to a CA bundle to use. Defaults to ``True``.
    :param stream: (optional) if ``False``, the response content will be immediately downloaded.
    :param cert: (optional) if String, path to ssl client cert file (.pem). If Tuple, ('cert', 'key') pair.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response

    Usage::

      >>> import requests
      >>> req = requests.request('GET', 'https://httpbin.org/get')
      >>> req
      <Response [200]>
    """

    # A context-managed session guarantees the underlying sockets are
    # released, avoiding ResourceWarnings that can look like memory leaks.
    with sessions.Session() as session:
        return session.request(method=method, url=url, **kwargs)
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
def get(url, params=None, **kwargs):
|
| 63 |
+
r"""Sends a GET request.
|
| 64 |
+
|
| 65 |
+
:param url: URL for the new :class:`Request` object.
|
| 66 |
+
:param params: (optional) Dictionary, list of tuples or bytes to send
|
| 67 |
+
in the query string for the :class:`Request`.
|
| 68 |
+
:param \*\*kwargs: Optional arguments that ``request`` takes.
|
| 69 |
+
:return: :class:`Response <Response>` object
|
| 70 |
+
:rtype: requests.Response
|
| 71 |
+
"""
|
| 72 |
+
|
| 73 |
+
return request("get", url, params=params, **kwargs)
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
def options(url, **kwargs):
    r"""Send an OPTIONS request and return the server's response.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("options", url, **kwargs)
|
| 86 |
+
|
| 87 |
+
|
| 88 |
+
def head(url, **kwargs):
    r"""Send a HEAD request and return the server's response.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes. If
        `allow_redirects` is not provided, it will be set to `False` (as
        opposed to the default :meth:`request` behavior).
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    # HEAD defaults to not following redirects, unlike the other verbs.
    if "allow_redirects" not in kwargs:
        kwargs["allow_redirects"] = False
    return request("head", url, **kwargs)
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
def post(url, data=None, json=None, **kwargs):
    r"""Send a POST request and return the server's response.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the body of the :class:`Request`.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("post", url, data=data, json=json, **kwargs)
|
| 116 |
+
|
| 117 |
+
|
| 118 |
+
def put(url, data=None, **kwargs):
    r"""Send a PUT request and return the server's response.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the
        body of the :class:`Request` (passed through ``\*\*kwargs``).
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("put", url, data=data, **kwargs)
|
| 131 |
+
|
| 132 |
+
|
| 133 |
+
def patch(url, data=None, **kwargs):
    r"""Send a PATCH request and return the server's response.

    :param url: URL for the new :class:`Request` object.
    :param data: (optional) Dictionary, list of tuples, bytes, or file-like
        object to send in the body of the :class:`Request`.
    :param json: (optional) A JSON serializable Python object to send in the
        body of the :class:`Request` (passed through ``\*\*kwargs``).
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("patch", url, data=data, **kwargs)
|
| 146 |
+
|
| 147 |
+
|
| 148 |
+
def delete(url, **kwargs):
    r"""Send a DELETE request and return the server's response.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    :return: :class:`Response <Response>` object
    :rtype: requests.Response
    """
    return request("delete", url, **kwargs)
|
wemm/lib/python3.10/site-packages/requests/auth.py
ADDED
|
@@ -0,0 +1,314 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
requests.auth
|
| 3 |
+
~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
This module contains the authentication handlers for Requests.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import hashlib
|
| 9 |
+
import os
|
| 10 |
+
import re
|
| 11 |
+
import threading
|
| 12 |
+
import time
|
| 13 |
+
import warnings
|
| 14 |
+
from base64 import b64encode
|
| 15 |
+
|
| 16 |
+
from ._internal_utils import to_native_string
|
| 17 |
+
from .compat import basestring, str, urlparse
|
| 18 |
+
from .cookies import extract_cookies_to_jar
|
| 19 |
+
from .utils import parse_dict_header
|
| 20 |
+
|
| 21 |
+
CONTENT_TYPE_FORM_URLENCODED = "application/x-www-form-urlencoded"
|
| 22 |
+
CONTENT_TYPE_MULTI_PART = "multipart/form-data"
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
def _basic_auth_str(username, password):
    """Return the value for an HTTP Basic ``Authorization`` header."""

    # "I want us to put a big-ol' comment on top of it that
    # says that this behaviour is dumb but we need to preserve
    # it because people are relying on it."
    # - Lukasa
    #
    # Non-string credentials (e.g. ints) are coerced for backwards
    # compatibility only; this will be removed in 3.0.0.
    if not isinstance(username, basestring):
        warnings.warn(
            "Non-string usernames will no longer be supported in Requests "
            "3.0.0. Please convert the object you've passed in ({!r}) to "
            "a string or bytes object in the near future to avoid "
            "problems.".format(username),
            category=DeprecationWarning,
        )
        username = str(username)

    if not isinstance(password, basestring):
        warnings.warn(
            "Non-string passwords will no longer be supported in Requests "
            "3.0.0. Please convert the object you've passed in ({!r}) to "
            "a string or bytes object in the near future to avoid "
            "problems.".format(type(password)),
            category=DeprecationWarning,
        )
        password = str(password)
    # -- End Removal --

    # Credentials are joined as latin1 bytes before base64 encoding.
    if isinstance(username, str):
        username = username.encode("latin1")
    if isinstance(password, str):
        password = password.encode("latin1")

    token = b64encode(b":".join((username, password))).strip()
    return "Basic " + to_native_string(token)
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
class AuthBase:
    """Base class that all auth implementations derive from"""

    def __call__(self, r):
        # Subclasses must implement the hook protocol by mutating and
        # returning the outgoing request.
        raise NotImplementedError("Auth hooks must be callable.")
|
| 74 |
+
|
| 75 |
+
|
| 76 |
+
class HTTPBasicAuth(AuthBase):
    """Attaches HTTP Basic Authentication to the given Request object."""

    def __init__(self, username, password):
        # Credentials used to build the Authorization header.
        self.username = username
        self.password = password

    def __eq__(self, other):
        # Compare via getattr so non-auth objects simply compare unequal.
        return all(
            (
                self.username == getattr(other, "username", None),
                self.password == getattr(other, "password", None),
            )
        )

    def __ne__(self, other):
        return not self == other

    def __call__(self, r):
        r.headers["Authorization"] = _basic_auth_str(self.username, self.password)
        return r
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
class HTTPProxyAuth(HTTPBasicAuth):
    """Attaches HTTP Proxy Authentication to a given Request object."""

    def __call__(self, r):
        # Same Basic scheme as the parent, but on the proxy-specific header.
        r.headers["Proxy-Authorization"] = _basic_auth_str(self.username, self.password)
        return r
|
| 105 |
+
|
| 106 |
+
|
| 107 |
+
class HTTPDigestAuth(AuthBase):
|
| 108 |
+
"""Attaches HTTP Digest Authentication to the given Request object."""
|
| 109 |
+
|
| 110 |
+
def __init__(self, username, password):
    # Credentials used when answering digest challenges.
    self.username = username
    self.password = password
    # Keep state in per-thread local storage
    self._thread_local = threading.local()
|
| 115 |
+
|
| 116 |
+
def init_per_thread_state(self):
    # Lazily initialize this thread's digest state exactly once;
    # the "init" attribute acts as the sentinel.
    # Ensure state is initialized just once per-thread
    if not hasattr(self._thread_local, "init"):
        self._thread_local.init = True
        self._thread_local.last_nonce = ""
        self._thread_local.nonce_count = 0
        self._thread_local.chal = {}
        self._thread_local.pos = None
        self._thread_local.num_401_calls = None
|
| 125 |
+
|
| 126 |
+
def build_digest_header(self, method, url):
|
| 127 |
+
"""
|
| 128 |
+
:rtype: str
|
| 129 |
+
"""
|
| 130 |
+
|
| 131 |
+
realm = self._thread_local.chal["realm"]
|
| 132 |
+
nonce = self._thread_local.chal["nonce"]
|
| 133 |
+
qop = self._thread_local.chal.get("qop")
|
| 134 |
+
algorithm = self._thread_local.chal.get("algorithm")
|
| 135 |
+
opaque = self._thread_local.chal.get("opaque")
|
| 136 |
+
hash_utf8 = None
|
| 137 |
+
|
| 138 |
+
if algorithm is None:
|
| 139 |
+
_algorithm = "MD5"
|
| 140 |
+
else:
|
| 141 |
+
_algorithm = algorithm.upper()
|
| 142 |
+
# lambdas assume digest modules are imported at the top level
|
| 143 |
+
if _algorithm == "MD5" or _algorithm == "MD5-SESS":
|
| 144 |
+
|
| 145 |
+
def md5_utf8(x):
|
| 146 |
+
if isinstance(x, str):
|
| 147 |
+
x = x.encode("utf-8")
|
| 148 |
+
return hashlib.md5(x).hexdigest()
|
| 149 |
+
|
| 150 |
+
hash_utf8 = md5_utf8
|
| 151 |
+
elif _algorithm == "SHA":
|
| 152 |
+
|
| 153 |
+
def sha_utf8(x):
|
| 154 |
+
if isinstance(x, str):
|
| 155 |
+
x = x.encode("utf-8")
|
| 156 |
+
return hashlib.sha1(x).hexdigest()
|
| 157 |
+
|
| 158 |
+
hash_utf8 = sha_utf8
|
| 159 |
+
elif _algorithm == "SHA-256":
|
| 160 |
+
|
| 161 |
+
def sha256_utf8(x):
|
| 162 |
+
if isinstance(x, str):
|
| 163 |
+
x = x.encode("utf-8")
|
| 164 |
+
return hashlib.sha256(x).hexdigest()
|
| 165 |
+
|
| 166 |
+
hash_utf8 = sha256_utf8
|
| 167 |
+
elif _algorithm == "SHA-512":
|
| 168 |
+
|
| 169 |
+
def sha512_utf8(x):
|
| 170 |
+
if isinstance(x, str):
|
| 171 |
+
x = x.encode("utf-8")
|
| 172 |
+
return hashlib.sha512(x).hexdigest()
|
| 173 |
+
|
| 174 |
+
hash_utf8 = sha512_utf8
|
| 175 |
+
|
| 176 |
+
KD = lambda s, d: hash_utf8(f"{s}:{d}") # noqa:E731
|
| 177 |
+
|
| 178 |
+
if hash_utf8 is None:
|
| 179 |
+
return None
|
| 180 |
+
|
| 181 |
+
# XXX not implemented yet
|
| 182 |
+
entdig = None
|
| 183 |
+
p_parsed = urlparse(url)
|
| 184 |
+
#: path is request-uri defined in RFC 2616 which should not be empty
|
| 185 |
+
path = p_parsed.path or "/"
|
| 186 |
+
if p_parsed.query:
|
| 187 |
+
path += f"?{p_parsed.query}"
|
| 188 |
+
|
| 189 |
+
A1 = f"{self.username}:{realm}:{self.password}"
|
| 190 |
+
A2 = f"{method}:{path}"
|
| 191 |
+
|
| 192 |
+
HA1 = hash_utf8(A1)
|
| 193 |
+
HA2 = hash_utf8(A2)
|
| 194 |
+
|
| 195 |
+
if nonce == self._thread_local.last_nonce:
|
| 196 |
+
self._thread_local.nonce_count += 1
|
| 197 |
+
else:
|
| 198 |
+
self._thread_local.nonce_count = 1
|
| 199 |
+
ncvalue = f"{self._thread_local.nonce_count:08x}"
|
| 200 |
+
s = str(self._thread_local.nonce_count).encode("utf-8")
|
| 201 |
+
s += nonce.encode("utf-8")
|
| 202 |
+
s += time.ctime().encode("utf-8")
|
| 203 |
+
s += os.urandom(8)
|
| 204 |
+
|
| 205 |
+
cnonce = hashlib.sha1(s).hexdigest()[:16]
|
| 206 |
+
if _algorithm == "MD5-SESS":
|
| 207 |
+
HA1 = hash_utf8(f"{HA1}:{nonce}:{cnonce}")
|
| 208 |
+
|
| 209 |
+
if not qop:
|
| 210 |
+
respdig = KD(HA1, f"{nonce}:{HA2}")
|
| 211 |
+
elif qop == "auth" or "auth" in qop.split(","):
|
| 212 |
+
noncebit = f"{nonce}:{ncvalue}:{cnonce}:auth:{HA2}"
|
| 213 |
+
respdig = KD(HA1, noncebit)
|
| 214 |
+
else:
|
| 215 |
+
# XXX handle auth-int.
|
| 216 |
+
return None
|
| 217 |
+
|
| 218 |
+
self._thread_local.last_nonce = nonce
|
| 219 |
+
|
| 220 |
+
# XXX should the partial digests be encoded too?
|
| 221 |
+
base = (
|
| 222 |
+
f'username="{self.username}", realm="{realm}", nonce="{nonce}", '
|
| 223 |
+
f'uri="{path}", response="{respdig}"'
|
| 224 |
+
)
|
| 225 |
+
if opaque:
|
| 226 |
+
base += f', opaque="{opaque}"'
|
| 227 |
+
if algorithm:
|
| 228 |
+
base += f', algorithm="{algorithm}"'
|
| 229 |
+
if entdig:
|
| 230 |
+
base += f', digest="{entdig}"'
|
| 231 |
+
if qop:
|
| 232 |
+
base += f', qop="auth", nc={ncvalue}, cnonce="{cnonce}"'
|
| 233 |
+
|
| 234 |
+
return f"Digest {base}"
|
| 235 |
+
|
| 236 |
+
def handle_redirect(self, r, **kwargs):
|
| 237 |
+
"""Reset num_401_calls counter on redirects."""
|
| 238 |
+
if r.is_redirect:
|
| 239 |
+
self._thread_local.num_401_calls = 1
|
| 240 |
+
|
| 241 |
+
def handle_401(self, r, **kwargs):
|
| 242 |
+
"""
|
| 243 |
+
Takes the given response and tries digest-auth, if needed.
|
| 244 |
+
|
| 245 |
+
:rtype: requests.Response
|
| 246 |
+
"""
|
| 247 |
+
|
| 248 |
+
# If response is not 4xx, do not auth
|
| 249 |
+
# See https://github.com/psf/requests/issues/3772
|
| 250 |
+
if not 400 <= r.status_code < 500:
|
| 251 |
+
self._thread_local.num_401_calls = 1
|
| 252 |
+
return r
|
| 253 |
+
|
| 254 |
+
if self._thread_local.pos is not None:
|
| 255 |
+
# Rewind the file position indicator of the body to where
|
| 256 |
+
# it was to resend the request.
|
| 257 |
+
r.request.body.seek(self._thread_local.pos)
|
| 258 |
+
s_auth = r.headers.get("www-authenticate", "")
|
| 259 |
+
|
| 260 |
+
if "digest" in s_auth.lower() and self._thread_local.num_401_calls < 2:
|
| 261 |
+
self._thread_local.num_401_calls += 1
|
| 262 |
+
pat = re.compile(r"digest ", flags=re.IGNORECASE)
|
| 263 |
+
self._thread_local.chal = parse_dict_header(pat.sub("", s_auth, count=1))
|
| 264 |
+
|
| 265 |
+
# Consume content and release the original connection
|
| 266 |
+
# to allow our new request to reuse the same one.
|
| 267 |
+
r.content
|
| 268 |
+
r.close()
|
| 269 |
+
prep = r.request.copy()
|
| 270 |
+
extract_cookies_to_jar(prep._cookies, r.request, r.raw)
|
| 271 |
+
prep.prepare_cookies(prep._cookies)
|
| 272 |
+
|
| 273 |
+
prep.headers["Authorization"] = self.build_digest_header(
|
| 274 |
+
prep.method, prep.url
|
| 275 |
+
)
|
| 276 |
+
_r = r.connection.send(prep, **kwargs)
|
| 277 |
+
_r.history.append(r)
|
| 278 |
+
_r.request = prep
|
| 279 |
+
|
| 280 |
+
return _r
|
| 281 |
+
|
| 282 |
+
self._thread_local.num_401_calls = 1
|
| 283 |
+
return r
|
| 284 |
+
|
| 285 |
+
def __call__(self, r):
|
| 286 |
+
# Initialize per-thread state, if needed
|
| 287 |
+
self.init_per_thread_state()
|
| 288 |
+
# If we have a saved nonce, skip the 401
|
| 289 |
+
if self._thread_local.last_nonce:
|
| 290 |
+
r.headers["Authorization"] = self.build_digest_header(r.method, r.url)
|
| 291 |
+
try:
|
| 292 |
+
self._thread_local.pos = r.body.tell()
|
| 293 |
+
except AttributeError:
|
| 294 |
+
# In the case of HTTPDigestAuth being reused and the body of
|
| 295 |
+
# the previous request was a file-like object, pos has the
|
| 296 |
+
# file position of the previous body. Ensure it's set to
|
| 297 |
+
# None.
|
| 298 |
+
self._thread_local.pos = None
|
| 299 |
+
r.register_hook("response", self.handle_401)
|
| 300 |
+
r.register_hook("response", self.handle_redirect)
|
| 301 |
+
self._thread_local.num_401_calls = 1
|
| 302 |
+
|
| 303 |
+
return r
|
| 304 |
+
|
| 305 |
+
def __eq__(self, other):
|
| 306 |
+
return all(
|
| 307 |
+
[
|
| 308 |
+
self.username == getattr(other, "username", None),
|
| 309 |
+
self.password == getattr(other, "password", None),
|
| 310 |
+
]
|
| 311 |
+
)
|
| 312 |
+
|
| 313 |
+
def __ne__(self, other):
|
| 314 |
+
return not self == other
|
wemm/lib/python3.10/site-packages/requests/compat.py
ADDED
|
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
requests.compat
|
| 3 |
+
~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
This module previously handled import compatibility issues
|
| 6 |
+
between Python 2 and Python 3. It remains for backwards
|
| 7 |
+
compatibility until the next major version.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import importlib
|
| 11 |
+
import sys
|
| 12 |
+
|
| 13 |
+
# -------------------
|
| 14 |
+
# Character Detection
|
| 15 |
+
# -------------------
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
def _resolve_char_detection():
|
| 19 |
+
"""Find supported character detection libraries."""
|
| 20 |
+
chardet = None
|
| 21 |
+
for lib in ("chardet", "charset_normalizer"):
|
| 22 |
+
if chardet is None:
|
| 23 |
+
try:
|
| 24 |
+
chardet = importlib.import_module(lib)
|
| 25 |
+
except ImportError:
|
| 26 |
+
pass
|
| 27 |
+
return chardet
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
chardet = _resolve_char_detection()
|
| 31 |
+
|
| 32 |
+
# -------
|
| 33 |
+
# Pythons
|
| 34 |
+
# -------
|
| 35 |
+
|
| 36 |
+
# Syntax sugar.
|
| 37 |
+
_ver = sys.version_info
|
| 38 |
+
|
| 39 |
+
#: Python 2.x?
|
| 40 |
+
is_py2 = _ver[0] == 2
|
| 41 |
+
|
| 42 |
+
#: Python 3.x?
|
| 43 |
+
is_py3 = _ver[0] == 3
|
| 44 |
+
|
| 45 |
+
# json/simplejson module import resolution
|
| 46 |
+
has_simplejson = False
|
| 47 |
+
try:
|
| 48 |
+
import simplejson as json
|
| 49 |
+
|
| 50 |
+
has_simplejson = True
|
| 51 |
+
except ImportError:
|
| 52 |
+
import json
|
| 53 |
+
|
| 54 |
+
if has_simplejson:
|
| 55 |
+
from simplejson import JSONDecodeError
|
| 56 |
+
else:
|
| 57 |
+
from json import JSONDecodeError
|
| 58 |
+
|
| 59 |
+
# Keep OrderedDict for backwards compatibility.
|
| 60 |
+
from collections import OrderedDict
|
| 61 |
+
from collections.abc import Callable, Mapping, MutableMapping
|
| 62 |
+
from http import cookiejar as cookielib
|
| 63 |
+
from http.cookies import Morsel
|
| 64 |
+
from io import StringIO
|
| 65 |
+
|
| 66 |
+
# --------------
|
| 67 |
+
# Legacy Imports
|
| 68 |
+
# --------------
|
| 69 |
+
from urllib.parse import (
|
| 70 |
+
quote,
|
| 71 |
+
quote_plus,
|
| 72 |
+
unquote,
|
| 73 |
+
unquote_plus,
|
| 74 |
+
urldefrag,
|
| 75 |
+
urlencode,
|
| 76 |
+
urljoin,
|
| 77 |
+
urlparse,
|
| 78 |
+
urlsplit,
|
| 79 |
+
urlunparse,
|
| 80 |
+
)
|
| 81 |
+
from urllib.request import (
|
| 82 |
+
getproxies,
|
| 83 |
+
getproxies_environment,
|
| 84 |
+
parse_http_list,
|
| 85 |
+
proxy_bypass,
|
| 86 |
+
proxy_bypass_environment,
|
| 87 |
+
)
|
| 88 |
+
|
| 89 |
+
builtin_str = str
|
| 90 |
+
str = str
|
| 91 |
+
bytes = bytes
|
| 92 |
+
basestring = (str, bytes)
|
| 93 |
+
numeric_types = (int, float)
|
| 94 |
+
integer_types = (int,)
|
wemm/lib/python3.10/site-packages/requests/cookies.py
ADDED
|
@@ -0,0 +1,561 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
requests.cookies
|
| 3 |
+
~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Compatibility code to be able to use `http.cookiejar.CookieJar` with requests.
|
| 6 |
+
|
| 7 |
+
requests.utils imports from here, so be careful with imports.
|
| 8 |
+
"""
|
| 9 |
+
|
| 10 |
+
import calendar
|
| 11 |
+
import copy
|
| 12 |
+
import time
|
| 13 |
+
|
| 14 |
+
from ._internal_utils import to_native_string
|
| 15 |
+
from .compat import Morsel, MutableMapping, cookielib, urlparse, urlunparse
|
| 16 |
+
|
| 17 |
+
try:
|
| 18 |
+
import threading
|
| 19 |
+
except ImportError:
|
| 20 |
+
import dummy_threading as threading
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
class MockRequest:
    """Adapt a `requests.Request` to the interface of `urllib2.Request`.

    `http.cookiejar.CookieJar` interrogates this interface (scheme, host,
    headers) to apply cookie policy, i.e. to decide whether a cookie may be
    set or sent for the request's domains.

    The wrapped request is treated as read-only; headers the cookiejar adds
    are collected separately and retrieved via `get_new_headers()`. You
    probably want `get_cookie_header`, defined below.
    """

    def __init__(self, request):
        self._r = request
        self._new_headers = {}
        self.type = urlparse(self._r.url).scheme

    def get_type(self):
        return self.type

    def get_host(self):
        return urlparse(self._r.url).netloc

    def get_origin_req_host(self):
        return self.get_host()

    def get_full_url(self):
        # Without an explicit Host header, the request URL is authoritative.
        host_header = self._r.headers.get("Host")
        if not host_header:
            return self._r.url
        # A user-supplied Host header overrides the URL's netloc, so rebuild
        # the URL around it for correct domain matching.
        host = to_native_string(host_header, encoding="utf-8")
        parts = urlparse(self._r.url)
        return urlunparse(
            (
                parts.scheme,
                host,
                parts.path,
                parts.params,
                parts.query,
                parts.fragment,
            )
        )

    def is_unverifiable(self):
        return True

    def has_header(self, name):
        return name in self._r.headers or name in self._new_headers

    def get_header(self, name, default=None):
        return self._r.headers.get(name, self._new_headers.get(name, default))

    def add_header(self, key, val):
        """cookiejar has no legitimate use for this method; add it back if you find one."""
        raise NotImplementedError(
            "Cookie headers should be added with add_unredirected_header()"
        )

    def add_unredirected_header(self, name, value):
        self._new_headers[name] = value

    def get_new_headers(self):
        return self._new_headers

    @property
    def unverifiable(self):
        return self.is_unverifiable()

    @property
    def origin_req_host(self):
        return self.get_origin_req_host()

    @property
    def host(self):
        return self.get_host()
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
class MockResponse:
    """Wraps a `httplib.HTTPMessage` to mimic a `urllib.addinfourl`.

    ...what? Basically, expose the parsed HTTP headers from the server response
    the way `http.cookiejar` expects to see them.
    """

    def __init__(self, headers):
        """Make a MockResponse for `cookiejar` to read.

        :param headers: a httplib.HTTPMessage or analogous carrying the headers
        """
        self._headers = headers

    def info(self):
        # http.cookiejar calls info() to obtain the headers object.
        return self._headers

    def getheaders(self, name):
        # Bug fix: the lookup result was computed but never returned, so
        # callers always received None. Propagate the underlying value.
        return self._headers.getheaders(name)
|
| 122 |
+
|
| 123 |
+
|
| 124 |
+
def extract_cookies_to_jar(jar, request, response):
    """Extract the cookies from the response into a CookieJar.

    :param jar: http.cookiejar.CookieJar (not necessarily a RequestsCookieJar)
    :param request: our own requests.Request object
    :param response: urllib3.HTTPResponse object
    """
    # _original_response is the wrapped httplib.HTTPResponse object; without
    # it there are no raw headers to pull cookies from.
    original = getattr(response, "_original_response", None)
    if not original:
        return
    mock_request = MockRequest(request)
    # Expose the HTTPMessage carrying the headers through the mock response.
    mock_response = MockResponse(original.msg)
    jar.extract_cookies(mock_response, mock_request)
|
| 138 |
+
|
| 139 |
+
|
| 140 |
+
def get_cookie_header(jar, request):
    """
    Produce an appropriate Cookie header string to be sent with `request`, or None.

    :rtype: str
    """
    mock = MockRequest(request)
    # The jar writes the Cookie header into the mock's new-headers dict.
    jar.add_cookie_header(mock)
    return mock.get_new_headers().get("Cookie")
|
| 149 |
+
|
| 150 |
+
|
| 151 |
+
def remove_cookie_by_name(cookiejar, name, domain=None, path=None):
    """Unsets a cookie by name, by default over all domains and paths.

    Wraps CookieJar.clear(), is O(n).
    """
    # Collect matches first: clearing while iterating would mutate the jar
    # under the iterator.
    matches = [
        (cookie.domain, cookie.path, cookie.name)
        for cookie in cookiejar
        if cookie.name == name
        and (domain is None or domain == cookie.domain)
        and (path is None or path == cookie.path)
    ]
    for cookie_domain, cookie_path, cookie_name in matches:
        cookiejar.clear(cookie_domain, cookie_path, cookie_name)
|
| 168 |
+
|
| 169 |
+
|
| 170 |
+
class CookieConflictError(RuntimeError):
    """Raised when more than one cookie in the jar matches the lookup criteria.

    Use .get and .set and include domain and path args in order to be more specific.
    """
|
| 174 |
+
|
| 175 |
+
|
| 176 |
+
class RequestsCookieJar(cookielib.CookieJar, MutableMapping):
    """Compatibility class; is a http.cookiejar.CookieJar, but exposes a dict
    interface.

    This is the CookieJar we create by default for requests and sessions that
    don't specify one, since some clients may expect response.cookies and
    session.cookies to support dict operations.

    Requests does not use the dict interface internally; it's just for
    compatibility with external client code. All requests code should work
    out of the box with externally provided instances of ``CookieJar``, e.g.
    ``LWPCookieJar`` and ``FileCookieJar``.

    Unlike a regular CookieJar, this class is pickleable.

    .. warning:: dictionary operations that are normally O(1) may be O(n).
    """

    def get(self, name, default=None, domain=None, path=None):
        """Dict-like get() that also supports optional domain and path args in
        order to resolve naming collisions from using one cookie jar over
        multiple domains.

        .. warning:: operation is O(n), not O(1).
        """
        try:
            return self._find_no_duplicates(name, domain, path)
        except KeyError:
            return default

    def set(self, name, value, **kwargs):
        """Dict-like set() that also supports optional domain and path args in
        order to resolve naming collisions from using one cookie jar over
        multiple domains.
        """
        # support client code that unsets cookies by assignment of a None value:
        if value is None:
            remove_cookie_by_name(
                self, name, domain=kwargs.get("domain"), path=kwargs.get("path")
            )
            return

        if isinstance(value, Morsel):
            c = morsel_to_cookie(value)
        else:
            c = create_cookie(name, value, **kwargs)
        self.set_cookie(c)
        return c

    def iterkeys(self):
        """Dict-like iterkeys() that returns an iterator of names of cookies
        from the jar.

        .. seealso:: itervalues() and iteritems().
        """
        for cookie in iter(self):
            yield cookie.name

    def keys(self):
        """Dict-like keys() that returns a list of names of cookies from the
        jar.

        .. seealso:: values() and items().
        """
        return list(self.iterkeys())

    def itervalues(self):
        """Dict-like itervalues() that returns an iterator of values of cookies
        from the jar.

        .. seealso:: iterkeys() and iteritems().
        """
        for cookie in iter(self):
            yield cookie.value

    def values(self):
        """Dict-like values() that returns a list of values of cookies from the
        jar.

        .. seealso:: keys() and items().
        """
        return list(self.itervalues())

    def iteritems(self):
        """Dict-like iteritems() that returns an iterator of name-value tuples
        from the jar.

        .. seealso:: iterkeys() and itervalues().
        """
        for cookie in iter(self):
            yield cookie.name, cookie.value

    def items(self):
        """Dict-like items() that returns a list of name-value tuples from the
        jar. Allows client-code to call ``dict(RequestsCookieJar)`` and get a
        vanilla python dict of key value pairs.

        .. seealso:: keys() and values().
        """
        return list(self.iteritems())

    def list_domains(self):
        """Utility method to list all the domains in the jar."""
        domains = []
        for cookie in iter(self):
            if cookie.domain not in domains:
                domains.append(cookie.domain)
        return domains

    def list_paths(self):
        """Utility method to list all the paths in the jar."""
        paths = []
        for cookie in iter(self):
            if cookie.path not in paths:
                paths.append(cookie.path)
        return paths

    def multiple_domains(self):
        """Returns True if there are multiple domains in the jar.
        Returns False otherwise.

        :rtype: bool
        """
        domains = []
        for cookie in iter(self):
            if cookie.domain is not None and cookie.domain in domains:
                return True
            domains.append(cookie.domain)
        return False  # there is only one domain in jar

    def get_dict(self, domain=None, path=None):
        """Takes as an argument an optional domain and path and returns a plain
        old Python dict of name-value pairs of cookies that meet the
        requirements.

        :rtype: dict
        """
        dictionary = {}
        for cookie in iter(self):
            if (domain is None or cookie.domain == domain) and (
                path is None or cookie.path == path
            ):
                dictionary[cookie.name] = cookie.value
        return dictionary

    def __contains__(self, name):
        # Multiple cookies sharing the name still means the name is present.
        try:
            return super().__contains__(name)
        except CookieConflictError:
            return True

    def __getitem__(self, name):
        """Dict-like __getitem__() for compatibility with client code. Throws
        exception if there are more than one cookie with name. In that case,
        use the more explicit get() method instead.

        .. warning:: operation is O(n), not O(1).
        """
        return self._find_no_duplicates(name)

    def __setitem__(self, name, value):
        """Dict-like __setitem__ for compatibility with client code. Throws
        exception if there is already a cookie of that name in the jar. In that
        case, use the more explicit set() method instead.
        """
        self.set(name, value)

    def __delitem__(self, name):
        """Deletes a cookie given a name. Wraps ``http.cookiejar.CookieJar``'s
        ``remove_cookie_by_name()``.
        """
        remove_cookie_by_name(self, name)

    def set_cookie(self, cookie, *args, **kwargs):
        # Strip escaped quotes from values the server sent quoted, so the
        # stored value round-trips cleanly.
        if (
            hasattr(cookie.value, "startswith")
            and cookie.value.startswith('"')
            and cookie.value.endswith('"')
        ):
            cookie.value = cookie.value.replace('\\"', "")
        return super().set_cookie(cookie, *args, **kwargs)

    def update(self, other):
        """Updates this jar with cookies from another CookieJar or dict-like"""
        if isinstance(other, cookielib.CookieJar):
            # Copy each cookie so mutations here don't affect the source jar.
            for cookie in other:
                self.set_cookie(copy.copy(cookie))
        else:
            super().update(other)

    def _find(self, name, domain=None, path=None):
        """Requests uses this method internally to get cookie values.

        If there are conflicting cookies, _find arbitrarily chooses one.
        See _find_no_duplicates if you want an exception thrown if there are
        conflicting cookies.

        :param name: a string containing name of cookie
        :param domain: (optional) string containing domain of cookie
        :param path: (optional) string containing path of cookie
        :return: cookie.value
        """
        for cookie in iter(self):
            if cookie.name == name:
                if domain is None or cookie.domain == domain:
                    if path is None or cookie.path == path:
                        return cookie.value

        raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")

    def _find_no_duplicates(self, name, domain=None, path=None):
        """Both ``__get_item__`` and ``get`` call this function: it's never
        used elsewhere in Requests.

        :param name: a string containing name of cookie
        :param domain: (optional) string containing domain of cookie
        :param path: (optional) string containing path of cookie
        :raises KeyError: if cookie is not found
        :raises CookieConflictError: if there are multiple cookies
            that match name and optionally domain and path
        :return: cookie.value
        """
        toReturn = None
        for cookie in iter(self):
            if cookie.name == name:
                if domain is None or cookie.domain == domain:
                    if path is None or cookie.path == path:
                        if toReturn is not None:
                            # if there are multiple cookies that meet passed in criteria
                            raise CookieConflictError(
                                f"There are multiple cookies with name, {name!r}"
                            )
                        # we will eventually return this as long as no cookie conflict
                        toReturn = cookie.value

        # NOTE(review): truthiness check means a cookie whose value is an
        # empty string is reported as missing (KeyError) — presumably
        # intentional upstream behavior; confirm before changing.
        if toReturn:
            return toReturn
        raise KeyError(f"name={name!r}, domain={domain!r}, path={path!r}")

    def __getstate__(self):
        """Unlike a normal CookieJar, this class is pickleable."""
        state = self.__dict__.copy()
        # remove the unpickleable RLock object
        state.pop("_cookies_lock")
        return state

    def __setstate__(self, state):
        """Unlike a normal CookieJar, this class is pickleable."""
        self.__dict__.update(state)
        if "_cookies_lock" not in self.__dict__:
            # Recreate the lock dropped by __getstate__.
            self._cookies_lock = threading.RLock()

    def copy(self):
        """Return a copy of this RequestsCookieJar."""
        new_cj = RequestsCookieJar()
        new_cj.set_policy(self.get_policy())
        new_cj.update(self)
        return new_cj

    def get_policy(self):
        """Return the CookiePolicy instance used."""
        return self._policy
|
| 438 |
+
|
| 439 |
+
|
| 440 |
+
def _copy_cookie_jar(jar):
|
| 441 |
+
if jar is None:
|
| 442 |
+
return None
|
| 443 |
+
|
| 444 |
+
if hasattr(jar, "copy"):
|
| 445 |
+
# We're dealing with an instance of RequestsCookieJar
|
| 446 |
+
return jar.copy()
|
| 447 |
+
# We're dealing with a generic CookieJar instance
|
| 448 |
+
new_jar = copy.copy(jar)
|
| 449 |
+
new_jar.clear()
|
| 450 |
+
for cookie in jar:
|
| 451 |
+
new_jar.set_cookie(copy.copy(cookie))
|
| 452 |
+
return new_jar
|
| 453 |
+
|
| 454 |
+
|
| 455 |
+
def create_cookie(name, value, **kwargs):
    """Make a cookie from underspecified parameters.

    By default, the pair of `name` and `value` will be set for the domain ''
    and sent on every request (this is sometimes called a "supercookie").
    """
    spec = {
        "version": 0,
        "name": name,
        "value": value,
        "port": None,
        "domain": "",
        "path": "/",
        "secure": False,
        "expires": None,
        "discard": True,
        "comment": None,
        "comment_url": None,
        "rest": {"HttpOnly": None},
        "rfc2109": False,
    }

    # Reject keyword arguments that are not cookie attributes.
    unknown = set(kwargs) - set(spec)
    if unknown:
        raise TypeError(
            f"create_cookie() got unexpected keyword arguments: {list(unknown)}"
        )

    spec.update(kwargs)
    # cookielib.Cookie requires these *_specified flags to be consistent
    # with the attribute values.
    spec["port_specified"] = bool(spec["port"])
    spec["domain_specified"] = bool(spec["domain"])
    spec["domain_initial_dot"] = spec["domain"].startswith(".")
    spec["path_specified"] = bool(spec["path"])

    return cookielib.Cookie(**spec)
|
| 490 |
+
|
| 491 |
+
|
| 492 |
+
def morsel_to_cookie(morsel):
    """Translate an ``http.cookies.Morsel`` into a Cookie holding its one k/v pair."""
    expires = None
    max_age = morsel["max-age"]
    if max_age:
        # max-age is relative; turn it into an absolute UNIX timestamp.
        try:
            expires = int(time.time() + int(max_age))
        except ValueError:
            raise TypeError(f"max-age: {max_age} must be integer")
    elif morsel["expires"]:
        # expires is an absolute HTTP date string; parse it as UTC.
        fmt = "%a, %d-%b-%Y %H:%M:%S GMT"
        expires = calendar.timegm(time.strptime(morsel["expires"], fmt))
    return create_cookie(
        comment=morsel["comment"],
        comment_url=bool(morsel["comment"]),
        discard=False,
        domain=morsel["domain"],
        expires=expires,
        name=morsel.key,
        path=morsel["path"],
        port=None,
        rest={"HttpOnly": morsel["httponly"]},
        rfc2109=False,
        secure=bool(morsel["secure"]),
        value=morsel.value,
        version=morsel["version"] or 0,
    )
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
def cookiejar_from_dict(cookie_dict, cookiejar=None, overwrite=True):
    """Populate a CookieJar from a plain ``{name: value}`` mapping.

    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :param cookiejar: (optional) A cookiejar to add the cookies to.
    :param overwrite: (optional) If False, will not replace cookies
        already in the jar with new ones.
    :rtype: CookieJar
    """
    jar = cookiejar if cookiejar is not None else RequestsCookieJar()

    if cookie_dict is not None:
        # Snapshot the names already present so overwrite=False can skip them.
        existing = {cookie.name for cookie in jar}
        for name, value in cookie_dict.items():
            if overwrite or name not in existing:
                jar.set_cookie(create_cookie(name, value))

    return jar
|
| 540 |
+
|
| 541 |
+
|
| 542 |
+
def merge_cookies(cookiejar, cookies):
    """Fold *cookies* (dict or CookieJar) into *cookiejar* and return it.

    :param cookiejar: CookieJar object to add the cookies to.
    :param cookies: Dictionary or CookieJar object to be added.
    :rtype: CookieJar
    """
    if not isinstance(cookiejar, cookielib.CookieJar):
        raise ValueError("You can only merge into CookieJar")

    if isinstance(cookies, dict):
        return cookiejar_from_dict(cookies, cookiejar=cookiejar, overwrite=False)

    if isinstance(cookies, cookielib.CookieJar):
        try:
            # RequestsCookieJar supports dict-style update().
            cookiejar.update(cookies)
        except AttributeError:
            # Plain CookieJar: transfer the cookies one at a time.
            for cookie_in_jar in cookies:
                cookiejar.set_cookie(cookie_in_jar)

    return cookiejar
|
wemm/lib/python3.10/site-packages/requests/exceptions.py
ADDED
|
@@ -0,0 +1,151 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
requests.exceptions
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
This module contains the set of Requests' exceptions.
|
| 6 |
+
"""
|
| 7 |
+
from urllib3.exceptions import HTTPError as BaseHTTPError
|
| 8 |
+
|
| 9 |
+
from .compat import JSONDecodeError as CompatJSONDecodeError
|
| 10 |
+
|
| 11 |
+
|
| 12 |
+
class RequestException(IOError):
    """There was an ambiguous exception that occurred while handling your
    request.
    """

    def __init__(self, *args, **kwargs):
        """Initialize RequestException with `request` and `response` objects."""
        self.response = kwargs.pop("response", None)
        self.request = kwargs.pop("request", None)
        # When no request was given explicitly, borrow the one attached to
        # the response (if the response carries one).
        if (
            self.response is not None
            and not self.request
            and hasattr(self.response, "request")
        ):
            self.request = self.response.request
        super().__init__(*args, **kwargs)
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class InvalidJSONError(RequestException):
    """A JSON error occurred."""


class JSONDecodeError(InvalidJSONError, CompatJSONDecodeError):
    """Couldn't decode the text into json"""

    def __init__(self, *args, **kwargs):
        """
        Build the underlying CompatJSONDecodeError first so its
        json-specific message is preserved, then feed the resulting
        ``args`` into the IOError side of the hierarchy so the
        json-specific positional arguments are not misinterpreted as
        IOError arguments.
        """
        CompatJSONDecodeError.__init__(self, *args)
        InvalidJSONError.__init__(self, *self.args, **kwargs)

    def __reduce__(self):
        """
        Pickle via ``CompatJSONDecodeError.__reduce__``: that
        implementation records every constructor argument, whereas the
        IOError version (which the MRO would otherwise select) expects
        only one, so unpickling would break without this override.
        """
        return CompatJSONDecodeError.__reduce__(self)
|
| 53 |
+
|
| 54 |
+
|
| 55 |
+
# --- Concrete exception hierarchy, all rooted at RequestException ---


class HTTPError(RequestException):
    """An HTTP error occurred."""


class ConnectionError(RequestException):
    """A Connection error occurred."""


class ProxyError(ConnectionError):
    """A proxy error occurred."""


class SSLError(ConnectionError):
    """An SSL error occurred."""


class Timeout(RequestException):
    """The request timed out.

    Catching this error will catch both
    :exc:`~requests.exceptions.ConnectTimeout` and
    :exc:`~requests.exceptions.ReadTimeout` errors.
    """


class ConnectTimeout(ConnectionError, Timeout):
    """The request timed out while trying to connect to the remote server.

    Requests that produced this error are safe to retry.
    """


class ReadTimeout(Timeout):
    """The server did not send any data in the allotted amount of time."""


class URLRequired(RequestException):
    """A valid URL is required to make a request."""


class TooManyRedirects(RequestException):
    """Too many redirects."""


# Several URL-related errors double as ValueError so callers validating
# input can catch them generically.
class MissingSchema(RequestException, ValueError):
    """The URL scheme (e.g. http or https) is missing."""


class InvalidSchema(RequestException, ValueError):
    """The URL scheme provided is either invalid or unsupported."""


class InvalidURL(RequestException, ValueError):
    """The URL provided was somehow invalid."""


class InvalidHeader(RequestException, ValueError):
    """The header value provided was somehow invalid."""


class InvalidProxyURL(InvalidURL):
    """The proxy URL provided is invalid."""


class ChunkedEncodingError(RequestException):
    """The server declared chunked encoding but sent an invalid chunk."""


class ContentDecodingError(RequestException, BaseHTTPError):
    """Failed to decode response content."""


class StreamConsumedError(RequestException, TypeError):
    """The content for this response was already consumed."""


class RetryError(RequestException):
    """Custom retries logic failed"""


class UnrewindableBodyError(RequestException):
    """Requests encountered an error when trying to rewind a body."""


# Warnings


class RequestsWarning(Warning):
    """Base warning for Requests."""


class FileModeWarning(RequestsWarning, DeprecationWarning):
    """A file was opened in text mode, but Requests determined its binary length."""


class RequestsDependencyWarning(RequestsWarning):
    """An imported dependency doesn't match the expected version range."""
|
wemm/lib/python3.10/site-packages/requests/help.py
ADDED
|
@@ -0,0 +1,134 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Module containing bug report helper(s)."""
|
| 2 |
+
|
| 3 |
+
import json
|
| 4 |
+
import platform
|
| 5 |
+
import ssl
|
| 6 |
+
import sys
|
| 7 |
+
|
| 8 |
+
import idna
|
| 9 |
+
import urllib3
|
| 10 |
+
|
| 11 |
+
from . import __version__ as requests_version
|
| 12 |
+
|
| 13 |
+
try:
|
| 14 |
+
import charset_normalizer
|
| 15 |
+
except ImportError:
|
| 16 |
+
charset_normalizer = None
|
| 17 |
+
|
| 18 |
+
try:
|
| 19 |
+
import chardet
|
| 20 |
+
except ImportError:
|
| 21 |
+
chardet = None
|
| 22 |
+
|
| 23 |
+
try:
|
| 24 |
+
from urllib3.contrib import pyopenssl
|
| 25 |
+
except ImportError:
|
| 26 |
+
pyopenssl = None
|
| 27 |
+
OpenSSL = None
|
| 28 |
+
cryptography = None
|
| 29 |
+
else:
|
| 30 |
+
import cryptography
|
| 31 |
+
import OpenSSL
|
| 32 |
+
|
| 33 |
+
|
| 34 |
+
def _implementation():
    """Return a dict with the Python implementation and version.

    Provide both the name and the version of the Python implementation
    currently running. For example, on CPython 3.10.3 it will return
    {'name': 'CPython', 'version': '3.10.3'}.

    This function works best on CPython and PyPy: in particular, it probably
    doesn't work for Jython or IronPython. Future investigation should be done
    to work out the correct shape of the code for those platforms.
    """
    name = platform.python_implementation()

    if name == "PyPy":
        # Use the interpreter's own version record rather than
        # platform.python_version().
        vi = sys.pypy_version_info
        version = f"{vi.major}.{vi.minor}.{vi.micro}"
        if vi.releaselevel != "final":
            version += vi.releaselevel
    elif name in ("CPython", "Jython", "IronPython"):
        # For Jython/IronPython this is a complete guess.
        version = platform.python_version()
    else:
        version = "Unknown"

    return {"name": name, "version": version}
|
| 67 |
+
|
| 68 |
+
|
| 69 |
+
def info():
    """Generate information for a bug report."""
    try:
        platform_info = {
            "system": platform.system(),
            "release": platform.release(),
        }
    except OSError:
        # platform queries can fail on exotic systems; degrade gracefully.
        platform_info = {
            "system": "Unknown",
            "release": "Unknown",
        }

    implementation_info = _implementation()
    urllib3_info = {"version": urllib3.__version__}
    # Optional codec-detection backends: report None when not installed.
    charset_normalizer_info = {"version": None}
    chardet_info = {"version": None}
    if charset_normalizer:
        charset_normalizer_info = {"version": charset_normalizer.__version__}
    if chardet:
        chardet_info = {"version": chardet.__version__}

    pyopenssl_info = {
        "version": None,
        "openssl_version": "",
    }
    if OpenSSL:
        pyopenssl_info = {
            "version": OpenSSL.__version__,
            # Hex-encoded OpenSSL version number.
            "openssl_version": f"{OpenSSL.SSL.OPENSSL_VERSION_NUMBER:x}",
        }
    cryptography_info = {
        "version": getattr(cryptography, "__version__", ""),
    }
    idna_info = {
        "version": getattr(idna, "__version__", ""),
    }

    system_ssl = ssl.OPENSSL_VERSION_NUMBER
    system_ssl_info = {"version": f"{system_ssl:x}" if system_ssl is not None else ""}

    return {
        "platform": platform_info,
        "implementation": implementation_info,
        "system_ssl": system_ssl_info,
        "using_pyopenssl": pyopenssl is not None,
        "using_charset_normalizer": chardet is None,
        "pyOpenSSL": pyopenssl_info,
        "urllib3": urllib3_info,
        "chardet": chardet_info,
        "charset_normalizer": charset_normalizer_info,
        "cryptography": cryptography_info,
        "idna": idna_info,
        "requests": {
            "version": requests_version,
        },
    }
|
| 126 |
+
|
| 127 |
+
|
| 128 |
+
def main():
    """Dump :func:`info` to stdout as pretty-printed, key-sorted JSON."""
    report = info()
    print(json.dumps(report, sort_keys=True, indent=2))


if __name__ == "__main__":
    main()
|
wemm/lib/python3.10/site-packages/requests/hooks.py
ADDED
|
@@ -0,0 +1,33 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
requests.hooks
|
| 3 |
+
~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
This module provides the capabilities for the Requests hooks system.
|
| 6 |
+
|
| 7 |
+
Available hooks:
|
| 8 |
+
|
| 9 |
+
``response``:
|
| 10 |
+
The response generated from a Request.
|
| 11 |
+
"""
|
| 12 |
+
# Event names a Request/Response object recognizes for callbacks.
HOOKS = ["response"]


def default_hooks():
    """Return a fresh hook registry: one independent, empty list per event."""
    registry = {}
    for event in HOOKS:
        registry[event] = []
    return registry
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
# TODO: response is the only one
|
| 20 |
+
|
| 21 |
+
|
| 22 |
+
def dispatch_hook(key, hooks, hook_data, **kwargs):
    """Run every hook registered under *key*, threading the data through.

    Each hook may return a replacement for ``hook_data``; a hook that
    returns ``None`` leaves the current value untouched.
    """
    registered = (hooks or {}).get(key)
    if registered:
        # A single bare callable is accepted as shorthand for a one-item list.
        if callable(registered):
            registered = [registered]
        for hook in registered:
            altered = hook(hook_data, **kwargs)
            if altered is not None:
                hook_data = altered
    return hook_data
|
wemm/lib/python3.10/site-packages/requests/models.py
ADDED
|
@@ -0,0 +1,1037 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
requests.models
|
| 3 |
+
~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
This module contains the primary objects that power Requests.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import datetime
|
| 9 |
+
|
| 10 |
+
# Import encoding now, to avoid implicit import later.
|
| 11 |
+
# Implicit import within threads may cause LookupError when standard library is in a ZIP,
|
| 12 |
+
# such as in Embedded Python. See https://github.com/psf/requests/issues/3578.
|
| 13 |
+
import encodings.idna # noqa: F401
|
| 14 |
+
from io import UnsupportedOperation
|
| 15 |
+
|
| 16 |
+
from urllib3.exceptions import (
|
| 17 |
+
DecodeError,
|
| 18 |
+
LocationParseError,
|
| 19 |
+
ProtocolError,
|
| 20 |
+
ReadTimeoutError,
|
| 21 |
+
SSLError,
|
| 22 |
+
)
|
| 23 |
+
from urllib3.fields import RequestField
|
| 24 |
+
from urllib3.filepost import encode_multipart_formdata
|
| 25 |
+
from urllib3.util import parse_url
|
| 26 |
+
|
| 27 |
+
from ._internal_utils import to_native_string, unicode_is_ascii
|
| 28 |
+
from .auth import HTTPBasicAuth
|
| 29 |
+
from .compat import (
|
| 30 |
+
Callable,
|
| 31 |
+
JSONDecodeError,
|
| 32 |
+
Mapping,
|
| 33 |
+
basestring,
|
| 34 |
+
builtin_str,
|
| 35 |
+
chardet,
|
| 36 |
+
cookielib,
|
| 37 |
+
)
|
| 38 |
+
from .compat import json as complexjson
|
| 39 |
+
from .compat import urlencode, urlsplit, urlunparse
|
| 40 |
+
from .cookies import _copy_cookie_jar, cookiejar_from_dict, get_cookie_header
|
| 41 |
+
from .exceptions import (
|
| 42 |
+
ChunkedEncodingError,
|
| 43 |
+
ConnectionError,
|
| 44 |
+
ContentDecodingError,
|
| 45 |
+
HTTPError,
|
| 46 |
+
InvalidJSONError,
|
| 47 |
+
InvalidURL,
|
| 48 |
+
)
|
| 49 |
+
from .exceptions import JSONDecodeError as RequestsJSONDecodeError
|
| 50 |
+
from .exceptions import MissingSchema
|
| 51 |
+
from .exceptions import SSLError as RequestsSSLError
|
| 52 |
+
from .exceptions import StreamConsumedError
|
| 53 |
+
from .hooks import default_hooks
|
| 54 |
+
from .status_codes import codes
|
| 55 |
+
from .structures import CaseInsensitiveDict
|
| 56 |
+
from .utils import (
|
| 57 |
+
check_header_validity,
|
| 58 |
+
get_auth_from_url,
|
| 59 |
+
guess_filename,
|
| 60 |
+
guess_json_utf,
|
| 61 |
+
iter_slices,
|
| 62 |
+
parse_header_links,
|
| 63 |
+
requote_uri,
|
| 64 |
+
stream_decode_response_unicode,
|
| 65 |
+
super_len,
|
| 66 |
+
to_key_val_list,
|
| 67 |
+
)
|
| 68 |
+
|
| 69 |
+
#: The set of HTTP status codes that indicate an automatically
#: processable redirect.
REDIRECT_STATI = (
    codes.moved,  # 301
    codes.found,  # 302
    codes.other,  # 303
    codes.temporary_redirect,  # 307
    codes.permanent_redirect,  # 308
)

# Maximum number of redirects followed before TooManyRedirects is raised.
DEFAULT_REDIRECT_LIMIT = 30
# Default chunk sizes (bytes) used when streaming response content.
CONTENT_CHUNK_SIZE = 10 * 1024
ITER_CHUNK_SIZE = 512
|
| 82 |
+
|
| 83 |
+
|
| 84 |
+
class RequestEncodingMixin:
    """Encoding helpers (URL path, form params, multipart bodies) mixed into request classes."""

    @property
    def path_url(self):
        """Build the path URL to use."""

        url = []

        p = urlsplit(self.url)

        path = p.path
        if not path:
            # An empty path component still needs a leading slash on the wire.
            path = "/"

        url.append(path)

        query = p.query
        if query:
            url.append("?")
            url.append(query)

        return "".join(url)

    @staticmethod
    def _encode_params(data):
        """Encode parameters in a piece of data.

        Will successfully encode parameters when passed as a dict or a list of
        2-tuples. Order is retained if data is a list of 2-tuples but arbitrary
        if parameters are supplied as a dict.
        """

        if isinstance(data, (str, bytes)):
            # Already-encoded (or raw) payload: pass through untouched.
            return data
        elif hasattr(data, "read"):
            # File-like object: streamed by the caller; nothing to encode here.
            return data
        elif hasattr(data, "__iter__"):
            result = []
            for k, vs in to_key_val_list(data):
                # Wrap scalar values so single- and multi-valued keys share
                # one code path.
                if isinstance(vs, basestring) or not hasattr(vs, "__iter__"):
                    vs = [vs]
                for v in vs:
                    if v is not None:
                        # Normalize both key and value to bytes before urlencoding.
                        result.append(
                            (
                                k.encode("utf-8") if isinstance(k, str) else k,
                                v.encode("utf-8") if isinstance(v, str) else v,
                            )
                        )
            return urlencode(result, doseq=True)
        else:
            return data

    @staticmethod
    def _encode_files(files, data):
        """Build the body for a multipart/form-data request.

        Will successfully encode files when passed as a dict or a list of
        tuples. Order is retained if data is a list of tuples but arbitrary
        if parameters are supplied as a dict.
        The tuples may be 2-tuples (filename, fileobj), 3-tuples (filename, fileobj, contentype)
        or 4-tuples (filename, fileobj, contentype, custom_headers).
        """
        if not files:
            raise ValueError("Files must be provided.")
        elif isinstance(data, basestring):
            raise ValueError("Data must not be a string.")

        new_fields = []
        fields = to_key_val_list(data or {})
        files = to_key_val_list(files or {})

        # First, fold the plain form fields into the multipart field list.
        for field, val in fields:
            if isinstance(val, basestring) or not hasattr(val, "__iter__"):
                val = [val]
            for v in val:
                if v is not None:
                    # Don't call str() on bytestrings: in Py3 it all goes wrong.
                    if not isinstance(v, bytes):
                        v = str(v)

                    new_fields.append(
                        (
                            field.decode("utf-8")
                            if isinstance(field, bytes)
                            else field,
                            v.encode("utf-8") if isinstance(v, str) else v,
                        )
                    )

        # Then append the file entries, unpacking the optional tuple forms.
        for k, v in files:
            # support for explicit filename
            ft = None
            fh = None
            if isinstance(v, (tuple, list)):
                if len(v) == 2:
                    fn, fp = v
                elif len(v) == 3:
                    fn, fp, ft = v
                else:
                    fn, fp, ft, fh = v
            else:
                fn = guess_filename(v) or k
                fp = v

            if isinstance(fp, (str, bytes, bytearray)):
                # Literal content was supplied instead of a file object.
                fdata = fp
            elif hasattr(fp, "read"):
                fdata = fp.read()
            elif fp is None:
                # Explicit None means "skip this entry".
                continue
            else:
                fdata = fp

            rf = RequestField(name=k, data=fdata, filename=fn, headers=fh)
            rf.make_multipart(content_type=ft)
            new_fields.append(rf)

        body, content_type = encode_multipart_formdata(new_fields)

        return body, content_type
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
class RequestHooksMixin:
    """Add/remove per-event callbacks stored in the instance's ``self.hooks`` dict."""

    def register_hook(self, event, hook):
        """Properly register a hook."""
        if event not in self.hooks:
            raise ValueError(f'Unsupported event specified, with event name "{event}"')

        target = self.hooks[event]
        if isinstance(hook, Callable):
            target.append(hook)
        elif hasattr(hook, "__iter__"):
            # An iterable of candidates: keep only the callable ones.
            target.extend(h for h in hook if isinstance(h, Callable))

    def deregister_hook(self, event, hook):
        """Deregister a previously registered hook.
        Returns True if the hook existed, False if not.
        """
        try:
            self.hooks[event].remove(hook)
        except ValueError:
            return False
        return True
|
| 228 |
+
|
| 229 |
+
|
| 230 |
+
class Request(RequestHooksMixin):
    """A user-created :class:`Request <Request>` object.

    Used to prepare a :class:`PreparedRequest <PreparedRequest>`, which is sent to the server.

    :param method: HTTP method to use.
    :param url: URL to send.
    :param headers: dictionary of headers to send.
    :param files: dictionary of {filename: fileobject} files to multipart upload.
    :param data: the body to attach to the request. If a dictionary or
        list of tuples ``[(key, value)]`` is provided, form-encoding will
        take place.
    :param json: json for the body to attach to the request (if files or data is not specified).
    :param params: URL parameters to append to the URL. If a dictionary or
        list of tuples ``[(key, value)]`` is provided, form-encoding will
        take place.
    :param auth: Auth handler or (user, pass) tuple.
    :param cookies: dictionary or CookieJar of cookies to attach to this request.
    :param hooks: dictionary of callback hooks, for internal usage.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'https://httpbin.org/get')
      >>> req.prepare()
      <PreparedRequest [GET]>
    """

    def __init__(
        self,
        method=None,
        url=None,
        headers=None,
        files=None,
        data=None,
        params=None,
        auth=None,
        cookies=None,
        hooks=None,
        json=None,
    ):
        # Default empty dicts for dict params.
        data = [] if data is None else data
        files = [] if files is None else files
        headers = {} if headers is None else headers
        params = {} if params is None else params
        hooks = {} if hooks is None else hooks

        # Start from the canonical hook registry, then merge the caller's
        # hooks through register_hook() so unknown event names are rejected.
        self.hooks = default_hooks()
        for k, v in list(hooks.items()):
            self.register_hook(event=k, hook=v)

        self.method = method
        self.url = url
        self.headers = headers
        self.files = files
        self.data = data
        self.json = json
        self.params = params
        self.auth = auth
        self.cookies = cookies

    def __repr__(self):
        return f"<Request [{self.method}]>"

    def prepare(self):
        """Constructs a :class:`PreparedRequest <PreparedRequest>` for transmission and returns it."""
        p = PreparedRequest()
        p.prepare(
            method=self.method,
            url=self.url,
            headers=self.headers,
            files=self.files,
            data=self.data,
            json=self.json,
            params=self.params,
            auth=self.auth,
            cookies=self.cookies,
            hooks=self.hooks,
        )
        return p
|
| 311 |
+
|
| 312 |
+
|
| 313 |
+
class PreparedRequest(RequestEncodingMixin, RequestHooksMixin):
    """The fully mutable :class:`PreparedRequest <PreparedRequest>` object,
    containing the exact bytes that will be sent to the server.

    Instances are generated from a :class:`Request <Request>` object, and
    should not be instantiated manually; doing so may produce undesirable
    effects.

    Usage::

      >>> import requests
      >>> req = requests.Request('GET', 'https://httpbin.org/get')
      >>> r = req.prepare()
      >>> r
      <PreparedRequest [GET]>

      >>> s = requests.Session()
      >>> s.send(r)
      <Response [200]>
    """

    def __init__(self):
        #: HTTP verb to send to the server.
        self.method = None
        #: HTTP URL to send the request to.
        self.url = None
        #: dictionary of HTTP headers.
        self.headers = None
        # The `CookieJar` used to create the Cookie header will be stored here
        # after prepare_cookies is called
        self._cookies = None
        #: request body to send to the server.
        self.body = None
        #: dictionary of callback hooks, for internal usage.
        self.hooks = default_hooks()
        #: integer denoting starting position of a readable file-like body.
        self._body_position = None

    def prepare(
        self,
        method=None,
        url=None,
        headers=None,
        files=None,
        data=None,
        params=None,
        auth=None,
        cookies=None,
        hooks=None,
        json=None,
    ):
        """Prepares the entire request with the given parameters.

        The call order below matters: headers must exist before cookies
        (prepare_cookies writes a ``Cookie`` header) and before the body
        (prepare_body writes Content-Length / Content-Type headers).
        """

        self.prepare_method(method)
        self.prepare_url(url, params)
        self.prepare_headers(headers)
        self.prepare_cookies(cookies)
        self.prepare_body(data, files, json)
        self.prepare_auth(auth, url)

        # Note that prepare_auth must be last to enable authentication schemes
        # such as OAuth to work on a fully prepared request.

        # This MUST go after prepare_auth. Authenticators could add a hook
        self.prepare_hooks(hooks)

    def __repr__(self):
        return f"<PreparedRequest [{self.method}]>"

    def copy(self):
        """Return a shallow-ish copy of this PreparedRequest.

        Headers and cookies are duplicated; the body and the hooks dict are
        shared with the copy (same objects, not duplicated).
        """
        p = PreparedRequest()
        p.method = self.method
        p.url = self.url
        p.headers = self.headers.copy() if self.headers is not None else None
        p._cookies = _copy_cookie_jar(self._cookies)
        p.body = self.body
        p.hooks = self.hooks
        p._body_position = self._body_position
        return p

    def prepare_method(self, method):
        """Prepares the given HTTP method (uppercased, native string)."""
        self.method = method
        if self.method is not None:
            self.method = to_native_string(self.method.upper())

    @staticmethod
    def _get_idna_encoded_host(host):
        """IDNA-encode a non-ASCII hostname; raises UnicodeError on failure."""
        # Imported lazily so idna is only required for non-ASCII hosts.
        import idna

        try:
            host = idna.encode(host, uts46=True).decode("utf-8")
        except idna.IDNAError:
            raise UnicodeError
        return host

    def prepare_url(self, url, params):
        """Prepares the given HTTP URL."""
        #: Accept objects that have string representations.
        #: We're unable to blindly call unicode/str functions
        #: as this will include the bytestring indicator (b'')
        #: on python 3.x.
        #: https://github.com/psf/requests/pull/2238
        if isinstance(url, bytes):
            url = url.decode("utf8")
        else:
            url = str(url)

        # Remove leading whitespaces from url
        url = url.lstrip()

        # Don't do any URL preparation for non-HTTP schemes like `mailto`,
        # `data` etc to work around exceptions from `url_parse`, which
        # handles RFC 3986 only.
        if ":" in url and not url.lower().startswith("http"):
            self.url = url
            return

        # Support for unicode domain names and paths.
        try:
            scheme, auth, host, port, path, query, fragment = parse_url(url)
        except LocationParseError as e:
            raise InvalidURL(*e.args)

        if not scheme:
            raise MissingSchema(
                f"Invalid URL {url!r}: No scheme supplied. "
                f"Perhaps you meant https://{url}?"
            )

        if not host:
            raise InvalidURL(f"Invalid URL {url!r}: No host supplied")

        # In general, we want to try IDNA encoding the hostname if the string contains
        # non-ASCII characters. This allows users to automatically get the correct IDNA
        # behaviour. For strings containing only ASCII characters, we need to also verify
        # it doesn't start with a wildcard (*), before allowing the unencoded hostname.
        if not unicode_is_ascii(host):
            try:
                host = self._get_idna_encoded_host(host)
            except UnicodeError:
                raise InvalidURL("URL has an invalid label.")
        elif host.startswith(("*", ".")):
            raise InvalidURL("URL has an invalid label.")

        # Carefully reconstruct the network location
        netloc = auth or ""
        if netloc:
            netloc += "@"
        netloc += host
        if port:
            netloc += f":{port}"

        # Bare domains aren't valid URLs.
        if not path:
            path = "/"

        if isinstance(params, (str, bytes)):
            params = to_native_string(params)

        # Merge caller-supplied params into any query string already in the URL.
        enc_params = self._encode_params(params)
        if enc_params:
            if query:
                query = f"{query}&{enc_params}"
            else:
                query = enc_params

        url = requote_uri(urlunparse([scheme, netloc, path, None, query, fragment]))
        self.url = url

    def prepare_headers(self, headers):
        """Prepares the given HTTP headers into a CaseInsensitiveDict."""

        self.headers = CaseInsensitiveDict()
        if headers:
            for header in headers.items():
                # Raise exception on invalid header value.
                check_header_validity(header)
                name, value = header
                self.headers[to_native_string(name)] = value

    def prepare_body(self, data, files, json=None):
        """Prepares the given HTTP body data.

        Precedence: a stream-like ``data`` wins over everything; otherwise
        ``files`` produces a multipart body; otherwise ``data`` is
        form-encoded; ``json`` is only used when ``data`` is falsy.
        """

        # Check if file, fo, generator, iterator.
        # If not, run through normal process.

        # Nottin' on you.
        body = None
        content_type = None

        if not data and json is not None:
            # urllib3 requires a bytes-like body. Python 2's json.dumps
            # provides this natively, but Python 3 gives a Unicode string.
            content_type = "application/json"

            try:
                body = complexjson.dumps(json, allow_nan=False)
            except ValueError as ve:
                raise InvalidJSONError(ve, request=self)

            if not isinstance(body, bytes):
                body = body.encode("utf-8")

        # A "stream" is any iterable that is not a string, list, tuple or dict.
        is_stream = all(
            [
                hasattr(data, "__iter__"),
                not isinstance(data, (basestring, list, tuple, Mapping)),
            ]
        )

        if is_stream:
            try:
                length = super_len(data)
            except (TypeError, AttributeError, UnsupportedOperation):
                length = None

            body = data

            if getattr(body, "tell", None) is not None:
                # Record the current file position before reading.
                # This will allow us to rewind a file in the event
                # of a redirect.
                try:
                    self._body_position = body.tell()
                except OSError:
                    # This differentiates from None, allowing us to catch
                    # a failed `tell()` later when trying to rewind the body
                    self._body_position = object()

            if files:
                raise NotImplementedError(
                    "Streamed bodies and files are mutually exclusive."
                )

            if length:
                self.headers["Content-Length"] = builtin_str(length)
            else:
                self.headers["Transfer-Encoding"] = "chunked"
        else:
            # Multi-part file uploads.
            if files:
                (body, content_type) = self._encode_files(files, data)
            else:
                if data:
                    body = self._encode_params(data)
                    # Strings and file-like data keep any caller-set
                    # Content-Type; only form-encodable data gets one.
                    if isinstance(data, basestring) or hasattr(data, "read"):
                        content_type = None
                    else:
                        content_type = "application/x-www-form-urlencoded"

            self.prepare_content_length(body)

            # Add content-type if it wasn't explicitly provided.
            if content_type and ("content-type" not in self.headers):
                self.headers["Content-Type"] = content_type

        self.body = body

    def prepare_content_length(self, body):
        """Prepare Content-Length header based on request method and body"""
        if body is not None:
            length = super_len(body)
            if length:
                # If length exists, set it. Otherwise, we fallback
                # to Transfer-Encoding: chunked.
                self.headers["Content-Length"] = builtin_str(length)
        elif (
            self.method not in ("GET", "HEAD")
            and self.headers.get("Content-Length") is None
        ):
            # Set Content-Length to 0 for methods that can have a body
            # but don't provide one. (i.e. not GET or HEAD)
            self.headers["Content-Length"] = "0"

    def prepare_auth(self, auth, url=""):
        """Prepares the given HTTP auth data."""

        # If no Auth is explicitly provided, extract it from the URL first.
        if auth is None:
            url_auth = get_auth_from_url(self.url)
            auth = url_auth if any(url_auth) else None

        if auth:
            if isinstance(auth, tuple) and len(auth) == 2:
                # special-case basic HTTP auth
                auth = HTTPBasicAuth(*auth)

            # Allow auth to make its changes.
            r = auth(self)

            # Update self to reflect the auth changes.
            self.__dict__.update(r.__dict__)

            # Recompute Content-Length
            self.prepare_content_length(self.body)

    def prepare_cookies(self, cookies):
        """Prepares the given HTTP cookie data.

        This function eventually generates a ``Cookie`` header from the
        given cookies using cookielib. Due to cookielib's design, the header
        will not be regenerated if it already exists, meaning this function
        can only be called once for the life of the
        :class:`PreparedRequest <PreparedRequest>` object. Any subsequent calls
        to ``prepare_cookies`` will have no actual effect, unless the "Cookie"
        header is removed beforehand.
        """
        if isinstance(cookies, cookielib.CookieJar):
            self._cookies = cookies
        else:
            self._cookies = cookiejar_from_dict(cookies)

        cookie_header = get_cookie_header(self._cookies, self)
        if cookie_header is not None:
            self.headers["Cookie"] = cookie_header

    def prepare_hooks(self, hooks):
        """Prepares the given hooks."""
        # hooks can be passed as None to the prepare method and to this
        # method. To prevent iterating over None, simply use an empty list
        # if hooks is False-y
        hooks = hooks or []
        for event in hooks:
            self.register_hook(event, hooks[event])
|
| 638 |
+
|
| 639 |
+
|
| 640 |
+
class Response:
    """The :class:`Response <Response>` object, which contains a
    server's response to an HTTP request.
    """

    # Attributes preserved by pickling (see __getstate__ / __setstate__).
    __attrs__ = [
        "_content",
        "status_code",
        "headers",
        "url",
        "history",
        "encoding",
        "reason",
        "cookies",
        "elapsed",
        "request",
    ]

    def __init__(self):
        # _content is False until the body is read; then bytes or None.
        self._content = False
        self._content_consumed = False
        self._next = None

        #: Integer Code of responded HTTP Status, e.g. 404 or 200.
        self.status_code = None

        #: Case-insensitive Dictionary of Response Headers.
        #: For example, ``headers['content-encoding']`` will return the
        #: value of a ``'Content-Encoding'`` response header.
        self.headers = CaseInsensitiveDict()

        #: File-like object representation of response (for advanced usage).
        #: Use of ``raw`` requires that ``stream=True`` be set on the request.
        #: This requirement does not apply for use internally to Requests.
        self.raw = None

        #: Final URL location of Response.
        self.url = None

        #: Encoding to decode with when accessing r.text.
        self.encoding = None

        #: A list of :class:`Response <Response>` objects from
        #: the history of the Request. Any redirect responses will end
        #: up here. The list is sorted from the oldest to the most recent request.
        self.history = []

        #: Textual reason of responded HTTP Status, e.g. "Not Found" or "OK".
        self.reason = None

        #: A CookieJar of Cookies the server sent back.
        self.cookies = cookiejar_from_dict({})

        #: The amount of time elapsed between sending the request
        #: and the arrival of the response (as a timedelta).
        #: This property specifically measures the time taken between sending
        #: the first byte of the request and finishing parsing the headers. It
        #: is therefore unaffected by consuming the response content or the
        #: value of the ``stream`` keyword argument.
        self.elapsed = datetime.timedelta(0)

        #: The :class:`PreparedRequest <PreparedRequest>` object to which this
        #: is a response.
        self.request = None

    def __enter__(self):
        return self

    def __exit__(self, *args):
        self.close()

    def __getstate__(self):
        # Consume everything; accessing the content attribute makes
        # sure the content has been fully read.
        if not self._content_consumed:
            self.content

        return {attr: getattr(self, attr, None) for attr in self.__attrs__}

    def __setstate__(self, state):
        for name, value in state.items():
            setattr(self, name, value)

        # pickled objects do not have .raw
        setattr(self, "_content_consumed", True)
        setattr(self, "raw", None)

    def __repr__(self):
        return f"<Response [{self.status_code}]>"

    def __bool__(self):
        """Returns True if :attr:`status_code` is less than 400.

        This attribute checks if the status code of the response is between
        400 and 600 to see if there was a client error or a server error. If
        the status code, is between 200 and 400, this will return True. This
        is **not** a check to see if the response code is ``200 OK``.
        """
        return self.ok

    def __nonzero__(self):
        """Returns True if :attr:`status_code` is less than 400.

        This attribute checks if the status code of the response is between
        400 and 600 to see if there was a client error or a server error. If
        the status code, is between 200 and 400, this will return True. This
        is **not** a check to see if the response code is ``200 OK``.
        """
        return self.ok

    def __iter__(self):
        """Allows you to use a response as an iterator."""
        return self.iter_content(128)

    @property
    def ok(self):
        """Returns True if :attr:`status_code` is less than 400, False if not.

        This attribute checks if the status code of the response is between
        400 and 600 to see if there was a client error or a server error. If
        the status code is between 200 and 400, this will return True. This
        is **not** a check to see if the response code is ``200 OK``.
        """
        try:
            self.raise_for_status()
        except HTTPError:
            return False
        return True

    @property
    def is_redirect(self):
        """True if this Response is a well-formed HTTP redirect that could have
        been processed automatically (by :meth:`Session.resolve_redirects`).
        """
        return "location" in self.headers and self.status_code in REDIRECT_STATI

    @property
    def is_permanent_redirect(self):
        """True if this Response one of the permanent versions of redirect."""
        return "location" in self.headers and self.status_code in (
            codes.moved_permanently,
            codes.permanent_redirect,
        )

    @property
    def next(self):
        """Returns a PreparedRequest for the next request in a redirect chain, if there is one."""
        return self._next

    @property
    def apparent_encoding(self):
        """The apparent encoding, provided by the charset_normalizer or chardet libraries."""
        if chardet is not None:
            return chardet.detect(self.content)["encoding"]
        else:
            # If no character detection library is available, we'll fall back
            # to a standard Python utf-8 str.
            return "utf-8"

    def iter_content(self, chunk_size=1, decode_unicode=False):
        """Iterates over the response data. When stream=True is set on the
        request, this avoids reading the content at once into memory for
        large responses. The chunk size is the number of bytes it should
        read into memory. This is not necessarily the length of each item
        returned as decoding can take place.

        chunk_size must be of type int or None. A value of None will
        function differently depending on the value of `stream`.
        stream=True will read data as it arrives in whatever size the
        chunks are received. If stream=False, data is returned as
        a single chunk.

        If decode_unicode is True, content will be decoded using the best
        available encoding based on the response.
        """

        def generate():
            # Special case for urllib3.
            if hasattr(self.raw, "stream"):
                try:
                    yield from self.raw.stream(chunk_size, decode_content=True)
                except ProtocolError as e:
                    raise ChunkedEncodingError(e)
                except DecodeError as e:
                    raise ContentDecodingError(e)
                except ReadTimeoutError as e:
                    raise ConnectionError(e)
                except SSLError as e:
                    raise RequestsSSLError(e)
            else:
                # Standard file-like object.
                while True:
                    chunk = self.raw.read(chunk_size)
                    if not chunk:
                        break
                    yield chunk

            self._content_consumed = True

        # _content being a bool means the stream was consumed without
        # caching the body, so re-iterating is impossible.
        if self._content_consumed and isinstance(self._content, bool):
            raise StreamConsumedError()
        elif chunk_size is not None and not isinstance(chunk_size, int):
            raise TypeError(
                f"chunk_size must be an int, it is instead a {type(chunk_size)}."
            )
        # simulate reading small chunks of the content
        reused_chunks = iter_slices(self._content, chunk_size)

        stream_chunks = generate()

        chunks = reused_chunks if self._content_consumed else stream_chunks

        if decode_unicode:
            chunks = stream_decode_response_unicode(chunks, self)

        return chunks

    def iter_lines(
        self, chunk_size=ITER_CHUNK_SIZE, decode_unicode=False, delimiter=None
    ):
        """Iterates over the response data, one line at a time. When
        stream=True is set on the request, this avoids reading the
        content at once into memory for large responses.

        .. note:: This method is not reentrant safe.
        """

        pending = None

        for chunk in self.iter_content(
            chunk_size=chunk_size, decode_unicode=decode_unicode
        ):
            if pending is not None:
                chunk = pending + chunk

            if delimiter:
                lines = chunk.split(delimiter)
            else:
                lines = chunk.splitlines()

            # If the chunk ends mid-line, hold the trailing fragment back
            # and prepend it to the next chunk.
            if lines and lines[-1] and chunk and lines[-1][-1] == chunk[-1]:
                pending = lines.pop()
            else:
                pending = None

            yield from lines

        if pending is not None:
            yield pending

    @property
    def content(self):
        """Content of the response, in bytes."""

        if self._content is False:
            # Read the contents.
            if self._content_consumed:
                raise RuntimeError("The content for this response was already consumed")

            if self.status_code == 0 or self.raw is None:
                self._content = None
            else:
                self._content = b"".join(self.iter_content(CONTENT_CHUNK_SIZE)) or b""

        self._content_consumed = True
        # don't need to release the connection; that's been handled by urllib3
        # since we exhausted the data.
        return self._content

    @property
    def text(self):
        """Content of the response, in unicode.

        If Response.encoding is None, encoding will be guessed using
        ``charset_normalizer`` or ``chardet``.

        The encoding of the response content is determined based solely on HTTP
        headers, following RFC 2616 to the letter. If you can take advantage of
        non-HTTP knowledge to make a better guess at the encoding, you should
        set ``r.encoding`` appropriately before accessing this property.
        """

        # Try charset from content-type
        content = None
        encoding = self.encoding

        if not self.content:
            return ""

        # Fallback to auto-detected encoding.
        if self.encoding is None:
            encoding = self.apparent_encoding

        # Decode unicode from given encoding.
        try:
            content = str(self.content, encoding, errors="replace")
        except (LookupError, TypeError):
            # A LookupError is raised if the encoding was not found which could
            # indicate a misspelling or similar mistake.
            #
            # A TypeError can be raised if encoding is None
            #
            # So we try blindly encoding.
            content = str(self.content, errors="replace")

        return content

    def json(self, **kwargs):
        r"""Returns the json-encoded content of a response, if any.

        :param \*\*kwargs: Optional arguments that ``json.loads`` takes.
        :raises requests.exceptions.JSONDecodeError: If the response body does not
            contain valid json.
        """

        if not self.encoding and self.content and len(self.content) > 3:
            # No encoding set. JSON RFC 4627 section 3 states we should expect
            # UTF-8, -16 or -32. Detect which one to use; If the detection or
            # decoding fails, fall back to `self.text` (using charset_normalizer to make
            # a best guess).
            encoding = guess_json_utf(self.content)
            if encoding is not None:
                try:
                    return complexjson.loads(self.content.decode(encoding), **kwargs)
                except UnicodeDecodeError:
                    # Wrong UTF codec detected; usually because it's not UTF-8
                    # but some other 8-bit codec. This is an RFC violation,
                    # and the server didn't bother to tell us what codec *was*
                    # used.
                    pass
                except JSONDecodeError as e:
                    raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)

        try:
            return complexjson.loads(self.text, **kwargs)
        except JSONDecodeError as e:
            # Catch JSON-related errors and raise as requests.JSONDecodeError
            # This aliases json.JSONDecodeError and simplejson.JSONDecodeError
            raise RequestsJSONDecodeError(e.msg, e.doc, e.pos)

    @property
    def links(self):
        """Returns the parsed header links of the response, if any."""

        header = self.headers.get("link")

        resolved_links = {}

        if header:
            links = parse_header_links(header)

            for link in links:
                # Key by rel when present, falling back to the link URL.
                key = link.get("rel") or link.get("url")
                resolved_links[key] = link

        return resolved_links

    def raise_for_status(self):
        """Raises :class:`HTTPError`, if one occurred."""

        http_error_msg = ""
        if isinstance(self.reason, bytes):
            # We attempt to decode utf-8 first because some servers
            # choose to localize their reason strings. If the string
            # isn't utf-8, we fall back to iso-8859-1 for all other
            # encodings. (See PR #3538)
            try:
                reason = self.reason.decode("utf-8")
            except UnicodeDecodeError:
                reason = self.reason.decode("iso-8859-1")
        else:
            reason = self.reason

        if 400 <= self.status_code < 500:
            http_error_msg = (
                f"{self.status_code} Client Error: {reason} for url: {self.url}"
            )

        elif 500 <= self.status_code < 600:
            http_error_msg = (
                f"{self.status_code} Server Error: {reason} for url: {self.url}"
            )

        if http_error_msg:
            raise HTTPError(http_error_msg, response=self)

    def close(self):
        """Releases the connection back to the pool. Once this method has been
        called the underlying ``raw`` object must not be accessed again.

        *Note: Should not normally need to be called explicitly.*
        """
        if not self._content_consumed:
            self.raw.close()

        release_conn = getattr(self.raw, "release_conn", None)
        if release_conn is not None:
            release_conn()
|
wemm/lib/python3.10/site-packages/requests/packages.py
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import sys

from .compat import chardet

# This code exists for backwards compatibility reasons.
# I don't like it either. Just look the other way. :)

# Alias urllib3 and idna (and every submodule already imported) under the
# legacy ``requests.packages.*`` namespace so old import paths keep working.
for package in ("urllib3", "idna"):
    locals()[package] = __import__(package)
    # This traversal is apparently necessary such that the identities are
    # preserved (requests.packages.urllib3.* is urllib3.*)
    for mod in list(sys.modules):
        if mod == package or mod.startswith(f"{package}."):
            sys.modules[f"requests.packages.{mod}"] = sys.modules[mod]

if chardet is not None:
    # chardet may actually be charset_normalizer; alias it under both its
    # real name and the historical "chardet" name.
    target = chardet.__name__
    for mod in list(sys.modules):
        if mod == target or mod.startswith(f"{target}."):
            imported_mod = sys.modules[mod]
            sys.modules[f"requests.packages.{mod}"] = imported_mod
            mod = mod.replace(target, "chardet")
            sys.modules[f"requests.packages.{mod}"] = imported_mod
|
wemm/lib/python3.10/site-packages/requests/sessions.py
ADDED
|
@@ -0,0 +1,831 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
requests.sessions
|
| 3 |
+
~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
This module provides a Session object to manage and persist settings across
|
| 6 |
+
requests (cookies, auth, proxies).
|
| 7 |
+
"""
|
| 8 |
+
import os
|
| 9 |
+
import sys
|
| 10 |
+
import time
|
| 11 |
+
from collections import OrderedDict
|
| 12 |
+
from datetime import timedelta
|
| 13 |
+
|
| 14 |
+
from ._internal_utils import to_native_string
|
| 15 |
+
from .adapters import HTTPAdapter
|
| 16 |
+
from .auth import _basic_auth_str
|
| 17 |
+
from .compat import Mapping, cookielib, urljoin, urlparse
|
| 18 |
+
from .cookies import (
|
| 19 |
+
RequestsCookieJar,
|
| 20 |
+
cookiejar_from_dict,
|
| 21 |
+
extract_cookies_to_jar,
|
| 22 |
+
merge_cookies,
|
| 23 |
+
)
|
| 24 |
+
from .exceptions import (
|
| 25 |
+
ChunkedEncodingError,
|
| 26 |
+
ContentDecodingError,
|
| 27 |
+
InvalidSchema,
|
| 28 |
+
TooManyRedirects,
|
| 29 |
+
)
|
| 30 |
+
from .hooks import default_hooks, dispatch_hook
|
| 31 |
+
|
| 32 |
+
# formerly defined here, reexposed here for backward compatibility
|
| 33 |
+
from .models import ( # noqa: F401
|
| 34 |
+
DEFAULT_REDIRECT_LIMIT,
|
| 35 |
+
REDIRECT_STATI,
|
| 36 |
+
PreparedRequest,
|
| 37 |
+
Request,
|
| 38 |
+
)
|
| 39 |
+
from .status_codes import codes
|
| 40 |
+
from .structures import CaseInsensitiveDict
|
| 41 |
+
from .utils import ( # noqa: F401
|
| 42 |
+
DEFAULT_PORTS,
|
| 43 |
+
default_headers,
|
| 44 |
+
get_auth_from_url,
|
| 45 |
+
get_environ_proxies,
|
| 46 |
+
get_netrc_auth,
|
| 47 |
+
requote_uri,
|
| 48 |
+
resolve_proxies,
|
| 49 |
+
rewind_body,
|
| 50 |
+
should_bypass_proxies,
|
| 51 |
+
to_key_val_list,
|
| 52 |
+
)
|
| 53 |
+
|
| 54 |
+
# Preferred clock, based on which one is more accurate on a given system.
preferred_clock = time.perf_counter if sys.platform == "win32" else time.time
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
def merge_setting(request_setting, session_setting, dict_class=OrderedDict):
    """Determines appropriate setting for a given request, taking into account
    the explicit setting on that request, and the setting in the session. If a
    setting is a dictionary, they will be merged together using `dict_class`
    """
    if session_setting is None:
        return request_setting
    if request_setting is None:
        return session_setting

    # Bypass if not a dictionary (e.g. verify)
    both_mappings = isinstance(session_setting, Mapping) and isinstance(
        request_setting, Mapping
    )
    if not both_mappings:
        return request_setting

    # Request-level entries win over session-level ones.
    merged = dict_class(to_key_val_list(session_setting))
    merged.update(to_key_val_list(request_setting))

    # Drop keys the request explicitly set to None; collect the keys first so
    # the mapping is not mutated while being iterated.
    for key in [k for k, v in merged.items() if v is None]:
        del merged[key]

    return merged
|
| 89 |
+
|
| 90 |
+
|
| 91 |
+
def merge_hooks(request_hooks, session_hooks, dict_class=OrderedDict):
    """Properly merges both requests and session hooks.

    This is necessary because when request_hooks == {'response': []}, the
    merge breaks Session hooks entirely.
    """
    # A hooks dict whose 'response' list is empty counts as "no hooks".
    session_empty = session_hooks is None or session_hooks.get("response") == []
    if session_empty:
        return request_hooks

    request_empty = request_hooks is None or request_hooks.get("response") == []
    if request_empty:
        return session_hooks

    return merge_setting(request_hooks, session_hooks, dict_class)
|
| 104 |
+
|
| 105 |
+
|
| 106 |
+
class SessionRedirectMixin:
    """Redirect-following machinery mixed into :class:`Session`.

    Provides redirect-target extraction, auth/proxy/method rebuilding on each
    hop, and the ``resolve_redirects`` generator that drives the redirect loop.
    """

    def get_redirect_target(self, resp):
        """Receives a Response. Returns a redirect URI or ``None``"""
        # Due to the nature of how requests processes redirects this method will
        # be called at least once upon the original response and at least twice
        # on each subsequent redirect response (if any).
        # If a custom mixin is used to handle this logic, it may be advantageous
        # to cache the redirect location onto the response object as a private
        # attribute.
        if resp.is_redirect:
            location = resp.headers["location"]
            # Currently the underlying http module on py3 decode headers
            # in latin1, but empirical evidence suggests that latin1 is very
            # rarely used with non-ASCII characters in HTTP headers.
            # It is more likely to get UTF8 header rather than latin1.
            # This causes incorrect handling of UTF8 encoded location headers.
            # To solve this, we re-encode the location in latin1.
            location = location.encode("latin1")
            return to_native_string(location, "utf8")
        return None

    def should_strip_auth(self, old_url, new_url):
        """Decide whether Authorization header should be removed when redirecting"""
        old_parsed = urlparse(old_url)
        new_parsed = urlparse(new_url)
        # Different host: always strip credentials.
        if old_parsed.hostname != new_parsed.hostname:
            return True
        # Special case: allow http -> https redirect when using the standard
        # ports. This isn't specified by RFC 7235, but is kept to avoid
        # breaking backwards compatibility with older versions of requests
        # that allowed any redirects on the same host.
        if (
            old_parsed.scheme == "http"
            and old_parsed.port in (80, None)
            and new_parsed.scheme == "https"
            and new_parsed.port in (443, None)
        ):
            return False

        # Handle default port usage corresponding to scheme.
        changed_port = old_parsed.port != new_parsed.port
        changed_scheme = old_parsed.scheme != new_parsed.scheme
        default_port = (DEFAULT_PORTS.get(old_parsed.scheme, None), None)
        if (
            not changed_scheme
            and old_parsed.port in default_port
            and new_parsed.port in default_port
        ):
            return False

        # Standard case: root URI must match
        return changed_port or changed_scheme

    def resolve_redirects(
        self,
        resp,
        req,
        stream=False,
        timeout=None,
        verify=True,
        cert=None,
        proxies=None,
        yield_requests=False,
        **adapter_kwargs,
    ):
        """Receives a Response. Returns a generator of Responses or Requests.

        If ``yield_requests`` is true, yields the rebuilt
        :class:`PreparedRequest` for each hop without sending it; otherwise
        sends each hop and yields the resulting :class:`Response`.

        :raises TooManyRedirects: when the hop count reaches
            ``self.max_redirects``.
        """

        hist = []  # keep track of history

        url = self.get_redirect_target(resp)
        previous_fragment = urlparse(req.url).fragment
        while url:
            prepared_request = req.copy()

            # Update history and keep track of redirects.
            # resp.history must ignore the original request in this loop
            hist.append(resp)
            resp.history = hist[1:]

            try:
                resp.content  # Consume socket so it can be released
            except (ChunkedEncodingError, ContentDecodingError, RuntimeError):
                resp.raw.read(decode_content=False)

            if len(resp.history) >= self.max_redirects:
                raise TooManyRedirects(
                    f"Exceeded {self.max_redirects} redirects.", response=resp
                )

            # Release the connection back into the pool.
            resp.close()

            # Handle redirection without scheme (see: RFC 1808 Section 4)
            if url.startswith("//"):
                parsed_rurl = urlparse(resp.url)
                url = ":".join([to_native_string(parsed_rurl.scheme), url])

            # Normalize url case and attach previous fragment if needed (RFC 7231 7.1.2)
            parsed = urlparse(url)
            if parsed.fragment == "" and previous_fragment:
                parsed = parsed._replace(fragment=previous_fragment)
            elif parsed.fragment:
                previous_fragment = parsed.fragment
            url = parsed.geturl()

            # Facilitate relative 'location' headers, as allowed by RFC 7231.
            # (e.g. '/path/to/resource' instead of 'http://domain.tld/path/to/resource')
            # Compliant with RFC3986, we percent encode the url.
            if not parsed.netloc:
                url = urljoin(resp.url, requote_uri(url))
            else:
                url = requote_uri(url)

            prepared_request.url = to_native_string(url)

            self.rebuild_method(prepared_request, resp)

            # https://github.com/psf/requests/issues/1084
            if resp.status_code not in (
                codes.temporary_redirect,
                codes.permanent_redirect,
            ):
                # https://github.com/psf/requests/issues/3490
                purged_headers = ("Content-Length", "Content-Type", "Transfer-Encoding")
                for header in purged_headers:
                    prepared_request.headers.pop(header, None)
                prepared_request.body = None

            headers = prepared_request.headers
            # Cookies are re-prepared below from the jar; drop the stale header.
            headers.pop("Cookie", None)

            # Extract any cookies sent on the response to the cookiejar
            # in the new request. Because we've mutated our copied prepared
            # request, use the old one that we haven't yet touched.
            extract_cookies_to_jar(prepared_request._cookies, req, resp.raw)
            merge_cookies(prepared_request._cookies, self.cookies)
            prepared_request.prepare_cookies(prepared_request._cookies)

            # Rebuild auth and proxy information.
            proxies = self.rebuild_proxies(prepared_request, proxies)
            self.rebuild_auth(prepared_request, resp)

            # A failed tell() sets `_body_position` to `object()`. This non-None
            # value ensures `rewindable` will be True, allowing us to raise an
            # UnrewindableBodyError, instead of hanging the connection.
            rewindable = prepared_request._body_position is not None and (
                "Content-Length" in headers or "Transfer-Encoding" in headers
            )

            # Attempt to rewind consumed file-like object.
            if rewindable:
                rewind_body(prepared_request)

            # Override the original request.
            req = prepared_request

            if yield_requests:
                yield req
            else:
                resp = self.send(
                    req,
                    stream=stream,
                    timeout=timeout,
                    verify=verify,
                    cert=cert,
                    proxies=proxies,
                    allow_redirects=False,
                    **adapter_kwargs,
                )

                extract_cookies_to_jar(self.cookies, prepared_request, resp.raw)

                # extract redirect url, if any, for the next loop
                url = self.get_redirect_target(resp)
                yield resp

    def rebuild_auth(self, prepared_request, response):
        """When being redirected we may want to strip authentication from the
        request to avoid leaking credentials. This method intelligently removes
        and reapplies authentication where possible to avoid credential loss.
        """
        headers = prepared_request.headers
        url = prepared_request.url

        if "Authorization" in headers and self.should_strip_auth(
            response.request.url, url
        ):
            # If we get redirected to a new host, we should strip out any
            # authentication headers.
            del headers["Authorization"]

        # .netrc might have more auth for us on our new host.
        new_auth = get_netrc_auth(url) if self.trust_env else None
        if new_auth is not None:
            prepared_request.prepare_auth(new_auth)

    def rebuild_proxies(self, prepared_request, proxies):
        """This method re-evaluates the proxy configuration by considering the
        environment variables. If we are redirected to a URL covered by
        NO_PROXY, we strip the proxy configuration. Otherwise, we set missing
        proxy keys for this URL (in case they were stripped by a previous
        redirect).

        This method also replaces the Proxy-Authorization header where
        necessary.

        :rtype: dict
        """
        headers = prepared_request.headers
        scheme = urlparse(prepared_request.url).scheme
        new_proxies = resolve_proxies(prepared_request, proxies, self.trust_env)

        # Always drop any stale proxy credentials carried over from the
        # previous hop before deciding whether to re-add them.
        if "Proxy-Authorization" in headers:
            del headers["Proxy-Authorization"]

        try:
            username, password = get_auth_from_url(new_proxies[scheme])
        except KeyError:
            username, password = None, None

        # urllib3 handles proxy authorization for us in the standard adapter.
        # Avoid appending this to TLS tunneled requests where it may be leaked.
        if not scheme.startswith("https") and username and password:
            headers["Proxy-Authorization"] = _basic_auth_str(username, password)

        return new_proxies

    def rebuild_method(self, prepared_request, response):
        """When being redirected we may want to change the method of the request
        based on certain specs or browser behavior.
        """
        method = prepared_request.method

        # https://tools.ietf.org/html/rfc7231#section-6.4.4
        if response.status_code == codes.see_other and method != "HEAD":
            method = "GET"

        # Do what the browsers do, despite standards...
        # First, turn 302s into GETs.
        if response.status_code == codes.found and method != "HEAD":
            method = "GET"

        # Second, if a POST is responded to with a 301, turn it into a GET.
        # This bizarre behaviour is explained in Issue 1704.
        if response.status_code == codes.moved and method == "POST":
            method = "GET"

        prepared_request.method = method
|
| 354 |
+
|
| 355 |
+
|
| 356 |
+
class Session(SessionRedirectMixin):
|
| 357 |
+
"""A Requests session.
|
| 358 |
+
|
| 359 |
+
Provides cookie persistence, connection-pooling, and configuration.
|
| 360 |
+
|
| 361 |
+
Basic Usage::
|
| 362 |
+
|
| 363 |
+
>>> import requests
|
| 364 |
+
>>> s = requests.Session()
|
| 365 |
+
>>> s.get('https://httpbin.org/get')
|
| 366 |
+
<Response [200]>
|
| 367 |
+
|
| 368 |
+
Or as a context manager::
|
| 369 |
+
|
| 370 |
+
>>> with requests.Session() as s:
|
| 371 |
+
... s.get('https://httpbin.org/get')
|
| 372 |
+
<Response [200]>
|
| 373 |
+
"""
|
| 374 |
+
|
| 375 |
+
__attrs__ = [
|
| 376 |
+
"headers",
|
| 377 |
+
"cookies",
|
| 378 |
+
"auth",
|
| 379 |
+
"proxies",
|
| 380 |
+
"hooks",
|
| 381 |
+
"params",
|
| 382 |
+
"verify",
|
| 383 |
+
"cert",
|
| 384 |
+
"adapters",
|
| 385 |
+
"stream",
|
| 386 |
+
"trust_env",
|
| 387 |
+
"max_redirects",
|
| 388 |
+
]
|
| 389 |
+
|
| 390 |
+
    def __init__(self):
        """Create a new session with library-default configuration."""
        #: A case-insensitive dictionary of headers to be sent on each
        #: :class:`Request <Request>` sent from this
        #: :class:`Session <Session>`.
        self.headers = default_headers()

        #: Default Authentication tuple or object to attach to
        #: :class:`Request <Request>`.
        self.auth = None

        #: Dictionary mapping protocol or protocol and host to the URL of the proxy
        #: (e.g. {'http': 'foo.bar:3128', 'http://host.name': 'foo.bar:4012'}) to
        #: be used on each :class:`Request <Request>`.
        self.proxies = {}

        #: Event-handling hooks.
        self.hooks = default_hooks()

        #: Dictionary of querystring data to attach to each
        #: :class:`Request <Request>`. The dictionary values may be lists for
        #: representing multivalued query parameters.
        self.params = {}

        #: Stream response content default.
        self.stream = False

        #: SSL Verification default.
        #: Defaults to `True`, requiring requests to verify the TLS certificate at the
        #: remote end.
        #: If verify is set to `False`, requests will accept any TLS certificate
        #: presented by the server, and will ignore hostname mismatches and/or
        #: expired certificates, which will make your application vulnerable to
        #: man-in-the-middle (MitM) attacks.
        #: Only set this to `False` for testing.
        self.verify = True

        #: SSL client certificate default, if String, path to ssl client
        #: cert file (.pem). If Tuple, ('cert', 'key') pair.
        self.cert = None

        #: Maximum number of redirects allowed. If the request exceeds this
        #: limit, a :class:`TooManyRedirects` exception is raised.
        #: This defaults to requests.models.DEFAULT_REDIRECT_LIMIT, which is
        #: 30.
        self.max_redirects = DEFAULT_REDIRECT_LIMIT

        #: Trust environment settings for proxy configuration, default
        #: authentication and similar.
        self.trust_env = True

        #: A CookieJar containing all currently outstanding cookies set on this
        #: session. By default it is a
        #: :class:`RequestsCookieJar <requests.cookies.RequestsCookieJar>`, but
        #: may be any other ``cookielib.CookieJar`` compatible object.
        self.cookies = cookiejar_from_dict({})

        # Default connection adapters. OrderedDict so that mount() can keep the
        # prefixes sorted longest-first for most-specific matching.
        self.adapters = OrderedDict()
        self.mount("https://", HTTPAdapter())
        self.mount("http://", HTTPAdapter())
|
| 450 |
+
|
| 451 |
+
    def __enter__(self):
        # Support use as a context manager: ``with requests.Session() as s:``.
        return self
|
| 453 |
+
|
| 454 |
+
    def __exit__(self, *args):
        # Close all adapters (and their pooled connections) on context exit.
        self.close()
|
| 456 |
+
|
| 457 |
+
def prepare_request(self, request):
|
| 458 |
+
"""Constructs a :class:`PreparedRequest <PreparedRequest>` for
|
| 459 |
+
transmission and returns it. The :class:`PreparedRequest` has settings
|
| 460 |
+
merged from the :class:`Request <Request>` instance and those of the
|
| 461 |
+
:class:`Session`.
|
| 462 |
+
|
| 463 |
+
:param request: :class:`Request` instance to prepare with this
|
| 464 |
+
session's settings.
|
| 465 |
+
:rtype: requests.PreparedRequest
|
| 466 |
+
"""
|
| 467 |
+
cookies = request.cookies or {}
|
| 468 |
+
|
| 469 |
+
# Bootstrap CookieJar.
|
| 470 |
+
if not isinstance(cookies, cookielib.CookieJar):
|
| 471 |
+
cookies = cookiejar_from_dict(cookies)
|
| 472 |
+
|
| 473 |
+
# Merge with session cookies
|
| 474 |
+
merged_cookies = merge_cookies(
|
| 475 |
+
merge_cookies(RequestsCookieJar(), self.cookies), cookies
|
| 476 |
+
)
|
| 477 |
+
|
| 478 |
+
# Set environment's basic authentication if not explicitly set.
|
| 479 |
+
auth = request.auth
|
| 480 |
+
if self.trust_env and not auth and not self.auth:
|
| 481 |
+
auth = get_netrc_auth(request.url)
|
| 482 |
+
|
| 483 |
+
p = PreparedRequest()
|
| 484 |
+
p.prepare(
|
| 485 |
+
method=request.method.upper(),
|
| 486 |
+
url=request.url,
|
| 487 |
+
files=request.files,
|
| 488 |
+
data=request.data,
|
| 489 |
+
json=request.json,
|
| 490 |
+
headers=merge_setting(
|
| 491 |
+
request.headers, self.headers, dict_class=CaseInsensitiveDict
|
| 492 |
+
),
|
| 493 |
+
params=merge_setting(request.params, self.params),
|
| 494 |
+
auth=merge_setting(auth, self.auth),
|
| 495 |
+
cookies=merged_cookies,
|
| 496 |
+
hooks=merge_hooks(request.hooks, self.hooks),
|
| 497 |
+
)
|
| 498 |
+
return p
|
| 499 |
+
|
| 500 |
+
def request(
|
| 501 |
+
self,
|
| 502 |
+
method,
|
| 503 |
+
url,
|
| 504 |
+
params=None,
|
| 505 |
+
data=None,
|
| 506 |
+
headers=None,
|
| 507 |
+
cookies=None,
|
| 508 |
+
files=None,
|
| 509 |
+
auth=None,
|
| 510 |
+
timeout=None,
|
| 511 |
+
allow_redirects=True,
|
| 512 |
+
proxies=None,
|
| 513 |
+
hooks=None,
|
| 514 |
+
stream=None,
|
| 515 |
+
verify=None,
|
| 516 |
+
cert=None,
|
| 517 |
+
json=None,
|
| 518 |
+
):
|
| 519 |
+
"""Constructs a :class:`Request <Request>`, prepares it and sends it.
|
| 520 |
+
Returns :class:`Response <Response>` object.
|
| 521 |
+
|
| 522 |
+
:param method: method for the new :class:`Request` object.
|
| 523 |
+
:param url: URL for the new :class:`Request` object.
|
| 524 |
+
:param params: (optional) Dictionary or bytes to be sent in the query
|
| 525 |
+
string for the :class:`Request`.
|
| 526 |
+
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
|
| 527 |
+
object to send in the body of the :class:`Request`.
|
| 528 |
+
:param json: (optional) json to send in the body of the
|
| 529 |
+
:class:`Request`.
|
| 530 |
+
:param headers: (optional) Dictionary of HTTP Headers to send with the
|
| 531 |
+
:class:`Request`.
|
| 532 |
+
:param cookies: (optional) Dict or CookieJar object to send with the
|
| 533 |
+
:class:`Request`.
|
| 534 |
+
:param files: (optional) Dictionary of ``'filename': file-like-objects``
|
| 535 |
+
for multipart encoding upload.
|
| 536 |
+
:param auth: (optional) Auth tuple or callable to enable
|
| 537 |
+
Basic/Digest/Custom HTTP Auth.
|
| 538 |
+
:param timeout: (optional) How long to wait for the server to send
|
| 539 |
+
data before giving up, as a float, or a :ref:`(connect timeout,
|
| 540 |
+
read timeout) <timeouts>` tuple.
|
| 541 |
+
:type timeout: float or tuple
|
| 542 |
+
:param allow_redirects: (optional) Set to True by default.
|
| 543 |
+
:type allow_redirects: bool
|
| 544 |
+
:param proxies: (optional) Dictionary mapping protocol or protocol and
|
| 545 |
+
hostname to the URL of the proxy.
|
| 546 |
+
:param hooks: (optional) Dictionary mapping hook name to one event or
|
| 547 |
+
list of events, event must be callable.
|
| 548 |
+
:param stream: (optional) whether to immediately download the response
|
| 549 |
+
content. Defaults to ``False``.
|
| 550 |
+
:param verify: (optional) Either a boolean, in which case it controls whether we verify
|
| 551 |
+
the server's TLS certificate, or a string, in which case it must be a path
|
| 552 |
+
to a CA bundle to use. Defaults to ``True``. When set to
|
| 553 |
+
``False``, requests will accept any TLS certificate presented by
|
| 554 |
+
the server, and will ignore hostname mismatches and/or expired
|
| 555 |
+
certificates, which will make your application vulnerable to
|
| 556 |
+
man-in-the-middle (MitM) attacks. Setting verify to ``False``
|
| 557 |
+
may be useful during local development or testing.
|
| 558 |
+
:param cert: (optional) if String, path to ssl client cert file (.pem).
|
| 559 |
+
If Tuple, ('cert', 'key') pair.
|
| 560 |
+
:rtype: requests.Response
|
| 561 |
+
"""
|
| 562 |
+
# Create the Request.
|
| 563 |
+
req = Request(
|
| 564 |
+
method=method.upper(),
|
| 565 |
+
url=url,
|
| 566 |
+
headers=headers,
|
| 567 |
+
files=files,
|
| 568 |
+
data=data or {},
|
| 569 |
+
json=json,
|
| 570 |
+
params=params or {},
|
| 571 |
+
auth=auth,
|
| 572 |
+
cookies=cookies,
|
| 573 |
+
hooks=hooks,
|
| 574 |
+
)
|
| 575 |
+
prep = self.prepare_request(req)
|
| 576 |
+
|
| 577 |
+
proxies = proxies or {}
|
| 578 |
+
|
| 579 |
+
settings = self.merge_environment_settings(
|
| 580 |
+
prep.url, proxies, stream, verify, cert
|
| 581 |
+
)
|
| 582 |
+
|
| 583 |
+
# Send the request.
|
| 584 |
+
send_kwargs = {
|
| 585 |
+
"timeout": timeout,
|
| 586 |
+
"allow_redirects": allow_redirects,
|
| 587 |
+
}
|
| 588 |
+
send_kwargs.update(settings)
|
| 589 |
+
resp = self.send(prep, **send_kwargs)
|
| 590 |
+
|
| 591 |
+
return resp
|
| 592 |
+
|
| 593 |
+
def get(self, url, **kwargs):
|
| 594 |
+
r"""Sends a GET request. Returns :class:`Response` object.
|
| 595 |
+
|
| 596 |
+
:param url: URL for the new :class:`Request` object.
|
| 597 |
+
:param \*\*kwargs: Optional arguments that ``request`` takes.
|
| 598 |
+
:rtype: requests.Response
|
| 599 |
+
"""
|
| 600 |
+
|
| 601 |
+
kwargs.setdefault("allow_redirects", True)
|
| 602 |
+
return self.request("GET", url, **kwargs)
|
| 603 |
+
|
| 604 |
+
def options(self, url, **kwargs):
|
| 605 |
+
r"""Sends a OPTIONS request. Returns :class:`Response` object.
|
| 606 |
+
|
| 607 |
+
:param url: URL for the new :class:`Request` object.
|
| 608 |
+
:param \*\*kwargs: Optional arguments that ``request`` takes.
|
| 609 |
+
:rtype: requests.Response
|
| 610 |
+
"""
|
| 611 |
+
|
| 612 |
+
kwargs.setdefault("allow_redirects", True)
|
| 613 |
+
return self.request("OPTIONS", url, **kwargs)
|
| 614 |
+
|
| 615 |
+
def head(self, url, **kwargs):
|
| 616 |
+
r"""Sends a HEAD request. Returns :class:`Response` object.
|
| 617 |
+
|
| 618 |
+
:param url: URL for the new :class:`Request` object.
|
| 619 |
+
:param \*\*kwargs: Optional arguments that ``request`` takes.
|
| 620 |
+
:rtype: requests.Response
|
| 621 |
+
"""
|
| 622 |
+
|
| 623 |
+
kwargs.setdefault("allow_redirects", False)
|
| 624 |
+
return self.request("HEAD", url, **kwargs)
|
| 625 |
+
|
| 626 |
+
def post(self, url, data=None, json=None, **kwargs):
|
| 627 |
+
r"""Sends a POST request. Returns :class:`Response` object.
|
| 628 |
+
|
| 629 |
+
:param url: URL for the new :class:`Request` object.
|
| 630 |
+
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
|
| 631 |
+
object to send in the body of the :class:`Request`.
|
| 632 |
+
:param json: (optional) json to send in the body of the :class:`Request`.
|
| 633 |
+
:param \*\*kwargs: Optional arguments that ``request`` takes.
|
| 634 |
+
:rtype: requests.Response
|
| 635 |
+
"""
|
| 636 |
+
|
| 637 |
+
return self.request("POST", url, data=data, json=json, **kwargs)
|
| 638 |
+
|
| 639 |
+
def put(self, url, data=None, **kwargs):
|
| 640 |
+
r"""Sends a PUT request. Returns :class:`Response` object.
|
| 641 |
+
|
| 642 |
+
:param url: URL for the new :class:`Request` object.
|
| 643 |
+
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
|
| 644 |
+
object to send in the body of the :class:`Request`.
|
| 645 |
+
:param \*\*kwargs: Optional arguments that ``request`` takes.
|
| 646 |
+
:rtype: requests.Response
|
| 647 |
+
"""
|
| 648 |
+
|
| 649 |
+
return self.request("PUT", url, data=data, **kwargs)
|
| 650 |
+
|
| 651 |
+
def patch(self, url, data=None, **kwargs):
|
| 652 |
+
r"""Sends a PATCH request. Returns :class:`Response` object.
|
| 653 |
+
|
| 654 |
+
:param url: URL for the new :class:`Request` object.
|
| 655 |
+
:param data: (optional) Dictionary, list of tuples, bytes, or file-like
|
| 656 |
+
object to send in the body of the :class:`Request`.
|
| 657 |
+
:param \*\*kwargs: Optional arguments that ``request`` takes.
|
| 658 |
+
:rtype: requests.Response
|
| 659 |
+
"""
|
| 660 |
+
|
| 661 |
+
return self.request("PATCH", url, data=data, **kwargs)
|
| 662 |
+
|
| 663 |
+
def delete(self, url, **kwargs):
|
| 664 |
+
r"""Sends a DELETE request. Returns :class:`Response` object.
|
| 665 |
+
|
| 666 |
+
:param url: URL for the new :class:`Request` object.
|
| 667 |
+
:param \*\*kwargs: Optional arguments that ``request`` takes.
|
| 668 |
+
:rtype: requests.Response
|
| 669 |
+
"""
|
| 670 |
+
|
| 671 |
+
return self.request("DELETE", url, **kwargs)
|
| 672 |
+
|
| 673 |
+
def send(self, request, **kwargs):
|
| 674 |
+
"""Send a given PreparedRequest.
|
| 675 |
+
|
| 676 |
+
:rtype: requests.Response
|
| 677 |
+
"""
|
| 678 |
+
# Set defaults that the hooks can utilize to ensure they always have
|
| 679 |
+
# the correct parameters to reproduce the previous request.
|
| 680 |
+
kwargs.setdefault("stream", self.stream)
|
| 681 |
+
kwargs.setdefault("verify", self.verify)
|
| 682 |
+
kwargs.setdefault("cert", self.cert)
|
| 683 |
+
if "proxies" not in kwargs:
|
| 684 |
+
kwargs["proxies"] = resolve_proxies(request, self.proxies, self.trust_env)
|
| 685 |
+
|
| 686 |
+
# It's possible that users might accidentally send a Request object.
|
| 687 |
+
# Guard against that specific failure case.
|
| 688 |
+
if isinstance(request, Request):
|
| 689 |
+
raise ValueError("You can only send PreparedRequests.")
|
| 690 |
+
|
| 691 |
+
# Set up variables needed for resolve_redirects and dispatching of hooks
|
| 692 |
+
allow_redirects = kwargs.pop("allow_redirects", True)
|
| 693 |
+
stream = kwargs.get("stream")
|
| 694 |
+
hooks = request.hooks
|
| 695 |
+
|
| 696 |
+
# Get the appropriate adapter to use
|
| 697 |
+
adapter = self.get_adapter(url=request.url)
|
| 698 |
+
|
| 699 |
+
# Start time (approximately) of the request
|
| 700 |
+
start = preferred_clock()
|
| 701 |
+
|
| 702 |
+
# Send the request
|
| 703 |
+
r = adapter.send(request, **kwargs)
|
| 704 |
+
|
| 705 |
+
# Total elapsed time of the request (approximately)
|
| 706 |
+
elapsed = preferred_clock() - start
|
| 707 |
+
r.elapsed = timedelta(seconds=elapsed)
|
| 708 |
+
|
| 709 |
+
# Response manipulation hooks
|
| 710 |
+
r = dispatch_hook("response", hooks, r, **kwargs)
|
| 711 |
+
|
| 712 |
+
# Persist cookies
|
| 713 |
+
if r.history:
|
| 714 |
+
# If the hooks create history then we want those cookies too
|
| 715 |
+
for resp in r.history:
|
| 716 |
+
extract_cookies_to_jar(self.cookies, resp.request, resp.raw)
|
| 717 |
+
|
| 718 |
+
extract_cookies_to_jar(self.cookies, request, r.raw)
|
| 719 |
+
|
| 720 |
+
# Resolve redirects if allowed.
|
| 721 |
+
if allow_redirects:
|
| 722 |
+
# Redirect resolving generator.
|
| 723 |
+
gen = self.resolve_redirects(r, request, **kwargs)
|
| 724 |
+
history = [resp for resp in gen]
|
| 725 |
+
else:
|
| 726 |
+
history = []
|
| 727 |
+
|
| 728 |
+
# Shuffle things around if there's history.
|
| 729 |
+
if history:
|
| 730 |
+
# Insert the first (original) request at the start
|
| 731 |
+
history.insert(0, r)
|
| 732 |
+
# Get the last request made
|
| 733 |
+
r = history.pop()
|
| 734 |
+
r.history = history
|
| 735 |
+
|
| 736 |
+
# If redirects aren't being followed, store the response on the Request for Response.next().
|
| 737 |
+
if not allow_redirects:
|
| 738 |
+
try:
|
| 739 |
+
r._next = next(
|
| 740 |
+
self.resolve_redirects(r, request, yield_requests=True, **kwargs)
|
| 741 |
+
)
|
| 742 |
+
except StopIteration:
|
| 743 |
+
pass
|
| 744 |
+
|
| 745 |
+
if not stream:
|
| 746 |
+
r.content
|
| 747 |
+
|
| 748 |
+
return r
|
| 749 |
+
|
| 750 |
+
def merge_environment_settings(self, url, proxies, stream, verify, cert):
|
| 751 |
+
"""
|
| 752 |
+
Check the environment and merge it with some settings.
|
| 753 |
+
|
| 754 |
+
:rtype: dict
|
| 755 |
+
"""
|
| 756 |
+
# Gather clues from the surrounding environment.
|
| 757 |
+
if self.trust_env:
|
| 758 |
+
# Set environment's proxies.
|
| 759 |
+
no_proxy = proxies.get("no_proxy") if proxies is not None else None
|
| 760 |
+
env_proxies = get_environ_proxies(url, no_proxy=no_proxy)
|
| 761 |
+
for k, v in env_proxies.items():
|
| 762 |
+
proxies.setdefault(k, v)
|
| 763 |
+
|
| 764 |
+
# Look for requests environment configuration
|
| 765 |
+
# and be compatible with cURL.
|
| 766 |
+
if verify is True or verify is None:
|
| 767 |
+
verify = (
|
| 768 |
+
os.environ.get("REQUESTS_CA_BUNDLE")
|
| 769 |
+
or os.environ.get("CURL_CA_BUNDLE")
|
| 770 |
+
or verify
|
| 771 |
+
)
|
| 772 |
+
|
| 773 |
+
# Merge all the kwargs.
|
| 774 |
+
proxies = merge_setting(proxies, self.proxies)
|
| 775 |
+
stream = merge_setting(stream, self.stream)
|
| 776 |
+
verify = merge_setting(verify, self.verify)
|
| 777 |
+
cert = merge_setting(cert, self.cert)
|
| 778 |
+
|
| 779 |
+
return {"proxies": proxies, "stream": stream, "verify": verify, "cert": cert}
|
| 780 |
+
|
| 781 |
+
def get_adapter(self, url):
|
| 782 |
+
"""
|
| 783 |
+
Returns the appropriate connection adapter for the given URL.
|
| 784 |
+
|
| 785 |
+
:rtype: requests.adapters.BaseAdapter
|
| 786 |
+
"""
|
| 787 |
+
for prefix, adapter in self.adapters.items():
|
| 788 |
+
if url.lower().startswith(prefix.lower()):
|
| 789 |
+
return adapter
|
| 790 |
+
|
| 791 |
+
# Nothing matches :-/
|
| 792 |
+
raise InvalidSchema(f"No connection adapters were found for {url!r}")
|
| 793 |
+
|
| 794 |
+
def close(self):
|
| 795 |
+
"""Closes all adapters and as such the session"""
|
| 796 |
+
for v in self.adapters.values():
|
| 797 |
+
v.close()
|
| 798 |
+
|
| 799 |
+
def mount(self, prefix, adapter):
|
| 800 |
+
"""Registers a connection adapter to a prefix.
|
| 801 |
+
|
| 802 |
+
Adapters are sorted in descending order by prefix length.
|
| 803 |
+
"""
|
| 804 |
+
self.adapters[prefix] = adapter
|
| 805 |
+
keys_to_move = [k for k in self.adapters if len(k) < len(prefix)]
|
| 806 |
+
|
| 807 |
+
for key in keys_to_move:
|
| 808 |
+
self.adapters[key] = self.adapters.pop(key)
|
| 809 |
+
|
| 810 |
+
def __getstate__(self):
|
| 811 |
+
state = {attr: getattr(self, attr, None) for attr in self.__attrs__}
|
| 812 |
+
return state
|
| 813 |
+
|
| 814 |
+
def __setstate__(self, state):
|
| 815 |
+
for attr, value in state.items():
|
| 816 |
+
setattr(self, attr, value)
|
| 817 |
+
|
| 818 |
+
|
| 819 |
+
def session():
    """
    Returns a :class:`Session` for context-management.

    .. deprecated:: 1.0.0

        This method has been deprecated since version 1.0.0 and is only kept for
        backwards compatibility. New code should use :class:`~requests.sessions.Session`
        to create a session. This may be removed at a future date.

    :rtype: Session
    """
    # Thin legacy wrapper: equivalent to instantiating Session directly.
    return Session()
|
wemm/lib/python3.10/site-packages/requests/status_codes.py
ADDED
|
@@ -0,0 +1,128 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
r"""
|
| 2 |
+
The ``codes`` object defines a mapping from common names for HTTP statuses
|
| 3 |
+
to their numerical codes, accessible either as attributes or as dictionary
|
| 4 |
+
items.
|
| 5 |
+
|
| 6 |
+
Example::
|
| 7 |
+
|
| 8 |
+
>>> import requests
|
| 9 |
+
>>> requests.codes['temporary_redirect']
|
| 10 |
+
307
|
| 11 |
+
>>> requests.codes.teapot
|
| 12 |
+
418
|
| 13 |
+
>>> requests.codes['\o/']
|
| 14 |
+
200
|
| 15 |
+
|
| 16 |
+
Some codes have multiple names, and both upper- and lower-case versions of
|
| 17 |
+
the names are allowed. For example, ``codes.ok``, ``codes.OK``, and
|
| 18 |
+
``codes.okay`` all correspond to the HTTP status code 200.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
from .structures import LookupDict
|
| 22 |
+
|
| 23 |
+
# Mapping of numeric HTTP status code -> tuple of alias names. ``_init()``
# below turns every alias (and, where sensible, its upper-case form) into an
# attribute on the ``codes`` lookup object.
_codes = {
    # Informational.
    100: ("continue",),
    101: ("switching_protocols",),
    102: ("processing", "early-hints"),
    103: ("checkpoint",),
    122: ("uri_too_long", "request_uri_too_long"),
    200: ("ok", "okay", "all_ok", "all_okay", "all_good", "\\o/", "✓"),
    201: ("created",),
    202: ("accepted",),
    203: ("non_authoritative_info", "non_authoritative_information"),
    204: ("no_content",),
    205: ("reset_content", "reset"),
    206: ("partial_content", "partial"),
    207: ("multi_status", "multiple_status", "multi_stati", "multiple_stati"),
    208: ("already_reported",),
    226: ("im_used",),
    # Redirection.
    300: ("multiple_choices",),
    301: ("moved_permanently", "moved", "\\o-"),
    302: ("found",),
    303: ("see_other", "other"),
    304: ("not_modified",),
    305: ("use_proxy",),
    306: ("switch_proxy",),
    307: ("temporary_redirect", "temporary_moved", "temporary"),
    308: (
        "permanent_redirect",
        "resume_incomplete",
        "resume",
    ),  # "resume" and "resume_incomplete" to be removed in 3.0
    # Client Error.
    400: ("bad_request", "bad"),
    401: ("unauthorized",),
    402: ("payment_required", "payment"),
    403: ("forbidden",),
    404: ("not_found", "-o-"),
    405: ("method_not_allowed", "not_allowed"),
    406: ("not_acceptable",),
    407: ("proxy_authentication_required", "proxy_auth", "proxy_authentication"),
    408: ("request_timeout", "timeout"),
    409: ("conflict",),
    410: ("gone",),
    411: ("length_required",),
    412: ("precondition_failed", "precondition"),
    413: ("request_entity_too_large", "content_too_large"),
    414: ("request_uri_too_large", "uri_too_long"),
    415: ("unsupported_media_type", "unsupported_media", "media_type"),
    416: (
        "requested_range_not_satisfiable",
        "requested_range",
        "range_not_satisfiable",
    ),
    417: ("expectation_failed",),
    418: ("im_a_teapot", "teapot", "i_am_a_teapot"),
    421: ("misdirected_request",),
    422: ("unprocessable_entity", "unprocessable", "unprocessable_content"),
    423: ("locked",),
    424: ("failed_dependency", "dependency"),
    425: ("unordered_collection", "unordered", "too_early"),
    426: ("upgrade_required", "upgrade"),
    # NOTE: "precondition" also appears under 412; _init() applies aliases in
    # dict order, so the later entry wins for the shared attribute name.
    428: ("precondition_required", "precondition"),
    429: ("too_many_requests", "too_many"),
    431: ("header_fields_too_large", "fields_too_large"),
    444: ("no_response", "none"),
    449: ("retry_with", "retry"),
    450: ("blocked_by_windows_parental_controls", "parental_controls"),
    451: ("unavailable_for_legal_reasons", "legal_reasons"),
    499: ("client_closed_request",),
    # Server Error.
    500: ("internal_server_error", "server_error", "/o\\", "✗"),
    501: ("not_implemented",),
    502: ("bad_gateway",),
    503: ("service_unavailable", "unavailable"),
    504: ("gateway_timeout",),
    505: ("http_version_not_supported", "http_version"),
    506: ("variant_also_negotiates",),
    507: ("insufficient_storage",),
    509: ("bandwidth_limit_exceeded", "bandwidth"),
    510: ("not_extended",),
    511: ("network_authentication_required", "network_auth", "network_authentication"),
}

# Public lookup object; populated by _init() below.
codes = LookupDict(name="status_codes")
|
| 107 |
+
|
| 108 |
+
|
| 109 |
+
def _init():
    """Populate ``codes`` with alias attributes and extend the module doc."""
    for status, names in _codes.items():
        for name in names:
            setattr(codes, name, status)
            # ASCII-art aliases like "\\o/" have no sensible upper-case form.
            if not name.startswith(("\\", "/")):
                setattr(codes, name.upper(), status)

    def doc(status):
        aliases = ", ".join(f"``{alias}``" for alias in _codes[status])
        return "* %d: %s" % (status, aliases)

    global __doc__
    if __doc__ is not None:
        # Append one bullet per status code to the module docstring.
        __doc__ = __doc__ + "\n" + "\n".join(doc(s) for s in sorted(_codes))


_init()
|
wemm/lib/python3.10/site-packages/requests/structures.py
ADDED
|
@@ -0,0 +1,99 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
requests.structures
|
| 3 |
+
~~~~~~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
Data structures that power Requests.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
from collections import OrderedDict
|
| 9 |
+
|
| 10 |
+
from .compat import Mapping, MutableMapping
|
| 11 |
+
|
| 12 |
+
|
| 13 |
+
class CaseInsensitiveDict(MutableMapping):
    """A ``dict``-like mapping with case-insensitive string keys.

    Implements the full ``MutableMapping`` interface plus dict's ``copy``
    and an extra ``lower_items`` helper.

    The structure remembers the case of the last key set, and iteration
    (``iter``, ``keys()``, ``items()``) yields those case-preserved keys,
    while lookups, deletion, and membership tests ignore case::

        cid = CaseInsensitiveDict()
        cid['Accept'] = 'application/json'
        cid['aCCEPT'] == 'application/json'  # True
        list(cid) == ['Accept']  # True

    For example, ``headers['content-encoding']`` returns the value of a
    ``'Content-Encoding'`` response header regardless of how the header
    name was originally stored.

    If the constructor, ``.update``, or equality comparison receive keys
    whose ``.lower()`` forms collide, the behavior is undefined.
    """

    def __init__(self, data=None, **kwargs):
        # Internal store maps lowercased key -> (original key, value).
        self._store = OrderedDict()
        self.update({} if data is None else data, **kwargs)

    def __setitem__(self, key, value):
        # Lookups use the lowercased key; the original-cased key is kept
        # alongside the value so iteration can reproduce it.
        self._store[key.lower()] = (key, value)

    def __getitem__(self, key):
        return self._store[key.lower()][1]

    def __delitem__(self, key):
        del self._store[key.lower()]

    def __iter__(self):
        for cased_key, _ in self._store.values():
            yield cased_key

    def __len__(self):
        return len(self._store)

    def lower_items(self):
        """Like iteritems(), but with all lowercase keys."""
        return ((lower, pair[1]) for lower, pair in self._store.items())

    def __eq__(self, other):
        if not isinstance(other, Mapping):
            return NotImplemented
        # Compare the lowercased projections of both mappings.
        normalized_other = CaseInsensitiveDict(other)
        return dict(self.lower_items()) == dict(normalized_other.lower_items())

    # Copy is required
    def copy(self):
        # The store's values are (cased key, value) pairs, which the
        # constructor accepts directly.
        return CaseInsensitiveDict(self._store.values())

    def __repr__(self):
        return str(dict(self.items()))
|
| 81 |
+
|
| 82 |
+
|
| 83 |
+
class LookupDict(dict):
    """Dictionary lookup object."""

    def __init__(self, name=None):
        # `name` is only used for the repr; lookups go through __dict__,
        # i.e. attributes set on the instance, not dict entries.
        self.name = name
        super().__init__()

    def __repr__(self):
        return "<lookup '%s'>" % self.name

    def __getitem__(self, key):
        # We allow fall-through here, so values default to None
        return self.__dict__.get(key, None)

    def get(self, key, default=None):
        return self.__dict__.get(key, default)
|
wemm/lib/python3.10/site-packages/requests/utils.py
ADDED
|
@@ -0,0 +1,1096 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
requests.utils
|
| 3 |
+
~~~~~~~~~~~~~~
|
| 4 |
+
|
| 5 |
+
This module provides utility functions that are used within Requests
|
| 6 |
+
that are also useful for external consumption.
|
| 7 |
+
"""
|
| 8 |
+
|
| 9 |
+
import codecs
|
| 10 |
+
import contextlib
|
| 11 |
+
import io
|
| 12 |
+
import os
|
| 13 |
+
import re
|
| 14 |
+
import socket
|
| 15 |
+
import struct
|
| 16 |
+
import sys
|
| 17 |
+
import tempfile
|
| 18 |
+
import warnings
|
| 19 |
+
import zipfile
|
| 20 |
+
from collections import OrderedDict
|
| 21 |
+
|
| 22 |
+
from urllib3.util import make_headers, parse_url
|
| 23 |
+
|
| 24 |
+
from . import certs
|
| 25 |
+
from .__version__ import __version__
|
| 26 |
+
|
| 27 |
+
# to_native_string is unused here, but imported here for backwards compatibility
|
| 28 |
+
from ._internal_utils import ( # noqa: F401
|
| 29 |
+
_HEADER_VALIDATORS_BYTE,
|
| 30 |
+
_HEADER_VALIDATORS_STR,
|
| 31 |
+
HEADER_VALIDATORS,
|
| 32 |
+
to_native_string,
|
| 33 |
+
)
|
| 34 |
+
from .compat import (
|
| 35 |
+
Mapping,
|
| 36 |
+
basestring,
|
| 37 |
+
bytes,
|
| 38 |
+
getproxies,
|
| 39 |
+
getproxies_environment,
|
| 40 |
+
integer_types,
|
| 41 |
+
)
|
| 42 |
+
from .compat import parse_http_list as _parse_list_header
|
| 43 |
+
from .compat import (
|
| 44 |
+
proxy_bypass,
|
| 45 |
+
proxy_bypass_environment,
|
| 46 |
+
quote,
|
| 47 |
+
str,
|
| 48 |
+
unquote,
|
| 49 |
+
urlparse,
|
| 50 |
+
urlunparse,
|
| 51 |
+
)
|
| 52 |
+
from .cookies import cookiejar_from_dict
|
| 53 |
+
from .exceptions import (
|
| 54 |
+
FileModeWarning,
|
| 55 |
+
InvalidHeader,
|
| 56 |
+
InvalidURL,
|
| 57 |
+
UnrewindableBodyError,
|
| 58 |
+
)
|
| 59 |
+
from .structures import CaseInsensitiveDict
|
| 60 |
+
|
| 61 |
+
NETRC_FILES = (".netrc", "_netrc")
|
| 62 |
+
|
| 63 |
+
DEFAULT_CA_BUNDLE_PATH = certs.where()
|
| 64 |
+
|
| 65 |
+
DEFAULT_PORTS = {"http": 80, "https": 443}
|
| 66 |
+
|
| 67 |
+
# Ensure that ', ' is used to preserve previous delimiter behavior.
|
| 68 |
+
DEFAULT_ACCEPT_ENCODING = ", ".join(
|
| 69 |
+
re.split(r",\s*", make_headers(accept_encoding=True)["accept-encoding"])
|
| 70 |
+
)
|
| 71 |
+
|
| 72 |
+
|
| 73 |
+
if sys.platform == "win32":
    # provide a proxy_bypass version on Windows without DNS lookups

    def proxy_bypass_registry(host):
        """Check the Windows registry's Internet Settings proxy-override
        list and return True if *host* matches one of its entries."""
        try:
            import winreg
        except ImportError:
            return False

        try:
            internetSettings = winreg.OpenKey(
                winreg.HKEY_CURRENT_USER,
                r"Software\Microsoft\Windows\CurrentVersion\Internet Settings",
            )
            # ProxyEnable could be REG_SZ or REG_DWORD, normalizing it
            proxyEnable = int(winreg.QueryValueEx(internetSettings, "ProxyEnable")[0])
            # ProxyOverride is almost always a string
            proxyOverride = winreg.QueryValueEx(internetSettings, "ProxyOverride")[0]
        except (OSError, ValueError):
            # Missing key/value or malformed data: treat as "no bypass".
            return False
        if not proxyEnable or not proxyOverride:
            return False

        # make a check value list from the registry entry: replace the
        # '<local>' string by the localhost entry and the corresponding
        # canonical entry.
        proxyOverride = proxyOverride.split(";")
        # filter out empty strings to avoid re.match return true in the following code.
        proxyOverride = filter(None, proxyOverride)
        # now check if we match one of the registry values.
        for test in proxyOverride:
            if test == "<local>":
                # '<local>' means: bypass for bare (dot-less) host names.
                if "." not in host:
                    return True
            # Translate the registry glob pattern into a regex.
            test = test.replace(".", r"\.")  # mask dots
            test = test.replace("*", r".*")  # change glob sequence
            test = test.replace("?", r".")  # change glob char
            if re.match(test, host, re.I):
                return True
        return False

    def proxy_bypass(host):  # noqa
        """Return True, if the host should be bypassed.

        Checks proxy settings gathered from the environment, if specified,
        or the registry.
        """
        # Environment proxy settings take precedence over the registry.
        if getproxies_environment():
            return proxy_bypass_environment(host)
        else:
            return proxy_bypass_registry(host)
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
def dict_to_sequence(d):
    """Return *d* as a sequence of pairs: mappings become their ``items()``
    view, anything else is returned unchanged."""
    if hasattr(d, "items"):
        return d.items()
    return d
|
| 133 |
+
|
| 134 |
+
|
| 135 |
+
def super_len(o):
    """Best-effort remaining length of *o* in bytes.

    Handles strings (measured by their UTF-8 encoding), anything with
    ``__len__`` or a ``len`` attribute, real files (via ``fstat``), and
    seekable streams. The current read position is subtracted, so the
    result is the number of bytes left to read; 0 when the length cannot
    be determined. Never negative.
    """
    total_length = None
    current_position = 0

    # Strings are measured as the UTF-8 bytes that would be transmitted.
    if isinstance(o, str):
        o = o.encode("utf-8")

    if hasattr(o, "__len__"):
        total_length = len(o)

    elif hasattr(o, "len"):
        total_length = o.len

    elif hasattr(o, "fileno"):
        try:
            fileno = o.fileno()
        except (io.UnsupportedOperation, AttributeError):
            # AttributeError is a surprising exception, seeing as how we've just checked
            # that `hasattr(o, 'fileno')`. It happens for objects obtained via
            # `Tarfile.extractfile()`, per issue 5229.
            pass
        else:
            total_length = os.fstat(fileno).st_size

            # fstat reports the binary size; warn when the file was opened
            # in text mode, where the decoded length may differ.
            if "b" not in o.mode:
                warnings.warn(
                    (
                        "Requests has determined the content-length for this "
                        "request using the binary size of the file: however, the "
                        "file has been opened in text mode (i.e. without the 'b' "
                        "flag in the mode). This may lead to an incorrect "
                        "content-length. In Requests 3.0, support will be removed "
                        "for files in text mode."
                    ),
                    FileModeWarning,
                )

    if hasattr(o, "tell"):
        try:
            current_position = o.tell()
        except OSError:
            # Special descriptors such as stdin can't tell(); pretend the
            # stream is fully consumed so requests falls back to chunking.
            if total_length is not None:
                current_position = total_length
        else:
            if hasattr(o, "seek") and total_length is None:
                # StringIO and BytesIO have seek but no usable fileno:
                # measure by seeking to the end and restoring the position.
                try:
                    o.seek(0, 2)
                    total_length = o.tell()

                    # seek back to current position to support
                    # partially read file-like objects
                    o.seek(current_position or 0)
                except OSError:
                    total_length = 0

    if total_length is None:
        total_length = 0

    return max(0, total_length - current_position)
|
| 202 |
+
|
| 203 |
+
|
| 204 |
+
def get_netrc_auth(url, raise_errors=False):
    """Returns the Requests tuple auth for a given url from netrc.

    Looks up the url's host in ``$NETRC`` (if set) or ``~/.netrc`` /
    ``~/_netrc``, returning ``(login, password)`` or ``None``.
    """

    netrc_env = os.environ.get("NETRC")
    if netrc_env is not None:
        candidate_paths = (netrc_env,)
    else:
        candidate_paths = (f"~/{fname}" for fname in NETRC_FILES)

    try:
        from netrc import NetrcParseError, netrc

        netrc_path = None

        for candidate in candidate_paths:
            try:
                expanded = os.path.expanduser(candidate)
            except KeyError:
                # os.path.expanduser can fail when $HOME is undefined and
                # getpwuid fails. See https://bugs.python.org/issue20164 &
                # https://github.com/psf/requests/issues/1846
                return

            if os.path.exists(expanded):
                netrc_path = expanded
                break

        # Abort early if there isn't one.
        if netrc_path is None:
            return

        ri = urlparse(url)

        # Strip any port number from the netloc; keep the bytes/str split
        # token in sync with the url's type.
        splitstr = b":"
        if isinstance(url, str):
            splitstr = splitstr.decode("ascii")
        host = ri.netloc.split(splitstr)[0]

        try:
            creds = netrc(netrc_path).authenticators(host)
            if creds:
                # Prefer the login field, falling back to the account field.
                login_i = 0 if creds[0] else 1
                return (creds[login_i], creds[2])
        except (NetrcParseError, OSError):
            # If there was a parsing error or a permissions issue reading the
            # file, skip netrc auth unless explicitly asked to raise errors.
            if raise_errors:
                raise

    # App Engine hackiness.
    except (ImportError, AttributeError):
        pass
|
| 259 |
+
|
| 260 |
+
|
| 261 |
+
def guess_filename(obj):
    """Tries to guess the filename of the given object."""
    name = getattr(obj, "name", None)
    # Only usable, truthy string names qualify.
    if not name or not isinstance(name, basestring):
        return None
    # Pseudo-files such as "<stdin>" expose bracketed names that are not
    # real paths; those yield no filename.
    if name[0] == "<" or name[-1] == ">":
        return None
    return os.path.basename(name)
|
| 266 |
+
|
| 267 |
+
|
| 268 |
+
def extract_zipped_paths(path):
    """Replace nonexistent paths that look like they refer to a member of a zip
    archive with the location of an extracted copy of the target, or else
    just return the provided path unchanged.

    :param path: filesystem path, possibly pointing inside a zip archive.
    :return: the original path, or the path of a copy extracted to the
        temporary directory.
    """
    if os.path.exists(path):
        # this is already a valid path, no need to do anything further
        return path

    # find the first valid part of the provided path and treat that as a zip archive
    # assume the rest of the path is the name of a member in the archive
    archive, member = os.path.split(path)
    while archive and not os.path.exists(archive):
        archive, prefix = os.path.split(archive)
        if not prefix:
            # If we don't check for an empty prefix after the split (in other words, archive remains unchanged after the split),
            # we _can_ end up in an infinite loop on a rare corner case affecting a small number of users
            break
        # Re-attach the stripped component to the member path.
        member = "/".join([prefix, member])

    if not zipfile.is_zipfile(archive):
        return path

    zip_file = zipfile.ZipFile(archive)
    if member not in zip_file.namelist():
        return path

    # we have a valid zip archive and a valid member of that archive
    tmp = tempfile.gettempdir()
    # Only the basename is kept: the member is flattened into the tmp dir.
    extracted_path = os.path.join(tmp, member.split("/")[-1])
    if not os.path.exists(extracted_path):
        # use read + write to avoid the creating nested folders, we only want the file, avoids mkdir racing condition
        with atomic_open(extracted_path) as file_handler:
            file_handler.write(zip_file.read(member))
    return extracted_path
|
| 303 |
+
|
| 304 |
+
|
| 305 |
+
@contextlib.contextmanager
def atomic_open(filename):
    """Write a file to the disk in an atomic fashion"""
    # Stage all writes in a temporary sibling file, then rename over the
    # destination: os.replace is atomic within one filesystem.
    fd, staging_path = tempfile.mkstemp(dir=os.path.dirname(filename))
    try:
        with os.fdopen(fd, "wb") as staging_file:
            yield staging_file
        os.replace(staging_path, filename)
    except BaseException:
        # Any failure (including in the caller's `with` body) discards the
        # partially written staging file before propagating.
        os.remove(staging_path)
        raise
|
| 316 |
+
|
| 317 |
+
|
| 318 |
+
def from_key_val_list(value):
    """Convert *value* into an :class:`OrderedDict` of key/value pairs.

    ``None`` passes through unchanged; scalar types are rejected because
    they cannot be interpreted as 2-tuples; anything dict-like or made of
    2-tuples is handed to ``OrderedDict``.

    >>> from_key_val_list([('key', 'val')])
    OrderedDict([('key', 'val')])
    >>> from_key_val_list({'key': 'val'})
    OrderedDict([('key', 'val')])

    :rtype: OrderedDict
    """
    if value is None:
        return None
    if isinstance(value, (str, bytes, bool, int)):
        # A scalar has no key/value structure at all.
        raise ValueError("cannot encode objects that are not 2-tuples")
    return OrderedDict(value)
|
| 343 |
+
|
| 344 |
+
|
| 345 |
+
def to_key_val_list(value):
    """Convert *value* into a list of ``(key, value)`` tuples.

    ``None`` passes through unchanged and scalar types are rejected;
    mappings are expanded via ``.items()`` first.

    >>> to_key_val_list({'key': 'val'})
    [('key', 'val')]

    :rtype: list
    """
    if value is None:
        return None
    if isinstance(value, (str, bytes, bool, int)):
        raise ValueError("cannot encode objects that are not 2-tuples")
    pairs = value.items() if isinstance(value, Mapping) else value
    return list(pairs)
|
| 372 |
+
|
| 373 |
+
|
| 374 |
+
# From mitsuhiko/werkzeug (used with permission).
def parse_list_header(value):
    """Parse lists as described by RFC 2068 Section 2.

    Comma-separated elements may be quoted-strings (which can themselves
    contain commas); surrounding quotes are stripped from each parsed item
    and item order plus case are preserved.

    >>> parse_list_header('token, "quoted value"')
    ['token', 'quoted value']

    :param value: a string with a list header.
    :return: :class:`list`
    :rtype: list
    """
    return [
        unquote_header_value(item[1:-1]) if item[:1] == item[-1:] == '"' else item
        for item in _parse_list_header(value)
    ]
|
| 404 |
+
|
| 405 |
+
|
| 406 |
+
# From mitsuhiko/werkzeug (used with permission).
def parse_dict_header(value):
    """Parse lists of key, value pairs as described by RFC 2068 Section 2 and
    convert them into a python dict.

    Items without an ``=`` map to ``None``; quoted values are unquoted.

    >>> parse_dict_header('key_without_value')
    {'key_without_value': None}

    :param value: a string with a dict header.
    :return: :class:`dict`
    :rtype: dict
    """
    result = {}
    for item in _parse_list_header(value):
        name, sep, val = item.partition("=")
        if not sep:
            # A bare token carries no value at all.
            result[item] = None
            continue
        if val[:1] == val[-1:] == '"':
            val = unquote_header_value(val[1:-1])
        result[name] = val
    return result
|
| 439 |
+
|
| 440 |
+
|
| 441 |
+
# From mitsuhiko/werkzeug (used with permission).
def unquote_header_value(value, is_filename=False):
    r"""Unquotes a header value. (Reversal of :func:`quote_header_value`).

    Mirrors what browsers actually do rather than the RFC: surrounding
    quotes are dropped and backslash escapes undone, except that a quoted
    UNC filename (leading ``\\``) is returned verbatim because collapsing
    its leading double backslash breaks IE-style uploads. See #458.

    :param value: the header value to unquote.
    :rtype: str
    """
    if not value or not (value[0] == value[-1] == '"'):
        # Unquoted (or empty) values pass through untouched.
        return value
    inner = value[1:-1]
    if is_filename and inner[:2] == "\\\\":
        return inner
    return inner.replace("\\\\", "\\").replace('\\"', '"')
|
| 465 |
+
|
| 466 |
+
|
| 467 |
+
def dict_from_cookiejar(cj):
    """Returns a key/value dictionary from a CookieJar.

    :param cj: CookieJar object to extract cookies from.
    :rtype: dict
    """
    # Later cookies win on name collisions, exactly as a dict
    # comprehension over the jar would behave.
    result = {}
    for cookie in cj:
        result[cookie.name] = cookie.value
    return result
|
| 476 |
+
|
| 477 |
+
|
| 478 |
+
def add_dict_to_cookiejar(cj, cookie_dict):
    """Returns a CookieJar from a key/value dictionary.

    Thin wrapper delegating to :func:`requests.cookies.cookiejar_from_dict`,
    which inserts the entries into the supplied jar in place.

    :param cj: CookieJar to insert cookies into.
    :param cookie_dict: Dict of key/values to insert into CookieJar.
    :rtype: CookieJar
    """

    return cookiejar_from_dict(cookie_dict, cj)
|
| 487 |
+
|
| 488 |
+
|
| 489 |
+
def get_encodings_from_content(content):
    """Returns encodings from given content string.

    Collects encodings declared via ``<meta charset>``, the
    ``http-equiv`` pragma form, and the XML declaration, in that order.

    :param content: bytestring to extract encodings from.
    """
    warnings.warn(
        (
            "In requests 3.0, get_encodings_from_content will be removed. For "
            "more information, please see the discussion on issue #2266. (This"
            " warning should only appear once.)"
        ),
        DeprecationWarning,
    )

    # Order matters: meta charset first, then pragma, then XML declaration.
    patterns = (
        re.compile(r'<meta.*?charset=["\']*(.+?)["\'>]', flags=re.I),
        re.compile(r'<meta.*?content=["\']*;?charset=(.+?)["\'>]', flags=re.I),
        re.compile(r'^<\?xml.*?encoding=["\']*(.+?)["\'>]'),
    )
    found = []
    for pattern in patterns:
        found.extend(pattern.findall(content))
    return found
|
| 512 |
+
|
| 513 |
+
|
| 514 |
+
def _parse_content_type_header(header):
    """Returns content type and parameters from given header

    :param header: string
    :return: tuple containing content type and dictionary of
         parameters
    """
    # Everything before the first ';' is the media type; the rest are
    # ';'-separated parameters.
    content_type, _, remainder = header.partition(";")
    strip_chars = "\"' "
    params_dict = {}

    for param in remainder.split(";"):
        param = param.strip()
        if not param:
            continue
        key, eq, value = param.partition("=")
        if eq:
            params_dict[key.strip(strip_chars).lower()] = value.strip(strip_chars)
        else:
            # A bare token (no '=') is recorded as a boolean flag.
            params_dict[param.lower()] = True
    return content_type.strip(), params_dict
|
| 537 |
+
|
| 538 |
+
|
| 539 |
+
def get_encoding_from_headers(headers):
    """Returns encodings from given HTTP Header Dict.

    :param headers: dictionary to extract encoding from.
    :rtype: str
    """
    content_type = headers.get("content-type")
    if not content_type:
        return None

    content_type, params = _parse_content_type_header(content_type)

    # An explicit charset parameter always wins.
    if "charset" in params:
        return params["charset"].strip("'\"")

    if "text" in content_type:
        # HTTP/1.1 historical default for text/* with no charset.
        return "ISO-8859-1"

    if "application/json" in content_type:
        # Assume UTF-8 based on RFC 4627: https://www.ietf.org/rfc/rfc4627.txt since the charset was unset
        return "utf-8"

    return None
|
| 562 |
+
|
| 563 |
+
|
| 564 |
+
def stream_decode_response_unicode(iterator, r):
    """Stream decodes an iterator."""
    encoding = r.encoding
    if encoding is None:
        # No declared encoding: pass chunks through untouched.
        yield from iterator
        return

    decoder = codecs.getincrementaldecoder(encoding)(errors="replace")
    for chunk in iterator:
        decoded = decoder.decode(chunk)
        if decoded:
            yield decoded
    # Flush whatever partial sequence the decoder is still buffering.
    tail = decoder.decode(b"", final=True)
    if tail:
        yield tail
|
| 579 |
+
|
| 580 |
+
|
| 581 |
+
def iter_slices(string, slice_length):
    """Iterate over slices of a string."""
    total = len(string)
    if slice_length is None or slice_length <= 0:
        # Degenerate step: yield the whole string as one slice.
        slice_length = total
    start = 0
    while start < total:
        yield string[start : start + slice_length]
        start += slice_length
|
| 589 |
+
|
| 590 |
+
|
| 591 |
+
def get_unicode_from_response(r):
    """Returns the requested content back in unicode.

    :param r: Response object to get unicode content from.

    Tried:

    1. charset from content-type
    2. fall back and replace all unicode characters

    :rtype: str
    """
    warnings.warn(
        (
            "In requests 3.0, get_unicode_from_response will be removed. For "
            "more information, please see the discussion on issue #2266. (This"
            " warning should only appear once.)"
        ),
        DeprecationWarning,
    )

    # First choice: the encoding advertised in the Content-Type header.
    encoding = get_encoding_from_headers(r.headers)
    if encoding:
        try:
            return str(r.content, encoding)
        except UnicodeError:
            pass

    # Fall back: decode with replacement characters. If no encoding was
    # found at all, str() raises TypeError and the raw bytes are returned.
    try:
        return str(r.content, encoding, errors="replace")
    except TypeError:
        return r.content
|
| 628 |
+
|
| 629 |
+
|
| 630 |
+
# The unreserved URI characters (RFC 3986): ALPHA / DIGIT / "-" / "." / "_" / "~"
UNRESERVED_SET = frozenset(
    "abcdefghijklmnopqrstuvwxyz"
    "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
    "0123456789-._~"
)
|
| 634 |
+
|
| 635 |
+
|
| 636 |
+
def unquote_unreserved(uri):
    """Un-escape any percent-escape sequences in a URI that are unreserved
    characters. This leaves all reserved, illegal and non-ASCII bytes encoded.

    :rtype: str
    """
    pieces = uri.split("%")
    # pieces[0] precedes the first '%'; every later piece starts with what
    # should be the two hex digits of an escape sequence.
    for idx in range(1, len(pieces)):
        piece = pieces[idx]
        hex_digits = piece[0:2]
        if len(hex_digits) != 2 or not hex_digits.isalnum():
            # Not a well-formed escape: keep the literal '%'.
            pieces[idx] = f"%{piece}"
            continue
        try:
            decoded = chr(int(hex_digits, 16))
        except ValueError:
            raise InvalidURL(f"Invalid percent-escape sequence: '{hex_digits}'")
        if decoded in UNRESERVED_SET:
            pieces[idx] = decoded + piece[2:]
        else:
            pieces[idx] = f"%{piece}"
    return "".join(pieces)
|
| 658 |
+
|
| 659 |
+
|
| 660 |
+
def requote_uri(uri):
    """Re-quote the given URI.

    This function passes the given URI through an unquote/quote cycle to
    ensure that it is fully and consistently quoted.

    :rtype: str
    """
    try:
        # Unquote only the unreserved characters, then quote only illegal
        # characters (reserved characters, unreserved characters and '%'
        # stay untouched).
        return quote(unquote_unreserved(uri), safe="!#$%&'()*+,/:;=?@[]~")
    except InvalidURL:
        # We couldn't unquote the given URI, so let's try quoting it, but
        # there may be unquoted '%'s in the URI. We need to make sure they're
        # properly quoted so they do not cause issues elsewhere.
        return quote(uri, safe="!#$&'()*+,/:;=?@[]~")
|
| 680 |
+
|
| 681 |
+
|
| 682 |
+
def address_in_network(ip, net):
    """This function allows you to check if an IP belongs to a network subnet

    Example: returns True if ip = 192.168.1.1 and net = 192.168.1.0/24
             returns False if ip = 192.168.1.1 and net = 192.168.100.0/24

    :rtype: bool
    """
    netaddr, bits = net.split("/")
    # Convert everything to native-order 32-bit integers for mask arithmetic.
    mask = struct.unpack("=L", socket.inet_aton(dotted_netmask(int(bits))))[0]
    network = struct.unpack("=L", socket.inet_aton(netaddr))[0] & mask
    address = struct.unpack("=L", socket.inet_aton(ip))[0]
    return (address & mask) == (network & mask)
|
| 695 |
+
|
| 696 |
+
|
| 697 |
+
def dotted_netmask(mask):
    """Converts mask from /xx format to xxx.xxx.xxx.xxx

    Example: if mask is 24 function returns 255.255.255.0

    :rtype: str
    """
    # Low (32 - mask) bits set; XOR with all-ones leaves the top `mask`
    # bits set, i.e. the netmask.
    host_bits = (1 << (32 - mask)) - 1
    return socket.inet_ntoa(struct.pack(">I", 0xFFFFFFFF ^ host_bits))
|
| 706 |
+
|
| 707 |
+
|
| 708 |
+
def is_ipv4_address(string_ip):
    """Check whether *string_ip* parses as an IPv4 address.

    :rtype: bool
    """
    # inet_aton raises OSError for anything it can't parse as IPv4.
    try:
        socket.inet_aton(string_ip)
        return True
    except OSError:
        return False
|
| 717 |
+
|
| 718 |
+
|
| 719 |
+
def is_valid_cidr(string_network):
    """
    Very simple check of the cidr format in no_proxy variable.

    :rtype: bool
    """
    # Exactly one '/' is required.
    if string_network.count("/") != 1:
        return False

    address, _, mask_text = string_network.partition("/")
    try:
        mask = int(mask_text)
    except ValueError:
        return False
    if not 1 <= mask <= 32:
        return False

    try:
        socket.inet_aton(address)
    except OSError:
        return False
    return True
|
| 741 |
+
|
| 742 |
+
|
| 743 |
+
@contextlib.contextmanager
def set_environ(env_name, value):
    """Set the environment variable 'env_name' to 'value'

    Save previous value, yield, and then restore the previous value stored in
    the environment variable 'env_name'.

    If 'value' is None, do nothing"""
    if value is None:
        # No change requested: just run the body.
        yield
        return

    previous = os.environ.get(env_name)
    os.environ[env_name] = value
    try:
        yield
    finally:
        # Restore (or remove) the variable even when the body raised.
        if previous is None:
            del os.environ[env_name]
        else:
            os.environ[env_name] = previous
|
| 763 |
+
|
| 764 |
+
|
| 765 |
+
def should_bypass_proxies(url, no_proxy):
    """
    Returns whether we should bypass proxies or not.

    Checks the explicit ``no_proxy`` argument (or, when None, the
    ``no_proxy``/``NO_PROXY`` environment variables) against the URL's
    hostname, then falls back to the platform's proxy_bypass machinery.

    :param url: the URL being requested.
    :param no_proxy: comma-separated hosts/CIDRs to bypass, or None to
        read the environment instead.
    :rtype: bool
    """

    # Prioritize lowercase environment variables over uppercase
    # to keep a consistent behaviour with other http projects (curl, wget).
    def get_proxy(key):
        return os.environ.get(key) or os.environ.get(key.upper())

    # First check whether no_proxy is defined. If it is, check that the URL
    # we're getting isn't in the no_proxy list.
    no_proxy_arg = no_proxy
    if no_proxy is None:
        no_proxy = get_proxy("no_proxy")
    parsed = urlparse(url)

    if parsed.hostname is None:
        # URLs don't always have hostnames, e.g. file:/// urls.
        return True

    if no_proxy:
        # We need to check whether we match here. We need to see if we match
        # the end of the hostname, both with and without the port.
        no_proxy = (host for host in no_proxy.replace(" ", "").split(",") if host)

        if is_ipv4_address(parsed.hostname):
            for proxy_ip in no_proxy:
                if is_valid_cidr(proxy_ip):
                    if address_in_network(parsed.hostname, proxy_ip):
                        return True
                elif parsed.hostname == proxy_ip:
                    # If no_proxy ip was defined in plain IP notation instead of cidr notation &
                    # matches the IP of the index
                    return True
        else:
            host_with_port = parsed.hostname
            if parsed.port:
                host_with_port += f":{parsed.port}"

            for host in no_proxy:
                if parsed.hostname.endswith(host) or host_with_port.endswith(host):
                    # The URL does match something in no_proxy, so we don't want
                    # to apply the proxies on this URL.
                    return True

    # Temporarily expose the caller's no_proxy value to the stdlib
    # proxy_bypass helper, which reads it from the environment.
    with set_environ("no_proxy", no_proxy_arg):
        # parsed.hostname can be `None` in cases such as a file URI.
        try:
            bypass = proxy_bypass(parsed.hostname)
        except (TypeError, socket.gaierror):
            bypass = False

    if bypass:
        return True

    return False
|
| 824 |
+
|
| 825 |
+
|
| 826 |
+
def get_environ_proxies(url, no_proxy=None):
    """
    Return a dict of environment proxies.

    :rtype: dict
    """
    # URLs excluded by no_proxy get an empty mapping (no proxying at all).
    if should_bypass_proxies(url, no_proxy=no_proxy):
        return {}
    return getproxies()
|
| 836 |
+
|
| 837 |
+
|
| 838 |
+
def select_proxy(url, proxies):
    """Select a proxy for the url, if applicable.

    :param url: The url being for the request
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    """
    proxies = proxies or {}
    parts = urlparse(url)
    if parts.hostname is None:
        # Hostless URLs (e.g. file://) can only match on scheme.
        return proxies.get(parts.scheme, proxies.get("all"))

    # Most specific key first: scheme+host, scheme, any-scheme+host, any.
    candidates = (
        f"{parts.scheme}://{parts.hostname}",
        parts.scheme,
        f"all://{parts.hostname}",
        "all",
    )
    for key in candidates:
        if key in proxies:
            return proxies[key]
    return None
|
| 862 |
+
|
| 863 |
+
|
| 864 |
+
def resolve_proxies(request, proxies, trust_env=True):
    """This method takes proxy information from a request and configuration
    input to resolve a mapping of target proxies. This will consider settings
    such as NO_PROXY to strip proxy configurations.

    :param request: Request or PreparedRequest
    :param proxies: A dictionary of schemes or schemes and hosts to proxy URLs
    :param trust_env: Boolean declaring whether to trust environment configs

    :rtype: dict
    """
    proxies = proxies if proxies is not None else {}
    url = request.url
    scheme = urlparse(url).scheme
    no_proxy = proxies.get("no_proxy")
    # Work on a copy so the caller's mapping is never mutated.
    new_proxies = proxies.copy()

    if trust_env and not should_bypass_proxies(url, no_proxy=no_proxy):
        environ_proxies = get_environ_proxies(url, no_proxy=no_proxy)

        proxy = environ_proxies.get(scheme, environ_proxies.get("all"))

        if proxy:
            # setdefault: an explicitly supplied proxy always wins over
            # one discovered in the environment.
            new_proxies.setdefault(scheme, proxy)
    return new_proxies
|
| 889 |
+
|
| 890 |
+
|
| 891 |
+
def default_user_agent(name="python-requests"):
    """
    Return a string representing the default user agent.

    :rtype: str
    """
    # e.g. "python-requests/2.31.0"
    return "/".join((name, __version__))
|
| 898 |
+
|
| 899 |
+
|
| 900 |
+
def default_headers():
    """
    Build the default header set sent with every request.

    :rtype: requests.structures.CaseInsensitiveDict
    """
    headers = CaseInsensitiveDict()
    # Insertion order is preserved on the wire, so keep this ordering.
    headers["User-Agent"] = default_user_agent()
    headers["Accept-Encoding"] = DEFAULT_ACCEPT_ENCODING
    headers["Accept"] = "*/*"
    headers["Connection"] = "keep-alive"
    return headers
|
| 912 |
+
|
| 913 |
+
|
| 914 |
+
def parse_header_links(value):
    """Return a list of parsed link headers proxies.

    i.e. Link: <http:/.../front.jpeg>; rel=front; type="image/jpeg",<http://.../back.jpeg>; rel=back;type="image/jpeg"

    :rtype: list
    """
    links = []
    strip_chars = " '\""

    value = value.strip(strip_chars)
    if not value:
        return links

    for segment in re.split(", *<", value):
        # Split off the URL part; anything after the first ';' is params.
        url, _, raw_params = segment.partition(";")
        link = {"url": url.strip("<> '\"")}

        for param in raw_params.split(";"):
            pieces = param.split("=")
            if len(pieces) != 2:
                # Historical behaviour: stop at the first parameter that
                # isn't exactly `key=value`.
                break
            key, val = pieces
            link[key.strip(strip_chars)] = val.strip(strip_chars)

        links.append(link)

    return links
|
| 949 |
+
|
| 950 |
+
|
| 951 |
+
# Null bytes; no need to recreate these on each call to guess_json_utf
_null = b"\x00"
_null2 = _null * 2
_null3 = _null * 3
|
| 955 |
+
|
| 956 |
+
|
| 957 |
+
def guess_json_utf(data):
    """Guess the Unicode encoding of a JSON byte string.

    JSON always starts with two ASCII characters, so detection is as easy
    as counting the NUL bytes in the first four bytes and noting where they
    sit; a BOM, when present, is detected first.

    :param data: the JSON document as bytes.
    :return: a codec name, or ``None`` when no encoding could be detected.
    :rtype: str or None
    """
    sample = data[:4]
    if sample in (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE):
        return "utf-32"  # BOM included
    if sample[:3] == codecs.BOM_UTF8:
        return "utf-8-sig"  # BOM included, MS style (discouraged)
    if sample[:2] in (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE):
        return "utf-16"  # BOM included
    nullcount = sample.count(_null)
    if nullcount == 0:
        return "utf-8"
    if nullcount == 2:
        if sample[::2] == _null2:  # 1st and 3rd are null
            return "utf-16-be"
        if sample[1::2] == _null2:  # 2nd and 4th are null
            return "utf-16-le"
        # Did not detect 2 valid UTF-16 ascii-range characters
    if nullcount == 3:
        if sample[:3] == _null3:
            return "utf-32-be"
        if sample[1:] == _null3:
            return "utf-32-le"
        # Did not detect a valid UTF-32 ascii-range character
    return None
|
| 987 |
+
|
| 988 |
+
|
| 989 |
+
def prepend_scheme_if_needed(url, new_scheme):
    """Given a URL that may or may not have a scheme, prepend the given scheme.
    Does not replace a present scheme with the one provided as an argument.

    :param url: the (possibly scheme-less) URL.
    :param new_scheme: scheme to use when the URL has none.
    :rtype: str
    """
    # parse_url here is urllib3's parser, not urllib.parse.
    parsed = parse_url(url)
    scheme, auth, host, port, path, query, fragment = parsed

    # A defect in urlparse determines that there isn't a netloc present in some
    # urls. We previously assumed parsing was overly cautious, and swapped the
    # netloc and path. Due to a lack of tests on the original defect, this is
    # maintained with parse_url for backwards compatibility.
    netloc = parsed.netloc
    if not netloc:
        netloc, path = path, netloc

    if auth:
        # parse_url doesn't provide the netloc with auth
        # so we'll add it ourselves.
        netloc = "@".join([auth, netloc])
    if scheme is None:
        scheme = new_scheme
    if path is None:
        path = ""

    # params ("") is dropped; urlunparse reassembles the rest.
    return urlunparse((scheme, netloc, path, "", query, fragment))
|
| 1016 |
+
|
| 1017 |
+
|
| 1018 |
+
def get_auth_from_url(url):
    """Given a url with authentication components, extract them into a tuple of
    username,password.

    :rtype: (str,str)
    """
    parts = urlparse(url)
    try:
        # unquote(None) raises TypeError when either component is missing;
        # a malformed parse can also surface as AttributeError.
        return (unquote(parts.username), unquote(parts.password))
    except (AttributeError, TypeError):
        return ("", "")
|
| 1032 |
+
|
| 1033 |
+
|
| 1034 |
+
def check_header_validity(header):
    """Verifies that header parts don't contain leading whitespace
    reserved characters, or return characters.

    Raises InvalidHeader (via _validate_header_part) when either part
    fails validation; index 0 selects the name validator, 1 the value
    validator.

    :param header: tuple, in the format (name, value).
    """
    name, value = header
    _validate_header_part(header, name, 0)
    _validate_header_part(header, value, 1)
|
| 1043 |
+
|
| 1044 |
+
|
| 1045 |
+
def _validate_header_part(header, header_part, header_validator_index):
    """Validate one half of a header tuple against the compiled validators.

    :param header: the full (name, value) tuple, used only for error text.
    :param header_part: the piece being validated (str or bytes).
    :param header_validator_index: 0 for the name validator, 1 for value.
    :raises InvalidHeader: on a non-string part or a validation failure.
    """
    # Pick the str- or bytes-flavoured validator to match the part's type.
    if isinstance(header_part, str):
        validator = _HEADER_VALIDATORS_STR[header_validator_index]
    elif isinstance(header_part, bytes):
        validator = _HEADER_VALIDATORS_BYTE[header_validator_index]
    else:
        raise InvalidHeader(
            f"Header part ({header_part!r}) from {header} "
            f"must be of type str or bytes, not {type(header_part)}"
        )

    if not validator.match(header_part):
        header_kind = "name" if header_validator_index == 0 else "value"
        raise InvalidHeader(
            f"Invalid leading whitespace, reserved character(s), or return "
            f"character(s) in header {header_kind}: {header_part!r}"
        )
|
| 1062 |
+
|
| 1063 |
+
|
| 1064 |
+
def urldefragauth(url):
    """
    Given a url remove the fragment and the authentication part.

    :rtype: str
    """
    parts = urlparse(url)
    netloc, path = parts.netloc, parts.path

    # see func:`prepend_scheme_if_needed` -- a scheme-less URL parses with an
    # empty netloc and the authority landing in ``path``; swap them back.
    if not netloc:
        netloc, path = path, netloc

    # Drop everything up to (and including) the last "@" in the authority.
    host = netloc.rsplit("@", 1)[-1]

    return urlunparse((parts.scheme, host, path, parts.params, parts.query, ""))
|
| 1079 |
+
|
| 1080 |
+
|
| 1081 |
+
def rewind_body(prepared_request):
    """Move file pointer back to its recorded starting position
    so it can be read again on redirect.

    Raises ``UnrewindableBodyError`` when the body has no ``seek`` method,
    no integer start position was recorded, or the seek itself fails.
    """
    body_seek = getattr(prepared_request.body, "seek", None)
    position = prepared_request._body_position

    # Guard clause: without a seekable body AND a recorded integer offset
    # there is nothing we can safely rewind to.
    if body_seek is None or not isinstance(position, integer_types):
        raise UnrewindableBodyError("Unable to rewind request body for redirect.")

    try:
        body_seek(position)
    except OSError:
        raise UnrewindableBodyError(
            "An error occurred when rewinding request body for redirect."
        )
|
wemm/lib/python3.10/site-packages/safetensors/__init__.py
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Re-export this
|
| 2 |
+
from ._safetensors_rust import ( # noqa: F401
|
| 3 |
+
SafetensorError,
|
| 4 |
+
__version__,
|
| 5 |
+
deserialize,
|
| 6 |
+
safe_open,
|
| 7 |
+
serialize,
|
| 8 |
+
serialize_file,
|
| 9 |
+
)
|
wemm/lib/python3.10/site-packages/safetensors/__init__.pyi
ADDED
|
@@ -0,0 +1,149 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Generated content DO NOT EDIT
|
| 2 |
+
# NOTE(review): module-level `@staticmethod` is a stub-generator artifact; it is
# never executed in a .pyi, but the fix belongs in the generator (file is
# marked "Generated content DO NOT EDIT").
@staticmethod
def deserialize(bytes):
    """
    Opens a safetensors lazily and returns tensors as asked

    Args:
        bytes (`bytes`):
            The byte content of a file

    Returns:
        (`List[str, Dict[str, Dict[str, any]]]`):
            The deserialized content is like:
                [("tensor_name", {"shape": [2, 3], "dtype": "F32", "data": b"\0\0.." }), (...)]
    """
    # Stub only: the real implementation lives in the compiled
    # `_safetensors_rust` extension.  The parameter shadows the builtin
    # `bytes`; cannot rename without breaking keyword callers.
    pass
|
| 17 |
+
|
| 18 |
+
# NOTE(review): module-level `@staticmethod` is a stub-generator artifact; it is
# never executed in a .pyi, but the fix belongs in the generator.
@staticmethod
def serialize(tensor_dict, metadata=None):
    """
    Serializes raw data.

    Args:
        tensor_dict (`Dict[str, Dict[Any]]`):
            The tensor dict is like:
                {"tensor_name": {"dtype": "F32", "shape": [2, 3], "data": b"\0\0"}}
        metadata (`Dict[str, str]`, *optional*):
            The optional purely text annotations

    Returns:
        (`bytes`):
            The serialized content.
    """
    # Stub only: implemented in the compiled `_safetensors_rust` extension.
    pass
|
| 35 |
+
|
| 36 |
+
# NOTE(review): module-level `@staticmethod` is a stub-generator artifact; it is
# never executed in a .pyi, but the fix belongs in the generator.
@staticmethod
def serialize_file(tensor_dict, filename, metadata=None):
    """
    Serializes raw data.

    Args:
        tensor_dict (`Dict[str, Dict[Any]]`):
            The tensor dict is like:
                {"tensor_name": {"dtype": "F32", "shape": [2, 3], "data": b"\0\0"}}
        filename (`str`, or `os.PathLike`):
            The name of the file to write into.
        metadata (`Dict[str, str]`, *optional*):
            The optional purely text annotations

    Returns:
        (`bytes`):
            The serialized content.
    """
    # NOTE(review): the Returns section looks copied from `serialize`; this
    # variant writes to *filename* and presumably returns None — confirm
    # against the Rust implementation before relying on a return value.
    pass
|
| 55 |
+
|
| 56 |
+
class safe_open:
    """
    Opens a safetensors lazily and returns tensors as asked

    Args:
        filename (`str`, or `os.PathLike`):
            The filename to open

        framework (`str`):
            The framework you want your tensors in. Supported values:
            `pt`, `tf`, `flax`, `numpy`.

        device (`str`, defaults to `"cpu"`):
            The device on which you want the tensors.
    """

    def __init__(self, filename, framework, device=...):
        # Stub only: the real class is implemented in the compiled
        # `_safetensors_rust` extension.
        pass

    def __enter__(self):
        """
        Start the context manager
        """
        pass

    def __exit__(self, _exc_type, _exc_value, _traceback):
        """
        Exits the context manager
        """
        pass

    def get_slice(self, name):
        """
        Returns a full slice view object

        Args:
            name (`str`):
                The name of the tensor you want

        Returns:
            (`PySafeSlice`):
                A dummy object you can slice into to get a real tensor
        Example:
        ```python
        from safetensors import safe_open

        with safe_open("model.safetensors", framework="pt", device=0) as f:
            tensor_part = f.get_slice("embedding")[:, ::8]

        ```
        """
        pass

    def get_tensor(self, name):
        """
        Returns a full tensor

        Args:
            name (`str`):
                The name of the tensor you want

        Returns:
            (`Tensor`):
                The tensor in the framework you opened the file for.

        Example:
        ```python
        from safetensors import safe_open

        with safe_open("model.safetensors", framework="pt", device=0) as f:
            tensor = f.get_tensor("embedding")

        ```
        """
        pass

    def keys(self):
        """
        Returns the names of the tensors in the file.

        Returns:
            (`List[str]`):
                The name of the tensors contained in that file
        """
        pass

    def metadata(self):
        """
        Return the special non tensor information in the header

        Returns:
            (`Dict[str, str]`):
                The freeform metadata.
        """
        pass
|
| 145 |
+
|
| 146 |
+
class SafetensorError(Exception):
    """
    Custom Python Exception for Safetensor errors.
    """
|
wemm/lib/python3.10/site-packages/safetensors/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (327 Bytes). View file
|
|
|
wemm/lib/python3.10/site-packages/safetensors/__pycache__/flax.cpython-310.pyc
ADDED
|
Binary file (4.28 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/safetensors/__pycache__/mlx.cpython-310.pyc
ADDED
|
Binary file (4.3 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/safetensors/__pycache__/numpy.cpython-310.pyc
ADDED
|
Binary file (5.44 kB). View file
|
|
|
wemm/lib/python3.10/site-packages/safetensors/__pycache__/paddle.cpython-310.pyc
ADDED
|
Binary file (4.53 kB). View file
|
|
|